58 files changed, 2953 insertions, 1575 deletions
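The patch below replaces the shell git-fetch with a C builtin and routes both fetch and push through a new transport layer (transport.o, walker.o and bundle.o in the Makefile hunks). As a rough orientation before the diff itself, here is a minimal C sketch of the push side of that API; the helper name push_one_remote is hypothetical, but every transport_* call and TRANS_OPT_* constant is taken from the builtin-push.c hunk in this patch (transport.h itself is part of the commit but is not reproduced in this excerpt):

#include "cache.h"
#include "remote.h"
#include "transport.h"

static int push_one_remote(const char *name, int refspec_nr,
			   const char **refspec, int flags)
{
	struct remote *remote = remote_get(name);
	int i, errs = 0;

	if (!remote)
		die("bad repository '%s'", name);

	for (i = 0; i < remote->url_nr; i++) {
		struct transport *transport =
			transport_get(remote, remote->url[i]);
		int err;

		/* options now travel with the transport, not argv */
		transport_set_option(transport, TRANS_OPT_THIN, "yes");

		err = transport_push(transport, refspec_nr, refspec, flags);
		err |= transport_disconnect(transport);
		if (err) {
			error("failed to push to '%s'", remote->url[i]);
			errs++;
		}
	}
	return !!errs;
}

The flags argument is the same bitmask that cmd_push assembles from --all, --dry-run and --force (TRANSPORT_PUSH_ALL, TRANSPORT_PUSH_DRY_RUN, TRANSPORT_PUSH_FORCE) in the builtin-push.c hunk below.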
diff --git a/Documentation/config.txt b/Documentation/config.txt index d4a476e2ff..edf50cd211 100644 --- a/Documentation/config.txt +++ b/Documentation/config.txt @@ -324,10 +324,11 @@ branch.<name>.remote:: If this option is not given, `git fetch` defaults to remote "origin". branch.<name>.merge:: - When in branch <name>, it tells `git fetch` the default refspec to - be marked for merging in FETCH_HEAD. The value has exactly to match - a remote part of one of the refspecs which are fetched from the remote - given by "branch.<name>.remote". + When in branch <name>, it tells `git fetch` the default + refspec to be marked for merging in FETCH_HEAD. The value is + handled like the remote part of a refspec, and must match a + ref which is fetched from the remote given by + "branch.<name>.remote". The merge information is used by `git pull` (which at first calls `git fetch`) to lookup the default branch for merging. Without this option, `git pull` defaults to merge the first refspec fetched. diff --git a/Documentation/git-http-push.txt b/Documentation/git-http-push.txt index 9afb860381..3a69b719b5 100644 --- a/Documentation/git-http-push.txt +++ b/Documentation/git-http-push.txt @@ -8,7 +8,7 @@ git-http-push - Push objects over HTTP/DAV to another repository SYNOPSIS -------- -'git-http-push' [--all] [--force] [--verbose] <url> <ref> [<ref>...] +'git-http-push' [--all] [--dry-run] [--force] [--verbose] <url> <ref> [<ref>...] DESCRIPTION ----------- @@ -30,6 +30,9 @@ OPTIONS the remote repository can lose commits; use it with care. +--dry-run:: + Do everything except actually send the updates. + --verbose:: Report the list of objects being walked locally and the list of objects successfully sent to the remote repository. @@ -38,6 +38,8 @@ all:: # # Define NO_SETENV if you don't have setenv in the C library. # +# Define NO_MKDTEMP if you don't have mkdtemp in the C library. +# # Define NO_SYMLINK_HEAD if you never want .git/HEAD to be a symbolic link. # Enable it on Windows. By default, symrefs are still used. # @@ -208,7 +210,6 @@ BASIC_LDFLAGS = SCRIPT_SH = \ git-bisect.sh git-checkout.sh \ git-clean.sh git-clone.sh git-commit.sh \ - git-fetch.sh \ git-ls-remote.sh \ git-merge-one-file.sh git-mergetool.sh git-parse-remote.sh \ git-pull.sh git-rebase.sh git-rebase--interactive.sh \ @@ -235,14 +236,14 @@ SCRIPTS = $(patsubst %.sh,%,$(SCRIPT_SH)) \ # ... and all the rest that could be moved out of bindir to gitexecdir PROGRAMS = \ git-fetch-pack$X \ - git-hash-object$X git-index-pack$X git-local-fetch$X \ + git-hash-object$X git-index-pack$X \ git-fast-import$X \ git-daemon$X \ git-merge-index$X git-mktag$X git-mktree$X git-patch-id$X \ git-peek-remote$X git-receive-pack$X \ git-send-pack$X git-shell$X \ - git-show-index$X git-ssh-fetch$X \ - git-ssh-upload$X git-unpack-file$X \ + git-show-index$X \ + git-unpack-file$X \ git-update-server-info$X \ git-upload-pack$X \ git-pack-redundant$X git-var$X \ @@ -270,9 +271,6 @@ ifndef NO_TCLTK OTHER_PROGRAMS += gitk-wish endif -# Backward compatibility -- to be removed after 1.0 -PROGRAMS += git-ssh-pull$X git-ssh-push$X - # Set paths to tools early so that they can be used for version tests. 
ifndef SHELL_PATH SHELL_PATH = /bin/sh @@ -292,7 +290,7 @@ LIB_H = \ run-command.h strbuf.h tag.h tree.h git-compat-util.h revision.h \ tree-walk.h log-tree.h dir.h path-list.h unpack-trees.h builtin.h \ utf8.h reflog-walk.h patch-ids.h attr.h decorate.h progress.h \ - mailmap.h remote.h + mailmap.h remote.h transport.h DIFF_OBJS = \ diff.o diff-lib.o diffcore-break.o diffcore-order.o \ @@ -314,7 +312,8 @@ LIB_OBJS = \ write_or_die.o trace.o list-objects.o grep.o match-trees.o \ alloc.o merge-file.o path-list.o help.o unpack-trees.o $(DIFF_OBJS) \ color.o wt-status.o archive-zip.o archive-tar.o shallow.o utf8.o \ - convert.o attr.o decorate.o progress.o mailmap.o symlinks.o remote.o + convert.o attr.o decorate.o progress.o mailmap.o symlinks.o remote.o \ + transport.o bundle.o walker.o BUILTIN_OBJS = \ builtin-add.o \ @@ -335,6 +334,8 @@ BUILTIN_OBJS = \ builtin-diff-files.o \ builtin-diff-index.o \ builtin-diff-tree.o \ + builtin-fetch.o \ + builtin-fetch-pack.o \ builtin-fetch--tool.o \ builtin-fmt-merge-msg.o \ builtin-for-each-ref.o \ @@ -416,12 +417,14 @@ ifeq ($(uname_S),SunOS) NEEDS_LIBICONV = YesPlease NO_UNSETENV = YesPlease NO_SETENV = YesPlease + NO_MKDTEMP = YesPlease NO_C99_FORMAT = YesPlease NO_STRTOUMAX = YesPlease endif ifeq ($(uname_R),5.9) NO_UNSETENV = YesPlease NO_SETENV = YesPlease + NO_MKDTEMP = YesPlease NO_C99_FORMAT = YesPlease NO_STRTOUMAX = YesPlease endif @@ -518,7 +521,9 @@ else CC_LD_DYNPATH = -R endif -ifndef NO_CURL +ifdef NO_CURL + BASIC_CFLAGS += -DNO_CURL +else ifdef CURLDIR # Try "-Wl,-rpath=$(CURLDIR)/$(lib)" in such a case. BASIC_CFLAGS += -I$(CURLDIR)/include @@ -526,7 +531,9 @@ ifndef NO_CURL else CURL_LIBCURL = -lcurl endif - PROGRAMS += git-http-fetch$X + BUILTIN_OBJS += builtin-http-fetch.o + EXTLIBS += $(CURL_LIBCURL) + LIB_OBJS += http.o http-walker.o curl_check := $(shell (echo 070908; curl-config --vernum) | sort -r | sed -ne 2p) ifeq "$(curl_check)" "070908" ifndef NO_EXPAT @@ -608,6 +615,10 @@ ifdef NO_SETENV COMPAT_CFLAGS += -DNO_SETENV COMPAT_OBJS += compat/setenv.o endif +ifdef NO_MKDTEMP + COMPAT_CFLAGS += -DNO_MKDTEMP + COMPAT_OBJS += compat/mkdtemp.o +endif ifdef NO_UNSETENV COMPAT_CFLAGS += -DNO_UNSETENV COMPAT_OBJS += compat/unsetenv.o @@ -889,33 +900,22 @@ http.o: http.c GIT-CFLAGS $(QUIET_CC)$(CC) -o $*.o -c $(ALL_CFLAGS) -DGIT_USER_AGENT='"git/$(GIT_VERSION)"' $< ifdef NO_EXPAT -http-fetch.o: http-fetch.c http.h GIT-CFLAGS +http-walker.o: http-walker.c http.h GIT-CFLAGS $(QUIET_CC)$(CC) -o $*.o -c $(ALL_CFLAGS) -DNO_EXPAT $< endif git-%$X: %.o $(GITLIBS) $(QUIET_LINK)$(CC) $(ALL_CFLAGS) -o $@ $(ALL_LDFLAGS) $(filter %.o,$^) $(LIBS) -ssh-pull.o: ssh-fetch.c -ssh-push.o: ssh-upload.c -git-local-fetch$X: fetch.o -git-ssh-fetch$X: rsh.o fetch.o -git-ssh-upload$X: rsh.o -git-ssh-pull$X: rsh.o fetch.o -git-ssh-push$X: rsh.o - git-imap-send$X: imap-send.o $(LIB_FILE) -http.o http-fetch.o http-push.o: http.h -git-http-fetch$X: fetch.o http.o http-fetch.o $(GITLIBS) - $(QUIET_LINK)$(CC) $(ALL_CFLAGS) -o $@ $(ALL_LDFLAGS) $(filter %.o,$^) \ - $(LIBS) $(CURL_LIBCURL) $(EXPAT_LIBEXPAT) +http.o http-walker.o http-push.o: http.h git-http-push$X: revision.o http.o http-push.o $(GITLIBS) $(QUIET_LINK)$(CC) $(ALL_CFLAGS) -o $@ $(ALL_LDFLAGS) $(filter %.o,$^) \ $(LIBS) $(CURL_LIBCURL) $(EXPAT_LIBEXPAT) -$(LIB_OBJS) $(BUILTIN_OBJS) fetch.o: $(LIB_H) +$(LIB_OBJS) $(BUILTIN_OBJS): $(LIB_H) $(patsubst git-%$X,%.o,$(PROGRAMS)): $(LIB_H) $(wildcard */*.h) $(DIFF_OBJS): diffcore.h @@ -1131,8 +1131,7 @@ check-docs:: git-merge-octopus | git-merge-ours | 
git-merge-recursive | \ git-merge-resolve | git-merge-stupid | \ git-add--interactive | git-fsck-objects | git-init-db | \ - git-repo-config | git-fetch--tool | \ - git-ssh-pull | git-ssh-push ) continue ;; \ + git-repo-config | git-fetch--tool ) continue ;; \ esac ; \ test -f "Documentation/$$v.txt" || \ echo "no doc: $$v"; \ diff --git a/builtin-bundle.c b/builtin-bundle.c index 1b650069c9..9f38e2176a 100644 --- a/builtin-bundle.c +++ b/builtin-bundle.c @@ -1,11 +1,6 @@ #include "builtin.h" #include "cache.h" -#include "object.h" -#include "commit.h" -#include "diff.h" -#include "revision.h" -#include "list-objects.h" -#include "run-command.h" +#include "bundle.h" /* * Basic handler for bundle files to connect repositories via sneakernet. @@ -16,355 +11,6 @@ static const char *bundle_usage="git-bundle (create <bundle> <git-rev-list args> | verify <bundle> | list-heads <bundle> [refname]... | unbundle <bundle> [refname]... )"; -static const char bundle_signature[] = "# v2 git bundle\n"; - -struct ref_list { - unsigned int nr, alloc; - struct ref_list_entry { - unsigned char sha1[20]; - char *name; - } *list; -}; - -static void add_to_ref_list(const unsigned char *sha1, const char *name, - struct ref_list *list) -{ - if (list->nr + 1 >= list->alloc) { - list->alloc = alloc_nr(list->nr + 1); - list->list = xrealloc(list->list, - list->alloc * sizeof(list->list[0])); - } - memcpy(list->list[list->nr].sha1, sha1, 20); - list->list[list->nr].name = xstrdup(name); - list->nr++; -} - -struct bundle_header { - struct ref_list prerequisites; - struct ref_list references; -}; - -/* returns an fd */ -static int read_header(const char *path, struct bundle_header *header) { - char buffer[1024]; - int fd; - long fpos; - FILE *ffd = fopen(path, "rb"); - - if (!ffd) - return error("could not open '%s'", path); - if (!fgets(buffer, sizeof(buffer), ffd) || - strcmp(buffer, bundle_signature)) { - fclose(ffd); - return error("'%s' does not look like a v2 bundle file", path); - } - while (fgets(buffer, sizeof(buffer), ffd) - && buffer[0] != '\n') { - int is_prereq = buffer[0] == '-'; - int offset = is_prereq ? 1 : 0; - int len = strlen(buffer); - unsigned char sha1[20]; - struct ref_list *list = is_prereq ? &header->prerequisites - : &header->references; - char delim; - - if (buffer[len - 1] == '\n') - buffer[len - 1] = '\0'; - if (get_sha1_hex(buffer + offset, sha1)) { - warning("unrecognized header: %s", buffer); - continue; - } - delim = buffer[40 + offset]; - if (!isspace(delim) && (delim != '\0' || !is_prereq)) - die ("invalid header: %s", buffer); - add_to_ref_list(sha1, isspace(delim) ? 
- buffer + 41 + offset : "", list); - } - fpos = ftell(ffd); - fclose(ffd); - fd = open(path, O_RDONLY); - if (fd < 0) - return error("could not open '%s'", path); - lseek(fd, fpos, SEEK_SET); - return fd; -} - -static int list_refs(struct ref_list *r, int argc, const char **argv) -{ - int i; - - for (i = 0; i < r->nr; i++) { - if (argc > 1) { - int j; - for (j = 1; j < argc; j++) - if (!strcmp(r->list[i].name, argv[j])) - break; - if (j == argc) - continue; - } - printf("%s %s\n", sha1_to_hex(r->list[i].sha1), - r->list[i].name); - } - return 0; -} - -#define PREREQ_MARK (1u<<16) - -static int verify_bundle(struct bundle_header *header, int verbose) -{ - /* - * Do fast check, then if any prereqs are missing then go line by line - * to be verbose about the errors - */ - struct ref_list *p = &header->prerequisites; - struct rev_info revs; - const char *argv[] = {NULL, "--all"}; - struct object_array refs; - struct commit *commit; - int i, ret = 0, req_nr; - const char *message = "Repository lacks these prerequisite commits:"; - - init_revisions(&revs, NULL); - for (i = 0; i < p->nr; i++) { - struct ref_list_entry *e = p->list + i; - struct object *o = parse_object(e->sha1); - if (o) { - o->flags |= PREREQ_MARK; - add_pending_object(&revs, o, e->name); - continue; - } - if (++ret == 1) - error(message); - error("%s %s", sha1_to_hex(e->sha1), e->name); - } - if (revs.pending.nr != p->nr) - return ret; - req_nr = revs.pending.nr; - setup_revisions(2, argv, &revs, NULL); - - memset(&refs, 0, sizeof(struct object_array)); - for (i = 0; i < revs.pending.nr; i++) { - struct object_array_entry *e = revs.pending.objects + i; - add_object_array(e->item, e->name, &refs); - } - - prepare_revision_walk(&revs); - - i = req_nr; - while (i && (commit = get_revision(&revs))) - if (commit->object.flags & PREREQ_MARK) - i--; - - for (i = 0; i < req_nr; i++) - if (!(refs.objects[i].item->flags & SHOWN)) { - if (++ret == 1) - error(message); - error("%s %s", sha1_to_hex(refs.objects[i].item->sha1), - refs.objects[i].name); - } - - for (i = 0; i < refs.nr; i++) - clear_commit_marks((struct commit *)refs.objects[i].item, -1); - - if (verbose) { - struct ref_list *r; - - r = &header->references; - printf("The bundle contains %d ref%s\n", - r->nr, (1 < r->nr) ? "s" : ""); - list_refs(r, 0, NULL); - r = &header->prerequisites; - printf("The bundle requires these %d ref%s\n", - r->nr, (1 < r->nr) ? 
"s" : ""); - list_refs(r, 0, NULL); - } - return ret; -} - -static int list_heads(struct bundle_header *header, int argc, const char **argv) -{ - return list_refs(&header->references, argc, argv); -} - -static int create_bundle(struct bundle_header *header, const char *path, - int argc, const char **argv) -{ - static struct lock_file lock; - int bundle_fd = -1; - int bundle_to_stdout; - const char **argv_boundary = xmalloc((argc + 4) * sizeof(const char *)); - const char **argv_pack = xmalloc(5 * sizeof(const char *)); - int i, ref_count = 0; - char buffer[1024]; - struct rev_info revs; - struct child_process rls; - FILE *rls_fout; - - bundle_to_stdout = !strcmp(path, "-"); - if (bundle_to_stdout) - bundle_fd = 1; - else - bundle_fd = hold_lock_file_for_update(&lock, path, 1); - - /* write signature */ - write_or_die(bundle_fd, bundle_signature, strlen(bundle_signature)); - - /* init revs to list objects for pack-objects later */ - save_commit_buffer = 0; - init_revisions(&revs, NULL); - - /* write prerequisites */ - memcpy(argv_boundary + 3, argv + 1, argc * sizeof(const char *)); - argv_boundary[0] = "rev-list"; - argv_boundary[1] = "--boundary"; - argv_boundary[2] = "--pretty=oneline"; - argv_boundary[argc + 2] = NULL; - memset(&rls, 0, sizeof(rls)); - rls.argv = argv_boundary; - rls.out = -1; - rls.git_cmd = 1; - if (start_command(&rls)) - return -1; - rls_fout = fdopen(rls.out, "r"); - while (fgets(buffer, sizeof(buffer), rls_fout)) { - unsigned char sha1[20]; - if (buffer[0] == '-') { - write_or_die(bundle_fd, buffer, strlen(buffer)); - if (!get_sha1_hex(buffer + 1, sha1)) { - struct object *object = parse_object(sha1); - object->flags |= UNINTERESTING; - add_pending_object(&revs, object, buffer); - } - } else if (!get_sha1_hex(buffer, sha1)) { - struct object *object = parse_object(sha1); - object->flags |= SHOWN; - } - } - fclose(rls_fout); - if (finish_command(&rls)) - return error("rev-list died"); - - /* write references */ - argc = setup_revisions(argc, argv, &revs, NULL); - if (argc > 1) - return error("unrecognized argument: %s'", argv[1]); - - for (i = 0; i < revs.pending.nr; i++) { - struct object_array_entry *e = revs.pending.objects + i; - unsigned char sha1[20]; - char *ref; - - if (e->item->flags & UNINTERESTING) - continue; - if (dwim_ref(e->name, strlen(e->name), sha1, &ref) != 1) - continue; - /* - * Make sure the refs we wrote out is correct; --max-count and - * other limiting options could have prevented all the tips - * from getting output. - * - * Non commit objects such as tags and blobs do not have - * this issue as they are not affected by those extra - * constraints. - */ - if (!(e->item->flags & SHOWN) && e->item->type == OBJ_COMMIT) { - warning("ref '%s' is excluded by the rev-list options", - e->name); - free(ref); - continue; - } - /* - * If you run "git bundle create bndl v1.0..v2.0", the - * name of the positive ref is "v2.0" but that is the - * commit that is referenced by the tag, and not the tag - * itself. - */ - if (hashcmp(sha1, e->item->sha1)) { - /* - * Is this the positive end of a range expressed - * in terms of a tag (e.g. v2.0 from the range - * "v1.0..v2.0")? - */ - struct commit *one = lookup_commit_reference(sha1); - struct object *obj; - - if (e->item == &(one->object)) { - /* - * Need to include e->name as an - * independent ref to the pack-objects - * input, so that the tag is included - * in the output; otherwise we would - * end up triggering "empty bundle" - * error. 
- */ - obj = parse_object(sha1); - obj->flags |= SHOWN; - add_pending_object(&revs, obj, e->name); - } - free(ref); - continue; - } - - ref_count++; - write_or_die(bundle_fd, sha1_to_hex(e->item->sha1), 40); - write_or_die(bundle_fd, " ", 1); - write_or_die(bundle_fd, ref, strlen(ref)); - write_or_die(bundle_fd, "\n", 1); - free(ref); - } - if (!ref_count) - die ("Refusing to create empty bundle."); - - /* end header */ - write_or_die(bundle_fd, "\n", 1); - - /* write pack */ - argv_pack[0] = "pack-objects"; - argv_pack[1] = "--all-progress"; - argv_pack[2] = "--stdout"; - argv_pack[3] = "--thin"; - argv_pack[4] = NULL; - memset(&rls, 0, sizeof(rls)); - rls.argv = argv_pack; - rls.in = -1; - rls.out = bundle_fd; - rls.git_cmd = 1; - if (start_command(&rls)) - return error("Could not spawn pack-objects"); - for (i = 0; i < revs.pending.nr; i++) { - struct object *object = revs.pending.objects[i].item; - if (object->flags & UNINTERESTING) - write(rls.in, "^", 1); - write(rls.in, sha1_to_hex(object->sha1), 40); - write(rls.in, "\n", 1); - } - if (finish_command(&rls)) - return error ("pack-objects died"); - close(bundle_fd); - if (!bundle_to_stdout) - commit_lock_file(&lock); - return 0; -} - -static int unbundle(struct bundle_header *header, int bundle_fd, - int argc, const char **argv) -{ - const char *argv_index_pack[] = {"index-pack", - "--fix-thin", "--stdin", NULL}; - struct child_process ip; - - if (verify_bundle(header, 0)) - return -1; - memset(&ip, 0, sizeof(ip)); - ip.argv = argv_index_pack; - ip.in = bundle_fd; - ip.no_stdout = 1; - ip.git_cmd = 1; - if (run_command(&ip)) - return error("index-pack died"); - return list_heads(header, argc, argv); -} - int cmd_bundle(int argc, const char **argv, const char *prefix) { struct bundle_header header; @@ -388,8 +34,8 @@ int cmd_bundle(int argc, const char **argv, const char *prefix) } memset(&header, 0, sizeof(header)); - if (strcmp(cmd, "create") && - (bundle_fd = read_header(bundle_file, &header)) < 0) + if (strcmp(cmd, "create") && (bundle_fd = + read_bundle_header(bundle_file, &header)) < 0) return 1; if (!strcmp(cmd, "verify")) { @@ -401,7 +47,7 @@ int cmd_bundle(int argc, const char **argv, const char *prefix) } if (!strcmp(cmd, "list-heads")) { close(bundle_fd); - return !!list_heads(&header, argc, argv); + return !!list_bundle_refs(&header, argc, argv); } if (!strcmp(cmd, "create")) { if (nongit) @@ -410,7 +56,8 @@ int cmd_bundle(int argc, const char **argv, const char *prefix) } else if (!strcmp(cmd, "unbundle")) { if (nongit) die("Need a repository to unbundle."); - return !!unbundle(&header, bundle_fd, argc, argv); + return !!unbundle(&header, bundle_fd) || + list_bundle_refs(&header, argc, argv); } else usage(bundle_usage); } diff --git a/fetch-pack.c b/builtin-fetch-pack.c index 9c81305be5..8f25d509a0 100644 --- a/fetch-pack.c +++ b/builtin-fetch-pack.c @@ -6,16 +6,13 @@ #include "exec_cmd.h" #include "pack.h" #include "sideband.h" +#include "fetch-pack.h" -static int keep_pack; static int transfer_unpack_limit = -1; static int fetch_unpack_limit = -1; static int unpack_limit = 100; -static int quiet; -static int verbose; -static int fetch_all; -static int depth; -static int no_progress; +static struct fetch_pack_args args; + static const char fetch_pack_usage[] = "git-fetch-pack [--all] [--quiet|-q] [--keep|-k] [--thin] [--upload-pack=<git-upload-pack>] [--depth=<n>] [--no-progress] [-v] [<host>:]<directory> [<refs>...]"; static const char *uploadpack = "git-upload-pack"; @@ -180,7 +177,7 @@ static int find_common(int 
fd[2], unsigned char *result_sha1, (use_sideband == 2 ? " side-band-64k" : ""), (use_sideband == 1 ? " side-band" : ""), (use_thin_pack ? " thin-pack" : ""), - (no_progress ? " no-progress" : ""), + (args.no_progress ? " no-progress" : ""), " ofs-delta"); else packet_write(fd[1], "want %s\n", sha1_to_hex(remote)); @@ -188,13 +185,13 @@ static int find_common(int fd[2], unsigned char *result_sha1, } if (is_repository_shallow()) write_shallow_commits(fd[1], 1); - if (depth > 0) - packet_write(fd[1], "deepen %d", depth); + if (args.depth > 0) + packet_write(fd[1], "deepen %d", args.depth); packet_flush(fd[1]); if (!fetching) return 1; - if (depth > 0) { + if (args.depth > 0) { char line[1024]; unsigned char sha1[20]; int len; @@ -225,7 +222,7 @@ static int find_common(int fd[2], unsigned char *result_sha1, retval = -1; while ((sha1 = get_rev())) { packet_write(fd[1], "have %s\n", sha1_to_hex(sha1)); - if (verbose) + if (args.verbose) fprintf(stderr, "have %s\n", sha1_to_hex(sha1)); in_vain++; if (!(31 & ++count)) { @@ -243,7 +240,7 @@ static int find_common(int fd[2], unsigned char *result_sha1, do { ack = get_ack(fd[0], result_sha1); - if (verbose && ack) + if (args.verbose && ack) fprintf(stderr, "got ack %d %s\n", ack, sha1_to_hex(result_sha1)); if (ack == 1) { @@ -262,7 +259,7 @@ static int find_common(int fd[2], unsigned char *result_sha1, } while (ack); flushes--; if (got_continue && MAX_IN_VAIN < in_vain) { - if (verbose) + if (args.verbose) fprintf(stderr, "giving up\n"); break; /* give up */ } @@ -270,7 +267,7 @@ static int find_common(int fd[2], unsigned char *result_sha1, } done: packet_write(fd[1], "done\n"); - if (verbose) + if (args.verbose) fprintf(stderr, "done\n"); if (retval != 0) { multi_ack = 0; @@ -279,7 +276,7 @@ done: while (flushes || multi_ack) { int ack = get_ack(fd[0], result_sha1); if (ack) { - if (verbose) + if (args.verbose) fprintf(stderr, "got ack (%d) %s\n", ack, sha1_to_hex(result_sha1)); if (ack == 1) @@ -316,7 +313,7 @@ static int mark_complete(const char *path, const unsigned char *sha1, int flag, static void mark_recent_complete_commits(unsigned long cutoff) { while (complete && cutoff <= complete->item->date) { - if (verbose) + if (args.verbose) fprintf(stderr, "Marking %s as complete\n", sha1_to_hex(complete->item->object.sha1)); pop_most_recent_commit(&complete, COMPLETE); @@ -331,7 +328,7 @@ static void filter_refs(struct ref **refs, int nr_match, char **match) struct ref *ref, *next; struct ref *fastarray[32]; - if (nr_match && !fetch_all) { + if (nr_match && !args.fetch_all) { if (ARRAY_SIZE(fastarray) < nr_match) return_refs = xcalloc(nr_match, sizeof(struct ref *)); else { @@ -347,8 +344,8 @@ static void filter_refs(struct ref **refs, int nr_match, char **match) if (!memcmp(ref->name, "refs/", 5) && check_ref_format(ref->name + 5)) ; /* trash */ - else if (fetch_all && - (!depth || prefixcmp(ref->name, "refs/tags/") )) { + else if (args.fetch_all && + (!args.depth || prefixcmp(ref->name, "refs/tags/") )) { *newtail = ref; ref->next = NULL; newtail = &ref->next; @@ -364,7 +361,7 @@ static void filter_refs(struct ref **refs, int nr_match, char **match) free(ref); } - if (!fetch_all) { + if (!args.fetch_all) { int i; for (i = 0; i < nr_match; i++) { ref = return_refs[i]; @@ -407,7 +404,7 @@ static int everything_local(struct ref **refs, int nr_match, char **match) } } - if (!depth) { + if (!args.depth) { for_each_ref(mark_complete, NULL); if (cutoff) mark_recent_complete_commits(cutoff); @@ -441,7 +438,7 @@ static int everything_local(struct ref 
**refs, int nr_match, char **match) o = lookup_object(remote); if (!o || !(o->flags & COMPLETE)) { retval = 0; - if (!verbose) + if (!args.verbose) continue; fprintf(stderr, "want %s (%s)\n", sha1_to_hex(remote), @@ -450,7 +447,7 @@ static int everything_local(struct ref **refs, int nr_match, char **match) } hashcpy(ref->new_sha1, local); - if (!verbose) + if (!args.verbose) continue; fprintf(stderr, "already have %s (%s)\n", sha1_to_hex(remote), @@ -492,7 +489,7 @@ static pid_t setup_sideband(int fd[2], int xd[2]) return side_pid; } -static int get_pack(int xd[2]) +static int get_pack(int xd[2], char **pack_lockfile) { int status; pid_t pid, side_pid; @@ -501,13 +498,14 @@ static int get_pack(int xd[2]) char keep_arg[256]; char hdr_arg[256]; const char **av; - int do_keep = keep_pack; + int do_keep = args.keep_pack; + int keep_pipe[2]; side_pid = setup_sideband(fd, xd); av = argv; *hdr_arg = 0; - if (unpack_limit) { + if (!args.keep_pack && unpack_limit) { struct pack_header header; if (read_pack_header(fd[0], &header)) @@ -521,13 +519,15 @@ static int get_pack(int xd[2]) } if (do_keep) { + if (pack_lockfile && pipe(keep_pipe)) + die("fetch-pack: pipe setup failure: %s", strerror(errno)); *av++ = "index-pack"; *av++ = "--stdin"; - if (!quiet && !no_progress) + if (!args.quiet && !args.no_progress) *av++ = "-v"; - if (use_thin_pack) + if (args.use_thin_pack) *av++ = "--fix-thin"; - if (keep_pack > 1 || unpack_limit) { + if (args.lock_pack || unpack_limit) { int s = sprintf(keep_arg, "--keep=fetch-pack %d on ", getpid()); if (gethostname(keep_arg + s, sizeof(keep_arg) - s)) @@ -537,7 +537,7 @@ static int get_pack(int xd[2]) } else { *av++ = "unpack-objects"; - if (quiet) + if (args.quiet) *av++ = "-q"; } if (*hdr_arg) @@ -549,6 +549,11 @@ static int get_pack(int xd[2]) die("fetch-pack: unable to fork off %s", argv[0]); if (!pid) { dup2(fd[0], 0); + if (do_keep && pack_lockfile) { + dup2(keep_pipe[1], 1); + close(keep_pipe[0]); + close(keep_pipe[1]); + } close(fd[0]); close(fd[1]); execv_git_cmd(argv); @@ -556,6 +561,11 @@ static int get_pack(int xd[2]) } close(fd[0]); close(fd[1]); + if (do_keep && pack_lockfile) { + close(keep_pipe[1]); + *pack_lockfile = index_pack_lockfile(keep_pipe[0]); + close(keep_pipe[0]); + } while (waitpid(pid, &status, 0) < 0) { if (errno != EINTR) die("waiting for %s: %s", argv[0], strerror(errno)); @@ -573,7 +583,10 @@ static int get_pack(int xd[2]) die("%s died of unnatural causes %d", argv[0], status); } -static int fetch_pack(int fd[2], int nr_match, char **match) +static struct ref *do_fetch_pack(int fd[2], + int nr_match, + char **match, + char **pack_lockfile) { struct ref *ref; unsigned char sha1[20]; @@ -582,17 +595,17 @@ static int fetch_pack(int fd[2], int nr_match, char **match) if (is_repository_shallow() && !server_supports("shallow")) die("Server does not support shallow clients"); if (server_supports("multi_ack")) { - if (verbose) + if (args.verbose) fprintf(stderr, "Server supports multi_ack\n"); multi_ack = 1; } if (server_supports("side-band-64k")) { - if (verbose) + if (args.verbose) fprintf(stderr, "Server supports side-band-64k\n"); use_sideband = 2; } else if (server_supports("side-band")) { - if (verbose) + if (args.verbose) fprintf(stderr, "Server supports side-band\n"); use_sideband = 1; } @@ -605,22 +618,17 @@ static int fetch_pack(int fd[2], int nr_match, char **match) goto all_done; } if (find_common(fd, sha1, ref) < 0) - if (keep_pack != 1) + if (!args.keep_pack) /* When cloning, it is not unusual to have * no common commit. 
*/ fprintf(stderr, "warning: no common commits\n"); - if (get_pack(fd)) + if (get_pack(fd, pack_lockfile)) die("git-fetch-pack: fetch failed."); all_done: - while (ref) { - printf("%s %s\n", - sha1_to_hex(ref->old_sha1), ref->name); - ref = ref->next; - } - return 0; + return ref; } static int remove_duplicates(int nr_heads, char **heads) @@ -642,7 +650,6 @@ static int remove_duplicates(int nr_heads, char **heads) heads[dst] = heads[src]; dst++; } - heads[dst] = 0; return dst; } @@ -663,85 +670,119 @@ static int fetch_pack_config(const char *var, const char *value) static struct lock_file lock; -int main(int argc, char **argv) +static void fetch_pack_setup(void) { - int i, ret, nr_heads; - char *dest = NULL, **heads; - int fd[2]; - pid_t pid; - struct stat st; - - setup_git_directory(); + static int did_setup; + if (did_setup) + return; git_config(fetch_pack_config); - if (0 <= transfer_unpack_limit) unpack_limit = transfer_unpack_limit; else if (0 <= fetch_unpack_limit) unpack_limit = fetch_unpack_limit; + did_setup = 1; +} + +int cmd_fetch_pack(int argc, const char **argv, const char *prefix) +{ + int i, ret, nr_heads; + struct ref *ref; + char *dest = NULL, **heads; nr_heads = 0; heads = NULL; for (i = 1; i < argc; i++) { - char *arg = argv[i]; + const char *arg = argv[i]; if (*arg == '-') { if (!prefixcmp(arg, "--upload-pack=")) { - uploadpack = arg + 14; + args.uploadpack = arg + 14; continue; } if (!prefixcmp(arg, "--exec=")) { - uploadpack = arg + 7; + args.uploadpack = arg + 7; continue; } if (!strcmp("--quiet", arg) || !strcmp("-q", arg)) { - quiet = 1; + args.quiet = 1; continue; } if (!strcmp("--keep", arg) || !strcmp("-k", arg)) { - keep_pack++; - unpack_limit = 0; + args.lock_pack = args.keep_pack; + args.keep_pack = 1; continue; } if (!strcmp("--thin", arg)) { - use_thin_pack = 1; + args.use_thin_pack = 1; continue; } if (!strcmp("--all", arg)) { - fetch_all = 1; + args.fetch_all = 1; continue; } if (!strcmp("-v", arg)) { - verbose = 1; + args.verbose = 1; continue; } if (!prefixcmp(arg, "--depth=")) { - depth = strtol(arg + 8, NULL, 0); - if (stat(git_path("shallow"), &st)) - st.st_mtime = 0; + args.depth = strtol(arg + 8, NULL, 0); continue; } if (!strcmp("--no-progress", arg)) { - no_progress = 1; + args.no_progress = 1; continue; } usage(fetch_pack_usage); } - dest = arg; - heads = argv + i + 1; + dest = (char *)arg; + heads = (char **)(argv + i + 1); nr_heads = argc - i - 1; break; } if (!dest) usage(fetch_pack_usage); - pid = git_connect(fd, dest, uploadpack, verbose ? CONNECT_VERBOSE : 0); + + ref = fetch_pack(&args, dest, nr_heads, heads, NULL); + ret = !ref; + + while (ref) { + printf("%s %s\n", + sha1_to_hex(ref->old_sha1), ref->name); + ref = ref->next; + } + + return ret; +} + +struct ref *fetch_pack(struct fetch_pack_args *my_args, + const char *dest, + int nr_heads, + char **heads, + char **pack_lockfile) +{ + int i, ret; + int fd[2]; + pid_t pid; + struct ref *ref; + struct stat st; + + fetch_pack_setup(); + memcpy(&args, my_args, sizeof(args)); + if (args.depth > 0) { + if (stat(git_path("shallow"), &st)) + st.st_mtime = 0; + } + + pid = git_connect(fd, (char *)dest, uploadpack, + args.verbose ? 
CONNECT_VERBOSE : 0); if (pid < 0) - return 1; + return NULL; if (heads && nr_heads) nr_heads = remove_duplicates(nr_heads, heads); - ret = fetch_pack(fd, nr_heads, heads); + ref = do_fetch_pack(fd, nr_heads, heads, pack_lockfile); close(fd[0]); close(fd[1]); - ret |= finish_connect(pid); + ret = finish_connect(pid); if (!ret && nr_heads) { /* If the heads to pull were given, we should have @@ -756,7 +797,7 @@ int main(int argc, char **argv) } } - if (!ret && depth > 0) { + if (!ret && args.depth > 0) { struct cache_time mtime; char *shallow = git_path("shallow"); int fd; @@ -785,5 +826,8 @@ int main(int argc, char **argv) } } - return !!ret; + if (ret) + ref = NULL; + + return ref; } diff --git a/builtin-fetch.c b/builtin-fetch.c new file mode 100644 index 0000000000..b9d2b0c27e --- /dev/null +++ b/builtin-fetch.c @@ -0,0 +1,578 @@ +/* + * "git fetch" + */ +#include "cache.h" +#include "refs.h" +#include "commit.h" +#include "builtin.h" +#include "path-list.h" +#include "remote.h" +#include "transport.h" + +static const char fetch_usage[] = "git-fetch [-a | --append] [--upload-pack <upload-pack>] [-f | --force] [--no-tags] [-t | --tags] [-k | --keep] [-u | --update-head-ok] [--depth <depth>] [-v | --verbose] [<repository> <refspec>...]"; + +static int append, force, tags, no_tags, update_head_ok, verbose, quiet; +static char *default_rla = NULL; +static struct transport *transport; + +static void unlock_pack(void) +{ + if (transport) + transport_unlock_pack(transport); +} + +static void unlock_pack_on_signal(int signo) +{ + unlock_pack(); + signal(SIGINT, SIG_DFL); + raise(signo); +} + +static void add_merge_config(struct ref **head, + struct ref *remote_refs, + struct branch *branch, + struct ref ***tail) +{ + int i; + + for (i = 0; i < branch->merge_nr; i++) { + struct ref *rm, **old_tail = *tail; + struct refspec refspec; + + for (rm = *head; rm; rm = rm->next) { + if (branch_merge_matches(branch, i, rm->name)) { + rm->merge = 1; + break; + } + } + if (rm) + continue; + + /* Not fetched to a tracking branch? We need to fetch + * it anyway to allow this branch's "branch.$name.merge" + * to be honored by git-pull. 
+ */ + refspec.src = branch->merge[i]->src; + refspec.dst = NULL; + refspec.pattern = 0; + refspec.force = 0; + get_fetch_map(remote_refs, &refspec, tail); + for (rm = *old_tail; rm; rm = rm->next) + rm->merge = 1; + } +} + +static struct ref *get_ref_map(struct transport *transport, + struct refspec *refs, int ref_count, int tags, + int *autotags) +{ + int i; + struct ref *rm; + struct ref *ref_map = NULL; + struct ref **tail = &ref_map; + + struct ref *remote_refs = transport_get_remote_refs(transport); + + if (ref_count || tags) { + for (i = 0; i < ref_count; i++) { + get_fetch_map(remote_refs, &refs[i], &tail); + if (refs[i].dst && refs[i].dst[0]) + *autotags = 1; + } + /* Merge everything on the command line, but not --tags */ + for (rm = ref_map; rm; rm = rm->next) + rm->merge = 1; + if (tags) { + struct refspec refspec; + refspec.src = "refs/tags/"; + refspec.dst = "refs/tags/"; + refspec.pattern = 1; + refspec.force = 0; + get_fetch_map(remote_refs, &refspec, &tail); + } + } else { + /* Use the defaults */ + struct remote *remote = transport->remote; + struct branch *branch = branch_get(NULL); + int has_merge = branch_has_merge_config(branch); + if (remote && (remote->fetch_refspec_nr || has_merge)) { + for (i = 0; i < remote->fetch_refspec_nr; i++) { + get_fetch_map(remote_refs, &remote->fetch[i], &tail); + if (remote->fetch[i].dst && + remote->fetch[i].dst[0]) + *autotags = 1; + if (!i && !has_merge && ref_map && + !remote->fetch[0].pattern) + ref_map->merge = 1; + } + /* + * if the remote we're fetching from is the same + * as given in branch.<name>.remote, we add the + * ref given in branch.<name>.merge, too. + */ + if (has_merge && !strcmp(branch->remote_name, + remote->name)) + add_merge_config(&ref_map, remote_refs, branch, &tail); + } else { + ref_map = get_remote_ref(remote_refs, "HEAD"); + ref_map->merge = 1; + } + } + ref_remove_duplicates(ref_map); + + return ref_map; +} + +static void show_new(enum object_type type, unsigned char *sha1_new) +{ + fprintf(stderr, " %s: %s\n", typename(type), + find_unique_abbrev(sha1_new, DEFAULT_ABBREV)); +} + +static int s_update_ref(const char *action, + struct ref *ref, + int check_old) +{ + char msg[1024]; + char *rla = getenv("GIT_REFLOG_ACTION"); + static struct ref_lock *lock; + + if (!rla) + rla = default_rla; + snprintf(msg, sizeof(msg), "%s: %s", rla, action); + lock = lock_any_ref_for_update(ref->name, + check_old ? ref->old_sha1 : NULL, 0); + if (!lock) + return 1; + if (write_ref_sha1(lock, ref->new_sha1, msg) < 0) + return 1; + return 0; +} + +static int update_local_ref(struct ref *ref, + const char *note, + int verbose) +{ + char oldh[41], newh[41]; + struct commit *current = NULL, *updated; + enum object_type type; + struct branch *current_branch = branch_get(NULL); + + type = sha1_object_info(ref->new_sha1, NULL); + if (type < 0) + die("object %s not found", sha1_to_hex(ref->new_sha1)); + + if (!*ref->name) { + /* Not storing */ + if (verbose) { + fprintf(stderr, "* fetched %s\n", note); + show_new(type, ref->new_sha1); + } + return 0; + } + + if (!hashcmp(ref->old_sha1, ref->new_sha1)) { + if (verbose) { + fprintf(stderr, "* %s: same as %s\n", + ref->name, note); + show_new(type, ref->new_sha1); + } + return 0; + } + + if (current_branch && + !strcmp(ref->name, current_branch->name) && + !(update_head_ok || is_bare_repository()) && + !is_null_sha1(ref->old_sha1)) { + /* + * If this is the head, and it's not okay to update + * the head, and the old value of the head isn't empty... 
+ */ + fprintf(stderr, + " * %s: Cannot fetch into the current branch.\n", + ref->name); + return 1; + } + + if (!is_null_sha1(ref->old_sha1) && + !prefixcmp(ref->name, "refs/tags/")) { + fprintf(stderr, "* %s: updating with %s\n", + ref->name, note); + show_new(type, ref->new_sha1); + return s_update_ref("updating tag", ref, 0); + } + + current = lookup_commit_reference_gently(ref->old_sha1, 1); + updated = lookup_commit_reference_gently(ref->new_sha1, 1); + if (!current || !updated) { + char *msg; + if (!strncmp(ref->name, "refs/tags/", 10)) + msg = "storing tag"; + else + msg = "storing head"; + fprintf(stderr, "* %s: storing %s\n", + ref->name, note); + show_new(type, ref->new_sha1); + return s_update_ref(msg, ref, 0); + } + + strcpy(oldh, find_unique_abbrev(current->object.sha1, DEFAULT_ABBREV)); + strcpy(newh, find_unique_abbrev(ref->new_sha1, DEFAULT_ABBREV)); + + if (in_merge_bases(current, &updated, 1)) { + fprintf(stderr, "* %s: fast forward to %s\n", + ref->name, note); + fprintf(stderr, " old..new: %s..%s\n", oldh, newh); + return s_update_ref("fast forward", ref, 1); + } + if (!force && !ref->force) { + fprintf(stderr, + "* %s: not updating to non-fast forward %s\n", + ref->name, note); + fprintf(stderr, + " old...new: %s...%s\n", oldh, newh); + return 1; + } + fprintf(stderr, + "* %s: forcing update to non-fast forward %s\n", + ref->name, note); + fprintf(stderr, " old...new: %s...%s\n", oldh, newh); + return s_update_ref("forced-update", ref, 1); +} + +static void store_updated_refs(const char *url, struct ref *ref_map) +{ + FILE *fp; + struct commit *commit; + int url_len, i, note_len; + char note[1024]; + const char *what, *kind; + struct ref *rm; + + fp = fopen(git_path("FETCH_HEAD"), "a"); + for (rm = ref_map; rm; rm = rm->next) { + struct ref *ref = NULL; + + if (rm->peer_ref) { + ref = xcalloc(1, sizeof(*ref) + strlen(rm->peer_ref->name) + 1); + strcpy(ref->name, rm->peer_ref->name); + hashcpy(ref->old_sha1, rm->peer_ref->old_sha1); + hashcpy(ref->new_sha1, rm->old_sha1); + ref->force = rm->peer_ref->force; + } + + commit = lookup_commit_reference_gently(rm->old_sha1, 1); + if (!commit) + rm->merge = 0; + + if (!strcmp(rm->name, "HEAD")) { + kind = ""; + what = ""; + } + else if (!prefixcmp(rm->name, "refs/heads/")) { + kind = "branch"; + what = rm->name + 11; + } + else if (!prefixcmp(rm->name, "refs/tags/")) { + kind = "tag"; + what = rm->name + 10; + } + else if (!prefixcmp(rm->name, "refs/remotes/")) { + kind = "remote branch"; + what = rm->name + 13; + } + else { + kind = ""; + what = rm->name; + } + + url_len = strlen(url); + for (i = url_len - 1; url[i] == '/' && 0 <= i; i--) + ; + url_len = i + 1; + if (4 < i && !strncmp(".git", url + i - 3, 4)) + url_len = i - 3; + + note_len = 0; + if (*what) { + if (*kind) + note_len += sprintf(note + note_len, "%s ", + kind); + note_len += sprintf(note + note_len, "'%s' of ", what); + } + note_len += sprintf(note + note_len, "%.*s", url_len, url); + fprintf(fp, "%s\t%s\t%s\n", + sha1_to_hex(commit ? commit->object.sha1 : + rm->old_sha1), + rm->merge ? 
"" : "not-for-merge", + note); + + if (ref) + update_local_ref(ref, note, verbose); + } + fclose(fp); +} + +static int fetch_refs(struct transport *transport, struct ref *ref_map) +{ + int ret = transport_fetch_refs(transport, ref_map); + if (!ret) + store_updated_refs(transport->url, ref_map); + transport_unlock_pack(transport); + return ret; +} + +static int add_existing(const char *refname, const unsigned char *sha1, + int flag, void *cbdata) +{ + struct path_list *list = (struct path_list *)cbdata; + path_list_insert(refname, list); + return 0; +} + +static struct ref *find_non_local_tags(struct transport *transport, + struct ref *fetch_map) +{ + static struct path_list existing_refs = { NULL, 0, 0, 0 }; + struct path_list new_refs = { NULL, 0, 0, 1 }; + char *ref_name; + int ref_name_len; + unsigned char *ref_sha1; + struct ref *tag_ref; + struct ref *rm = NULL; + struct ref *ref_map = NULL; + struct ref **tail = &ref_map; + struct ref *ref; + + for_each_ref(add_existing, &existing_refs); + for (ref = transport_get_remote_refs(transport); ref; ref = ref->next) { + if (prefixcmp(ref->name, "refs/tags")) + continue; + + ref_name = xstrdup(ref->name); + ref_name_len = strlen(ref_name); + ref_sha1 = ref->old_sha1; + + if (!strcmp(ref_name + ref_name_len - 3, "^{}")) { + ref_name[ref_name_len - 3] = 0; + tag_ref = transport_get_remote_refs(transport); + while (tag_ref) { + if (!strcmp(tag_ref->name, ref_name)) { + ref_sha1 = tag_ref->old_sha1; + break; + } + tag_ref = tag_ref->next; + } + } + + if (!path_list_has_path(&existing_refs, ref_name) && + !path_list_has_path(&new_refs, ref_name) && + lookup_object(ref->old_sha1)) { + fprintf(stderr, "Auto-following %s\n", + ref_name); + + path_list_insert(ref_name, &new_refs); + + rm = alloc_ref(strlen(ref_name) + 1); + strcpy(rm->name, ref_name); + rm->peer_ref = alloc_ref(strlen(ref_name) + 1); + strcpy(rm->peer_ref->name, ref_name); + hashcpy(rm->old_sha1, ref_sha1); + + *tail = rm; + tail = &rm->next; + } + free(ref_name); + } + + return ref_map; +} + +static int do_fetch(struct transport *transport, + struct refspec *refs, int ref_count) +{ + struct ref *ref_map, *fetch_map; + struct ref *rm; + int autotags = (transport->remote->fetch_tags == 1); + if (transport->remote->fetch_tags == 2 && !no_tags) + tags = 1; + if (transport->remote->fetch_tags == -1) + no_tags = 1; + + if (!transport->get_refs_list || !transport->fetch) + die("Don't know how to fetch from %s", transport->url); + + /* if not appending, truncate FETCH_HEAD */ + if (!append) + fclose(fopen(git_path("FETCH_HEAD"), "w")); + + ref_map = get_ref_map(transport, refs, ref_count, tags, &autotags); + + for (rm = ref_map; rm; rm = rm->next) { + if (rm->peer_ref) + read_ref(rm->peer_ref->name, rm->peer_ref->old_sha1); + } + + if (fetch_refs(transport, ref_map)) { + free_refs(ref_map); + return 1; + } + + fetch_map = ref_map; + + /* if neither --no-tags nor --tags was specified, do automated tag + * following ... 
*/ + if (!(tags || no_tags) && autotags) { + ref_map = find_non_local_tags(transport, fetch_map); + if (ref_map) { + transport_set_option(transport, TRANS_OPT_DEPTH, "0"); + fetch_refs(transport, ref_map); + } + free_refs(ref_map); + } + + free_refs(fetch_map); + + return 0; +} + +static void set_option(const char *name, const char *value) +{ + int r = transport_set_option(transport, name, value); + if (r < 0) + die("Option \"%s\" value \"%s\" is not valid for %s\n", + name, value, transport->url); + if (r > 0) + warning("Option \"%s\" is ignored for %s\n", + name, transport->url); +} + +int cmd_fetch(int argc, const char **argv, const char *prefix) +{ + struct remote *remote; + int i, j, rla_offset; + static const char **refs = NULL; + int ref_nr = 0; + int cmd_len = 0; + const char *depth = NULL, *upload_pack = NULL; + int keep = 0; + + for (i = 1; i < argc; i++) { + const char *arg = argv[i]; + cmd_len += strlen(arg); + + if (arg[0] != '-') + break; + if (!strcmp(arg, "--append") || !strcmp(arg, "-a")) { + append = 1; + continue; + } + if (!prefixcmp(arg, "--upload-pack=")) { + upload_pack = arg + 14; + continue; + } + if (!strcmp(arg, "--upload-pack")) { + i++; + if (i == argc) + usage(fetch_usage); + upload_pack = argv[i]; + continue; + } + if (!strcmp(arg, "--force") || !strcmp(arg, "-f")) { + force = 1; + continue; + } + if (!strcmp(arg, "--no-tags")) { + no_tags = 1; + continue; + } + if (!strcmp(arg, "--tags") || !strcmp(arg, "-t")) { + tags = 1; + continue; + } + if (!strcmp(arg, "--keep") || !strcmp(arg, "-k")) { + keep = 1; + continue; + } + if (!strcmp(arg, "--update-head-ok") || !strcmp(arg, "-u")) { + update_head_ok = 1; + continue; + } + if (!prefixcmp(arg, "--depth=")) { + depth = arg + 8; + continue; + } + if (!strcmp(arg, "--depth")) { + i++; + if (i == argc) + usage(fetch_usage); + depth = argv[i]; + continue; + } + if (!strcmp(arg, "--quiet")) { + quiet = 1; + continue; + } + if (!strcmp(arg, "--verbose") || !strcmp(arg, "-v")) { + verbose++; + continue; + } + usage(fetch_usage); + } + + for (j = i; j < argc; j++) + cmd_len += strlen(argv[j]); + + default_rla = xmalloc(cmd_len + 5 + argc + 1); + sprintf(default_rla, "fetch"); + rla_offset = strlen(default_rla); + for (j = 1; j < argc; j++) { + sprintf(default_rla + rla_offset, " %s", argv[j]); + rla_offset += strlen(argv[j]) + 1; + } + + if (i == argc) + remote = remote_get(NULL); + else + remote = remote_get(argv[i++]); + + transport = transport_get(remote, remote->url[0]); + if (verbose >= 2) + transport->verbose = 1; + if (quiet) + transport->verbose = -1; + if (upload_pack) + set_option(TRANS_OPT_UPLOADPACK, upload_pack); + if (keep) + set_option(TRANS_OPT_KEEP, "yes"); + if (depth) + set_option(TRANS_OPT_DEPTH, depth); + + if (!transport->url) + die("Where do you want to fetch from today?"); + + if (i < argc) { + int j = 0; + refs = xcalloc(argc - i + 1, sizeof(const char *)); + while (i < argc) { + if (!strcmp(argv[i], "tag")) { + char *ref; + i++; + ref = xmalloc(strlen(argv[i]) * 2 + 22); + strcpy(ref, "refs/tags/"); + strcat(ref, argv[i]); + strcat(ref, ":refs/tags/"); + strcat(ref, argv[i]); + refs[j++] = ref; + } else + refs[j++] = argv[i]; + i++; + } + refs[j] = NULL; + ref_nr = j; + } + + signal(SIGINT, unlock_pack_on_signal); + atexit(unlock_pack); + return do_fetch(transport, parse_ref_spec(ref_nr, refs), ref_nr); +} diff --git a/builtin-http-fetch.c b/builtin-http-fetch.c new file mode 100644 index 0000000000..4a50dbd95b --- /dev/null +++ b/builtin-http-fetch.c @@ -0,0 +1,77 @@ +#include "cache.h" 
+#include "walker.h" + +int cmd_http_fetch(int argc, const char **argv, const char *prefix) +{ + struct walker *walker; + int commits_on_stdin = 0; + int commits; + const char **write_ref = NULL; + char **commit_id; + const char *url; + int arg = 1; + int rc = 0; + int get_tree = 0; + int get_history = 0; + int get_all = 0; + int get_verbosely = 0; + int get_recover = 0; + + git_config(git_default_config); + + while (arg < argc && argv[arg][0] == '-') { + if (argv[arg][1] == 't') { + get_tree = 1; + } else if (argv[arg][1] == 'c') { + get_history = 1; + } else if (argv[arg][1] == 'a') { + get_all = 1; + get_tree = 1; + get_history = 1; + } else if (argv[arg][1] == 'v') { + get_verbosely = 1; + } else if (argv[arg][1] == 'w') { + write_ref = &argv[arg + 1]; + arg++; + } else if (!strcmp(argv[arg], "--recover")) { + get_recover = 1; + } else if (!strcmp(argv[arg], "--stdin")) { + commits_on_stdin = 1; + } + arg++; + } + if (argc < arg + 2 - commits_on_stdin) { + usage("git-http-fetch [-c] [-t] [-a] [-v] [--recover] [-w ref] [--stdin] commit-id url"); + return 1; + } + if (commits_on_stdin) { + commits = walker_targets_stdin(&commit_id, &write_ref); + } else { + commit_id = (char **) &argv[arg++]; + commits = 1; + } + url = argv[arg]; + + walker = get_http_walker(url); + walker->get_tree = get_tree; + walker->get_history = get_history; + walker->get_all = get_all; + walker->get_verbosely = get_verbosely; + walker->get_recover = get_recover; + + rc = walker_fetch(walker, commits, commit_id, write_ref, url); + + if (commits_on_stdin) + walker_targets_free(commits, commit_id, write_ref); + + if (walker->corrupt_object_found) { + fprintf(stderr, +"Some loose object were found to be corrupt, but they might be just\n" +"a false '404 Not Found' error message sent with incorrect HTTP\n" +"status code. 
Suggest running git-fsck.\n"); + } + + walker_free(walker); + + return rc; +} diff --git a/builtin-push.c b/builtin-push.c index 141380b852..4b39ef3852 100644 --- a/builtin-push.c +++ b/builtin-push.c @@ -6,10 +6,11 @@ #include "run-command.h" #include "builtin.h" #include "remote.h" +#include "transport.h" static const char push_usage[] = "git-push [--all] [--dry-run] [--tags] [--receive-pack=<git-receive-pack>] [--repo=all] [-f | --force] [-v] [<repository> <refspec>...]"; -static int all, dry_run, force, thin, verbose; +static int thin, verbose; static const char *receivepack; static const char **refspec; @@ -43,82 +44,40 @@ static void set_refspecs(const char **refs, int nr) } } -static int do_push(const char *repo) +static int do_push(const char *repo, int flags) { int i, errs; - int common_argc; - const char **argv; - int argc; struct remote *remote = remote_get(repo); if (!remote) die("bad repository '%s'", repo); - if (remote->receivepack) { - char *rp = xmalloc(strlen(remote->receivepack) + 16); - sprintf(rp, "--receive-pack=%s", remote->receivepack); - receivepack = rp; - } - if (!refspec && !all && remote->push_refspec_nr) { + if (!refspec + && !(flags & TRANSPORT_PUSH_ALL) + && remote->push_refspec_nr) { refspec = remote->push_refspec; refspec_nr = remote->push_refspec_nr; } - - argv = xmalloc((refspec_nr + 10) * sizeof(char *)); - argv[0] = "dummy-send-pack"; - argc = 1; - if (all) - argv[argc++] = "--all"; - if (dry_run) - argv[argc++] = "--dry-run"; - if (force) - argv[argc++] = "--force"; - if (receivepack) - argv[argc++] = receivepack; - common_argc = argc; - errs = 0; - for (i = 0; i < remote->uri_nr; i++) { + for (i = 0; i < remote->url_nr; i++) { + struct transport *transport = + transport_get(remote, remote->url[i]); int err; - int dest_argc = common_argc; - int dest_refspec_nr = refspec_nr; - const char **dest_refspec = refspec; - const char *dest = remote->uri[i]; - const char *sender = "send-pack"; - if (!prefixcmp(dest, "http://") || - !prefixcmp(dest, "https://")) - sender = "http-push"; - else { - char *rem = xmalloc(strlen(remote->name) + 10); - sprintf(rem, "--remote=%s", remote->name); - argv[dest_argc++] = rem; - if (thin) - argv[dest_argc++] = "--thin"; - } - argv[0] = sender; - argv[dest_argc++] = dest; - while (dest_refspec_nr--) - argv[dest_argc++] = *dest_refspec++; - argv[dest_argc] = NULL; + if (receivepack) + transport_set_option(transport, + TRANS_OPT_RECEIVEPACK, receivepack); + if (thin) + transport_set_option(transport, TRANS_OPT_THIN, "yes"); + if (verbose) - fprintf(stderr, "Pushing to %s\n", dest); - err = run_command_v_opt(argv, RUN_GIT_CMD); + fprintf(stderr, "Pushing to %s\n", remote->url[i]); + err = transport_push(transport, refspec_nr, refspec, flags); + err |= transport_disconnect(transport); + if (!err) continue; - error("failed to push to '%s'", remote->uri[i]); - switch (err) { - case -ERR_RUN_COMMAND_FORK: - error("unable to fork for %s", sender); - case -ERR_RUN_COMMAND_EXEC: - error("unable to exec %s", sender); - break; - case -ERR_RUN_COMMAND_WAITPID: - case -ERR_RUN_COMMAND_WAITPID_WRONG_PID: - case -ERR_RUN_COMMAND_WAITPID_SIGNAL: - case -ERR_RUN_COMMAND_WAITPID_NOEXIT: - error("%s died with strange error", sender); - } + error("failed to push to '%s'", remote->url[i]); errs++; } return !!errs; @@ -127,6 +86,7 @@ static int do_push(const char *repo) int cmd_push(int argc, const char **argv, const char *prefix) { int i; + int flags = 0; const char *repo = NULL; /* default repository */ for (i = 1; i < argc; i++) { @@ -146,11 
+106,11 @@ int cmd_push(int argc, const char **argv, const char *prefix) continue; } if (!strcmp(arg, "--all")) { - all = 1; + flags |= TRANSPORT_PUSH_ALL; continue; } if (!strcmp(arg, "--dry-run")) { - dry_run = 1; + flags |= TRANSPORT_PUSH_DRY_RUN; continue; } if (!strcmp(arg, "--tags")) { @@ -158,7 +118,7 @@ int cmd_push(int argc, const char **argv, const char *prefix) continue; } if (!strcmp(arg, "--force") || !strcmp(arg, "-f")) { - force = 1; + flags |= TRANSPORT_PUSH_FORCE; continue; } if (!strcmp(arg, "--thin")) { @@ -170,18 +130,18 @@ int cmd_push(int argc, const char **argv, const char *prefix) continue; } if (!prefixcmp(arg, "--receive-pack=")) { - receivepack = arg; + receivepack = arg + 15; continue; } if (!prefixcmp(arg, "--exec=")) { - receivepack = arg; + receivepack = arg + 7; continue; } usage(push_usage); } set_refspecs(argv + i, argc - i); - if (all && refspec) + if ((flags & TRANSPORT_PUSH_ALL) && refspec) usage(push_usage); - return do_push(repo); + return do_push(repo, flags); } @@ -30,6 +30,8 @@ extern int cmd_diff_files(int argc, const char **argv, const char *prefix); extern int cmd_diff_index(int argc, const char **argv, const char *prefix); extern int cmd_diff(int argc, const char **argv, const char *prefix); extern int cmd_diff_tree(int argc, const char **argv, const char *prefix); +extern int cmd_fetch(int argc, const char **argv, const char *prefix); +extern int cmd_fetch_pack(int argc, const char **argv, const char *prefix); extern int cmd_fetch__tool(int argc, const char **argv, const char *prefix); extern int cmd_fmt_merge_msg(int argc, const char **argv, const char *prefix); extern int cmd_for_each_ref(int argc, const char **argv, const char *prefix); @@ -39,6 +41,7 @@ extern int cmd_gc(int argc, const char **argv, const char *prefix); extern int cmd_get_tar_commit_id(int argc, const char **argv, const char *prefix); extern int cmd_grep(int argc, const char **argv, const char *prefix); extern int cmd_help(int argc, const char **argv, const char *prefix); +extern int cmd_http_fetch(int argc, const char **argv, const char *prefix); extern int cmd_init_db(int argc, const char **argv, const char *prefix); extern int cmd_log(int argc, const char **argv, const char *prefix); extern int cmd_log_reflog(int argc, const char **argv, const char *prefix); diff --git a/bundle.c b/bundle.c new file mode 100644 index 0000000000..0869fcf026 --- /dev/null +++ b/bundle.c @@ -0,0 +1,343 @@ +#include "cache.h" +#include "bundle.h" +#include "object.h" +#include "commit.h" +#include "diff.h" +#include "revision.h" +#include "list-objects.h" +#include "run-command.h" + +static const char bundle_signature[] = "# v2 git bundle\n"; + +static void add_to_ref_list(const unsigned char *sha1, const char *name, + struct ref_list *list) +{ + if (list->nr + 1 >= list->alloc) { + list->alloc = alloc_nr(list->nr + 1); + list->list = xrealloc(list->list, + list->alloc * sizeof(list->list[0])); + } + memcpy(list->list[list->nr].sha1, sha1, 20); + list->list[list->nr].name = xstrdup(name); + list->nr++; +} + +/* returns an fd */ +int read_bundle_header(const char *path, struct bundle_header *header) { + char buffer[1024]; + int fd; + long fpos; + FILE *ffd = fopen(path, "rb"); + + if (!ffd) + return error("could not open '%s'", path); + if (!fgets(buffer, sizeof(buffer), ffd) || + strcmp(buffer, bundle_signature)) { + fclose(ffd); + return error("'%s' does not look like a v2 bundle file", path); + } + while (fgets(buffer, sizeof(buffer), ffd) + && buffer[0] != '\n') { + int is_prereq = 
buffer[0] == '-'; + int offset = is_prereq ? 1 : 0; + int len = strlen(buffer); + unsigned char sha1[20]; + struct ref_list *list = is_prereq ? &header->prerequisites + : &header->references; + char delim; + + if (buffer[len - 1] == '\n') + buffer[len - 1] = '\0'; + if (get_sha1_hex(buffer + offset, sha1)) { + warning("unrecognized header: %s", buffer); + continue; + } + delim = buffer[40 + offset]; + if (!isspace(delim) && (delim != '\0' || !is_prereq)) + die ("invalid header: %s", buffer); + add_to_ref_list(sha1, isspace(delim) ? + buffer + 41 + offset : "", list); + } + fpos = ftell(ffd); + fclose(ffd); + fd = open(path, O_RDONLY); + if (fd < 0) + return error("could not open '%s'", path); + lseek(fd, fpos, SEEK_SET); + return fd; +} + +static int list_refs(struct ref_list *r, int argc, const char **argv) +{ + int i; + + for (i = 0; i < r->nr; i++) { + if (argc > 1) { + int j; + for (j = 1; j < argc; j++) + if (!strcmp(r->list[i].name, argv[j])) + break; + if (j == argc) + continue; + } + printf("%s %s\n", sha1_to_hex(r->list[i].sha1), + r->list[i].name); + } + return 0; +} + +#define PREREQ_MARK (1u<<16) + +int verify_bundle(struct bundle_header *header, int verbose) +{ + /* + * Do fast check, then if any prereqs are missing then go line by line + * to be verbose about the errors + */ + struct ref_list *p = &header->prerequisites; + struct rev_info revs; + const char *argv[] = {NULL, "--all"}; + struct object_array refs; + struct commit *commit; + int i, ret = 0, req_nr; + const char *message = "Repository lacks these prerequisite commits:"; + + init_revisions(&revs, NULL); + for (i = 0; i < p->nr; i++) { + struct ref_list_entry *e = p->list + i; + struct object *o = parse_object(e->sha1); + if (o) { + o->flags |= PREREQ_MARK; + add_pending_object(&revs, o, e->name); + continue; + } + if (++ret == 1) + error(message); + error("%s %s", sha1_to_hex(e->sha1), e->name); + } + if (revs.pending.nr != p->nr) + return ret; + req_nr = revs.pending.nr; + setup_revisions(2, argv, &revs, NULL); + + memset(&refs, 0, sizeof(struct object_array)); + for (i = 0; i < revs.pending.nr; i++) { + struct object_array_entry *e = revs.pending.objects + i; + add_object_array(e->item, e->name, &refs); + } + + prepare_revision_walk(&revs); + + i = req_nr; + while (i && (commit = get_revision(&revs))) + if (commit->object.flags & PREREQ_MARK) + i--; + + for (i = 0; i < req_nr; i++) + if (!(refs.objects[i].item->flags & SHOWN)) { + if (++ret == 1) + error(message); + error("%s %s", sha1_to_hex(refs.objects[i].item->sha1), + refs.objects[i].name); + } + + for (i = 0; i < refs.nr; i++) + clear_commit_marks((struct commit *)refs.objects[i].item, -1); + + if (verbose) { + struct ref_list *r; + + r = &header->references; + printf("The bundle contains %d ref%s\n", + r->nr, (1 < r->nr) ? "s" : ""); + list_refs(r, 0, NULL); + r = &header->prerequisites; + printf("The bundle requires these %d ref%s\n", + r->nr, (1 < r->nr) ? 
"s" : ""); + list_refs(r, 0, NULL); + } + return ret; +} + +int list_bundle_refs(struct bundle_header *header, int argc, const char **argv) +{ + return list_refs(&header->references, argc, argv); +} + +int create_bundle(struct bundle_header *header, const char *path, + int argc, const char **argv) +{ + static struct lock_file lock; + int bundle_fd = -1; + int bundle_to_stdout; + const char **argv_boundary = xmalloc((argc + 4) * sizeof(const char *)); + const char **argv_pack = xmalloc(5 * sizeof(const char *)); + int i, ref_count = 0; + char buffer[1024]; + struct rev_info revs; + struct child_process rls; + FILE *rls_fout; + + bundle_to_stdout = !strcmp(path, "-"); + if (bundle_to_stdout) + bundle_fd = 1; + else + bundle_fd = hold_lock_file_for_update(&lock, path, 1); + + /* write signature */ + write_or_die(bundle_fd, bundle_signature, strlen(bundle_signature)); + + /* init revs to list objects for pack-objects later */ + save_commit_buffer = 0; + init_revisions(&revs, NULL); + + /* write prerequisites */ + memcpy(argv_boundary + 3, argv + 1, argc * sizeof(const char *)); + argv_boundary[0] = "rev-list"; + argv_boundary[1] = "--boundary"; + argv_boundary[2] = "--pretty=oneline"; + argv_boundary[argc + 2] = NULL; + memset(&rls, 0, sizeof(rls)); + rls.argv = argv_boundary; + rls.out = -1; + rls.git_cmd = 1; + if (start_command(&rls)) + return -1; + rls_fout = fdopen(rls.out, "r"); + while (fgets(buffer, sizeof(buffer), rls_fout)) { + unsigned char sha1[20]; + if (buffer[0] == '-') { + write_or_die(bundle_fd, buffer, strlen(buffer)); + if (!get_sha1_hex(buffer + 1, sha1)) { + struct object *object = parse_object(sha1); + object->flags |= UNINTERESTING; + add_pending_object(&revs, object, buffer); + } + } else if (!get_sha1_hex(buffer, sha1)) { + struct object *object = parse_object(sha1); + object->flags |= SHOWN; + } + } + fclose(rls_fout); + if (finish_command(&rls)) + return error("rev-list died"); + + /* write references */ + argc = setup_revisions(argc, argv, &revs, NULL); + if (argc > 1) + return error("unrecognized argument: %s'", argv[1]); + + for (i = 0; i < revs.pending.nr; i++) { + struct object_array_entry *e = revs.pending.objects + i; + unsigned char sha1[20]; + char *ref; + + if (e->item->flags & UNINTERESTING) + continue; + if (dwim_ref(e->name, strlen(e->name), sha1, &ref) != 1) + continue; + /* + * Make sure the refs we wrote out is correct; --max-count and + * other limiting options could have prevented all the tips + * from getting output. + * + * Non commit objects such as tags and blobs do not have + * this issue as they are not affected by those extra + * constraints. + */ + if (!(e->item->flags & SHOWN) && e->item->type == OBJ_COMMIT) { + warning("ref '%s' is excluded by the rev-list options", + e->name); + free(ref); + continue; + } + /* + * If you run "git bundle create bndl v1.0..v2.0", the + * name of the positive ref is "v2.0" but that is the + * commit that is referenced by the tag, and not the tag + * itself. + */ + if (hashcmp(sha1, e->item->sha1)) { + /* + * Is this the positive end of a range expressed + * in terms of a tag (e.g. v2.0 from the range + * "v1.0..v2.0")? + */ + struct commit *one = lookup_commit_reference(sha1); + struct object *obj; + + if (e->item == &(one->object)) { + /* + * Need to include e->name as an + * independent ref to the pack-objects + * input, so that the tag is included + * in the output; otherwise we would + * end up triggering "empty bundle" + * error. 
+ */ + obj = parse_object(sha1); + obj->flags |= SHOWN; + add_pending_object(&revs, obj, e->name); + } + free(ref); + continue; + } + + ref_count++; + write_or_die(bundle_fd, sha1_to_hex(e->item->sha1), 40); + write_or_die(bundle_fd, " ", 1); + write_or_die(bundle_fd, ref, strlen(ref)); + write_or_die(bundle_fd, "\n", 1); + free(ref); + } + if (!ref_count) + die ("Refusing to create empty bundle."); + + /* end header */ + write_or_die(bundle_fd, "\n", 1); + + /* write pack */ + argv_pack[0] = "pack-objects"; + argv_pack[1] = "--all-progress"; + argv_pack[2] = "--stdout"; + argv_pack[3] = "--thin"; + argv_pack[4] = NULL; + memset(&rls, 0, sizeof(rls)); + rls.argv = argv_pack; + rls.in = -1; + rls.out = bundle_fd; + rls.git_cmd = 1; + if (start_command(&rls)) + return error("Could not spawn pack-objects"); + for (i = 0; i < revs.pending.nr; i++) { + struct object *object = revs.pending.objects[i].item; + if (object->flags & UNINTERESTING) + write(rls.in, "^", 1); + write(rls.in, sha1_to_hex(object->sha1), 40); + write(rls.in, "\n", 1); + } + if (finish_command(&rls)) + return error ("pack-objects died"); + close(bundle_fd); + if (!bundle_to_stdout) + commit_lock_file(&lock); + return 0; +} + +int unbundle(struct bundle_header *header, int bundle_fd) +{ + const char *argv_index_pack[] = {"index-pack", + "--fix-thin", "--stdin", NULL}; + struct child_process ip; + + if (verify_bundle(header, 0)) + return -1; + memset(&ip, 0, sizeof(ip)); + ip.argv = argv_index_pack; + ip.in = bundle_fd; + ip.no_stdout = 1; + ip.git_cmd = 1; + if (run_command(&ip)) + return error("index-pack died"); + return 0; +} diff --git a/bundle.h b/bundle.h new file mode 100644 index 0000000000..e2aedd60d6 --- /dev/null +++ b/bundle.h @@ -0,0 +1,25 @@ +#ifndef BUNDLE_H +#define BUNDLE_H + +struct ref_list { + unsigned int nr, alloc; + struct ref_list_entry { + unsigned char sha1[20]; + char *name; + } *list; +}; + +struct bundle_header { + struct ref_list prerequisites; + struct ref_list references; +}; + +int read_bundle_header(const char *path, struct bundle_header *header); +int create_bundle(struct bundle_header *header, const char *path, + int argc, const char **argv); +int verify_bundle(struct bundle_header *header, int verbose); +int unbundle(struct bundle_header *header, int bundle_fd); +int list_bundle_refs(struct bundle_header *header, + int argc, const char **argv); + +#endif @@ -493,6 +493,7 @@ struct ref { unsigned char old_sha1[20]; unsigned char new_sha1[20]; unsigned char force; + unsigned char merge; struct ref *peer_ref; /* when renaming */ char name[FLEX_ARRAY]; /* more */ }; diff --git a/compat/mkdtemp.c b/compat/mkdtemp.c new file mode 100644 index 0000000000..34d4b49818 --- /dev/null +++ b/compat/mkdtemp.c @@ -0,0 +1,8 @@ +#include "../git-compat-util.h" + +char *gitmkdtemp(char *template) +{ + if (!mktemp(template) || mkdir(template, 0700)) + return NULL; + return template; +} @@ -72,9 +72,9 @@ struct ref **get_remote_heads(int in, struct ref **list, continue; if (nr_match && !path_match(name, nr_match, match)) continue; - ref = alloc_ref(len - 40); + ref = alloc_ref(name_len + 1); hashcpy(ref->old_sha1, old_sha1); - memcpy(ref->name, buffer + 41, len - 40); + memcpy(ref->name, buffer + 41, name_len + 1); *list = ref; list = &ref->next; } diff --git a/git-fetch.sh b/contrib/examples/git-fetch.sh index e44af2c86d..e44af2c86d 100755 --- a/git-fetch.sh +++ b/contrib/examples/git-fetch.sh @@ -709,3 +709,44 @@ int is_inside_dir(const char *dir) char buffer[PATH_MAX]; return get_relative_cwd(buffer, 
sizeof(buffer), dir) != NULL; } + +int remove_dir_recursively(struct strbuf *path, int only_empty) +{ + DIR *dir = opendir(path->buf); + struct dirent *e; + int ret = 0, original_len = path->len, len; + + if (!dir) + return -1; + if (path->buf[original_len - 1] != '/') + strbuf_addch(path, '/'); + + len = path->len; + while ((e = readdir(dir)) != NULL) { + struct stat st; + if ((e->d_name[0] == '.') && + ((e->d_name[1] == 0) || + ((e->d_name[1] == '.') && e->d_name[2] == 0))) + continue; /* "." and ".." */ + + strbuf_setlen(path, len); + strbuf_addstr(path, e->d_name); + if (lstat(path->buf, &st)) + ; /* fall thru */ + else if (S_ISDIR(st.st_mode)) { + if (!remove_dir_recursively(path, only_empty)) + continue; /* happy */ + } else if (!only_empty && !unlink(path->buf)) + continue; /* happy, too */ + + /* path too long, stat fails, or non-directory still exists */ + ret = -1; + break; + } + closedir(dir); + + strbuf_setlen(path, original_len); + if (!ret) + ret = rmdir(path->buf); + return ret; +} @@ -64,4 +64,6 @@ extern struct dir_entry *dir_add_name(struct dir_struct *dir, const char *pathna extern char *get_relative_cwd(char *buffer, int size, const char *dir); extern int is_inside_dir(const char *dir); +extern int remove_dir_recursively(struct strbuf *path, int only_empty); + #endif diff --git a/fetch-pack.h b/fetch-pack.h new file mode 100644 index 0000000000..a7888ea302 --- /dev/null +++ b/fetch-pack.h @@ -0,0 +1,24 @@ +#ifndef FETCH_PACK_H +#define FETCH_PACK_H + +struct fetch_pack_args +{ + const char *uploadpack; + int unpacklimit; + int depth; + unsigned quiet:1, + keep_pack:1, + lock_pack:1, + use_thin_pack:1, + fetch_all:1, + verbose:1, + no_progress:1; +}; + +struct ref *fetch_pack(struct fetch_pack_args *args, + const char *dest, + int nr_heads, + char **heads, + char **pack_lockfile); + +#endif diff --git a/fetch.h b/fetch.h deleted file mode 100644 index be48c6f190..0000000000 --- a/fetch.h +++ /dev/null @@ -1,54 +0,0 @@ -#ifndef PULL_H -#define PULL_H - -/* - * Fetch object given SHA1 from the remote, and store it locally under - * GIT_OBJECT_DIRECTORY. Return 0 on success, -1 on failure. To be - * provided by the particular implementation. - */ -extern int fetch(unsigned char *sha1); - -/* - * Fetch the specified object and store it locally; fetch() will be - * called later to determine success. To be provided by the particular - * implementation. - */ -extern void prefetch(unsigned char *sha1); - -/* - * Fetch ref (relative to $GIT_DIR/refs) from the remote, and store - * the 20-byte SHA1 in sha1. Return 0 on success, -1 on failure. To - * be provided by the particular implementation. - */ -extern int fetch_ref(char *ref, unsigned char *sha1); - -/* Set to fetch the target tree. */ -extern int get_tree; - -/* Set to fetch the commit history. */ -extern int get_history; - -/* Set to fetch the trees in the commit history. */ -extern int get_all; - -/* Set to be verbose */ -extern int get_verbosely; - -/* Set to check on all reachable objects. */ -extern int get_recover; - -/* Report what we got under get_verbosely */ -extern void pull_say(const char *, const char *); - -/* Load pull targets from stdin */ -extern int pull_targets_stdin(char ***target, const char ***write_ref); - -/* Free up loaded targets */ -extern void pull_targets_free(int targets, char **target, const char **write_ref); - -/* If write_ref is set, the ref filename to write the target value to. */ -/* If write_ref_log_details is set, additional text will appear in the ref log. 
*/ -extern int pull(int targets, char **target, const char **write_ref, - const char *write_ref_log_details); - -#endif /* PULL_H */ diff --git a/git-compat-util.h b/git-compat-util.h index f23d934f66..474f1d1ffb 100644 --- a/git-compat-util.h +++ b/git-compat-util.h @@ -147,6 +147,11 @@ extern ssize_t git_pread(int fd, void *buf, size_t count, off_t offset); extern int gitsetenv(const char *, const char *, int); #endif +#ifdef NO_MKDTEMP +#define mkdtemp gitmkdtemp +extern char *gitmkdtemp(char *); +#endif + #ifdef NO_UNSETENV #define unsetenv gitunsetenv extern void gitunsetenv(const char *); @@ -328,6 +328,8 @@ static void handle_internal_command(int argc, const char **argv) { "diff-files", cmd_diff_files }, { "diff-index", cmd_diff_index, RUN_SETUP }, { "diff-tree", cmd_diff_tree, RUN_SETUP }, + { "fetch", cmd_fetch, RUN_SETUP }, + { "fetch-pack", cmd_fetch_pack, RUN_SETUP }, { "fetch--tool", cmd_fetch__tool, RUN_SETUP }, { "fmt-merge-msg", cmd_fmt_merge_msg, RUN_SETUP }, { "for-each-ref", cmd_for_each_ref, RUN_SETUP }, @@ -338,6 +340,9 @@ static void handle_internal_command(int argc, const char **argv) { "get-tar-commit-id", cmd_get_tar_commit_id }, { "grep", cmd_grep, RUN_SETUP | USE_PAGER }, { "help", cmd_help }, +#ifndef NO_CURL + { "http-fetch", cmd_http_fetch, RUN_SETUP }, +#endif { "init", cmd_init_db }, { "init-db", cmd_init_db }, { "log", cmd_log, RUN_SETUP | USE_PAGER }, diff --git a/http-push.c b/http-push.c index 276e1eb1d9..c02a3af634 100644 --- a/http-push.c +++ b/http-push.c @@ -1,7 +1,6 @@ #include "cache.h" #include "commit.h" #include "pack.h" -#include "fetch.h" #include "tag.h" #include "blob.h" #include "http.h" @@ -14,7 +13,7 @@ #include <expat.h> static const char http_push_usage[] = -"git-http-push [--all] [--force] [--verbose] <remote> [<head>...]\n"; +"git-http-push [--all] [--dry-run] [--force] [--verbose] <remote> [<head>...]\n"; #ifndef XML_STATUS_OK enum XML_Status { @@ -81,6 +80,7 @@ static struct curl_slist *default_headers; static int push_verbosely; static int push_all; static int force_all; +static int dry_run; static struct object_list *objects; @@ -795,38 +795,27 @@ static void finish_request(struct transfer_request *request) } #ifdef USE_CURL_MULTI -void fill_active_slots(void) +static int fill_active_slot(void *unused) { struct transfer_request *request = request_queue_head; - struct transfer_request *next; - struct active_request_slot *slot = active_queue_head; - int num_transfers; if (aborted) - return; + return 0; - while (active_requests < max_requests && request != NULL) { - next = request->next; + for (request = request_queue_head; request; request = request->next) { if (request->state == NEED_FETCH) { start_fetch_loose(request); + return 1; } else if (pushing && request->state == NEED_PUSH) { if (remote_dir_exists[request->obj->sha1[0]] == 1) { start_put(request); } else { start_mkcol(request); } - curl_multi_perform(curlm, &num_transfers); - } - request = next; - } - - while (slot != NULL) { - if (!slot->in_use && slot->curl != NULL) { - curl_easy_cleanup(slot->curl); - slot->curl = NULL; + return 1; } - slot = slot->next; } + return 0; } #endif @@ -2314,6 +2303,10 @@ int main(int argc, char **argv) force_all = 1; continue; } + if (!strcmp(arg, "--dry-run")) { + dry_run = 1; + continue; + } if (!strcmp(arg, "--verbose")) { push_verbosely = 1; continue; @@ -2455,7 +2448,8 @@ int main(int argc, char **argv) if (strcmp(ref->name, ref->peer_ref->name)) fprintf(stderr, " using '%s'", ref->peer_ref->name); fprintf(stderr, "\n from %s\n to %s\n", 
old_hex, new_hex); - + if (dry_run) + continue; /* Lock remote branch ref */ ref_lock = lock_remote(ref->name, LOCK_TIME); @@ -2502,6 +2496,7 @@ int main(int argc, char **argv) objects_to_send); #ifdef USE_CURL_MULTI fill_active_slots(); + add_fill_function(NULL, fill_active_slot); #endif finish_all_active_slots(); @@ -2522,7 +2517,8 @@ int main(int argc, char **argv) if (remote->has_info_refs && new_refs) { if (info_ref_lock && remote->can_update_info_refs) { fprintf(stderr, "Updating remote server info\n"); - update_remote_info_refs(info_ref_lock); + if (!dry_run) + update_remote_info_refs(info_ref_lock); } else { fprintf(stderr, "Unable to update server info\n"); } diff --git a/http-fetch.c b/http-walker.c index 202fae0ba8..444aebf526 100644 --- a/http-fetch.c +++ b/http-walker.c @@ -1,19 +1,12 @@ #include "cache.h" #include "commit.h" #include "pack.h" -#include "fetch.h" +#include "walker.h" #include "http.h" #define PREV_BUF_SIZE 4096 #define RANGE_HEADER_SIZE 30 -static int commits_on_stdin; - -static int got_alternates = -1; -static int corrupt_object_found; - -static struct curl_slist *no_pragma_header; - struct alt_base { char *base; @@ -22,8 +15,6 @@ struct alt_base struct alt_base *next; }; -static struct alt_base *alt; - enum object_request_state { WAITING, ABORTED, @@ -33,6 +24,7 @@ enum object_request_state { struct object_request { + struct walker *walker; unsigned char sha1[20]; struct alt_base *repo; char *url; @@ -53,6 +45,7 @@ struct object_request }; struct alternates_request { + struct walker *walker; const char *base; char *url; struct buffer *buffer; @@ -60,6 +53,13 @@ struct alternates_request { int http_specific; }; +struct walker_data { + const char *url; + int got_alternates; + struct alt_base *alt; + struct curl_slist *no_pragma_header; +}; + static struct object_request *object_queue_head; static size_t fwrite_sha1_file(void *ptr, size_t eltsize, size_t nmemb, @@ -103,11 +103,12 @@ static int missing__target(int code, int result) #define missing_target(a) missing__target((a)->http_code, (a)->curl_result) -static void fetch_alternates(const char *base); +static void fetch_alternates(struct walker *walker, const char *base); static void process_object_response(void *callback_data); -static void start_object_request(struct object_request *obj_req) +static void start_object_request(struct walker *walker, + struct object_request *obj_req) { char *hex = sha1_to_hex(obj_req->sha1); char prevfile[PATH_MAX]; @@ -120,6 +121,7 @@ static void start_object_request(struct object_request *obj_req) char range[RANGE_HEADER_SIZE]; struct curl_slist *range_header = NULL; struct active_request_slot *slot; + struct walker_data *data = walker->data; snprintf(prevfile, sizeof(prevfile), "%s.prev", obj_req->filename); unlink(prevfile); @@ -212,12 +214,12 @@ static void start_object_request(struct object_request *obj_req) curl_easy_setopt(slot->curl, CURLOPT_WRITEFUNCTION, fwrite_sha1_file); curl_easy_setopt(slot->curl, CURLOPT_ERRORBUFFER, obj_req->errorstr); curl_easy_setopt(slot->curl, CURLOPT_URL, url); - curl_easy_setopt(slot->curl, CURLOPT_HTTPHEADER, no_pragma_header); + curl_easy_setopt(slot->curl, CURLOPT_HTTPHEADER, data->no_pragma_header); /* If we have successfully processed data from a previous fetch attempt, only fetch the data we don't already have. 
*/ if (prev_posn>0) { - if (get_verbosely) + if (walker->get_verbosely) fprintf(stderr, "Resuming fetch of object %s at byte %ld\n", hex, prev_posn); @@ -268,13 +270,16 @@ static void finish_object_request(struct object_request *obj_req) move_temp_to_file(obj_req->tmpfile, obj_req->filename); if (obj_req->rename == 0) - pull_say("got %s\n", sha1_to_hex(obj_req->sha1)); + walker_say(obj_req->walker, "got %s\n", sha1_to_hex(obj_req->sha1)); } static void process_object_response(void *callback_data) { struct object_request *obj_req = (struct object_request *)callback_data; + struct walker *walker = obj_req->walker; + struct walker_data *data = walker->data; + struct alt_base *alt = data->alt; obj_req->curl_result = obj_req->slot->curl_result; obj_req->http_code = obj_req->slot->http_code; @@ -283,13 +288,13 @@ static void process_object_response(void *callback_data) /* Use alternates if necessary */ if (missing_target(obj_req)) { - fetch_alternates(alt->base); + fetch_alternates(walker, alt->base); if (obj_req->repo->next != NULL) { obj_req->repo = obj_req->repo->next; close(obj_req->local); obj_req->local = -1; - start_object_request(obj_req); + start_object_request(walker, obj_req); return; } } @@ -317,42 +322,35 @@ static void release_object_request(struct object_request *obj_req) } #ifdef USE_CURL_MULTI -void fill_active_slots(void) +static int fill_active_slot(struct walker *walker) { - struct object_request *obj_req = object_queue_head; - struct active_request_slot *slot = active_queue_head; - int num_transfers; + struct object_request *obj_req; - while (active_requests < max_requests && obj_req != NULL) { + for (obj_req = object_queue_head; obj_req; obj_req = obj_req->next) { if (obj_req->state == WAITING) { if (has_sha1_file(obj_req->sha1)) obj_req->state = COMPLETE; - else - start_object_request(obj_req); - curl_multi_perform(curlm, &num_transfers); - } - obj_req = obj_req->next; - } - - while (slot != NULL) { - if (!slot->in_use && slot->curl != NULL) { - curl_easy_cleanup(slot->curl); - slot->curl = NULL; + else { + start_object_request(walker, obj_req); + return 1; + } } - slot = slot->next; } + return 0; } #endif -void prefetch(unsigned char *sha1) +static void prefetch(struct walker *walker, unsigned char *sha1) { struct object_request *newreq; struct object_request *tail; + struct walker_data *data = walker->data; char *filename = sha1_file_name(sha1); newreq = xmalloc(sizeof(*newreq)); + newreq->walker = walker; hashcpy(newreq->sha1, sha1); - newreq->repo = alt; + newreq->repo = data->alt; newreq->url = NULL; newreq->local = -1; newreq->state = WAITING; @@ -378,7 +376,7 @@ void prefetch(unsigned char *sha1) #endif } -static int fetch_index(struct alt_base *repo, unsigned char *sha1) +static int fetch_index(struct walker *walker, struct alt_base *repo, unsigned char *sha1) { char *hex = sha1_to_hex(sha1); char *filename; @@ -387,6 +385,7 @@ static int fetch_index(struct alt_base *repo, unsigned char *sha1) long prev_posn = 0; char range[RANGE_HEADER_SIZE]; struct curl_slist *range_header = NULL; + struct walker_data *data = walker->data; FILE *indexfile; struct active_request_slot *slot; @@ -395,7 +394,7 @@ static int fetch_index(struct alt_base *repo, unsigned char *sha1) if (has_pack_index(sha1)) return 0; - if (get_verbosely) + if (walker->get_verbosely) fprintf(stderr, "Getting index for pack %s\n", hex); url = xmalloc(strlen(repo->base) + 64); @@ -413,14 +412,14 @@ static int fetch_index(struct alt_base *repo, unsigned char *sha1) curl_easy_setopt(slot->curl, CURLOPT_FILE, 
indexfile); curl_easy_setopt(slot->curl, CURLOPT_WRITEFUNCTION, fwrite); curl_easy_setopt(slot->curl, CURLOPT_URL, url); - curl_easy_setopt(slot->curl, CURLOPT_HTTPHEADER, no_pragma_header); + curl_easy_setopt(slot->curl, CURLOPT_HTTPHEADER, data->no_pragma_header); slot->local = indexfile; /* If there is data present from a previous transfer attempt, resume where it left off */ prev_posn = ftell(indexfile); if (prev_posn>0) { - if (get_verbosely) + if (walker->get_verbosely) fprintf(stderr, "Resuming fetch of index for pack %s at byte %ld\n", hex, prev_posn); @@ -446,13 +445,13 @@ static int fetch_index(struct alt_base *repo, unsigned char *sha1) return move_temp_to_file(tmpfile, filename); } -static int setup_index(struct alt_base *repo, unsigned char *sha1) +static int setup_index(struct walker *walker, struct alt_base *repo, unsigned char *sha1) { struct packed_git *new_pack; if (has_pack_file(sha1)) return 0; /* don't list this as something we can get */ - if (fetch_index(repo, sha1)) + if (fetch_index(walker, repo, sha1)) return -1; new_pack = parse_pack_index(sha1); @@ -465,8 +464,10 @@ static void process_alternates_response(void *callback_data) { struct alternates_request *alt_req = (struct alternates_request *)callback_data; + struct walker *walker = alt_req->walker; + struct walker_data *cdata = walker->data; struct active_request_slot *slot = alt_req->slot; - struct alt_base *tail = alt; + struct alt_base *tail = cdata->alt; const char *base = alt_req->base; static const char null_byte = '\0'; char *data; @@ -487,7 +488,7 @@ static void process_alternates_response(void *callback_data) if (slot->finished != NULL) (*slot->finished) = 0; if (!start_active_slot(slot)) { - got_alternates = -1; + cdata->got_alternates = -1; slot->in_use = 0; if (slot->finished != NULL) (*slot->finished) = 1; @@ -496,7 +497,7 @@ static void process_alternates_response(void *callback_data) } } else if (slot->curl_result != CURLE_OK) { if (!missing_target(slot)) { - got_alternates = -1; + cdata->got_alternates = -1; return; } } @@ -573,7 +574,7 @@ static void process_alternates_response(void *callback_data) memcpy(target + serverlen, data + i, posn - i - 7); target[serverlen + posn - i - 7] = 0; - if (get_verbosely) + if (walker->get_verbosely) fprintf(stderr, "Also look at %s\n", target); newalt = xmalloc(sizeof(*newalt)); @@ -590,39 +591,40 @@ static void process_alternates_response(void *callback_data) i = posn + 1; } - got_alternates = 1; + cdata->got_alternates = 1; } -static void fetch_alternates(const char *base) +static void fetch_alternates(struct walker *walker, const char *base) { struct buffer buffer; char *url; char *data; struct active_request_slot *slot; struct alternates_request alt_req; + struct walker_data *cdata = walker->data; /* If another request has already started fetching alternates, wait for them to arrive and return to processing this request's curl message */ #ifdef USE_CURL_MULTI - while (got_alternates == 0) { + while (cdata->got_alternates == 0) { step_active_slots(); } #endif /* Nothing to do if they've already been fetched */ - if (got_alternates == 1) + if (cdata->got_alternates == 1) return; /* Start the fetch */ - got_alternates = 0; + cdata->got_alternates = 0; data = xmalloc(4096); buffer.size = 4096; buffer.posn = 0; buffer.buffer = data; - if (get_verbosely) + if (walker->get_verbosely) fprintf(stderr, "Getting alternates list for %s\n", base); url = xmalloc(strlen(base) + 31); @@ -632,6 +634,7 @@ static void fetch_alternates(const char *base) may fail and need to 
have alternates loaded before continuing */ slot = get_active_slot(); slot->callback_func = process_alternates_response; + alt_req.walker = walker; slot->callback_data = &alt_req; curl_easy_setopt(slot->curl, CURLOPT_FILE, &buffer); @@ -647,13 +650,13 @@ static void fetch_alternates(const char *base) if (start_active_slot(slot)) run_active_slot(slot); else - got_alternates = -1; + cdata->got_alternates = -1; free(data); free(url); } -static int fetch_indices(struct alt_base *repo) +static int fetch_indices(struct walker *walker, struct alt_base *repo) { unsigned char sha1[20]; char *url; @@ -672,7 +675,7 @@ static int fetch_indices(struct alt_base *repo) buffer.posn = 0; buffer.buffer = data; - if (get_verbosely) + if (walker->get_verbosely) fprintf(stderr, "Getting pack list for %s\n", repo->base); url = xmalloc(strlen(repo->base) + 21); @@ -712,7 +715,7 @@ static int fetch_indices(struct alt_base *repo) !prefixcmp(data + i, " pack-") && !prefixcmp(data + i + 46, ".pack\n")) { get_sha1_hex(data + i + 6, sha1); - setup_index(repo, sha1); + setup_index(walker, repo, sha1); i += 51; break; } @@ -728,7 +731,7 @@ static int fetch_indices(struct alt_base *repo) return 0; } -static int fetch_pack(struct alt_base *repo, unsigned char *sha1) +static int fetch_pack(struct walker *walker, struct alt_base *repo, unsigned char *sha1) { char *url; struct packed_git *target; @@ -740,17 +743,18 @@ static int fetch_pack(struct alt_base *repo, unsigned char *sha1) long prev_posn = 0; char range[RANGE_HEADER_SIZE]; struct curl_slist *range_header = NULL; + struct walker_data *data = walker->data; struct active_request_slot *slot; struct slot_results results; - if (fetch_indices(repo)) + if (fetch_indices(walker, repo)) return -1; target = find_sha1_pack(sha1, repo->packs); if (!target) return -1; - if (get_verbosely) { + if (walker->get_verbosely) { fprintf(stderr, "Getting pack %s\n", sha1_to_hex(target->sha1)); fprintf(stderr, " which contains %s\n", @@ -773,14 +777,14 @@ static int fetch_pack(struct alt_base *repo, unsigned char *sha1) curl_easy_setopt(slot->curl, CURLOPT_FILE, packfile); curl_easy_setopt(slot->curl, CURLOPT_WRITEFUNCTION, fwrite); curl_easy_setopt(slot->curl, CURLOPT_URL, url); - curl_easy_setopt(slot->curl, CURLOPT_HTTPHEADER, no_pragma_header); + curl_easy_setopt(slot->curl, CURLOPT_HTTPHEADER, data->no_pragma_header); slot->local = packfile; /* If there is data present from a previous transfer attempt, resume where it left off */ prev_posn = ftell(packfile); if (prev_posn>0) { - if (get_verbosely) + if (walker->get_verbosely) fprintf(stderr, "Resuming fetch of pack %s at byte %ld\n", sha1_to_hex(target->sha1), prev_posn); @@ -834,7 +838,7 @@ static void abort_object_request(struct object_request *obj_req) release_object_request(obj_req); } -static int fetch_object(struct alt_base *repo, unsigned char *sha1) +static int fetch_object(struct walker *walker, struct alt_base *repo, unsigned char *sha1) { char *hex = sha1_to_hex(sha1); int ret = 0; @@ -855,7 +859,7 @@ static int fetch_object(struct alt_base *repo, unsigned char *sha1) step_active_slots(); } #else - start_object_request(obj_req); + start_object_request(walker, obj_req); #endif while (obj_req->state == ACTIVE) { @@ -876,7 +880,7 @@ static int fetch_object(struct alt_base *repo, unsigned char *sha1) obj_req->errorstr, obj_req->curl_result, obj_req->http_code, hex); } else if (obj_req->zret != Z_STREAM_END) { - corrupt_object_found++; + walker->corrupt_object_found++; ret = error("File %s (%s) corrupt", hex, obj_req->url); } 
else if (hashcmp(obj_req->sha1, obj_req->real_sha1)) { ret = error("File %s has bad hash", hex); @@ -889,20 +893,21 @@ static int fetch_object(struct alt_base *repo, unsigned char *sha1) return ret; } -int fetch(unsigned char *sha1) +static int fetch(struct walker *walker, unsigned char *sha1) { - struct alt_base *altbase = alt; + struct walker_data *data = walker->data; + struct alt_base *altbase = data->alt; - if (!fetch_object(altbase, sha1)) + if (!fetch_object(walker, altbase, sha1)) return 0; while (altbase) { - if (!fetch_pack(altbase, sha1)) + if (!fetch_pack(walker, altbase, sha1)) return 0; - fetch_alternates(alt->base); + fetch_alternates(walker, data->alt->base); altbase = altbase->next; } return error("Unable to find %s under %s", sha1_to_hex(sha1), - alt->base); + data->alt->base); } static inline int needs_quote(int ch) @@ -951,12 +956,13 @@ static char *quote_ref_url(const char *base, const char *ref) return qref; } -int fetch_ref(char *ref, unsigned char *sha1) +static int fetch_ref(struct walker *walker, char *ref, unsigned char *sha1) { char *url; char hex[42]; struct buffer buffer; - const char *base = alt->base; + struct walker_data *data = walker->data; + const char *base = data->alt->base; struct active_request_slot *slot; struct slot_results results; buffer.size = 41; @@ -985,80 +991,45 @@ int fetch_ref(char *ref, unsigned char *sha1) return 0; } -int main(int argc, const char **argv) +static void cleanup(struct walker *walker) +{ + struct walker_data *data = walker->data; + http_cleanup(); + + curl_slist_free_all(data->no_pragma_header); +} + +struct walker *get_http_walker(const char *url) { - int commits; - const char **write_ref = NULL; - char **commit_id; - const char *url; char *s; - int arg = 1; - int rc = 0; - - setup_git_directory(); - git_config(git_default_config); - - while (arg < argc && argv[arg][0] == '-') { - if (argv[arg][1] == 't') { - get_tree = 1; - } else if (argv[arg][1] == 'c') { - get_history = 1; - } else if (argv[arg][1] == 'a') { - get_all = 1; - get_tree = 1; - get_history = 1; - } else if (argv[arg][1] == 'v') { - get_verbosely = 1; - } else if (argv[arg][1] == 'w') { - write_ref = &argv[arg + 1]; - arg++; - } else if (!strcmp(argv[arg], "--recover")) { - get_recover = 1; - } else if (!strcmp(argv[arg], "--stdin")) { - commits_on_stdin = 1; - } - arg++; - } - if (argc < arg + 2 - commits_on_stdin) { - usage("git-http-fetch [-c] [-t] [-a] [-v] [--recover] [-w ref] [--stdin] commit-id url"); - return 1; - } - if (commits_on_stdin) { - commits = pull_targets_stdin(&commit_id, &write_ref); - } else { - commit_id = (char **) &argv[arg++]; - commits = 1; - } - url = argv[arg]; + struct walker_data *data = xmalloc(sizeof(struct walker_data)); + struct walker *walker = xmalloc(sizeof(struct walker)); http_init(); - no_pragma_header = curl_slist_append(no_pragma_header, "Pragma:"); + data->no_pragma_header = curl_slist_append(NULL, "Pragma:"); - alt = xmalloc(sizeof(*alt)); - alt->base = xmalloc(strlen(url) + 1); - strcpy(alt->base, url); - for (s = alt->base + strlen(alt->base) - 1; *s == '/'; --s) + data->alt = xmalloc(sizeof(*data->alt)); + data->alt->base = xmalloc(strlen(url) + 1); + strcpy(data->alt->base, url); + for (s = data->alt->base + strlen(data->alt->base) - 1; *s == '/'; --s) *s = 0; - alt->got_indices = 0; - alt->packs = NULL; - alt->next = NULL; - if (pull(commits, commit_id, write_ref, url)) - rc = 1; - - http_cleanup(); + data->alt->got_indices = 0; + data->alt->packs = NULL; + data->alt->next = NULL; + data->got_alternates = 
-1; - curl_slist_free_all(no_pragma_header); + walker->corrupt_object_found = 0; + walker->fetch = fetch; + walker->fetch_ref = fetch_ref; + walker->prefetch = prefetch; + walker->cleanup = cleanup; + walker->data = data; - if (commits_on_stdin) - pull_targets_free(commits, commit_id, write_ref); +#ifdef USE_CURL_MULTI + add_fill_function(walker, (int (*)(void *)) fill_active_slot); +#endif - if (corrupt_object_found) { - fprintf(stderr, -"Some loose object were found to be corrupt, but they might be just\n" -"a false '404 Not Found' error message sent with incorrect HTTP\n" -"status code. Suggest running git-fsck.\n"); - } - return rc; + return walker; } @@ -276,6 +276,7 @@ void http_cleanup(void) #endif while (slot != NULL) { + struct active_request_slot *next = slot->next; #ifdef USE_CURL_MULTI if (slot->in_use) { curl_easy_getinfo(slot->curl, @@ -287,8 +288,10 @@ void http_cleanup(void) #endif if (slot->curl != NULL) curl_easy_cleanup(slot->curl); - slot = slot->next; + free(slot); + slot = next; } + active_queue_head = NULL; #ifndef NO_CURL_EASY_DUPHANDLE curl_easy_cleanup(curl_default); @@ -300,7 +303,7 @@ void http_cleanup(void) curl_global_cleanup(); curl_slist_free_all(pragma_header); - pragma_header = NULL; + pragma_header = NULL; } struct active_request_slot *get_active_slot(void) @@ -372,6 +375,7 @@ int start_active_slot(struct active_request_slot *slot) { #ifdef USE_CURL_MULTI CURLMcode curlm_result = curl_multi_add_handle(curlm, slot->curl); + int num_transfers; if (curlm_result != CURLM_OK && curlm_result != CURLM_CALL_MULTI_PERFORM) { @@ -379,11 +383,60 @@ int start_active_slot(struct active_request_slot *slot) slot->in_use = 0; return 0; } + + /* + * We know there must be something to do, since we just added + * something. + */ + curl_multi_perform(curlm, &num_transfers); #endif return 1; } #ifdef USE_CURL_MULTI +struct fill_chain { + void *data; + int (*fill)(void *); + struct fill_chain *next; +}; + +static struct fill_chain *fill_cfg = NULL; + +void add_fill_function(void *data, int (*fill)(void *)) +{ + struct fill_chain *new = malloc(sizeof(*new)); + struct fill_chain **linkp = &fill_cfg; + new->data = data; + new->fill = fill; + new->next = NULL; + while (*linkp) + linkp = &(*linkp)->next; + *linkp = new; +} + +void fill_active_slots(void) +{ + struct active_request_slot *slot = active_queue_head; + + while (active_requests < max_requests) { + struct fill_chain *fill; + for (fill = fill_cfg; fill; fill = fill->next) + if (fill->fill(fill->data)) + break; + + if (!fill) + break; + } + + while (slot != NULL) { + if (!slot->in_use && slot->curl != NULL) { + curl_easy_cleanup(slot->curl); + slot->curl = NULL; + } + slot = slot->next; + } +} + void step_active_slots(void) { int num_transfers; @@ -70,6 +70,7 @@ extern void release_active_slot(struct active_request_slot *slot); #ifdef USE_CURL_MULTI extern void fill_active_slots(void); +extern void add_fill_function(void *data, int (*fill)(void *)); extern void step_active_slots(void); #endif @@ -79,10 +80,6 @@ extern void http_cleanup(void); extern int data_received; extern int active_requests; -#ifdef USE_CURL_MULTI -extern int max_requests; -extern CURLM *curlm; -#endif #ifndef NO_CURL_EASY_DUPHANDLE extern CURL *curl_default; #endif @@ -103,6 +100,4 @@ extern long curl_low_speed_time; extern struct curl_slist *pragma_header; extern struct curl_slist *no_range_header; -extern struct active_request_slot *active_queue_head; - #endif /* HTTP_H */ diff --git a/local-fetch.c b/local-fetch.c deleted file mode 100644 index 
bf7ec6c2a3..0000000000 --- a/local-fetch.c +++ /dev/null @@ -1,254 +0,0 @@ -/* - * Copyright (C) 2005 Junio C Hamano - */ -#include "cache.h" -#include "commit.h" -#include "fetch.h" - -static int use_link; -static int use_symlink; -static int use_filecopy = 1; -static int commits_on_stdin; - -static const char *path; /* "Remote" git repository */ - -void prefetch(unsigned char *sha1) -{ -} - -static struct packed_git *packs; - -static void setup_index(unsigned char *sha1) -{ - struct packed_git *new_pack; - char filename[PATH_MAX]; - strcpy(filename, path); - strcat(filename, "/objects/pack/pack-"); - strcat(filename, sha1_to_hex(sha1)); - strcat(filename, ".idx"); - new_pack = parse_pack_index_file(sha1, filename); - new_pack->next = packs; - packs = new_pack; -} - -static int setup_indices(void) -{ - DIR *dir; - struct dirent *de; - char filename[PATH_MAX]; - unsigned char sha1[20]; - sprintf(filename, "%s/objects/pack/", path); - dir = opendir(filename); - if (!dir) - return -1; - while ((de = readdir(dir)) != NULL) { - int namelen = strlen(de->d_name); - if (namelen != 50 || - !has_extension(de->d_name, ".pack")) - continue; - get_sha1_hex(de->d_name + 5, sha1); - setup_index(sha1); - } - closedir(dir); - return 0; -} - -static int copy_file(const char *source, char *dest, const char *hex, - int warn_if_not_exists) -{ - safe_create_leading_directories(dest); - if (use_link) { - if (!link(source, dest)) { - pull_say("link %s\n", hex); - return 0; - } - /* If we got ENOENT there is no point continuing. */ - if (errno == ENOENT) { - if (!warn_if_not_exists) - return -1; - return error("does not exist %s", source); - } - } - if (use_symlink) { - struct stat st; - if (stat(source, &st)) { - if (!warn_if_not_exists && errno == ENOENT) - return -1; - return error("cannot stat %s: %s", source, - strerror(errno)); - } - if (!symlink(source, dest)) { - pull_say("symlink %s\n", hex); - return 0; - } - } - if (use_filecopy) { - int ifd, ofd, status = 0; - - ifd = open(source, O_RDONLY); - if (ifd < 0) { - if (!warn_if_not_exists && errno == ENOENT) - return -1; - return error("cannot open %s", source); - } - ofd = open(dest, O_WRONLY | O_CREAT | O_EXCL, 0666); - if (ofd < 0) { - close(ifd); - return error("cannot open %s", dest); - } - status = copy_fd(ifd, ofd); - close(ofd); - if (status) - return error("cannot write %s", dest); - pull_say("copy %s\n", hex); - return 0; - } - return error("failed to copy %s with given copy methods.", hex); -} - -static int fetch_pack(const unsigned char *sha1) -{ - struct packed_git *target; - char filename[PATH_MAX]; - if (setup_indices()) - return -1; - target = find_sha1_pack(sha1, packs); - if (!target) - return error("Couldn't find %s: not separate or in any pack", - sha1_to_hex(sha1)); - if (get_verbosely) { - fprintf(stderr, "Getting pack %s\n", - sha1_to_hex(target->sha1)); - fprintf(stderr, " which contains %s\n", - sha1_to_hex(sha1)); - } - sprintf(filename, "%s/objects/pack/pack-%s.pack", - path, sha1_to_hex(target->sha1)); - copy_file(filename, sha1_pack_name(target->sha1), - sha1_to_hex(target->sha1), 1); - sprintf(filename, "%s/objects/pack/pack-%s.idx", - path, sha1_to_hex(target->sha1)); - copy_file(filename, sha1_pack_index_name(target->sha1), - sha1_to_hex(target->sha1), 1); - install_packed_git(target); - return 0; -} - -static int fetch_file(const unsigned char *sha1) -{ - static int object_name_start = -1; - static char filename[PATH_MAX]; - char *hex = sha1_to_hex(sha1); - char *dest_filename = sha1_file_name(sha1); - - if 
(object_name_start < 0) { - strcpy(filename, path); /* e.g. git.git */ - strcat(filename, "/objects/"); - object_name_start = strlen(filename); - } - filename[object_name_start+0] = hex[0]; - filename[object_name_start+1] = hex[1]; - filename[object_name_start+2] = '/'; - strcpy(filename + object_name_start + 3, hex + 2); - return copy_file(filename, dest_filename, hex, 0); -} - -int fetch(unsigned char *sha1) -{ - if (has_sha1_file(sha1)) - return 0; - else - return fetch_file(sha1) && fetch_pack(sha1); -} - -int fetch_ref(char *ref, unsigned char *sha1) -{ - static int ref_name_start = -1; - static char filename[PATH_MAX]; - static char hex[41]; - int ifd; - - if (ref_name_start < 0) { - sprintf(filename, "%s/refs/", path); - ref_name_start = strlen(filename); - } - strcpy(filename + ref_name_start, ref); - ifd = open(filename, O_RDONLY); - if (ifd < 0) { - close(ifd); - return error("cannot open %s", filename); - } - if (read_in_full(ifd, hex, 40) != 40 || get_sha1_hex(hex, sha1)) { - close(ifd); - return error("cannot read from %s", filename); - } - close(ifd); - pull_say("ref %s\n", sha1_to_hex(sha1)); - return 0; -} - -static const char local_pull_usage[] = -"git-local-fetch [-c] [-t] [-a] [-v] [-w filename] [--recover] [-l] [-s] [-n] [--stdin] commit-id path"; - -/* - * By default we only use file copy. - * If -l is specified, a hard link is attempted. - * If -s is specified, then a symlink is attempted. - * If -n is _not_ specified, then a regular file-to-file copy is done. - */ -int main(int argc, const char **argv) -{ - int commits; - const char **write_ref = NULL; - char **commit_id; - int arg = 1; - - setup_git_directory(); - git_config(git_default_config); - - while (arg < argc && argv[arg][0] == '-') { - if (argv[arg][1] == 't') - get_tree = 1; - else if (argv[arg][1] == 'c') - get_history = 1; - else if (argv[arg][1] == 'a') { - get_all = 1; - get_tree = 1; - get_history = 1; - } - else if (argv[arg][1] == 'l') - use_link = 1; - else if (argv[arg][1] == 's') - use_symlink = 1; - else if (argv[arg][1] == 'n') - use_filecopy = 0; - else if (argv[arg][1] == 'v') - get_verbosely = 1; - else if (argv[arg][1] == 'w') - write_ref = &argv[++arg]; - else if (!strcmp(argv[arg], "--recover")) - get_recover = 1; - else if (!strcmp(argv[arg], "--stdin")) - commits_on_stdin = 1; - else - usage(local_pull_usage); - arg++; - } - if (argc < arg + 2 - commits_on_stdin) - usage(local_pull_usage); - if (commits_on_stdin) { - commits = pull_targets_stdin(&commit_id, &write_ref); - } else { - commit_id = (char **) &argv[arg++]; - commits = 1; - } - path = argv[arg]; - - if (pull(commits, commit_id, write_ref, path)) - return 1; - - if (commits_on_stdin) - pull_targets_free(commits, commit_id, write_ref); - - return 0; -} diff --git a/pack-write.c b/pack-write.c index e59b197e5e..979bdfff7c 100644 --- a/pack-write.c +++ b/pack-write.c @@ -179,3 +179,29 @@ void fixup_pack_header_footer(int pack_fd, SHA1_Final(pack_file_sha1, &c); write_or_die(pack_fd, pack_file_sha1, 20); } + +char *index_pack_lockfile(int ip_out) +{ + int len, s; + char packname[46]; + + /* + * The first thing we expects from index-pack's output + * is "pack\t%40s\n" or "keep\t%40s\n" (46 bytes) where + * %40s is the newly created pack SHA1 name. In the "keep" + * case, we need it to remove the corresponding .keep file + * later on. If we don't get that then tough luck with it. 
+ */ + for (len = 0; + len < 46 && (s = xread(ip_out, packname+len, 46-len)) > 0; + len += s); + if (len == 46 && packname[45] == '\n' && + memcmp(packname, "keep\t", 5) == 0) { + char path[PATH_MAX]; + packname[45] = 0; + snprintf(path, sizeof(path), "%s/pack/pack-%s.keep", + get_object_directory(), packname + 5); + return xstrdup(path); + } + return NULL; +} @@ -59,6 +59,7 @@ extern const char *write_idx_file(const char *index_name, struct pack_idx_entry extern int verify_pack(struct packed_git *, int); extern void fixup_pack_header_footer(int, unsigned char *, const char *, uint32_t); +extern char *index_pack_lockfile(int fd); #define PH_ERROR_EOF (-1) #define PH_ERROR_PACK_SIGNATURE (-2) diff --git a/receive-pack.c b/receive-pack.c index 1521d0b2de..38e35c06b9 100644 --- a/receive-pack.c +++ b/receive-pack.c @@ -382,9 +382,8 @@ static const char *unpack(void) } } else { const char *keeper[6]; - int s, len, status; + int s, status; char keep_arg[256]; - char packname[46]; struct child_process ip; s = sprintf(keep_arg, "--keep=receive-pack %i on ", getpid()); @@ -403,26 +402,7 @@ static const char *unpack(void) ip.git_cmd = 1; if (start_command(&ip)) return "index-pack fork failed"; - - /* - * The first thing we expects from index-pack's output - * is "pack\t%40s\n" or "keep\t%40s\n" (46 bytes) where - * %40s is the newly created pack SHA1 name. In the "keep" - * case, we need it to remove the corresponding .keep file - * later on. If we don't get that then tough luck with it. - */ - for (len = 0; - len < 46 && (s = xread(ip.out, packname+len, 46-len)) > 0; - len += s); - if (len == 46 && packname[45] == '\n' && - memcmp(packname, "keep\t", 5) == 0) { - char path[PATH_MAX]; - packname[45] = 0; - snprintf(path, sizeof(path), "%s/pack/pack-%s.keep", - get_object_directory(), packname + 5); - pack_lockfile = xstrdup(path); - } - + pack_lockfile = index_pack_lockfile(ip.out); status = finish_command(&ip); if (!status) { reprepare_packed_git(); @@ -2,6 +2,7 @@ #include "refs.h" #include "object.h" #include "tag.h" +#include "dir.h" /* ISSYMREF=01 and ISPACKED=02 are public interfaces */ #define REF_KNOWS_PEELED 04 @@ -671,57 +672,23 @@ static struct ref_lock *verify_lock(struct ref_lock *lock, return lock; } -static int remove_empty_dir_recursive(char *path, int len) -{ - DIR *dir = opendir(path); - struct dirent *e; - int ret = 0; - - if (!dir) - return -1; - if (path[len-1] != '/') - path[len++] = '/'; - while ((e = readdir(dir)) != NULL) { - struct stat st; - int namlen; - if ((e->d_name[0] == '.') && - ((e->d_name[1] == 0) || - ((e->d_name[1] == '.') && e->d_name[2] == 0))) - continue; /* "." and ".." */ - - namlen = strlen(e->d_name); - if ((len + namlen < PATH_MAX) && - strcpy(path + len, e->d_name) && - !lstat(path, &st) && - S_ISDIR(st.st_mode) && - !remove_empty_dir_recursive(path, len + namlen)) - continue; /* happy */ - - /* path too long, stat fails, or non-directory still exists */ - ret = -1; - break; - } - closedir(dir); - if (!ret) { - path[len] = 0; - ret = rmdir(path); - } - return ret; -} - -static int remove_empty_directories(char *file) +static int remove_empty_directories(const char *file) { /* we want to create a file but there is a directory there; * if that is an empty directory (or a directory that contains * only empty directories), remove them. 
*/ - char path[PATH_MAX]; - int len = strlen(file); + struct strbuf path; + int result; - if (len >= PATH_MAX) /* path too long ;-) */ - return -1; - strcpy(path, file); - return remove_empty_dir_recursive(path, len); + strbuf_init(&path, 20); + strbuf_addstr(&path, file); + + result = remove_dir_recursively(&path, 1); + + strbuf_release(&path); + + return result; } static int is_refname_available(const char *ref, const char *oldref, @@ -5,6 +5,12 @@ static struct remote **remotes; static int allocated_remotes; +static struct branch **branches; +static int allocated_branches; + +static struct branch *current_branch; +static const char *default_remote_name; + #define BUF_SIZE (2048) static char buffer[BUF_SIZE]; @@ -26,13 +32,13 @@ static void add_fetch_refspec(struct remote *remote, const char *ref) remote->fetch_refspec_nr = nr; } -static void add_uri(struct remote *remote, const char *uri) +static void add_url(struct remote *remote, const char *url) { - int nr = remote->uri_nr + 1; - remote->uri = - xrealloc(remote->uri, nr * sizeof(char *)); - remote->uri[nr-1] = uri; - remote->uri_nr = nr; + int nr = remote->url_nr + 1; + remote->url = + xrealloc(remote->url, nr * sizeof(char *)); + remote->url[nr-1] = url; + remote->url_nr = nr; } static struct remote *make_remote(const char *name, int len) @@ -67,6 +73,54 @@ static struct remote *make_remote(const char *name, int len) return remotes[empty]; } +static void add_merge(struct branch *branch, const char *name) +{ + int nr = branch->merge_nr + 1; + branch->merge_name = + xrealloc(branch->merge_name, nr * sizeof(char *)); + branch->merge_name[nr-1] = name; + branch->merge_nr = nr; +} + +static struct branch *make_branch(const char *name, int len) +{ + int i, empty = -1; + char *refname; + + for (i = 0; i < allocated_branches; i++) { + if (!branches[i]) { + if (empty < 0) + empty = i; + } else { + if (len ? (!strncmp(name, branches[i]->name, len) && + !branches[i]->name[len]) : + !strcmp(name, branches[i]->name)) + return branches[i]; + } + } + + if (empty < 0) { + empty = allocated_branches; + allocated_branches += allocated_branches ? allocated_branches : 1; + branches = xrealloc(branches, + sizeof(*branches) * allocated_branches); + memset(branches + empty, 0, + (allocated_branches - empty) * sizeof(*branches)); + } + branches[empty] = xcalloc(1, sizeof(struct branch)); + if (len) + branches[empty]->name = xstrndup(name, len); + else + branches[empty]->name = xstrdup(name); + refname = malloc(strlen(name) + strlen("refs/heads/") + 1); + strcpy(refname, "refs/heads/"); + strcpy(refname + strlen("refs/heads/"), + branches[empty]->name); + branches[empty]->refname = refname; + + return branches[empty]; +} + static void read_remotes_file(struct remote *remote) { FILE *f = fopen(git_path("remotes/%s", remote->name), "r"); @@ -100,7 +154,7 @@ static void read_remotes_file(struct remote *remote) switch (value_list) { case 0: - add_uri(remote, xstrdup(s)); + add_url(remote, xstrdup(s)); break; case 1: add_push_refspec(remote, xstrdup(s)); @@ -116,6 +170,8 @@ static void read_remotes_file(struct remote *remote) static void read_branches_file(struct remote *remote) { const char *slash = strchr(remote->name, '/'); + char *frag; + char *branch; int n = slash ? 
slash - remote->name : 1000; FILE *f = fopen(git_path("branches/%.*s", n, remote->name), "r"); char *s, *p; @@ -141,23 +197,41 @@ static void read_branches_file(struct remote *remote) strcpy(p, s); if (slash) strcat(p, slash); - add_uri(remote, p); + frag = strchr(p, '#'); + if (frag) { + *(frag++) = '\0'; + branch = xmalloc(strlen(frag) + 12); + strcpy(branch, "refs/heads/"); + strcat(branch, frag); + } else { + branch = "refs/heads/master"; + } + add_url(remote, p); + add_fetch_refspec(remote, branch); + remote->fetch_tags = 1; /* always auto-follow */ } -static char *default_remote_name = NULL; -static const char *current_branch = NULL; -static int current_branch_len = 0; - static int handle_config(const char *key, const char *value) { const char *name; const char *subkey; struct remote *remote; - if (!prefixcmp(key, "branch.") && current_branch && - !strncmp(key + 7, current_branch, current_branch_len) && - !strcmp(key + 7 + current_branch_len, ".remote")) { - free(default_remote_name); - default_remote_name = xstrdup(value); + struct branch *branch; + if (!prefixcmp(key, "branch.")) { + name = key + 7; + subkey = strrchr(name, '.'); + branch = make_branch(name, subkey - name); + if (!subkey) + return 0; + if (!value) + return 0; + if (!strcmp(subkey, ".remote")) { + branch->remote_name = xstrdup(value); + if (branch == current_branch) + default_remote_name = branch->remote_name; + } else if (!strcmp(subkey, ".merge")) + add_merge(branch, xstrdup(value)); + return 0; } if (prefixcmp(key, "remote.")) return 0; @@ -186,7 +260,7 @@ static int handle_config(const char *key, const char *value) return 0; /* ignore unknown booleans */ } if (!strcmp(subkey, ".url")) { - add_uri(remote, xstrdup(value)); + add_url(remote, xstrdup(value)); } else if (!strcmp(subkey, ".push")) { add_push_refspec(remote, xstrdup(value)); } else if (!strcmp(subkey, ".fetch")) { @@ -196,6 +270,14 @@ static int handle_config(const char *key, const char *value) remote->receivepack = xstrdup(value); else error("more than one receivepack given, using the first"); + } else if (!strcmp(subkey, ".uploadpack")) { + if (!remote->uploadpack) + remote->uploadpack = xstrdup(value); + else + error("more than one uploadpack given, using the first"); + } else if (!strcmp(subkey, ".tagopt")) { + if (!strcmp(value, "--no-tags")) + remote->fetch_tags = -1; } return 0; } @@ -212,13 +294,13 @@ static void read_config(void) head_ref = resolve_ref("HEAD", sha1, 0, &flag); if (head_ref && (flag & REF_ISSYMREF) && !prefixcmp(head_ref, "refs/heads/")) { - current_branch = head_ref + strlen("refs/heads/"); - current_branch_len = strlen(current_branch); + current_branch = + make_branch(head_ref + strlen("refs/heads/"), 0); } git_config(handle_config); } -static struct refspec *parse_ref_spec(int nr_refspec, const char **refspec) +struct refspec *parse_ref_spec(int nr_refspec, const char **refspec) { int i; struct refspec *rs = xcalloc(sizeof(*rs), nr_refspec); @@ -265,14 +347,14 @@ struct remote *remote_get(const char *name) name = default_remote_name; ret = make_remote(name, 0); if (name[0] != '/') { - if (!ret->uri) + if (!ret->url) read_remotes_file(ret); - if (!ret->uri) + if (!ret->url) read_branches_file(ret); } - if (!ret->uri) - add_uri(ret, name); - if (!ret->uri) + if (!ret->url) + add_url(ret, name); + if (!ret->url) return NULL; ret->fetch = parse_ref_spec(ret->fetch_refspec_nr, ret->fetch_refspec); ret->push = parse_ref_spec(ret->push_refspec_nr, ret->push_refspec); @@ -298,16 +380,62 @@ int for_each_remote(each_remote_fn fn, void 
*priv) return result; } -int remote_has_uri(struct remote *remote, const char *uri) +void ref_remove_duplicates(struct ref *ref_map) +{ + struct ref **posn; + struct ref *next; + for (; ref_map; ref_map = ref_map->next) { + if (!ref_map->peer_ref) + continue; + posn = &ref_map->next; + while (*posn) { + if ((*posn)->peer_ref && + !strcmp((*posn)->peer_ref->name, + ref_map->peer_ref->name)) { + if (strcmp((*posn)->name, ref_map->name)) + die("%s tracks both %s and %s", + ref_map->peer_ref->name, + (*posn)->name, ref_map->name); + next = (*posn)->next; + free((*posn)->peer_ref); + free(*posn); + *posn = next; + } else { + posn = &(*posn)->next; + } + } + } +} + +int remote_has_url(struct remote *remote, const char *url) { int i; - for (i = 0; i < remote->uri_nr; i++) { - if (!strcmp(remote->uri[i], uri)) + for (i = 0; i < remote->url_nr; i++) { + if (!strcmp(remote->url[i], url)) return 1; } return 0; } +/* + * Returns true if, under the matching rules for fetching, name is the + * same as the given full name. + */ +static int ref_matches_abbrev(const char *name, const char *full) +{ + if (!prefixcmp(name, "refs/") || !strcmp(name, "HEAD")) + return !strcmp(name, full); + if (prefixcmp(full, "refs/")) + return 0; + if (!prefixcmp(name, "heads/") || + !prefixcmp(name, "tags/") || + !prefixcmp(name, "remotes/")) + return !strcmp(name, full + 5); + if (prefixcmp(full + 5, "heads/")) + return 0; + return !strcmp(full + 11, name); +} + int remote_find_tracking(struct remote *remote, struct refspec *refspec) { int find_src = refspec->src == NULL; @@ -315,7 +443,7 @@ int remote_find_tracking(struct remote *remote, struct refspec *refspec) int i; if (find_src) { - if (refspec->dst == NULL) + if (!refspec->dst) return error("find_tracking: need either src or dst"); needle = refspec->dst; result = &refspec->src; @@ -357,6 +485,14 @@ struct ref *alloc_ref(unsigned namelen) return ret; } +static struct ref *copy_ref(struct ref *ref) +{ + struct ref *ret = xmalloc(sizeof(struct ref) + strlen(ref->name) + 1); + memcpy(ret, ref, sizeof(struct ref) + strlen(ref->name) + 1); + ret->next = NULL; + return ret; +} + void free_refs(struct ref *ref) { struct ref *next; @@ -489,23 +625,23 @@ static int match_explicit(struct ref *src, struct ref *dst, * way to delete 'other' ref at the remote end. 
*/ matched_src = try_explicit_object_name(rs->src); - if (matched_src) - break; - error("src refspec %s does not match any.", - rs->src); + if (!matched_src) + error("src refspec %s does not match any.", rs->src); break; default: matched_src = NULL; - error("src refspec %s matches more than one.", - rs->src); + error("src refspec %s matches more than one.", rs->src); break; } if (!matched_src) errs = 1; - if (dst_value == NULL) + if (!dst_value) { + if (!matched_src) + return errs; dst_value = matched_src->name; + } switch (count_refspec_match(dst_value, dst, &matched_dst)) { case 1: @@ -524,7 +660,7 @@ static int match_explicit(struct ref *src, struct ref *dst, dst_value); break; } - if (errs || matched_dst == NULL) + if (errs || !matched_dst) return 1; if (matched_dst->peer_ref) { errs = 1; @@ -633,3 +769,150 @@ int match_refs(struct ref *src, struct ref *dst, struct ref ***dst_tail, } return 0; } + +struct branch *branch_get(const char *name) +{ + struct branch *ret; + + read_config(); + if (!name || !*name || !strcmp(name, "HEAD")) + ret = current_branch; + else + ret = make_branch(name, 0); + if (ret && ret->remote_name) { + ret->remote = remote_get(ret->remote_name); + if (ret->merge_nr) { + int i; + ret->merge = xcalloc(sizeof(*ret->merge), + ret->merge_nr); + for (i = 0; i < ret->merge_nr; i++) { + ret->merge[i] = xcalloc(1, sizeof(**ret->merge)); + ret->merge[i]->src = xstrdup(ret->merge_name[i]); + remote_find_tracking(ret->remote, + ret->merge[i]); + } + } + } + return ret; +} + +int branch_has_merge_config(struct branch *branch) +{ + return branch && !!branch->merge; +} + +int branch_merge_matches(struct branch *branch, + int i, + const char *refname) +{ + if (!branch || i < 0 || i >= branch->merge_nr) + return 0; + return ref_matches_abbrev(branch->merge[i]->src, refname); +} + +static struct ref *get_expanded_map(struct ref *remote_refs, + const struct refspec *refspec) +{ + struct ref *ref; + struct ref *ret = NULL; + struct ref **tail = &ret; + + int remote_prefix_len = strlen(refspec->src); + int local_prefix_len = strlen(refspec->dst); + + for (ref = remote_refs; ref; ref = ref->next) { + if (strchr(ref->name, '^')) + continue; /* a dereference item */ + if (!prefixcmp(ref->name, refspec->src)) { + char *match; + struct ref *cpy = copy_ref(ref); + match = ref->name + remote_prefix_len; + + cpy->peer_ref = alloc_ref(local_prefix_len + + strlen(match) + 1); + sprintf(cpy->peer_ref->name, "%s%s", + refspec->dst, match); + if (refspec->force) + cpy->peer_ref->force = 1; + *tail = cpy; + tail = &cpy->next; + } + } + + return ret; +} + +static struct ref *find_ref_by_name_abbrev(struct ref *refs, const char *name) +{ + struct ref *ref; + for (ref = refs; ref; ref = ref->next) { + if (ref_matches_abbrev(name, ref->name)) + return ref; + } + return NULL; +} + +struct ref *get_remote_ref(struct ref *remote_refs, const char *name) +{ + struct ref *ref = find_ref_by_name_abbrev(remote_refs, name); + + if (!ref) + die("Couldn't find remote ref %s\n", name); + + return copy_ref(ref); +} + +static struct ref *get_local_ref(const char *name) +{ + struct ref *ret; + if (!name) + return NULL; + + if (!prefixcmp(name, "refs/")) { + ret = alloc_ref(strlen(name) + 1); + strcpy(ret->name, name); + return ret; + } + + if (!prefixcmp(name, "heads/") || + !prefixcmp(name, "tags/") || + !prefixcmp(name, "remotes/")) { + ret = alloc_ref(strlen(name) + 6); + sprintf(ret->name, "refs/%s", name); + return ret; + } + + ret = alloc_ref(strlen(name) + 12); + sprintf(ret->name, "refs/heads/%s", name); + 
return ret; +} + +int get_fetch_map(struct ref *remote_refs, + const struct refspec *refspec, + struct ref ***tail) +{ + struct ref *ref_map, *rm; + + if (refspec->pattern) { + ref_map = get_expanded_map(remote_refs, refspec); + } else { + ref_map = get_remote_ref(remote_refs, + refspec->src[0] ? + refspec->src : "HEAD"); + + ref_map->peer_ref = get_local_ref(refspec->dst); + if (ref_map->peer_ref && refspec->force) + ref_map->peer_ref->force = 1; + } + + for (rm = ref_map; rm; rm = rm->next) { + if (rm->peer_ref && check_ref_format(rm->peer_ref->name + 5)) + die("* refusing to create funny ref '%s' locally", + rm->peer_ref->name); + } + + if (ref_map) + tail_link_ref(ref_map, tail); + + return 0; +} @@ -4,8 +4,8 @@ struct remote { const char *name; - const char **uri; - int uri_nr; + const char **url; + int url_nr; const char **push_refspec; struct refspec *push; @@ -15,7 +15,16 @@ struct remote { struct refspec *fetch; int fetch_refspec_nr; + /* + * -1 to never fetch tags + * 0 to auto-follow tags on heuristic (default) + * 1 to always auto-follow tags + * 2 to always fetch tags + */ + int fetch_tags; + const char *receivepack; + const char *uploadpack; }; struct remote *remote_get(const char *name); @@ -23,7 +32,7 @@ struct remote *remote_get(const char *name); typedef int each_remote_fn(struct remote *remote, void *priv); int for_each_remote(each_remote_fn fn, void *priv); -int remote_has_uri(struct remote *remote, const char *uri); +int remote_has_url(struct remote *remote, const char *url); struct refspec { unsigned force : 1; @@ -40,12 +49,50 @@ struct ref *alloc_ref(unsigned namelen); */ void free_refs(struct ref *ref); +/* + * Removes and frees any duplicate refs in the map. + */ +void ref_remove_duplicates(struct ref *ref_map); + +struct refspec *parse_ref_spec(int nr_refspec, const char **refspec); + int match_refs(struct ref *src, struct ref *dst, struct ref ***dst_tail, int nr_refspec, char **refspec, int all); /* + * Given a list of the remote refs and the specification of things to + * fetch, makes a (separate) list of the refs to fetch and the local + * refs to store into. + * + * *tail is the pointer to the tail pointer of the list of results + * beforehand, and will be set to the tail pointer of the list of + * results afterward. + */ +int get_fetch_map(struct ref *remote_refs, const struct refspec *refspec, + struct ref ***tail); + +struct ref *get_remote_ref(struct ref *remote_refs, const char *name); + +/* * For the given remote, reads the refspec's src and sets the other fields. 
*/ int remote_find_tracking(struct remote *remote, struct refspec *refspec); +struct branch { + const char *name; + const char *refname; + + const char *remote_name; + struct remote *remote; + + const char **merge_name; + struct refspec **merge; + int merge_nr; +}; + +struct branch *branch_get(const char *name); + +int branch_has_merge_config(struct branch *branch); +int branch_merge_matches(struct branch *, int n, const char *); + #endif diff --git a/rsh.c b/rsh.c deleted file mode 100644 index 016d72ead7..0000000000 --- a/rsh.c +++ /dev/null @@ -1,79 +0,0 @@ -#include "cache.h" -#include "rsh.h" -#include "quote.h" - -#define COMMAND_SIZE 4096 - -int setup_connection(int *fd_in, int *fd_out, const char *remote_prog, - char *url, int rmt_argc, char **rmt_argv) -{ - char *host; - char *path; - int sv[2]; - int i; - pid_t pid; - struct strbuf cmd; - - if (!strcmp(url, "-")) { - *fd_in = 0; - *fd_out = 1; - return 0; - } - - host = strstr(url, "//"); - if (host) { - host += 2; - path = strchr(host, '/'); - } else { - host = url; - path = strchr(host, ':'); - if (path) - *(path++) = '\0'; - } - if (!path) { - return error("Bad URL: %s", url); - } - - /* $GIT_RSH <host> "env GIT_DIR=<path> <remote_prog> <args...>" */ - strbuf_init(&cmd, COMMAND_SIZE); - strbuf_addstr(&cmd, "env "); - strbuf_addstr(&cmd, GIT_DIR_ENVIRONMENT "="); - sq_quote_buf(&cmd, path); - strbuf_addch(&cmd, ' '); - sq_quote_buf(&cmd, remote_prog); - - for (i = 0 ; i < rmt_argc ; i++) { - strbuf_addch(&cmd, ' '); - sq_quote_buf(&cmd, rmt_argv[i]); - } - - strbuf_addstr(&cmd, " -"); - - if (cmd.len >= COMMAND_SIZE) - return error("Command line too long"); - - if (socketpair(AF_UNIX, SOCK_STREAM, 0, sv)) - return error("Couldn't create socket"); - - pid = fork(); - if (pid < 0) - return error("Couldn't fork"); - if (!pid) { - const char *ssh, *ssh_basename; - ssh = getenv("GIT_SSH"); - if (!ssh) ssh = "ssh"; - ssh_basename = strrchr(ssh, '/'); - if (!ssh_basename) - ssh_basename = ssh; - else - ssh_basename++; - close(sv[1]); - dup2(sv[0], 0); - dup2(sv[0], 1); - execlp(ssh, ssh_basename, host, cmd.buf, NULL); - } - close(sv[0]); - *fd_in = sv[1]; - *fd_out = sv[1]; - return 0; -} diff --git a/rsh.h b/rsh.h deleted file mode 100644 index ee2f499291..0000000000 --- a/rsh.h +++ /dev/null @@ -1,7 +0,0 @@ -#ifndef RSH_H -#define RSH_H - -int setup_connection(int *fd_in, int *fd_out, const char *remote_prog, - char *url, int rmt_argc, char **rmt_argv); - -#endif diff --git a/send-pack.c b/send-pack.c index c1807f0794..e9b9a39f41 100644 --- a/send-pack.c +++ b/send-pack.c @@ -428,7 +428,7 @@ int main(int argc, char **argv) if (remote_name) { remote = remote_get(remote_name); - if (!remote_has_uri(remote, dest)) { + if (!remote_has_url(remote, dest)) { die("Destination %s is not a uri for %s", dest, remote_name); } diff --git a/ssh-fetch.c b/ssh-fetch.c deleted file mode 100644 index bdf51a7a14..0000000000 --- a/ssh-fetch.c +++ /dev/null @@ -1,166 +0,0 @@ -#ifndef COUNTERPART_ENV_NAME -#define COUNTERPART_ENV_NAME "GIT_SSH_UPLOAD" -#endif -#ifndef COUNTERPART_PROGRAM_NAME -#define COUNTERPART_PROGRAM_NAME "git-ssh-upload" -#endif -#ifndef MY_PROGRAM_NAME -#define MY_PROGRAM_NAME "git-ssh-fetch" -#endif - -#include "cache.h" -#include "commit.h" -#include "rsh.h" -#include "fetch.h" -#include "refs.h" - -static int fd_in; -static int fd_out; - -static unsigned char remote_version; -static unsigned char local_version = 1; - -static int prefetches; - -static struct object_list *in_transit; -static struct object_list **end_of_transit = 
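struct branch and branch_get() expose the branch.<name>.remote/merge configuration that the builtin fetch path needs in C. A short sketch of how a caller can ask whether a fetched ref is configured for merging; is_for_merge() is an illustrative name, and passing NULL to branch_get() means "the current branch" per the implementation above.

#include "cache.h"
#include "remote.h"

/* Return 1 if the given remote ref name matches one of the current
 * branch's configured merge sources.
 */
static int is_for_merge(const char *refname)
{
	struct branch *current = branch_get(NULL);
	int i;

	if (!branch_has_merge_config(current))
		return 0;
	for (i = 0; i < current->merge_nr; i++)
		if (branch_merge_matches(current, i, refname))
			return 1;
	return 0;
}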
&in_transit; - -void prefetch(unsigned char *sha1) -{ - char type = 'o'; - struct object_list *node; - if (prefetches > 100) { - fetch(in_transit->item->sha1); - } - node = xmalloc(sizeof(struct object_list)); - node->next = NULL; - node->item = lookup_unknown_object(sha1); - *end_of_transit = node; - end_of_transit = &node->next; - /* XXX: what if these writes fail? */ - write_in_full(fd_out, &type, 1); - write_in_full(fd_out, sha1, 20); - prefetches++; -} - -static char conn_buf[4096]; -static size_t conn_buf_posn; - -int fetch(unsigned char *sha1) -{ - int ret; - signed char remote; - struct object_list *temp; - - if (hashcmp(sha1, in_transit->item->sha1)) { - /* we must have already fetched it to clean the queue */ - return has_sha1_file(sha1) ? 0 : -1; - } - prefetches--; - temp = in_transit; - in_transit = in_transit->next; - if (!in_transit) - end_of_transit = &in_transit; - free(temp); - - if (conn_buf_posn) { - remote = conn_buf[0]; - memmove(conn_buf, conn_buf + 1, --conn_buf_posn); - } else { - if (xread(fd_in, &remote, 1) < 1) - return -1; - } - /* fprintf(stderr, "Got %d\n", remote); */ - if (remote < 0) - return remote; - ret = write_sha1_from_fd(sha1, fd_in, conn_buf, 4096, &conn_buf_posn); - if (!ret) - pull_say("got %s\n", sha1_to_hex(sha1)); - return ret; -} - -static int get_version(void) -{ - char type = 'v'; - if (write_in_full(fd_out, &type, 1) != 1 || - write_in_full(fd_out, &local_version, 1)) { - return error("Couldn't request version from remote end"); - } - if (xread(fd_in, &remote_version, 1) < 1) { - return error("Couldn't read version from remote end"); - } - return 0; -} - -int fetch_ref(char *ref, unsigned char *sha1) -{ - signed char remote; - char type = 'r'; - int length = strlen(ref) + 1; - if (write_in_full(fd_out, &type, 1) != 1 || - write_in_full(fd_out, ref, length) != length) - return -1; - - if (read_in_full(fd_in, &remote, 1) != 1) - return -1; - if (remote < 0) - return remote; - if (read_in_full(fd_in, sha1, 20) != 20) - return -1; - return 0; -} - -static const char ssh_fetch_usage[] = - MY_PROGRAM_NAME - " [-c] [-t] [-a] [-v] [--recover] [-w ref] commit-id url"; -int main(int argc, char **argv) -{ - const char *write_ref = NULL; - char *commit_id; - char *url; - int arg = 1; - const char *prog; - - prog = getenv("GIT_SSH_PUSH"); - if (!prog) prog = "git-ssh-upload"; - - setup_git_directory(); - git_config(git_default_config); - - while (arg < argc && argv[arg][0] == '-') { - if (argv[arg][1] == 't') { - get_tree = 1; - } else if (argv[arg][1] == 'c') { - get_history = 1; - } else if (argv[arg][1] == 'a') { - get_all = 1; - get_tree = 1; - get_history = 1; - } else if (argv[arg][1] == 'v') { - get_verbosely = 1; - } else if (argv[arg][1] == 'w') { - write_ref = argv[arg + 1]; - arg++; - } else if (!strcmp(argv[arg], "--recover")) { - get_recover = 1; - } - arg++; - } - if (argc < arg + 2) { - usage(ssh_fetch_usage); - return 1; - } - commit_id = argv[arg]; - url = argv[arg + 1]; - - if (setup_connection(&fd_in, &fd_out, prog, url, arg, argv + 1)) - return 1; - - if (get_version()) - return 1; - - if (pull(1, &commit_id, &write_ref, url)) - return 1; - - return 0; -} diff --git a/ssh-pull.c b/ssh-pull.c deleted file mode 100644 index 868ce4d41f..0000000000 --- a/ssh-pull.c +++ /dev/null @@ -1,4 +0,0 @@ -#define COUNTERPART_ENV_NAME "GIT_SSH_PUSH" -#define COUNTERPART_PROGRAM_NAME "git-ssh-push" -#define MY_PROGRAM_NAME "git-ssh-pull" -#include "ssh-fetch.c" diff --git a/ssh-push.c b/ssh-push.c deleted file mode 100644 index 
a562df1b31..0000000000 --- a/ssh-push.c +++ /dev/null @@ -1,4 +0,0 @@ -#define COUNTERPART_ENV_NAME "GIT_SSH_PULL" -#define COUNTERPART_PROGRAM_NAME "git-ssh-pull" -#define MY_PROGRAM_NAME "git-ssh-push" -#include "ssh-upload.c" diff --git a/ssh-upload.c b/ssh-upload.c deleted file mode 100644 index 20c35f03dd..0000000000 --- a/ssh-upload.c +++ /dev/null @@ -1,143 +0,0 @@ -#ifndef COUNTERPART_ENV_NAME -#define COUNTERPART_ENV_NAME "GIT_SSH_FETCH" -#endif -#ifndef COUNTERPART_PROGRAM_NAME -#define COUNTERPART_PROGRAM_NAME "git-ssh-fetch" -#endif -#ifndef MY_PROGRAM_NAME -#define MY_PROGRAM_NAME "git-ssh-upload" -#endif - -#include "cache.h" -#include "rsh.h" -#include "refs.h" - -static unsigned char local_version = 1; -static unsigned char remote_version; - -static int verbose; - -static int serve_object(int fd_in, int fd_out) { - ssize_t size; - unsigned char sha1[20]; - signed char remote; - - size = read_in_full(fd_in, sha1, 20); - if (size < 0) { - perror("git-ssh-upload: read "); - return -1; - } - if (!size) - return -1; - - if (verbose) - fprintf(stderr, "Serving %s\n", sha1_to_hex(sha1)); - - remote = 0; - - if (!has_sha1_file(sha1)) { - fprintf(stderr, "git-ssh-upload: could not find %s\n", - sha1_to_hex(sha1)); - remote = -1; - } - - if (write_in_full(fd_out, &remote, 1) != 1) - return 0; - - if (remote < 0) - return 0; - - return write_sha1_to_fd(fd_out, sha1); -} - -static int serve_version(int fd_in, int fd_out) -{ - if (xread(fd_in, &remote_version, 1) < 1) - return -1; - write_in_full(fd_out, &local_version, 1); - return 0; -} - -static int serve_ref(int fd_in, int fd_out) -{ - char ref[PATH_MAX]; - unsigned char sha1[20]; - int posn = 0; - signed char remote = 0; - do { - if (posn >= PATH_MAX || xread(fd_in, ref + posn, 1) < 1) - return -1; - posn++; - } while (ref[posn - 1]); - - if (verbose) - fprintf(stderr, "Serving %s\n", ref); - - if (get_ref_sha1(ref, sha1)) - remote = -1; - if (write_in_full(fd_out, &remote, 1) != 1) - return 0; - if (remote) - return 0; - write_in_full(fd_out, sha1, 20); - return 0; -} - - -static void service(int fd_in, int fd_out) { - char type; - ssize_t retval; - do { - retval = xread(fd_in, &type, 1); - if (retval < 1) { - if (retval < 0) - perror("git-ssh-upload: read "); - return; - } - if (type == 'v' && serve_version(fd_in, fd_out)) - return; - if (type == 'o' && serve_object(fd_in, fd_out)) - return; - if (type == 'r' && serve_ref(fd_in, fd_out)) - return; - } while (1); -} - -static const char ssh_push_usage[] = - MY_PROGRAM_NAME " [-c] [-t] [-a] [-w ref] commit-id url"; - -int main(int argc, char **argv) -{ - int arg = 1; - char *commit_id; - char *url; - int fd_in, fd_out; - const char *prog; - unsigned char sha1[20]; - char hex[41]; - - prog = getenv(COUNTERPART_ENV_NAME); - if (!prog) prog = COUNTERPART_PROGRAM_NAME; - - setup_git_directory(); - - while (arg < argc && argv[arg][0] == '-') { - if (argv[arg][1] == 'w') - arg++; - arg++; - } - if (argc < arg + 2) - usage(ssh_push_usage); - commit_id = argv[arg]; - url = argv[arg + 1]; - if (get_sha1(commit_id, sha1)) - die("Not a valid object name %s", commit_id); - memcpy(hex, sha1_to_hex(sha1), sizeof(hex)); - argv[arg] = hex; - - if (setup_connection(&fd_in, &fd_out, prog, url, arg, argv + 1)) - return 1; - - service(fd_in, fd_out); - return 0; -} diff --git a/t/t5510-fetch.sh b/t/t5510-fetch.sh index 439430f569..d217657146 100755 --- a/t/t5510-fetch.sh +++ b/t/t5510-fetch.sh @@ -67,6 +67,18 @@ test_expect_success "fetch test for-merge" ' cut -f -2 .git/FETCH_HEAD >actual && diff 
expected actual' +test_expect_success 'fetch tags when there is no tags' ' + + cd "$D" && + + mkdir notags && + cd notags && + git init && + + git fetch -t .. + +' + test_expect_success 'fetch following tags' ' cd "$D" && @@ -153,4 +165,47 @@ test_expect_success 'bundle should be able to create a full history' ' ' +test "$TEST_RSYNC" && { +test_expect_success 'fetch via rsync' ' + git pack-refs && + mkdir rsynced && + cd rsynced && + git init && + git fetch rsync://127.0.0.1$(pwd)/../.git master:refs/heads/master && + git gc --prune && + test $(git rev-parse master) = $(cd .. && git rev-parse master) && + git fsck --full +' + +test_expect_success 'push via rsync' ' + mkdir ../rsynced2 && + (cd ../rsynced2 && + git init) && + git push rsync://127.0.0.1$(pwd)/../rsynced2/.git master && + cd ../rsynced2 && + git gc --prune && + test $(git rev-parse master) = $(cd .. && git rev-parse master) && + git fsck --full +' + +test_expect_success 'push via rsync' ' + cd .. && + mkdir rsynced3 && + (cd rsynced3 && + git init) && + git push --all rsync://127.0.0.1$(pwd)/rsynced3/.git && + cd rsynced3 && + test $(git rev-parse master) = $(cd .. && git rev-parse master) && + git fsck --full +' +} + +test_expect_success 'fetch with a non-applying branch.<name>.merge' ' + git config branch.master.remote yeti && + git config branch.master.merge refs/heads/bigfoot && + git config remote.blub.url one && + git config remote.blub.fetch "refs/heads/*:refs/remotes/one/*" && + git fetch blub +' + test_done diff --git a/t/t5515-fetch-merge-logic.sh b/t/t5515-fetch-merge-logic.sh index 6c9cc67508..31c1081617 100755 --- a/t/t5515-fetch-merge-logic.sh +++ b/t/t5515-fetch-merge-logic.sh @@ -84,8 +84,7 @@ test_expect_success setup ' git config branch.br-$remote-merge.merge refs/heads/three && git config branch.br-$remote-octopus.remote $remote && git config branch.br-$remote-octopus.merge refs/heads/one && - git config --add branch.br-$remote-octopus.merge two && - git config --add branch.br-$remote-octopus.merge remotes/rem/three + git config --add branch.br-$remote-octopus.merge two done ' diff --git a/t/t5515/fetch.br-branches-default-merge b/t/t5515/fetch.br-branches-default-merge index ea65f31bde..ca2cc1d1b4 100644 --- a/t/t5515/fetch.br-branches-default-merge +++ b/t/t5515/fetch.br-branches-default-merge @@ -1,5 +1,6 @@ # br-branches-default-merge -754b754407bf032e9a2f9d5a9ad05ca79a6b228f branch 'master' of ../ +754b754407bf032e9a2f9d5a9ad05ca79a6b228f not-for-merge branch 'master' of ../ +0567da4d5edd2ff4bb292a465ba9e64dcad9536b branch 'three' of ../ 754b754407bf032e9a2f9d5a9ad05ca79a6b228f not-for-merge tag 'tag-master' of ../ 8e32a6d901327a23ef831511badce7bf3bf46689 not-for-merge tag 'tag-one' of ../ 22feea448b023a2d864ef94b013735af34d238ba not-for-merge tag 'tag-one-tree' of ../ diff --git a/t/t5515/fetch.br-branches-default-merge_branches-default b/t/t5515/fetch.br-branches-default-merge_branches-default index 7b5fa949e6..7d947cd80f 100644 --- a/t/t5515/fetch.br-branches-default-merge_branches-default +++ b/t/t5515/fetch.br-branches-default-merge_branches-default @@ -1,5 +1,6 @@ # br-branches-default-merge branches-default -754b754407bf032e9a2f9d5a9ad05ca79a6b228f branch 'master' of ../ +754b754407bf032e9a2f9d5a9ad05ca79a6b228f not-for-merge branch 'master' of ../ +0567da4d5edd2ff4bb292a465ba9e64dcad9536b branch 'three' of ../ 754b754407bf032e9a2f9d5a9ad05ca79a6b228f not-for-merge tag 'tag-master' of ../ 8e32a6d901327a23ef831511badce7bf3bf46689 not-for-merge tag 'tag-one' of ../ 
22feea448b023a2d864ef94b013735af34d238ba not-for-merge tag 'tag-one-tree' of ../ diff --git a/t/t5515/fetch.br-branches-default-octopus b/t/t5515/fetch.br-branches-default-octopus index 128397d737..ec39c54b7e 100644 --- a/t/t5515/fetch.br-branches-default-octopus +++ b/t/t5515/fetch.br-branches-default-octopus @@ -1,5 +1,7 @@ # br-branches-default-octopus -754b754407bf032e9a2f9d5a9ad05ca79a6b228f branch 'master' of ../ +754b754407bf032e9a2f9d5a9ad05ca79a6b228f not-for-merge branch 'master' of ../ +8e32a6d901327a23ef831511badce7bf3bf46689 branch 'one' of ../ +6134ee8f857693b96ff1cc98d3e2fd62b199e5a8 branch 'two' of ../ 754b754407bf032e9a2f9d5a9ad05ca79a6b228f not-for-merge tag 'tag-master' of ../ 8e32a6d901327a23ef831511badce7bf3bf46689 not-for-merge tag 'tag-one' of ../ 22feea448b023a2d864ef94b013735af34d238ba not-for-merge tag 'tag-one-tree' of ../ diff --git a/t/t5515/fetch.br-branches-default-octopus_branches-default b/t/t5515/fetch.br-branches-default-octopus_branches-default index 4b37cd481a..6bf42e24b6 100644 --- a/t/t5515/fetch.br-branches-default-octopus_branches-default +++ b/t/t5515/fetch.br-branches-default-octopus_branches-default @@ -1,5 +1,7 @@ # br-branches-default-octopus branches-default -754b754407bf032e9a2f9d5a9ad05ca79a6b228f branch 'master' of ../ +754b754407bf032e9a2f9d5a9ad05ca79a6b228f not-for-merge branch 'master' of ../ +8e32a6d901327a23ef831511badce7bf3bf46689 branch 'one' of ../ +6134ee8f857693b96ff1cc98d3e2fd62b199e5a8 branch 'two' of ../ 754b754407bf032e9a2f9d5a9ad05ca79a6b228f not-for-merge tag 'tag-master' of ../ 8e32a6d901327a23ef831511badce7bf3bf46689 not-for-merge tag 'tag-one' of ../ 22feea448b023a2d864ef94b013735af34d238ba not-for-merge tag 'tag-one-tree' of ../ diff --git a/t/t5515/fetch.br-branches-one-merge b/t/t5515/fetch.br-branches-one-merge index 3a4e77ead5..b4b3b35ce0 100644 --- a/t/t5515/fetch.br-branches-one-merge +++ b/t/t5515/fetch.br-branches-one-merge @@ -1,5 +1,6 @@ # br-branches-one-merge -8e32a6d901327a23ef831511badce7bf3bf46689 branch 'one' of ../ +8e32a6d901327a23ef831511badce7bf3bf46689 not-for-merge branch 'one' of ../ +0567da4d5edd2ff4bb292a465ba9e64dcad9536b branch 'three' of ../ 754b754407bf032e9a2f9d5a9ad05ca79a6b228f not-for-merge tag 'tag-master' of ../ 8e32a6d901327a23ef831511badce7bf3bf46689 not-for-merge tag 'tag-one' of ../ 22feea448b023a2d864ef94b013735af34d238ba not-for-merge tag 'tag-one-tree' of ../ diff --git a/t/t5515/fetch.br-branches-one-merge_branches-one b/t/t5515/fetch.br-branches-one-merge_branches-one index 00e04b435e..2ecef384eb 100644 --- a/t/t5515/fetch.br-branches-one-merge_branches-one +++ b/t/t5515/fetch.br-branches-one-merge_branches-one @@ -1,5 +1,6 @@ # br-branches-one-merge branches-one -8e32a6d901327a23ef831511badce7bf3bf46689 branch 'one' of ../ +8e32a6d901327a23ef831511badce7bf3bf46689 not-for-merge branch 'one' of ../ +0567da4d5edd2ff4bb292a465ba9e64dcad9536b branch 'three' of ../ 754b754407bf032e9a2f9d5a9ad05ca79a6b228f not-for-merge tag 'tag-master' of ../ 8e32a6d901327a23ef831511badce7bf3bf46689 not-for-merge tag 'tag-one' of ../ 22feea448b023a2d864ef94b013735af34d238ba not-for-merge tag 'tag-one-tree' of ../ diff --git a/t/t5515/fetch.br-branches-one-octopus b/t/t5515/fetch.br-branches-one-octopus index 53fe808a3b..96e3029416 100644 --- a/t/t5515/fetch.br-branches-one-octopus +++ b/t/t5515/fetch.br-branches-one-octopus @@ -1,5 +1,6 @@ # br-branches-one-octopus 8e32a6d901327a23ef831511badce7bf3bf46689 branch 'one' of ../ +6134ee8f857693b96ff1cc98d3e2fd62b199e5a8 branch 'two' of ../ 
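The expected-output files above spell out the FETCH_HEAD layout these tests now rely on: a 40-hex object name, an optional not-for-merge marker, and a description, separated by tabs (which is what lets t5510 compare with cut -f -2). A standalone parsing sketch follows; the sample lines reuse object names from the test data, and everything else is illustrative.

#include <stdio.h>
#include <string.h>

/* Parse one FETCH_HEAD line of the form
 *	<sha1>\t[not-for-merge]\t<description>
 * An empty second field marks a merge candidate.
 */
static int parse_fetch_head_line(const char *line, char sha1_hex[41],
				 int *for_merge)
{
	const char *tab1 = strchr(line, '\t');
	const char *tab2;

	if (!tab1 || tab1 - line != 40)
		return -1;			/* want a full SHA-1 first */
	memcpy(sha1_hex, line, 40);
	sha1_hex[40] = '\0';

	tab2 = strchr(tab1 + 1, '\t');
	if (!tab2)
		return -1;
	*for_merge = (tab2 == tab1 + 1);	/* empty field => merge */
	return 0;
}

int main(void)
{
	const char *samples[] = {
		"754b754407bf032e9a2f9d5a9ad05ca79a6b228f\tnot-for-merge\t"
			"branch 'master' of ../",
		"0567da4d5edd2ff4bb292a465ba9e64dcad9536b\t\t"
			"branch 'three' of ../",
	};
	char sha1[41];
	int i, for_merge;

	for (i = 0; i < 2; i++)
		if (!parse_fetch_head_line(samples[i], sha1, &for_merge))
			printf("%s %s\n", sha1,
			       for_merge ? "for merge" : "not-for-merge");
	return 0;
}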
754b754407bf032e9a2f9d5a9ad05ca79a6b228f not-for-merge tag 'tag-master' of ../ 8e32a6d901327a23ef831511badce7bf3bf46689 not-for-merge tag 'tag-one' of ../ 22feea448b023a2d864ef94b013735af34d238ba not-for-merge tag 'tag-one-tree' of ../ diff --git a/t/t5515/fetch.br-branches-one-octopus_branches-one b/t/t5515/fetch.br-branches-one-octopus_branches-one index 41b18ff78a..55e0bad621 100644 --- a/t/t5515/fetch.br-branches-one-octopus_branches-one +++ b/t/t5515/fetch.br-branches-one-octopus_branches-one @@ -1,5 +1,6 @@ # br-branches-one-octopus branches-one 8e32a6d901327a23ef831511badce7bf3bf46689 branch 'one' of ../ +6134ee8f857693b96ff1cc98d3e2fd62b199e5a8 branch 'two' of ../ 754b754407bf032e9a2f9d5a9ad05ca79a6b228f not-for-merge tag 'tag-master' of ../ 8e32a6d901327a23ef831511badce7bf3bf46689 not-for-merge tag 'tag-one' of ../ 22feea448b023a2d864ef94b013735af34d238ba not-for-merge tag 'tag-one-tree' of ../ diff --git a/t/t5515/fetch.br-config-glob-octopus b/t/t5515/fetch.br-config-glob-octopus index 9ee213ea45..938e532db2 100644 --- a/t/t5515/fetch.br-config-glob-octopus +++ b/t/t5515/fetch.br-config-glob-octopus @@ -2,7 +2,7 @@ 754b754407bf032e9a2f9d5a9ad05ca79a6b228f not-for-merge branch 'master' of ../ 8e32a6d901327a23ef831511badce7bf3bf46689 branch 'one' of ../ 0567da4d5edd2ff4bb292a465ba9e64dcad9536b not-for-merge branch 'three' of ../ -6134ee8f857693b96ff1cc98d3e2fd62b199e5a8 not-for-merge branch 'two' of ../ +6134ee8f857693b96ff1cc98d3e2fd62b199e5a8 branch 'two' of ../ 754b754407bf032e9a2f9d5a9ad05ca79a6b228f not-for-merge tag 'tag-master' of ../ 8e32a6d901327a23ef831511badce7bf3bf46689 not-for-merge tag 'tag-one' of ../ 22feea448b023a2d864ef94b013735af34d238ba not-for-merge tag 'tag-one-tree' of ../ diff --git a/t/t5515/fetch.br-config-glob-octopus_config-glob b/t/t5515/fetch.br-config-glob-octopus_config-glob index 44bd0ec59f..c9225bf6ff 100644 --- a/t/t5515/fetch.br-config-glob-octopus_config-glob +++ b/t/t5515/fetch.br-config-glob-octopus_config-glob @@ -2,7 +2,7 @@ 754b754407bf032e9a2f9d5a9ad05ca79a6b228f not-for-merge branch 'master' of ../ 8e32a6d901327a23ef831511badce7bf3bf46689 branch 'one' of ../ 0567da4d5edd2ff4bb292a465ba9e64dcad9536b not-for-merge branch 'three' of ../ -6134ee8f857693b96ff1cc98d3e2fd62b199e5a8 not-for-merge branch 'two' of ../ +6134ee8f857693b96ff1cc98d3e2fd62b199e5a8 branch 'two' of ../ 754b754407bf032e9a2f9d5a9ad05ca79a6b228f not-for-merge tag 'tag-master' of ../ 8e32a6d901327a23ef831511badce7bf3bf46689 not-for-merge tag 'tag-one' of ../ 22feea448b023a2d864ef94b013735af34d238ba not-for-merge tag 'tag-one-tree' of ../ diff --git a/t/t5515/fetch.br-remote-glob-octopus b/t/t5515/fetch.br-remote-glob-octopus index c1554f8f2d..b08e046195 100644 --- a/t/t5515/fetch.br-remote-glob-octopus +++ b/t/t5515/fetch.br-remote-glob-octopus @@ -2,7 +2,7 @@ 754b754407bf032e9a2f9d5a9ad05ca79a6b228f not-for-merge branch 'master' of ../ 8e32a6d901327a23ef831511badce7bf3bf46689 branch 'one' of ../ 0567da4d5edd2ff4bb292a465ba9e64dcad9536b not-for-merge branch 'three' of ../ -6134ee8f857693b96ff1cc98d3e2fd62b199e5a8 not-for-merge branch 'two' of ../ +6134ee8f857693b96ff1cc98d3e2fd62b199e5a8 branch 'two' of ../ 754b754407bf032e9a2f9d5a9ad05ca79a6b228f not-for-merge tag 'tag-master' of ../ 8e32a6d901327a23ef831511badce7bf3bf46689 not-for-merge tag 'tag-one' of ../ 22feea448b023a2d864ef94b013735af34d238ba not-for-merge tag 'tag-one-tree' of ../ diff --git a/t/t5515/fetch.br-remote-glob-octopus_remote-glob b/t/t5515/fetch.br-remote-glob-octopus_remote-glob index 
e6134345b8..d4d547c847 100644 --- a/t/t5515/fetch.br-remote-glob-octopus_remote-glob +++ b/t/t5515/fetch.br-remote-glob-octopus_remote-glob @@ -2,7 +2,7 @@ 754b754407bf032e9a2f9d5a9ad05ca79a6b228f not-for-merge branch 'master' of ../ 8e32a6d901327a23ef831511badce7bf3bf46689 branch 'one' of ../ 0567da4d5edd2ff4bb292a465ba9e64dcad9536b not-for-merge branch 'three' of ../ -6134ee8f857693b96ff1cc98d3e2fd62b199e5a8 not-for-merge branch 'two' of ../ +6134ee8f857693b96ff1cc98d3e2fd62b199e5a8 branch 'two' of ../ 754b754407bf032e9a2f9d5a9ad05ca79a6b228f not-for-merge tag 'tag-master' of ../ 8e32a6d901327a23ef831511badce7bf3bf46689 not-for-merge tag 'tag-one' of ../ 22feea448b023a2d864ef94b013735af34d238ba not-for-merge tag 'tag-one-tree' of ../ diff --git a/t/t5700-clone-reference.sh b/t/t5700-clone-reference.sh index 4e93aaab02..b6a54867b4 100755 --- a/t/t5700-clone-reference.sh +++ b/t/t5700-clone-reference.sh @@ -38,7 +38,7 @@ cd "$base_dir" test_expect_success 'pulling from reference' \ 'cd C && -git pull ../B' +git pull ../B master' cd "$base_dir" @@ -61,7 +61,7 @@ test_expect_success 'existence of info/alternates' \ cd "$base_dir" test_expect_success 'pulling from reference' \ -'cd D && git pull ../B' +'cd D && git pull ../B master' cd "$base_dir" diff --git a/transport.c b/transport.c new file mode 100644 index 0000000000..400af71c76 --- /dev/null +++ b/transport.c @@ -0,0 +1,832 @@ +#include "cache.h" +#include "transport.h" +#include "run-command.h" +#ifndef NO_CURL +#include "http.h" +#endif +#include "pkt-line.h" +#include "fetch-pack.h" +#include "walker.h" +#include "bundle.h" +#include "dir.h" +#include "refs.h" + +/* rsync support */ + +/* + * We copy packed-refs and refs/ into a temporary file, then read the + * loose refs recursively (sorting whenever possible), and then inserting + * those packed refs that are not yet in the list (not validating, but + * assuming that the file is sorted). + * + * Appears refactoring this from refs.c is too cumbersome. + */ + +static int str_cmp(const void *a, const void *b) +{ + const char *s1 = a; + const char *s2 = b; + + return strcmp(s1, s2); +} + +/* path->buf + name_offset is expected to point to "refs/" */ + +static int read_loose_refs(struct strbuf *path, int name_offset, + struct ref **tail) +{ + DIR *dir = opendir(path->buf); + struct dirent *de; + struct { + char **entries; + int nr, alloc; + } list; + int i, pathlen; + + if (!dir) + return -1; + + memset (&list, 0, sizeof(list)); + + while ((de = readdir(dir))) { + if (de->d_name[0] == '.' && (de->d_name[1] == '\0' || + (de->d_name[1] == '.' 
&& + de->d_name[2] == '\0'))) + continue; + ALLOC_GROW(list.entries, list.nr + 1, list.alloc); + list.entries[list.nr++] = xstrdup(de->d_name); + } + closedir(dir); + + /* sort the list */ + + qsort(list.entries, list.nr, sizeof(char *), str_cmp); + + pathlen = path->len; + strbuf_addch(path, '/'); + + for (i = 0; i < list.nr; i++, strbuf_setlen(path, pathlen + 1)) { + strbuf_addstr(path, list.entries[i]); + if (read_loose_refs(path, name_offset, tail)) { + int fd = open(path->buf, O_RDONLY); + char buffer[40]; + struct ref *next; + + if (fd < 0) + continue; + next = alloc_ref(path->len - name_offset + 1); + if (read_in_full(fd, buffer, 40) != 40 || + get_sha1_hex(buffer, next->old_sha1)) { + close(fd); + free(next); + continue; + } + close(fd); + strcpy(next->name, path->buf + name_offset); + (*tail)->next = next; + *tail = next; + } + } + strbuf_setlen(path, pathlen); + + for (i = 0; i < list.nr; i++) + free(list.entries[i]); + free(list.entries); + + return 0; +} + +/* insert the packed refs for which no loose refs were found */ + +static void insert_packed_refs(const char *packed_refs, struct ref **list) +{ + FILE *f = fopen(packed_refs, "r"); + static char buffer[PATH_MAX]; + + if (!f) + return; + + for (;;) { + int cmp, len; + + if (!fgets(buffer, sizeof(buffer), f)) { + fclose(f); + return; + } + + if (hexval(buffer[0]) > 0xf) + continue; + len = strlen(buffer); + if (buffer[len - 1] == '\n') + buffer[--len] = '\0'; + if (len < 41) + continue; + while ((*list)->next && + (cmp = strcmp(buffer + 41, + (*list)->next->name)) > 0) + list = &(*list)->next; + if (!(*list)->next || cmp < 0) { + struct ref *next = alloc_ref(len - 40); + buffer[40] = '\0'; + if (get_sha1_hex(buffer, next->old_sha1)) { + warning ("invalid SHA-1: %s", buffer); + free(next); + continue; + } + strcpy(next->name, buffer + 41); + next->next = (*list)->next; + (*list)->next = next; + list = &(*list)->next; + } + } +} + +static struct ref *get_refs_via_rsync(const struct transport *transport) +{ + struct strbuf buf = STRBUF_INIT, temp_dir = STRBUF_INIT; + struct ref dummy, *tail = &dummy; + struct child_process rsync; + const char *args[5]; + int temp_dir_len; + + /* copy the refs to the temporary directory */ + + strbuf_addstr(&temp_dir, git_path("rsync-refs-XXXXXX")); + if (!mkdtemp(temp_dir.buf)) + die ("Could not make temporary directory"); + temp_dir_len = temp_dir.len; + + strbuf_addstr(&buf, transport->url); + strbuf_addstr(&buf, "/refs"); + + memset(&rsync, 0, sizeof(rsync)); + rsync.argv = args; + rsync.stdout_to_stderr = 1; + args[0] = "rsync"; + args[1] = (transport->verbose > 0) ? 
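insert_packed_refs() merges the sorted packed-refs entries into the already-sorted list of loose refs, skipping any name the loose scan found first. The same splice logic, stripped down to a standalone list-of-strings form for illustration (struct node, insert_sorted_missing() and the sample names are not part of the patch):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct node {
	struct node *next;
	const char *name;
};

/* *list points at a dummy head node; packed[] is sorted, as the list is. */
static void insert_sorted_missing(struct node **list,
				  const char **packed, int nr)
{
	int i;

	for (i = 0; i < nr; i++) {
		int cmp = 1;
		while ((*list)->next &&
		       (cmp = strcmp(packed[i], (*list)->next->name)) > 0)
			list = &(*list)->next;
		if (!(*list)->next || cmp < 0) {
			struct node *n = malloc(sizeof(*n));
			n->name = packed[i];
			n->next = (*list)->next;
			(*list)->next = n;
			list = &(*list)->next;
		}
		/* cmp == 0: a loose ref shadows the packed one; skip it */
	}
}

int main(void)
{
	struct node dummy = { NULL, "" };	/* dummy head simplifies insertion */
	struct node loose1 = { NULL, "refs/heads/master" };
	const char *packed[] = { "refs/heads/master", "refs/tags/v1.0" };
	struct node *head = &dummy, *p;

	dummy.next = &loose1;
	insert_sorted_missing(&head, packed, 2);
	for (p = dummy.next; p; p = p->next)
		printf("%s\n", p->name);
	return 0;
}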
"-rv" : "-r"; + args[2] = buf.buf; + args[3] = temp_dir.buf; + args[4] = NULL; + + if (run_command(&rsync)) + die ("Could not run rsync to get refs"); + + strbuf_reset(&buf); + strbuf_addstr(&buf, transport->url); + strbuf_addstr(&buf, "/packed-refs"); + + args[2] = buf.buf; + + if (run_command(&rsync)) + die ("Could not run rsync to get refs"); + + /* read the copied refs */ + + strbuf_addstr(&temp_dir, "/refs"); + read_loose_refs(&temp_dir, temp_dir_len + 1, &tail); + strbuf_setlen(&temp_dir, temp_dir_len); + + tail = &dummy; + strbuf_addstr(&temp_dir, "/packed-refs"); + insert_packed_refs(temp_dir.buf, &tail); + strbuf_setlen(&temp_dir, temp_dir_len); + + if (remove_dir_recursively(&temp_dir, 0)) + warning ("Error removing temporary directory %s.", + temp_dir.buf); + + strbuf_release(&buf); + strbuf_release(&temp_dir); + + return dummy.next; +} + +static int fetch_objs_via_rsync(struct transport *transport, + int nr_objs, struct ref **to_fetch) +{ + struct strbuf buf = STRBUF_INIT; + struct child_process rsync; + const char *args[8]; + int result; + + strbuf_addstr(&buf, transport->url); + strbuf_addstr(&buf, "/objects/"); + + memset(&rsync, 0, sizeof(rsync)); + rsync.argv = args; + rsync.stdout_to_stderr = 1; + args[0] = "rsync"; + args[1] = (transport->verbose > 0) ? "-rv" : "-r"; + args[2] = "--ignore-existing"; + args[3] = "--exclude"; + args[4] = "info"; + args[5] = buf.buf; + args[6] = get_object_directory(); + args[7] = NULL; + + /* NEEDSWORK: handle one level of alternates */ + result = run_command(&rsync); + + strbuf_release(&buf); + + return result; +} + +static int write_one_ref(const char *name, const unsigned char *sha1, + int flags, void *data) +{ + struct strbuf *buf = data; + int len = buf->len; + FILE *f; + + /* when called via for_each_ref(), flags is non-zero */ + if (flags && prefixcmp(name, "refs/heads/") && + prefixcmp(name, "refs/tags/")) + return 0; + + strbuf_addstr(buf, name); + if (safe_create_leading_directories(buf->buf) || + !(f = fopen(buf->buf, "w")) || + fprintf(f, "%s\n", sha1_to_hex(sha1)) < 0 || + fclose(f)) + return error("problems writing temporary file %s", buf->buf); + strbuf_setlen(buf, len); + return 0; +} + +static int write_refs_to_temp_dir(struct strbuf *temp_dir, + int refspec_nr, const char **refspec) +{ + int i; + + for (i = 0; i < refspec_nr; i++) { + unsigned char sha1[20]; + char *ref; + + if (dwim_ref(refspec[i], strlen(refspec[i]), sha1, &ref) != 1) + return error("Could not get ref %s", refspec[i]); + + if (write_one_ref(ref, sha1, 0, temp_dir)) { + free(ref); + return -1; + } + free(ref); + } + return 0; +} + +static int rsync_transport_push(struct transport *transport, + int refspec_nr, const char **refspec, int flags) +{ + struct strbuf buf = STRBUF_INIT, temp_dir = STRBUF_INIT; + int result = 0, i; + struct child_process rsync; + const char *args[10]; + + /* first push the objects */ + + strbuf_addstr(&buf, transport->url); + strbuf_addch(&buf, '/'); + + memset(&rsync, 0, sizeof(rsync)); + rsync.argv = args; + rsync.stdout_to_stderr = 1; + i = 0; + args[i++] = "rsync"; + args[i++] = "-a"; + if (flags & TRANSPORT_PUSH_DRY_RUN) + args[i++] = "--dry-run"; + if (transport->verbose > 0) + args[i++] = "-v"; + args[i++] = "--ignore-existing"; + args[i++] = "--exclude"; + args[i++] = "info"; + args[i++] = get_object_directory(); + args[i++] = buf.buf; + args[i++] = NULL; + + if (run_command(&rsync)) + return error("Could not push objects to %s", transport->url); + + /* copy the refs to the temporary directory; they could be packed. 
*/ + + strbuf_addstr(&temp_dir, git_path("rsync-refs-XXXXXX")); + if (!mkdtemp(temp_dir.buf)) + die ("Could not make temporary directory"); + strbuf_addch(&temp_dir, '/'); + + if (flags & TRANSPORT_PUSH_ALL) { + if (for_each_ref(write_one_ref, &temp_dir)) + return -1; + } else if (write_refs_to_temp_dir(&temp_dir, refspec_nr, refspec)) + return -1; + + i = 2; + if (flags & TRANSPORT_PUSH_DRY_RUN) + args[i++] = "--dry-run"; + if (!(flags & TRANSPORT_PUSH_FORCE)) + args[i++] = "--ignore-existing"; + args[i++] = temp_dir.buf; + args[i++] = transport->url; + args[i++] = NULL; + if (run_command(&rsync)) + result = error("Could not push to %s", transport->url); + + if (remove_dir_recursively(&temp_dir, 0)) + warning ("Could not remove temporary directory %s.", + temp_dir.buf); + + strbuf_release(&buf); + strbuf_release(&temp_dir); + + return result; +} + +/* Generic functions for using commit walkers */ + +static int fetch_objs_via_walker(struct transport *transport, + int nr_objs, struct ref **to_fetch) +{ + char *dest = xstrdup(transport->url); + struct walker *walker = transport->data; + char **objs = xmalloc(nr_objs * sizeof(*objs)); + int i; + + walker->get_all = 1; + walker->get_tree = 1; + walker->get_history = 1; + walker->get_verbosely = transport->verbose >= 0; + walker->get_recover = 0; + + for (i = 0; i < nr_objs; i++) + objs[i] = xstrdup(sha1_to_hex(to_fetch[i]->old_sha1)); + + if (walker_fetch(walker, nr_objs, objs, NULL, NULL)) + die("Fetch failed."); + + for (i = 0; i < nr_objs; i++) + free(objs[i]); + free(objs); + free(dest); + return 0; +} + +static int disconnect_walker(struct transport *transport) +{ + struct walker *walker = transport->data; + if (walker) + walker_free(walker); + return 0; +} + +#ifndef NO_CURL +static int curl_transport_push(struct transport *transport, int refspec_nr, const char **refspec, int flags) { + const char **argv; + int argc; + int err; + + argv = xmalloc((refspec_nr + 11) * sizeof(char *)); + argv[0] = "http-push"; + argc = 1; + if (flags & TRANSPORT_PUSH_ALL) + argv[argc++] = "--all"; + if (flags & TRANSPORT_PUSH_FORCE) + argv[argc++] = "--force"; + if (flags & TRANSPORT_PUSH_DRY_RUN) + argv[argc++] = "--dry-run"; + argv[argc++] = transport->url; + while (refspec_nr--) + argv[argc++] = *refspec++; + argv[argc] = NULL; + err = run_command_v_opt(argv, RUN_GIT_CMD); + switch (err) { + case -ERR_RUN_COMMAND_FORK: + error("unable to fork for %s", argv[0]); + case -ERR_RUN_COMMAND_EXEC: + error("unable to exec %s", argv[0]); + break; + case -ERR_RUN_COMMAND_WAITPID: + case -ERR_RUN_COMMAND_WAITPID_WRONG_PID: + case -ERR_RUN_COMMAND_WAITPID_SIGNAL: + case -ERR_RUN_COMMAND_WAITPID_NOEXIT: + error("%s died with strange error", argv[0]); + } + return !!err; +} + +static int missing__target(int code, int result) +{ + return /* file:// URL -- do we ever use one??? 
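curl_transport_push() above (and git_transport_push() further down) translate the TRANSPORT_PUSH_* bits into command-line flags for an existing program. From the caller's side the same bits are set like this; a hedged sketch, where push_origin(), "origin" and the refspec are illustrative and error handling is omitted.

#include "cache.h"
#include "transport.h"

/* Push one refspec over whatever transport the URL selects. */
static int push_origin(int force, int dry_run)
{
	struct remote *remote = remote_get("origin");
	struct transport *t = transport_get(remote, remote->url[0]);
	const char *refspec[] = { "refs/heads/master" };
	int flags = 0;

	if (force)
		flags |= TRANSPORT_PUSH_FORCE;
	if (dry_run)
		flags |= TRANSPORT_PUSH_DRY_RUN;
	return transport_push(t, 1, refspec, flags);
}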
*/ + (result == CURLE_FILE_COULDNT_READ_FILE) || + /* http:// and https:// URL */ + (code == 404 && result == CURLE_HTTP_RETURNED_ERROR) || + /* ftp:// URL */ + (code == 550 && result == CURLE_FTP_COULDNT_RETR_FILE) + ; +} + +#define missing_target(a) missing__target((a)->http_code, (a)->curl_result) + +static struct ref *get_refs_via_curl(const struct transport *transport) +{ + struct buffer buffer; + char *data, *start, *mid; + char *ref_name; + char *refs_url; + int i = 0; + + struct active_request_slot *slot; + struct slot_results results; + + struct ref *refs = NULL; + struct ref *ref = NULL; + struct ref *last_ref = NULL; + + data = xmalloc(4096); + buffer.size = 4096; + buffer.posn = 0; + buffer.buffer = data; + + refs_url = xmalloc(strlen(transport->url) + 11); + sprintf(refs_url, "%s/info/refs", transport->url); + + http_init(); + + slot = get_active_slot(); + slot->results = &results; + curl_easy_setopt(slot->curl, CURLOPT_FILE, &buffer); + curl_easy_setopt(slot->curl, CURLOPT_WRITEFUNCTION, fwrite_buffer); + curl_easy_setopt(slot->curl, CURLOPT_URL, refs_url); + curl_easy_setopt(slot->curl, CURLOPT_HTTPHEADER, NULL); + if (start_active_slot(slot)) { + run_active_slot(slot); + if (results.curl_result != CURLE_OK) { + if (missing_target(&results)) { + free(buffer.buffer); + return NULL; + } else { + free(buffer.buffer); + error("%s", curl_errorstr); + return NULL; + } + } + } else { + free(buffer.buffer); + error("Unable to start request"); + return NULL; + } + + http_cleanup(); + + data = buffer.buffer; + start = NULL; + mid = data; + while (i < buffer.posn) { + if (!start) + start = &data[i]; + if (data[i] == '\t') + mid = &data[i]; + if (data[i] == '\n') { + data[i] = 0; + ref_name = mid + 1; + ref = xmalloc(sizeof(struct ref) + + strlen(ref_name) + 1); + memset(ref, 0, sizeof(struct ref)); + strcpy(ref->name, ref_name); + get_sha1_hex(start, ref->old_sha1); + if (!refs) + refs = ref; + if (last_ref) + last_ref->next = ref; + last_ref = ref; + start = NULL; + } + i++; + } + + free(buffer.buffer); + + return refs; +} + +static int fetch_objs_via_curl(struct transport *transport, + int nr_objs, struct ref **to_fetch) +{ + if (!transport->data) + transport->data = get_http_walker(transport->url); + return fetch_objs_via_walker(transport, nr_objs, to_fetch); +} + +#endif + +struct bundle_transport_data { + int fd; + struct bundle_header header; +}; + +static struct ref *get_refs_from_bundle(const struct transport *transport) +{ + struct bundle_transport_data *data = transport->data; + struct ref *result = NULL; + int i; + + if (data->fd > 0) + close(data->fd); + data->fd = read_bundle_header(transport->url, &data->header); + if (data->fd < 0) + die ("Could not read bundle '%s'.", transport->url); + for (i = 0; i < data->header.references.nr; i++) { + struct ref_list_entry *e = data->header.references.list + i; + struct ref *ref = alloc_ref(strlen(e->name) + 1); + hashcpy(ref->old_sha1, e->sha1); + strcpy(ref->name, e->name); + ref->next = result; + result = ref; + } + return result; +} + +static int fetch_refs_from_bundle(struct transport *transport, + int nr_heads, struct ref **to_fetch) +{ + struct bundle_transport_data *data = transport->data; + return unbundle(&data->header, data->fd); +} + +static int close_bundle(struct transport *transport) +{ + struct bundle_transport_data *data = transport->data; + if (data->fd > 0) + close(data->fd); + free(data); + return 0; +} + +struct git_transport_data { + unsigned thin : 1; + unsigned keep : 1; + int depth; + const char *uploadpack; 
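get_refs_via_curl() above walks the dumb-HTTP ref advertisement, where each line of $URL/info/refs is "<sha1> TAB <refname> LF". A standalone sketch of that scan; parse_info_refs() is illustrative and the sample buffer reuses object names from the test data.

#include <stdio.h>
#include <string.h>

/* Print the SHA-1 and ref name of every well-formed line in buf. */
static void parse_info_refs(const char *buf, size_t len)
{
	size_t i = 0;
	const char *start = NULL, *mid = NULL;

	while (i < len) {
		if (!start)
			start = buf + i;
		if (buf[i] == '\t')
			mid = buf + i;
		if (buf[i] == '\n') {
			if (mid && mid > start)
				printf("sha1=%.40s name=%.*s\n", start,
				       (int)(buf + i - mid - 1), mid + 1);
			start = mid = NULL;
		}
		i++;
	}
}

int main(void)
{
	const char sample[] =
		"754b754407bf032e9a2f9d5a9ad05ca79a6b228f\trefs/heads/master\n"
		"8e32a6d901327a23ef831511badce7bf3bf46689\trefs/heads/one\n";
	parse_info_refs(sample, sizeof(sample) - 1);
	return 0;
}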
+ const char *receivepack; +}; + +static int set_git_option(struct transport *connection, + const char *name, const char *value) +{ + struct git_transport_data *data = connection->data; + if (!strcmp(name, TRANS_OPT_UPLOADPACK)) { + data->uploadpack = value; + return 0; + } else if (!strcmp(name, TRANS_OPT_RECEIVEPACK)) { + data->receivepack = value; + return 0; + } else if (!strcmp(name, TRANS_OPT_THIN)) { + data->thin = !!value; + return 0; + } else if (!strcmp(name, TRANS_OPT_KEEP)) { + data->keep = !!value; + return 0; + } else if (!strcmp(name, TRANS_OPT_DEPTH)) { + if (!value) + data->depth = 0; + else + data->depth = atoi(value); + return 0; + } + return 1; +} + +static struct ref *get_refs_via_connect(const struct transport *transport) +{ + struct git_transport_data *data = transport->data; + struct ref *refs; + int fd[2]; + pid_t pid; + char *dest = xstrdup(transport->url); + + pid = git_connect(fd, dest, data->uploadpack, 0); + + if (pid < 0) + die("Failed to connect to \"%s\"", transport->url); + + get_remote_heads(fd[0], &refs, 0, NULL, 0); + packet_flush(fd[1]); + + finish_connect(pid); + + free(dest); + + return refs; +} + +static int fetch_refs_via_pack(struct transport *transport, + int nr_heads, struct ref **to_fetch) +{ + struct git_transport_data *data = transport->data; + char **heads = xmalloc(nr_heads * sizeof(*heads)); + char **origh = xmalloc(nr_heads * sizeof(*origh)); + struct ref *refs; + char *dest = xstrdup(transport->url); + struct fetch_pack_args args; + int i; + + memset(&args, 0, sizeof(args)); + args.uploadpack = data->uploadpack; + args.keep_pack = data->keep; + args.lock_pack = 1; + args.use_thin_pack = data->thin; + args.verbose = transport->verbose > 0; + args.depth = data->depth; + + for (i = 0; i < nr_heads; i++) + origh[i] = heads[i] = xstrdup(to_fetch[i]->name); + refs = fetch_pack(&args, dest, nr_heads, heads, &transport->pack_lockfile); + + for (i = 0; i < nr_heads; i++) + free(origh[i]); + free(origh); + free(heads); + free_refs(refs); + free(dest); + return 0; +} + +static int git_transport_push(struct transport *transport, int refspec_nr, const char **refspec, int flags) { + struct git_transport_data *data = transport->data; + const char **argv; + char *rem; + int argc; + int err; + + argv = xmalloc((refspec_nr + 11) * sizeof(char *)); + argv[0] = "send-pack"; + argc = 1; + if (flags & TRANSPORT_PUSH_ALL) + argv[argc++] = "--all"; + if (flags & TRANSPORT_PUSH_FORCE) + argv[argc++] = "--force"; + if (flags & TRANSPORT_PUSH_DRY_RUN) + argv[argc++] = "--dry-run"; + if (data->receivepack) { + char *rp = xmalloc(strlen(data->receivepack) + 16); + sprintf(rp, "--receive-pack=%s", data->receivepack); + argv[argc++] = rp; + } + if (data->thin) + argv[argc++] = "--thin"; + rem = xmalloc(strlen(transport->remote->name) + 10); + sprintf(rem, "--remote=%s", transport->remote->name); + argv[argc++] = rem; + argv[argc++] = transport->url; + while (refspec_nr--) + argv[argc++] = *refspec++; + argv[argc] = NULL; + err = run_command_v_opt(argv, RUN_GIT_CMD); + switch (err) { + case -ERR_RUN_COMMAND_FORK: + error("unable to fork for %s", argv[0]); + case -ERR_RUN_COMMAND_EXEC: + error("unable to exec %s", argv[0]); + break; + case -ERR_RUN_COMMAND_WAITPID: + case -ERR_RUN_COMMAND_WAITPID_WRONG_PID: + case -ERR_RUN_COMMAND_WAITPID_SIGNAL: + case -ERR_RUN_COMMAND_WAITPID_NOEXIT: + error("%s died with strange error", argv[0]); + } + return !!err; +} + +static int disconnect_git(struct transport *transport) +{ + free(transport->data); + return 0; +} + +static int 
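set_git_option() is reached through transport_set_option(), so a caller can pass protocol options without knowing which backend it got. A sketch of the expected call pattern; configure_fetch() and the warning text are illustrative, while the TRANS_OPT_* names and the return convention come from the new transport.h.

#include "cache.h"
#include "transport.h"

/* Forward --keep and --depth style options to the transport, warning
 * when the backend does not understand them.
 */
static void configure_fetch(struct transport *t, int depth, int keep)
{
	if (keep)
		transport_set_option(t, TRANS_OPT_KEEP, "yes");
	if (depth) {
		char buf[16];

		/* set_git_option() converts the value with atoi(),
		 * so a stack buffer is fine here */
		snprintf(buf, sizeof(buf), "%d", depth);
		if (transport_set_option(t, TRANS_OPT_DEPTH, buf))
			warning("this transport does not support --depth");
	}
}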
is_local(const char *url) +{ + const char *colon = strchr(url, ':'); + const char *slash = strchr(url, '/'); + return !colon || (slash && slash < colon); +} + +static int is_file(const char *url) +{ + struct stat buf; + if (stat(url, &buf)) + return 0; + return S_ISREG(buf.st_mode); +} + +struct transport *transport_get(struct remote *remote, const char *url) +{ + struct transport *ret = xcalloc(1, sizeof(*ret)); + + ret->remote = remote; + ret->url = url; + + if (!prefixcmp(url, "rsync://")) { + ret->get_refs_list = get_refs_via_rsync; + ret->fetch = fetch_objs_via_rsync; + ret->push = rsync_transport_push; + + } else if (!prefixcmp(url, "http://") + || !prefixcmp(url, "https://") + || !prefixcmp(url, "ftp://")) { +#ifdef NO_CURL + error("git was compiled without libcurl support."); +#else + ret->get_refs_list = get_refs_via_curl; + ret->fetch = fetch_objs_via_curl; + ret->push = curl_transport_push; +#endif + ret->disconnect = disconnect_walker; + + } else if (is_local(url) && is_file(url)) { + struct bundle_transport_data *data = xcalloc(1, sizeof(*data)); + ret->data = data; + ret->get_refs_list = get_refs_from_bundle; + ret->fetch = fetch_refs_from_bundle; + ret->disconnect = close_bundle; + + } else { + struct git_transport_data *data = xcalloc(1, sizeof(*data)); + ret->data = data; + ret->set_option = set_git_option; + ret->get_refs_list = get_refs_via_connect; + ret->fetch = fetch_refs_via_pack; + ret->push = git_transport_push; + ret->disconnect = disconnect_git; + + data->thin = 1; + data->uploadpack = "git-upload-pack"; + if (remote && remote->uploadpack) + data->uploadpack = remote->uploadpack; + data->receivepack = "git-receive-pack"; + if (remote && remote->receivepack) + data->receivepack = remote->receivepack; + } + + return ret; +} + +int transport_set_option(struct transport *transport, + const char *name, const char *value) +{ + if (transport->set_option) + return transport->set_option(transport, name, value); + return 1; +} + +int transport_push(struct transport *transport, + int refspec_nr, const char **refspec, int flags) +{ + if (!transport->push) + return 1; + return transport->push(transport, refspec_nr, refspec, flags); +} + +struct ref *transport_get_remote_refs(struct transport *transport) +{ + if (!transport->remote_refs) + transport->remote_refs = transport->get_refs_list(transport); + return transport->remote_refs; +} + +int transport_fetch_refs(struct transport *transport, struct ref *refs) +{ + int rc; + int nr_heads = 0, nr_alloc = 0; + struct ref **heads = NULL; + struct ref *rm; + + for (rm = refs; rm; rm = rm->next) { + if (rm->peer_ref && + !hashcmp(rm->peer_ref->old_sha1, rm->old_sha1)) + continue; + ALLOC_GROW(heads, nr_heads + 1, nr_alloc); + heads[nr_heads++] = rm; + } + + rc = transport->fetch(transport, nr_heads, heads); + free(heads); + return rc; +} + +void transport_unlock_pack(struct transport *transport) +{ + if (transport->pack_lockfile) { + unlink(transport->pack_lockfile); + free(transport->pack_lockfile); + transport->pack_lockfile = NULL; + } +} + +int transport_disconnect(struct transport *transport) +{ + int ret = 0; + if (transport->disconnect) + ret = transport->disconnect(transport); + free(transport); + return ret; +} diff --git a/transport.h b/transport.h new file mode 100644 index 0000000000..df12ea7424 --- /dev/null +++ b/transport.h @@ -0,0 +1,70 @@ +#ifndef TRANSPORT_H +#define TRANSPORT_H + +#include "cache.h" +#include "remote.h" + +struct transport { + struct remote *remote; + const char *url; + void *data; + struct ref 
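Taken together, the public entry points above give the builtin fetch a backend-independent loop. A hedged sketch of that flow: fetch_origin() and "origin" are illustrative, it assumes remote_get() has already parsed remote.<name>.fetch into remote->fetch, and updating the local refs from ref_map plus all error handling are left out.

#include "cache.h"
#include "transport.h"

/* List the remote's refs, map them through the configured refspecs,
 * fetch the needed objects, then clean up.
 */
static int fetch_origin(void)
{
	struct remote *remote = remote_get("origin");
	struct transport *t = transport_get(remote, remote->url[0]);
	struct ref *remote_refs = transport_get_remote_refs(t);
	struct ref *ref_map = NULL, **tail = &ref_map;
	int i, ret;

	for (i = 0; i < remote->fetch_refspec_nr; i++)
		get_fetch_map(remote_refs, &remote->fetch[i], &tail);
	ref_remove_duplicates(ref_map);

	ret = transport_fetch_refs(t, ref_map);
	transport_unlock_pack(t);
	transport_disconnect(t);
	return ret;
}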
*remote_refs; + + /** + * Returns 0 if successful, positive if the option is not + * recognized or is inapplicable, and negative if the option + * is applicable but the value is invalid. + **/ + int (*set_option)(struct transport *connection, const char *name, + const char *value); + + struct ref *(*get_refs_list)(const struct transport *transport); + int (*fetch)(struct transport *transport, int refs_nr, struct ref **refs); + int (*push)(struct transport *connection, int refspec_nr, const char **refspec, int flags); + + int (*disconnect)(struct transport *connection); + char *pack_lockfile; + signed verbose : 2; +}; + +#define TRANSPORT_PUSH_ALL 1 +#define TRANSPORT_PUSH_FORCE 2 +#define TRANSPORT_PUSH_DRY_RUN 4 + +/* Returns a transport suitable for the url */ +struct transport *transport_get(struct remote *, const char *); + +/* Transport options which apply to git:// and scp-style URLs */ + +/* The program to use on the remote side to send a pack */ +#define TRANS_OPT_UPLOADPACK "uploadpack" + +/* The program to use on the remote side to receive a pack */ +#define TRANS_OPT_RECEIVEPACK "receivepack" + +/* Transfer the data as a thin pack if not null */ +#define TRANS_OPT_THIN "thin" + +/* Keep the pack that was transferred if not null */ +#define TRANS_OPT_KEEP "keep" + +/* Limit the depth of the fetch if not null */ +#define TRANS_OPT_DEPTH "depth" + +/** + * Returns 0 if the option was used, non-zero otherwise. Prints a + * message to stderr if the option is not used. + **/ +int transport_set_option(struct transport *transport, const char *name, + const char *value); + +int transport_push(struct transport *connection, + int refspec_nr, const char **refspec, int flags); + +struct ref *transport_get_remote_refs(struct transport *transport); + +int transport_fetch_refs(struct transport *transport, struct ref *refs); +void transport_unlock_pack(struct transport *transport); +int transport_disconnect(struct transport *transport); + +#endif @@ -1,5 +1,5 @@ #include "cache.h" -#include "fetch.h" +#include "walker.h" #include "commit.h" #include "tree.h" #include "tree-walk.h" @@ -7,16 +7,11 @@ #include "blob.h" #include "refs.h" -int get_tree = 0; -int get_history = 0; -int get_all = 0; -int get_verbosely = 0; -int get_recover = 0; static unsigned char current_commit_sha1[20]; -void pull_say(const char *fmt, const char *hex) +void walker_say(struct walker *walker, const char *fmt, const char *hex) { - if (get_verbosely) + if (walker->get_verbosely) fprintf(stderr, fmt, hex); } @@ -31,9 +26,9 @@ static void report_missing(const struct object *obj) sha1_to_hex(current_commit_sha1)); } -static int process(struct object *obj); +static int process(struct walker *walker, struct object *obj); -static int process_tree(struct tree *tree) +static int process_tree(struct walker *walker, struct tree *tree) { struct tree_desc desc; struct name_entry entry; @@ -58,7 +53,7 @@ static int process_tree(struct tree *tree) if (blob) obj = &blob->object; } - if (!obj || process(obj)) + if (!obj || process(walker, obj)) return -1; } free(tree->buffer); @@ -73,7 +68,7 @@ static int process_tree(struct tree *tree) static struct commit_list *complete = NULL; -static int process_commit(struct commit *commit) +static int process_commit(struct walker *walker, struct commit *commit) { if (parse_commit(commit)) return -1; @@ -87,43 +82,43 @@ static int process_commit(struct commit *commit) hashcpy(current_commit_sha1, commit->object.sha1); - pull_say("walk %s\n", sha1_to_hex(commit->object.sha1)); + walker_say(walker, 
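transport.h turns adding another URL scheme into filling in this vtable from transport_get(). A hedged sketch of the minimum a hypothetical "foo://" backend would supply; foo_get_refs_list(), foo_fetch() and setup_foo_transport() are invented names, not part of the patch.

#include "cache.h"
#include "transport.h"

static struct ref *foo_get_refs_list(const struct transport *transport)
{
	return NULL;	/* would list the remote refs for transport->url */
}

static int foo_fetch(struct transport *transport,
		     int nr_refs, struct ref **to_fetch)
{
	return 0;	/* would download the objects for to_fetch[] */
}

static void setup_foo_transport(struct transport *ret)
{
	ret->get_refs_list = foo_get_refs_list;
	ret->fetch = foo_fetch;
	/* push, set_option and disconnect stay NULL; transport_push(),
	 * transport_set_option() and transport_disconnect() already
	 * check for that */
}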
"walk %s\n", sha1_to_hex(commit->object.sha1)); - if (get_tree) { - if (process(&commit->tree->object)) + if (walker->get_tree) { + if (process(walker, &commit->tree->object)) return -1; - if (!get_all) - get_tree = 0; + if (!walker->get_all) + walker->get_tree = 0; } - if (get_history) { + if (walker->get_history) { struct commit_list *parents = commit->parents; for (; parents; parents = parents->next) { - if (process(&parents->item->object)) + if (process(walker, &parents->item->object)) return -1; } } return 0; } -static int process_tag(struct tag *tag) +static int process_tag(struct walker *walker, struct tag *tag) { if (parse_tag(tag)) return -1; - return process(tag->tagged); + return process(walker, tag->tagged); } static struct object_list *process_queue = NULL; static struct object_list **process_queue_end = &process_queue; -static int process_object(struct object *obj) +static int process_object(struct walker *walker, struct object *obj) { if (obj->type == OBJ_COMMIT) { - if (process_commit((struct commit *)obj)) + if (process_commit(walker, (struct commit *)obj)) return -1; return 0; } if (obj->type == OBJ_TREE) { - if (process_tree((struct tree *)obj)) + if (process_tree(walker, (struct tree *)obj)) return -1; return 0; } @@ -131,7 +126,7 @@ static int process_object(struct object *obj) return 0; } if (obj->type == OBJ_TAG) { - if (process_tag((struct tag *)obj)) + if (process_tag(walker, (struct tag *)obj)) return -1; return 0; } @@ -140,7 +135,7 @@ static int process_object(struct object *obj) typename(obj->type), sha1_to_hex(obj->sha1)); } -static int process(struct object *obj) +static int process(struct walker *walker, struct object *obj) { if (obj->flags & SEEN) return 0; @@ -153,7 +148,7 @@ static int process(struct object *obj) else { if (obj->flags & COMPLETE) return 0; - prefetch(obj->sha1); + walker->prefetch(walker, obj->sha1); } object_list_insert(obj, process_queue_end); @@ -161,7 +156,7 @@ static int process(struct object *obj) return 0; } -static int loop(void) +static int loop(struct walker *walker) { struct object_list *elem; @@ -177,25 +172,25 @@ static int loop(void) * the queue because we needed to fetch it first. */ if (! 
(obj->flags & TO_SCAN)) { - if (fetch(obj->sha1)) { + if (walker->fetch(walker, obj->sha1)) { report_missing(obj); return -1; } } if (!obj->type) parse_object(obj->sha1); - if (process_object(obj)) + if (process_object(walker, obj)) return -1; } return 0; } -static int interpret_target(char *target, unsigned char *sha1) +static int interpret_target(struct walker *walker, char *target, unsigned char *sha1) { if (!get_sha1_hex(target, sha1)) return 0; if (!check_ref_format(target)) { - if (!fetch_ref(target, sha1)) { + if (!walker->fetch_ref(walker, target, sha1)) { return 0; } } @@ -212,7 +207,7 @@ static int mark_complete(const char *path, const unsigned char *sha1, int flag, return 0; } -int pull_targets_stdin(char ***target, const char ***write_ref) +int walker_targets_stdin(char ***target, const char ***write_ref) { int targets = 0, targets_alloc = 0; struct strbuf buf; @@ -242,7 +237,7 @@ int pull_targets_stdin(char ***target, const char ***write_ref) return targets; } -void pull_targets_free(int targets, char **target, const char **write_ref) +void walker_targets_free(int targets, char **target, const char **write_ref) { while (targets--) { free(target[targets]); @@ -251,8 +246,8 @@ void pull_targets_free(int targets, char **target, const char **write_ref) } } -int pull(int targets, char **target, const char **write_ref, - const char *write_ref_log_details) +int walker_fetch(struct walker *walker, int targets, char **target, + const char **write_ref, const char *write_ref_log_details) { struct ref_lock **lock = xcalloc(targets, sizeof(struct ref_lock *)); unsigned char *sha1 = xmalloc(targets * 20); @@ -274,19 +269,19 @@ int pull(int targets, char **target, const char **write_ref, } } - if (!get_recover) + if (!walker->get_recover) for_each_ref(mark_complete, NULL); for (i = 0; i < targets; i++) { - if (interpret_target(target[i], &sha1[20 * i])) { + if (interpret_target(walker, target[i], &sha1[20 * i])) { error("Could not interpret %s as something to pull", target[i]); goto unlock_and_fail; } - if (process(lookup_unknown_object(&sha1[20 * i]))) + if (process(walker, lookup_unknown_object(&sha1[20 * i]))) goto unlock_and_fail; } - if (loop()) + if (loop(walker)) goto unlock_and_fail; if (write_ref_log_details) { @@ -307,10 +302,16 @@ int pull(int targets, char **target, const char **write_ref, return 0; - unlock_and_fail: for (i = 0; i < targets; i++) if (lock[i]) unlock_ref(lock[i]); + return -1; } + +void walker_free(struct walker *walker) +{ + walker->cleanup(walker); + free(walker); +} diff --git a/walker.h b/walker.h new file mode 100644 index 0000000000..ea2c363f4e --- /dev/null +++ b/walker.h @@ -0,0 +1,37 @@ +#ifndef WALKER_H +#define WALKER_H + +struct walker { + void *data; + int (*fetch_ref)(struct walker *, char *ref, unsigned char *sha1); + void (*prefetch)(struct walker *, unsigned char *sha1); + int (*fetch)(struct walker *, unsigned char *sha1); + void (*cleanup)(struct walker *); + int get_tree; + int get_history; + int get_all; + int get_verbosely; + int get_recover; + + int corrupt_object_found; +}; + +/* Report what we got under get_verbosely */ +void walker_say(struct walker *walker, const char *, const char *); + +/* Load pull targets from stdin */ +int walker_targets_stdin(char ***target, const char ***write_ref); + +/* Free up loaded targets */ +void walker_targets_free(int targets, char **target, const char **write_ref); + +/* If write_ref is set, the ref filename to write the target value to. 
*/ +/* If write_ref_log_details is set, additional text will appear in the ref log. */ +int walker_fetch(struct walker *impl, int targets, char **target, + const char **write_ref, const char *write_ref_log_details); + +void walker_free(struct walker *walker); + +struct walker *get_http_walker(const char *url); + +#endif /* WALKER_H */ |
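walker.h is the interface the new builtin-http-fetch builds on. A sketch of the expected usage follows; http_fetch_one(), the ref name and passing the url as the reflog detail are assumptions, everything else follows the declarations above.

#include "cache.h"
#include "walker.h"

/* Fetch one commit (and everything it needs) over dumb HTTP and record
 * it in a local ref.
 */
static int http_fetch_one(const char *url, char *commit_id)
{
	struct walker *walker = get_http_walker(url);
	const char *write_ref = "refs/heads/master";
	char *targets[] = { commit_id };
	int ret;

	walker->get_all = 1;
	walker->get_tree = 1;
	walker->get_history = 1;
	walker->get_verbosely = 0;
	walker->get_recover = 0;

	ret = walker_fetch(walker, 1, targets, &write_ref, url);
	walker_free(walker);
	return ret;
}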