2 #include "repository.h"
12 #include "fetch-pack.h"
14 #include "run-command.h"
16 #include "transport.h"
18 #include "prio-queue.h"
19 #include "sha1-array.h"
23 static int transfer_unpack_limit = -1;
24 static int fetch_unpack_limit = -1;
25 static int unpack_limit = 100;
26 static int prefer_ofs_delta = 1;
28 static int deepen_since_ok;
29 static int deepen_not_ok;
30 static int fetch_fsck_objects = -1;
31 static int transfer_fsck_objects = -1;
32 static int agent_supported;
33 static int server_supports_filtering;
34 static struct lock_file shallow_lock;
35 static const char *alternate_shallow_file;
37 /* Remember to update object flag allocation in object.h */
38 #define COMPLETE (1U << 0)
39 #define COMMON (1U << 1)
40 #define COMMON_REF (1U << 2)
41 #define SEEN (1U << 3)
42 #define POPPED (1U << 4)
43 #define ALTERNATE (1U << 5)
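/*
 * How these flags are used during negotiation (as seen in the code below):
 *   COMPLETE   - object is known to be reachable from a local ref
 *   COMMON     - commit is known to be available on the remote side too
 *   COMMON_REF - commit is the tip of a remote-advertised ref we already have
 *   SEEN       - commit has been pushed onto rev_list
 *   POPPED     - commit has been taken off rev_list by get_rev()
 *   ALTERNATE  - object was found through an alternate object store
 */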
48 * After sending this many "have"s, if we do not get any new ACK, we
49 * give up traversing our history.
51 #define MAX_IN_VAIN 256
53 static struct prio_queue rev_list = { compare_commits_by_commit_date };
54 static int non_common_revs, multi_ack, use_sideband;
55 /* Allow specifying sha1 if it is a ref tip. */
56 #define ALLOW_TIP_SHA1 01
57 /* Allow request of a sha1 if it is reachable from a ref (possibly hidden ref). */
58 #define ALLOW_REACHABLE_SHA1 02
59 static unsigned int allow_unadvertised_object_request;
61 __attribute__((format (printf, 2, 3)))
62 static inline void print_verbose(const struct fetch_pack_args *args,
70 va_start(params, fmt);
71 vfprintf(stderr, fmt, params);
76 struct alternate_object_cache {
77 struct object **items;
81 static void cache_one_alternate(const char *refname,
82 const struct object_id *oid,
85 struct alternate_object_cache *cache = vcache;
86 struct object *obj = parse_object(oid);
88 if (!obj || (obj->flags & ALTERNATE))
91 obj->flags |= ALTERNATE;
92 ALLOC_GROW(cache->items, cache->nr + 1, cache->alloc);
93 cache->items[cache->nr++] = obj;
96 static void for_each_cached_alternate(void (*cb)(struct object *))
98 static int initialized;
99 static struct alternate_object_cache cache;
103 for_each_alternate_ref(cache_one_alternate, &cache);
107 for (i = 0; i < cache.nr; i++)
111 static void rev_list_push(struct commit *commit, int mark)
113 if (!(commit->object.flags & mark)) {
114 commit->object.flags |= mark;
116 if (parse_commit(commit))
119 prio_queue_put(&rev_list, commit);
121 if (!(commit->object.flags & COMMON))
126 static int rev_list_insert_ref(const char *refname, const struct object_id *oid)
128 struct object *o = deref_tag(parse_object(oid), refname, 0);
130 if (o && o->type == OBJ_COMMIT)
131 rev_list_push((struct commit *)o, SEEN);
136 static int rev_list_insert_ref_oid(const char *refname, const struct object_id *oid,
137 int flag, void *cb_data)
139 return rev_list_insert_ref(refname, oid);
142 static int clear_marks(const char *refname, const struct object_id *oid,
143 int flag, void *cb_data)
145 struct object *o = deref_tag(parse_object(oid), refname, 0);
147 if (o && o->type == OBJ_COMMIT)
148 clear_commit_marks((struct commit *)o,
149 COMMON | COMMON_REF | SEEN | POPPED);
154 This function marks a rev and its ancestors as common.
155 In some cases, it is desirable to mark only the ancestors (for example
156 when it is only the server that does not yet know they are common; we then still send a "have" for the rev itself, but not for its ancestors).
159 static void mark_common(struct commit *commit,
160 int ancestors_only, int dont_parse)
162 if (commit != NULL && !(commit->object.flags & COMMON)) {
163 struct object *o = (struct object *)commit;
168 if (!(o->flags & SEEN))
169 rev_list_push(commit, SEEN);
171 struct commit_list *parents;
173 if (!ancestors_only && !(o->flags & POPPED))
175 if (!o->parsed && !dont_parse)
176 if (parse_commit(commit))
179 for (parents = commit->parents;
181 parents = parents->next)
182 mark_common(parents->item, 0, dont_parse);
188 Get the next rev to send as a "have", skipping the ones already known to be common.
191 static const struct object_id *get_rev(void)
193 struct commit *commit = NULL;
195 while (commit == NULL) {
197 struct commit_list *parents;
199 if (rev_list.nr == 0 || non_common_revs == 0)
202 commit = prio_queue_get(&rev_list);
203 parse_commit(commit);
204 parents = commit->parents;
206 commit->object.flags |= POPPED;
207 if (!(commit->object.flags & COMMON))
210 if (commit->object.flags & COMMON) {
211 /* do not send "have", and ignore ancestors */
213 mark = COMMON | SEEN;
214 } else if (commit->object.flags & COMMON_REF)
215 /* send "have", and ignore ancestors */
216 mark = COMMON | SEEN;
218 /* send "have", also for its ancestors */
222 if (!(parents->item->object.flags & SEEN))
223 rev_list_push(parents->item, mark);
225 mark_common(parents->item, 1, 0);
226 parents = parents->next;
230 return &commit->object.oid;
241 static void consume_shallow_list(struct fetch_pack_args *args, int fd)
243 if (args->stateless_rpc && args->deepen) {
244 /* If we sent a depth, we will get back "duplicate"
245 * shallow and unshallow commands every time there
246 * is a block of have lines exchanged.
249 while ((line = packet_read_line(fd, NULL))) {
250 if (starts_with(line, "shallow "))
252 if (starts_with(line, "unshallow "))
254 die(_("git fetch-pack: expected shallow list"));
259 static enum ack_type get_ack(int fd, struct object_id *result_oid)
262 char *line = packet_read_line(fd, &len);
266 die(_("git fetch-pack: expected ACK/NAK, got a flush packet"));
267 if (!strcmp(line, "NAK"))
269 if (skip_prefix(line, "ACK ", &arg)) {
270 if (!get_oid_hex(arg, result_oid)) {
275 if (strstr(arg, "continue"))
277 if (strstr(arg, "common"))
279 if (strstr(arg, "ready"))
284 if (skip_prefix(line, "ERR ", &arg))
285 die(_("remote error: %s"), arg);
286 die(_("git fetch-pack: expected ACK/NAK, got '%s'"), line);
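/*
 * For reference, the ACK forms parsed above map onto the ack_type values
 * returned by get_ack():
 *   "NAK"                -> NAK
 *   "ACK <oid>"          -> ACK          (final acknowledgment)
 *   "ACK <oid> continue" -> ACK_continue
 *   "ACK <oid> common"   -> ACK_common
 *   "ACK <oid> ready"    -> ACK_ready
 */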
289 static void send_request(struct fetch_pack_args *args,
290 int fd, struct strbuf *buf)
292 if (args->stateless_rpc) {
293 send_sideband(fd, -1, buf->buf, buf->len, LARGE_PACKET_MAX);
296 write_or_die(fd, buf->buf, buf->len);
299 static void insert_one_alternate_object(struct object *obj)
301 rev_list_insert_ref(NULL, &obj->oid);
304 #define INITIAL_FLUSH 16
305 #define PIPESAFE_FLUSH 32
306 #define LARGE_FLUSH 16384
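/*
 * Flush points for the "have" batches: the first flush comes after
 * INITIAL_FLUSH haves; the batch then doubles up to PIPESAFE_FLUSH
 * (LARGE_FLUSH for stateless RPC), after which it grows by PIPESAFE_FLUSH
 * per round (or by roughly 10% for stateless RPC); see next_flush() below.
 */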
308 static int next_flush(int stateless_rpc, int count)
311 if (count < LARGE_FLUSH)
314 count = count * 11 / 10;
316 if (count < PIPESAFE_FLUSH)
319 count += PIPESAFE_FLUSH;
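/*
 * find_common() below drives protocol v0 negotiation: it sends our "want"s
 * (plus shallow/deepen/filter parameters), then advertises "have"s in
 * growing batches while reading ACKs, marking commits COMMON as the server
 * acknowledges them, until the server is "ready", we run out of revs, or
 * MAX_IN_VAIN haves go unacknowledged. It returns 0 when a common base was
 * found (or the local repository is empty), negative otherwise.
 */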
324 static int find_common(struct fetch_pack_args *args,
325 int fd[2], struct object_id *result_oid,
329 int count = 0, flushes = 0, flush_at = INITIAL_FLUSH, retval;
330 const struct object_id *oid;
331 unsigned in_vain = 0;
332 int got_continue = 0;
334 struct strbuf req_buf = STRBUF_INIT;
335 size_t state_len = 0;
337 if (args->stateless_rpc && multi_ack == 1)
338 die(_("--stateless-rpc requires multi_ack_detailed"));
340 for_each_ref(rev_list_insert_ref_oid, NULL);
341 for_each_cached_alternate(insert_one_alternate_object);
344 for ( ; refs ; refs = refs->next) {
345 struct object_id *remote = &refs->old_oid;
346 const char *remote_hex;
350 * If that object is complete (i.e. it is an ancestor of a
351 * local ref), we tell them we have it but do not have to
352 * tell them about its ancestors, which they already know
355 * We use lookup_object here because we are only
356 * interested in the case we *know* the object is
357 * reachable and we have already scanned it.
359 if (((o = lookup_object(remote->hash)) != NULL) &&
360 (o->flags & COMPLETE)) {
364 remote_hex = oid_to_hex(remote);
366 struct strbuf c = STRBUF_INIT;
367 if (multi_ack == 2) strbuf_addstr(&c, " multi_ack_detailed");
368 if (multi_ack == 1) strbuf_addstr(&c, " multi_ack");
369 if (no_done) strbuf_addstr(&c, " no-done");
370 if (use_sideband == 2) strbuf_addstr(&c, " side-band-64k");
371 if (use_sideband == 1) strbuf_addstr(&c, " side-band");
372 if (args->deepen_relative) strbuf_addstr(&c, " deepen-relative");
373 if (args->use_thin_pack) strbuf_addstr(&c, " thin-pack");
374 if (args->no_progress) strbuf_addstr(&c, " no-progress");
375 if (args->include_tag) strbuf_addstr(&c, " include-tag");
376 if (prefer_ofs_delta) strbuf_addstr(&c, " ofs-delta");
377 if (deepen_since_ok) strbuf_addstr(&c, " deepen-since");
378 if (deepen_not_ok) strbuf_addstr(&c, " deepen-not");
379 if (agent_supported) strbuf_addf(&c, " agent=%s",
380 git_user_agent_sanitized());
381 if (args->filter_options.choice)
382 strbuf_addstr(&c, " filter");
383 packet_buf_write(&req_buf, "want %s%s\n", remote_hex, c.buf);
386 packet_buf_write(&req_buf, "want %s\n", remote_hex);
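/*
 * Illustrative shape of the request built here (not a literal transcript):
 * the first "want" carries the capability list, later wants are bare, e.g.
 *
 *   want <oid1> multi_ack_detailed no-done side-band-64k thin-pack ofs-delta agent=git/2.x
 *   want <oid2>
 *
 * followed by optional shallow/deepen lines and a flush packet.
 */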
391 strbuf_release(&req_buf);
396 if (is_repository_shallow())
397 write_shallow_commits(&req_buf, 1, NULL);
399 packet_buf_write(&req_buf, "deepen %d", args->depth);
400 if (args->deepen_since) {
401 timestamp_t max_age = approxidate(args->deepen_since);
402 packet_buf_write(&req_buf, "deepen-since %"PRItime, max_age);
404 if (args->deepen_not) {
406 for (i = 0; i < args->deepen_not->nr; i++) {
407 struct string_list_item *s = args->deepen_not->items + i;
408 packet_buf_write(&req_buf, "deepen-not %s", s->string);
411 if (server_supports_filtering && args->filter_options.choice)
412 packet_buf_write(&req_buf, "filter %s",
413 args->filter_options.filter_spec);
414 packet_buf_flush(&req_buf);
415 state_len = req_buf.len;
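/*
 * state_len records where the request preamble (wants, shallow/deepen,
 * filter) ends; during the "have" rounds the buffer is truncated back to
 * this point so that, for stateless RPC, every request replays the same
 * preamble followed by the new haves.
 */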
420 struct object_id oid;
422 send_request(args, fd[1], &req_buf);
423 while ((line = packet_read_line(fd[0], NULL))) {
424 if (skip_prefix(line, "shallow ", &arg)) {
425 if (get_oid_hex(arg, &oid))
426 die(_("invalid shallow line: %s"), line);
427 register_shallow(&oid);
430 if (skip_prefix(line, "unshallow ", &arg)) {
431 if (get_oid_hex(arg, &oid))
432 die(_("invalid unshallow line: %s"), line);
433 if (!lookup_object(oid.hash))
434 die(_("object not found: %s"), line);
435 /* make sure that it is parsed as shallow */
436 if (!parse_object(&oid))
437 die(_("error in object: %s"), line);
438 if (unregister_shallow(&oid))
439 die(_("no shallow found: %s"), line);
442 die(_("expected shallow/unshallow, got %s"), line);
444 } else if (!args->stateless_rpc)
445 send_request(args, fd[1], &req_buf);
447 if (!args->stateless_rpc) {
448 /* If we aren't using the stateless-rpc interface
449 * we don't need to retain the headers.
451 strbuf_setlen(&req_buf, 0);
457 if (args->no_dependents)
459 while ((oid = get_rev())) {
460 packet_buf_write(&req_buf, "have %s\n", oid_to_hex(oid));
461 print_verbose(args, "have %s", oid_to_hex(oid));
463 if (flush_at <= ++count) {
466 packet_buf_flush(&req_buf);
467 send_request(args, fd[1], &req_buf);
468 strbuf_setlen(&req_buf, state_len);
470 flush_at = next_flush(args->stateless_rpc, count);
473 * We keep one window "ahead" of the other side, and
474 * will wait for an ACK only on the next one
476 if (!args->stateless_rpc && count == INITIAL_FLUSH)
479 consume_shallow_list(args, fd[0]);
481 ack = get_ack(fd[0], result_oid);
483 print_verbose(args, _("got %s %d %s"), "ack",
484 ack, oid_to_hex(result_oid));
494 struct commit *commit =
495 lookup_commit(result_oid);
497 die(_("invalid commit %s"), oid_to_hex(result_oid));
498 if (args->stateless_rpc
500 && !(commit->object.flags & COMMON)) {
501 /* We need to replay the have for this object
502 * on the next RPC request so the peer knows
503 * it is in common with us.
505 const char *hex = oid_to_hex(result_oid);
506 packet_buf_write(&req_buf, "have %s\n", hex);
507 state_len = req_buf.len;
509 * Reset in_vain because an ack
510 * for this commit has not been
514 } else if (!args->stateless_rpc
515 || ack != ACK_common)
517 mark_common(commit, 0, 1);
520 if (ack == ACK_ready) {
521 clear_prio_queue(&rev_list);
529 if (got_continue && MAX_IN_VAIN < in_vain) {
530 print_verbose(args, _("giving up"));
536 if (!got_ready || !no_done) {
537 packet_buf_write(&req_buf, "done\n");
538 send_request(args, fd[1], &req_buf);
540 print_verbose(args, _("done"));
545 strbuf_release(&req_buf);
547 if (!got_ready || !no_done)
548 consume_shallow_list(args, fd[0]);
549 while (flushes || multi_ack) {
550 int ack = get_ack(fd[0], result_oid);
552 print_verbose(args, _("got %s (%d) %s"), "ack",
553 ack, oid_to_hex(result_oid));
561 /* it is no error to fetch into a completely empty repo */
562 return count ? retval : 0;
565 static struct commit_list *complete;
567 static int mark_complete(const struct object_id *oid)
569 struct object *o = parse_object(oid);
571 while (o && o->type == OBJ_TAG) {
572 struct tag *t = (struct tag *) o;
574 break; /* broken repository */
575 o->flags |= COMPLETE;
576 o = parse_object(&t->tagged->oid);
578 if (o && o->type == OBJ_COMMIT) {
579 struct commit *commit = (struct commit *)o;
580 if (!(commit->object.flags & COMPLETE)) {
581 commit->object.flags |= COMPLETE;
582 commit_list_insert(commit, &complete);
588 static int mark_complete_oid(const char *refname, const struct object_id *oid,
589 int flag, void *cb_data)
591 return mark_complete(oid);
594 static void mark_recent_complete_commits(struct fetch_pack_args *args,
597 while (complete && cutoff <= complete->item->date) {
598 print_verbose(args, _("Marking %s as complete"),
599 oid_to_hex(&complete->item->object.oid));
600 pop_most_recent_commit(&complete, COMPLETE);
604 static void add_refs_to_oidset(struct oidset *oids, struct ref *refs)
606 for (; refs; refs = refs->next)
607 oidset_insert(oids, &refs->old_oid);
610 static int tip_oids_contain(struct oidset *tip_oids,
611 struct ref *unmatched, struct ref *newlist,
612 const struct object_id *id)
615 * Note that this only looks at the ref lists the first time it's
616 * called. This works out in filter_refs() because even though it may
617 * add to "newlist" between calls, the additions will always be for
618 * oids that are already in the set.
620 if (!tip_oids->map.map.tablesize) {
621 add_refs_to_oidset(tip_oids, unmatched);
622 add_refs_to_oidset(tip_oids, newlist);
624 return oidset_contains(tip_oids, id);
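/*
 * filter_refs() below trims the advertised ref list down to what was
 * actually sought (or keeps everything for fetch_all), records a match
 * status on each sought ref, and accepts sought entries given as raw
 * object ids only when the server allows unadvertised objects or the id
 * matches an advertised tip.
 */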
627 static void filter_refs(struct fetch_pack_args *args,
629 struct ref **sought, int nr_sought)
631 struct ref *newlist = NULL;
632 struct ref **newtail = &newlist;
633 struct ref *unmatched = NULL;
634 struct ref *ref, *next;
635 struct oidset tip_oids = OIDSET_INIT;
639 for (ref = *refs; ref; ref = next) {
643 if (starts_with(ref->name, "refs/") &&
644 check_refname_format(ref->name, 0))
647 while (i < nr_sought) {
648 int cmp = strcmp(ref->name, sought[i]->name);
650 break; /* definitely do not have it */
652 keep = 1; /* definitely have it */
653 sought[i]->match_status = REF_MATCHED;
659 if (!keep && args->fetch_all &&
660 (!args->deepen || !starts_with(ref->name, "refs/tags/")))
666 newtail = &ref->next;
668 ref->next = unmatched;
673 /* Append unmatched requests to the list */
674 for (i = 0; i < nr_sought; i++) {
675 struct object_id oid;
679 if (ref->match_status != REF_NOT_MATCHED)
681 if (parse_oid_hex(ref->name, &oid, &p) ||
683 oidcmp(&oid, &ref->old_oid))
686 if ((allow_unadvertised_object_request &
687 (ALLOW_TIP_SHA1 | ALLOW_REACHABLE_SHA1)) ||
688 tip_oids_contain(&tip_oids, unmatched, newlist,
690 ref->match_status = REF_MATCHED;
691 *newtail = copy_ref(ref);
692 newtail = &(*newtail)->next;
694 ref->match_status = REF_UNADVERTISED_NOT_ALLOWED;
698 oidset_clear(&tip_oids);
699 for (ref = unmatched; ref; ref = next) {
707 static void mark_alternate_complete(struct object *obj)
709 mark_complete(&obj->oid);
712 struct loose_object_iter {
713 struct oidset *loose_object_set;
718 * Stop inserting once we have seen more loose objects than there are
719 * refs; in that case the oidset will not be used (see use_oidset below).
721 static int add_loose_objects_to_set(const struct object_id *oid,
725 struct loose_object_iter *iter = data;
726 oidset_insert(iter->loose_object_set, oid);
727 if (iter->refs == NULL)
730 iter->refs = iter->refs->next;
735 * Mark recent commits available locally and reachable from a local ref as
736 * COMPLETE. If args->no_dependents is false, also mark COMPLETE remote refs as
737 * COMMON_REF (otherwise, we are not planning to participate in negotiation, and
738 * thus do not need COMMON_REF marks).
740 * The cutoff time for recency is determined by this heuristic: it is the
741 * earliest commit time of the objects in refs that are commits and that we know
742 * the commit time of.
744 static void mark_complete_and_common_ref(struct fetch_pack_args *args,
748 int old_save_commit_buffer = save_commit_buffer;
749 timestamp_t cutoff = 0;
750 struct oidset loose_oid_set = OIDSET_INIT;
752 struct loose_object_iter iter = {&loose_oid_set, *refs};
754 /* Either enumerate all loose objects into the set, or learn that they outnumber the refs (in which case the set is not used). */
755 use_oidset = !for_each_loose_object(add_loose_objects_to_set,
758 save_commit_buffer = 0;
760 for (ref = *refs; ref; ref = ref->next) {
762 unsigned int flags = OBJECT_INFO_QUICK;
765 !oidset_contains(&loose_oid_set, &ref->old_oid)) {
767 * I know this does not exist in the loose form,
768 * so check if it exists in a non-loose form.
770 flags |= OBJECT_INFO_IGNORE_LOOSE;
773 if (!has_object_file_with_flags(&ref->old_oid, flags))
775 o = parse_object(&ref->old_oid);
779 /* We already have it -- which may mean that we were
780 * in sync with the other side at some time after
781 * that (it is OK if we guess wrong here).
783 if (o->type == OBJ_COMMIT) {
784 struct commit *commit = (struct commit *)o;
785 if (!cutoff || cutoff < commit->date)
786 cutoff = commit->date;
790 oidset_clear(&loose_oid_set);
792 if (!args->no_dependents) {
794 for_each_ref(mark_complete_oid, NULL);
795 for_each_cached_alternate(mark_alternate_complete);
796 commit_list_sort_by_date(&complete);
798 mark_recent_complete_commits(args, cutoff);
802 * Mark all complete remote refs as COMMON_REF.
803 * Don't mark them COMMON yet; the server has to be told so first.
805 for (ref = *refs; ref; ref = ref->next) {
806 struct object *o = deref_tag(lookup_object(ref->old_oid.hash),
809 if (!o || o->type != OBJ_COMMIT || !(o->flags & COMPLETE))
812 if (!(o->flags & SEEN)) {
813 rev_list_push((struct commit *)o, COMMON_REF | SEEN);
815 mark_common((struct commit *)o, 1, 1);
820 save_commit_buffer = old_save_commit_buffer;
824 * Returns 1 if every object pointed to by the given remote refs is available
825 * locally and reachable from a local ref, and 0 otherwise.
827 static int everything_local(struct fetch_pack_args *args,
833 for (retval = 1, ref = *refs; ref ; ref = ref->next) {
834 const struct object_id *remote = &ref->old_oid;
837 o = lookup_object(remote->hash);
838 if (!o || !(o->flags & COMPLETE)) {
840 print_verbose(args, "want %s (%s)", oid_to_hex(remote),
844 print_verbose(args, _("already have %s (%s)"), oid_to_hex(remote),
851 static int sideband_demux(int in, int out, void *data)
856 ret = recv_sideband("fetch-pack", xd[0], out);
861 static int get_pack(struct fetch_pack_args *args,
862 int xd[2], char **pack_lockfile)
865 int do_keep = args->keep_pack;
866 const char *cmd_name;
867 struct pack_header header;
869 struct child_process cmd = CHILD_PROCESS_INIT;
872 memset(&demux, 0, sizeof(demux));
874 /* xd[] is talking with upload-pack; subprocess reads from
875 * xd[0], spits out band#2 to stderr, and feeds us band#1
876 * through demux->out.
878 demux.proc = sideband_demux;
881 demux.isolate_sigpipe = 1;
882 if (start_async(&demux))
883 die(_("fetch-pack: unable to fork off sideband demultiplexer"));
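/*
 * Decide how to store the incoming pack: if it is small (fewer objects
 * than unpack_limit) and not explicitly kept, it is exploded into loose
 * objects with unpack-objects; otherwise it is kept as a packfile and
 * indexed with index-pack.
 */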
888 if (!args->keep_pack && unpack_limit) {
890 if (read_pack_header(demux.out, &header))
891 die(_("protocol error: bad pack header"));
893 if (ntohl(header.hdr_entries) < unpack_limit)
899 if (alternate_shallow_file) {
900 argv_array_push(&cmd.args, "--shallow-file");
901 argv_array_push(&cmd.args, alternate_shallow_file);
904 if (do_keep || args->from_promisor) {
907 cmd_name = "index-pack";
908 argv_array_push(&cmd.args, cmd_name);
909 argv_array_push(&cmd.args, "--stdin");
910 if (!args->quiet && !args->no_progress)
911 argv_array_push(&cmd.args, "-v");
912 if (args->use_thin_pack)
913 argv_array_push(&cmd.args, "--fix-thin");
914 if (do_keep && (args->lock_pack || unpack_limit)) {
915 char hostname[HOST_NAME_MAX + 1];
916 if (xgethostname(hostname, sizeof(hostname)))
917 xsnprintf(hostname, sizeof(hostname), "localhost");
918 argv_array_pushf(&cmd.args,
919 "--keep=fetch-pack %"PRIuMAX " on %s",
920 (uintmax_t)getpid(), hostname);
922 if (args->check_self_contained_and_connected)
923 argv_array_push(&cmd.args, "--check-self-contained-and-connected");
924 if (args->from_promisor)
925 argv_array_push(&cmd.args, "--promisor");
928 cmd_name = "unpack-objects";
929 argv_array_push(&cmd.args, cmd_name);
930 if (args->quiet || args->no_progress)
931 argv_array_push(&cmd.args, "-q");
932 args->check_self_contained_and_connected = 0;
936 argv_array_pushf(&cmd.args, "--pack_header=%"PRIu32",%"PRIu32,
937 ntohl(header.hdr_version),
938 ntohl(header.hdr_entries));
939 if (fetch_fsck_objects >= 0
941 : transfer_fsck_objects >= 0
942 ? transfer_fsck_objects
944 if (args->from_promisor)
946 * We cannot use --strict in index-pack because it
947 * checks both broken objects and links, but we only
948 * want to check for broken objects.
950 argv_array_push(&cmd.args, "--fsck-objects");
952 argv_array_push(&cmd.args, "--strict");
957 if (start_command(&cmd))
958 die(_("fetch-pack: unable to fork off %s"), cmd_name);
959 if (do_keep && pack_lockfile) {
960 *pack_lockfile = index_pack_lockfile(cmd.out);
965 /* Closed by start_command() */
968 ret = finish_command(&cmd);
969 if (!ret || (args->check_self_contained_and_connected && ret == 1))
970 args->self_contained_and_connected =
971 args->check_self_contained_and_connected &&
974 die(_("%s failed"), cmd_name);
975 if (use_sideband && finish_async(&demux))
976 die(_("error in sideband demultiplexer"));
980 static int cmp_ref_by_name(const void *a_, const void *b_)
982 const struct ref *a = *((const struct ref **)a_);
983 const struct ref *b = *((const struct ref **)b_);
984 return strcmp(a->name, b->name);
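/*
 * do_fetch_pack() is the protocol v0 driver: parse the server's capability
 * advertisement, mark local complete/common commits, filter the advertised
 * refs against what was sought, short-circuit if everything is already
 * available locally, negotiate with find_common(), and finally receive the
 * pack with get_pack().
 */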
987 static struct ref *do_fetch_pack(struct fetch_pack_args *args,
989 const struct ref *orig_ref,
990 struct ref **sought, int nr_sought,
991 struct shallow_info *si,
992 char **pack_lockfile)
994 struct ref *ref = copy_ref_list(orig_ref);
995 struct object_id oid;
996 const char *agent_feature;
999 sort_ref_list(&ref, ref_compare_name);
1000 QSORT(sought, nr_sought, cmp_ref_by_name);
1002 if ((args->depth > 0 || is_repository_shallow()) && !server_supports("shallow"))
1003 die(_("Server does not support shallow clients"));
1004 if (args->depth > 0 || args->deepen_since || args->deepen_not)
1006 if (server_supports("multi_ack_detailed")) {
1007 print_verbose(args, _("Server supports multi_ack_detailed"));
1009 if (server_supports("no-done")) {
1010 print_verbose(args, _("Server supports no-done"));
1011 if (args->stateless_rpc)
1015 else if (server_supports("multi_ack")) {
1016 print_verbose(args, _("Server supports multi_ack"));
1019 if (server_supports("side-band-64k")) {
1020 print_verbose(args, _("Server supports side-band-64k"));
1023 else if (server_supports("side-band")) {
1024 print_verbose(args, _("Server supports side-band"));
1027 if (server_supports("allow-tip-sha1-in-want")) {
1028 print_verbose(args, _("Server supports allow-tip-sha1-in-want"));
1029 allow_unadvertised_object_request |= ALLOW_TIP_SHA1;
1031 if (server_supports("allow-reachable-sha1-in-want")) {
1032 print_verbose(args, _("Server supports allow-reachable-sha1-in-want"));
1033 allow_unadvertised_object_request |= ALLOW_REACHABLE_SHA1;
1035 if (!server_supports("thin-pack"))
1036 args->use_thin_pack = 0;
1037 if (!server_supports("no-progress"))
1038 args->no_progress = 0;
1039 if (!server_supports("include-tag"))
1040 args->include_tag = 0;
1041 if (server_supports("ofs-delta"))
1042 print_verbose(args, _("Server supports ofs-delta"));
1044 prefer_ofs_delta = 0;
1046 if (server_supports("filter")) {
1047 server_supports_filtering = 1;
1048 print_verbose(args, _("Server supports filter"));
1049 } else if (args->filter_options.choice) {
1050 warning("filtering not recognized by server, ignoring");
1053 if ((agent_feature = server_feature_value("agent", &agent_len))) {
1054 agent_supported = 1;
1056 print_verbose(args, _("Server version is %.*s"),
1057 agent_len, agent_feature);
1059 if (server_supports("deepen-since"))
1060 deepen_since_ok = 1;
1061 else if (args->deepen_since)
1062 die(_("Server does not support --shallow-since"));
1063 if (server_supports("deepen-not"))
1065 else if (args->deepen_not)
1066 die(_("Server does not support --shallow-exclude"));
1067 if (!server_supports("deepen-relative") && args->deepen_relative)
1068 die(_("Server does not support --deepen"));
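/*
 * For orientation, a protocol v0 capability advertisement that the checks
 * above parse looks roughly like (appended to the first ref line):
 *
 *   multi_ack_detailed no-done side-band-64k thin-pack ofs-delta shallow
 *   deepen-since deepen-not deepen-relative filter agent=git/2.x
 */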
1071 for_each_ref(clear_marks, NULL);
1073 mark_complete_and_common_ref(args, &ref);
1074 filter_refs(args, &ref, sought, nr_sought);
1075 if (everything_local(args, &ref)) {
1076 packet_flush(fd[1]);
1079 if (find_common(args, fd, &oid, ref) < 0)
1080 if (!args->keep_pack)
1081 /* When cloning, it is not unusual to have
1084 warning(_("no common commits"));
1086 if (args->stateless_rpc)
1087 packet_flush(fd[1]);
1089 setup_alternate_shallow(&shallow_lock, &alternate_shallow_file,
1091 else if (si->nr_ours || si->nr_theirs)
1092 alternate_shallow_file = setup_temporary_shallow(si->shallow);
1094 alternate_shallow_file = NULL;
1095 if (get_pack(args, fd, pack_lockfile))
1096 die(_("git fetch-pack: fetch failed."));
1102 static void add_shallow_requests(struct strbuf *req_buf,
1103 const struct fetch_pack_args *args)
1105 if (is_repository_shallow())
1106 write_shallow_commits(req_buf, 1, NULL);
1107 if (args->depth > 0)
1108 packet_buf_write(req_buf, "deepen %d", args->depth);
1109 if (args->deepen_since) {
1110 timestamp_t max_age = approxidate(args->deepen_since);
1111 packet_buf_write(req_buf, "deepen-since %"PRItime, max_age);
1113 if (args->deepen_not) {
1115 for (i = 0; i < args->deepen_not->nr; i++) {
1116 struct string_list_item *s = args->deepen_not->items + i;
1117 packet_buf_write(req_buf, "deepen-not %s", s->string);
1122 static void add_wants(const struct ref *wants, struct strbuf *req_buf)
1124 for ( ; wants ; wants = wants->next) {
1125 const struct object_id *remote = &wants->old_oid;
1126 const char *remote_hex;
1130 * If that object is complete (i.e. it is an ancestor of a
1131 * local ref), we tell them we have it but do not have to
1132 * tell them about its ancestors, which they already know
1135 * We use lookup_object here because we are only
1136 * interested in the case we *know* the object is
1137 * reachable and we have already scanned it.
1139 if (((o = lookup_object(remote->hash)) != NULL) &&
1140 (o->flags & COMPLETE)) {
1144 remote_hex = oid_to_hex(remote);
1145 packet_buf_write(req_buf, "want %s\n", remote_hex);
1149 static void add_common(struct strbuf *req_buf, struct oidset *common)
1151 struct oidset_iter iter;
1152 const struct object_id *oid;
1153 oidset_iter_init(common, &iter);
1155 while ((oid = oidset_iter_next(&iter))) {
1156 packet_buf_write(req_buf, "have %s\n", oid_to_hex(oid));
1160 static int add_haves(struct strbuf *req_buf, int *haves_to_send, int *in_vain)
1163 int haves_added = 0;
1164 const struct object_id *oid;
1166 while ((oid = get_rev())) {
1167 packet_buf_write(req_buf, "have %s\n", oid_to_hex(oid));
1168 if (++haves_added >= *haves_to_send)
1172 *in_vain += haves_added;
1173 if (!haves_added || *in_vain >= MAX_IN_VAIN) {
1175 packet_buf_write(req_buf, "done\n");
1179 /* Increase haves to send on next round */
1180 *haves_to_send = next_flush(1, *haves_to_send);
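/*
 * Rough shape of a protocol v2 fetch request assembled by
 * send_fetch_request() below (pkt-line framing implied, capabilities
 * depend on the server's advertisement):
 *
 *   command=fetch
 *   agent=git/2.x
 *   0001            (delimiter)
 *   thin-pack
 *   ofs-delta
 *   want <oid>
 *   have <oid>
 *   done
 *   0000            (flush)
 */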
1185 static int send_fetch_request(int fd_out, const struct fetch_pack_args *args,
1186 const struct ref *wants, struct oidset *common,
1187 int *haves_to_send, int *in_vain)
1190 struct strbuf req_buf = STRBUF_INIT;
1192 if (server_supports_v2("fetch", 1))
1193 packet_buf_write(&req_buf, "command=fetch");
1194 if (server_supports_v2("agent", 0))
1195 packet_buf_write(&req_buf, "agent=%s", git_user_agent_sanitized());
1196 if (args->server_options && args->server_options->nr &&
1197 server_supports_v2("server-option", 1)) {
1199 for (i = 0; i < args->server_options->nr; i++)
1200 packet_write_fmt(fd_out, "server-option=%s",
1201 args->server_options->items[i].string);
1204 packet_buf_delim(&req_buf);
1205 if (args->use_thin_pack)
1206 packet_buf_write(&req_buf, "thin-pack");
1207 if (args->no_progress)
1208 packet_buf_write(&req_buf, "no-progress");
1209 if (args->include_tag)
1210 packet_buf_write(&req_buf, "include-tag");
1211 if (prefer_ofs_delta)
1212 packet_buf_write(&req_buf, "ofs-delta");
1214 /* Add shallow-info and deepen request */
1215 if (server_supports_feature("fetch", "shallow", 0))
1216 add_shallow_requests(&req_buf, args);
1217 else if (is_repository_shallow() || args->deepen)
1218 die(_("Server does not support shallow requests"));
1221 if (server_supports_feature("fetch", "filter", 0) &&
1222 args->filter_options.choice) {
1223 print_verbose(args, _("Server supports filter"));
1224 packet_buf_write(&req_buf, "filter %s",
1225 args->filter_options.filter_spec);
1226 } else if (args->filter_options.choice) {
1227 warning("filtering not recognized by server, ignoring");
1231 add_wants(wants, &req_buf);
1233 if (args->no_dependents) {
1234 packet_buf_write(&req_buf, "done");
1237 /* Add all of the common commits we've found in previous rounds */
1238 add_common(&req_buf, common);
1240 /* Add initial haves */
1241 ret = add_haves(&req_buf, haves_to_send, in_vain);
1245 packet_buf_flush(&req_buf);
1246 write_or_die(fd_out, req_buf.buf, req_buf.len);
1248 strbuf_release(&req_buf);
1253 * Processes a section header in a server's response and checks if it matches
1254 * `section`. If the value of `peek` is 1, the header line will be peeked (and
1255 * not consumed); if 0, the line will be consumed and the function will die if
1256 * the section header doesn't match what was expected.
1258 static int process_section_header(struct packet_reader *reader,
1259 const char *section, int peek)
1263 if (packet_reader_peek(reader) != PACKET_READ_NORMAL)
1264 die("error reading section header '%s'", section);
1266 ret = !strcmp(reader->line, section);
1270 die("expected '%s', received '%s'",
1271 section, reader->line);
1272 packet_reader_read(reader);
1278 static int process_acks(struct packet_reader *reader, struct oidset *common)
1281 int received_ready = 0;
1282 int received_ack = 0;
1284 process_section_header(reader, "acknowledgments", 0);
1285 while (packet_reader_read(reader) == PACKET_READ_NORMAL) {
1288 if (!strcmp(reader->line, "NAK"))
1291 if (skip_prefix(reader->line, "ACK ", &arg)) {
1292 struct object_id oid;
1293 if (!get_oid_hex(arg, &oid)) {
1294 struct commit *commit;
1295 oidset_insert(common, &oid);
1296 commit = lookup_commit(&oid);
1297 mark_common(commit, 0, 1);
1302 if (!strcmp(reader->line, "ready")) {
1303 clear_prio_queue(&rev_list);
1308 die("unexpected acknowledgment line: '%s'", reader->line);
1311 if (reader->status != PACKET_READ_FLUSH &&
1312 reader->status != PACKET_READ_DELIM)
1313 die("error processing acks: %d", reader->status);
1315 /* return 0 if no common, 1 if there are common, or 2 if ready */
1316 return received_ready ? 2 : (received_ack ? 1 : 0);
1319 static void receive_shallow_info(struct fetch_pack_args *args,
1320 struct packet_reader *reader)
1322 process_section_header(reader, "shallow-info", 0);
1323 while (packet_reader_read(reader) == PACKET_READ_NORMAL) {
1325 struct object_id oid;
1327 if (skip_prefix(reader->line, "shallow ", &arg)) {
1328 if (get_oid_hex(arg, &oid))
1329 die(_("invalid shallow line: %s"), reader->line);
1330 register_shallow(&oid);
1333 if (skip_prefix(reader->line, "unshallow ", &arg)) {
1334 if (get_oid_hex(arg, &oid))
1335 die(_("invalid unshallow line: %s"), reader->line);
1336 if (!lookup_object(oid.hash))
1337 die(_("object not found: %s"), reader->line);
1338 /* make sure that it is parsed as shallow */
1339 if (!parse_object(&oid))
1340 die(_("error in object: %s"), reader->line);
1341 if (unregister_shallow(&oid))
1342 die(_("no shallow found: %s"), reader->line);
1345 die(_("expected shallow/unshallow, got %s"), reader->line);
1348 if (reader->status != PACKET_READ_FLUSH &&
1349 reader->status != PACKET_READ_DELIM)
1350 die("error processing shallow info: %d", reader->status);
1352 setup_alternate_shallow(&shallow_lock, &alternate_shallow_file, NULL);
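/*
 * Protocol v2 fetch runs as a small state machine (see do_fetch_pack_v2):
 * FETCH_CHECK_LOCAL -> FETCH_SEND_REQUEST -> FETCH_PROCESS_ACKS, looping
 * back to FETCH_SEND_REQUEST until the server is "ready" (or we have no
 * more haves to add), then FETCH_GET_PACK -> FETCH_DONE.
 */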
1357 FETCH_CHECK_LOCAL = 0,
1364 static struct ref *do_fetch_pack_v2(struct fetch_pack_args *args,
1366 const struct ref *orig_ref,
1367 struct ref **sought, int nr_sought,
1368 char **pack_lockfile)
1370 struct ref *ref = copy_ref_list(orig_ref);
1371 enum fetch_state state = FETCH_CHECK_LOCAL;
1372 struct oidset common = OIDSET_INIT;
1373 struct packet_reader reader;
1375 int haves_to_send = INITIAL_FLUSH;
1376 packet_reader_init(&reader, fd[0], NULL, 0,
1377 PACKET_READ_CHOMP_NEWLINE);
1379 while (state != FETCH_DONE) {
1381 case FETCH_CHECK_LOCAL:
1382 sort_ref_list(&ref, ref_compare_name);
1383 QSORT(sought, nr_sought, cmp_ref_by_name);
1385 /* v2 supports these by default */
1386 allow_unadvertised_object_request |= ALLOW_REACHABLE_SHA1;
1388 if (args->depth > 0 || args->deepen_since || args->deepen_not)
1392 for_each_ref(clear_marks, NULL);
1395 for_each_ref(rev_list_insert_ref_oid, NULL);
1396 for_each_cached_alternate(insert_one_alternate_object);
1398 /* Filter 'ref' by 'sought' and those that aren't local */
1399 mark_complete_and_common_ref(args, &ref);
1400 filter_refs(args, &ref, sought, nr_sought);
1401 if (everything_local(args, &ref))
1404 state = FETCH_SEND_REQUEST;
1406 case FETCH_SEND_REQUEST:
1407 if (send_fetch_request(fd[1], args, ref, &common,
1408 &haves_to_send, &in_vain))
1409 state = FETCH_GET_PACK;
1411 state = FETCH_PROCESS_ACKS;
1413 case FETCH_PROCESS_ACKS:
1414 /* Process ACKs/NAKs */
1415 switch (process_acks(&reader, &common)) {
1417 state = FETCH_GET_PACK;
1423 state = FETCH_SEND_REQUEST;
1427 case FETCH_GET_PACK:
1428 /* Check for shallow-info section */
1429 if (process_section_header(&reader, "shallow-info", 1))
1430 receive_shallow_info(args, &reader);
1433 process_section_header(&reader, "packfile", 0);
1434 if (get_pack(args, fd, pack_lockfile))
1435 die(_("git fetch-pack: fetch failed."));
1444 oidset_clear(&common);
1448 static void fetch_pack_config(void)
1450 git_config_get_int("fetch.unpacklimit", &fetch_unpack_limit);
1451 git_config_get_int("transfer.unpacklimit", &transfer_unpack_limit);
1452 git_config_get_bool("repack.usedeltabaseoffset", &prefer_ofs_delta);
1453 git_config_get_bool("fetch.fsckobjects", &fetch_fsck_objects);
1454 git_config_get_bool("transfer.fsckobjects", &transfer_fsck_objects);
1456 git_config(git_default_config, NULL);
1459 static void fetch_pack_setup(void)
1461 static int did_setup;
1464 fetch_pack_config();
1465 if (0 <= transfer_unpack_limit)
1466 unpack_limit = transfer_unpack_limit;
1467 else if (0 <= fetch_unpack_limit)
1468 unpack_limit = fetch_unpack_limit;
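/*
 * Note on the assignments above: transfer.unpackLimit takes precedence
 * over fetch.unpackLimit; if neither is set, the built-in default of 100
 * (the initializer of unpack_limit) is used.
 */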
1472 static int remove_duplicates_in_refs(struct ref **ref, int nr)
1474 struct string_list names = STRING_LIST_INIT_NODUP;
1477 for (src = dst = 0; src < nr; src++) {
1478 struct string_list_item *item;
1479 item = string_list_insert(&names, ref[src]->name);
1481 continue; /* already have it */
1482 item->util = ref[src];
1484 ref[dst] = ref[src];
1487 for (src = dst; src < nr; src++)
1489 string_list_clear(&names, 0);
1493 static void update_shallow(struct fetch_pack_args *args,
1494 struct ref **sought, int nr_sought,
1495 struct shallow_info *si)
1497 struct oid_array ref = OID_ARRAY_INIT;
1501 if (args->deepen && alternate_shallow_file) {
1502 if (*alternate_shallow_file == '\0') { /* --unshallow */
1503 unlink_or_warn(git_path_shallow());
1504 rollback_lock_file(&shallow_lock);
1506 commit_lock_file(&shallow_lock);
1510 if (!si->shallow || !si->shallow->nr)
1513 if (args->cloning) {
1515 * remote is shallow, but this is a clone, so there are
1516 * no objects in the repo to worry about. Accept any
1517 * shallow points that exist in the pack (in other words, in the repo
1518 * after get_pack() and reprepare_packed_git())
1520 struct oid_array extra = OID_ARRAY_INIT;
1521 struct object_id *oid = si->shallow->oid;
1522 for (i = 0; i < si->shallow->nr; i++)
1523 if (has_object_file(&oid[i]))
1524 oid_array_append(&extra, &oid[i]);
1526 setup_alternate_shallow(&shallow_lock,
1527 &alternate_shallow_file,
1529 commit_lock_file(&shallow_lock);
1531 oid_array_clear(&extra);
1535 if (!si->nr_ours && !si->nr_theirs)
1538 remove_nonexistent_theirs_shallow(si);
1539 if (!si->nr_ours && !si->nr_theirs)
1541 for (i = 0; i < nr_sought; i++)
1542 oid_array_append(&ref, &sought[i]->old_oid);
1545 if (args->update_shallow) {
1547 * remote is also shallow, .git/shallow may be updated
1548 * so all refs can be accepted. Make sure we only add
1549 * shallow roots that are actually reachable from new
1552 struct oid_array extra = OID_ARRAY_INIT;
1553 struct object_id *oid = si->shallow->oid;
1554 assign_shallow_commits_to_refs(si, NULL, NULL);
1555 if (!si->nr_ours && !si->nr_theirs) {
1556 oid_array_clear(&ref);
1559 for (i = 0; i < si->nr_ours; i++)
1560 oid_array_append(&extra, &oid[si->ours[i]]);
1561 for (i = 0; i < si->nr_theirs; i++)
1562 oid_array_append(&extra, &oid[si->theirs[i]]);
1563 setup_alternate_shallow(&shallow_lock,
1564 &alternate_shallow_file,
1566 commit_lock_file(&shallow_lock);
1567 oid_array_clear(&extra);
1568 oid_array_clear(&ref);
1573 * remote is also shallow, check what ref is safe to update
1574 * without updating .git/shallow
1576 status = xcalloc(nr_sought, sizeof(*status));
1577 assign_shallow_commits_to_refs(si, NULL, status);
1578 if (si->nr_ours || si->nr_theirs) {
1579 for (i = 0; i < nr_sought; i++)
1581 sought[i]->status = REF_STATUS_REJECT_SHALLOW;
1584 oid_array_clear(&ref);
1587 struct ref *fetch_pack(struct fetch_pack_args *args,
1588 int fd[], struct child_process *conn,
1589 const struct ref *ref,
1591 struct ref **sought, int nr_sought,
1592 struct oid_array *shallow,
1593 char **pack_lockfile,
1594 enum protocol_version version)
1596 struct ref *ref_cpy;
1597 struct shallow_info si;
1601 nr_sought = remove_duplicates_in_refs(sought, nr_sought);
1604 packet_flush(fd[1]);
1605 die(_("no matching remote head"));
1607 prepare_shallow_info(&si, shallow);
1608 if (version == protocol_v2)
1609 ref_cpy = do_fetch_pack_v2(args, fd, ref, sought, nr_sought,
1612 ref_cpy = do_fetch_pack(args, fd, ref, sought, nr_sought,
1613 &si, pack_lockfile);
1614 reprepare_packed_git(the_repository);
1615 update_shallow(args, sought, nr_sought, &si);
1616 clear_shallow_info(&si);
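/*
 * fetch_pack() above is the public entry point: it de-duplicates the
 * sought refs, dispatches to the protocol v2 or the original protocol
 * negotiation, then refreshes the packed-object list and updates the
 * shallow bookkeeping.
 */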
1620 int report_unmatched_refs(struct ref **sought, int nr_sought)
1624 for (i = 0; i < nr_sought; i++) {
1627 switch (sought[i]->match_status) {
1630 case REF_NOT_MATCHED:
1631 error(_("no such remote ref %s"), sought[i]->name);
1633 case REF_UNADVERTISED_NOT_ALLOWED:
1634 error(_("Server does not allow request for unadvertised object %s"),