#include "fetch-pack.h"
#include "run-command.h"
#include "transport.h"
#include "prio-queue.h"
#include "sha1-array.h"
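/*
 * Client side of the fetch protocol: talk to upload-pack on the remote,
 * negotiate which commits both ends already have, and receive the
 * resulting packfile.
 */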
static int transfer_unpack_limit = -1;
static int fetch_unpack_limit = -1;
static int unpack_limit = 100;
static int prefer_ofs_delta = 1;
static int deepen_since_ok;
static int deepen_not_ok;
static int fetch_fsck_objects = -1;
static int transfer_fsck_objects = -1;
static int agent_supported;
static struct lock_file shallow_lock;
static const char *alternate_shallow_file;

/* Remember to update object flag allocation in object.h */
#define COMPLETE    (1U << 0)
#define COMMON      (1U << 1)
#define COMMON_REF  (1U << 2)
#define SEEN        (1U << 3)
#define POPPED      (1U << 4)
#define ALTERNATE   (1U << 5)
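/*
 * Roughly, the flags mean:
 *   COMPLETE   - object is known to be reachable from one of our refs
 *   COMMON     - commit is known to exist on both ends
 *   COMMON_REF - commit is an advertised remote tip that we already have
 *   SEEN       - commit has been added to the rev_list queue
 *   POPPED     - commit has been taken off the queue during negotiation
 *   ALTERNATE  - object was discovered via an alternate object database
 */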
 * After sending this many "have"s, if we do not get any new ACK, we
 * give up traversing our history.
#define MAX_IN_VAIN 256
static struct prio_queue rev_list = { compare_commits_by_commit_date };
static int non_common_revs, multi_ack, use_sideband;

/* Allow specifying sha1 if it is a ref tip. */
#define ALLOW_TIP_SHA1       01
/* Allow request of a sha1 if it is reachable from a ref (possibly hidden ref). */
#define ALLOW_REACHABLE_SHA1 02
static unsigned int allow_unadvertised_object_request;

__attribute__((format (printf, 2, 3)))
static inline void print_verbose(const struct fetch_pack_args *args,
        va_start(params, fmt);
        vfprintf(stderr, fmt, params);

struct alternate_object_cache {
        struct object **items;

static void cache_one_alternate(const char *refname,
                                const struct object_id *oid,
        struct alternate_object_cache *cache = vcache;
        struct object *obj = parse_object(oid->hash);

        if (!obj || (obj->flags & ALTERNATE))

        obj->flags |= ALTERNATE;
        ALLOC_GROW(cache->items, cache->nr + 1, cache->alloc);
        cache->items[cache->nr++] = obj;

static void for_each_cached_alternate(void (*cb)(struct object *))
        static int initialized;
        static struct alternate_object_cache cache;

                for_each_alternate_ref(cache_one_alternate, &cache);

        for (i = 0; i < cache.nr; i++)
static void rev_list_push(struct commit *commit, int mark)
        if (!(commit->object.flags & mark)) {
                commit->object.flags |= mark;

                if (parse_commit(commit))

                prio_queue_put(&rev_list, commit);

                if (!(commit->object.flags & COMMON))

static int rev_list_insert_ref(const char *refname, const unsigned char *sha1)
        struct object *o = deref_tag(parse_object(sha1), refname, 0);

        if (o && o->type == OBJ_COMMIT)
                rev_list_push((struct commit *)o, SEEN);

static int rev_list_insert_ref_oid(const char *refname, const struct object_id *oid,
                                   int flag, void *cb_data)
        return rev_list_insert_ref(refname, oid->hash);

static int clear_marks(const char *refname, const struct object_id *oid,
                       int flag, void *cb_data)
        struct object *o = deref_tag(parse_object(oid->hash), refname, 0);

        if (o && o->type == OBJ_COMMIT)
                clear_commit_marks((struct commit *)o,
                                   COMMON | COMMON_REF | SEEN | POPPED);

   This function marks a rev and its ancestors as common.
   In some cases, it is desirable to mark only the ancestors (for example
   when only the server does not yet know that they are common).

static void mark_common(struct commit *commit,
                int ancestors_only, int dont_parse)
        if (commit != NULL && !(commit->object.flags & COMMON)) {
                struct object *o = (struct object *)commit;

                if (!(o->flags & SEEN))
                        rev_list_push(commit, SEEN);

                        struct commit_list *parents;

                        if (!ancestors_only && !(o->flags & POPPED))
                        if (!o->parsed && !dont_parse)
                                if (parse_commit(commit))

                        for (parents = commit->parents;
                                        parents = parents->next)
                                mark_common(parents->item, 0, dont_parse);
  Get the next rev to send, ignoring the common.

static const unsigned char *get_rev(void)
        struct commit *commit = NULL;

        while (commit == NULL) {
                struct commit_list *parents;

                if (rev_list.nr == 0 || non_common_revs == 0)

                commit = prio_queue_get(&rev_list);
                parse_commit(commit);
                parents = commit->parents;

                commit->object.flags |= POPPED;
                if (!(commit->object.flags & COMMON))

                if (commit->object.flags & COMMON) {
                        /* do not send "have", and ignore ancestors */
                        mark = COMMON | SEEN;
                } else if (commit->object.flags & COMMON_REF)
                        /* send "have", and ignore ancestors */
                        mark = COMMON | SEEN;

                        /* send "have", also for its ancestors */

                        if (!(parents->item->object.flags & SEEN))
                                rev_list_push(parents->item, mark);
                                mark_common(parents->item, 1, 0);
                        parents = parents->next;

        return commit->object.oid.hash;
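/*
 * During negotiation the client streams batches of
 *
 *   have <object-id>\n
 *
 * pkt-lines, each batch terminated by a flush-pkt, and finally "done\n"
 * once it runs out of candidates or the server signals it is ready.
 */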
static void consume_shallow_list(struct fetch_pack_args *args, int fd)
        if (args->stateless_rpc && args->deepen) {
                /* If we sent a depth we will get back "duplicate"
                 * shallow and unshallow commands every time there
                 * is a block of have lines exchanged.
                while ((line = packet_read_line(fd, NULL))) {
                        if (starts_with(line, "shallow "))
                        if (starts_with(line, "unshallow "))
                        die(_("git fetch-pack: expected shallow list"));

static enum ack_type get_ack(int fd, unsigned char *result_sha1)
        char *line = packet_read_line(fd, &len);

                die(_("git fetch-pack: expected ACK/NAK, got EOF"));
        if (!strcmp(line, "NAK"))
        if (skip_prefix(line, "ACK ", &arg)) {
                if (!get_sha1_hex(arg, result_sha1)) {
                        if (strstr(arg, "continue"))
                        if (strstr(arg, "common"))
                        if (strstr(arg, "ready"))
        if (skip_prefix(line, "ERR ", &arg))
                die(_("remote error: %s"), arg);
        die(_("git fetch-pack: expected ACK/NAK, got '%s'"), line);
static void send_request(struct fetch_pack_args *args,
                         int fd, struct strbuf *buf)
        if (args->stateless_rpc) {
                send_sideband(fd, -1, buf->buf, buf->len, LARGE_PACKET_MAX);

                write_or_die(fd, buf->buf, buf->len);

static void insert_one_alternate_object(struct object *obj)
        rev_list_insert_ref(NULL, obj->oid.hash);

#define INITIAL_FLUSH 16
#define PIPESAFE_FLUSH 32
#define LARGE_FLUSH 16384
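/*
 * Rough schedule of how many "have"s are sent before each flush: the batch
 * size doubles from INITIAL_FLUSH (16, 32, 64, ...) and then, once it
 * reaches PIPESAFE_FLUSH (or LARGE_FLUSH over stateless RPC), grows more
 * slowly, so the window stays a bounded distance ahead of the acks.
 */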
static int next_flush(struct fetch_pack_args *args, int count)
        if (args->stateless_rpc) {
                if (count < LARGE_FLUSH)

                        count = count * 11 / 10;

                if (count < PIPESAFE_FLUSH)

                        count += PIPESAFE_FLUSH;
static int find_common(struct fetch_pack_args *args,
                       int fd[2], unsigned char *result_sha1,
        int count = 0, flushes = 0, flush_at = INITIAL_FLUSH, retval;
        const unsigned char *sha1;
        unsigned in_vain = 0;
        int got_continue = 0;

        struct strbuf req_buf = STRBUF_INIT;
        size_t state_len = 0;

        if (args->stateless_rpc && multi_ack == 1)
                die(_("--stateless-rpc requires multi_ack_detailed"));

                for_each_ref(clear_marks, NULL);
        for_each_ref(rev_list_insert_ref_oid, NULL);
        for_each_cached_alternate(insert_one_alternate_object);

        for ( ; refs ; refs = refs->next) {
                unsigned char *remote = refs->old_oid.hash;
                const char *remote_hex;

                 * If that object is complete (i.e. it is an ancestor of a
                 * local ref), we tell them we have it but do not have to
                 * tell them about its ancestors, which they already know

                 * We use lookup_object here because we are only
                 * interested in the case we *know* the object is
                 * reachable and we have already scanned it.
                if (((o = lookup_object(remote)) != NULL) &&
                    (o->flags & COMPLETE)) {

                remote_hex = sha1_to_hex(remote);

                        struct strbuf c = STRBUF_INIT;
                        if (multi_ack == 2) strbuf_addstr(&c, " multi_ack_detailed");
                        if (multi_ack == 1) strbuf_addstr(&c, " multi_ack");
                        if (no_done) strbuf_addstr(&c, " no-done");
                        if (use_sideband == 2) strbuf_addstr(&c, " side-band-64k");
                        if (use_sideband == 1) strbuf_addstr(&c, " side-band");
                        if (args->deepen_relative) strbuf_addstr(&c, " deepen-relative");
                        if (args->use_thin_pack) strbuf_addstr(&c, " thin-pack");
                        if (args->no_progress) strbuf_addstr(&c, " no-progress");
                        if (args->include_tag) strbuf_addstr(&c, " include-tag");
                        if (prefer_ofs_delta) strbuf_addstr(&c, " ofs-delta");
                        if (deepen_since_ok) strbuf_addstr(&c, " deepen-since");
                        if (deepen_not_ok) strbuf_addstr(&c, " deepen-not");
                        if (agent_supported) strbuf_addf(&c, " agent=%s",
                                                         git_user_agent_sanitized());
                        packet_buf_write(&req_buf, "want %s%s\n", remote_hex, c.buf);

                        packet_buf_write(&req_buf, "want %s\n", remote_hex);

        strbuf_release(&req_buf);

        if (is_repository_shallow())
                write_shallow_commits(&req_buf, 1, NULL);
                packet_buf_write(&req_buf, "deepen %d", args->depth);
        if (args->deepen_since) {
                unsigned long max_age = approxidate(args->deepen_since);
                packet_buf_write(&req_buf, "deepen-since %lu", max_age);
        if (args->deepen_not) {
                for (i = 0; i < args->deepen_not->nr; i++) {
                        struct string_list_item *s = args->deepen_not->items + i;
                        packet_buf_write(&req_buf, "deepen-not %s", s->string);
        packet_buf_flush(&req_buf);
        state_len = req_buf.len;
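        /*
         * Everything written so far -- the "want" lines with their
         * capability list plus any shallow/deepen lines, ending in a
         * flush-pkt -- forms the fixed header of the request; state_len
         * remembers its length so a stateless-rpc client can replay it at
         * the start of every round.  The first want line looks something
         * like:
         *
         *   want <object-id> multi_ack_detailed side-band-64k thin-pack ofs-delta agent=git/2.x
         */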
                unsigned char sha1[20];

                send_request(args, fd[1], &req_buf);
                while ((line = packet_read_line(fd[0], NULL))) {
                        if (skip_prefix(line, "shallow ", &arg)) {
                                if (get_sha1_hex(arg, sha1))
                                        die(_("invalid shallow line: %s"), line);
                                register_shallow(sha1);
                        if (skip_prefix(line, "unshallow ", &arg)) {
                                if (get_sha1_hex(arg, sha1))
                                        die(_("invalid unshallow line: %s"), line);
                                if (!lookup_object(sha1))
                                        die(_("object not found: %s"), line);
                                /* make sure that it is parsed as shallow */
                                if (!parse_object(sha1))
                                        die(_("error in object: %s"), line);
                                if (unregister_shallow(sha1))
                                        die(_("no shallow found: %s"), line);
                        die(_("expected shallow/unshallow, got %s"), line);
        } else if (!args->stateless_rpc)
                send_request(args, fd[1], &req_buf);

        if (!args->stateless_rpc) {
                /* If we aren't using the stateless-rpc interface
                 * we don't need to retain the headers.
                strbuf_setlen(&req_buf, 0);

        while ((sha1 = get_rev())) {
                packet_buf_write(&req_buf, "have %s\n", sha1_to_hex(sha1));
                print_verbose(args, "have %s", sha1_to_hex(sha1));

                if (flush_at <= ++count) {
                        packet_buf_flush(&req_buf);
                        send_request(args, fd[1], &req_buf);
                        strbuf_setlen(&req_buf, state_len);
                        flush_at = next_flush(args, count);

                         * We keep one window "ahead" of the other side, and
                         * will wait for an ACK only on the next one
                        if (!args->stateless_rpc && count == INITIAL_FLUSH)

                        consume_shallow_list(args, fd[0]);
                                ack = get_ack(fd[0], result_sha1);
                                        print_verbose(args, _("got %s %d %s"), "ack",
                                                      ack, sha1_to_hex(result_sha1));
                                        struct commit *commit =
                                                lookup_commit(result_sha1);
                                                die(_("invalid commit %s"), sha1_to_hex(result_sha1));
                                        if (args->stateless_rpc
                                            && !(commit->object.flags & COMMON)) {
                                                /* We need to replay the have for this object
                                                 * on the next RPC request so the peer knows
                                                 * it is in common with us.
                                                const char *hex = sha1_to_hex(result_sha1);
                                                packet_buf_write(&req_buf, "have %s\n", hex);
                                                state_len = req_buf.len;
                                                 * Reset in_vain because an ack
                                                 * for this commit has not been
                                        } else if (!args->stateless_rpc
                                                   || ack != ACK_common)

                                        mark_common(commit, 0, 1);

                                        if (ack == ACK_ready) {
                                                clear_prio_queue(&rev_list);

                        if (got_continue && MAX_IN_VAIN < in_vain) {
                                print_verbose(args, _("giving up"));

        if (!got_ready || !no_done) {
                packet_buf_write(&req_buf, "done\n");
                send_request(args, fd[1], &req_buf);
        print_verbose(args, _("done"));

        strbuf_release(&req_buf);

        if (!got_ready || !no_done)
                consume_shallow_list(args, fd[0]);
        while (flushes || multi_ack) {
                int ack = get_ack(fd[0], result_sha1);
                        print_verbose(args, _("got %s (%d) %s"), "ack",
                                      ack, sha1_to_hex(result_sha1));

        /* it is no error to fetch into a completely empty repo */
        return count ? retval : 0;
static struct commit_list *complete;

static int mark_complete(const unsigned char *sha1)
        struct object *o = parse_object(sha1);

        while (o && o->type == OBJ_TAG) {
                struct tag *t = (struct tag *) o;
                        break; /* broken repository */
                o->flags |= COMPLETE;
                o = parse_object(t->tagged->oid.hash);
        if (o && o->type == OBJ_COMMIT) {
                struct commit *commit = (struct commit *)o;
                if (!(commit->object.flags & COMPLETE)) {
                        commit->object.flags |= COMPLETE;
                        commit_list_insert(commit, &complete);

static int mark_complete_oid(const char *refname, const struct object_id *oid,
                             int flag, void *cb_data)
        return mark_complete(oid->hash);

static void mark_recent_complete_commits(struct fetch_pack_args *args,
                                         unsigned long cutoff)
        while (complete && cutoff <= complete->item->date) {
                print_verbose(args, _("Marking %s as complete"),
                              oid_to_hex(&complete->item->object.oid));
                pop_most_recent_commit(&complete, COMPLETE);

static void add_refs_to_oidset(struct oidset *oids, struct ref *refs)
        for (; refs; refs = refs->next)
                oidset_insert(oids, &refs->old_oid);

static int tip_oids_contain(struct oidset *tip_oids,
                            struct ref *unmatched, struct ref *newlist,
                            const struct object_id *id)
         * Note that this only looks at the ref lists the first time it's
         * called. This works out in filter_refs() because even though it may
         * add to "newlist" between calls, the additions will always be for
         * oids that are already in the set.
        if (!tip_oids->map.tablesize) {
                add_refs_to_oidset(tip_oids, unmatched);
                add_refs_to_oidset(tip_oids, newlist);
        return oidset_contains(tip_oids, id);
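/*
 * filter_refs() trims the advertised ref list down to what was actually
 * asked for: refs matching "sought" (plus everything when fetch_all is
 * set), and raw object names the server lets us request, either because it
 * advertises allow-tip-sha1-in-want / allow-reachable-sha1-in-want or
 * because the object id appears among the advertised tips.
 */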
static void filter_refs(struct fetch_pack_args *args,
                        struct ref **sought, int nr_sought)
        struct ref *newlist = NULL;
        struct ref **newtail = &newlist;
        struct ref *unmatched = NULL;
        struct ref *ref, *next;
        struct oidset tip_oids = OIDSET_INIT;

        for (ref = *refs; ref; ref = next) {
                if (starts_with(ref->name, "refs/") &&
                    check_refname_format(ref->name, 0))

                        while (i < nr_sought) {
                                int cmp = strcmp(ref->name, sought[i]->name);
                                        break; /* definitely do not have it */
                                        keep = 1; /* definitely have it */
                                        sought[i]->match_status = REF_MATCHED;

                if (!keep && args->fetch_all &&
                    (!args->deepen || !starts_with(ref->name, "refs/tags/")))

                        newtail = &ref->next;

                        ref->next = unmatched;

        /* Append unmatched requests to the list */
        for (i = 0; i < nr_sought; i++) {
                unsigned char sha1[20];

                if (ref->match_status != REF_NOT_MATCHED)
                if (get_sha1_hex(ref->name, sha1) ||
                    ref->name[40] != '\0' ||
                    hashcmp(sha1, ref->old_oid.hash))

                if ((allow_unadvertised_object_request &
                    (ALLOW_TIP_SHA1 | ALLOW_REACHABLE_SHA1)) ||
                    tip_oids_contain(&tip_oids, unmatched, newlist,
                        ref->match_status = REF_MATCHED;
                        *newtail = copy_ref(ref);
                        newtail = &(*newtail)->next;

                        ref->match_status = REF_UNADVERTISED_NOT_ALLOWED;

        oidset_clear(&tip_oids);
        for (ref = unmatched; ref; ref = next) {
static void mark_alternate_complete(struct object *obj)
        mark_complete(obj->oid.hash);

static int everything_local(struct fetch_pack_args *args,
                            struct ref **sought, int nr_sought)
        unsigned long cutoff = 0;

        save_commit_buffer = 0;

        for (ref = *refs; ref; ref = ref->next) {
                if (!has_object_file(&ref->old_oid))

                o = parse_object(ref->old_oid.hash);

                /* We already have it -- which may mean that we were
                 * in sync with the other side at some time after
                 * that (it is OK if we guess wrong here).
                if (o->type == OBJ_COMMIT) {
                        struct commit *commit = (struct commit *)o;
                        if (!cutoff || cutoff < commit->date)
                                cutoff = commit->date;

                for_each_ref(mark_complete_oid, NULL);
                for_each_cached_alternate(mark_alternate_complete);
                commit_list_sort_by_date(&complete);
                        mark_recent_complete_commits(args, cutoff);

         * Mark all complete remote refs as common refs.
         * Don't mark them common yet; the server has to be told so first.
        for (ref = *refs; ref; ref = ref->next) {
                struct object *o = deref_tag(lookup_object(ref->old_oid.hash),
                if (!o || o->type != OBJ_COMMIT || !(o->flags & COMPLETE))

                if (!(o->flags & SEEN)) {
                        rev_list_push((struct commit *)o, COMMON_REF | SEEN);

                        mark_common((struct commit *)o, 1, 1);

        filter_refs(args, refs, sought, nr_sought);

        for (retval = 1, ref = *refs; ref ; ref = ref->next) {
                const unsigned char *remote = ref->old_oid.hash;

                o = lookup_object(remote);
                if (!o || !(o->flags & COMPLETE)) {
                        print_verbose(args, "want %s (%s)", sha1_to_hex(remote),

                print_verbose(args, _("already have %s (%s)"), sha1_to_hex(remote),

static int sideband_demux(int in, int out, void *data)
        ret = recv_sideband("fetch-pack", xd[0], out);
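/*
 * get_pack() below receives the pack data, optionally demultiplexing the
 * sideband first, and feeds it to either "index-pack --stdin" (keeping it
 * as a pack) or "unpack-objects" (exploding it into loose objects),
 * depending on args->keep_pack and on whether the object count in the pack
 * header reaches unpack_limit.
 */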
static int get_pack(struct fetch_pack_args *args,
                    int xd[2], char **pack_lockfile)
        int do_keep = args->keep_pack;
        const char *cmd_name;
        struct pack_header header;
        struct child_process cmd = CHILD_PROCESS_INIT;

        memset(&demux, 0, sizeof(demux));
                /* xd[] is talking with upload-pack; subprocess reads from
                 * xd[0], spits out band#2 to stderr, and feeds us band#1
                 * through demux->out.
                demux.proc = sideband_demux;
                demux.isolate_sigpipe = 1;
                if (start_async(&demux))
                        die(_("fetch-pack: unable to fork off sideband demultiplexer"));

        if (!args->keep_pack && unpack_limit) {
                if (read_pack_header(demux.out, &header))
                        die(_("protocol error: bad pack header"));
                if (ntohl(header.hdr_entries) < unpack_limit)

        if (alternate_shallow_file) {
                argv_array_push(&cmd.args, "--shallow-file");
                argv_array_push(&cmd.args, alternate_shallow_file);

                cmd_name = "index-pack";
                argv_array_push(&cmd.args, cmd_name);
                argv_array_push(&cmd.args, "--stdin");
                if (!args->quiet && !args->no_progress)
                        argv_array_push(&cmd.args, "-v");
                if (args->use_thin_pack)
                        argv_array_push(&cmd.args, "--fix-thin");
                if (args->lock_pack || unpack_limit) {
                        char hostname[HOST_NAME_MAX + 1];
                        if (xgethostname(hostname, sizeof(hostname)))
                                xsnprintf(hostname, sizeof(hostname), "localhost");
                        argv_array_pushf(&cmd.args,
                                         "--keep=fetch-pack %"PRIuMAX " on %s",
                                         (uintmax_t)getpid(), hostname);
                if (args->check_self_contained_and_connected)
                        argv_array_push(&cmd.args, "--check-self-contained-and-connected");

                cmd_name = "unpack-objects";
                argv_array_push(&cmd.args, cmd_name);
                if (args->quiet || args->no_progress)
                        argv_array_push(&cmd.args, "-q");
                args->check_self_contained_and_connected = 0;

                argv_array_pushf(&cmd.args, "--pack_header=%"PRIu32",%"PRIu32,
                                 ntohl(header.hdr_version),
                                 ntohl(header.hdr_entries));
        if (fetch_fsck_objects >= 0
            : transfer_fsck_objects >= 0
            ? transfer_fsck_objects
                argv_array_push(&cmd.args, "--strict");

        if (start_command(&cmd))
                die(_("fetch-pack: unable to fork off %s"), cmd_name);
        if (do_keep && pack_lockfile) {
                *pack_lockfile = index_pack_lockfile(cmd.out);

                /* Closed by start_command() */

        ret = finish_command(&cmd);
        if (!ret || (args->check_self_contained_and_connected && ret == 1))
                args->self_contained_and_connected =
                        args->check_self_contained_and_connected &&

                die(_("%s failed"), cmd_name);
        if (use_sideband && finish_async(&demux))
                die(_("error in sideband demultiplexer"));

static int cmp_ref_by_name(const void *a_, const void *b_)
        const struct ref *a = *((const struct ref **)a_);
        const struct ref *b = *((const struct ref **)b_);
        return strcmp(a->name, b->name);
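/*
 * do_fetch_pack() drives a single fetch: it inspects the capabilities the
 * server advertised (multi_ack_detailed, side-band-64k, shallow, the
 * deepen-* extensions, agent, ...), scales the requested features back to
 * what both sides understand, negotiates common commits, and finally calls
 * get_pack().
 */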
static struct ref *do_fetch_pack(struct fetch_pack_args *args,
                                 const struct ref *orig_ref,
                                 struct ref **sought, int nr_sought,
                                 struct shallow_info *si,
                                 char **pack_lockfile)
        struct ref *ref = copy_ref_list(orig_ref);
        unsigned char sha1[20];
        const char *agent_feature;

        sort_ref_list(&ref, ref_compare_name);
        QSORT(sought, nr_sought, cmp_ref_by_name);

        if ((args->depth > 0 || is_repository_shallow()) && !server_supports("shallow"))
                die(_("Server does not support shallow clients"));
        if (args->depth > 0 || args->deepen_since || args->deepen_not)

        if (server_supports("multi_ack_detailed")) {
                print_verbose(args, _("Server supports multi_ack_detailed"));
                if (server_supports("no-done")) {
                        print_verbose(args, _("Server supports no-done"));
                        if (args->stateless_rpc)

        else if (server_supports("multi_ack")) {
                print_verbose(args, _("Server supports multi_ack"));

        if (server_supports("side-band-64k")) {
                print_verbose(args, _("Server supports side-band-64k"));

        else if (server_supports("side-band")) {
                print_verbose(args, _("Server supports side-band"));

        if (server_supports("allow-tip-sha1-in-want")) {
                print_verbose(args, _("Server supports allow-tip-sha1-in-want"));
                allow_unadvertised_object_request |= ALLOW_TIP_SHA1;
        if (server_supports("allow-reachable-sha1-in-want")) {
                print_verbose(args, _("Server supports allow-reachable-sha1-in-want"));
                allow_unadvertised_object_request |= ALLOW_REACHABLE_SHA1;
        if (!server_supports("thin-pack"))
                args->use_thin_pack = 0;
        if (!server_supports("no-progress"))
                args->no_progress = 0;
        if (!server_supports("include-tag"))
                args->include_tag = 0;
        if (server_supports("ofs-delta"))
                print_verbose(args, _("Server supports ofs-delta"));

                prefer_ofs_delta = 0;

        if ((agent_feature = server_feature_value("agent", &agent_len))) {
                        print_verbose(args, _("Server version is %.*s"),
                                      agent_len, agent_feature);

        if (server_supports("deepen-since"))
        else if (args->deepen_since)
                die(_("Server does not support --shallow-since"));
        if (server_supports("deepen-not"))
        else if (args->deepen_not)
                die(_("Server does not support --shallow-exclude"));
        if (!server_supports("deepen-relative") && args->deepen_relative)
                die(_("Server does not support --deepen"));

        if (everything_local(args, &ref, sought, nr_sought)) {

        if (find_common(args, fd, sha1, ref) < 0)
                if (!args->keep_pack)
                        /* When cloning, it is not unusual to have
                        warning(_("no common commits"));

        if (args->stateless_rpc)
                setup_alternate_shallow(&shallow_lock, &alternate_shallow_file,
        else if (si->nr_ours || si->nr_theirs)
                alternate_shallow_file = setup_temporary_shallow(si->shallow);
                alternate_shallow_file = NULL;
        if (get_pack(args, fd, pack_lockfile))
                die(_("git fetch-pack: fetch failed."));

static void fetch_pack_config(void)
        git_config_get_int("fetch.unpacklimit", &fetch_unpack_limit);
        git_config_get_int("transfer.unpacklimit", &transfer_unpack_limit);
        git_config_get_bool("repack.usedeltabaseoffset", &prefer_ofs_delta);
        git_config_get_bool("fetch.fsckobjects", &fetch_fsck_objects);
        git_config_get_bool("transfer.fsckobjects", &transfer_fsck_objects);

        git_config(git_default_config, NULL);

static void fetch_pack_setup(void)
        static int did_setup;

        fetch_pack_config();
        if (0 <= transfer_unpack_limit)
                unpack_limit = transfer_unpack_limit;
        else if (0 <= fetch_unpack_limit)
                unpack_limit = fetch_unpack_limit;
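/*
 * As coded above, transfer.unpackLimit wins over fetch.unpackLimit when
 * both are set.  Incoming packs with at least unpack_limit objects are
 * kept as packs via index-pack; smaller ones are exploded into loose
 * objects by unpack-objects (see get_pack()).
 */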
static int remove_duplicates_in_refs(struct ref **ref, int nr)
        struct string_list names = STRING_LIST_INIT_NODUP;

        for (src = dst = 0; src < nr; src++) {
                struct string_list_item *item;
                item = string_list_insert(&names, ref[src]->name);
                        continue; /* already have it */
                item->util = ref[src];
                        ref[dst] = ref[src];

        for (src = dst; src < nr; src++)
        string_list_clear(&names, 0);
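/*
 * update_shallow() reconciles .git/shallow with what the fetch brought in:
 * it commits or rolls back the shallow lock file, accepts new shallow
 * roots when cloning or when --update-shallow was given, and otherwise
 * marks refs that would need a shallow update as REF_STATUS_REJECT_SHALLOW.
 */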
static void update_shallow(struct fetch_pack_args *args,
                           struct ref **sought, int nr_sought,
                           struct shallow_info *si)
        struct oid_array ref = OID_ARRAY_INIT;

        if (args->deepen && alternate_shallow_file) {
                if (*alternate_shallow_file == '\0') { /* --unshallow */
                        unlink_or_warn(git_path_shallow());
                        rollback_lock_file(&shallow_lock);

                        commit_lock_file(&shallow_lock);

        if (!si->shallow || !si->shallow->nr)

        if (args->cloning) {
                 * remote is shallow, but this is a clone, there are
                 * no objects in repo to worry about. Accept any
                 * shallow points that exist in the pack (iow in repo
                 * after get_pack() and reprepare_packed_git())
                struct oid_array extra = OID_ARRAY_INIT;
                struct object_id *oid = si->shallow->oid;
                for (i = 0; i < si->shallow->nr; i++)
                        if (has_object_file(&oid[i]))
                                oid_array_append(&extra, &oid[i]);
                        setup_alternate_shallow(&shallow_lock,
                                                &alternate_shallow_file,
                        commit_lock_file(&shallow_lock);
                oid_array_clear(&extra);

        if (!si->nr_ours && !si->nr_theirs)

        remove_nonexistent_theirs_shallow(si);
        if (!si->nr_ours && !si->nr_theirs)

        for (i = 0; i < nr_sought; i++)
                oid_array_append(&ref, &sought[i]->old_oid);

        if (args->update_shallow) {
                 * remote is also shallow, .git/shallow may be updated
                 * so all refs can be accepted. Make sure we only add
                 * shallow roots that are actually reachable from new
                struct oid_array extra = OID_ARRAY_INIT;
                struct object_id *oid = si->shallow->oid;
                assign_shallow_commits_to_refs(si, NULL, NULL);
                if (!si->nr_ours && !si->nr_theirs) {
                        oid_array_clear(&ref);
                for (i = 0; i < si->nr_ours; i++)
                        oid_array_append(&extra, &oid[si->ours[i]]);
                for (i = 0; i < si->nr_theirs; i++)
                        oid_array_append(&extra, &oid[si->theirs[i]]);
                setup_alternate_shallow(&shallow_lock,
                                        &alternate_shallow_file,
                commit_lock_file(&shallow_lock);
                oid_array_clear(&extra);
                oid_array_clear(&ref);

         * remote is also shallow, check what ref is safe to update
         * without updating .git/shallow
        status = xcalloc(nr_sought, sizeof(*status));
        assign_shallow_commits_to_refs(si, NULL, status);
        if (si->nr_ours || si->nr_theirs) {
                for (i = 0; i < nr_sought; i++)
                                sought[i]->status = REF_STATUS_REJECT_SHALLOW;

        oid_array_clear(&ref);
struct ref *fetch_pack(struct fetch_pack_args *args,
                       int fd[], struct child_process *conn,
                       const struct ref *ref,
                       struct ref **sought, int nr_sought,
                       struct oid_array *shallow,
                       char **pack_lockfile)
        struct ref *ref_cpy;
        struct shallow_info si;

                nr_sought = remove_duplicates_in_refs(sought, nr_sought);

                packet_flush(fd[1]);
                die(_("no matching remote head"));

        prepare_shallow_info(&si, shallow);
        ref_cpy = do_fetch_pack(args, fd, ref, sought, nr_sought,
                                &si, pack_lockfile);
        reprepare_packed_git();
        update_shallow(args, sought, nr_sought, &si);
        clear_shallow_info(&si);

int report_unmatched_refs(struct ref **sought, int nr_sought)
        for (i = 0; i < nr_sought; i++) {
                switch (sought[i]->match_status) {
                case REF_NOT_MATCHED:
                        error(_("no such remote ref %s"), sought[i]->name);
                case REF_UNADVERTISED_NOT_ALLOWED:
                        error(_("Server does not allow request for unadvertised object %s"),