2 #include "repository.h"
5 #include "object-store.h"
11 #include "sha1-array.h"
14 #include "commit-slab.h"
16 #include "list-objects.h"
/*
 * Cached shallow state: -1 = not yet determined; after the shallow
 * file has been read it is set (checked via the "== -1" BUG guard in
 * check_shallow_file_for_update()).
 */
18 static int is_shallow = -1;
/* stat() snapshot of the shallow file, to detect later modification */
19 static struct stat_validity shallow_stat;
/* Override path for the shallow file; see set_alternate_shallow_file() */
20 static char *alternate_shallow_file;
/*
 * Install an alternate path for the shallow file. Must be called
 * before is_repository_shallow() has run (dies on the BUG otherwise,
 * per the die() below — the guarding condition is not visible here).
 * With override == 0 an already-set path is kept; path may be NULL
 * (xstrdup_or_null then stores NULL).
 */
22 void set_alternate_shallow_file(const char *path, int override)
25 die("BUG: is_repository_shallow must not be called before set_alternate_shallow_file");
26 if (alternate_shallow_file && !override)
/* replace any previously stored copy with a fresh duplicate */
28 free(alternate_shallow_file);
29 alternate_shallow_file = xstrdup_or_null(path);
/*
 * Record oid as a shallow boundary: register a commit graft with
 * nr_parent == -1 (the marker for "shallow" grafts). If the commit
 * object is already parsed, drop its parent list so it behaves as a
 * parentless (grafted) commit from now on.
 */
32 int register_shallow(const struct object_id *oid)
34 struct commit_graft *graft =
35 xmalloc(sizeof(struct commit_graft));
36 struct commit *commit = lookup_commit(oid);
38 oidcpy(&graft->oid, oid);
39 graft->nr_parent = -1; /* -1 == shallow marker, not a real graft */
40 if (commit && commit->object.parsed)
41 commit->parents = NULL;
42 return register_commit_graft(the_repository, graft, 0);
/*
 * Return whether the repository is shallow, reading and registering
 * every commit listed in the shallow file on first call. Also records
 * the file's stat data in shallow_stat for later change detection.
 */
45 int is_repository_shallow(void)
49 const char *path = alternate_shallow_file;
/* no alternate set: fall back to $GIT_DIR/shallow */
55 path = git_path_shallow();
57 * fetch-pack sets '--shallow-file ""' as an indicator that no
58 * shallow file should be used. We could just open it and it
59 * will likely fail. But let's do an explicit check instead.
61 if (!*path || (fp = fopen(path, "r")) == NULL) {
62 stat_validity_clear(&shallow_stat);
66 stat_validity_update(&shallow_stat, fileno(fp));
/* one hex object id per line; anything else is a corrupt file */
69 while (fgets(buf, sizeof(buf), fp)) {
71 if (get_oid_hex(buf, &oid))
72 die("bad shallow line: %s", buf);
73 register_shallow(&oid);
/*
 * Walk from the commits in 'heads' breadth-first, tracking each
 * commit's depth in an int hung off commit->util. Commits at the
 * depth cutoff (or already-shallow grafted commits) are collected
 * into the returned list and flagged shallow_flag; every commit
 * inside the cutoff gets not_shallow_flag.
 */
79 struct commit_list *get_shallow_commits(struct object_array *heads, int depth,
80 int shallow_flag, int not_shallow_flag)
82 int i = 0, cur_depth = 0;
83 struct commit_list *result = NULL;
84 struct object_array stack = OBJECT_ARRAY_INIT;
85 struct commit *commit = NULL;
86 struct commit_graft *graft;
/* loop until the current commit, the head list and the stack are all drained */
88 while (commit || i < heads->nr || stack.nr) {
89 struct commit_list *p;
/* take the next head, peeling tags down to the commit */
92 commit = (struct commit *)
93 deref_tag(heads->objects[i++].item, NULL, 0);
94 if (!commit || commit->object.type != OBJ_COMMIT) {
/* heads start at depth 0 */
99 commit->util = xmalloc(sizeof(int));
100 *(int *)commit->util = 0;
/* otherwise resume from a commit deferred on the stack */
103 commit = (struct commit *)
104 object_array_pop(&stack);
105 cur_depth = *(int *)commit->util;
108 parse_commit_or_die(commit);
/* at the depth limit, or at a pre-existing shallow graft: mark shallow */
110 if ((depth != INFINITE_DEPTH && cur_depth >= depth) ||
111 (is_repository_shallow() && !commit->parents &&
112 (graft = lookup_commit_graft(the_repository, &commit->object.oid)) != NULL &&
113 graft->nr_parent < 0)) {
114 commit_list_insert(commit, &result);
115 commit->object.flags |= shallow_flag;
119 commit->object.flags |= not_shallow_flag;
/* propagate depth to parents; keep the deepest depth seen per parent */
120 for (p = commit->parents, commit = NULL; p; p = p->next) {
121 if (!p->item->util) {
122 int *pointer = xmalloc(sizeof(int));
123 p->item->util = pointer;
124 *pointer = cur_depth;
126 int *pointer = p->item->util;
127 if (cur_depth >= *pointer)
129 *pointer = cur_depth;
/* remaining parents are deferred on the stack for later */
132 add_object_array(&p->item->object,
136 cur_depth = *(int *)commit->util;
/* rev-list callback: collect each shown commit into the list at *data */
static void show_commit(struct commit *commit, void *data)
146 commit_list_insert(commit, data);
150 * Given rev-list arguments, run rev-list. All reachable commits
151 * except border ones are marked with not_shallow_flag. Border commits
152 * are marked with shallow_flag. The list of border/shallow commits
/*
 * Given rev-list arguments, run rev-list. All reachable commits
 * except border ones are marked with not_shallow_flag. Border
 * commits (those with at least one unreachable parent) are marked
 * with shallow_flag and returned as a list.
 */
155 struct commit_list *get_shallow_commits_by_rev_list(int ac, const char **av,
157 int not_shallow_flag)
159 struct commit_list *result = NULL, *p;
160 struct commit_list *not_shallow_list = NULL;
161 struct rev_info revs;
162 int both_flags = shallow_flag | not_shallow_flag;
165 * SHALLOW (excluded) and NOT_SHALLOW (included) should not be
166 * set at this point. But better be safe than sorry.
168 clear_object_flags(both_flags);
170 is_repository_shallow(); /* make sure shallows are read */
172 init_revisions(&revs, NULL);
173 save_commit_buffer = 0;
174 setup_revisions(ac, av, &revs, NULL);
176 if (prepare_revision_walk(&revs))
177 die("revision walk setup failed");
/* fixed: "&not_shallow_list" had been corrupted to the mojibake "¬_shallow_list" */
178 traverse_commit_list(&revs, show_commit, NULL, &not_shallow_list);
180 /* Mark all reachable commits as NOT_SHALLOW */
181 for (p = not_shallow_list; p; p = p->next)
182 p->item->object.flags |= not_shallow_flag;
185 * mark border commits SHALLOW + NOT_SHALLOW.
186 * We cannot clear NOT_SHALLOW right now. Imagine border
187 * commit A is processed first, then commit B, whose parent is
188 * A, later. If NOT_SHALLOW on A is cleared at step 1, B
189 * itself is considered border at step 2, which is incorrect.
191 for (p = not_shallow_list; p; p = p->next) {
192 struct commit *c = p->item;
193 struct commit_list *parent;
196 die("unable to parse commit %s",
197 oid_to_hex(&c->object.oid));
/* a parent without NOT_SHALLOW makes this a border commit */
199 for (parent = c->parents; parent; parent = parent->next)
200 if (!(parent->item->object.flags & not_shallow_flag)) {
201 c->object.flags |= shallow_flag;
202 commit_list_insert(c, &result);
206 free_commit_list(not_shallow_list);
209 * Now we can clean up NOT_SHALLOW on border commits. Having
210 * both flags set can confuse the caller.
212 for (p = result; p; p = p->next) {
213 struct object *o = &p->item->object;
214 if ((o->flags & both_flags) == both_flags)
215 o->flags &= ~not_shallow_flag;
/*
 * Die if the shallow file changed on disk since is_repository_shallow()
 * read it (compared via the stat_validity snapshot), or if shallow
 * state was never initialized at all.
 */
static void check_shallow_file_for_update(void)
222 if (is_shallow == -1)
223 die("BUG: shallow must be initialized by now");
225 if (!stat_validity_check(&shallow_stat, git_path_shallow()))
226 die("shallow file has changed since we read it");
/* callback state for write_one_shallow(); carries the output strbuf
 * (field not visible in this chunk), protocol choice and flags */
struct write_shallow_data {
234 int use_pack_protocol;
/*
 * for_each_commit_graft() callback: append one shallow graft
 * (nr_parent == -1 entries only) to data->out, either as a pkt-line
 * "shallow <hex>" or as a plain "<hex>\n" line. With SEEN_ONLY,
 * grafts whose commit is missing or not flagged SEEN are skipped
 * (and reported when VERBOSE).
 */
static int write_one_shallow(const struct commit_graft *graft, void *cb_data)
241 struct write_shallow_data *data = cb_data;
242 const char *hex = oid_to_hex(&graft->oid);
243 if (graft->nr_parent != -1)
245 if (data->flags & SEEN_ONLY) {
246 struct commit *c = lookup_commit(&graft->oid);
247 if (!c || !(c->object.flags & SEEN)) {
248 if (data->flags & VERBOSE)
249 printf("Removing %s from .git/shallow\n",
250 oid_to_hex(&c->object.oid));
255 if (data->use_pack_protocol)
256 packet_buf_write(data->out, "shallow %s", hex);
258 strbuf_addstr(data->out, hex);
259 strbuf_addch(data->out, '\n');
/*
 * Write every registered shallow graft to 'out' (via
 * write_one_shallow), then append the object ids in 'extra', one hex
 * id per line.
 */
static int write_shallow_commits_1(struct strbuf *out, int use_pack_protocol,
265 const struct oid_array *extra,
268 struct write_shallow_data data;
271 data.use_pack_protocol = use_pack_protocol;
274 for_each_commit_graft(write_one_shallow, &data);
277 for (i = 0; i < extra->nr; i++) {
278 strbuf_addstr(out, oid_to_hex(extra->oid + i));
279 strbuf_addch(out, '\n');
/* Convenience wrapper: write_shallow_commits_1() with no flags set */
int write_shallow_commits(struct strbuf *out, int use_pack_protocol,
286 const struct oid_array *extra)
288 return write_shallow_commits_1(out, use_pack_protocol, extra, 0);
/*
 * Write the current shallow list (plus 'extra') to a temporary
 * "shallow_XXXXXX" file in $GIT_DIR and return its path; dies if the
 * write or close fails. When there is nothing to write, the temp
 * file is not created (the not-shown fallback path returns an empty
 * string, per the trailing comment fragment).
 */
const char *setup_temporary_shallow(const struct oid_array *extra)
293 struct tempfile *temp;
294 struct strbuf sb = STRBUF_INIT;
296 if (write_shallow_commits(&sb, 0, extra)) {
297 temp = xmks_tempfile(git_path("shallow_XXXXXX"));
299 if (write_in_full(temp->fd, sb.buf, sb.len) < 0 ||
300 close_tempfile_gently(temp) < 0)
301 die_errno("failed to write to %s",
302 get_tempfile_path(temp));
304 return get_tempfile_path(temp);
307 * is_repository_shallow() sees empty string as "no shallow
/*
 * Take the shallow-file lock, verify the file has not changed since
 * we read it, then write the current shallow list (plus 'extra') to
 * the lockfile. On success *alternate_shallow_file points at the
 * lockfile path; when there is nothing to write it is set to "" so
 * is_repository_shallow() treats it as "no shallow file".
 */
void setup_alternate_shallow(struct lock_file *shallow_lock,
314 const char **alternate_shallow_file,
315 const struct oid_array *extra)
317 struct strbuf sb = STRBUF_INIT;
320 fd = hold_lock_file_for_update(shallow_lock, git_path_shallow(),
/* refuse to proceed if someone modified the shallow file under us */
322 check_shallow_file_for_update();
323 if (write_shallow_commits(&sb, 0, extra)) {
324 if (write_in_full(fd, sb.buf, sb.len) < 0)
325 die_errno("failed to write to %s",
326 get_lock_file_path(shallow_lock));
327 *alternate_shallow_file = get_lock_file_path(shallow_lock);
330 * is_repository_shallow() sees empty string as "no
333 *alternate_shallow_file = "";
/* for_each_commit_graft() callback: send one "shallow <hex>" pkt-line
 * for each shallow graft (nr_parent == -1) over the fd passed via cb */
static int advertise_shallow_grafts_cb(const struct commit_graft *graft, void *cb)
340 if (graft->nr_parent == -1)
341 packet_write_fmt(fd, "shallow %s\n", oid_to_hex(&graft->oid));
/* Advertise all shallow grafts on fd; no-op for non-shallow repos */
void advertise_shallow_grafts(int fd)
347 if (!is_repository_shallow())
349 for_each_commit_graft(advertise_shallow_grafts_cb, &fd);
 * mark_reachable_objects() should have been run prior to this and all
 * reachable commits marked as "SEEN".
/*
 * Rewrite .git/shallow keeping only SEEN (reachable) entries. In
 * show_only mode just print what would be removed (SEEN_ONLY |
 * VERBOSE) without touching the file. If no entry survives, the
 * shallow file is removed entirely and the lock rolled back.
 */
void prune_shallow(int show_only)
358 static struct lock_file shallow_lock;
359 struct strbuf sb = STRBUF_INIT;
363 write_shallow_commits_1(&sb, 0, NULL, SEEN_ONLY | VERBOSE);
367 fd = hold_lock_file_for_update(&shallow_lock, git_path_shallow(),
369 check_shallow_file_for_update();
370 if (write_shallow_commits_1(&sb, 0, NULL, SEEN_ONLY)) {
371 if (write_in_full(fd, sb.buf, sb.len) < 0)
372 die_errno("failed to write to %s",
373 get_lock_file_path(&shallow_lock));
374 commit_lock_file(&shallow_lock);
/* nothing left: drop the shallow file instead of writing an empty one */
376 unlink(git_path_shallow());
377 rollback_lock_file(&shallow_lock);
/* trace key: enable with GIT_TRACE_SHALLOW */
struct trace_key trace_shallow = TRACE_KEY_INIT(SHALLOW);
385 * Step 1, split sender shallow commits into "ours" and "theirs"
386 * Step 2, clean "ours" based on .git/shallow
/*
 * Partition the sender's shallow ids in 'sa' by index: ids whose
 * object exists locally but is NOT already a shallow graft go into
 * info->ours; ids we do not have go into info->theirs.
 */
void prepare_shallow_info(struct shallow_info *info, struct oid_array *sa)
391 trace_printf_key(&trace_shallow, "shallow: prepare_shallow_info\n");
392 memset(info, 0, sizeof(*info));
396 ALLOC_ARRAY(info->ours, sa->nr);
397 ALLOC_ARRAY(info->theirs, sa->nr);
398 for (i = 0; i < sa->nr; i++) {
399 if (has_object_file(sa->oid + i)) {
400 struct commit_graft *graft;
401 graft = lookup_commit_graft(the_repository,
/* already shallow here: skip (graft->nr_parent < 0 marks shallow) */
403 if (graft && graft->nr_parent < 0)
405 info->ours[info->nr_ours++] = i;
407 info->theirs[info->nr_theirs++] = i;
/* Release memory held by a shallow_info (body not visible in this chunk) */
void clear_shallow_info(struct shallow_info *info)
/* Step 4, remove non-existent ones in "theirs" after getting the pack */
/*
 * Compact info->theirs in place, keeping only indices whose object
 * actually exists locally now; updates info->nr_theirs.
 */
void remove_nonexistent_theirs_shallow(struct shallow_info *info)
421 struct object_id *oid = info->shallow->oid;
423 trace_printf_key(&trace_shallow, "shallow: remove_nonexistent_theirs_shallow\n");
424 for (i = dst = 0; i < info->nr_theirs; i++) {
426 info->theirs[dst] = info->theirs[i];
427 if (has_object_file(oid + info->theirs[i]))
430 info->nr_theirs = dst;
/* per-commit slab mapping each commit to a bitmap of refs that reach it */
define_commit_slab(ref_bitmap, uint32_t *);
/* size of each bump-allocator pool used by paint_alloc() */
#define POOL_SIZE (512 * 1024)
438 struct ref_bitmap ref_bitmap;
/*
 * Bump-allocate one ref bitmap (nr_bits rounded up to whole uint32_t
 * words) from the current pool, starting a fresh POOL_SIZE pool when
 * the current one cannot fit it. Pools are tracked in info->pools and
 * freed in bulk by the caller; a single bitmap may never exceed
 * POOL_SIZE.
 */
static uint32_t *paint_alloc(struct paint_info *info)
447 unsigned nr = DIV_ROUND_UP(info->nr_bits, 32);
448 unsigned size = nr * sizeof(uint32_t);
450 if (!info->pool_count || size > info->end - info->free) {
451 if (size > POOL_SIZE)
452 die("BUG: pool size too small for %d in paint_alloc()",
455 REALLOC_ARRAY(info->pools, info->pool_count);
456 info->free = xmalloc(POOL_SIZE);
457 info->pools[info->pool_count - 1] = info->free;
458 info->end = info->free + POOL_SIZE;
 * Given a commit SHA-1, walk down to parents until either SEEN,
 * UNINTERESTING or BOTTOM is hit. Set the id-th bit in ref_bitmap for
 * all walked commits.
static void paint_down(struct paint_info *info, const struct object_id *oid,
474 struct commit_list *head = NULL;
475 int bitmap_nr = DIV_ROUND_UP(info->nr_bits, 32);
476 size_t bitmap_size = st_mult(sizeof(uint32_t), bitmap_nr);
477 struct commit *c = lookup_commit_reference_gently(oid, 1);
478 uint32_t *tmp; /* to be freed before return */
/* seed bitmap: only this ref's bit (id) set */
484 tmp = xmalloc(bitmap_size);
485 bitmap = paint_alloc(info);
486 memset(bitmap, 0, bitmap_size);
487 bitmap[id / 32] |= (1U << (id % 32));
488 commit_list_insert(c, &head);
/* iterative walk over pending commits in 'head' */
490 struct commit_list *p;
491 struct commit *c = pop_commit(&head);
492 uint32_t **refs = ref_bitmap_at(&info->ref_bitmap, c);
494 /* XXX check "UNINTERESTING" from pack bitmaps if available */
495 if (c->object.flags & (SEEN | UNINTERESTING))
498 c->object.flags |= SEEN;
/* merge this ref's bits into the commit's existing bitmap; allocate a
 * fresh bitmap only if the merge actually changed something */
503 memcpy(tmp, *refs, bitmap_size);
504 for (i = 0; i < bitmap_nr; i++)
506 if (memcmp(tmp, *refs, bitmap_size)) {
507 *refs = paint_alloc(info);
508 memcpy(*refs, tmp, bitmap_size);
/* do not walk past a BOTTOM (shallow boundary) commit */
512 if (c->object.flags & BOTTOM)
516 die("unable to parse commit %s",
517 oid_to_hex(&c->object.oid));
519 for (p = c->parents; p; p = p->next) {
520 if (p->item->object.flags & SEEN)
522 commit_list_insert(p->item, &head);
/* clear the walk's SEEN marks from all in-core commits before returning */
526 nr = get_max_object_index();
527 for (i = 0; i < nr; i++) {
528 struct object *o = get_indexed_object(i);
529 if (o && o->type == OBJ_COMMIT)
/* for-each-ref callback: mark the ref's commit and all its ancestors
 * UNINTERESTING so paint_down() stops at already-known history */
static int mark_uninteresting(const char *refname, const struct object_id *oid,
537 int flags, void *cb_data)
539 struct commit *commit = lookup_commit_reference_gently(oid, 1);
542 commit->object.flags |= UNINTERESTING;
543 mark_parents_uninteresting(commit);
/* forward declaration; defined below (step 7) */
static void post_assign_shallow(struct shallow_info *info,
548 struct ref_bitmap *ref_bitmap,
551 * Step 6(+7), associate shallow commits with new refs
553 * info->ref must be initialized before calling this function.
555 * If used is not NULL, it's an array of info->shallow->nr
556 * bitmaps. The n-th bit set in the m-th bitmap if ref[n] needs the
557 * m-th shallow commit from info->shallow.
559 * If used is NULL, "ours" and "theirs" are updated. And if ref_status
560 * is not NULL it's an array of ref->nr ints. ref_status[i] is true if
561 * the ref needs some shallow commits from either info->ours or
/*
 * Paint each new ref down through the commit graph (paint_down) and
 * record, per shallow commit, which refs reach it. See the step-6
 * comment above for the meaning of 'used' vs 'ref_status'.
 */
void assign_shallow_commits_to_refs(struct shallow_info *info,
565 uint32_t **used, int *ref_status)
567 struct object_id *oid = info->shallow->oid;
568 struct oid_array *ref = info->ref;
570 int *shallow, nr_shallow = 0;
571 struct paint_info pi;
573 trace_printf_key(&trace_shallow, "shallow: assign_shallow_commits_to_refs\n");
/* collect all candidate shallow indices: ours followed by theirs */
574 ALLOC_ARRAY(shallow, info->nr_ours + info->nr_theirs);
575 for (i = 0; i < info->nr_ours; i++)
576 shallow[nr_shallow++] = info->ours[i];
577 for (i = 0; i < info->nr_theirs; i++)
578 shallow[nr_shallow++] = info->theirs[i];
581 * Prepare the commit graph to track what refs can reach what
582 * (new) shallow commits.
/* reset walk flags on every in-core commit first */
584 nr = get_max_object_index();
585 for (i = 0; i < nr; i++) {
586 struct object *o = get_indexed_object(i);
587 if (!o || o->type != OBJ_COMMIT)
590 o->flags &= ~(UNINTERESTING | BOTTOM | SEEN);
593 memset(&pi, 0, sizeof(pi));
594 init_ref_bitmap(&pi.ref_bitmap);
595 pi.nr_bits = ref->nr;
598 * "--not --all" to cut short the traversal if new refs
599 * connect to old refs. If not (e.g. force ref updates) it'll
600 * have to go down to the current shallow commits.
602 head_ref(mark_uninteresting, NULL);
603 for_each_ref(mark_uninteresting, NULL);
605 /* Mark potential bottoms so we won't go out of bound */
606 for (i = 0; i < nr_shallow; i++) {
607 struct commit *c = lookup_commit(&oid[shallow[i]]);
608 c->object.flags |= BOTTOM;
/* paint each ref's bit down the graph */
611 for (i = 0; i < ref->nr; i++)
612 paint_down(&pi, ref->oid + i, i);
/* 'used' mode: copy out each shallow commit's ref bitmap */
615 int bitmap_size = DIV_ROUND_UP(pi.nr_bits, 32) * sizeof(uint32_t);
616 memset(used, 0, sizeof(*used) * info->shallow->nr);
617 for (i = 0; i < nr_shallow; i++) {
618 const struct commit *c = lookup_commit(&oid[shallow[i]]);
619 uint32_t **map = ref_bitmap_at(&pi.ref_bitmap, c);
621 used[shallow[i]] = xmemdupz(*map, bitmap_size);
624 * unreachable shallow commits are not removed from
625 * "ours" and "theirs". The user is supposed to run
626 * step 7 on every ref separately and not trust "ours"
627 * and "theirs" any more.
630 post_assign_shallow(info, &pi.ref_bitmap, ref_status);
/* free painting state: slab, then every bump-allocator pool */
632 clear_ref_bitmap(&pi.ref_bitmap);
633 for (i = 0; i < pi.pool_count; i++)
/* growable array of commit pointers, filled by add_ref() below */
struct commit_array {
640 struct commit **commits;
/* for-each-ref callback: append the ref's commit (if it peels to one)
 * to the commit_array passed as cb_data */
static int add_ref(const char *refname, const struct object_id *oid,
645 int flags, void *cb_data)
647 struct commit_array *ca = cb_data;
648 ALLOC_GROW(ca->commits, ca->nr + 1, ca->alloc);
649 ca->commits[ca->nr] = lookup_commit_reference_gently(oid, 1);
650 if (ca->commits[ca->nr])
/* For every bit set in 'bitmap' (one bit per ref), bump the
 * corresponding ref_status counter */
static void update_refstatus(int *ref_status, int nr, uint32_t *bitmap)
660 for (i = 0; i < nr; i++)
661 if (bitmap[i / 32] & (1U << (i % 32)))
 * Step 7, reachability test on "ours" at commit level
/*
 * Compact "theirs" and "ours" down to shallow commits some new ref
 * actually reaches (per ref_bitmap); for "ours" additionally require
 * failing the merge-base reachability test against all current refs.
 * Every surviving entry's ref bits are accumulated into ref_status.
 */
static void post_assign_shallow(struct shallow_info *info,
669 struct ref_bitmap *ref_bitmap,
672 struct object_id *oid = info->shallow->oid;
676 int bitmap_nr = DIV_ROUND_UP(info->ref->nr, 32);
677 struct commit_array ca;
679 trace_printf_key(&trace_shallow, "shallow: post_assign_shallow\n");
681 memset(ref_status, 0, sizeof(*ref_status) * info->ref->nr);
683 /* Remove unreachable shallow commits from "theirs" */
684 for (i = dst = 0; i < info->nr_theirs; i++) {
686 info->theirs[dst] = info->theirs[i];
687 c = lookup_commit(&oid[info->theirs[i]]);
688 bitmap = ref_bitmap_at(ref_bitmap, c);
691 for (j = 0; j < bitmap_nr; j++)
693 update_refstatus(ref_status, info->ref->nr, *bitmap);
698 info->nr_theirs = dst;
/* gather the commits of all current refs (HEAD + for-each-ref) */
700 memset(&ca, 0, sizeof(ca));
701 head_ref(add_ref, &ca);
702 for_each_ref(add_ref, &ca);
704 /* Remove unreachable shallow commits from "ours" */
705 for (i = dst = 0; i < info->nr_ours; i++) {
707 info->ours[dst] = info->ours[i];
708 c = lookup_commit(&oid[info->ours[i]]);
709 bitmap = ref_bitmap_at(ref_bitmap, c);
712 for (j = 0; j < bitmap_nr; j++)
714 /* Step 7, reachability test at commit level */
715 !in_merge_bases_many(c, ca.nr, ca.commits)) {
716 update_refstatus(ref_status, info->ref->nr, *bitmap);
726 /* (Delayed) step 7, reachability test at commit level */
727 int delayed_reachability_test(struct shallow_info *si, int c)
729 if (si->need_reachability_test[c]) {
730 struct commit *commit = lookup_commit(&si->shallow->oid[c]);
733 struct commit_array ca;
735 memset(&ca, 0, sizeof(ca));
736 head_ref(add_ref, &ca);
737 for_each_ref(add_ref, &ca);
738 si->commits = ca.commits;
739 si->nr_commits = ca.nr;
742 si->reachable[c] = in_merge_bases_many(commit,
745 si->need_reachability_test[c] = 0;
747 return si->reachable[c];