2 #include "repository.h"
5 #include "object-store.h"
11 #include "oid-array.h"
14 #include "commit-slab.h"
15 #include "list-objects.h"
16 #include "commit-reach.h"
/*
 * Point the repository at an alternate shallow file (path; NULL unsets).
 * Must run before is_repository_shallow() has cached its answer — the
 * BUG() enforces that ordering.  With !override, an already-configured
 * path takes precedence.
 * NOTE(review): this extract has gaps (braces / an early return are not
 * visible); verify against the complete file before editing.
 */
19 void set_alternate_shallow_file(struct repository *r, const char *path, int override)
21 if (r->parsed_objects->is_shallow != -1)
22 BUG("is_repository_shallow must not be called before set_alternate_shallow_file");
23 if (r->parsed_objects->alternate_shallow_file && !override)
25 free(r->parsed_objects->alternate_shallow_file);
26 r->parsed_objects->alternate_shallow_file = xstrdup_or_null(path);
/*
 * Record oid as a shallow boundary: install a commit graft with
 * nr_parent == -1 (the "no parents" shallow marker).  If the commit is
 * already parsed, its parent list is dropped so the cutoff takes
 * effect immediately.  Returns the result of register_commit_graft().
 * NOTE(review): lookup_commit() uses the_repository rather than the r
 * parameter — confirm this is intentional.
 */
29 int register_shallow(struct repository *r, const struct object_id *oid)
31 struct commit_graft *graft =
32 xmalloc(sizeof(struct commit_graft));
33 struct commit *commit = lookup_commit(the_repository, oid);
35 oidcpy(&graft->oid, oid);
36 graft->nr_parent = -1;
37 if (commit && commit->object.parsed)
38 commit->parents = NULL;
39 return register_commit_graft(r, graft, 0);
/*
 * Remove oid's graft entry from the_repository's sorted graft array by
 * shifting the tail left one slot and shrinking the count.
 * NOTE(review): the "pos < 0 → not found" early-return path is not
 * visible in this extract — assumed present between lines 44 and 47.
 */
42 int unregister_shallow(const struct object_id *oid)
44 int pos = commit_graft_pos(the_repository, oid->hash);
47 if (pos + 1 < the_repository->parsed_objects->grafts_nr)
48 MOVE_ARRAY(the_repository->parsed_objects->grafts + pos,
49 the_repository->parsed_objects->grafts + pos + 1,
50 the_repository->parsed_objects->grafts_nr - pos - 1);
51 the_repository->parsed_objects->grafts_nr--;
/*
 * Lazily determine whether r is a shallow repository.  The answer is
 * cached in r->parsed_objects->is_shallow (-1 = unknown, 0 = no,
 * 1 = yes).  On the first call this reads the shallow file (alternate
 * path if configured, else $GIT_DIR/shallow), registers every listed
 * commit via register_shallow(), and records stat validity so later
 * concurrent modification can be detected.
 */
55 int is_repository_shallow(struct repository *r)
59 const char *path = r->parsed_objects->alternate_shallow_file;
61 if (r->parsed_objects->is_shallow >= 0)
62 return r->parsed_objects->is_shallow;
/* NOTE(review): presumably guarded by "if (!path)" in the full file. */
65 path = git_path_shallow(r);
67 * fetch-pack sets '--shallow-file ""' as an indicator that no
68 * shallow file should be used. We could just open it and it
69 * will likely fail. But let's do an explicit check instead.
71 if (!*path || (fp = fopen(path, "r")) == NULL) {
72 stat_validity_clear(r->parsed_objects->shallow_stat);
73 r->parsed_objects->is_shallow = 0;
74 return r->parsed_objects->is_shallow;
76 stat_validity_update(r->parsed_objects->shallow_stat, fileno(fp));
77 r->parsed_objects->is_shallow = 1;
/* One hex object id per line; anything unparsable is a fatal error. */
79 while (fgets(buf, sizeof(buf), fp)) {
81 if (get_oid_hex(buf, &oid))
82 die("bad shallow line: %s", buf);
83 register_shallow(r, &oid);
86 return r->parsed_objects->is_shallow;
/*
 * Invalidate the cached shallow state so the next
 * is_repository_shallow() call re-reads the shallow file.
 */
89 static void reset_repository_shallow(struct repository *r)
91 r->parsed_objects->is_shallow = -1;
92 stat_validity_clear(r->parsed_objects->shallow_stat);
/*
 * Commit a locked rewrite of the shallow file, then invalidate the
 * in-core cache (the on-disk contents just changed).  Propagates the
 * commit_lock_file() result to the caller.
 */
95 int commit_shallow_file(struct repository *r, struct shallow_lock *lk)
97 int res = commit_lock_file(&lk->lock);
98 reset_repository_shallow(r);
/*
 * Abandon a locked rewrite of the shallow file and drop the in-core
 * cache, mirroring commit_shallow_file() for the failure path.
 */
102 void rollback_shallow_file(struct repository *r, struct shallow_lock *lk)
104 rollback_lock_file(&lk->lock);
105 reset_repository_shallow(r);
109 * TODO: use "int" elemtype instead of "int *" when/if commit-slab
110 * supports a "valid" flag.
/* Per-commit slab mapping each commit to its heap-allocated depth. */
112 define_commit_slab(commit_depth, int *);
/*
 * Walk from the given heads breadth/depth-first, tracking each
 * commit's distance in the commit_depth slab, and return the list of
 * commits that form the shallow boundary: those at >= depth (unless
 * INFINITE_DEPTH) or already recorded as shallow grafts.  Boundary
 * commits get shallow_flag; interior ones get not_shallow_flag.
 * NOTE(review): several lines (braces, stack pushes, tag-deref flags)
 * are missing from this extract — do not restructure without the
 * complete function.
 */
113 struct commit_list *get_shallow_commits(struct object_array *heads, int depth,
114 int shallow_flag, int not_shallow_flag)
116 int i = 0, cur_depth = 0;
117 struct commit_list *result = NULL;
118 struct object_array stack = OBJECT_ARRAY_INIT;
119 struct commit *commit = NULL;
120 struct commit_graft *graft;
121 struct commit_depth depths;
123 init_commit_depth(&depths);
124 while (commit || i < heads->nr || stack.nr) {
125 struct commit_list *p;
/* Take the next head, peeling tags down to the commit. */
129 commit = (struct commit *)
130 deref_tag(the_repository,
131 heads->objects[i++].item,
133 if (!commit || commit->object.type != OBJ_COMMIT) {
137 depth_slot = commit_depth_at(&depths, commit);
139 *depth_slot = xmalloc(sizeof(int));
/* Otherwise resume from a commit saved on the stack. */
143 commit = (struct commit *)
144 object_array_pop(&stack);
145 cur_depth = **commit_depth_at(&depths, commit);
148 parse_commit_or_die(commit);
/* Boundary: depth limit reached, or a pre-existing shallow graft. */
150 if ((depth != INFINITE_DEPTH && cur_depth >= depth) ||
151 (is_repository_shallow(the_repository) && !commit->parents &&
152 (graft = lookup_commit_graft(the_repository, &commit->object.oid)) != NULL &&
153 graft->nr_parent < 0)) {
154 commit_list_insert(commit, &result);
155 commit->object.flags |= shallow_flag;
159 commit->object.flags |= not_shallow_flag;
/* Propagate cur_depth+? to parents; keep the smaller depth on revisit. */
160 for (p = commit->parents, commit = NULL; p; p = p->next) {
161 int **depth_slot = commit_depth_at(&depths, p->item);
163 *depth_slot = xmalloc(sizeof(int));
164 **depth_slot = cur_depth;
166 if (cur_depth >= **depth_slot)
168 **depth_slot = cur_depth;
171 add_object_array(&p->item->object,
175 cur_depth = **commit_depth_at(&depths, commit);
/* Free every per-commit depth allocation before clearing the slab. */
179 for (i = 0; i < depths.slab_count; i++) {
184 for (j = 0; j < depths.slab_size; j++)
185 free(depths.slab[i][j]);
187 clear_commit_depth(&depths);
/*
 * traverse_commit_list() callback: prepend each visited commit to the
 * commit_list pointed to by data.
 */
192 static void show_commit(struct commit *commit, void *data)
194 commit_list_insert(commit, data);
198 * Given rev-list arguments, run rev-list. All reachable commits
199 * except border ones are marked with not_shallow_flag. Border commits
200 * are marked with shallow_flag. The list of border/shallow commits
/*
 * A "border" commit is a reachable commit with at least one
 * unreachable parent.  Result list ownership passes to the caller.
 */
203 struct commit_list *get_shallow_commits_by_rev_list(int ac, const char **av,
205 int not_shallow_flag)
207 struct commit_list *result = NULL, *p;
208 struct commit_list *not_shallow_list = NULL;
209 struct rev_info revs;
210 int both_flags = shallow_flag | not_shallow_flag;
213 * SHALLOW (excluded) and NOT_SHALLOW (included) should not be
214 * set at this point. But better be safe than sorry.
216 clear_object_flags(both_flags);
218 is_repository_shallow(the_repository); /* make sure shallows are read */
220 repo_init_revisions(the_repository, &revs, NULL);
221 save_commit_buffer = 0;
222 setup_revisions(ac, av, &revs, NULL);
224 if (prepare_revision_walk(&revs))
225 die("revision walk setup failed");
/* Fixed mojibake: "&not" had been corrupted into U+00AC here. */
226 traverse_commit_list(&revs, show_commit, NULL, &not_shallow_list);
228 if (!not_shallow_list)
229 die("no commits selected for shallow requests");
231 /* Mark all reachable commits as NOT_SHALLOW */
232 for (p = not_shallow_list; p; p = p->next)
233 p->item->object.flags |= not_shallow_flag;
236 * mark border commits SHALLOW + NOT_SHALLOW.
237 * We cannot clear NOT_SHALLOW right now. Imagine border
238 * commit A is processed first, then commit B, whose parent is
239 * A, later. If NOT_SHALLOW on A is cleared at step 1, B
240 * itself is considered border at step 2, which is incorrect.
242 for (p = not_shallow_list; p; p = p->next) {
243 struct commit *c = p->item;
244 struct commit_list *parent;
247 die("unable to parse commit %s",
248 oid_to_hex(&c->object.oid));
250 for (parent = c->parents; parent; parent = parent->next)
251 if (!(parent->item->object.flags & not_shallow_flag)) {
252 c->object.flags |= shallow_flag;
253 commit_list_insert(c, &result);
257 free_commit_list(not_shallow_list);
260 * Now we can clean up NOT_SHALLOW on border commits. Having
261 * both flags set can confuse the caller.
263 for (p = result; p; p = p->next) {
264 struct object *o = &p->item->object;
265 if ((o->flags & both_flags) == both_flags)
266 o->flags &= ~not_shallow_flag;
/*
 * Die if the on-disk shallow file changed since is_repository_shallow()
 * read it (stat-validity check); callers invoke this before rewriting
 * the file under lock.  BUGs if shallow state was never initialized.
 */
271 static void check_shallow_file_for_update(struct repository *r)
273 if (r->parsed_objects->is_shallow == -1)
274 BUG("shallow must be initialized by now");
276 if (!stat_validity_check(r->parsed_objects->shallow_stat,
277 git_path_shallow(r)))
278 die("shallow file has changed since we read it");
/*
 * Callback state for write_one_shallow(): output strbuf, whether to
 * frame lines with pack-protocol packets, and filter flags.
 * NOTE(review): other members (out, flags, count) are not visible in
 * this extract.
 */
285 struct write_shallow_data {
287 int use_pack_protocol;
/*
 * for_each_commit_graft() callback: emit one "shallow <oid>" line per
 * shallow graft (nr_parent == -1).  QUICK skips entries whose object
 * no longer exists; SEEN_ONLY skips (and with VERBOSE reports)
 * commits not marked SEEN by a prior reachability walk.  Output is
 * pack-protocol framed or plain depending on use_pack_protocol.
 */
292 static int write_one_shallow(const struct commit_graft *graft, void *cb_data)
294 struct write_shallow_data *data = cb_data;
295 const char *hex = oid_to_hex(&graft->oid);
296 if (graft->nr_parent != -1)
298 if (data->flags & QUICK) {
299 if (!has_object_file(&graft->oid))
301 } else if (data->flags & SEEN_ONLY) {
302 struct commit *c = lookup_commit(the_repository, &graft->oid);
303 if (!c || !(c->object.flags & SEEN)) {
304 if (data->flags & VERBOSE)
305 printf("Removing %s from .git/shallow\n",
306 oid_to_hex(&c->object.oid));
311 if (data->use_pack_protocol)
312 packet_buf_write(data->out, "shallow %s", hex);
314 strbuf_addstr(data->out, hex);
315 strbuf_addch(data->out, '\n');
/*
 * Serialize all shallow grafts into 'out' via write_one_shallow(),
 * then append any extra oids verbatim (one hex id per line).
 * NOTE(review): the 'flags' parameter, data initialization lines and
 * the return value are not visible in this extract.
 */
320 static int write_shallow_commits_1(struct strbuf *out, int use_pack_protocol,
321 const struct oid_array *extra,
324 struct write_shallow_data data;
327 data.use_pack_protocol = use_pack_protocol;
330 for_each_commit_graft(write_one_shallow, &data);
333 for (i = 0; i < extra->nr; i++) {
334 strbuf_addstr(out, oid_to_hex(extra->oid + i));
335 strbuf_addch(out, '\n');
/* Convenience wrapper: write_shallow_commits_1() with no filter flags. */
341 int write_shallow_commits(struct strbuf *out, int use_pack_protocol,
342 const struct oid_array *extra)
344 return write_shallow_commits_1(out, use_pack_protocol, extra, 0);
/*
 * Write the current shallow list (plus extra) to a temporary
 * "shallow_XXXXXX" file and return its path.  When there is nothing
 * to write, the empty-string convention applies (see trailing comment
 * fragment): is_repository_shallow() treats "" as "no shallow file".
 */
347 const char *setup_temporary_shallow(const struct oid_array *extra)
349 struct tempfile *temp;
350 struct strbuf sb = STRBUF_INIT;
352 if (write_shallow_commits(&sb, 0, extra)) {
353 temp = xmks_tempfile(git_path("shallow_XXXXXX"));
355 if (write_in_full(temp->fd, sb.buf, sb.len) < 0 ||
356 close_tempfile_gently(temp) < 0)
357 die_errno("failed to write to %s",
358 get_tempfile_path(temp));
360 return get_tempfile_path(temp);
363 * is_repository_shallow() sees empty string as "no shallow
/*
 * Take the shallow-file lock, verify nobody changed the file under
 * us, and write the current shallow list (plus extra) into the
 * lockfile.  On success *alternate_shallow_file points at the
 * lockfile path; when nothing was written it is set to "" (the
 * "no shallow file" sentinel understood by is_repository_shallow()).
 * The caller owns the lock and must commit or roll it back.
 */
369 void setup_alternate_shallow(struct shallow_lock *shallow_lock,
370 const char **alternate_shallow_file,
371 const struct oid_array *extra)
373 struct strbuf sb = STRBUF_INIT;
376 fd = hold_lock_file_for_update(&shallow_lock->lock,
377 git_path_shallow(the_repository),
379 check_shallow_file_for_update(the_repository);
380 if (write_shallow_commits(&sb, 0, extra)) {
381 if (write_in_full(fd, sb.buf, sb.len) < 0)
382 die_errno("failed to write to %s",
383 get_lock_file_path(&shallow_lock->lock));
384 *alternate_shallow_file = get_lock_file_path(&shallow_lock->lock);
387 * is_repository_shallow() sees empty string as "no
390 *alternate_shallow_file = "";
/*
 * for_each_commit_graft() callback: send "shallow <oid>" for each
 * shallow graft over the packet protocol.
 * NOTE(review): the line extracting fd from cb is missing from this
 * extract; the caller passes &fd as cb_data.
 */
394 static int advertise_shallow_grafts_cb(const struct commit_graft *graft, void *cb)
397 if (graft->nr_parent == -1)
398 packet_write_fmt(fd, "shallow %s\n", oid_to_hex(&graft->oid));
/*
 * Advertise all shallow boundaries on fd, but only when the
 * repository is actually shallow (this also forces the shallow file
 * to be read).
 */
402 void advertise_shallow_grafts(int fd)
404 if (!is_repository_shallow(the_repository))
406 for_each_commit_graft(advertise_shallow_grafts_cb, &fd);
410 * mark_reachable_objects() should have been run prior to this and all
411 * reachable commits marked as "SEEN", except when quick_prune is non-zero,
412 * in which case lines are excised from the shallow file if they refer to
413 * commits that do not exist (any longer).
/*
 * Rewrite $GIT_DIR/shallow keeping only still-relevant entries.
 * PRUNE_SHOW_ONLY prints the surviving entries instead of writing;
 * PRUNE_QUICK switches to the existence-based filter.  An empty
 * result removes the shallow file entirely (unlink + rollback).
 */
415 void prune_shallow(unsigned options)
417 struct shallow_lock shallow_lock = SHALLOW_LOCK_INIT;
418 struct strbuf sb = STRBUF_INIT;
419 unsigned flags = SEEN_ONLY;
422 if (options & PRUNE_QUICK)
425 if (options & PRUNE_SHOW_ONLY) {
427 write_shallow_commits_1(&sb, 0, NULL, flags);
431 fd = hold_lock_file_for_update(&shallow_lock.lock,
432 git_path_shallow(the_repository),
434 check_shallow_file_for_update(the_repository);
435 if (write_shallow_commits_1(&sb, 0, NULL, flags)) {
436 if (write_in_full(fd, sb.buf, sb.len) < 0)
437 die_errno("failed to write to %s",
438 get_lock_file_path(&shallow_lock.lock));
439 commit_shallow_file(the_repository, &shallow_lock);
441 unlink(git_path_shallow(the_repository));
442 rollback_shallow_file(the_repository, &shallow_lock);
/* Trace key for GIT_TRACE_SHALLOW debug output used below. */
447 struct trace_key trace_shallow = TRACE_KEY_INIT(SHALLOW);
450 * Step 1, split sender shallow commits into "ours" and "theirs"
451 * Step 2, clean "ours" based on .git/shallow
/*
 * Partition the sender's shallow oids (sa) by index: "ours" = objects
 * we have that are also registered shallow grafts locally; "theirs" =
 * objects we do not have.  info is zeroed first; ours/theirs arrays
 * are sized pessimistically at sa->nr each.
 * NOTE(review): lines storing sa into info and some braces are not
 * visible in this extract.
 */
453 void prepare_shallow_info(struct shallow_info *info, struct oid_array *sa)
456 trace_printf_key(&trace_shallow, "shallow: prepare_shallow_info\n");
457 memset(info, 0, sizeof(*info));
461 ALLOC_ARRAY(info->ours, sa->nr);
462 ALLOC_ARRAY(info->theirs, sa->nr);
463 for (i = 0; i < sa->nr; i++) {
464 if (has_object_file(sa->oid + i)) {
465 struct commit_graft *graft;
466 graft = lookup_commit_graft(the_repository,
468 if (graft && graft->nr_parent < 0)
470 info->ours[info->nr_ours++] = i;
472 info->theirs[info->nr_theirs++] = i;
/*
 * Release memory held by a shallow_info.
 * NOTE(review): the body is entirely missing from this extract.
 */
476 void clear_shallow_info(struct shallow_info *info)
482 /* Step 4, remove non-existent ones in "theirs" after getting the pack */
/*
 * Compact info->theirs in place, keeping only indices whose objects
 * now exist locally; nr_theirs shrinks accordingly.
 * NOTE(review): the loop body's ordering here reads oddly (assignment
 * before the existence test) because intervening lines are missing
 * from this extract — consult the complete file.
 */
484 void remove_nonexistent_theirs_shallow(struct shallow_info *info)
486 struct object_id *oid = info->shallow->oid;
488 trace_printf_key(&trace_shallow, "shallow: remove_nonexistent_theirs_shallow\n");
489 for (i = dst = 0; i < info->nr_theirs; i++) {
491 info->theirs[dst] = info->theirs[i];
492 if (has_object_file(oid + info->theirs[i]))
495 info->nr_theirs = dst;
/* Slab mapping each commit to a bitmap of which refs reach it. */
498 define_commit_slab(ref_bitmap, uint32_t *);
500 #define POOL_SIZE (512 * 1024)
/* paint_info: ref_bitmap slab plus a pool allocator for the bitmaps.
 * NOTE(review): the struct's other members (pools, free, end,
 * pool_count, nr_bits) are not visible in this extract. */
503 struct ref_bitmap ref_bitmap;
/*
 * Bump-allocate one ref bitmap (nr_bits rounded up to whole uint32_t
 * words) out of POOL_SIZE-byte pools; a new pool is grabbed when the
 * current one cannot fit the request.  Pool memory is freed in bulk
 * by the caller (see assign_shallow_commits_to_refs cleanup).
 */
510 static uint32_t *paint_alloc(struct paint_info *info)
512 unsigned nr = DIV_ROUND_UP(info->nr_bits, 32);
513 unsigned size = nr * sizeof(uint32_t);
515 if (!info->pool_count || size > info->end - info->free) {
516 if (size > POOL_SIZE)
517 BUG("pool size too small for %d in paint_alloc()",
520 REALLOC_ARRAY(info->pools, info->pool_count);
521 info->free = xmalloc(POOL_SIZE);
522 info->pools[info->pool_count - 1] = info->free;
523 info->end = info->free + POOL_SIZE;
531 * Given a commit SHA-1, walk down to parents until either SEEN,
532 * UNINTERESTING or BOTTOM is hit. Set the id-th bit in ref_bitmap for
533 * all walked commits.
535 static void paint_down(struct paint_info *info, const struct object_id *oid,
539 struct commit_list *head = NULL;
540 int bitmap_nr = DIV_ROUND_UP(info->nr_bits, 32);
541 size_t bitmap_size = st_mult(sizeof(uint32_t), bitmap_nr);
542 struct commit *c = lookup_commit_reference_gently(the_repository, oid,
544 uint32_t *tmp; /* to be freed before return */
/* Seed bitmap: only bit 'id' set, for the starting ref. */
550 tmp = xmalloc(bitmap_size);
551 bitmap = paint_alloc(info);
552 memset(bitmap, 0, bitmap_size);
553 bitmap[id / 32] |= (1U << (id % 32));
554 commit_list_insert(c, &head);
556 struct commit_list *p;
557 struct commit *c = pop_commit(&head);
558 uint32_t **refs = ref_bitmap_at(&info->ref_bitmap, c);
560 /* XXX check "UNINTERESTING" from pack bitmaps if available */
561 if (c->object.flags & (SEEN | UNINTERESTING))
564 c->object.flags |= SEEN;
/* Merge this walk's bits into the commit's existing bitmap; only
 * allocate a fresh (pool) bitmap when the union actually differs. */
569 memcpy(tmp, *refs, bitmap_size);
570 for (i = 0; i < bitmap_nr; i++)
572 if (memcmp(tmp, *refs, bitmap_size)) {
573 *refs = paint_alloc(info);
574 memcpy(*refs, tmp, bitmap_size);
/* BOTTOM marks old shallow cutoffs — do not walk past them. */
578 if (c->object.flags & BOTTOM)
582 die("unable to parse commit %s",
583 oid_to_hex(&c->object.oid));
585 for (p = c->parents; p; p = p->next) {
586 if (p->item->object.flags & SEEN)
588 commit_list_insert(p->item, &head);
/* Clear SEEN from all in-core commits so the next paint_down()
 * call starts from a clean slate. */
592 nr = get_max_object_index();
593 for (i = 0; i < nr; i++) {
594 struct object *o = get_indexed_object(i);
595 if (o && o->type == OBJ_COMMIT)
/*
 * for_each_ref() callback: mark the commit each existing ref points
 * at (and, transitively, its ancestors) UNINTERESTING so paint_down()
 * stops early when new refs connect to old history.
 */
602 static int mark_uninteresting(const char *refname, const struct object_id *oid,
603 int flags, void *cb_data)
605 struct commit *commit = lookup_commit_reference_gently(the_repository,
609 commit->object.flags |= UNINTERESTING;
610 mark_parents_uninteresting(commit);
/* Forward declaration; defined below ("Step 7"). */
614 static void post_assign_shallow(struct shallow_info *info,
615 struct ref_bitmap *ref_bitmap,
618 * Step 6(+7), associate shallow commits with new refs
620 * info->ref must be initialized before calling this function.
622 * If used is not NULL, it's an array of info->shallow->nr
623 * bitmaps. The n-th bit set in the m-th bitmap if ref[n] needs the
624 * m-th shallow commit from info->shallow.
626 * If used is NULL, "ours" and "theirs" are updated. And if ref_status
627 * is not NULL it's an array of ref->nr ints. ref_status[i] is true if
628 * the ref needs some shallow commits from either info->ours or
631 void assign_shallow_commits_to_refs(struct shallow_info *info,
632 uint32_t **used, int *ref_status)
634 struct object_id *oid = info->shallow->oid;
635 struct oid_array *ref = info->ref;
637 int *shallow, nr_shallow = 0;
638 struct paint_info pi;
640 trace_printf_key(&trace_shallow, "shallow: assign_shallow_commits_to_refs\n");
/* Gather all candidate shallow indices: ours then theirs. */
641 ALLOC_ARRAY(shallow, info->nr_ours + info->nr_theirs);
642 for (i = 0; i < info->nr_ours; i++)
643 shallow[nr_shallow++] = info->ours[i];
644 for (i = 0; i < info->nr_theirs; i++)
645 shallow[nr_shallow++] = info->theirs[i];
648 * Prepare the commit graph to track what refs can reach what
649 * (new) shallow commits.
651 nr = get_max_object_index();
652 for (i = 0; i < nr; i++) {
653 struct object *o = get_indexed_object(i);
654 if (!o || o->type != OBJ_COMMIT)
657 o->flags &= ~(UNINTERESTING | BOTTOM | SEEN);
660 memset(&pi, 0, sizeof(pi));
661 init_ref_bitmap(&pi.ref_bitmap);
662 pi.nr_bits = ref->nr;
665 * "--not --all" to cut short the traversal if new refs
666 * connect to old refs. If not (e.g. force ref updates) it'll
667 * have to go down to the current shallow commits.
669 head_ref(mark_uninteresting, NULL);
670 for_each_ref(mark_uninteresting, NULL);
672 /* Mark potential bottoms so we won't go out of bound */
673 for (i = 0; i < nr_shallow; i++) {
674 struct commit *c = lookup_commit(the_repository,
676 c->object.flags |= BOTTOM;
/* Paint each new ref's reachability bit down the graph. */
679 for (i = 0; i < ref->nr; i++)
680 paint_down(&pi, ref->oid + i, i);
/* Mode 1: report per-shallow-commit ref bitmaps in 'used'. */
683 int bitmap_size = DIV_ROUND_UP(pi.nr_bits, 32) * sizeof(uint32_t);
684 memset(used, 0, sizeof(*used) * info->shallow->nr);
685 for (i = 0; i < nr_shallow; i++) {
686 const struct commit *c = lookup_commit(the_repository,
688 uint32_t **map = ref_bitmap_at(&pi.ref_bitmap, c);
690 used[shallow[i]] = xmemdupz(*map, bitmap_size);
693 * unreachable shallow commits are not removed from
694 * "ours" and "theirs". The user is supposed to run
695 * step 7 on every ref separately and not trust "ours"
696 * and "theirs" any more.
/* Mode 2 (used == NULL): update ours/theirs and ref_status. */
699 post_assign_shallow(info, &pi.ref_bitmap, ref_status);
/* Release the bitmap slab and all allocator pools. */
701 clear_ref_bitmap(&pi.ref_bitmap);
702 for (i = 0; i < pi.pool_count; i++)
/* Growable array of commits collected from refs (see add_ref()).
 * NOTE(review): nr/alloc members not visible in this extract. */
708 struct commit_array {
709 struct commit **commits;
/*
 * for_each_ref() callback: append the commit a ref peels to (if any)
 * to the commit_array in cb_data; non-commit refs are skipped.
 */
713 static int add_ref(const char *refname, const struct object_id *oid,
714 int flags, void *cb_data)
716 struct commit_array *ca = cb_data;
717 ALLOC_GROW(ca->commits, ca->nr + 1, ca->alloc);
718 ca->commits[ca->nr] = lookup_commit_reference_gently(the_repository,
720 if (ca->commits[ca->nr])
/*
 * For every bit set in 'bitmap' (nr bits), bump the corresponding
 * ref_status counter — i.e. record that ref i still needs some
 * shallow commit.
 */
725 static void update_refstatus(int *ref_status, int nr, uint32_t *bitmap)
730 for (i = 0; i < nr; i++)
731 if (bitmap[i / 32] & (1U << (i % 32)))
736 * Step 7, reachability test on "ours" at commit level
/*
 * Drop shallow commits no new ref reaches: "theirs" is filtered by
 * ref bitmap alone; "ours" additionally requires a merge-base
 * reachability test against all current ref tips (collected via
 * add_ref).  ref_status, when non-NULL, counts how many shallow
 * commits each ref still needs.
 * NOTE(review): several guard/brace lines are missing from this
 * extract; the dst-compaction pattern mirrors
 * remove_nonexistent_theirs_shallow().
 */
738 static void post_assign_shallow(struct shallow_info *info,
739 struct ref_bitmap *ref_bitmap,
742 struct object_id *oid = info->shallow->oid;
746 int bitmap_nr = DIV_ROUND_UP(info->ref->nr, 32);
747 struct commit_array ca;
749 trace_printf_key(&trace_shallow, "shallow: post_assign_shallow\n");
751 memset(ref_status, 0, sizeof(*ref_status) * info->ref->nr);
753 /* Remove unreachable shallow commits from "theirs" */
754 for (i = dst = 0; i < info->nr_theirs; i++) {
756 info->theirs[dst] = info->theirs[i];
757 c = lookup_commit(the_repository, &oid[info->theirs[i]]);
758 bitmap = ref_bitmap_at(ref_bitmap, c);
761 for (j = 0; j < bitmap_nr; j++)
763 update_refstatus(ref_status, info->ref->nr, *bitmap);
768 info->nr_theirs = dst;
/* Collect all current ref tips for the commit-level test. */
770 memset(&ca, 0, sizeof(ca));
771 head_ref(add_ref, &ca);
772 for_each_ref(add_ref, &ca);
774 /* Remove unreachable shallow commits from "ours" */
775 for (i = dst = 0; i < info->nr_ours; i++) {
777 info->ours[dst] = info->ours[i];
778 c = lookup_commit(the_repository, &oid[info->ours[i]]);
779 bitmap = ref_bitmap_at(ref_bitmap, c);
782 for (j = 0; j < bitmap_nr; j++)
784 /* Step 7, reachability test at commit level */
785 !in_merge_bases_many(c, ca.nr, ca.commits)) {
786 update_refstatus(ref_status, info->ref->nr, *bitmap);
796 /* (Delayed) step 7, reachability test at commit level */
/*
 * Lazily answer "is shallow commit c an ancestor of some current
 * ref?".  The ref-tip array (si->commits) is built once on first use
 * and cached on si; per-commit answers are memoized in si->reachable
 * with si->need_reachability_test as the valid flag.
 */
797 int delayed_reachability_test(struct shallow_info *si, int c)
799 if (si->need_reachability_test[c]) {
800 struct commit *commit = lookup_commit(the_repository,
801 &si->shallow->oid[c]);
804 struct commit_array ca;
806 memset(&ca, 0, sizeof(ca));
807 head_ref(add_ref, &ca);
808 for_each_ref(add_ref, &ca);
809 si->commits = ca.commits;
810 si->nr_commits = ca.nr;
813 si->reachable[c] = in_merge_bases_many(commit,
816 si->need_reachability_test[c] = 0;
818 return si->reachable[c];