#include "refs-internal.h"
#include "packed-backend.h"
#include "../iterator.h"
#include "../lockfile.h"
 * Don't use mmap() at all for reading `packed-refs`.
 * Can use mmap() for reading `packed-refs`, but the file must
 * not remain mmapped. This is the usual option on Windows,
 * where you cannot rename a new version of a file onto a file
 * that is currently mmapped.
 * It is OK to leave the `packed-refs` file mmapped while
 * arbitrary other code is running.
static enum mmap_strategy mmap_strategy = MMAP_NONE;
#elif defined(MMAP_PREVENTS_DELETE)
static enum mmap_strategy mmap_strategy = MMAP_TEMPORARY;
static enum mmap_strategy mmap_strategy = MMAP_OK;
struct packed_ref_store;
 * A `snapshot` represents one snapshot of a `packed-refs` file.
 * Normally, this will be a mmapped view of the contents of the
 * `packed-refs` file at the time the snapshot was created. However,
 * if the `packed-refs` file was not sorted, this might point at heap
 * memory holding the contents of the `packed-refs` file with its
 * records sorted by refname.
 * `snapshot` instances are reference counted (via
 * `acquire_snapshot()` and `release_snapshot()`). This is to prevent
 * an instance from disappearing while an iterator is still iterating
 * over it. Instances are garbage collected when their `referrers`
 * count goes to zero.
 * The most recent `snapshot`, if available, is referenced by the
 * `packed_ref_store`. Its freshness is checked whenever
 * `get_snapshot()` is called; if the existing snapshot is obsolete, a
 * new snapshot is taken.
	 * A back-pointer to the packed_ref_store with which this
	 * snapshot is associated:
	struct packed_ref_store *refs;

	/* Is the `packed-refs` file currently mmapped? */

	 * The contents of the `packed-refs` file:
	 * - buf -- a pointer to the start of the memory
	 * - start -- a pointer to the first byte of actual references
	 *   (i.e., after the header line, if one is present)
	 * - eof -- a pointer just past the end of the reference
	 *   contents
	 * If the `packed-refs` file was already sorted, `buf` points
	 * at the mmapped contents of the file. If not, it points at
	 * heap-allocated memory containing the contents, sorted. If
	 * there were no contents (e.g., because the file didn't
	 * exist), `buf`, `start`, and `eof` are all NULL.
	char *buf, *start, *eof;

	 * What is the peeled state of the `packed-refs` file that
	 * this snapshot represents? (This is usually determined from
	 * the file's header.)
	enum { PEELED_NONE, PEELED_TAGS, PEELED_FULLY } peeled;

	 * Count of references to this instance, including the pointer
	 * from `packed_ref_store::snapshot`, if any. The instance
	 * will not be freed as long as the reference count is
	 * nonzero.
	unsigned int referrers;

	 * The metadata of the `packed-refs` file from which this
	 * snapshot was created, used to tell if the file has been
	 * replaced since we read it.
	struct stat_validity validity;
 * A `ref_store` representing references stored in a `packed-refs`
 * file. It implements the `ref_store` interface, though it has some
 * limitations:
 * - It cannot store symbolic references.
 * - It cannot store reflogs.
 * - It does not support reference renaming (though it could).
 * On the other hand, it can be locked outside of a reference
 * transaction. In that case, it remains locked even after the
 * transaction is done and the new `packed-refs` file is activated.
struct packed_ref_store {
	struct ref_store base;

	unsigned int store_flags;

	/* The path of the "packed-refs" file: */

	 * A snapshot of the values read from the `packed-refs` file,
	 * if it might still be current; otherwise, NULL.
	struct snapshot *snapshot;

	 * Lock used for the "packed-refs" file. Note that this (and
	 * thus the enclosing `packed_ref_store`) must not be freed.
	struct lock_file lock;

	 * Temporary file used when rewriting new contents to the
	 * "packed-refs" file. Note that this (and thus the enclosing
	 * `packed_ref_store`) must not be freed.
	struct tempfile *tempfile;
 * Increment the reference count of `*snapshot`.
static void acquire_snapshot(struct snapshot *snapshot)
	snapshot->referrers++;

 * If the buffer in `snapshot` is active, then either munmap the
 * memory and close the file, or free the memory. Then set the buffer
 * pointers to NULL.
static void clear_snapshot_buffer(struct snapshot *snapshot)
	if (snapshot->mmapped) {
		if (munmap(snapshot->buf, snapshot->eof - snapshot->buf))
			die_errno("error unmapping packed-refs file %s",
				  snapshot->refs->path);
		snapshot->mmapped = 0;
	snapshot->buf = snapshot->start = snapshot->eof = NULL;
 * Decrease the reference count of `*snapshot`. If it goes to zero,
 * free `*snapshot` and return true; otherwise return false.
static int release_snapshot(struct snapshot *snapshot)
	if (!--snapshot->referrers) {
		stat_validity_clear(&snapshot->validity);
		clear_snapshot_buffer(snapshot);
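/*
 * Illustrative sketch (not part of the original file): how the
 * reference-counting pair above is meant to be used. Any code that
 * keeps a long-lived pointer to a snapshot -- the iterator defined
 * below and `packed_ref_store::snapshot` are the two real users --
 * brackets that pointer's lifetime with the two calls:
 *
 *	struct snapshot *s = get_snapshot(refs);
 *	acquire_snapshot(s);        // take our own reference
 *	... read records between s->start and s->eof ...
 *	release_snapshot(s);        // frees s if ours was the last reference
 *
 * `refs` here stands for some `struct packed_ref_store *`; the local
 * variable name is made up for the example.
 */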
struct ref_store *packed_ref_store_create(const char *path,
					  unsigned int store_flags)
	struct packed_ref_store *refs = xcalloc(1, sizeof(*refs));
	struct ref_store *ref_store = (struct ref_store *)refs;
	base_ref_store_init(ref_store, &refs_be_packed);
	refs->store_flags = store_flags;
	refs->path = xstrdup(path);
 * Downcast `ref_store` to `packed_ref_store`. Die if `ref_store` is
 * not a `packed_ref_store`. Also die if `packed_ref_store` doesn't
 * support at least the flags specified in `required_flags`. `caller`
 * is used in any necessary error messages.
static struct packed_ref_store *packed_downcast(struct ref_store *ref_store,
						unsigned int required_flags,
	struct packed_ref_store *refs;

	if (ref_store->be != &refs_be_packed)
		die("BUG: ref_store is type \"%s\" not \"packed\" in %s",
		    ref_store->be->name, caller);

	refs = (struct packed_ref_store *)ref_store;

	if ((refs->store_flags & required_flags) != required_flags)
		die("BUG: unallowed operation (%s), requires %x, has %x\n",
		    caller, required_flags, refs->store_flags);
static void clear_snapshot(struct packed_ref_store *refs)
	if (refs->snapshot) {
		struct snapshot *snapshot = refs->snapshot;

		refs->snapshot = NULL;
		release_snapshot(snapshot);

static NORETURN void die_unterminated_line(const char *path,
					   const char *p, size_t len)
		die("unterminated line in %s: %.*s", path, (int)len, p);
		die("unterminated line in %s: %.75s...", path, p);
static NORETURN void die_invalid_line(const char *path,
				      const char *p, size_t len)
	const char *eol = memchr(p, '\n', len);

		die_unterminated_line(path, p, len);
	else if (eol - p < 80)
		die("unexpected line in %s: %.*s", path, (int)(eol - p), p);
		die("unexpected line in %s: %.75s...", path, p);

struct snapshot_record {

static int cmp_packed_ref_records(const void *v1, const void *v2)
	const struct snapshot_record *e1 = v1, *e2 = v2;
	const char *r1 = e1->start + GIT_SHA1_HEXSZ + 1;
	const char *r2 = e2->start + GIT_SHA1_HEXSZ + 1;
			return *r2 == '\n' ? 0 : -1;
				return (unsigned char)*r1 < (unsigned char)*r2 ? -1 : +1;
 * Compare a snapshot record at `rec` to the specified NUL-terminated
 * refname.
static int cmp_record_to_refname(const char *rec, const char *refname)
	const char *r1 = rec + GIT_SHA1_HEXSZ + 1;
	const char *r2 = refname;
			return (unsigned char)*r1 < (unsigned char)*r2 ? -1 : +1;
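/*
 * Illustrative sketch (not part of the original file): the record
 * layout that the two comparison helpers above rely on. Each record
 * is one LF-terminated line of the form
 *
 *	<hex object name> SP <refname> LF
 *
 * optionally followed by a peeled line
 *
 *	'^' <hex object name> LF
 *
 * so, with GIT_SHA1_HEXSZ == 40, for example:
 *
 *	3f786850e387550fdab836ed7e6dc881de23001b refs/heads/master
 *	d7049b44e0b4ab8f59b688f350ca4ff4b5c1dc01 refs/tags/v1.0
 *	^8d1b1db1d7bf4d0cbea7b8a7e4e75f7d0a6f3a49
 *
 * which is why the refname is found at a fixed offset of
 * GIT_SHA1_HEXSZ + 1 bytes from the start of a record. The object
 * names above are made-up values for illustration only.
 */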
 * `snapshot->buf` is not known to be sorted. Check whether it is, and
 * if not, sort it into new memory and munmap/free the old storage.
static void sort_snapshot(struct snapshot *snapshot)
	struct snapshot_record *records = NULL;
	size_t alloc = 0, nr = 0;
	const char *pos, *eof, *eol;
	char *new_buffer, *dst;

	pos = snapshot->start;

	 * Initialize records based on a crude estimate of the number
	 * of references in the file (we'll grow it below if needed):
	ALLOC_GROW(records, len / 80 + 20, alloc);

		eol = memchr(pos, '\n', eof - pos);
			/* The safety check should prevent this. */
			BUG("unterminated line found in packed-refs");
		if (eol - pos < GIT_SHA1_HEXSZ + 2)
			die_invalid_line(snapshot->refs->path,
		if (eol < eof && *eol == '^') {
			 * Keep any peeled line together with its
			 * reference.
			const char *peeled_start = eol;

			eol = memchr(peeled_start, '\n', eof - peeled_start);
				/* The safety check should prevent this. */
				BUG("unterminated peeled line found in packed-refs");

		ALLOC_GROW(records, nr + 1, alloc);
		records[nr].start = pos;
		records[nr].len = eol - pos;
		    cmp_packed_ref_records(&records[nr - 2],
					   &records[nr - 1]) >= 0)

	/* We need to sort the memory. First we sort the records array: */
	QSORT(records, nr, cmp_packed_ref_records);

	 * Allocate a new chunk of memory, and copy the old memory to
	 * the new in the order indicated by `records` (not bothering
	 * with the header line):
	new_buffer = xmalloc(len);
	for (dst = new_buffer, i = 0; i < nr; i++) {
		memcpy(dst, records[i].start, records[i].len);
		dst += records[i].len;

	 * Now munmap the old buffer and use the sorted buffer in its
	 * place:
	clear_snapshot_buffer(snapshot);
	snapshot->buf = snapshot->start = new_buffer;
	snapshot->eof = new_buffer + len;
 * Return a pointer to the start of the record that contains the
 * character `*p` (which must be within the buffer). If no other
 * record start is found, return `buf`.
static const char *find_start_of_record(const char *buf, const char *p)
	while (p > buf && (p[-1] != '\n' || p[0] == '^'))

 * Return a pointer to the start of the record following the record
 * that contains `*p`. If none is found before `end`, return `end`.
static const char *find_end_of_record(const char *p, const char *end)
	while (++p < end && (p[-1] != '\n' || p[0] == '^'))
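/*
 * Illustrative sketch (not part of the original file): given the two
 * helpers above and a buffer like
 *
 *	aaaa... refs/heads/maint\n
 *	bbbb... refs/tags/v1.0\n
 *	^cccc...\n
 *	dddd... refs/tags/v2.0\n
 *
 * a pointer `p` anywhere inside the "refs/tags/v1.0" record
 * (including its '^' peeled line) yields
 *
 *	find_start_of_record(buf, p)  -> start of the "bbbb..." line
 *	find_end_of_record(p, eof)    -> start of the "dddd..." line
 *
 * because a record boundary is an LF that is *not* followed by '^'.
 * The abbreviated object names are placeholders for this example.
 */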
 * We want to be able to compare mmapped reference records quickly,
 * without totally parsing them. We can do so because the records are
 * LF-terminated, and the refname should start exactly (GIT_SHA1_HEXSZ
 * + 1) bytes past the beginning of the record.
 * But what if the `packed-refs` file contains garbage? We're willing
 * to tolerate not detecting the problem, as long as we don't produce
 * totally garbled output (we can't afford to check the integrity of
 * the whole file during every Git invocation). But we do want to be
 * sure that we never read past the end of the buffer in memory and
 * perform an illegal memory access.
 * Guarantee that minimum level of safety by verifying that the last
 * record in the file is LF-terminated, and that it has at least
 * (GIT_SHA1_HEXSZ + 1) characters before the LF. Die if either of
 * these checks fails.
static void verify_buffer_safe(struct snapshot *snapshot)
	const char *start = snapshot->start;
	const char *eof = snapshot->eof;
	const char *last_line;

	last_line = find_start_of_record(start, eof - 1);
	if (*(eof - 1) != '\n' || eof - last_line < GIT_SHA1_HEXSZ + 2)
		die_invalid_line(snapshot->refs->path,
				 last_line, eof - last_line);
 * Depending on `mmap_strategy`, either mmap or read the contents of
 * the `packed-refs` file into the snapshot. Return 1 if the file
 * existed and was read, or 0 if the file was absent or empty. Die on
 * errors.
static int load_contents(struct snapshot *snapshot)
	fd = open(snapshot->refs->path, O_RDONLY);
		if (errno == ENOENT) {
			 * This is OK; it just means that no
			 * "packed-refs" file has been written yet,
			 * which is equivalent to it being empty,
			 * which is its state when initialized with
			 * zeros.
			die_errno("couldn't read %s", snapshot->refs->path);

	stat_validity_update(&snapshot->validity, fd);

	if (fstat(fd, &st) < 0)
		die_errno("couldn't stat %s", snapshot->refs->path);
	size = xsize_t(st.st_size);
	} else if (mmap_strategy == MMAP_NONE) {
		snapshot->buf = xmalloc(size);
		bytes_read = read_in_full(fd, snapshot->buf, size);
		if (bytes_read < 0 || bytes_read != size)
			die_errno("couldn't read %s", snapshot->refs->path);
		snapshot->mmapped = 0;
		snapshot->buf = xmmap(NULL, size, PROT_READ, MAP_PRIVATE, fd, 0);
		snapshot->mmapped = 1;
	snapshot->start = snapshot->buf;
	snapshot->eof = snapshot->buf + size;
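/*
 * Illustrative sketch (not part of the original file): how the three
 * `mmap_strategy` values defined near the top of this file play out
 * here and in `create_snapshot()`:
 *
 *	MMAP_NONE      -- malloc a buffer and read() the whole file
 *	                  into it; `snapshot->mmapped` stays 0.
 *	MMAP_TEMPORARY -- mmap the file here, but `create_snapshot()`
 *	                  later copies the contents to the heap and
 *	                  munmaps, so nothing stays mapped.
 *	MMAP_OK        -- mmap the file and leave it mapped for the
 *	                  lifetime of the snapshot.
 *
 * This summarizes behaviour already implemented in this file; it is
 * not additional configuration.
 */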
 * Find the place in `snapshot->buf` where the record for `refname`
 * starts. If `mustexist` is true and the reference doesn't
 * exist, then return NULL. If `mustexist` is false and the reference
 * doesn't exist, then return the point where that reference would be
 * inserted, or `snapshot->eof` (which might be NULL) if it would be
 * inserted at the end of the file. In the latter mode, `refname`
 * doesn't have to be a proper reference name; for example, one could
 * search for "refs/replace/" to find the start of any replace
 * references.
 * The record is sought using a binary search, so `snapshot->buf` must
 * be sorted.
static const char *find_reference_location(struct snapshot *snapshot,
					   const char *refname, int mustexist)
	 * This is not *quite* a garden-variety binary search, because
	 * the data we're searching is made up of records, and we
	 * always need to find the beginning of a record to do a
	 * comparison. A "record" here is one line for the reference
	 * itself and zero or one peel lines that start with '^'. Our
	 * loop invariant is described in the next two comments.

	 * A pointer to the character at the start of a record whose
	 * preceding records all have reference names that come
	 * *before* `refname`.
	const char *lo = snapshot->start;

	 * A pointer to the first character of a record whose
	 * reference name comes *after* `refname`.
	const char *hi = snapshot->eof;

	const char *mid, *rec;

		mid = lo + (hi - lo) / 2;
		rec = find_start_of_record(lo, mid);
		cmp = cmp_record_to_refname(rec, refname);
			lo = find_end_of_record(mid, hi);
		} else if (cmp > 0) {
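/*
 * Illustrative sketch (not part of the original file): one step of
 * the search above, with records for refs/heads/maint,
 * refs/heads/master and refs/tags/v1.0 and a query of
 * "refs/heads/master":
 *
 *	lo = start of refs/heads/maint's record, hi = eof;
 *	mid lands somewhere inside refs/heads/master's record;
 *	find_start_of_record() backs up to that record's first byte;
 *	cmp_record_to_refname() returns 0, so the record is found.
 *
 * With cmp < 0, lo instead advances past the record containing mid;
 * with cmp > 0, hi moves back to that record's start. The refnames
 * are example data only.
 */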
 * Create a newly-allocated `snapshot` of the `packed-refs` file in
 * its current state and return it. The return value will already have
 * its reference count incremented.
 * A comment line of the form "# pack-refs with: " may contain zero or
 * more traits. We interpret the traits as follows:
 * Neither `peeled` nor `fully-peeled`:
 *      Probably no references are peeled. But if the file contains a
 *      peeled value for a reference, we will use it.
 * `peeled`:
 *      References under "refs/tags/", if they *can* be peeled, *are*
 *      peeled in this file. References outside of "refs/tags/" are
 *      probably not peeled even if they could have been, but if we find
 *      a peeled value for such a reference we will use it.
 * `fully-peeled`:
 *      All references in the file that can be peeled are peeled.
 *      Conversely (and this is more important), any references in the
 *      file for which no peeled value is recorded are not peelable. This
 *      trait should typically be written alongside "peeled" for
 *      compatibility with older clients, but we do not require it
 *      (i.e., "peeled" is a no-op if "fully-peeled" is set).
 * `sorted`:
 *      The references in this file are known to be sorted by refname.
static struct snapshot *create_snapshot(struct packed_ref_store *refs)
	struct snapshot *snapshot = xcalloc(1, sizeof(*snapshot));

	snapshot->refs = refs;
	acquire_snapshot(snapshot);
	snapshot->peeled = PEELED_NONE;

	if (!load_contents(snapshot))

	/* If the file has a header line, process it: */
	if (snapshot->buf < snapshot->eof && *snapshot->buf == '#') {
		struct string_list traits = STRING_LIST_INIT_NODUP;

		eol = memchr(snapshot->buf, '\n',
			     snapshot->eof - snapshot->buf);
			die_unterminated_line(refs->path,
					      snapshot->eof - snapshot->buf);

		tmp = xmemdupz(snapshot->buf, eol - snapshot->buf);

		if (!skip_prefix(tmp, "# pack-refs with:", (const char **)&p))
			die_invalid_line(refs->path,
					 snapshot->eof - snapshot->buf);

		string_list_split_in_place(&traits, p, ' ', -1);

		if (unsorted_string_list_has_string(&traits, "fully-peeled"))
			snapshot->peeled = PEELED_FULLY;
		else if (unsorted_string_list_has_string(&traits, "peeled"))
			snapshot->peeled = PEELED_TAGS;

		sorted = unsorted_string_list_has_string(&traits, "sorted");

		/* perhaps other traits later as well */

		/* The "+ 1" is for the LF character. */
		snapshot->start = eol + 1;

		string_list_clear(&traits, 0);

	verify_buffer_safe(snapshot);

		sort_snapshot(snapshot);

		 * Reordering the records might have moved a short one
		 * to the end of the buffer, so verify the buffer's
		 * safety again:
		verify_buffer_safe(snapshot);

	if (mmap_strategy != MMAP_OK && snapshot->mmapped) {
		 * We don't want to leave the file mmapped, so we are
		 * forced to make a copy now:
		size_t size = snapshot->eof - snapshot->start;
		char *buf_copy = xmalloc(size);

		memcpy(buf_copy, snapshot->start, size);
		clear_snapshot_buffer(snapshot);
		snapshot->buf = snapshot->start = buf_copy;
		snapshot->eof = buf_copy + size;
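/*
 * Illustrative sketch (not part of the original file): a small
 * `packed-refs` file of the kind create_snapshot() parses, shown with
 * made-up object names:
 *
 *	# pack-refs with: peeled fully-peeled sorted 
 *	1111111111111111111111111111111111111111 refs/heads/master
 *	2222222222222222222222222222222222222222 refs/tags/v1.0
 *	^3333333333333333333333333333333333333333
 *
 * The header's traits set `snapshot->peeled` (here PEELED_FULLY) and
 * `sorted`; the '^' line records the peeled value of the annotated
 * tag on the line above it.
 */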
 * Check that `refs->snapshot` (if present) still reflects the
 * contents of the `packed-refs` file. If not, clear the snapshot.
static void validate_snapshot(struct packed_ref_store *refs)
	if (refs->snapshot &&
	    !stat_validity_check(&refs->snapshot->validity, refs->path))
		clear_snapshot(refs);

 * Get the `snapshot` for the specified packed_ref_store, creating and
 * populating it if it hasn't been read before or if the file has been
 * changed (according to its `validity` field) since it was last read.
 * On the other hand, if we hold the lock, then assume that the file
 * hasn't been changed out from under us, so skip the extra `stat()`
 * call in `stat_validity_check()`. This function does *not* increase
 * the snapshot's reference count on behalf of the caller.
static struct snapshot *get_snapshot(struct packed_ref_store *refs)
	if (!is_lock_file_locked(&refs->lock))
		validate_snapshot(refs);

		refs->snapshot = create_snapshot(refs);

	return refs->snapshot;
static int packed_read_raw_ref(struct ref_store *ref_store,
			       const char *refname, unsigned char *sha1,
			       struct strbuf *referent, unsigned int *type)
	struct packed_ref_store *refs =
		packed_downcast(ref_store, REF_STORE_READ, "read_raw_ref");
	struct snapshot *snapshot = get_snapshot(refs);

	rec = find_reference_location(snapshot, refname, 1);

		/* refname is not a packed reference. */

	if (get_sha1_hex(rec, sha1))
		die_invalid_line(refs->path, rec, snapshot->eof - rec);

	*type = REF_ISPACKED;

 * This value is set in `base.flags` if the peeled value of the
 * current reference is known. In that case, `peeled` contains the
 * correct peeled value for the reference, which might be `null_sha1`
 * if the reference is not a tag or if it is broken.
#define REF_KNOWS_PEELED 0x40
 * An iterator over a snapshot of a `packed-refs` file.
struct packed_ref_iterator {
	struct ref_iterator base;

	struct snapshot *snapshot;

	/* The current position in the snapshot's buffer: */

	/* The end of the part of the buffer that will be iterated over: */

	/* Scratch space for current values: */
	struct object_id oid, peeled;
	struct strbuf refname_buf;

 * Move the iterator to the next record in the snapshot, regardless
 * of whether the record is actually required by the current
 * iteration. Adjust the fields in `iter` and return `ITER_OK` or
 * `ITER_DONE`. This function does not free the iterator in the case
static int next_record(struct packed_ref_iterator *iter)
	const char *p = iter->pos, *eol;

	strbuf_reset(&iter->refname_buf);

	if (iter->pos == iter->eof)

	iter->base.flags = REF_ISPACKED;

	if (iter->eof - p < GIT_SHA1_HEXSZ + 2 ||
	    parse_oid_hex(p, &iter->oid, &p) ||
		die_invalid_line(iter->snapshot->refs->path,
				 iter->pos, iter->eof - iter->pos);

	eol = memchr(p, '\n', iter->eof - p);
		die_unterminated_line(iter->snapshot->refs->path,
				      iter->pos, iter->eof - iter->pos);

	strbuf_add(&iter->refname_buf, p, eol - p);
	iter->base.refname = iter->refname_buf.buf;

	if (check_refname_format(iter->base.refname, REFNAME_ALLOW_ONELEVEL)) {
		if (!refname_is_safe(iter->base.refname))
			die("packed refname is dangerous: %s",
		iter->base.flags |= REF_BAD_NAME | REF_ISBROKEN;
	if (iter->snapshot->peeled == PEELED_FULLY ||
	    (iter->snapshot->peeled == PEELED_TAGS &&
	     starts_with(iter->base.refname, "refs/tags/")))
		iter->base.flags |= REF_KNOWS_PEELED;

	if (iter->pos < iter->eof && *iter->pos == '^') {
		if (iter->eof - p < GIT_SHA1_HEXSZ + 1 ||
		    parse_oid_hex(p, &iter->peeled, &p) ||
			die_invalid_line(iter->snapshot->refs->path,
					 iter->pos, iter->eof - iter->pos);

		 * Regardless of what the file header said, we
		 * definitely know the value of *this* reference. But
		 * we suppress it if the reference is broken:
		if ((iter->base.flags & REF_ISBROKEN)) {
			oidclr(&iter->peeled);
			iter->base.flags &= ~REF_KNOWS_PEELED;
			iter->base.flags |= REF_KNOWS_PEELED;
		oidclr(&iter->peeled);
static int packed_ref_iterator_advance(struct ref_iterator *ref_iterator)
	struct packed_ref_iterator *iter =
		(struct packed_ref_iterator *)ref_iterator;

	while ((ok = next_record(iter)) == ITER_OK) {
		if (iter->flags & DO_FOR_EACH_PER_WORKTREE_ONLY &&
		    ref_type(iter->base.refname) != REF_TYPE_PER_WORKTREE)

		if (!(iter->flags & DO_FOR_EACH_INCLUDE_BROKEN) &&
		    !ref_resolves_to_object(iter->base.refname, &iter->oid,

	if (ref_iterator_abort(ref_iterator) != ITER_DONE)

static int packed_ref_iterator_peel(struct ref_iterator *ref_iterator,
				    struct object_id *peeled)
	struct packed_ref_iterator *iter =
		(struct packed_ref_iterator *)ref_iterator;

	if ((iter->base.flags & REF_KNOWS_PEELED)) {
		oidcpy(peeled, &iter->peeled);
		return is_null_oid(&iter->peeled) ? -1 : 0;
	} else if ((iter->base.flags & (REF_ISBROKEN | REF_ISSYMREF))) {
		return !!peel_object(iter->oid.hash, peeled->hash);
static int packed_ref_iterator_abort(struct ref_iterator *ref_iterator)
	struct packed_ref_iterator *iter =
		(struct packed_ref_iterator *)ref_iterator;

	strbuf_release(&iter->refname_buf);
	release_snapshot(iter->snapshot);
	base_ref_iterator_free(ref_iterator);

static struct ref_iterator_vtable packed_ref_iterator_vtable = {
	packed_ref_iterator_advance,
	packed_ref_iterator_peel,
	packed_ref_iterator_abort
static struct ref_iterator *packed_ref_iterator_begin(
		struct ref_store *ref_store,
		const char *prefix, unsigned int flags)
	struct packed_ref_store *refs;
	struct snapshot *snapshot;
	struct packed_ref_iterator *iter;
	struct ref_iterator *ref_iterator;
	unsigned int required_flags = REF_STORE_READ;

	if (!(flags & DO_FOR_EACH_INCLUDE_BROKEN))
		required_flags |= REF_STORE_ODB;
	refs = packed_downcast(ref_store, required_flags, "ref_iterator_begin");

	 * Note that `get_snapshot()` internally checks whether the
	 * snapshot is up to date with what is on disk, and re-reads
	 * it if not.
	snapshot = get_snapshot(refs);

	if (prefix && *prefix)
		start = find_reference_location(snapshot, prefix, 0);
		start = snapshot->start;

	if (start == snapshot->eof)
		return empty_ref_iterator_begin();

	iter = xcalloc(1, sizeof(*iter));
	ref_iterator = &iter->base;
	base_ref_iterator_init(ref_iterator, &packed_ref_iterator_vtable, 1);

	iter->snapshot = snapshot;
	acquire_snapshot(snapshot);

	iter->eof = snapshot->eof;
	strbuf_init(&iter->refname_buf, 0);

	iter->base.oid = &iter->oid;

	if (prefix && *prefix)
		/* Stop iteration after we've gone *past* prefix: */
		ref_iterator = prefix_ref_iterator_begin(ref_iterator, prefix, 0);
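/*
 * Illustrative sketch (not part of the original file): this function
 * is not called directly by clients; it is reached through the
 * generic ref-store API via the vtable at the bottom of this file,
 * roughly like
 *
 *	refs_for_each_ref_in(store, "refs/tags/", show_one_ref, NULL);
 *
 * where `store` is the repository's ref_store and `show_one_ref` is a
 * hypothetical each_ref_fn callback. That call ends up here with
 * prefix = "refs/tags/", so iteration starts at the first record at
 * or after that prefix, and the wrapping prefix_ref_iterator stops it
 * once records no longer match. Treat the call above as pseudocode;
 * the exact plumbing varies between Git versions.
 */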
 * Write an entry to the packed-refs file for the specified refname.
 * If peeled is non-NULL, write it as the entry's peeled value. On
 * error, return a nonzero value and leave errno set at the value left
 * by the failing call to `fprintf()`.
static int write_packed_entry(FILE *fh, const char *refname,
			      const unsigned char *sha1,
			      const unsigned char *peeled)
	if (fprintf(fh, "%s %s\n", sha1_to_hex(sha1), refname) < 0 ||
	    (peeled && fprintf(fh, "^%s\n", sha1_to_hex(peeled)) < 0))
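/*
 * Illustrative sketch (not part of the original file): for a tag
 * whose peeled value is known, the two fprintf() calls above emit
 *
 *	2222222222222222222222222222222222222222 refs/tags/v1.0
 *	^3333333333333333333333333333333333333333
 *
 * and for a non-tag ref (peeled == NULL) only the first line is
 * written. The object names are placeholders.
 */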
int packed_refs_lock(struct ref_store *ref_store, int flags, struct strbuf *err)
	struct packed_ref_store *refs =
		packed_downcast(ref_store, REF_STORE_WRITE | REF_STORE_MAIN,
	static int timeout_configured = 0;
	static int timeout_value = 1000;

	if (!timeout_configured) {
		git_config_get_int("core.packedrefstimeout", &timeout_value);
		timeout_configured = 1;

	 * Note that we close the lockfile immediately because we
	 * don't write new content to it, but rather to a separate
	 * tempfile.
	if (hold_lock_file_for_update_timeout(
			    flags, timeout_value) < 0) {
		unable_to_lock_message(refs->path, errno, err);

	if (close_lock_file_gently(&refs->lock)) {
		strbuf_addf(err, "unable to close %s: %s", refs->path, strerror(errno));
		rollback_lock_file(&refs->lock);

	 * Now that we hold the `packed-refs` lock, make sure that our
	 * snapshot matches the current version of the file. Normally
	 * `get_snapshot()` does that for us, but that function
	 * assumes that when the file is locked, any existing snapshot
	 * is still valid. We've just locked the file, but it might
	 * have changed the moment *before* we locked it.
	validate_snapshot(refs);

	 * Now make sure that the packed-refs file as it exists in the
	 * locked state is loaded into the snapshot:
void packed_refs_unlock(struct ref_store *ref_store)
	struct packed_ref_store *refs = packed_downcast(
			REF_STORE_READ | REF_STORE_WRITE,
			"packed_refs_unlock");

	if (!is_lock_file_locked(&refs->lock))
		die("BUG: packed_refs_unlock() called when not locked");
	rollback_lock_file(&refs->lock);

int packed_refs_is_locked(struct ref_store *ref_store)
	struct packed_ref_store *refs = packed_downcast(
			REF_STORE_READ | REF_STORE_WRITE,
			"packed_refs_is_locked");

	return is_lock_file_locked(&refs->lock);
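/*
 * Illustrative sketch (not part of the original file): the lock/unlock
 * pair above lets a caller hold the `packed-refs` lock across several
 * operations, independently of any single transaction:
 *
 *	struct strbuf err = STRBUF_INIT;
 *
 *	if (packed_refs_lock(ref_store, 0, &err))
 *		die("%s", err.buf);
 *	... prepare and commit one or more packed-refs transactions ...
 *	packed_refs_unlock(ref_store);
 *
 * While the lock is held, packed_transaction_prepare() sees it via
 * is_lock_file_locked() and does not take (or later release) the lock
 * itself; see `own_lock` in the transaction code below.
 */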
 * The packed-refs header line that we write out. Perhaps other traits
 * will be added later.
 * Note that earlier versions of Git used to parse these traits by
 * looking for " trait " in the line. For this reason, the space after
 * the colon and the trailing space are required.
static const char PACKED_REFS_HEADER[] =
	"# pack-refs with: peeled fully-peeled sorted \n";

static int packed_init_db(struct ref_store *ref_store, struct strbuf *err)
	/* Nothing to do. */
 * Write the packed refs from the current snapshot to the packed-refs
 * tempfile, incorporating any changes from `updates`. `updates` must
 * be a sorted string list whose keys are the refnames and whose util
 * values are `struct ref_update *`. On error, roll back the tempfile,
 * write an error message to `err`, and return a nonzero value.
 * The `packed-refs` file must be locked before calling this function
 * and will remain locked when it is done.
static int write_with_updates(struct packed_ref_store *refs,
			      struct string_list *updates,
	struct ref_iterator *iter = NULL;
	struct strbuf sb = STRBUF_INIT;
	char *packed_refs_path;

	if (!is_lock_file_locked(&refs->lock))
		die("BUG: write_with_updates() called while unlocked");

	 * If packed-refs is a symlink, we want to overwrite the
	 * symlinked-to file, not the symlink itself. Also, put the
	 * staging file next to it:
	packed_refs_path = get_locked_file_path(&refs->lock);
	strbuf_addf(&sb, "%s.new", packed_refs_path);
	free(packed_refs_path);
	refs->tempfile = create_tempfile(sb.buf);
	if (!refs->tempfile) {
		strbuf_addf(err, "unable to create file %s: %s",
			    sb.buf, strerror(errno));
		strbuf_release(&sb);
	strbuf_release(&sb);

	out = fdopen_tempfile(refs->tempfile, "w");
		strbuf_addf(err, "unable to fdopen packed-refs tempfile: %s",

	if (fprintf(out, "%s", PACKED_REFS_HEADER) < 0)

	 * We iterate in parallel through the current list of refs and
	 * the list of updates, processing an entry from at least one
	 * of the lists each time through the loop. When the current
	 * list of refs is exhausted, set iter to NULL. When the list
	 * of updates is exhausted, leave i set to updates->nr.
	iter = packed_ref_iterator_begin(&refs->base, "",
					 DO_FOR_EACH_INCLUDE_BROKEN);
	if ((ok = ref_iterator_advance(iter)) != ITER_OK)
	while (iter || i < updates->nr) {
		struct ref_update *update = NULL;

		if (i >= updates->nr) {
			update = updates->items[i].util;
			cmp = strcmp(iter->refname, update->refname);

			 * There is both an old value and an update
			 * for this reference. Check the old value if
			 * necessary:
			if ((update->flags & REF_HAVE_OLD)) {
				if (is_null_oid(&update->old_oid)) {
					strbuf_addf(err, "cannot update ref '%s': "
						    "reference already exists",
				} else if (oidcmp(&update->old_oid, iter->oid)) {
					strbuf_addf(err, "cannot update ref '%s': "
						    "is at %s but expected %s",
						    oid_to_hex(iter->oid),
						    oid_to_hex(&update->old_oid));

			/* Now figure out what to use for the new value: */
			if ((update->flags & REF_HAVE_NEW)) {
				 * The update takes precedence. Skip
				 * the iterator over the unneeded
				 * value.
				if ((ok = ref_iterator_advance(iter)) != ITER_OK)

				 * The update doesn't actually want to
				 * change anything. We're done with it.

		} else if (cmp > 0) {
			 * There is no old value but there is an
			 * update for this reference. Make sure that
			 * the update didn't expect an existing value:
			if ((update->flags & REF_HAVE_OLD) &&
			    !is_null_oid(&update->old_oid)) {
				strbuf_addf(err, "cannot update ref '%s': "
					    "reference is missing but expected %s",
					    oid_to_hex(&update->old_oid));

			/* Pass the old reference through. */
			struct object_id peeled;
			int peel_error = ref_iterator_peel(iter, &peeled);

			if (write_packed_entry(out, iter->refname,
					       peel_error ? NULL : peeled.hash))

			if ((ok = ref_iterator_advance(iter)) != ITER_OK)
		} else if (is_null_oid(&update->new_oid)) {
			 * The update wants to delete the reference,
			 * and the reference either didn't exist or we
			 * have already skipped it. So we're done with
			 * the update (and don't have to write
			 * anything).
			struct object_id peeled;
			int peel_error = peel_object(update->new_oid.hash,

			if (write_packed_entry(out, update->refname,
					       update->new_oid.hash,
					       peel_error ? NULL : peeled.hash))
	if (ok != ITER_DONE) {
		strbuf_addstr(err, "unable to write packed-refs file: "
			      "error iterating over old contents");

	if (close_tempfile_gently(refs->tempfile)) {
		strbuf_addf(err, "error closing file %s: %s",
			    get_tempfile_path(refs->tempfile),
		strbuf_release(&sb);
		delete_tempfile(&refs->tempfile);

	strbuf_addf(err, "error writing to %s: %s",
		    get_tempfile_path(refs->tempfile), strerror(errno));

	ref_iterator_abort(iter);

	delete_tempfile(&refs->tempfile);
int is_packed_transaction_needed(struct ref_store *ref_store,
				 struct ref_transaction *transaction)
	struct packed_ref_store *refs = packed_downcast(
			"is_packed_transaction_needed");
	struct strbuf referent = STRBUF_INIT;

	if (!is_lock_file_locked(&refs->lock))
		BUG("is_packed_transaction_needed() called while unlocked");

	 * We're only going to bother returning false for the common,
	 * trivial case that references are only being deleted, their
	 * old values are not being checked, and the old `packed-refs`
	 * file doesn't contain any of those reference(s). This gives
	 * false positives for some other cases that could
	 * theoretically be optimized away:
	 * 1. It could be that the old value is being verified without
	 *    setting a new value. In this case, we could verify the
	 *    old value here and skip the update if it agrees. If it
	 *    disagrees, we could either let the update go through
	 *    (the actual commit would re-detect and report the
	 *    problem), or come up with a way of reporting such an
	 *    error to *our* caller.
	 * 2. It could be that a new value is being set, but that it
	 *    is identical to the current packed value of the
	 *    reference.
	 * Neither of these cases will come up in the current code,
	 * because the only caller of this function passes to it a
	 * transaction that only includes `delete` updates with no
	 * `old_id`. Even if that ever changes, false positives only
	 * cause an optimization to be missed; they do not affect
	 * correctness.

	 * Start with the cheap checks that don't require old
	 * reference values to be read:
	for (i = 0; i < transaction->nr; i++) {
		struct ref_update *update = transaction->updates[i];

		if (update->flags & REF_HAVE_OLD)
			/* Have to check the old value -> needed. */

		if ((update->flags & REF_HAVE_NEW) && !is_null_oid(&update->new_oid))
			/* Have to set a new value -> needed. */

	 * The transaction isn't checking any old values nor is it
	 * setting any nonzero new values, so it still might be able
	 * to be skipped. Now do the more expensive check: the update
	 * is needed if any of the updates is a delete, and the old
	 * `packed-refs` file contains a value for that reference.
	for (i = 0; i < transaction->nr; i++) {
		struct ref_update *update = transaction->updates[i];
		struct object_id oid;

		if (!(update->flags & REF_HAVE_NEW))
			 * This reference isn't being deleted -> not
			 * needed.

		if (!refs_read_raw_ref(ref_store, update->refname,
				       oid.hash, &referent, &type) ||
			 * We have to actually delete that reference
			 * -> this transaction is needed.

	strbuf_release(&referent);
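/*
 * Illustrative sketch (not part of the original file): the typical
 * caller is the loose-ref backend deleting refs. If a transaction
 * only deletes "refs/heads/topic" and that name has no entry in the
 * current packed-refs snapshot, the function above returns 0 and the
 * caller can skip rewriting the whole packed-refs file. If the name
 * *is* packed, or if any update checks an old value or sets a new
 * one, it returns 1 and the full rewrite goes ahead.
 */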
struct packed_transaction_backend_data {
	/* True iff the transaction owns the packed-refs lock. */

	struct string_list updates;

static void packed_transaction_cleanup(struct packed_ref_store *refs,
				       struct ref_transaction *transaction)
	struct packed_transaction_backend_data *data = transaction->backend_data;

		string_list_clear(&data->updates, 0);

		if (is_tempfile_active(refs->tempfile))
			delete_tempfile(&refs->tempfile);

		if (data->own_lock && is_lock_file_locked(&refs->lock)) {
			packed_refs_unlock(&refs->base);

		transaction->backend_data = NULL;

	transaction->state = REF_TRANSACTION_CLOSED;
static int packed_transaction_prepare(struct ref_store *ref_store,
				      struct ref_transaction *transaction,
	struct packed_ref_store *refs = packed_downcast(
			REF_STORE_READ | REF_STORE_WRITE | REF_STORE_ODB,
			"ref_transaction_prepare");
	struct packed_transaction_backend_data *data;
	int ret = TRANSACTION_GENERIC_ERROR;

	 * Note that we *don't* skip transactions with zero updates,
	 * because such a transaction might be executed for the side
	 * effect of ensuring that all of the references are peeled or
	 * ensuring that the `packed-refs` file is sorted. If the
	 * caller wants to optimize away empty transactions, it should
	 * do so itself.
	data = xcalloc(1, sizeof(*data));
	string_list_init(&data->updates, 0);

	transaction->backend_data = data;

	 * Stick the updates in a string list by refname so that we
	 * can sort them:
	for (i = 0; i < transaction->nr; i++) {
		struct ref_update *update = transaction->updates[i];
		struct string_list_item *item =
			string_list_append(&data->updates, update->refname);

		/* Store a pointer to update in item->util: */
		item->util = update;
	string_list_sort(&data->updates);

	if (ref_update_reject_duplicates(&data->updates, err))

	if (!is_lock_file_locked(&refs->lock)) {
		if (packed_refs_lock(ref_store, 0, err))

	if (write_with_updates(refs, &data->updates, err))

	transaction->state = REF_TRANSACTION_PREPARED;

	packed_transaction_cleanup(refs, transaction);
static int packed_transaction_abort(struct ref_store *ref_store,
				    struct ref_transaction *transaction,
	struct packed_ref_store *refs = packed_downcast(
			REF_STORE_READ | REF_STORE_WRITE | REF_STORE_ODB,
			"ref_transaction_abort");

	packed_transaction_cleanup(refs, transaction);

static int packed_transaction_finish(struct ref_store *ref_store,
				     struct ref_transaction *transaction,
	struct packed_ref_store *refs = packed_downcast(
			REF_STORE_READ | REF_STORE_WRITE | REF_STORE_ODB,
			"ref_transaction_finish");
	int ret = TRANSACTION_GENERIC_ERROR;
	char *packed_refs_path;

	clear_snapshot(refs);

	packed_refs_path = get_locked_file_path(&refs->lock);
	if (rename_tempfile(&refs->tempfile, packed_refs_path)) {
		strbuf_addf(err, "error replacing %s: %s",
			    refs->path, strerror(errno));

	free(packed_refs_path);
	packed_transaction_cleanup(refs, transaction);

static int packed_initial_transaction_commit(struct ref_store *ref_store,
					     struct ref_transaction *transaction,
	return ref_transaction_commit(transaction, err);
static int packed_delete_refs(struct ref_store *ref_store, const char *msg,
			      struct string_list *refnames, unsigned int flags)
	struct packed_ref_store *refs =
		packed_downcast(ref_store, REF_STORE_WRITE, "delete_refs");
	struct strbuf err = STRBUF_INIT;
	struct ref_transaction *transaction;
	struct string_list_item *item;

	(void)refs; /* We need the check above, but don't use the variable */

	 * Since we don't check the references' old_oids, the
	 * individual updates can't fail, so we can pack all of the
	 * updates into a single transaction.
	transaction = ref_store_transaction_begin(ref_store, &err);

	for_each_string_list_item(item, refnames) {
		if (ref_transaction_delete(transaction, item->string, NULL,
					   flags, msg, &err)) {
			warning(_("could not delete reference %s: %s"),
				item->string, err.buf);

	ret = ref_transaction_commit(transaction, &err);

		if (refnames->nr == 1)
			error(_("could not delete reference %s: %s"),
			      refnames->items[0].string, err.buf);
			error(_("could not delete references: %s"), err.buf);

	ref_transaction_free(transaction);
	strbuf_release(&err);
static int packed_pack_refs(struct ref_store *ref_store, unsigned int flags)
	 * Packed refs are already packed. It might be that loose refs
	 * are packed *into* a packed refs store, but that is done by
	 * updating the packed references via a transaction.
static int packed_create_symref(struct ref_store *ref_store,
				const char *refname, const char *target,
	die("BUG: packed reference store does not support symrefs");

static int packed_rename_ref(struct ref_store *ref_store,
			     const char *oldrefname, const char *newrefname,
	die("BUG: packed reference store does not support renaming references");

static int packed_copy_ref(struct ref_store *ref_store,
			   const char *oldrefname, const char *newrefname,
	die("BUG: packed reference store does not support copying references");

static struct ref_iterator *packed_reflog_iterator_begin(struct ref_store *ref_store)
	return empty_ref_iterator_begin();

static int packed_for_each_reflog_ent(struct ref_store *ref_store,
				      const char *refname,
				      each_reflog_ent_fn fn, void *cb_data)

static int packed_for_each_reflog_ent_reverse(struct ref_store *ref_store,
					      const char *refname,
					      each_reflog_ent_fn fn,

static int packed_reflog_exists(struct ref_store *ref_store,
				const char *refname)

static int packed_create_reflog(struct ref_store *ref_store,
				const char *refname, int force_create,
	die("BUG: packed reference store does not support reflogs");

static int packed_delete_reflog(struct ref_store *ref_store,
				const char *refname)

static int packed_reflog_expire(struct ref_store *ref_store,
				const char *refname, const unsigned char *sha1,
				reflog_expiry_prepare_fn prepare_fn,
				reflog_expiry_should_prune_fn should_prune_fn,
				reflog_expiry_cleanup_fn cleanup_fn,
				void *policy_cb_data)
struct ref_storage_be refs_be_packed = {
	packed_ref_store_create,
	packed_transaction_prepare,
	packed_transaction_finish,
	packed_transaction_abort,
	packed_initial_transaction_commit,
	packed_create_symref,
	packed_ref_iterator_begin,
	packed_read_raw_ref,
	packed_reflog_iterator_begin,
	packed_for_each_reflog_ent,
	packed_for_each_reflog_ent_reverse,
	packed_reflog_exists,
	packed_create_reflog,
	packed_delete_reflog,
	packed_reflog_expire
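/*
 * Illustrative sketch (not part of the original file): `refs_be_packed`
 * is the vtable through which the generic refs machinery drives this
 * backend. In this era of the code the files backend keeps a
 * packed_ref_store as a sub-store, creating it with something like
 *
 *	refs->packed_ref_store =
 *		packed_ref_store_create(path, store_flags);
 *
 * (the surrounding variable names are approximate), and reads and
 * writes of packed refs then dispatch through the function pointers
 * listed above.
 */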