3 #include "refs-internal.h"
5 #include "../iterator.h"
7 /* FIXME: This declaration shouldn't be here */
8 void read_loose_refs(const char *dirname, struct ref_dir *dir);
10 void add_entry_to_dir(struct ref_dir *dir, struct ref_entry *entry)
12 ALLOC_GROW(dir->entries, dir->nr + 1, dir->alloc);
13 dir->entries[dir->nr++] = entry;
14 /* optimize for the case that entries are added in order */
16 (dir->nr == dir->sorted + 1 &&
17 strcmp(dir->entries[dir->nr - 2]->name,
18 dir->entries[dir->nr - 1]->name) < 0))
19 dir->sorted = dir->nr;
22 struct ref_dir *get_ref_dir(struct ref_entry *entry)
25 assert(entry->flag & REF_DIR);
26 dir = &entry->u.subdir;
27 if (entry->flag & REF_INCOMPLETE) {
28 read_loose_refs(entry->name, dir);
31 * Manually add refs/bisect, which, being
32 * per-worktree, might not appear in the directory
33 * listing for refs/ in the main repo.
35 if (!strcmp(entry->name, "refs/")) {
36 int pos = search_ref_dir(dir, "refs/bisect/", 12);
38 struct ref_entry *child_entry;
39 child_entry = create_dir_entry(dir->ref_store,
42 add_entry_to_dir(dir, child_entry);
45 entry->flag &= ~REF_INCOMPLETE;
50 struct ref_entry *create_ref_entry(const char *refname,
51 const unsigned char *sha1, int flag,
54 struct ref_entry *ref;
57 check_refname_format(refname, REFNAME_ALLOW_ONELEVEL))
58 die("Reference has invalid format: '%s'", refname);
59 FLEX_ALLOC_STR(ref, name, refname);
60 hashcpy(ref->u.value.oid.hash, sha1);
61 oidclr(&ref->u.value.peeled);
66 static void clear_ref_dir(struct ref_dir *dir);
68 void free_ref_entry(struct ref_entry *entry)
70 if (entry->flag & REF_DIR) {
72 * Do not use get_ref_dir() here, as that might
73 * trigger the reading of loose refs.
75 clear_ref_dir(&entry->u.subdir);
/*
 * Clear and free all entries in dir, recursively.
 */
83 static void clear_ref_dir(struct ref_dir *dir)
86 for (i = 0; i < dir->nr; i++)
87 free_ref_entry(dir->entries[i]);
89 dir->sorted = dir->nr = dir->alloc = 0;
93 struct ref_entry *create_dir_entry(struct files_ref_store *ref_store,
94 const char *dirname, size_t len,
97 struct ref_entry *direntry;
98 FLEX_ALLOC_MEM(direntry, name, dirname, len);
99 direntry->u.subdir.ref_store = ref_store;
100 direntry->flag = REF_DIR | (incomplete ? REF_INCOMPLETE : 0);
104 static int ref_entry_cmp(const void *a, const void *b)
106 struct ref_entry *one = *(struct ref_entry **)a;
107 struct ref_entry *two = *(struct ref_entry **)b;
108 return strcmp(one->name, two->name);
111 static void sort_ref_dir(struct ref_dir *dir);
113 struct string_slice {
118 static int ref_entry_cmp_sslice(const void *key_, const void *ent_)
120 const struct string_slice *key = key_;
121 const struct ref_entry *ent = *(const struct ref_entry * const *)ent_;
122 int cmp = strncmp(key->str, ent->name, key->len);
125 return '\0' - (unsigned char)ent->name[key->len];
128 int search_ref_dir(struct ref_dir *dir, const char *refname, size_t len)
130 struct ref_entry **r;
131 struct string_slice key;
133 if (refname == NULL || !dir->nr)
139 r = bsearch(&key, dir->entries, dir->nr, sizeof(*dir->entries),
140 ref_entry_cmp_sslice);
145 return r - dir->entries;
/*
 * Search for a directory entry directly within dir (without
 * recursing). Sort dir if necessary. subdirname must be a directory
 * name (i.e., end in '/'). If mkdir is set, then create the
 * directory if it is missing; otherwise, return NULL if the desired
 * directory cannot be found. dir must already be complete.
 */
155 static struct ref_dir *search_for_subdir(struct ref_dir *dir,
156 const char *subdirname, size_t len,
159 int entry_index = search_ref_dir(dir, subdirname, len);
160 struct ref_entry *entry;
161 if (entry_index == -1) {
165 * Since dir is complete, the absence of a subdir
166 * means that the subdir really doesn't exist;
167 * therefore, create an empty record for it but mark
168 * the record complete.
170 entry = create_dir_entry(dir->ref_store, subdirname, len, 0);
171 add_entry_to_dir(dir, entry);
173 entry = dir->entries[entry_index];
175 return get_ref_dir(entry);
/*
 * Walk down from dir, one path component of refname at a time, and
 * return the ref_dir that would directly contain refname. If mkdir is
 * set, missing intermediate directories are created; otherwise NULL
 * is returned as soon as one is absent.
 */
struct ref_dir *find_containing_dir(struct ref_dir *dir,
				    const char *refname, int mkdir)
{
	const char *slash;
	for (slash = strchr(refname, '/'); slash; slash = strchr(slash + 1, '/')) {
		size_t dirnamelen = slash - refname + 1;
		struct ref_dir *subdir;
		subdir = search_for_subdir(dir, refname, dirnamelen, mkdir);
		if (!subdir) {
			dir = NULL;
			break;
		}
		dir = subdir;
	}

	return dir;
}
196 struct ref_entry *find_ref_entry(struct ref_dir *dir, const char *refname)
199 struct ref_entry *entry;
200 dir = find_containing_dir(dir, refname, 0);
203 entry_index = search_ref_dir(dir, refname, strlen(refname));
204 if (entry_index == -1)
206 entry = dir->entries[entry_index];
207 return (entry->flag & REF_DIR) ? NULL : entry;
210 int remove_entry_from_dir(struct ref_dir *dir, const char *refname)
212 int refname_len = strlen(refname);
214 struct ref_entry *entry;
215 int is_dir = refname[refname_len - 1] == '/';
218 * refname represents a reference directory. Remove
219 * the trailing slash; otherwise we will get the
220 * directory *representing* refname rather than the
221 * one *containing* it.
223 char *dirname = xmemdupz(refname, refname_len - 1);
224 dir = find_containing_dir(dir, dirname, 0);
227 dir = find_containing_dir(dir, refname, 0);
231 entry_index = search_ref_dir(dir, refname, refname_len);
232 if (entry_index == -1)
234 entry = dir->entries[entry_index];
236 memmove(&dir->entries[entry_index],
237 &dir->entries[entry_index + 1],
238 (dir->nr - entry_index - 1) * sizeof(*dir->entries)
241 if (dir->sorted > entry_index)
243 free_ref_entry(entry);
247 int add_ref_entry(struct ref_dir *dir, struct ref_entry *ref)
249 dir = find_containing_dir(dir, ref->name, 1);
252 add_entry_to_dir(dir, ref);
/*
 * Emit a warning and return true iff ref1 and ref2 have the same name
 * and the same sha1. Die if they have the same name but different
 * sha1s.
 */
261 static int is_dup_ref(const struct ref_entry *ref1, const struct ref_entry *ref2)
263 if (strcmp(ref1->name, ref2->name))
266 /* Duplicate name; make sure that they don't conflict: */
268 if ((ref1->flag & REF_DIR) || (ref2->flag & REF_DIR))
269 /* This is impossible by construction */
270 die("Reference directory conflict: %s", ref1->name);
272 if (oidcmp(&ref1->u.value.oid, &ref2->u.value.oid))
273 die("Duplicated ref, and SHA1s don't match: %s", ref1->name);
275 warning("Duplicated ref: %s", ref1->name);
/*
 * Sort the entries in dir non-recursively (if they are not already
 * sorted) and remove any duplicate entries.
 */
283 static void sort_ref_dir(struct ref_dir *dir)
286 struct ref_entry *last = NULL;
289 * This check also prevents passing a zero-length array to qsort(),
290 * which is a problem on some platforms.
292 if (dir->sorted == dir->nr)
295 QSORT(dir->entries, dir->nr, ref_entry_cmp);
297 /* Remove any duplicates: */
298 for (i = 0, j = 0; j < dir->nr; j++) {
299 struct ref_entry *entry = dir->entries[j];
300 if (last && is_dup_ref(last, entry))
301 free_ref_entry(entry);
303 last = dir->entries[i++] = entry;
305 dir->sorted = dir->nr = i;
308 int do_for_each_entry_in_dir(struct ref_dir *dir, int offset,
309 each_ref_entry_fn fn, void *cb_data)
312 assert(dir->sorted == dir->nr);
313 for (i = offset; i < dir->nr; i++) {
314 struct ref_entry *entry = dir->entries[i];
316 if (entry->flag & REF_DIR) {
317 struct ref_dir *subdir = get_ref_dir(entry);
318 sort_ref_dir(subdir);
319 retval = do_for_each_entry_in_dir(subdir, 0, fn, cb_data);
321 retval = fn(entry, cb_data);
329 void prime_ref_dir(struct ref_dir *dir)
332 * The hard work of loading loose refs is done by get_ref_dir(), so we
333 * just need to recurse through all of the sub-directories. We do not
334 * even need to care about sorting, as traversal order does not matter
338 for (i = 0; i < dir->nr; i++) {
339 struct ref_entry *entry = dir->entries[i];
340 if (entry->flag & REF_DIR)
341 prime_ref_dir(get_ref_dir(entry));
/*
 * A level in the reference hierarchy that is currently being iterated
 * through.
 */
struct cache_ref_iterator_level {
	/*
	 * The ref_dir being iterated over at this level. The ref_dir
	 * is sorted before being stored here.
	 */
	struct ref_dir *dir;

	/*
	 * The index of the current entry within dir (which might
	 * itself be a directory). If index == -1, then the iteration
	 * hasn't yet begun. If index == dir->nr, then the iteration
	 * through this level is over.
	 */
	int index;
};
/*
 * Represent an iteration through a ref_dir in the memory cache. The
 * iteration recurses through subdirectories.
 */
369 struct cache_ref_iterator {
370 struct ref_iterator base;
373 * The number of levels currently on the stack. This is always
374 * at least 1, because when it becomes zero the iteration is
375 * ended and this struct is freed.
379 /* The number of levels that have been allocated on the stack */
383 * A stack of levels. levels[0] is the uppermost level that is
384 * being iterated over in this iteration. (This is not
385 * necessary the top level in the references hierarchy. If we
386 * are iterating through a subtree, then levels[0] will hold
387 * the ref_dir for that subtree, and subsequent levels will go
390 struct cache_ref_iterator_level *levels;
393 static int cache_ref_iterator_advance(struct ref_iterator *ref_iterator)
395 struct cache_ref_iterator *iter =
396 (struct cache_ref_iterator *)ref_iterator;
399 struct cache_ref_iterator_level *level =
400 &iter->levels[iter->levels_nr - 1];
401 struct ref_dir *dir = level->dir;
402 struct ref_entry *entry;
404 if (level->index == -1)
407 if (++level->index == level->dir->nr) {
408 /* This level is exhausted; pop up a level */
409 if (--iter->levels_nr == 0)
410 return ref_iterator_abort(ref_iterator);
415 entry = dir->entries[level->index];
417 if (entry->flag & REF_DIR) {
418 /* push down a level */
419 ALLOC_GROW(iter->levels, iter->levels_nr + 1,
422 level = &iter->levels[iter->levels_nr++];
423 level->dir = get_ref_dir(entry);
426 iter->base.refname = entry->name;
427 iter->base.oid = &entry->u.value.oid;
428 iter->base.flags = entry->flag;
434 enum peel_status peel_entry(struct ref_entry *entry, int repeel)
436 enum peel_status status;
438 if (entry->flag & REF_KNOWS_PEELED) {
440 entry->flag &= ~REF_KNOWS_PEELED;
441 oidclr(&entry->u.value.peeled);
443 return is_null_oid(&entry->u.value.peeled) ?
444 PEEL_NON_TAG : PEEL_PEELED;
447 if (entry->flag & REF_ISBROKEN)
449 if (entry->flag & REF_ISSYMREF)
450 return PEEL_IS_SYMREF;
452 status = peel_object(entry->u.value.oid.hash, entry->u.value.peeled.hash);
453 if (status == PEEL_PEELED || status == PEEL_NON_TAG)
454 entry->flag |= REF_KNOWS_PEELED;
458 static int cache_ref_iterator_peel(struct ref_iterator *ref_iterator,
459 struct object_id *peeled)
461 struct cache_ref_iterator *iter =
462 (struct cache_ref_iterator *)ref_iterator;
463 struct cache_ref_iterator_level *level;
464 struct ref_entry *entry;
466 level = &iter->levels[iter->levels_nr - 1];
468 if (level->index == -1)
469 die("BUG: peel called before advance for cache iterator");
471 entry = level->dir->entries[level->index];
473 if (peel_entry(entry, 0))
475 oidcpy(peeled, &entry->u.value.peeled);
479 static int cache_ref_iterator_abort(struct ref_iterator *ref_iterator)
481 struct cache_ref_iterator *iter =
482 (struct cache_ref_iterator *)ref_iterator;
485 base_ref_iterator_free(ref_iterator);
489 static struct ref_iterator_vtable cache_ref_iterator_vtable = {
490 cache_ref_iterator_advance,
491 cache_ref_iterator_peel,
492 cache_ref_iterator_abort
495 struct ref_iterator *cache_ref_iterator_begin(struct ref_dir *dir)
497 struct cache_ref_iterator *iter;
498 struct ref_iterator *ref_iterator;
499 struct cache_ref_iterator_level *level;
501 iter = xcalloc(1, sizeof(*iter));
502 ref_iterator = &iter->base;
503 base_ref_iterator_init(ref_iterator, &cache_ref_iterator_vtable);
504 ALLOC_GROW(iter->levels, 10, iter->levels_alloc);
507 level = &iter->levels[0];