3 #include "refs-internal.h"
5 #include "../iterator.h"
/*
 * Append entry to dir's entry array, growing the array as needed.
 * Ownership of entry passes to dir. If entries happen to be added in
 * sorted order, keep dir->sorted advanced in lockstep so that a later
 * sort_ref_dir() call can be skipped entirely.
 * NOTE(review): some lines of this function (braces, the start of the
 * "sorted" condition) are not visible in this view.
 */
7 void add_entry_to_dir(struct ref_dir *dir, struct ref_entry *entry)
9 ALLOC_GROW(dir->entries, dir->nr + 1, dir->alloc);
10 dir->entries[dir->nr++] = entry;
11 /* optimize for the case that entries are added in order */
13 (dir->nr == dir->sorted + 1 &&
14 strcmp(dir->entries[dir->nr - 2]->name,
15 dir->entries[dir->nr - 1]->name) < 0))
16 dir->sorted = dir->nr;
/*
 * Return the ref_dir stored in entry, which must have REF_DIR set.
 * If the entry is still marked REF_INCOMPLETE, populate it first via
 * the cache's fill_ref_dir callback (dying if no callback was
 * configured), then clear the REF_INCOMPLETE flag.
 */
19 struct ref_dir *get_ref_dir(struct ref_entry *entry)
22 assert(entry->flag & REF_DIR);
23 dir = &entry->u.subdir;
24 if (entry->flag & REF_INCOMPLETE) {
25 if (!dir->cache->fill_ref_dir)
26 die("BUG: incomplete ref_store without fill_ref_dir function");
/* Lazily read this directory's entries (e.g. loose refs) on first use. */
28 dir->cache->fill_ref_dir(dir->cache->ref_store, dir, entry->name);
29 entry->flag &= ~REF_INCOMPLETE;
/*
 * Allocate and return a new ref_entry named refname whose value is
 * sha1. Dies if refname is syntactically invalid (one-level names are
 * permitted via REFNAME_ALLOW_ONELEVEL). The peeled value is cleared;
 * the caller-supplied flag handling is not visible in this view.
 * The returned entry is owned by the caller (typically handed to
 * add_entry_to_dir(), which takes ownership).
 */
34 struct ref_entry *create_ref_entry(const char *refname,
35 const unsigned char *sha1, int flag,
38 struct ref_entry *ref;
41 check_refname_format(refname, REFNAME_ALLOW_ONELEVEL))
42 die("Reference has invalid format: '%s'", refname);
43 FLEX_ALLOC_STR(ref, name, refname);
44 hashcpy(ref->u.value.oid.hash, sha1);
45 oidclr(&ref->u.value.peeled);
/*
 * Allocate a new ref_cache for refs. fill_ref_dir is the callback
 * used to lazily populate incomplete directories (see get_ref_dir());
 * the root directory entry starts out marked incomplete so that the
 * first traversal triggers it.
 */
50 struct ref_cache *create_ref_cache(struct ref_store *refs,
51 fill_ref_dir_fn *fill_ref_dir)
53 struct ref_cache *ret = xcalloc(1, sizeof(*ret));
55 ret->ref_store = refs;
56 ret->fill_ref_dir = fill_ref_dir;
57 ret->root = create_dir_entry(ret, "", 0, 1);
61 static void clear_ref_dir(struct ref_dir *dir);

/*
 * Free entry and (if it is a directory) everything under it,
 * recursively. Deliberately bypasses get_ref_dir() so freeing a cache
 * never triggers lazy loading of refs.
 */
63 static void free_ref_entry(struct ref_entry *entry)
65 if (entry->flag & REF_DIR) {
67 * Do not use get_ref_dir() here, as that might
68 * trigger the reading of loose refs.
70 clear_ref_dir(&entry->u.subdir);
/* Free the entire cache: the root entry and, recursively, its tree. */
75 void free_ref_cache(struct ref_cache *cache)
77 free_ref_entry(cache->root);
82 * Clear and free all entries in dir, recursively.
84 static void clear_ref_dir(struct ref_dir *dir)
87 for (i = 0; i < dir->nr; i++)
88 free_ref_entry(dir->entries[i]);
/* Reset the counters; the entries array itself is freed elsewhere (not visible here). */
90 dir->sorted = dir->nr = dir->alloc = 0;
/*
 * Allocate a new directory entry for the first len bytes of dirname,
 * belonging to cache. If incomplete is set, mark it REF_INCOMPLETE so
 * its contents are loaded lazily on first access via get_ref_dir().
 */
94 struct ref_entry *create_dir_entry(struct ref_cache *cache,
95 const char *dirname, size_t len,
98 struct ref_entry *direntry;
100 FLEX_ALLOC_MEM(direntry, name, dirname, len);
101 direntry->u.subdir.cache = cache;
102 direntry->flag = REF_DIR | (incomplete ? REF_INCOMPLETE : 0);
/* qsort() comparator: order ref_entry pointers by entry name. */
106 static int ref_entry_cmp(const void *a, const void *b)
108 struct ref_entry *one = *(struct ref_entry **)a;
109 struct ref_entry *two = *(struct ref_entry **)b;
110 return strcmp(one->name, two->name);
113 static void sort_ref_dir(struct ref_dir *dir);

/*
 * A length-delimited (not necessarily NUL-terminated) string, used as
 * the bsearch key in ref_entry_cmp_sslice(). Its members (a pointer
 * and a length, accessed as key->str / key->len) are not visible in
 * this view.
 */
115 struct string_slice {
/*
 * bsearch() comparator: compare a string_slice key against a
 * ref_entry. If the first key->len bytes match, the entry is "equal"
 * only when its name ends exactly there (the '\0' comparison below).
 */
120 static int ref_entry_cmp_sslice(const void *key_, const void *ent_)
122 const struct string_slice *key = key_;
123 const struct ref_entry *ent = *(const struct ref_entry * const *)ent_;
124 int cmp = strncmp(key->str, ent->name, key->len);
127 return '\0' - (unsigned char)ent->name[key->len];
/*
 * Binary-search dir for an entry whose name equals the first len
 * bytes of refname. Return its index, or -1 if not found (the early
 * return for the NULL/empty case is not fully visible here). The
 * directory must be sorted before bsearch() is valid; the sort call
 * is presumably between the visible lines — TODO confirm.
 */
130 int search_ref_dir(struct ref_dir *dir, const char *refname, size_t len)
132 struct ref_entry **r;
133 struct string_slice key;
135 if (refname == NULL || !dir->nr)
141 r = bsearch(&key, dir->entries, dir->nr, sizeof(*dir->entries),
142 ref_entry_cmp_sslice);
/* Convert the entry pointer back into an index into dir->entries. */
147 return r - dir->entries;
151 * Search for a directory entry directly within dir (without
152 * recursing). Sort dir if necessary. subdirname must be a directory
153 * name (i.e., end in '/'). If mkdir is set, then create the
154 * directory if it is missing; otherwise, return NULL if the desired
155 * directory cannot be found. dir must already be complete.
157 static struct ref_dir *search_for_subdir(struct ref_dir *dir,
158 const char *subdirname, size_t len,
161 int entry_index = search_ref_dir(dir, subdirname, len);
162 struct ref_entry *entry;
163 if (entry_index == -1) {
/* The !mkdir early return belongs here but is not visible in this view. */
167 * Since dir is complete, the absence of a subdir
168 * means that the subdir really doesn't exist;
169 * therefore, create an empty record for it but mark
170 * the record complete.
172 entry = create_dir_entry(dir->cache, subdirname, len, 0);
173 add_entry_to_dir(dir, entry);
175 entry = dir->entries[entry_index];
/* May trigger lazy loading if the found entry is REF_INCOMPLETE. */
177 return get_ref_dir(entry);
181 * If refname is a reference name, find the ref_dir within the dir
182 * tree that should hold refname. If refname is a directory name
183 * (i.e., it ends in '/'), then return that ref_dir itself. dir must
184 * represent the top-level directory and must already be complete.
185 * Sort ref_dirs and recurse into subdirectories as necessary. If
186 * mkdir is set, then create any missing directories; otherwise,
187 * return NULL if the desired directory cannot be found.
189 static struct ref_dir *find_containing_dir(struct ref_dir *dir,
190 const char *refname, int mkdir)
/* Walk refname one '/'-terminated component at a time, descending a level per component. */
193 for (slash = strchr(refname, '/'); slash; slash = strchr(slash + 1, '/')) {
194 size_t dirnamelen = slash - refname + 1;
195 struct ref_dir *subdir;
196 subdir = search_for_subdir(dir, refname, dirnamelen, mkdir);
/*
 * Find the non-directory entry for refname within the tree rooted at
 * dir, descending into the containing directory first. Return NULL if
 * refname is absent or names a directory rather than a reference.
 */
207 struct ref_entry *find_ref_entry(struct ref_dir *dir, const char *refname)
210 struct ref_entry *entry;
211 dir = find_containing_dir(dir, refname, 0);
214 entry_index = search_ref_dir(dir, refname, strlen(refname));
215 if (entry_index == -1)
217 entry = dir->entries[entry_index];
218 return (entry->flag & REF_DIR) ? NULL : entry;
/*
 * Remove the entry for refname (a reference name, or a directory name
 * ending in '/') from the tree rooted at dir, freeing it. The removed
 * entry's slot is closed up with memmove(), and dir->sorted is
 * adjusted so it never exceeds the still-sorted prefix. Return value
 * conventions are not fully visible in this view.
 */
221 int remove_entry_from_dir(struct ref_dir *dir, const char *refname)
223 int refname_len = strlen(refname);
225 struct ref_entry *entry;
226 int is_dir = refname[refname_len - 1] == '/';
229 * refname represents a reference directory. Remove
230 * the trailing slash; otherwise we will get the
231 * directory *representing* refname rather than the
232 * one *containing* it.
234 char *dirname = xmemdupz(refname, refname_len - 1);
235 dir = find_containing_dir(dir, dirname, 0);
238 dir = find_containing_dir(dir, refname, 0);
242 entry_index = search_ref_dir(dir, refname, refname_len);
243 if (entry_index == -1)
245 entry = dir->entries[entry_index];
/* Shift the tail of the array down over the removed slot. */
247 memmove(&dir->entries[entry_index],
248 &dir->entries[entry_index + 1],
249 (dir->nr - entry_index - 1) * sizeof(*dir->entries)
/* Entries at or beyond the removal point are no longer known-sorted. */
252 if (dir->sorted > entry_index)
254 free_ref_entry(entry);
/*
 * Add ref to the tree rooted at dir, creating any missing containing
 * directories (mkdir=1) along the way. Takes ownership of ref.
 */
258 int add_ref_entry(struct ref_dir *dir, struct ref_entry *ref)
260 dir = find_containing_dir(dir, ref->name, 1);
263 add_entry_to_dir(dir, ref);
268 * Emit a warning and return true iff ref1 and ref2 have the same name
269 * and the same sha1. Die if they have the same name but different
272 static int is_dup_ref(const struct ref_entry *ref1, const struct ref_entry *ref2)
274 if (strcmp(ref1->name, ref2->name))
277 /* Duplicate name; make sure that they don't conflict: */
279 if ((ref1->flag & REF_DIR) || (ref2->flag & REF_DIR))
280 /* This is impossible by construction */
281 die("Reference directory conflict: %s", ref1->name);
283 if (oidcmp(&ref1->u.value.oid, &ref2->u.value.oid))
284 die("Duplicated ref, and SHA1s don't match: %s", ref1->name);
/* Same name, same value: benign duplicate -- warn and let the caller drop one. */
286 warning("Duplicated ref: %s", ref1->name);
291 * Sort the entries in dir non-recursively (if they are not already
292 * sorted) and remove any duplicate entries.
294 static void sort_ref_dir(struct ref_dir *dir)
297 struct ref_entry *last = NULL;
300 * This check also prevents passing a zero-length array to qsort(),
301 * which is a problem on some platforms.
303 if (dir->sorted == dir->nr)
306 QSORT(dir->entries, dir->nr, ref_entry_cmp);
308 /* Remove any duplicates: */
/* Compact in place: i writes kept entries, j scans all of them. */
309 for (i = 0, j = 0; j < dir->nr; j++) {
310 struct ref_entry *entry = dir->entries[j];
311 if (last && is_dup_ref(last, entry))
312 free_ref_entry(entry);
314 last = dir->entries[i++] = entry;
/* After dedup the whole array is sorted, so record that. */
316 dir->sorted = dir->nr = i;
320 * Load all of the refs from `dir` (recursively) into our in-memory
323 static void prime_ref_dir(struct ref_dir *dir)
326 * The hard work of loading loose refs is done by get_ref_dir(), so we
327 * just need to recurse through all of the sub-directories. We do not
328 * even need to care about sorting, as traversal order does not matter
332 for (i = 0; i < dir->nr; i++) {
333 struct ref_entry *entry = dir->entries[i];
334 if (entry->flag & REF_DIR)
335 prime_ref_dir(get_ref_dir(entry));
340 * A level in the reference hierarchy that is currently being iterated
343 struct cache_ref_iterator_level {
345 * The ref_dir being iterated over at this level. The ref_dir
346 * is sorted before being stored here.
351 * The index of the current entry within dir (which might
352 * itself be a directory). If index == -1, then the iteration
353 * hasn't yet begun. If index == dir->nr, then the iteration
354 * through this level is over.
360 * Represent an iteration through a ref_dir in the memory cache. The
361 * iteration recurses through subdirectories.
363 struct cache_ref_iterator {
364 struct ref_iterator base;
367 * The number of levels currently on the stack. This is always
368 * at least 1, because when it becomes zero the iteration is
369 * ended and this struct is freed.
373 /* The number of levels that have been allocated on the stack */
377 * A stack of levels. levels[0] is the uppermost level that is
378 * being iterated over in this iteration. (This is not
379 * necessary the top level in the references hierarchy. If we
380 * are iterating through a subtree, then levels[0] will hold
381 * the ref_dir for that subtree, and subsequent levels will go
384 struct cache_ref_iterator_level *levels;
/*
 * ref_iterator vtable "advance" method: move to the next reference in
 * depth-first order. Directory entries push a new level onto the
 * stack; exhausted levels are popped, and popping the last level
 * aborts (and frees) the iterator. On success, expose the current
 * entry's name/oid/flags through iter->base.
 */
387 static int cache_ref_iterator_advance(struct ref_iterator *ref_iterator)
389 struct cache_ref_iterator *iter =
390 (struct cache_ref_iterator *)ref_iterator;
/* Loop over the deepest level (the enclosing while(1) is not visible in this view). */
393 struct cache_ref_iterator_level *level =
394 &iter->levels[iter->levels_nr - 1];
395 struct ref_dir *dir = level->dir;
396 struct ref_entry *entry;
/* index == -1 means this level has not been entered yet; its setup lines are not visible here. */
398 if (level->index == -1)
401 if (++level->index == level->dir->nr) {
402 /* This level is exhausted; pop up a level */
403 if (--iter->levels_nr == 0)
404 return ref_iterator_abort(ref_iterator)
409 entry = dir->entries[level->index];
411 if (entry->flag & REF_DIR) {
412 /* push down a level */
413 ALLOC_GROW(iter->levels, iter->levels_nr + 1,
416 level = &iter->levels[iter->levels_nr++];
417 level->dir = get_ref_dir(entry);
420 iter->base.refname = entry->name;
421 iter->base.oid = &entry->u.value.oid;
422 iter->base.flags = entry->flag;
/*
 * Peel the tag (if any) that entry points at, caching the result in
 * entry->u.value.peeled and setting REF_KNOWS_PEELED on success. If a
 * peeled value is already cached, reuse it -- unless repeel is set,
 * in which case the cached value is discarded and recomputed
 * (presumably; the repeel condition line is not visible in this
 * view). Broken refs and symrefs cannot be peeled.
 */
428 enum peel_status peel_entry(struct ref_entry *entry, int repeel)
430 enum peel_status status;
432 if (entry->flag & REF_KNOWS_PEELED) {
434 entry->flag &= ~REF_KNOWS_PEELED;
435 oidclr(&entry->u.value.peeled);
/* A cached null peeled oid encodes "not a tag". */
437 return is_null_oid(&entry->u.value.peeled) ?
438 PEEL_NON_TAG : PEEL_PEELED;
441 if (entry->flag & REF_ISBROKEN)
443 if (entry->flag & REF_ISSYMREF)
444 return PEEL_IS_SYMREF;
446 status = peel_object(entry->u.value.oid.hash, entry->u.value.peeled.hash);
/* Cache both definitive outcomes (peeled, or known non-tag). */
447 if (status == PEEL_PEELED || status == PEEL_NON_TAG)
448 entry->flag |= REF_KNOWS_PEELED;
/*
 * ref_iterator vtable "peel" method: peel the reference the iterator
 * currently points at, storing the result in *peeled. It is a bug to
 * call this before the first advance.
 */
452 static int cache_ref_iterator_peel(struct ref_iterator *ref_iterator,
453 struct object_id *peeled)
455 struct cache_ref_iterator *iter =
456 (struct cache_ref_iterator *)ref_iterator;
457 struct cache_ref_iterator_level *level;
458 struct ref_entry *entry;
460 level = &iter->levels[iter->levels_nr - 1];
462 if (level->index == -1)
463 die("BUG: peel called before advance for cache iterator");
465 entry = level->dir->entries[level->index];
/* Non-zero peel_entry() status means the entry could not be peeled. */
467 if (peel_entry(entry, 0))
469 oidcpy(peeled, &entry->u.value.peeled);
/*
 * ref_iterator vtable "abort" method: release the iterator's
 * resources (the levels array is presumably freed in a line not
 * visible here) and free the base iterator.
 */
473 static int cache_ref_iterator_abort(struct ref_iterator *ref_iterator)
475 struct cache_ref_iterator *iter =
476 (struct cache_ref_iterator *)ref_iterator;
479 base_ref_iterator_free(ref_iterator);
/* Method table wiring this iterator into the generic ref_iterator API. */
483 static struct ref_iterator_vtable cache_ref_iterator_vtable = {
484 cache_ref_iterator_advance,
485 cache_ref_iterator_peel,
486 cache_ref_iterator_abort
489 struct ref_iterator *cache_ref_iterator_begin(struct ref_cache *cache,
494 struct cache_ref_iterator *iter;
495 struct ref_iterator *ref_iterator;
496 struct cache_ref_iterator_level *level;
498 dir = get_ref_dir(cache->root);
499 if (prefix && *prefix)
500 dir = find_containing_dir(dir, prefix, 0);
502 /* There's nothing to iterate over. */
503 return empty_ref_iterator_begin();
508 iter = xcalloc(1, sizeof(*iter));
509 ref_iterator = &iter->base;
510 base_ref_iterator_init(ref_iterator, &cache_ref_iterator_vtable);
511 ALLOC_GROW(iter->levels, 10, iter->levels_alloc);
514 level = &iter->levels[0];
518 if (prefix && *prefix)
519 ref_iterator = prefix_ref_iterator_begin(ref_iterator,