/*
 * name-hash.c
 *
 * Hashing names in the index state
 *
 * Copyright (C) 2008 Linus Torvalds
 */
#define NO_THE_INDEX_COMPATIBILITY_MACROS
#include "cache.h"
struct dir_entry {
	struct hashmap_entry ent;
	struct dir_entry *parent;
	int nr;
	unsigned int namelen;
	char name[FLEX_ARRAY];
};
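/*
 * For illustration (editorial note, not in the upstream file): the index
 * entry "Documentation/technical/api-hashmap.txt" yields the dir_entry
 * "Documentation/technical" (namelen 23), whose ->parent points at the
 * dir_entry "Documentation" (namelen 13), whose ->parent is NULL. The
 * ->nr field counts the cache_entries and non-empty child directories
 * that currently reference the entry.
 */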
static int dir_entry_cmp(const struct dir_entry *e1,
		const struct dir_entry *e2, const char *name)
{
	return e1->namelen != e2->namelen || strncasecmp(e1->name,
			name ? name : e2->name, e1->namelen);
}
static struct dir_entry *find_dir_entry__hash(struct index_state *istate,
		const char *name, unsigned int namelen, unsigned int hash)
{
	struct dir_entry key;
	hashmap_entry_init(&key, hash);
	key.namelen = namelen;
	return hashmap_get(&istate->dir_hash, &key, name);
}
static struct dir_entry *find_dir_entry(struct index_state *istate,
		const char *name, unsigned int namelen)
{
	return find_dir_entry__hash(istate, name, namelen, memihash(name, namelen));
}
static struct dir_entry *hash_dir_entry(struct index_state *istate,
		struct cache_entry *ce, int namelen)
{
	/*
	 * Throw each directory component in the hash for quick lookup
	 * during a git status. Directory components are stored without their
	 * closing slash. Although a submodule is a directory, it never
	 * reaches this point, because it is stored in index_state.name_hash
	 * (as an ordinary cache_entry).
	 */
	struct dir_entry *dir;

	/* get length of parent directory */
	while (namelen > 0 && !is_dir_sep(ce->name[namelen - 1]))
		namelen--;
	if (namelen <= 0)
		return NULL;
	namelen--;

	/* lookup existing entry for that directory */
	dir = find_dir_entry(istate, ce->name, namelen);
	if (!dir) {
		/* not found, create it and add to hash table */
		FLEX_ALLOC_MEM(dir, name, ce->name, namelen);
		hashmap_entry_init(dir, memihash(ce->name, namelen));
		dir->namelen = namelen;
		hashmap_add(&istate->dir_hash, dir);

		/* recursively add missing parent directories */
		dir->parent = hash_dir_entry(istate, ce, namelen);
	}
	return dir;
}
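/*
 * For illustration (not part of the upstream file): hashing the cache
 * entry "a/b/file.txt" finds or creates the dir_entry "a/b"; if "a/b"
 * is newly created, the recursive call creates "a" as well and links it
 * via ->parent, so any directory prefix can later be found with a
 * single case-insensitive hash lookup.
 */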
static void add_dir_entry(struct index_state *istate, struct cache_entry *ce)
{
	/* Add a reference to the directory entry (and to its parents when the count was previously 0). */
	struct dir_entry *dir = hash_dir_entry(istate, ce, ce_namelen(ce));
	while (dir && !(dir->nr++))
		dir = dir->parent;
}
static void remove_dir_entry(struct index_state *istate, struct cache_entry *ce)
{
	/*
	 * Release the reference to the directory entry. If the reference
	 * count drops to 0, remove the entry and continue with the parent
	 * directory.
	 */
	struct dir_entry *dir = hash_dir_entry(istate, ce, ce_namelen(ce));
	while (dir && !(--dir->nr)) {
		struct dir_entry *parent = dir->parent;
		hashmap_remove(&istate->dir_hash, dir, NULL);
		free(dir);
		dir = parent;
	}
}
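/*
 * For illustration (not part of the upstream file): if "a/b/x" and
 * "a/b/y" are the only paths under "a", adding both leaves "a/b" with
 * nr == 2 and "a" with nr == 1; removing both drops "a/b" to 0, which
 * frees it and in turn decrements and frees "a".
 */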
static void hash_index_entry(struct index_state *istate, struct cache_entry *ce)
{
	if (ce->ce_flags & CE_HASHED)
		return;
	ce->ce_flags |= CE_HASHED;
	hashmap_entry_init(ce, memihash(ce->name, ce_namelen(ce)));
	hashmap_add(&istate->name_hash, ce);

	if (ignore_case)
		add_dir_entry(istate, ce);
}
static int cache_entry_cmp(const struct cache_entry *ce1,
		const struct cache_entry *ce2, const void *remove)
{
	/*
	 * For remove_name_hash, find the exact entry (pointer equality); for
	 * index_file_exists, find all entries with matching hash code and
	 * decide whether the entry matches in same_name.
	 */
	return remove ? !(ce1 == ce2) : 0;
}
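/*
 * For illustration (not part of the upstream file): remove_name_hash()
 * passes the cache_entry itself as the keydata argument, so the
 * comparison above matches only that exact allocation; lookups such as
 * index_file_exists() pass NULL and accept every entry in the chain,
 * filtering with same_name() afterwards.
 */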
static int lazy_try_threaded = 1;
static int lazy_nr_dir_threads;

#ifdef NO_PTHREADS

static inline int lookup_lazy_params(struct index_state *istate)
{
	return 0;
}

static inline void threaded_lazy_init_name_hash(
	struct index_state *istate)
{
}

#else

#include "thread-utils.h"
/*
 * Set a minimum number of cache_entries that we will handle per
 * thread and use that to decide how many threads to run (up to
 * the number of CPUs on the system).
 *
 * For guidance setting the lower per-thread bound, see:
 *     t/helper/test-lazy-init-name-hash --analyze
 */
#define LAZY_THREAD_COST (2000)
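/*
 * For illustration (not part of the upstream file): with this cost, a
 * 100,000-entry index on an 8-core machine keeps all 8 threads in
 * lookup_lazy_params() below (100,000 >= 8 * 2,000), while a
 * 10,000-entry index is trimmed to 10,000 / 2,000 = 5 threads.
 */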
/*
 * We use n mutexes to guard n partitions of the "istate->dir_hash"
 * hashtable. Since "find" and "insert" operations will hash to a
 * particular bucket and modify/search a single chain, we can say
 * that "all chains mod n" are guarded by the same mutex -- rather
 * than having a single mutex to guard the entire table. (This does
 * require that we disable "rehashing" on the hashtable.)
 *
 * So, a larger value here decreases the probability of a collision
 * and the time that each thread must wait for the mutex.
 */
#define LAZY_MAX_MUTEX   (32)

static pthread_mutex_t *lazy_dir_mutex_array;
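/*
 * For illustration (not part of the upstream file): a thread inserting
 * the partial path "a/b" locks
 *
 *     lazy_dir_mutex_array[hashmap_bucket(map, hash) % LAZY_MAX_MUTEX]
 *
 * so two threads contend only when their names hash to bucket chains
 * that are congruent mod 32; see compute_dir_lock_nr() below.
 */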
/*
 * An array of lazy_entry items is used by the n threads in
 * the directory parse (first) phase to (lock-free) store the
 * intermediate results. These values are then referenced by
 * the 2 threads in the second phase.
 */
struct lazy_entry {
	struct dir_entry *dir;
	unsigned int hash_dir;
	unsigned int hash_name;
};
/*
 * Decide if we want to use threads (if available) to load
 * the hash tables. We set "lazy_nr_dir_threads" to zero when
 * it is not worth it.
 */
static int lookup_lazy_params(struct index_state *istate)
{
	int nr_cpus;

	lazy_nr_dir_threads = 0;

	if (!lazy_try_threaded)
		return 0;

	/*
	 * If we are respecting case, just use the original
	 * code to build the "istate->name_hash". We don't
	 * need the complexity here.
	 */
	if (!ignore_case)
		return 0;

	nr_cpus = online_cpus();
	if (nr_cpus < 2)
		return 0;

	if (istate->cache_nr < 2 * LAZY_THREAD_COST)
		return 0;

	if (istate->cache_nr < nr_cpus * LAZY_THREAD_COST)
		nr_cpus = istate->cache_nr / LAZY_THREAD_COST;
	lazy_nr_dir_threads = nr_cpus;
	return lazy_nr_dir_threads;
}
/*
 * Initialize n mutexes for use when searching and inserting
 * into "istate->dir_hash". All "dir" threads are trying
 * to insert partial pathnames into the hash as they iterate
 * over their portions of the index, so lock contention is
 * high.
 *
 * However, the hashmap is going to put items into bucket
 * chains based on their hash values. Use that to create n
 * mutexes and lock on mutex[bucket(hash) % n]. This will
 * (hopefully) decrease the collision rate by a factor of n.
 */
static void init_dir_mutex(void)
{
	int j;

	lazy_dir_mutex_array = xcalloc(LAZY_MAX_MUTEX, sizeof(pthread_mutex_t));

	for (j = 0; j < LAZY_MAX_MUTEX; j++)
		init_recursive_mutex(&lazy_dir_mutex_array[j]);
}
static void cleanup_dir_mutex(void)
{
	int j;

	for (j = 0; j < LAZY_MAX_MUTEX; j++)
		pthread_mutex_destroy(&lazy_dir_mutex_array[j]);

	free(lazy_dir_mutex_array);
}
static void lock_dir_mutex(int j)
{
	pthread_mutex_lock(&lazy_dir_mutex_array[j]);
}

static void unlock_dir_mutex(int j)
{
	pthread_mutex_unlock(&lazy_dir_mutex_array[j]);
}

static inline int compute_dir_lock_nr(
	const struct hashmap *map,
	unsigned int hash)
{
	return hashmap_bucket(map, hash) % LAZY_MAX_MUTEX;
}
static struct dir_entry *hash_dir_entry_with_parent_and_prefix(
	struct index_state *istate,
	struct dir_entry *parent,
	struct strbuf *prefix)
{
	struct dir_entry *dir;
	unsigned int hash;
	int lock_nr;

	/*
	 * Either we have a parent directory and path with slash(es)
	 * or the directory is an immediate child of the root directory.
	 */
	assert((parent != NULL) ^ (strchr(prefix->buf, '/') == NULL));

	if (parent)
		hash = memihash_cont(parent->ent.hash,
			prefix->buf + parent->namelen,
			prefix->len - parent->namelen);
	else
		hash = memihash(prefix->buf, prefix->len);

	lock_nr = compute_dir_lock_nr(&istate->dir_hash, hash);
	lock_dir_mutex(lock_nr);

	dir = find_dir_entry__hash(istate, prefix->buf, prefix->len, hash);
	if (!dir) {
		FLEX_ALLOC_MEM(dir, name, prefix->buf, prefix->len);
		hashmap_entry_init(dir, hash);
		dir->namelen = prefix->len;
		dir->parent = parent;
		hashmap_add(&istate->dir_hash, dir);

		if (parent) {
			unlock_dir_mutex(lock_nr);

			/* All I really need here is an InterlockedIncrement(&(parent->nr)) */
			lock_nr = compute_dir_lock_nr(&istate->dir_hash, parent->ent.hash);
			lock_dir_mutex(lock_nr);
			parent->nr++;
		}
	}

	unlock_dir_mutex(lock_nr);

	return dir;
}
/*
 * handle_range_1() and handle_range_dir() are derived from
 * clear_ce_flags_1() and clear_ce_flags_dir() in unpack-trees.c
 * and handle the iteration over the entire array of index entries.
 * They use recursion for adjacent entries in the same parent
 * directory.
 */
static int handle_range_1(
	struct index_state *istate,
	int k_start,
	int k_end,
	struct dir_entry *parent,
	struct strbuf *prefix,
	struct lazy_entry *lazy_entries);
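/*
 * For illustration (not part of the upstream file): given the sorted
 * index entries
 *
 *     a/b/x.txt
 *     a/b/y.txt
 *     a/z.txt
 *
 * handle_range_1() sees the prefix "a" in the first entry and calls
 * handle_range_dir(), which finds the end of the "a/" group (all three
 * entries) and recurses; the nested call then peels off "b" for the
 * first two entries before storing per-entry hashes into lazy_entries.
 */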
static int handle_range_dir(
	struct index_state *istate,
	int k_start,
	int k_end,
	struct dir_entry *parent,
	struct strbuf *prefix,
	struct lazy_entry *lazy_entries,
	struct dir_entry **dir_new_out)
{
	int rc, k;
	int input_prefix_len = prefix->len;
	struct dir_entry *dir_new;

	dir_new = hash_dir_entry_with_parent_and_prefix(istate, parent, prefix);

	strbuf_addch(prefix, '/');

	/*
	 * Scan forward in the index array for index entries having the same
	 * path prefix (that are also in this directory).
	 */
	if (strncmp(istate->cache[k_start + 1]->name, prefix->buf, prefix->len) > 0)
		k = k_start + 1;
	else if (strncmp(istate->cache[k_end - 1]->name, prefix->buf, prefix->len) == 0)
		k = k_end;
	else {
		int begin = k_start;
		int end = k_end;
		while (begin < end) {
			int mid = (begin + end) >> 1;
			int cmp = strncmp(istate->cache[mid]->name, prefix->buf, prefix->len);
			if (cmp == 0) /* mid has same prefix; look in second part */
				begin = mid + 1;
			else if (cmp > 0) /* mid is past group; look in first part */
				end = mid;
			else
				die("cache entry out of order");
		}
		k = begin;
	}

	/*
	 * Recurse and process what we can of this subset [k_start, k).
	 */
	rc = handle_range_1(istate, k_start, k, dir_new, prefix, lazy_entries);

	strbuf_setlen(prefix, input_prefix_len);

	*dir_new_out = dir_new;
	return rc;
}
static int handle_range_1(
	struct index_state *istate,
	int k_start,
	int k_end,
	struct dir_entry *parent,
	struct strbuf *prefix,
	struct lazy_entry *lazy_entries)
{
	int input_prefix_len = prefix->len;
	int k = k_start;

	while (k < k_end) {
		struct cache_entry *ce_k = istate->cache[k];
		const char *name, *slash;

		if (prefix->len && strncmp(ce_k->name, prefix->buf, prefix->len))
			break;

		name = ce_k->name + prefix->len;
		slash = strchr(name, '/');

		if (slash) {
			int len = slash - name;
			int processed;
			struct dir_entry *dir_new;

			strbuf_add(prefix, name, len);
			processed = handle_range_dir(istate, k, k_end, parent, prefix, lazy_entries, &dir_new);
			if (processed) {
				k += processed;
				strbuf_setlen(prefix, input_prefix_len);
				continue;
			}

			strbuf_addch(prefix, '/');
			processed = handle_range_1(istate, k, k_end, dir_new, prefix, lazy_entries);
			k += processed;
			strbuf_setlen(prefix, input_prefix_len);
			continue;
		}

		/*
		 * It is too expensive to take a lock to insert "ce_k"
		 * into "istate->name_hash" and increment the ref-count
		 * on the "parent" dir. So we defer actually updating
		 * permanent data structures until phase 2 (where we
		 * can change the locking requirements) and simply
		 * accumulate our current results into the lazy_entries
		 * data array.
		 *
		 * We do not need to lock the lazy_entries array because
		 * we have exclusive access to the cells in the range
		 * [k_start,k_end) that this thread was given.
		 */
		lazy_entries[k].dir = parent;
		if (parent) {
			lazy_entries[k].hash_name = memihash_cont(
				parent->ent.hash,
				ce_k->name + parent->namelen,
				ce_namelen(ce_k) - parent->namelen);
			lazy_entries[k].hash_dir = parent->ent.hash;
		} else {
			lazy_entries[k].hash_name = memihash(ce_k->name, ce_namelen(ce_k));
		}

		k++;
	}

	return k - k_start;
}
struct lazy_dir_thread_data {
	pthread_t pthread;
	struct index_state *istate;
	struct lazy_entry *lazy_entries;
	int k_start;
	int k_end;
};

static void *lazy_dir_thread_proc(void *_data)
{
	struct lazy_dir_thread_data *d = _data;
	struct strbuf prefix = STRBUF_INIT;
	handle_range_1(d->istate, d->k_start, d->k_end, NULL, &prefix, d->lazy_entries);
	strbuf_release(&prefix);
	return NULL;
}
struct lazy_name_thread_data {
	pthread_t pthread;
	struct index_state *istate;
	struct lazy_entry *lazy_entries;
};

static void *lazy_name_thread_proc(void *_data)
{
	struct lazy_name_thread_data *d = _data;
	int k;

	for (k = 0; k < d->istate->cache_nr; k++) {
		struct cache_entry *ce_k = d->istate->cache[k];
		ce_k->ce_flags |= CE_HASHED;
		hashmap_entry_init(ce_k, d->lazy_entries[k].hash_name);
		hashmap_add(&d->istate->name_hash, ce_k);
	}

	return NULL;
}
static inline void lazy_update_dir_ref_counts(
	struct index_state *istate,
	struct lazy_entry *lazy_entries)
{
	int k;

	for (k = 0; k < istate->cache_nr; k++) {
		if (lazy_entries[k].dir)
			lazy_entries[k].dir->nr++;
	}
}
static void threaded_lazy_init_name_hash(
	struct index_state *istate)
{
	int nr_each;
	int k_start;
	int t;
	struct lazy_entry *lazy_entries;
	struct lazy_dir_thread_data *td_dir;
	struct lazy_name_thread_data *td_name;

	k_start = 0;
	nr_each = DIV_ROUND_UP(istate->cache_nr, lazy_nr_dir_threads);

	lazy_entries = xcalloc(istate->cache_nr, sizeof(struct lazy_entry));
	td_dir = xcalloc(lazy_nr_dir_threads, sizeof(struct lazy_dir_thread_data));
	td_name = xcalloc(1, sizeof(struct lazy_name_thread_data));

	init_dir_mutex();

	/*
	 * Phase 1:
	 * Build "istate->dir_hash" using n "dir" threads (and a read-only index).
	 */
	for (t = 0; t < lazy_nr_dir_threads; t++) {
		struct lazy_dir_thread_data *td_dir_t = td_dir + t;
		td_dir_t->istate = istate;
		td_dir_t->lazy_entries = lazy_entries;
		td_dir_t->k_start = k_start;
		k_start += nr_each;
		if (k_start > istate->cache_nr)
			k_start = istate->cache_nr;
		td_dir_t->k_end = k_start;
		if (pthread_create(&td_dir_t->pthread, NULL, lazy_dir_thread_proc, td_dir_t))
			die("unable to create lazy_dir_thread");
	}
	for (t = 0; t < lazy_nr_dir_threads; t++) {
		struct lazy_dir_thread_data *td_dir_t = td_dir + t;
		if (pthread_join(td_dir_t->pthread, NULL))
			die("unable to join lazy_dir_thread");
	}

	/*
	 * Phase 2:
	 * Iterate over all index entries and add them to the "istate->name_hash"
	 * using a single "name" background thread.
	 * (Testing showed it wasn't worth running more than 1 thread for this.)
	 *
	 * Meanwhile, finish updating the parent directory ref-counts for each
	 * index entry using the current thread. (This step is very fast and
	 * doesn't need threading.)
	 */
	td_name->istate = istate;
	td_name->lazy_entries = lazy_entries;
	if (pthread_create(&td_name->pthread, NULL, lazy_name_thread_proc, td_name))
		die("unable to create lazy_name_thread");

	lazy_update_dir_ref_counts(istate, lazy_entries);

	if (pthread_join(td_name->pthread, NULL))
		die("unable to join lazy_name_thread");

	cleanup_dir_mutex();

	free(td_name);
	free(td_dir);
	free(lazy_entries);
}

#endif
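/*
 * For illustration (not part of the upstream file): with cache_nr ==
 * 100,000 and 8 "dir" threads, nr_each is DIV_ROUND_UP(100000, 8) ==
 * 12500, so thread t scans index cells [t * 12500, (t + 1) * 12500),
 * with the final thread's k_end clamped to cache_nr.
 */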
static void lazy_init_name_hash(struct index_state *istate)
{
	if (istate->name_hash_initialized)
		return;
	hashmap_init(&istate->name_hash, (hashmap_cmp_fn) cache_entry_cmp,
			istate->cache_nr);
	hashmap_init(&istate->dir_hash, (hashmap_cmp_fn) dir_entry_cmp,
			istate->cache_nr);

	if (lookup_lazy_params(istate)) {
		hashmap_disallow_rehash(&istate->dir_hash, 1);
		threaded_lazy_init_name_hash(istate);
		hashmap_disallow_rehash(&istate->dir_hash, 0);
	} else {
		int nr;
		for (nr = 0; nr < istate->cache_nr; nr++)
			hash_index_entry(istate, istate->cache[nr]);
	}

	istate->name_hash_initialized = 1;
}
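/*
 * Editorial note (not in the upstream file): rehashing is disabled
 * around the threaded build because the per-chain mutexes chosen by
 * compute_dir_lock_nr() assume the hash-to-bucket mapping stays fixed;
 * if the table resized and redistributed its chains mid-build, two
 * threads could mutate the same chain while holding different locks.
 */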
/*
 * A test routine for t/helper/ sources.
 *
 * Returns the number of threads used or 0 when
 * the non-threaded code path was used.
 *
 * Requesting threading WILL NOT override guards
 * in lookup_lazy_params().
 */
int test_lazy_init_name_hash(struct index_state *istate, int try_threaded)
{
	lazy_nr_dir_threads = 0;
	lazy_try_threaded = try_threaded;

	lazy_init_name_hash(istate);

	return lazy_nr_dir_threads;
}
void add_name_hash(struct index_state *istate, struct cache_entry *ce)
{
	if (istate->name_hash_initialized)
		hash_index_entry(istate, ce);
}
void remove_name_hash(struct index_state *istate, struct cache_entry *ce)
{
	if (!istate->name_hash_initialized || !(ce->ce_flags & CE_HASHED))
		return;
	ce->ce_flags &= ~CE_HASHED;
	hashmap_remove(&istate->name_hash, ce, ce);

	if (ignore_case)
		remove_dir_entry(istate, ce);
}
static int slow_same_name(const char *name1, int len1, const char *name2, int len2)
{
	if (len1 != len2)
		return 0;

	while (len1) {
		unsigned char c1 = *name1++;
		unsigned char c2 = *name2++;
		len1--;
		if (c1 != c2) {
			c1 = toupper(c1);
			c2 = toupper(c2);
			if (c1 != c2)
				return 0;
		}
	}
	return 1;
}
static int same_name(const struct cache_entry *ce, const char *name, int namelen, int icase)
{
	int len = ce_namelen(ce);

	/*
	 * Always do exact compare, even if we want a case-ignoring comparison;
	 * we do the quick exact one first, because it will be the common case.
	 */
	if (len == namelen && !memcmp(name, ce->name, len))
		return 1;

	if (!icase)
		return 0;

	return slow_same_name(name, namelen, ce->name, len);
}
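/*
 * For illustration (not part of the upstream file): same_name() against
 * a cache_entry named "Makefile" with name "makefile" and icase == 1
 * misses the fast memcmp() path but succeeds in slow_same_name(), which
 * upper-cases only the bytes that differ.
 */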
int index_dir_exists(struct index_state *istate, const char *name, int namelen)
{
	struct dir_entry *dir;

	lazy_init_name_hash(istate);
	dir = find_dir_entry(istate, name, namelen);
	return dir && dir->nr;
}
void adjust_dirname_case(struct index_state *istate, char *name)
{
	const char *startPtr = name;
	const char *ptr = startPtr;

	lazy_init_name_hash(istate);
	while (*ptr) {
		while (*ptr && *ptr != '/')
			ptr++;

		if (*ptr == '/') {
			struct dir_entry *dir;

			ptr++;
			dir = find_dir_entry(istate, name, ptr - name + 1);
			if (dir) {
				memcpy((void *)startPtr, dir->name + (startPtr - name), ptr - startPtr);
				startPtr = ptr;
			}
		}
	}
}
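/*
 * For illustration (not part of the upstream file): with "DirA/dirB/file"
 * in the index, adjust_dirname_case(istate, "dira/dirb/file") rewrites
 * the buffer one directory component at a time to "DirA/dirB/file",
 * leaving the final (non-directory) component untouched.
 */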
struct cache_entry *index_file_exists(struct index_state *istate, const char *name, int namelen, int icase)
{
	struct cache_entry *ce;

	lazy_init_name_hash(istate);

	ce = hashmap_get_from_hash(&istate->name_hash,
				   memihash(name, namelen), NULL);
	while (ce) {
		if (same_name(ce, name, namelen, icase))
			return ce;
		ce = hashmap_get_next(&istate->name_hash, ce);
	}
	return NULL;
}
void free_name_hash(struct index_state *istate)
{
	if (!istate->name_hash_initialized)
		return;
	istate->name_hash_initialized = 0;

	hashmap_free(&istate->name_hash, 0);
	hashmap_free(&istate->dir_hash, 1);
}