/*
 * Hashing names in the index state
 *
 * Copyright (C) 2008 Linus Torvalds
 */
#include "cache.h"
#include "thread-utils.h"

struct dir_entry {
	struct hashmap_entry ent;
	struct dir_entry *parent;
	int nr;
	unsigned int namelen;
	char name[FLEX_ARRAY];
};

static int dir_entry_cmp(const void *unused_cmp_data,
			 const void *entry,
			 const void *entry_or_key,
			 const void *keydata)
{
	const struct dir_entry *e1 = entry;
	const struct dir_entry *e2 = entry_or_key;
	const char *name = keydata;

	return e1->namelen != e2->namelen || strncasecmp(e1->name,
			name ? name : e2->name, e1->namelen);
}

static struct dir_entry *find_dir_entry__hash(struct index_state *istate,
		const char *name, unsigned int namelen, unsigned int hash)
{
	struct dir_entry key;

	hashmap_entry_init(&key.ent, hash);
	key.namelen = namelen;
	return hashmap_get_entry(&istate->dir_hash, &key, name,
				 struct dir_entry, ent);
}

static struct dir_entry *find_dir_entry(struct index_state *istate,
		const char *name, unsigned int namelen)
{
	return find_dir_entry__hash(istate, name, namelen, memihash(name, namelen));
}

static struct dir_entry *hash_dir_entry(struct index_state *istate,
		struct cache_entry *ce, int namelen)
{
	/*
	 * Throw each directory component in the hash for quick lookup
	 * during a git status. Directory components are stored without their
	 * closing slash. Although submodules are directories, they never
	 * reach this point, because they are stored in
	 * index_state.name_hash (as ordinary cache_entries).
	 */
	struct dir_entry *dir;

	/* get length of parent directory */
	while (namelen > 0 && !is_dir_sep(ce->name[namelen - 1]))
		namelen--;
	if (namelen <= 0)
		return NULL;
	namelen--;

	/* lookup existing entry for that directory */
	dir = find_dir_entry(istate, ce->name, namelen);
	if (!dir) {
		/* not found, create it and add to hash table */
		FLEX_ALLOC_MEM(dir, name, ce->name, namelen);
		hashmap_entry_init(&dir->ent, memihash(ce->name, namelen));
		dir->namelen = namelen;
		hashmap_add(&istate->dir_hash, &dir->ent);

		/* recursively add missing parent directories */
		dir->parent = hash_dir_entry(istate, ce, namelen);
	}
	return dir;
}

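/*
 * Illustrative note (not part of the original file): for a cache entry
 * named "a/b/c.txt", hash_dir_entry() records the directory components
 * "a/b" and (recursively) "a" in "istate->dir_hash", each without its
 * trailing slash; the file itself is hashed only into "istate->name_hash".
 */
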
static void add_dir_entry(struct index_state *istate, struct cache_entry *ce)
{
	/* Add reference to the directory entry (and parents if 0). */
	struct dir_entry *dir = hash_dir_entry(istate, ce, ce_namelen(ce));
	while (dir && !(dir->nr++))
		dir = dir->parent;
}

static void remove_dir_entry(struct index_state *istate, struct cache_entry *ce)
{
	/*
	 * Release the reference to the directory entry. If it drops to 0,
	 * remove the entry and continue with the parent directory.
	 */
	struct dir_entry *dir = hash_dir_entry(istate, ce, ce_namelen(ce));
	while (dir && !(--dir->nr)) {
		struct dir_entry *parent = dir->parent;
		hashmap_remove(&istate->dir_hash, &dir->ent, NULL);
		free(dir);
		dir = parent;
	}
}

static void hash_index_entry(struct index_state *istate, struct cache_entry *ce)
{
	if (ce->ce_flags & CE_HASHED)
		return;
	ce->ce_flags |= CE_HASHED;
	hashmap_entry_init(&ce->ent, memihash(ce->name, ce_namelen(ce)));
	hashmap_add(&istate->name_hash, &ce->ent);

	if (ignore_case)
		add_dir_entry(istate, ce);
}

static int cache_entry_cmp(const void *unused_cmp_data,
			   const void *entry,
			   const void *entry_or_key,
			   const void *remove)
{
	const struct cache_entry *ce1 = entry;
	const struct cache_entry *ce2 = entry_or_key;
	/*
	 * For remove_name_hash, find the exact entry (pointer equality); for
	 * index_file_exists, find all entries with matching hash code and
	 * decide whether the entry matches in same_name.
	 */
	return remove ? !(ce1 == ce2) : 0;
}

static int lazy_try_threaded = 1;
static int lazy_nr_dir_threads;

/*
 * Set a minimum number of cache_entries that we will handle per
 * thread and use that to decide how many threads to run (up to
 * the number of CPUs on the system).
 *
 * For guidance setting the lower per-thread bound, see:
 *     t/helper/test-lazy-init-name-hash --analyze
 */
#define LAZY_THREAD_COST (2000)

/*
 * We use n mutexes to guard n partitions of the "istate->dir_hash"
 * hashtable. Since "find" and "insert" operations will hash to a
 * particular bucket and modify/search a single chain, we can say
 * that "all chains mod n" are guarded by the same mutex -- rather
 * than having a single mutex to guard the entire table. (This does
 * require that we disable "rehashing" on the hashtable.)
 *
 * So, a larger value here decreases the probability of a collision
 * and the time that each thread must wait for the mutex.
 */
#define LAZY_MAX_MUTEX (32)

static pthread_mutex_t *lazy_dir_mutex_array;

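/*
 * Illustrative note (not part of the original file): with
 * LAZY_MAX_MUTEX = 32, bucket numbers 5, 37 and 69 all map to
 * mutex[5] (bucket % 32), so two threads contend on the same lock
 * only when their paths hash into the same group of chains.
 */
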
/*
 * An array of lazy_entry items is used by the n threads in
 * the directory parse (first) phase to (lock-free) store the
 * intermediate results. These values are then referenced by
 * the 2 threads in the second phase.
 */
struct lazy_entry {
	struct dir_entry *dir;
	unsigned int hash_dir;
	unsigned int hash_name;
};

/*
 * Decide if we want to use threads (if available) to load
 * the hash tables. We set "lazy_nr_dir_threads" to zero when
 * it is not worth it.
 */
static int lookup_lazy_params(struct index_state *istate)
{
	int nr_cpus;

	lazy_nr_dir_threads = 0;

	if (!lazy_try_threaded)
		return 0;

	/*
	 * If we are respecting case, just use the original
	 * code to build the "istate->name_hash". We don't
	 * need the complexity here.
	 */
	if (!ignore_case)
		return 0;

	nr_cpus = online_cpus();
	if (nr_cpus < 2)
		return 0;

	if (istate->cache_nr < 2 * LAZY_THREAD_COST)
		return 0; /* not worth it */

	if (istate->cache_nr < nr_cpus * LAZY_THREAD_COST)
		nr_cpus = istate->cache_nr / LAZY_THREAD_COST;
	lazy_nr_dir_threads = nr_cpus;
	return lazy_nr_dir_threads;
}

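/*
 * Worked example (not part of the original file): with ignore_case set,
 * 16 online CPUs and cache_nr = 10,000, the 2 * LAZY_THREAD_COST (4,000)
 * threshold is met, and since 10,000 < 16 * 2,000 the count is trimmed
 * to 10,000 / 2,000 = 5 "dir" threads.
 */
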
/*
 * Initialize n mutexes for use when searching and inserting
 * into "istate->dir_hash". All "dir" threads are trying
 * to insert partial pathnames into the hash as they iterate
 * over their portions of the index, so lock contention is
 * high.
 *
 * However, the hashmap is going to put items into bucket
 * chains based on their hash values. Use that to create n
 * mutexes and lock on mutex[bucket(hash) % n]. This will
 * decrease the collision rate by (hopefully) a factor of n.
 */
static void init_dir_mutex(void)
{
	int j;

	lazy_dir_mutex_array = xcalloc(LAZY_MAX_MUTEX, sizeof(pthread_mutex_t));

	for (j = 0; j < LAZY_MAX_MUTEX; j++)
		init_recursive_mutex(&lazy_dir_mutex_array[j]);
}

static void cleanup_dir_mutex(void)
{
	int j;

	for (j = 0; j < LAZY_MAX_MUTEX; j++)
		pthread_mutex_destroy(&lazy_dir_mutex_array[j]);

	free(lazy_dir_mutex_array);
}

static void lock_dir_mutex(int j)
{
	pthread_mutex_lock(&lazy_dir_mutex_array[j]);
}

static void unlock_dir_mutex(int j)
{
	pthread_mutex_unlock(&lazy_dir_mutex_array[j]);
}

static inline int compute_dir_lock_nr(const struct hashmap *map,
				      unsigned int hash)
{
	return hashmap_bucket(map, hash) % LAZY_MAX_MUTEX;
}

static struct dir_entry *hash_dir_entry_with_parent_and_prefix(
	struct index_state *istate,
	struct dir_entry *parent,
	struct strbuf *prefix)
{
	struct dir_entry *dir;
	unsigned int hash;
	int lock_nr;

	/*
	 * Either we have a parent directory and path with slash(es)
	 * or the directory is an immediate child of the root directory.
	 */
	assert((parent != NULL) ^ (strchr(prefix->buf, '/') == NULL));

	if (parent)
		hash = memihash_cont(parent->ent.hash,
			prefix->buf + parent->namelen,
			prefix->len - parent->namelen);
	else
		hash = memihash(prefix->buf, prefix->len);

	lock_nr = compute_dir_lock_nr(&istate->dir_hash, hash);
	lock_dir_mutex(lock_nr);

	dir = find_dir_entry__hash(istate, prefix->buf, prefix->len, hash);
	if (!dir) {
		FLEX_ALLOC_MEM(dir, name, prefix->buf, prefix->len);
		hashmap_entry_init(&dir->ent, hash);
		dir->namelen = prefix->len;
		dir->parent = parent;
		hashmap_add(&istate->dir_hash, &dir->ent);

		if (parent) {
			unlock_dir_mutex(lock_nr);

			/* All I really need here is an InterlockedIncrement(&(parent->nr)) */
			lock_nr = compute_dir_lock_nr(&istate->dir_hash, parent->ent.hash);
			lock_dir_mutex(lock_nr);
			parent->nr++;
		}
	}

	unlock_dir_mutex(lock_nr);

	return dir;
}

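/*
 * Illustrative note (not part of the original file), assuming the
 * incremental behavior of memihash()/memihash_cont() from hashmap.c:
 * continuing the parent's hash over the rest of the path gives the
 * same value as hashing the whole path in one call, e.g.
 *
 *	memihash_cont(memihash("a", 1), "/b", 2) == memihash("a/b", 3)
 *
 * which is why the cached parent hash can seed the child's hash above.
 */
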
/*
 * handle_range_1() and handle_range_dir() are derived from
 * clear_ce_flags_1() and clear_ce_flags_dir() in unpack-trees.c
 * and handle the iteration over the entire array of index entries.
 * They use recursion for adjacent entries in the same parent
 * directory.
 */
static int handle_range_1(
	struct index_state *istate,
	int k_start,
	int k_end,
	struct dir_entry *parent,
	struct strbuf *prefix,
	struct lazy_entry *lazy_entries);

static int handle_range_dir(
	struct index_state *istate,
	int k_start,
	int k_end,
	struct dir_entry *parent,
	struct strbuf *prefix,
	struct lazy_entry *lazy_entries,
	struct dir_entry **dir_new_out)
{
	int rc, k;
	int input_prefix_len = prefix->len;
	struct dir_entry *dir_new;

	dir_new = hash_dir_entry_with_parent_and_prefix(istate, parent, prefix);

	strbuf_addch(prefix, '/');

	/*
	 * Scan forward in the index array for index entries having the same
	 * path prefix (that are also in this directory).
	 */
	if (k_start + 1 >= k_end)
		k = k_end;
	else if (strncmp(istate->cache[k_start + 1]->name, prefix->buf, prefix->len) > 0)
		k = k_start + 1;
	else if (strncmp(istate->cache[k_end - 1]->name, prefix->buf, prefix->len) == 0)
		k = k_end;
	else {
		int begin = k_start;
		int end = k_end;
		while (begin < end) {
			int mid = begin + ((end - begin) >> 1);
			int cmp = strncmp(istate->cache[mid]->name, prefix->buf, prefix->len);
			if (cmp == 0) /* mid has same prefix; look in second part */
				begin = mid + 1;
			else if (cmp > 0) /* mid is past group; look in first part */
				end = mid;
			else
				die("cache entry out of order");
		}
		k = begin;
	}

	/*
	 * Recurse and process what we can of this subset [k_start, k).
	 */
	rc = handle_range_1(istate, k_start, k, dir_new, prefix, lazy_entries);

	strbuf_setlen(prefix, input_prefix_len);

	*dir_new_out = dir_new;
	return rc;
}

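/*
 * Worked example (not part of the original file): with the sorted
 * cache entries
 *
 *	[k_start] "dir/a", "dir/b/x", "dir/b/y", "dir/c", "echo" [k_end)
 *
 * and prefix "dir/", the scan above stops just past "dir/c", so the
 * recursive handle_range_1() call only visits entries that actually
 * live under "dir/".
 */
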
static int handle_range_1(
	struct index_state *istate,
	int k_start,
	int k_end,
	struct dir_entry *parent,
	struct strbuf *prefix,
	struct lazy_entry *lazy_entries)
{
	int input_prefix_len = prefix->len;
	int k = k_start;

	while (k < k_end) {
		struct cache_entry *ce_k = istate->cache[k];
		const char *name, *slash;

		if (prefix->len && strncmp(ce_k->name, prefix->buf, prefix->len))
			break;

		name = ce_k->name + prefix->len;
		slash = strchr(name, '/');

		if (slash) {
			int len = slash - name;
			int processed;
			struct dir_entry *dir_new;

			strbuf_add(prefix, name, len);
			processed = handle_range_dir(istate, k, k_end, parent, prefix, lazy_entries, &dir_new);
			if (processed) {
				k += processed;
				strbuf_setlen(prefix, input_prefix_len);
				continue;
			}

			strbuf_addch(prefix, '/');
			processed = handle_range_1(istate, k, k_end, dir_new, prefix, lazy_entries);
			k += processed;
			strbuf_setlen(prefix, input_prefix_len);
			continue;
		}

		/*
		 * It is too expensive to take a lock to insert "ce_k"
		 * into "istate->name_hash" and increment the ref-count
		 * on the "parent" dir. So we defer actually updating
		 * permanent data structures until phase 2 (where we
		 * can change the locking requirements) and simply
		 * accumulate our current results into the lazy_entries
		 * data array.
		 *
		 * We do not need to lock the lazy_entries array because
		 * we have exclusive access to the cells in the range
		 * [k_start,k_end) that this thread was given.
		 */
		lazy_entries[k].dir = parent;
		if (parent) {
			lazy_entries[k].hash_name = memihash_cont(
				parent->ent.hash,
				ce_k->name + parent->namelen,
				ce_namelen(ce_k) - parent->namelen);
			lazy_entries[k].hash_dir = parent->ent.hash;
		} else {
			lazy_entries[k].hash_name = memihash(ce_k->name, ce_namelen(ce_k));
		}

		k++;
	}

	return k - k_start;
}

struct lazy_dir_thread_data {
	pthread_t pthread;
	struct index_state *istate;
	struct lazy_entry *lazy_entries;
	int k_start;
	int k_end;
};

static void *lazy_dir_thread_proc(void *_data)
{
	struct lazy_dir_thread_data *d = _data;
	struct strbuf prefix = STRBUF_INIT;
	handle_range_1(d->istate, d->k_start, d->k_end, NULL, &prefix, d->lazy_entries);
	strbuf_release(&prefix);
	return NULL;
}

struct lazy_name_thread_data {
	pthread_t pthread;
	struct index_state *istate;
	struct lazy_entry *lazy_entries;
};

static void *lazy_name_thread_proc(void *_data)
{
	struct lazy_name_thread_data *d = _data;
	int k;

	for (k = 0; k < d->istate->cache_nr; k++) {
		struct cache_entry *ce_k = d->istate->cache[k];
		ce_k->ce_flags |= CE_HASHED;
		hashmap_entry_init(&ce_k->ent, d->lazy_entries[k].hash_name);
		hashmap_add(&d->istate->name_hash, &ce_k->ent);
	}
	return NULL;
}

static inline void lazy_update_dir_ref_counts(
	struct index_state *istate,
	struct lazy_entry *lazy_entries)
{
	int k;

	for (k = 0; k < istate->cache_nr; k++) {
		if (lazy_entries[k].dir)
			lazy_entries[k].dir->nr++;
	}
}

static void threaded_lazy_init_name_hash(
	struct index_state *istate)
{
	int err;
	int nr_each;
	int k_start;
	int t;
	struct lazy_entry *lazy_entries;
	struct lazy_dir_thread_data *td_dir;
	struct lazy_name_thread_data *td_name;

	k_start = 0;
	nr_each = DIV_ROUND_UP(istate->cache_nr, lazy_nr_dir_threads);

	lazy_entries = xcalloc(istate->cache_nr, sizeof(struct lazy_entry));
	td_dir = xcalloc(lazy_nr_dir_threads, sizeof(struct lazy_dir_thread_data));
	td_name = xcalloc(1, sizeof(struct lazy_name_thread_data));

	init_dir_mutex();

	/*
	 * Phase 1:
	 * Build "istate->dir_hash" using n "dir" threads (and a read-only index).
	 */
	for (t = 0; t < lazy_nr_dir_threads; t++) {
		struct lazy_dir_thread_data *td_dir_t = td_dir + t;
		td_dir_t->istate = istate;
		td_dir_t->lazy_entries = lazy_entries;
		td_dir_t->k_start = k_start;
		k_start += nr_each;
		if (k_start > istate->cache_nr)
			k_start = istate->cache_nr;
		td_dir_t->k_end = k_start;
		err = pthread_create(&td_dir_t->pthread, NULL, lazy_dir_thread_proc, td_dir_t);
		if (err)
			die(_("unable to create lazy_dir thread: %s"), strerror(err));
	}
	for (t = 0; t < lazy_nr_dir_threads; t++) {
		struct lazy_dir_thread_data *td_dir_t = td_dir + t;
		if (pthread_join(td_dir_t->pthread, NULL))
			die("unable to join lazy_dir_thread");
	}

	/*
	 * Phase 2:
	 * Iterate over all index entries and add them to the "istate->name_hash"
	 * using a single "name" background thread.
	 * (Testing showed it wasn't worth running more than 1 thread for this.)
	 *
	 * Meanwhile, finish updating the parent directory ref-counts for each
	 * index entry using the current thread. (This step is very fast and
	 * doesn't need threading.)
	 */
	td_name->istate = istate;
	td_name->lazy_entries = lazy_entries;
	err = pthread_create(&td_name->pthread, NULL, lazy_name_thread_proc, td_name);
	if (err)
		die(_("unable to create lazy_name thread: %s"), strerror(err));

	lazy_update_dir_ref_counts(istate, lazy_entries);

	err = pthread_join(td_name->pthread, NULL);
	if (err)
		die(_("unable to join lazy_name thread: %s"), strerror(err));

	cleanup_dir_mutex();

	free(td_name);
	free(td_dir);
	free(lazy_entries);
}

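/*
 * Worked example (not part of the original file): with cache_nr = 10,000
 * and lazy_nr_dir_threads = 5, nr_each = DIV_ROUND_UP(10000, 5) = 2000,
 * so the "dir" threads get the ranges [0,2000), [2000,4000), ...,
 * [8000,10000) and each fills its own slice of lazy_entries without
 * any locking.
 */
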
static void lazy_init_name_hash(struct index_state *istate)
{
	if (istate->name_hash_initialized)
		return;
	trace_performance_enter();
	hashmap_init(&istate->name_hash, cache_entry_cmp, NULL, istate->cache_nr);
	hashmap_init(&istate->dir_hash, dir_entry_cmp, NULL, istate->cache_nr);

	if (lookup_lazy_params(istate)) {
		/*
		 * Disable item counting and automatic rehashing because
		 * we do per-chain (mod n) locking rather than whole hashmap
		 * locking and we need to prevent the table-size from changing
		 * and bucket items from being redistributed.
		 */
		hashmap_disable_item_counting(&istate->dir_hash);
		threaded_lazy_init_name_hash(istate);
		hashmap_enable_item_counting(&istate->dir_hash);
	} else {
		int nr;
		for (nr = 0; nr < istate->cache_nr; nr++)
			hash_index_entry(istate, istate->cache[nr]);
	}

	istate->name_hash_initialized = 1;
	trace_performance_leave("initialize name hash");
}

/*
 * A test routine for t/helper/ sources.
 *
 * Returns the number of threads used or 0 when
 * the non-threaded code path was used.
 *
 * Requesting threading WILL NOT override guards
 * in lookup_lazy_params().
 */
int test_lazy_init_name_hash(struct index_state *istate, int try_threaded)
{
	lazy_nr_dir_threads = 0;
	lazy_try_threaded = try_threaded;

	lazy_init_name_hash(istate);

	return lazy_nr_dir_threads;
}

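/*
 * Usage sketch (not part of the original file): after an index has been
 * read into "istate", a t/helper test can exercise both code paths:
 *
 *	nr_threads = test_lazy_init_name_hash(istate, 1);
 *	if (!nr_threads)
 *		warning("fell back to the single-threaded code path");
 *
 * A zero return means one of the guards in lookup_lazy_params()
 * (case-sensitive setup, small index, single CPU) kept threading off.
 */
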
void add_name_hash(struct index_state *istate, struct cache_entry *ce)
{
	if (istate->name_hash_initialized)
		hash_index_entry(istate, ce);
}

void remove_name_hash(struct index_state *istate, struct cache_entry *ce)
{
	if (!istate->name_hash_initialized || !(ce->ce_flags & CE_HASHED))
		return;
	ce->ce_flags &= ~CE_HASHED;
	hashmap_remove(&istate->name_hash, &ce->ent, ce);

	if (ignore_case)
		remove_dir_entry(istate, ce);
}

static int slow_same_name(const char *name1, int len1, const char *name2, int len2)
{
	if (len1 != len2)
		return 0;

	while (len1--) {
		unsigned char c1 = *name1++;
		unsigned char c2 = *name2++;
		if (c1 != c2 && toupper(c1) != toupper(c2))
			return 0;
	}
	return 1;
}

static int same_name(const struct cache_entry *ce, const char *name, int namelen, int icase)
{
	int len = ce_namelen(ce);

	/*
	 * Always do exact compare, even if we want a case-ignoring comparison;
	 * we do the quick exact one first, because it will be the common case.
	 */
	if (len == namelen && !memcmp(name, ce->name, len))
		return 1;

	if (!icase)
		return 0;

	return slow_same_name(name, namelen, ce->name, len);
}

int index_dir_exists(struct index_state *istate, const char *name, int namelen)
{
	struct dir_entry *dir;

	lazy_init_name_hash(istate);
	dir = find_dir_entry(istate, name, namelen);
	return dir && dir->nr;
}

void adjust_dirname_case(struct index_state *istate, char *name)
{
	const char *startPtr = name;
	const char *ptr = startPtr;

	lazy_init_name_hash(istate);
	while (*ptr) {
		while (*ptr && *ptr != '/')
			ptr++;

		if (*ptr == '/') {
			struct dir_entry *dir;

			dir = find_dir_entry(istate, name, ptr - name);
			if (dir) {
				memcpy((void *)startPtr, dir->name + (startPtr - name), ptr - startPtr);
				startPtr = ptr + 1;
			}
			ptr++;
		}
	}
}

struct cache_entry *index_file_exists(struct index_state *istate, const char *name, int namelen, int icase)
{
	struct cache_entry *ce;
	unsigned int hash = memihash(name, namelen);

	lazy_init_name_hash(istate);

	ce = hashmap_get_entry_from_hash(&istate->name_hash, hash, NULL,
					 struct cache_entry, ent);
	hashmap_for_each_entry_from(&istate->name_hash, ce,
				    struct cache_entry, ent) {
		if (same_name(ce, name, namelen, icase))
			return ce;
	}
	return NULL;
}

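/*
 * Usage sketch (not part of the original file): with icase != 0 a lookup
 * of "README" also matches an index entry spelled "ReadMe":
 *
 *	struct cache_entry *ce =
 *		index_file_exists(istate, "README", strlen("README"), ignore_case);
 *	if (ce)
 *		printf("found as '%s'\n", ce->name);
 *
 * With icase == 0 only an exact byte-for-byte name match is returned.
 */
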
void free_name_hash(struct index_state *istate)
{
	if (!istate->name_hash_initialized)
		return;
	istate->name_hash_initialized = 0;

	hashmap_free(&istate->name_hash, 0);
	hashmap_free(&istate->dir_hash, 1);
}