/*
 * preload-index.c (from git; gitweb snapshot taken on the early part of
 * the 'jc/test-lazy-prereq' merge)
 */
1 /*
2  * Copyright (C) 2008 Linus Torvalds
3  */
4 #include "cache.h"
5 #include "pathspec.h"
6 #include "dir.h"
7
8 #ifdef NO_PTHREADS
/*
 * Without thread support there is nothing useful to do in parallel,
 * so preloading degenerates to a no-op and read_index_preload()
 * behaves exactly like read_index().
 */
static void preload_index(struct index_state *index,
			  const struct pathspec *pathspec)
{
	/* nothing to do */
}
14 #else
15
16 #include <pthread.h>
17
/*
 * Mostly randomly chosen maximum thread counts: we
 * cap the parallelism to 20 threads, and we want
 * to have at least 500 lstat's per thread for it to
 * be worth starting a thread.
 */
#define MAX_PARALLEL (20)	/* hard cap on worker threads */
#define THREAD_COST (500)	/* minimum index entries per thread */
26
/*
 * Per-thread work description for preload_thread(): each worker
 * refreshes the cached stat information for the index entries in
 * [offset, offset + nr), restricted to "pathspec".
 */
struct thread_data {
	pthread_t pthread;		/* thread handle, used for join */
	struct index_state *index;	/* shared index being preloaded */
	struct pathspec pathspec;	/* per-thread copy of the pathspec */
	int offset, nr;			/* slice of index->cache to process */
};
33
34 static void *preload_thread(void *_data)
35 {
36         int nr;
37         struct thread_data *p = _data;
38         struct index_state *index = p->index;
39         struct cache_entry **cep = index->cache + p->offset;
40         struct cache_def cache;
41
42         memset(&cache, 0, sizeof(cache));
43         nr = p->nr;
44         if (nr + p->offset > index->cache_nr)
45                 nr = index->cache_nr - p->offset;
46
47         do {
48                 struct cache_entry *ce = *cep++;
49                 struct stat st;
50
51                 if (ce_stage(ce))
52                         continue;
53                 if (S_ISGITLINK(ce->ce_mode))
54                         continue;
55                 if (ce_uptodate(ce))
56                         continue;
57                 if (!ce_path_match(ce, &p->pathspec, NULL))
58                         continue;
59                 if (threaded_has_symlink_leading_path(&cache, ce->name, ce_namelen(ce)))
60                         continue;
61                 if (lstat(ce->name, &st))
62                         continue;
63                 if (ie_match_stat(index, ce, &st, CE_MATCH_RACY_IS_DIRTY))
64                         continue;
65                 ce_mark_uptodate(ce);
66         } while (--nr > 0);
67         return NULL;
68 }
69
70 static void preload_index(struct index_state *index,
71                           const struct pathspec *pathspec)
72 {
73         int threads, i, work, offset;
74         struct thread_data data[MAX_PARALLEL];
75
76         if (!core_preload_index)
77                 return;
78
79         threads = index->cache_nr / THREAD_COST;
80         if (threads < 2)
81                 return;
82         if (threads > MAX_PARALLEL)
83                 threads = MAX_PARALLEL;
84         offset = 0;
85         work = DIV_ROUND_UP(index->cache_nr, threads);
86         memset(&data, 0, sizeof(data));
87         for (i = 0; i < threads; i++) {
88                 struct thread_data *p = data+i;
89                 p->index = index;
90                 if (pathspec)
91                         copy_pathspec(&p->pathspec, pathspec);
92                 p->offset = offset;
93                 p->nr = work;
94                 offset += work;
95                 if (pthread_create(&p->pthread, NULL, preload_thread, p))
96                         die("unable to create threaded lstat");
97         }
98         for (i = 0; i < threads; i++) {
99                 struct thread_data *p = data+i;
100                 if (pthread_join(p->pthread, NULL))
101                         die("unable to join threaded lstat");
102         }
103 }
104 #endif
105
/*
 * Read the index, then kick off the parallel lstat() warm-up so that a
 * subsequent refresh finds most entries already marked up to date.
 * Returns whatever read_index() returned.
 */
int read_index_preload(struct index_state *index,
		       const struct pathspec *pathspec)
{
	int ret;

	ret = read_index(index);
	preload_index(index, pathspec);
	return ret;
}