preload-index.c
/*
 * Copyright (C) 2008 Linus Torvalds
 */
#include "cache.h"
#include "pathspec.h"
#include "dir.h"
#include "fsmonitor.h"
#include "config.h"
#include "progress.h"
#include "thread-utils.h"
#include "repository.h"

/*
 * Mostly randomly chosen maximum thread counts: we
 * cap the parallelism at 20 threads, and we want
 * at least 500 lstat() calls per thread for it to
 * be worth starting a thread.
 */
#define MAX_PARALLEL (20)
#define THREAD_COST (500)

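/*
 * Worked example of the heuristic above (illustrative numbers, not taken
 * from this file): an index with 10,000 entries yields 10000 / THREAD_COST
 * = 20 threads, exactly the MAX_PARALLEL cap; 100,000 entries would compute
 * 200 threads and be clamped back to 20; an index with fewer than 1,000
 * entries computes 0 or 1 and the preload stays single-threaded.
 */
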
/*
 * Progress state shared by all worker threads; the mutex serializes
 * updates to the running count 'n' and to the progress meter itself.
 */
struct progress_data {
        unsigned long n;
        struct progress *progress;
        pthread_mutex_t mutex;
};

/*
 * Per-thread work description: each thread preloads the slice of index
 * entries [offset, offset + nr).
 */
struct thread_data {
        pthread_t pthread;
        struct index_state *index;
        struct pathspec pathspec;
        struct progress_data *progress;
        int offset, nr;
};

static void *preload_thread(void *_data)
{
        int nr, last_nr;
        struct thread_data *p = _data;
        struct index_state *index = p->index;
        struct cache_entry **cep = index->cache + p->offset;
        struct cache_def cache = CACHE_DEF_INIT;

        /* Clamp this thread's slice so it does not run past the index. */
        nr = p->nr;
        if (nr + p->offset > index->cache_nr)
                nr = index->cache_nr - p->offset;
        last_nr = nr;

        do {
                struct cache_entry *ce = *cep++;
                struct stat st;

                /*
                 * Skip entries that cannot or need not be refreshed here:
                 * unmerged entries, submodules, entries already known to
                 * be up to date, skip-worktree entries, and entries that
                 * fsmonitor has already validated.
                 */
                if (ce_stage(ce))
                        continue;
                if (S_ISGITLINK(ce->ce_mode))
                        continue;
                if (ce_uptodate(ce))
                        continue;
                if (ce_skip_worktree(ce))
                        continue;
                if (ce->ce_flags & CE_FSMONITOR_VALID)
                        continue;
                /*
                 * Report progress in batches of 32 entries to keep
                 * contention on the shared mutex low.
                 */
                if (p->progress && !(nr & 31)) {
                        struct progress_data *pd = p->progress;

                        pthread_mutex_lock(&pd->mutex);
                        pd->n += last_nr - nr;
                        display_progress(pd->progress, pd->n);
                        pthread_mutex_unlock(&pd->mutex);
                        last_nr = nr;
                }
                if (!ce_path_match(index, ce, &p->pathspec, NULL))
                        continue;
                if (threaded_has_symlink_leading_path(&cache, ce->name, ce_namelen(ce)))
                        continue;
                if (lstat(ce->name, &st))
                        continue;
                if (ie_match_stat(index, ce, &st, CE_MATCH_RACY_IS_DIRTY|CE_MATCH_IGNORE_FSMONITOR))
                        continue;
                /* The file on disk still matches the index entry; remember that. */
                ce_mark_uptodate(ce);
                mark_fsmonitor_valid(index, ce);
        } while (--nr > 0);
        /* Account for the entries processed since the last batched report. */
        if (p->progress) {
                struct progress_data *pd = p->progress;

                pthread_mutex_lock(&pd->mutex);
                display_progress(pd->progress, pd->n + last_nr);
                pthread_mutex_unlock(&pd->mutex);
        }
        cache_def_clear(&cache);
        return NULL;
}

void preload_index(struct index_state *index,
                   const struct pathspec *pathspec,
                   unsigned int refresh_flags)
{
        int threads, i, work, offset;
        struct thread_data data[MAX_PARALLEL];
        struct progress_data pd;

        if (!HAVE_THREADS || !core_preload_index)
                return;

        /*
         * Aim for at least THREAD_COST entries per thread; the
         * GIT_TEST_PRELOAD_INDEX override lets the test suite exercise
         * the threaded path even on tiny repositories.
         */
        threads = index->cache_nr / THREAD_COST;
        if ((index->cache_nr > 1) && (threads < 2) && git_env_bool("GIT_TEST_PRELOAD_INDEX", 0))
                threads = 2;
        if (threads < 2)
                return;
        trace_performance_enter();
        if (threads > MAX_PARALLEL)
                threads = MAX_PARALLEL;
        offset = 0;
        work = DIV_ROUND_UP(index->cache_nr, threads);
        memset(&data, 0, sizeof(data));

        memset(&pd, 0, sizeof(pd));
        if (refresh_flags & REFRESH_PROGRESS && isatty(2)) {
                pd.progress = start_delayed_progress(_("Refreshing index"), index->cache_nr);
                pthread_mutex_init(&pd.mutex, NULL);
        }

        /* Hand each thread a contiguous slice of 'work' index entries. */
        for (i = 0; i < threads; i++) {
                struct thread_data *p = data+i;
                int err;

                p->index = index;
                if (pathspec)
                        copy_pathspec(&p->pathspec, pathspec);
                p->offset = offset;
                p->nr = work;
                if (pd.progress)
                        p->progress = &pd;
                offset += work;
                err = pthread_create(&p->pthread, NULL, preload_thread, p);

                if (err)
                        die(_("unable to create threaded lstat: %s"), strerror(err));
        }
        for (i = 0; i < threads; i++) {
                struct thread_data *p = data+i;
                if (pthread_join(p->pthread, NULL))
                        die("unable to join threaded lstat");
        }
        stop_progress(&pd.progress);

        trace_performance_leave("preload index");
}

/*
 * Convenience wrapper: read the index for 'repo' and then warm up its
 * stat information with preload_index().
 */
int repo_read_index_preload(struct repository *repo,
                            const struct pathspec *pathspec,
                            unsigned int refresh_flags)
{
        int retval = repo_read_index(repo);

        preload_index(repo->index, pathspec, refresh_flags);
        return retval;
}
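
/*
 * Usage sketch (illustrative, not part of this file): a status-like
 * command that wants the index loaded with its stat data already warmed
 * up can call repo_read_index_preload() before walking the cache entries.
 * The function "cmd_example" and its surrounding flow are hypothetical;
 * the call pattern mirrors how existing builtins use this helper.
 */
#if 0
static int cmd_example(struct repository *repo, const struct pathspec *pathspec)
{
        unsigned int i;

        if (repo_read_index_preload(repo, pathspec, REFRESH_PROGRESS) < 0)
                die(_("index file corrupt"));

        /* Most clean entries are now CE_UPTODATE, so per-entry checks are cheap. */
        for (i = 0; i < repo->index->cache_nr; i++) {
                struct cache_entry *ce = repo->index->cache[i];

                if (!ce_uptodate(ce))
                        ; /* candidate for a full refresh or diff */
        }
        return 0;
}
#endif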