/*
 * Copyright (C) 2005 Junio C Hamano
 */
#include "cache.h"
#include "commit.h"
#include "diff.h"
#include "diffcore.h"
#include "revision.h"
#include "cache-tree.h"
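/*
 * Low-level diff helpers shared by the diff family of commands:
 *
 *   run_diff_files() - compare the index with the working tree
 *                      (the "diff-files" plumbing)
 *   run_diff_index() - compare a tree object with the index
 *                      (the "diff-index" plumbing)
 *   do_diff_cache()  - internal variant of diff-index used by
 *                      git-blame, callable repeatedly in-process
 */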
int run_diff_files(struct rev_info *revs, int silent_on_removed)
{
	int entries, i;
	int diff_unmerged_stage = revs->max_count;

	if (diff_unmerged_stage < 0)
		diff_unmerged_stage = 2;
	entries = read_cache();
	for (i = 0; i < entries; i++) {
		struct stat st;
		unsigned int oldmode, newmode;
		struct cache_entry *ce = active_cache[i];
		int changed;

		if (!ce_path_match(ce, revs->prune_data))
			continue;

		if (ce_stage(ce)) {
			struct combine_diff_path *dpath;
			int num_compare_stages = 0;
			int path_len;

			path_len = ce_namelen(ce);
			dpath = xmalloc(combine_diff_path_size(5, path_len));
			dpath->path = (char *) &(dpath->parent[5]);
			dpath->next = NULL;
			dpath->len = path_len;
			memcpy(dpath->path, ce->name, path_len);
			dpath->path[path_len] = '\0';
			dpath->mode = 0;
			memset(&(dpath->parent[0]), 0,
			       sizeof(struct combine_diff_parent)*5);

			if (lstat(ce->name, &st) < 0) {
				if (errno != ENOENT && errno != ENOTDIR) {
					perror(ce->name);
					continue;
				}
				if (silent_on_removed)
					continue;
			}
			else
				dpath->mode = canon_mode(st.st_mode);
			while (i < entries) {
				struct cache_entry *nce = active_cache[i];
				int stage;

				if (strcmp(ce->name, nce->name))
					break;

				/* Stage #2 (ours) is the first parent,
				 * stage #3 (theirs) is the second.
				 */
				stage = ce_stage(nce);
				if (2 <= stage) {
					int mode = ntohl(nce->ce_mode);
					num_compare_stages++;
					hashcpy(dpath->parent[stage-2].sha1, nce->sha1);
					dpath->parent[stage-2].mode = canon_mode(mode);
					dpath->parent[stage-2].status = DIFF_STATUS_MODIFIED;
				}

				/* diff against the proper unmerged stage */
				if (stage == diff_unmerged_stage)
					ce = nce;
				i++;
			}
			/* Compensate for the loop update above */
			i--;
			if (revs->combine_merges && num_compare_stages == 2) {
				show_combined_diff(dpath, 2,
						   revs->dense_combined_merges, revs);
				free(dpath);
				continue;
			}
			free(dpath);

			/*
			 * Show the diff for the 'ce' if we found the one
			 * from the desired stage.
			 */
			diff_unmerge(&revs->diffopt, ce->name, 0, null_sha1);
			if (ce_stage(ce) != diff_unmerged_stage)
				continue;
		}
		if (lstat(ce->name, &st) < 0) {
			if (errno != ENOENT && errno != ENOTDIR) {
				perror(ce->name);
				continue;
			}
			if (silent_on_removed)
				continue;
			diff_addremove(&revs->diffopt, '-', ntohl(ce->ce_mode),
				       ce->sha1, ce->name, NULL);
			continue;
		}
		changed = ce_match_stat(ce, &st, 0);
		if (!changed && !revs->diffopt.find_copies_harder)
			continue;
		oldmode = ntohl(ce->ce_mode);
		newmode = canon_mode(st.st_mode);
		if (!trust_executable_bit &&
		    S_ISREG(newmode) && S_ISREG(oldmode) &&
		    ((newmode ^ oldmode) == 0111))
			newmode = oldmode;
		diff_change(&revs->diffopt, oldmode, newmode,
			    ce->sha1, (changed ? null_sha1 : ce->sha1),
			    ce->name, NULL);
	}
	diffcore_std(&revs->diffopt);
	diff_flush(&revs->diffopt);
	return 0;
}
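/*
 * A minimal sketch of how a command-level caller might drive
 * run_diff_files(); this mirrors the diff-files builtin in spirit, but
 * the function name and the bare-bones "-q" handling below are
 * illustrative assumptions rather than a copy of the real option parser.
 */
#if 0
static int example_diff_files(int argc, const char **argv, const char *prefix)
{
	struct rev_info rev;
	int silent_on_removed = 0;
	int i;

	init_revisions(&rev, prefix);
	argc = setup_revisions(argc, argv, &rev, NULL);
	for (i = 1; i < argc; i++) {
		if (!strcmp(argv[i], "-q"))
			silent_on_removed = 1;	/* stay quiet about removed files */
	}
	return run_diff_files(&rev, silent_on_removed);
}
#endif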
/* A file entry went away or appeared */
static void diff_index_show_file(struct rev_info *revs,
				 const char *prefix,
				 struct cache_entry *ce,
				 unsigned char *sha1, unsigned int mode)
{
	diff_addremove(&revs->diffopt, prefix[0], ntohl(mode),
		       sha1, ce->name, NULL);
}
static int get_stat_data(struct cache_entry *ce,
			 unsigned char **sha1p, unsigned int *modep,
			 int cached, int match_missing)
{
	unsigned char *sha1 = ce->sha1;
	unsigned int mode = ce->ce_mode;

	if (!cached) {
		static unsigned char no_sha1[20];
		struct stat st;
		int changed;
		if (lstat(ce->name, &st) < 0) {
			/* A missing file may still "match" the index entry */
			if (errno == ENOENT && match_missing) {
				*sha1p = sha1;
				*modep = mode;
				return 0;
			}
			return -1;
		}
		changed = ce_match_stat(ce, &st, 0);
		if (changed) {
			mode = ce_mode_from_stat(ce, st.st_mode);
			sha1 = no_sha1;
		}
	}
	*sha1p = sha1;
	*modep = mode;
	return 0;
}
static void show_new_file(struct rev_info *revs,
			  struct cache_entry *new,
			  int cached, int match_missing)
{
	unsigned char *sha1;
	unsigned int mode;

	/* New file in the index: it might actually be different in
	 * the working copy.
	 */
	if (get_stat_data(new, &sha1, &mode, cached, match_missing) < 0)
		return;
	diff_index_show_file(revs, "+", new, sha1, mode);
}
static int show_modified(struct rev_info *revs,
			 struct cache_entry *old,
			 struct cache_entry *new,
			 int report_missing,
			 int cached, int match_missing)
{
	unsigned int mode, oldmode;
	unsigned char *sha1;

	if (get_stat_data(new, &sha1, &mode, cached, match_missing) < 0) {
		if (report_missing)
			diff_index_show_file(revs, "-", old,
					     old->sha1, old->ce_mode);
		return -1;
	}
	if (revs->combine_merges && !cached &&
	    (hashcmp(sha1, old->sha1) || hashcmp(old->sha1, new->sha1))) {
		struct combine_diff_path *p;
		int pathlen = ce_namelen(new);

		p = xmalloc(combine_diff_path_size(2, pathlen));
		p->path = (char *) &p->parent[2];
		p->next = NULL;
		p->len = pathlen;
		memcpy(p->path, new->name, pathlen);
		p->path[pathlen] = 0;
		p->mode = ntohl(mode);
		memset(p->parent, 0, 2 * sizeof(struct combine_diff_parent));
		p->parent[0].status = DIFF_STATUS_MODIFIED;
		p->parent[0].mode = ntohl(new->ce_mode);
		hashcpy(p->parent[0].sha1, new->sha1);
		p->parent[1].status = DIFF_STATUS_MODIFIED;
		p->parent[1].mode = ntohl(old->ce_mode);
		hashcpy(p->parent[1].sha1, old->sha1);
		show_combined_diff(p, 2, revs->dense_combined_merges, revs);
		free(p);
		return 0;
	}
	oldmode = old->ce_mode;
	if (mode == oldmode && !hashcmp(sha1, old->sha1) &&
	    !revs->diffopt.find_copies_harder)
		return 0;

	mode = ntohl(mode);
	oldmode = ntohl(oldmode);
	diff_change(&revs->diffopt, oldmode, mode,
		    old->sha1, sha1, old->name, NULL);
	return 0;
}
static int diff_cache(struct rev_info *revs,
		      struct cache_entry **ac, int entries,
		      const char **pathspec,
		      int cached, int match_missing)
{
	while (entries) {
		struct cache_entry *ce = *ac;
		int same = (entries > 1) && ce_same_name(ce, ac[1]);

		if (!ce_path_match(ce, pathspec))
			goto skip_entry;
		switch (ce_stage(ce)) {
		case 0:
			/* No stage 1 entry? That means it's a new file */
			if (!same) {
				show_new_file(revs, ce, cached, match_missing);
				break;
			}
			/* Show difference between old and new */
			show_modified(revs, ac[1], ce, 1,
				      cached, match_missing);
			break;
		case 1:
			/* No stage 3 (merge) entry?
			 * That means it's been deleted.
			 */
			if (!same) {
				diff_index_show_file(revs, "-", ce,
						     ce->sha1, ce->ce_mode);
				break;
			}
			/* We come here with ce pointing at stage 1
			 * (original tree) and ac[1] pointing at stage
			 * 3 (unmerged).  show-modified with
			 * report-missing set to false does not say the
			 * file is deleted but returns non-zero if the
			 * work tree does not have it, in which case we
			 * fall through to report the unmerged state.
			 * Otherwise, we show the differences between
			 * the original tree and the work tree.
			 */
			if (!cached &&
			    !show_modified(revs, ce, ac[1], 0,
					   cached, match_missing))
				break;
			diff_unmerge(&revs->diffopt, ce->name,
				     ntohl(ce->ce_mode), ce->sha1);
			break;
		case 3:
			diff_unmerge(&revs->diffopt, ce->name,
				     0, null_sha1);
			break;
		default:
			die("impossible cache entry stage");
		}

skip_entry:
		/*
		 * Ignore all the different stages for this file,
		 * we've handled the relevant cases now.
		 */
		do {
			ac++;
			entries--;
		} while (entries && ce_same_name(ce, ac[0]));
	}
	return 0;
}
/*
 * This turns all merge entries into "stage 3". That guarantees that
 * when we read in the new tree (into "stage 1"), we won't lose sight
 * of the fact that we had unmerged entries.
 */
static void mark_merge_entries(void)
{
	int i;
	for (i = 0; i < active_nr; i++) {
		struct cache_entry *ce = active_cache[i];
		if (!ce_stage(ce))
			continue;
		ce->ce_flags |= htons(CE_STAGEMASK);
	}
}
int run_diff_index(struct rev_info *revs, int cached)
{
	int ret;
	struct object *ent;
	struct tree *tree;
	const char *tree_name;
	int match_missing = 0;

	/*
	 * Backward compatibility wart: "diff-index -m" does not mean
	 * "do not ignore merges" but something entirely different,
	 * namely "treat work-tree files that are missing as if they
	 * matched the index".
	 */
	if (!revs->ignore_merges)
		match_missing = 1;

	if (read_cache() < 0) {
		perror("read_cache");
		return -1;
	}
	mark_merge_entries();
	ent = revs->pending.objects[0].item;
	tree_name = revs->pending.objects[0].name;
	tree = parse_tree_indirect(ent->sha1);
	if (!tree)
		return error("bad tree object %s", tree_name);
	if (read_tree(tree, 1, revs->prune_data))
		return error("unable to read tree object %s", tree_name);
	ret = diff_cache(revs, active_cache, active_nr, revs->prune_data,
			 cached, match_missing);
	diffcore_std(&revs->diffopt);
	diff_flush(&revs->diffopt);
	return ret;
}
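/*
 * A minimal sketch of how a command-level caller might drive
 * run_diff_index(); modelled loosely on the diff-index builtin.  The
 * function name and the bare-bones "--cached" handling below are
 * illustrative assumptions, not the real option parser.
 */
#if 0
static int example_diff_index(int argc, const char **argv, const char *prefix)
{
	struct rev_info rev;
	int cached = 0;
	int i;

	init_revisions(&rev, prefix);
	/* setup_revisions() resolves the <tree-ish> into rev.pending */
	argc = setup_revisions(argc, argv, &rev, NULL);
	for (i = 1; i < argc; i++) {
		if (!strcmp(argv[i], "--cached"))
			cached = 1;	/* compare the tree with the index, not the worktree */
	}
	if (rev.pending.nr != 1)
		die("diff-index needs exactly one tree-ish");
	return run_diff_index(&rev, cached);
}
#endif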
int do_diff_cache(const unsigned char *tree_sha1, struct diff_options *opt)
{
	struct tree *tree;
	struct rev_info revs;
	int i;
	struct cache_entry **dst;
	struct cache_entry *last = NULL;

	/*
	 * This is used by git-blame to run diff-cache internally;
	 * it potentially needs to repeatedly run this, so we will
	 * start by removing the higher order entries the last round
	 * left behind.
	 */
	dst = active_cache;
	for (i = 0; i < active_nr; i++) {
		struct cache_entry *ce = active_cache[i];
		if (ce_stage(ce)) {
			if (last && !strcmp(ce->name, last->name))
				continue;
			cache_tree_invalidate_path(active_cache_tree,
						   ce->name);
			last = ce;
			ce->ce_mode = 0;
			ce->ce_flags &= ~htons(CE_STAGEMASK);
		}
		*dst++ = ce;
	}
	active_nr = dst - active_cache;
	init_revisions(&revs, NULL);
	revs.prune_data = opt->paths;
	tree = parse_tree_indirect(tree_sha1);
	if (!tree)
		die("bad tree object %s", sha1_to_hex(tree_sha1));
	if (read_tree(tree, 1, opt->paths))
		return error("unable to read tree %s", sha1_to_hex(tree_sha1));
	return diff_cache(&revs, active_cache, active_nr, revs.prune_data,
			  1, 0);
}