 * Copyright (C) 2005 Junio C Hamano
#include "cache-tree.h"
#include "unpack-trees.h"
 * Has the work tree entity been removed?
 * Return 1 if it was removed from the work tree, 0 if an entity to be
 * compared with the cache entry ce still exists (the latter includes
 * the case where a directory that is not a submodule repository
 * exists for ce that is a submodule -- it is a submodule that is not
 * checked out).  Return negative for an error.
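 *
 * A typical caller (see run_diff_files() below for the real thing)
 * uses it roughly like this sketch:
 *
 *	changed = check_removed(ce, &st);
 *	if (changed < 0)
 *		... lstat() failed, report the error ...
 *	else if (changed)
 *		... the path is gone from the work tree ...
 *	else
 *		... compare ce against the stat data in st ...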
static int check_removed(const struct cache_entry *ce, struct stat *st)
	if (lstat(ce->name, st) < 0) {
		if (errno != ENOENT && errno != ENOTDIR)
	if (has_symlink_leading_path(ce_namelen(ce), ce->name))
	if (S_ISDIR(st->st_mode)) {
		unsigned char sub[20];
		 * If ce is already a gitlink, we can have a plain
		 * directory (i.e. the submodule is not checked out),
		 * or a checked out submodule.  In either case, nothing was
		 * removed from the work tree, so we will return 0.
		 * Otherwise, if the directory is not a submodule
		 * repository, that means ce which was a blob turned into
		 * a directory --- the blob was removed!
		if (!S_ISGITLINK(ce->ce_mode) &&
		    resolve_gitlink_ref(ce->name, "HEAD", sub))
int run_diff_files(struct rev_info *revs, unsigned int option)
	int diff_unmerged_stage = revs->max_count;
	int silent_on_removed = option & DIFF_SILENT_ON_REMOVED;
	unsigned ce_option = ((option & DIFF_RACY_IS_MODIFIED)
			      ? CE_MATCH_RACY_IS_DIRTY : 0);
	char symcache[PATH_MAX];
	if (diff_unmerged_stage < 0)
		diff_unmerged_stage = 2;
	for (i = 0; i < entries; i++) {
		unsigned int oldmode, newmode;
		struct cache_entry *ce = active_cache[i];
		if (DIFF_OPT_TST(&revs->diffopt, QUIET) &&
		    DIFF_OPT_TST(&revs->diffopt, HAS_CHANGES))
		if (!ce_path_match(ce, revs->prune_data))
			struct combine_diff_path *dpath;
			int num_compare_stages = 0;
			path_len = ce_namelen(ce);
			dpath = xmalloc(combine_diff_path_size(5, path_len));
			dpath->path = (char *) &(dpath->parent[5]);
			dpath->len = path_len;
			memcpy(dpath->path, ce->name, path_len);
			dpath->path[path_len] = '\0';
			memset(&(dpath->parent[0]), 0,
			       sizeof(struct combine_diff_parent)*5);
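			/*
			 * Layout note (a reading of the code above, not a
			 * spec): combine_diff_path_size(5, path_len) is
			 * expected to cover the struct itself, five
			 * combine_diff_parent slots and the NUL-terminated
			 * name in one allocation, which is why dpath->path
			 * points just past dpath->parent[5] and why the
			 * five parent slots are cleared in bulk here.
			 */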
			changed = check_removed(ce, &st);
				dpath->mode = ce_mode_from_stat(ce, st.st_mode);
				if (silent_on_removed)
			while (i < entries) {
				struct cache_entry *nce = active_cache[i];
				if (strcmp(ce->name, nce->name))
				/* Stage #2 (ours) is the first parent,
				 * stage #3 (theirs) is the second.
				stage = ce_stage(nce);
					int mode = nce->ce_mode;
					num_compare_stages++;
					hashcpy(dpath->parent[stage-2].sha1, nce->sha1);
					dpath->parent[stage-2].mode = ce_mode_from_stat(nce, mode);
					dpath->parent[stage-2].status =
						DIFF_STATUS_MODIFIED;
				/* diff against the proper unmerged stage */
				if (stage == diff_unmerged_stage)
			 * Compensate for loop update
			if (revs->combine_merges && num_compare_stages == 2) {
				show_combined_diff(dpath, 2,
						   revs->dense_combined_merges,
			 * Show the diff for the 'ce' if we found the one
			 * from the desired stage.
			diff_unmerge(&revs->diffopt, ce->name, 0, null_sha1);
			if (ce_stage(ce) != diff_unmerged_stage)
		changed = check_removed(ce, &st);
			if (silent_on_removed)
			diff_addremove(&revs->diffopt, '-', ce->ce_mode,
		changed = ce_match_stat(ce, &st, ce_option);
			ce_mark_uptodate(ce);
			if (!DIFF_OPT_TST(&revs->diffopt, FIND_COPIES_HARDER))
		oldmode = ce->ce_mode;
		newmode = ce_mode_from_stat(ce, st.st_mode);
		diff_change(&revs->diffopt, oldmode, newmode,
			    ce->sha1, (changed ? null_sha1 : ce->sha1),
	diffcore_std(&revs->diffopt);
	diff_flush(&revs->diffopt);
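/*
 * A hypothetical caller sketch (the real users include the diff-files
 * front-end), just to show the expected setup:
 *
 *	struct rev_info rev;
 *
 *	init_revisions(&rev, prefix);
 *	setup_revisions(argc, argv, &rev, NULL);
 *	if (read_cache() < 0)
 *		die("index file corrupt");
 *	run_diff_files(&rev, 0);
 */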
struct oneway_unpack_data {
	struct rev_info *revs;
	char symcache[PATH_MAX];
/* A file entry went away or appeared */
static void diff_index_show_file(struct rev_info *revs,
				 struct cache_entry *ce,
				 const unsigned char *sha1, unsigned int mode)
	diff_addremove(&revs->diffopt, prefix[0], mode,
static int get_stat_data(struct cache_entry *ce,
			 const unsigned char **sha1p,
			 int cached, int match_missing,
			 struct oneway_unpack_data *cbdata)
	const unsigned char *sha1 = ce->sha1;
	unsigned int mode = ce->ce_mode;
		changed = check_removed(ce, &st);
		changed = ce_match_stat(ce, &st, 0);
			mode = ce_mode_from_stat(ce, st.st_mode);
static void show_new_file(struct oneway_unpack_data *cbdata,
			  struct cache_entry *new,
			  int cached, int match_missing)
	const unsigned char *sha1;
	struct rev_info *revs = cbdata->revs;
	 * New file in the index: it might actually be different in
	 * the work tree, but we show it as a creation either way;
	 * get_stat_data() picks the mode and sha1 to report.
	if (get_stat_data(new, &sha1, &mode, cached, match_missing, cbdata) < 0)
	diff_index_show_file(revs, "+", new, sha1, mode);
static int show_modified(struct oneway_unpack_data *cbdata,
			 struct cache_entry *old,
			 struct cache_entry *new,
			 int cached, int match_missing)
	unsigned int mode, oldmode;
	const unsigned char *sha1;
	struct rev_info *revs = cbdata->revs;
	if (get_stat_data(new, &sha1, &mode, cached, match_missing, cbdata) < 0) {
			diff_index_show_file(revs, "-", old,
					     old->sha1, old->ce_mode);
	if (revs->combine_merges && !cached &&
	    (hashcmp(sha1, old->sha1) || hashcmp(old->sha1, new->sha1))) {
		struct combine_diff_path *p;
		int pathlen = ce_namelen(new);
		p = xmalloc(combine_diff_path_size(2, pathlen));
		p->path = (char *) &p->parent[2];
		memcpy(p->path, new->name, pathlen);
		p->path[pathlen] = 0;
		memset(p->parent, 0, 2 * sizeof(struct combine_diff_parent));
		p->parent[0].status = DIFF_STATUS_MODIFIED;
		p->parent[0].mode = new->ce_mode;
		hashcpy(p->parent[0].sha1, new->sha1);
		p->parent[1].status = DIFF_STATUS_MODIFIED;
		p->parent[1].mode = old->ce_mode;
		hashcpy(p->parent[1].sha1, old->sha1);
		show_combined_diff(p, 2, revs->dense_combined_merges, revs);
	oldmode = old->ce_mode;
	if (mode == oldmode && !hashcmp(sha1, old->sha1) &&
	    !DIFF_OPT_TST(&revs->diffopt, FIND_COPIES_HARDER))
	diff_change(&revs->diffopt, oldmode, mode,
		    old->sha1, sha1, old->name);
 * This turns all merge entries into "stage 3". That guarantees that
 * when we read in the new tree (into "stage 1"), we won't lose sight
 * of the fact that we had unmerged entries.
static void mark_merge_entries(void)
	for (i = 0; i < active_nr; i++) {
		struct cache_entry *ce = active_cache[i];
		ce->ce_flags |= CE_STAGEMASK;
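		/*
		 * Note: assuming the stage number lives in the two
		 * CE_STAGEMASK bits of ce_flags, OR-ing the whole mask
		 * in pins the entry at the highest stage (3), which is
		 * exactly the "stage 3" trick described above.
		 */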
 * This gets a mix of an existing index and a tree, one pathname entry
 * at a time. The index entry may be a single stage-0 one, but it could
 * also be multiple unmerged entries (in which case idx_pos/idx_nr will
 * give you the position and number of entries in the index).
static void do_oneway_diff(struct unpack_trees_options *o,
	struct cache_entry *idx,
	struct cache_entry *tree)
	struct oneway_unpack_data *cbdata = o->unpack_data;
	struct rev_info *revs = cbdata->revs;
	int match_missing, cached;
	 * Backward compatibility wart - "diff-index -m" does
	 * not mean "do not ignore merges", but "match_missing".
	 * But with the revision flag parsing, that's found in
	 * "!revs->ignore_merges".
	cached = o->index_only;
	match_missing = !revs->ignore_merges;
	if (cached && idx && ce_stage(idx)) {
		diff_unmerge(&revs->diffopt, idx->name, idx->ce_mode, idx->sha1);
	 * Something added to the tree?
		show_new_file(cbdata, idx, cached, match_missing);
	 * Something removed from the tree?
		diff_index_show_file(revs, "-", tree, tree->sha1, tree->ce_mode);
	/* Show difference between old and new */
	show_modified(cbdata, tree, idx, 1, cached, match_missing);
static inline void skip_same_name(struct cache_entry *ce, struct unpack_trees_options *o)
	int len = ce_namelen(ce);
	const struct index_state *index = o->src_index;
	while (o->pos < index->cache_nr) {
		struct cache_entry *next = index->cache[o->pos];
		if (len != ce_namelen(next))
		if (memcmp(ce->name, next->name, len))
 * The unpack_trees() interface is designed for merging, so
 * the different source entries are designed primarily for
 * the source trees, with the old index mainly there to be
 * replaced by the result.
 *
 * For diffing, the index is more important, and we only have a
 * single tree.
 * We're supposed to return how many index entries we want to skip.
 * This wrapper makes it all more readable, and takes care of all
 * the fairly complex unpack_trees() semantic requirements, including
 * the skipping, the path matching, the type conflict cases etc.
static int oneway_diff(struct cache_entry **src, struct unpack_trees_options *o)
	struct cache_entry *idx = src[0];
	struct cache_entry *tree = src[1];
	struct oneway_unpack_data *cbdata = o->unpack_data;
	struct rev_info *revs = cbdata->revs;
	if (idx && ce_stage(idx))
		skip_same_name(idx, o);
	 * Unpack-trees generates a DF/conflict entry if
	 * there was a directory in the index and a tree
	 * in the tree. From a diff standpoint, that's a
	 * delete of the tree and a create of the file.
	if (tree == o->df_conflict_entry)
	if (ce_path_match(idx ? idx : tree, revs->prune_data))
		do_oneway_diff(o, idx, tree);
int run_diff_index(struct rev_info *revs, int cached)
	const char *tree_name;
	struct unpack_trees_options opts;
	struct oneway_unpack_data unpack_cb;
	mark_merge_entries();
	ent = revs->pending.objects[0].item;
	tree_name = revs->pending.objects[0].name;
	tree = parse_tree_indirect(ent->sha1);
		return error("bad tree object %s", tree_name);
	unpack_cb.revs = revs;
	unpack_cb.symcache[0] = '\0';
	memset(&opts, 0, sizeof(opts));
	opts.index_only = cached;
	opts.fn = oneway_diff;
	opts.unpack_data = &unpack_cb;
	opts.src_index = &the_index;
	opts.dst_index = NULL;
	init_tree_desc(&t, tree->buffer, tree->size);
	if (unpack_trees(1, &t, &opts))
	diffcore_std(&revs->diffopt);
	diff_flush(&revs->diffopt);
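/*
 * Hypothetical usage sketch: with exactly one tree-ish queued in
 * revs->pending (the way "git diff-index <tree-ish>" sets things up),
 *
 *	run_diff_index(&revs, 0);
 *
 * compares the tree against the work tree state of the tracked files,
 * while passing a non-zero "cached" compares the tree against the
 * index only, i.e. the --cached behaviour selected via opts.index_only
 * above.
 */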
int do_diff_cache(const unsigned char *tree_sha1, struct diff_options *opt)
	struct rev_info revs;
	struct cache_entry **dst;
	struct cache_entry *last = NULL;
	struct unpack_trees_options opts;
	struct oneway_unpack_data unpack_cb;
	 * This is used by git-blame to run diff-cache internally;
	 * it potentially needs to repeatedly run this, so we will
	 * start by removing the higher order entries that the last
	 * round left behind.
	for (i = 0; i < active_nr; i++) {
		struct cache_entry *ce = active_cache[i];
			if (last && !strcmp(ce->name, last->name))
			cache_tree_invalidate_path(active_cache_tree,
			ce->ce_flags |= CE_REMOVE;
	active_nr = dst - active_cache;
	init_revisions(&revs, NULL);
	revs.prune_data = opt->paths;
	tree = parse_tree_indirect(tree_sha1);
		die("bad tree object %s", sha1_to_hex(tree_sha1));
	unpack_cb.revs = &revs;
	unpack_cb.symcache[0] = '\0';
	memset(&opts, 0, sizeof(opts));
	opts.fn = oneway_diff;
	opts.unpack_data = &unpack_cb;
	opts.src_index = &the_index;
	opts.dst_index = &the_index;
	init_tree_desc(&t, tree->buffer, tree->size);
	if (unpack_trees(1, &t, &opts))