/*
 * GIT - The information manager from hell
 *
 * Copyright (C) Linus Torvalds, 2005
 */
#include "cache-tree.h"
#include "object-store.h"
#include "resolve-undo.h"
#include "run-command.h"
#include "split-index.h"
#include "fsmonitor.h"
#include "thread-utils.h"
#include "sparse-index.h"
#include "csum-file.h"
/* Mask for the name length in ce_flags in the on-disk index */

#define CE_NAMEMASK  (0x0fff)
/*
 * Index extensions.
 *
 * The first letter should be 'A'..'Z' for extensions that are not
 * necessary for a correct operation (i.e. optimization data).
 * When new extensions are added that _need_ to be understood in
 * order to correctly interpret the index file, pick a character that
 * is outside the range, to cause the reader to abort.
 */
#define CACHE_EXT(s) ( (s[0]<<24)|(s[1]<<16)|(s[2]<<8)|(s[3]) )
#define CACHE_EXT_TREE 0x54524545	/* "TREE" */
#define CACHE_EXT_RESOLVE_UNDO 0x52455543 /* "REUC" */
#define CACHE_EXT_LINK 0x6c696e6b	  /* "link" */
#define CACHE_EXT_UNTRACKED 0x554E5452	  /* "UNTR" */
#define CACHE_EXT_FSMONITOR 0x46534D4E	  /* "FSMN" */
#define CACHE_EXT_ENDOFINDEXENTRIES 0x454F4945	/* "EOIE" */
#define CACHE_EXT_INDEXENTRYOFFSETTABLE 0x49454F54 /* "IEOT" */
#define CACHE_EXT_SPARSE_DIRECTORIES 0x73646972 /* "sdir" */
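
/*
 * For illustration (an editor-added example, not upstream code):
 * CACHE_EXT() packs a four-letter extension tag into a big-endian
 * 32-bit value, so the constants above are simply the ASCII spelling
 * of their tags, e.g.
 *
 *	CACHE_EXT("TREE") == ('T'<<24)|('R'<<16)|('E'<<8)|'E' == 0x54524545
 */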
/* changes that can be kept in $GIT_DIR/index (basically all extensions) */
#define EXTMASK (RESOLVE_UNDO_CHANGED | CACHE_TREE_CHANGED | \
		 CE_ENTRY_ADDED | CE_ENTRY_REMOVED | CE_ENTRY_CHANGED | \
		 SPLIT_INDEX_ORDERED | UNTRACKED_CHANGED | FSMONITOR_CHANGED)
/*
 * This is an estimate of the pathname length in the index.  We use
 * this for V4 index files to guess the un-deltafied size of the index
 * in memory because of pathname deltafication.  This is not required
 * for V2/V3 index formats because their pathnames are not compressed.
 * If the initial amount of memory set aside is not sufficient, the
 * mem pool will allocate extra memory.
 */
#define CACHE_ENTRY_PATH_LENGTH 80
static inline struct cache_entry *mem_pool__ce_alloc(struct mem_pool *mem_pool, size_t len)
{
	struct cache_entry *ce;
	ce = mem_pool_alloc(mem_pool, cache_entry_size(len));
	ce->mem_pool_allocated = 1;
	return ce;
}
static inline struct cache_entry *mem_pool__ce_calloc(struct mem_pool *mem_pool, size_t len)
{
	struct cache_entry *ce;
	ce = mem_pool_calloc(mem_pool, 1, cache_entry_size(len));
	ce->mem_pool_allocated = 1;
	return ce;
}
static struct mem_pool *find_mem_pool(struct index_state *istate)
{
	struct mem_pool **pool_ptr;

	if (istate->split_index && istate->split_index->base)
		pool_ptr = &istate->split_index->base->ce_mem_pool;
	else
		pool_ptr = &istate->ce_mem_pool;

	if (!*pool_ptr) {
		*pool_ptr = xmalloc(sizeof(**pool_ptr));
		mem_pool_init(*pool_ptr, 0);
	}

	return *pool_ptr;
}
static const char *alternate_index_output;
static void set_index_entry(struct index_state *istate, int nr, struct cache_entry *ce)
{
	if (S_ISSPARSEDIR(ce->ce_mode))
		istate->sparse_index = 1;

	istate->cache[nr] = ce;
	add_name_hash(istate, ce);
}
static void replace_index_entry(struct index_state *istate, int nr, struct cache_entry *ce)
{
	struct cache_entry *old = istate->cache[nr];

	replace_index_entry_in_base(istate, old, ce);
	remove_name_hash(istate, old);
	discard_cache_entry(old);
	ce->ce_flags &= ~CE_HASHED;
	set_index_entry(istate, nr, ce);
	ce->ce_flags |= CE_UPDATE_IN_BASE;
	mark_fsmonitor_invalid(istate, ce);
	istate->cache_changed |= CE_ENTRY_CHANGED;
}
void rename_index_entry_at(struct index_state *istate, int nr, const char *new_name)
{
	struct cache_entry *old_entry = istate->cache[nr], *new_entry;
	int namelen = strlen(new_name);

	new_entry = make_empty_cache_entry(istate, namelen);
	copy_cache_entry(new_entry, old_entry);
	new_entry->ce_flags &= ~CE_HASHED;
	new_entry->ce_namelen = namelen;
	new_entry->index = 0;
	memcpy(new_entry->name, new_name, namelen + 1);

	cache_tree_invalidate_path(istate, old_entry->name);
	untracked_cache_remove_from_index(istate, old_entry->name);
	remove_index_entry_at(istate, nr);
	add_index_entry(istate, new_entry, ADD_CACHE_OK_TO_ADD|ADD_CACHE_OK_TO_REPLACE);
}
void fill_stat_data(struct stat_data *sd, struct stat *st)
{
	sd->sd_ctime.sec = (unsigned int)st->st_ctime;
	sd->sd_mtime.sec = (unsigned int)st->st_mtime;
	sd->sd_ctime.nsec = ST_CTIME_NSEC(*st);
	sd->sd_mtime.nsec = ST_MTIME_NSEC(*st);
	sd->sd_dev = st->st_dev;
	sd->sd_ino = st->st_ino;
	sd->sd_uid = st->st_uid;
	sd->sd_gid = st->st_gid;
	sd->sd_size = st->st_size;
}
int match_stat_data(const struct stat_data *sd, struct stat *st)
{
	int changed = 0;

	if (sd->sd_mtime.sec != (unsigned int)st->st_mtime)
		changed |= MTIME_CHANGED;
	if (trust_ctime && check_stat &&
	    sd->sd_ctime.sec != (unsigned int)st->st_ctime)
		changed |= CTIME_CHANGED;

#ifdef USE_NSEC
	if (check_stat && sd->sd_mtime.nsec != ST_MTIME_NSEC(*st))
		changed |= MTIME_CHANGED;
	if (trust_ctime && check_stat &&
	    sd->sd_ctime.nsec != ST_CTIME_NSEC(*st))
		changed |= CTIME_CHANGED;
#endif

	if (check_stat) {
		if (sd->sd_uid != (unsigned int) st->st_uid ||
		    sd->sd_gid != (unsigned int) st->st_gid)
			changed |= OWNER_CHANGED;
		if (sd->sd_ino != (unsigned int) st->st_ino)
			changed |= INODE_CHANGED;
	}

#ifdef USE_STDEV
	/*
	 * st_dev breaks on network filesystems where different
	 * clients will have different views of what "device"
	 * the filesystem is on
	 */
	if (check_stat && sd->sd_dev != (unsigned int) st->st_dev)
		changed |= INODE_CHANGED;
#endif

	if (sd->sd_size != (unsigned int) st->st_size)
		changed |= DATA_CHANGED;

	return changed;
}
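
/*
 * For illustration (editor-added sketch): with the default
 * configuration (trust_ctime and check_stat enabled), touching only a
 * file's timestamps yields MTIME_CHANGED|CTIME_CHANGED, while also
 * growing the file sets DATA_CHANGED via sd_size.  A typical call,
 * assuming an index entry "ce" and a fresh lstat() result in "st":
 *
 *	unsigned int changed = match_stat_data(&ce->ce_stat_data, &st);
 *	if (changed & DATA_CHANGED)
 *		;	-- content must be re-examined
 */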
/*
 * This only updates the "non-critical" parts of the directory
 * cache, i.e. the parts that aren't tracked by Git, and are only
 * used to validate the cache.
 */
void fill_stat_cache_info(struct index_state *istate, struct cache_entry *ce, struct stat *st)
{
	fill_stat_data(&ce->ce_stat_data, st);

	if (assume_unchanged)
		ce->ce_flags |= CE_VALID;

	if (S_ISREG(st->st_mode)) {
		ce_mark_uptodate(ce);
		mark_fsmonitor_valid(istate, ce);
	}
}
static int ce_compare_data(struct index_state *istate,
			   const struct cache_entry *ce,
			   struct stat *st)
{
	int match = -1;
	int fd = git_open_cloexec(ce->name, O_RDONLY);

	if (fd >= 0) {
		struct object_id oid;
		if (!index_fd(istate, &oid, fd, st, OBJ_BLOB, ce->name, 0))
			match = !oideq(&oid, &ce->oid);
		/* index_fd() closed the file descriptor already */
	}
	return match;
}
static int ce_compare_link(const struct cache_entry *ce, size_t expected_size)
{
	int match = -1;
	void *buffer;
	unsigned long size;
	enum object_type type;
	struct strbuf sb = STRBUF_INIT;

	if (strbuf_readlink(&sb, ce->name, expected_size))
		return -1;

	buffer = read_object_file(&ce->oid, &type, &size);
	if (buffer) {
		if (size == sb.len)
			match = memcmp(buffer, sb.buf, size);
		free(buffer);
	}
	strbuf_release(&sb);
	return match;
}
static int ce_compare_gitlink(const struct cache_entry *ce)
{
	struct object_id oid;

	/*
	 * We don't actually require that the .git directory
	 * under GITLINK directory be a valid git directory. It
	 * might even be missing (in case nobody populated that
	 * sub-project).
	 *
	 * If so, we consider it always to match.
	 */
	if (resolve_gitlink_ref(ce->name, "HEAD", &oid) < 0)
		return 0;
	return !oideq(&oid, &ce->oid);
}
static int ce_modified_check_fs(struct index_state *istate,
				const struct cache_entry *ce,
				struct stat *st)
{
	switch (st->st_mode & S_IFMT) {
	case S_IFREG:
		if (ce_compare_data(istate, ce, st))
			return DATA_CHANGED;
		break;
	case S_IFLNK:
		if (ce_compare_link(ce, xsize_t(st->st_size)))
			return DATA_CHANGED;
		break;
	case S_IFDIR:
		if (S_ISGITLINK(ce->ce_mode))
			return ce_compare_gitlink(ce) ? DATA_CHANGED : 0;
		/* else fallthrough */
	default:
		return TYPE_CHANGED;
	}
	return 0;
}
static int ce_match_stat_basic(const struct cache_entry *ce, struct stat *st)
{
	unsigned int changed = 0;

	if (ce->ce_flags & CE_REMOVE)
		return MODE_CHANGED | DATA_CHANGED | TYPE_CHANGED;

	switch (ce->ce_mode & S_IFMT) {
	case S_IFREG:
		changed |= !S_ISREG(st->st_mode) ? TYPE_CHANGED : 0;
		/* We consider only the owner x bit to be relevant for
		 * "mode changes"
		 */
		if (trust_executable_bit &&
		    (0100 & (ce->ce_mode ^ st->st_mode)))
			changed |= MODE_CHANGED;
		break;
	case S_IFLNK:
		if (!S_ISLNK(st->st_mode) &&
		    (has_symlinks || !S_ISREG(st->st_mode)))
			changed |= TYPE_CHANGED;
		break;
	case S_IFGITLINK:
		/* We ignore most of the st_xxx fields for gitlinks */
		if (!S_ISDIR(st->st_mode))
			changed |= TYPE_CHANGED;
		else if (ce_compare_gitlink(ce))
			changed |= DATA_CHANGED;
		return changed;
	default:
		BUG("unsupported ce_mode: %o", ce->ce_mode);
	}

	changed |= match_stat_data(&ce->ce_stat_data, st);

	/* Racily smudged entry? */
	if (!ce->ce_stat_data.sd_size) {
		if (!is_empty_blob_sha1(ce->oid.hash))
			changed |= DATA_CHANGED;
	}

	return changed;
}
static int is_racy_stat(const struct index_state *istate,
			const struct stat_data *sd)
{
	return (istate->timestamp.sec &&
#ifdef USE_NSEC
		 /* nanosecond timestamped files can also be racy! */
		(istate->timestamp.sec < sd->sd_mtime.sec ||
		 (istate->timestamp.sec == sd->sd_mtime.sec &&
		  istate->timestamp.nsec <= sd->sd_mtime.nsec))
#else
		istate->timestamp.sec <= sd->sd_mtime.sec
#endif
		);
}
int is_racy_timestamp(const struct index_state *istate,
		      const struct cache_entry *ce)
{
	return (!S_ISGITLINK(ce->ce_mode) &&
		is_racy_stat(istate, &ce->ce_stat_data));
}
int match_stat_data_racy(const struct index_state *istate,
			 const struct stat_data *sd, struct stat *st)
{
	if (is_racy_stat(istate, sd))
		return MTIME_CHANGED;
	return match_stat_data(sd, st);
}
int ie_match_stat(struct index_state *istate,
		  const struct cache_entry *ce, struct stat *st,
		  unsigned int options)
{
	unsigned int changed;
	int ignore_valid = options & CE_MATCH_IGNORE_VALID;
	int ignore_skip_worktree = options & CE_MATCH_IGNORE_SKIP_WORKTREE;
	int assume_racy_is_modified = options & CE_MATCH_RACY_IS_DIRTY;
	int ignore_fsmonitor = options & CE_MATCH_IGNORE_FSMONITOR;

	if (!ignore_fsmonitor)
		refresh_fsmonitor(istate);
	/*
	 * If it's marked as always valid in the index, it's
	 * valid whatever the checked-out copy says.
	 *
	 * skip-worktree has the same effect with higher precedence
	 */
	if (!ignore_skip_worktree && ce_skip_worktree(ce))
		return 0;
	if (!ignore_valid && (ce->ce_flags & CE_VALID))
		return 0;
	if (!ignore_fsmonitor && (ce->ce_flags & CE_FSMONITOR_VALID))
		return 0;

	/*
	 * Intent-to-add entries have not been added, so the index entry
	 * by definition never matches what is in the work tree until it
	 * actually gets added.
	 */
	if (ce_intent_to_add(ce))
		return DATA_CHANGED | TYPE_CHANGED | MODE_CHANGED;

	changed = ce_match_stat_basic(ce, st);

	/*
	 * Within 1 second of this sequence:
	 * 	echo xyzzy >file && git-update-index --add file
	 * running this command:
	 * 	echo frotz >file
	 * would give a falsely clean cache entry.  The mtime and
	 * length match the cache, and other stat fields do not change.
	 *
	 * We could detect this at update-index time (the cache entry
	 * being registered/updated records the same time as "now")
	 * and delay the return from git-update-index, but that would
	 * effectively mean we can make at most one commit per second,
	 * which is not acceptable.  Instead, we check cache entries
	 * whose mtime are the same as the index file timestamp more
	 * carefully than others.
	 */
	if (!changed && is_racy_timestamp(istate, ce)) {
		if (assume_racy_is_modified)
			changed |= DATA_CHANGED;
		else
			changed |= ce_modified_check_fs(istate, ce, st);
	}

	return changed;
}
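
/*
 * A worked timeline of the race described above (editor-added
 * illustration), on a filesystem with one-second mtime granularity:
 *
 *	t=10.0  echo xyzzy >file
 *	t=10.2  git update-index --add file    (index timestamp == 10)
 *	t=10.7  echo frotz >file               (same size, mtime still 10)
 *
 * All stat fields still match the cached ones, so only the
 * is_racy_timestamp() check (entry mtime >= index timestamp) forces
 * ce_modified_check_fs() to re-read the content and notice the change.
 */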
int ie_modified(struct index_state *istate,
		const struct cache_entry *ce,
		struct stat *st, unsigned int options)
{
	int changed, changed_fs;

	changed = ie_match_stat(istate, ce, st, options);
	if (!changed)
		return 0;
	/*
	 * If the mode or type has changed, there's no point in trying
	 * to refresh the entry - it's not going to match
	 */
	if (changed & (MODE_CHANGED | TYPE_CHANGED))
		return changed;

	/*
	 * Immediately after read-tree or update-index --cacheinfo,
	 * the length field is zero, as we have never even read the
	 * lstat(2) information once, and we cannot trust DATA_CHANGED
	 * returned by ie_match_stat() which in turn was returned by
	 * ce_match_stat_basic() to signal that the filesize of the
	 * blob changed.  We have to actually go to the filesystem to
	 * see if the contents match, and if so, should answer "unchanged".
	 *
	 * The logic does not apply to gitlinks, as ce_match_stat_basic()
	 * already has checked the actual HEAD from the filesystem in the
	 * subproject.  If ie_match_stat() already said it is different,
	 * then we know it is.
	 */
	if ((changed & DATA_CHANGED) &&
	    (S_ISGITLINK(ce->ce_mode) || ce->ce_stat_data.sd_size != 0))
		return changed;

	changed_fs = ce_modified_check_fs(istate, ce, st);
	if (changed_fs)
		return changed | changed_fs;
	return 0;
}
int base_name_compare(const char *name1, int len1, int mode1,
		      const char *name2, int len2, int mode2)
{
	unsigned char c1, c2;
	int len = len1 < len2 ? len1 : len2;
	int cmp;

	cmp = memcmp(name1, name2, len);
	if (cmp)
		return cmp;
	c1 = name1[len];
	c2 = name2[len];
	if (!c1 && S_ISDIR(mode1))
		c1 = '/';
	if (!c2 && S_ISDIR(mode2))
		c2 = '/';
	return (c1 < c2) ? -1 : (c1 > c2) ? 1 : 0;
}
/*
 * df_name_compare() is identical to base_name_compare(), except it
 * compares conflicting directory/file entries as equal. Note that
 * while a directory name compares as equal to a regular file, they
 * then individually compare _differently_ to a filename that has
 * a dot after the basename (because '\0' < '.' < '/').
 *
 * This is used by routines that want to traverse the git namespace
 * but then handle conflicting entries together when possible.
 */
int df_name_compare(const char *name1, int len1, int mode1,
		    const char *name2, int len2, int mode2)
{
	int len = len1 < len2 ? len1 : len2, cmp;
	unsigned char c1, c2;

	cmp = memcmp(name1, name2, len);
	if (cmp)
		return cmp;
	/* Directories and files compare equal (same length, same name) */
	if (len1 == len2)
		return 0;
	c1 = name1[len];
	if (!c1 && S_ISDIR(mode1))
		c1 = '/';
	c2 = name2[len];
	if (!c2 && S_ISDIR(mode2))
		c2 = '/';
	if (c1 == '/' && !c2)
		return 0;
	if (c2 == '/' && !c1)
		return 0;
	return c1 - c2;
}
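
/*
 * For illustration of the ordering rules above (editor-added, because
 * '\0' < '.' < '/'): base_name_compare() sorts a directory "foo" as if
 * it were "foo/", so
 *
 *	"foo" (file) < "foo.c"             since '\0' (0x00) < '.' (0x2e)
 *	"foo.c" < "foo" (directory)        since '.'  (0x2e) < '/' (0x2f)
 *
 * whereas df_name_compare() reports a file "foo" and a directory "foo"
 * as equal, which is exactly the directory/file conflict case.
 */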
int name_compare(const char *name1, size_t len1, const char *name2, size_t len2)
{
	size_t min_len = (len1 < len2) ? len1 : len2;
	int cmp = memcmp(name1, name2, min_len);
	if (cmp)
		return cmp;
	if (len1 < len2)
		return -1;
	if (len1 > len2)
		return 1;
	return 0;
}
int cache_name_stage_compare(const char *name1, int len1, int stage1, const char *name2, int len2, int stage2)
{
	int cmp;

	cmp = name_compare(name1, len1, name2, len2);
	if (cmp)
		return cmp;

	if (stage1 < stage2)
		return -1;
	if (stage1 > stage2)
		return 1;
	return 0;
}
static int index_name_stage_pos(struct index_state *istate, const char *name, int namelen, int stage)
{
	int first, last;

	first = 0;
	last = istate->cache_nr;
	while (last > first) {
		int next = first + ((last - first) >> 1);
		struct cache_entry *ce = istate->cache[next];
		int cmp = cache_name_stage_compare(name, namelen, stage, ce->name, ce_namelen(ce), ce_stage(ce));
		if (!cmp)
			return next;
		if (cmp < 0) {
			last = next;
			continue;
		}
		first = next + 1;
	}

	if (istate->sparse_index &&
	    first > 0) {
		/* Note: first <= istate->cache_nr */
		struct cache_entry *ce = istate->cache[first - 1];

		/*
		 * If we are in a sparse-index _and_ the entry before the
		 * insertion position is a sparse-directory entry that is
		 * an ancestor of 'name', then we need to expand the index
		 * and search again. This will only trigger once, because
		 * thereafter the index is fully expanded.
		 */
		if (S_ISSPARSEDIR(ce->ce_mode) &&
		    ce_namelen(ce) < namelen &&
		    !strncmp(name, ce->name, ce_namelen(ce))) {
			ensure_full_index(istate);
			return index_name_stage_pos(istate, name, namelen, stage);
		}
	}

	return -first-1;
}
int index_name_pos(struct index_state *istate, const char *name, int namelen)
{
	return index_name_stage_pos(istate, name, namelen, 0);
}
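
/*
 * Editor's note on the return convention used throughout this file: a
 * negative result from index_name_pos() encodes the insertion position,
 * so callers typically do
 *
 *	pos = index_name_pos(istate, path, strlen(path));
 *	if (pos < 0)
 *		pos = -pos-1;	-- first entry sorting after "path"
 *
 * i.e. a return value of -1 means "not found, would insert at 0".
 */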
int remove_index_entry_at(struct index_state *istate, int pos)
{
	struct cache_entry *ce = istate->cache[pos];

	record_resolve_undo(istate, ce);
	remove_name_hash(istate, ce);
	save_or_free_index_entry(istate, ce);
	istate->cache_changed |= CE_ENTRY_REMOVED;
	istate->cache_nr--;
	if (pos >= istate->cache_nr)
		return 0;
	MOVE_ARRAY(istate->cache + pos, istate->cache + pos + 1,
		   istate->cache_nr - pos);
	return 1;
}
/*
 * Remove all cache entries marked for removal, that is where
 * CE_REMOVE is set in ce_flags.  This is much more effective than
 * calling remove_index_entry_at() for each entry to be removed.
 */
void remove_marked_cache_entries(struct index_state *istate, int invalidate)
{
	struct cache_entry **ce_array = istate->cache;
	unsigned int i, j;

	for (i = j = 0; i < istate->cache_nr; i++) {
		if (ce_array[i]->ce_flags & CE_REMOVE) {
			if (invalidate) {
				cache_tree_invalidate_path(istate,
							   ce_array[i]->name);
				untracked_cache_remove_from_index(istate,
								  ce_array[i]->name);
			}
			remove_name_hash(istate, ce_array[i]);
			save_or_free_index_entry(istate, ce_array[i]);
		}
		else
			ce_array[j++] = ce_array[i];
	}
	if (j == istate->cache_nr)
		return;
	istate->cache_changed |= CE_ENTRY_REMOVED;
	istate->cache_nr = j;
}
int remove_file_from_index(struct index_state *istate, const char *path)
{
	int pos = index_name_pos(istate, path, strlen(path));
	if (pos < 0)
		pos = -pos-1;
	cache_tree_invalidate_path(istate, path);
	untracked_cache_remove_from_index(istate, path);
	while (pos < istate->cache_nr && !strcmp(istate->cache[pos]->name, path))
		remove_index_entry_at(istate, pos);
	return 0;
}
static int compare_name(struct cache_entry *ce, const char *path, int namelen)
{
	return namelen != ce_namelen(ce) || memcmp(path, ce->name, namelen);
}
static int index_name_pos_also_unmerged(struct index_state *istate,
	const char *path, int namelen)
{
	int pos = index_name_pos(istate, path, namelen);
	struct cache_entry *ce;

	if (pos >= 0)
		return pos;

	/* maybe unmerged? */
	pos = -1 - pos;
	if (pos >= istate->cache_nr ||
	    compare_name((ce = istate->cache[pos]), path, namelen))
		return -1;

	/* order of preference: stage 2, 1, 3 */
	if (ce_stage(ce) == 1 && pos + 1 < istate->cache_nr &&
	    ce_stage((ce = istate->cache[pos + 1])) == 2 &&
	    !compare_name(ce, path, namelen))
		pos++;
	return pos;
}
static int different_name(struct cache_entry *ce, struct cache_entry *alias)
{
	int len = ce_namelen(ce);
	return ce_namelen(alias) != len || memcmp(ce->name, alias->name, len);
}
/*
 * If we add a filename that aliases in the cache, we will use the
 * name that we already have - but we don't want to update the same
 * alias twice, because that implies that there were actually two
 * different files with aliasing names!
 *
 * So we use the CE_ADDED flag to verify that the alias was an old
 * one before we accept it as a replacement.
 */
static struct cache_entry *create_alias_ce(struct index_state *istate,
					   struct cache_entry *ce,
					   struct cache_entry *alias)
{
	int len;
	struct cache_entry *new_entry;

	if (alias->ce_flags & CE_ADDED)
		die(_("will not add file alias '%s' ('%s' already exists in index)"),
		    ce->name, alias->name);

	/* Ok, create the new entry using the name of the existing alias */
	len = ce_namelen(alias);
	new_entry = make_empty_cache_entry(istate, len);
	memcpy(new_entry->name, alias->name, len);
	copy_cache_entry(new_entry, ce);
	save_or_free_index_entry(istate, ce);
	return new_entry;
}
void set_object_name_for_intent_to_add_entry(struct cache_entry *ce)
{
	struct object_id oid;
	if (write_object_file("", 0, blob_type, &oid))
		die(_("cannot create an empty blob in the object database"));
	oidcpy(&ce->oid, &oid);
}
int add_to_index(struct index_state *istate, const char *path, struct stat *st, int flags)
{
	int namelen, was_same;
	mode_t st_mode = st->st_mode;
	struct cache_entry *ce, *alias = NULL;
	unsigned ce_option = CE_MATCH_IGNORE_VALID|CE_MATCH_IGNORE_SKIP_WORKTREE|CE_MATCH_RACY_IS_DIRTY;
	int verbose = flags & (ADD_CACHE_VERBOSE | ADD_CACHE_PRETEND);
	int pretend = flags & ADD_CACHE_PRETEND;
	int intent_only = flags & ADD_CACHE_INTENT;
	int add_option = (ADD_CACHE_OK_TO_ADD|ADD_CACHE_OK_TO_REPLACE|
			  (intent_only ? ADD_CACHE_NEW_ONLY : 0));
	int hash_flags = HASH_WRITE_OBJECT;
	struct object_id oid;

	if (flags & ADD_CACHE_RENORMALIZE)
		hash_flags |= HASH_RENORMALIZE;

	if (!S_ISREG(st_mode) && !S_ISLNK(st_mode) && !S_ISDIR(st_mode))
		return error(_("%s: can only add regular files, symbolic links or git-directories"), path);

	namelen = strlen(path);
	if (S_ISDIR(st_mode)) {
		if (resolve_gitlink_ref(path, "HEAD", &oid) < 0)
			return error(_("'%s' does not have a commit checked out"), path);
		while (namelen && path[namelen-1] == '/')
			namelen--;
	}
	ce = make_empty_cache_entry(istate, namelen);
	memcpy(ce->name, path, namelen);
	ce->ce_namelen = namelen;
	if (!intent_only)
		fill_stat_cache_info(istate, ce, st);
	else
		ce->ce_flags |= CE_INTENT_TO_ADD;

	if (trust_executable_bit && has_symlinks) {
		ce->ce_mode = create_ce_mode(st_mode);
	} else {
		/* If there is an existing entry, pick the mode bits and type
		 * from it, otherwise assume unexecutable regular file.
		 */
		struct cache_entry *ent;
		int pos = index_name_pos_also_unmerged(istate, path, namelen);

		ent = (0 <= pos) ? istate->cache[pos] : NULL;
		ce->ce_mode = ce_mode_from_stat(ent, st_mode);
	}

	/* When core.ignorecase=true, determine if a directory of the same name but differing
	 * case already exists within the Git repository.  If it does, ensure the directory
	 * case of the file being added to the repository matches (is folded into) the existing
	 * entry's directory case.
	 */
	if (ignore_case) {
		adjust_dirname_case(istate, ce->name);
	}
	if (!(flags & ADD_CACHE_RENORMALIZE)) {
		alias = index_file_exists(istate, ce->name,
					  ce_namelen(ce), ignore_case);
		if (alias &&
		    !ce_stage(alias) &&
		    !ie_match_stat(istate, alias, st, ce_option)) {
			/* Nothing changed, really */
			if (!S_ISGITLINK(alias->ce_mode))
				ce_mark_uptodate(alias);
			alias->ce_flags |= CE_ADDED;

			discard_cache_entry(ce);
			return 0;
		}
	}
	if (!intent_only) {
		if (index_path(istate, &ce->oid, path, st, hash_flags)) {
			discard_cache_entry(ce);
			return error(_("unable to index file '%s'"), path);
		}
	} else
		set_object_name_for_intent_to_add_entry(ce);

	if (ignore_case && alias && different_name(ce, alias))
		ce = create_alias_ce(istate, ce, alias);
	ce->ce_flags |= CE_ADDED;

	/* It was suspected to be racily clean, but it turns out to be Ok */
	was_same = (alias &&
		    !ce_stage(alias) &&
		    oideq(&alias->oid, &ce->oid) &&
		    ce->ce_mode == alias->ce_mode);

	if (pretend)
		discard_cache_entry(ce);
	else if (add_index_entry(istate, ce, add_option)) {
		discard_cache_entry(ce);
		return error(_("unable to add '%s' to index"), path);
	}
	if (verbose && !was_same)
		printf("add '%s'\n", path);
	return 0;
}
int add_file_to_index(struct index_state *istate, const char *path, int flags)
{
	struct stat st;
	if (lstat(path, &st))
		die_errno(_("unable to stat '%s'"), path);
	return add_to_index(istate, path, &st, flags);
}
struct cache_entry *make_empty_cache_entry(struct index_state *istate, size_t len)
{
	return mem_pool__ce_calloc(find_mem_pool(istate), len);
}
struct cache_entry *make_empty_transient_cache_entry(size_t len,
						     struct mem_pool *ce_mem_pool)
{
	if (ce_mem_pool)
		return mem_pool__ce_calloc(ce_mem_pool, len);
	return xcalloc(1, cache_entry_size(len));
}
struct cache_entry *make_cache_entry(struct index_state *istate,
				     unsigned int mode,
				     const struct object_id *oid,
				     const char *path,
				     int stage,
				     unsigned int refresh_options)
{
	struct cache_entry *ce, *ret;
	int len;

	if (!verify_path(path, mode)) {
		error(_("invalid path '%s'"), path);
		return NULL;
	}

	len = strlen(path);
	ce = make_empty_cache_entry(istate, len);

	oidcpy(&ce->oid, oid);
	memcpy(ce->name, path, len);
	ce->ce_flags = create_ce_flags(stage);
	ce->ce_namelen = len;
	ce->ce_mode = create_ce_mode(mode);

	ret = refresh_cache_entry(istate, ce, refresh_options);
	if (ret != ce)
		discard_cache_entry(ce);
	return ret;
}
struct cache_entry *make_transient_cache_entry(unsigned int mode,
					       const struct object_id *oid,
					       const char *path,
					       int stage,
					       struct mem_pool *ce_mem_pool)
{
	struct cache_entry *ce;
	int len;

	if (!verify_path(path, mode)) {
		error(_("invalid path '%s'"), path);
		return NULL;
	}

	len = strlen(path);
	ce = make_empty_transient_cache_entry(len, ce_mem_pool);

	oidcpy(&ce->oid, oid);
	memcpy(ce->name, path, len);
	ce->ce_flags = create_ce_flags(stage);
	ce->ce_namelen = len;
	ce->ce_mode = create_ce_mode(mode);

	return ce;
}
/*
 * Chmod an index entry with either +x or -x.
 *
 * Returns -1 if the chmod for the particular cache entry failed (if it's
 * not a regular file), -2 if an invalid flip argument is passed in, 0
 * otherwise.
 */
int chmod_index_entry(struct index_state *istate, struct cache_entry *ce,
		      char flip)
{
	if (!S_ISREG(ce->ce_mode))
		return -1;
	switch (flip) {
	case '+':
		ce->ce_mode |= 0111;
		break;
	case '-':
		ce->ce_mode &= ~0111;
		break;
	default:
		return -2;
	}
	cache_tree_invalidate_path(istate, ce->name);
	ce->ce_flags |= CE_UPDATE_IN_BASE;
	mark_fsmonitor_invalid(istate, ce);
	istate->cache_changed |= CE_ENTRY_CHANGED;

	return 0;
}
int ce_same_name(const struct cache_entry *a, const struct cache_entry *b)
{
	int len = ce_namelen(a);
	return ce_namelen(b) == len && !memcmp(a->name, b->name, len);
}
/*
 * We fundamentally don't like some paths: we don't want
 * dot or dot-dot anywhere, and for obvious reasons don't
 * want to recurse into ".git" either.
 *
 * Also, we don't want double slashes or slashes at the
 * end that can make pathnames ambiguous.
 */
static int verify_dotfile(const char *rest, unsigned mode)
{
	/*
	 * The first character was '.', but that
	 * has already been discarded, we now test
	 * the rest.
	 */

	/* "." is not allowed */
	if (*rest == '\0' || is_dir_sep(*rest))
		return 0;

	switch (*rest) {
	/*
	 * ".git" followed by NUL or slash is bad. Note that we match
	 * case-insensitively here, even if ignore_case is not set.
	 * This outlaws ".GIT" everywhere out of an abundance of caution,
	 * since there's really no good reason to allow it.
	 *
	 * Once we've seen ".git", we can also find ".gitmodules", etc (also
	 * case-insensitively).
	 */
	case 'g':
	case 'G':
		if (rest[1] != 'i' && rest[1] != 'I')
			break;
		if (rest[2] != 't' && rest[2] != 'T')
			break;
		if (rest[3] == '\0' || is_dir_sep(rest[3]))
			return 0;
		if (S_ISLNK(mode)) {
			rest += 3;
			if (skip_iprefix(rest, "modules", &rest) &&
			    (*rest == '\0' || is_dir_sep(*rest)))
				return 0;
		}
		break;
	case '.':
		if (rest[1] == '\0' || is_dir_sep(rest[1]))
			return 0;
	}
	return 1;
}
int verify_path(const char *path, unsigned mode)
{
	char c = 0;

	if (has_dos_drive_prefix(path))
		return 0;

	if (!is_valid_path(path))
		return 0;

	goto inside;
	for (;;) {
		if (!c)
			return 1;
		if (is_dir_sep(c)) {
inside:
			if (protect_hfs) {
				if (is_hfs_dotgit(path))
					return 0;
				if (S_ISLNK(mode)) {
					if (is_hfs_dotgitmodules(path))
						return 0;
				}
			}
			if (protect_ntfs) {
#if defined GIT_WINDOWS_NATIVE || defined __CYGWIN__
				if (c == '\\')
					return 0;
#endif
				if (is_ntfs_dotgit(path))
					return 0;
				if (S_ISLNK(mode)) {
					if (is_ntfs_dotgitmodules(path))
						return 0;
				}
			}

			c = *path++;
			if ((c == '.' && !verify_dotfile(path, mode)) ||
			    is_dir_sep(c))
				return 0;
			/*
			 * allow terminating directory separators for
			 * sparse directory entries.
			 */
			if (c == '\0')
				return S_ISDIR(mode);
		} else if (c == '\\' && protect_ntfs) {
			if (is_ntfs_dotgit(path))
				return 0;
			if (S_ISLNK(mode)) {
				if (is_ntfs_dotgitmodules(path))
					return 0;
			}
		}

		c = *path++;
	}
}
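
/*
 * Editor-added illustration, not an exhaustive list: paths rejected by
 * verify_path() include ".git/config", "a/../b", "a//b" and
 * "dir/.GIT/x" (the ".git" match is deliberately case-insensitive),
 * plus ".gitmodules" look-alikes when the entry is a symlink.  A
 * trailing directory separator is only accepted for sparse directory
 * entries, i.e. when S_ISDIR(mode).
 */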
/*
 * Do we have another file that has the beginning components being a
 * proper superset of the name we're trying to add?
 */
static int has_file_name(struct index_state *istate,
			 const struct cache_entry *ce, int pos, int ok_to_replace)
{
	int retval = 0;
	int len = ce_namelen(ce);
	int stage = ce_stage(ce);
	const char *name = ce->name;

	while (pos < istate->cache_nr) {
		struct cache_entry *p = istate->cache[pos++];

		if (len >= ce_namelen(p))
			break;
		if (memcmp(name, p->name, len))
			break;
		if (ce_stage(p) != stage)
			continue;
		if (p->name[len] != '/')
			continue;
		if (p->ce_flags & CE_REMOVE)
			continue;
		retval = -1;
		if (!ok_to_replace)
			break;
		remove_index_entry_at(istate, --pos);
	}
	return retval;
}
/*
 * Like strcmp(), but also return the offset of the first change.
 * If strings are equal, return the length.
 */
int strcmp_offset(const char *s1, const char *s2, size_t *first_change)
{
	size_t k;

	if (!first_change)
		return strcmp(s1, s2);

	for (k = 0; s1[k] == s2[k]; k++)
		if (s1[k] == '\0')
			break;

	*first_change = k;
	return (unsigned char)s1[k] - (unsigned char)s2[k];
}
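
/*
 * For illustration (editor-added):
 *
 *	strcmp_offset("dir/file_A", "dir/file_B", &off)
 *		returns < 0 and sets off = 9   (index of 'A' vs 'B')
 *	strcmp_offset("abc", "abc", &off)
 *		returns 0 and sets off = 3     (the common length)
 */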
/*
 * Do we have another file with a pathname that is a proper
 * subset of the name we're trying to add?
 *
 * That is, is there another file in the index with a path
 * that matches a sub-directory in the given entry?
 */
static int has_dir_name(struct index_state *istate,
			const struct cache_entry *ce, int pos, int ok_to_replace)
{
	int retval = 0;
	int stage = ce_stage(ce);
	const char *name = ce->name;
	const char *slash = name + ce_namelen(ce);
	size_t len_eq_last;
	int cmp_last = 0;

	/*
	 * We are frequently called during an iteration on a sorted
	 * list of pathnames and while building a new index.  Therefore,
	 * there is a high probability that this entry will eventually
	 * be appended to the index, rather than inserted in the middle.
	 * If we can confirm that, we can avoid binary searches on the
	 * components of the pathname.
	 *
	 * Compare the entry's full path with the last path in the index.
	 */
	if (istate->cache_nr > 0) {
		cmp_last = strcmp_offset(name,
			istate->cache[istate->cache_nr - 1]->name,
			&len_eq_last);
		if (cmp_last > 0) {
			if (len_eq_last == 0) {
				/*
				 * The entry sorts AFTER the last one in the
				 * index and their paths have no common prefix,
				 * so there cannot be a F/D conflict.
				 */
				return retval;
			} else {
				/*
				 * The entry sorts AFTER the last one in the
				 * index, but has a common prefix.  Fall through
				 * to the loop below to disect the entry's path
				 * and see where the difference is.
				 */
			}
		} else if (cmp_last == 0) {
			/*
			 * The entry exactly matches the last one in the
			 * index, but because of multiple stage and CE_REMOVE
			 * items, we fall through and let the regular search
			 * code handle it.
			 */
		}
	}

	for (;;) {
		size_t len;

		for (;;) {
			if (*--slash == '/')
				break;
			if (slash <= ce->name)
				return retval;
		}
		len = slash - name;

		if (cmp_last > 0) {
			/*
			 * (len + 1) is a directory boundary (including
			 * the trailing slash).  And since the loop is
			 * decrementing "slash", the first iteration is
			 * the longest directory prefix; subsequent
			 * iterations consider parent directories.
			 */

			if (len + 1 <= len_eq_last) {
				/*
				 * The directory prefix (including the trailing
				 * slash) also appears as a prefix in the last
				 * entry, so the remainder cannot collide (because
				 * strcmp said the whole path was greater).
				 *
				 * EQ: last: xxx/A
				 *     this: xxx/B
				 *
				 * LT: last: xxx/file_A
				 *     this: xxx/file_B
				 */
				return retval;
			}

			if (len > len_eq_last) {
				/*
				 * This part of the directory prefix (excluding
				 * the trailing slash) is longer than the known
				 * equal portions, so this sub-directory cannot
				 * collide with a file.
				 *
				 * GT: last: xxxA
				 *     this: xxxB/file
				 */
				return retval;
			}

			/*
			 * This is a possible collision. Fall through and
			 * let the regular search code handle it.
			 *
			 * last: xxx
			 * this: xxx/file
			 */
		}

		pos = index_name_stage_pos(istate, name, len, stage);
		if (pos >= 0) {
			/*
			 * Found one, but not so fast.  This could
			 * be a marker that says "I was here, but
			 * I am being removed".  Such an entry is
			 * not a part of the resulting tree, and
			 * it is Ok to have a directory at the same
			 * path.
			 */
			if (!(istate->cache[pos]->ce_flags & CE_REMOVE)) {
				retval = -1;
				if (!ok_to_replace)
					break;
				remove_index_entry_at(istate, pos);
				continue;
			}
		}
		else
			pos = -pos-1;

		/*
		 * Trivial optimization: if we find an entry that
		 * already matches the sub-directory, then we know
		 * we're ok, and we can exit.
		 */
		while (pos < istate->cache_nr) {
			struct cache_entry *p = istate->cache[pos];
			if ((ce_namelen(p) <= len) ||
			    (p->name[len] != '/') ||
			    memcmp(p->name, name, len))
				break; /* not our subdirectory */
			if (ce_stage(p) == stage && !(p->ce_flags & CE_REMOVE))
				/*
				 * p is at the same stage as our entry, and
				 * is a subdirectory of what we are looking
				 * at, so we cannot have conflicts at our
				 * level or anything shorter.
				 */
				return retval;
			pos++;
		}
	}

	return retval;
}
/* We may be in a situation where we already have path/file and path
 * is being added, or we already have path and path/file is being
 * added.  Either one would result in a nonsense tree that has path
 * twice when git-write-tree tries to write it out.  Prevent it.
 *
 * If ok-to-replace is specified, we remove the conflicting entries
 * from the cache so the caller should recompute the insert position.
 * When this happens, we return non-zero.
 */
static int check_file_directory_conflict(struct index_state *istate,
					 const struct cache_entry *ce,
					 int pos, int ok_to_replace)
{
	int retval;

	/*
	 * When ce is an "I am going away" entry, we allow it to be added
	 */
	if (ce->ce_flags & CE_REMOVE)
		return 0;

	/*
	 * We check if the path is a sub-path of a subsequent pathname
	 * first, since removing those will not change the position
	 * in the index.
	 */
	retval = has_file_name(istate, ce, pos, ok_to_replace);

	/*
	 * Then check if the path might have a clashing sub-directory
	 * before it.
	 */
	return retval + has_dir_name(istate, ce, pos, ok_to_replace);
}
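
/*
 * For illustration (editor-added): if the index already contains
 * "path/file" and we try to add "path" (or vice versa), git-write-tree
 * would emit "path" both as a blob and as a tree.  With
 * ADD_CACHE_OK_TO_REPLACE the conflicting entries are removed and the
 * new entry wins; without it, add_index_entry_with_check() below
 * reports "'path' appears as both a file and as a directory".
 */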
static int add_index_entry_with_check(struct index_state *istate, struct cache_entry *ce, int option)
{
	int pos;
	int ok_to_add = option & ADD_CACHE_OK_TO_ADD;
	int ok_to_replace = option & ADD_CACHE_OK_TO_REPLACE;
	int skip_df_check = option & ADD_CACHE_SKIP_DFCHECK;
	int new_only = option & ADD_CACHE_NEW_ONLY;

	if (!(option & ADD_CACHE_KEEP_CACHE_TREE))
		cache_tree_invalidate_path(istate, ce->name);

	/*
	 * If this entry's path sorts after the last entry in the index,
	 * we can avoid searching for it.
	 */
	if (istate->cache_nr > 0 &&
	    strcmp(ce->name, istate->cache[istate->cache_nr - 1]->name) > 0)
		pos = index_pos_to_insert_pos(istate->cache_nr);
	else
		pos = index_name_stage_pos(istate, ce->name, ce_namelen(ce), ce_stage(ce));

	/* existing match? Just replace it. */
	if (pos >= 0) {
		if (!new_only)
			replace_index_entry(istate, pos, ce);
		return 0;
	}
	pos = -pos-1;

	if (!(option & ADD_CACHE_KEEP_CACHE_TREE))
		untracked_cache_add_to_index(istate, ce->name);

	/*
	 * Inserting a merged entry ("stage 0") into the index
	 * will always replace all non-merged entries..
	 */
	if (pos < istate->cache_nr && ce_stage(ce) == 0) {
		while (ce_same_name(istate->cache[pos], ce)) {
			ok_to_add = 1;
			if (!remove_index_entry_at(istate, pos))
				break;
		}
	}

	if (!ok_to_add)
		return -1;
	if (!verify_path(ce->name, ce->ce_mode))
		return error(_("invalid path '%s'"), ce->name);

	if (!skip_df_check &&
	    check_file_directory_conflict(istate, ce, pos, ok_to_replace)) {
		if (!ok_to_replace)
			return error(_("'%s' appears as both a file and as a directory"),
				     ce->name);
		pos = index_name_stage_pos(istate, ce->name, ce_namelen(ce), ce_stage(ce));
		pos = -pos-1;
	}
	return pos + 1;
}
int add_index_entry(struct index_state *istate, struct cache_entry *ce, int option)
{
	int pos;

	if (option & ADD_CACHE_JUST_APPEND)
		pos = istate->cache_nr;
	else {
		int ret;
		ret = add_index_entry_with_check(istate, ce, option);
		if (ret <= 0)
			return ret;
		pos = ret - 1;
	}

	/* Make sure the array is big enough .. */
	ALLOC_GROW(istate->cache, istate->cache_nr + 1, istate->cache_alloc);

	/* Add it in.. */
	istate->cache_nr++;
	if (istate->cache_nr > pos + 1)
		MOVE_ARRAY(istate->cache + pos + 1, istate->cache + pos,
			   istate->cache_nr - pos - 1);
	set_index_entry(istate, pos, ce);
	istate->cache_changed |= CE_ENTRY_ADDED;
	return 0;
}
1394 * "refresh" does not calculate a new sha1 file or bring the
1395 * cache up-to-date for mode/content changes. But what it
1396 * _does_ do is to "re-match" the stat information of a file
1397 * with the cache, so that you can refresh the cache for a
1398 * file that hasn't been changed but where the stat entry is
1401 * For example, you'd want to do this after doing a "git-read-tree",
1402 * to link up the stat cache details with the proper files.
1404 static struct cache_entry *refresh_cache_ent(struct index_state *istate,
1405 struct cache_entry *ce,
1406 unsigned int options, int *err,
1412 struct cache_entry *updated;
1414 int refresh = options & CE_MATCH_REFRESH;
1415 int ignore_valid = options & CE_MATCH_IGNORE_VALID;
1416 int ignore_skip_worktree = options & CE_MATCH_IGNORE_SKIP_WORKTREE;
1417 int ignore_missing = options & CE_MATCH_IGNORE_MISSING;
1418 int ignore_fsmonitor = options & CE_MATCH_IGNORE_FSMONITOR;
1420 if (!refresh || ce_uptodate(ce))
1423 if (!ignore_fsmonitor)
1424 refresh_fsmonitor(istate);
1426 * CE_VALID or CE_SKIP_WORKTREE means the user promised us
1427 * that the change to the work tree does not matter and told
1430 if (!ignore_skip_worktree && ce_skip_worktree(ce)) {
1431 ce_mark_uptodate(ce);
1434 if (!ignore_valid && (ce->ce_flags & CE_VALID)) {
1435 ce_mark_uptodate(ce);
1438 if (!ignore_fsmonitor && (ce->ce_flags & CE_FSMONITOR_VALID)) {
1439 ce_mark_uptodate(ce);
1443 if (has_symlink_leading_path(ce->name, ce_namelen(ce))) {
1453 if (lstat(ce->name, &st) < 0) {
1454 if (ignore_missing && errno == ENOENT)
1461 changed = ie_match_stat(istate, ce, &st, options);
1463 *changed_ret = changed;
1466 * The path is unchanged. If we were told to ignore
1467 * valid bit, then we did the actual stat check and
1468 * found that the entry is unmodified. If the entry
1469 * is not marked VALID, this is the place to mark it
1470 * valid again, under "assume unchanged" mode.
1472 if (ignore_valid && assume_unchanged &&
1473 !(ce->ce_flags & CE_VALID))
1474 ; /* mark this one VALID again */
1477 * We do not mark the index itself "modified"
1478 * because CE_UPTODATE flag is in-core only;
1479 * we are not going to write this change out.
1481 if (!S_ISGITLINK(ce->ce_mode)) {
1482 ce_mark_uptodate(ce);
1483 mark_fsmonitor_valid(istate, ce);
1491 if (ie_modified(istate, ce, &st, options)) {
1497 updated = make_empty_cache_entry(istate, ce_namelen(ce));
1498 copy_cache_entry(updated, ce);
1499 memcpy(updated->name, ce->name, ce->ce_namelen + 1);
1500 fill_stat_cache_info(istate, updated, &st);
1502 * If ignore_valid is not set, we should leave CE_VALID bit
1503 * alone. Otherwise, paths marked with --no-assume-unchanged
1504 * (i.e. things to be edited) will reacquire CE_VALID bit
1505 * automatically, which is not really what we want.
1507 if (!ignore_valid && assume_unchanged &&
1508 !(ce->ce_flags & CE_VALID))
1509 updated->ce_flags &= ~CE_VALID;
1511 /* istate->cache_changed is updated in the caller */
static void show_file(const char *fmt, const char *name, int in_porcelain,
		      int *first, const char *header_msg)
{
	if (in_porcelain && *first && header_msg) {
		printf("%s\n", header_msg);
		*first = 0;
	}
	printf(fmt, name);
}
int repo_refresh_and_write_index(struct repository *repo,
				 unsigned int refresh_flags,
				 unsigned int write_flags,
				 int gentle,
				 const struct pathspec *pathspec,
				 char *seen, const char *header_msg)
{
	struct lock_file lock_file = LOCK_INIT;
	int fd, ret = 0;

	fd = repo_hold_locked_index(repo, &lock_file, 0);
	if (!gentle && fd < 0)
		return -1;
	if (refresh_index(repo->index, refresh_flags, pathspec, seen, header_msg))
		ret = 1;
	if (0 <= fd && write_locked_index(repo->index, &lock_file, COMMIT_LOCK | write_flags))
		ret = -1;
	return ret;
}
int refresh_index(struct index_state *istate, unsigned int flags,
		  const struct pathspec *pathspec,
		  char *seen, const char *header_msg)
{
	int i;
	int has_errors = 0;
	int really = (flags & REFRESH_REALLY) != 0;
	int allow_unmerged = (flags & REFRESH_UNMERGED) != 0;
	int quiet = (flags & REFRESH_QUIET) != 0;
	int not_new = (flags & REFRESH_IGNORE_MISSING) != 0;
	int ignore_submodules = (flags & REFRESH_IGNORE_SUBMODULES) != 0;
	int ignore_skip_worktree = (flags & REFRESH_IGNORE_SKIP_WORKTREE) != 0;
	int first = 1;
	int in_porcelain = (flags & REFRESH_IN_PORCELAIN);
	unsigned int options = (CE_MATCH_REFRESH |
				(really ? CE_MATCH_IGNORE_VALID : 0) |
				(not_new ? CE_MATCH_IGNORE_MISSING : 0));
	const char *modified_fmt;
	const char *deleted_fmt;
	const char *typechange_fmt;
	const char *added_fmt;
	const char *unmerged_fmt;
	struct progress *progress = NULL;
	int t2_sum_lstat = 0;
	int t2_sum_scan = 0;

	if (flags & REFRESH_PROGRESS && isatty(2))
		progress = start_delayed_progress(_("Refresh index"),
						  istate->cache_nr);

	trace_performance_enter();
	modified_fmt   = in_porcelain ? "M\t%s\n" : "%s: needs update\n";
	deleted_fmt    = in_porcelain ? "D\t%s\n" : "%s: needs update\n";
	typechange_fmt = in_porcelain ? "T\t%s\n" : "%s: needs update\n";
	added_fmt      = in_porcelain ? "A\t%s\n" : "%s: needs update\n";
	unmerged_fmt   = in_porcelain ? "U\t%s\n" : "%s: needs merge\n";
	/*
	 * Use the multi-threaded preload_index() to refresh most of the
	 * cache entries quickly then in the single threaded loop below,
	 * we only have to do the special cases that are left.
	 */
	preload_index(istate, pathspec, 0);
	trace2_region_enter("index", "refresh", NULL);
	/* TODO: audit for interaction with sparse-index. */
	ensure_full_index(istate);
	for (i = 0; i < istate->cache_nr; i++) {
		struct cache_entry *ce, *new_entry;
		int cache_errno = 0;
		int changed = 0;
		int filtered = 0;
		int t2_did_lstat = 0;
		int t2_did_scan = 0;

		ce = istate->cache[i];
		if (ignore_submodules && S_ISGITLINK(ce->ce_mode))
			continue;
		if (ignore_skip_worktree && ce_skip_worktree(ce))
			continue;

		if (pathspec && !ce_path_match(istate, ce, pathspec, seen))
			filtered = 1;

		if (ce_stage(ce)) {
			while ((i < istate->cache_nr) &&
			       ! strcmp(istate->cache[i]->name, ce->name))
				i++;
			i--;
			if (allow_unmerged)
				continue;
			if (!filtered)
				show_file(unmerged_fmt, ce->name, in_porcelain,
					  &first, header_msg);
			has_errors = 1;
			continue;
		}

		if (filtered)
			continue;

		new_entry = refresh_cache_ent(istate, ce, options,
					      &cache_errno, &changed,
					      &t2_did_lstat, &t2_did_scan);
		t2_sum_lstat += t2_did_lstat;
		t2_sum_scan += t2_did_scan;
		if (new_entry == ce)
			continue;
		display_progress(progress, i);
		if (!new_entry) {
			const char *fmt;

			if (really && cache_errno == EINVAL) {
				/* If we are doing --really-refresh that
				 * means the index is not valid anymore.
				 */
				ce->ce_flags &= ~CE_VALID;
				ce->ce_flags |= CE_UPDATE_IN_BASE;
				mark_fsmonitor_invalid(istate, ce);
				istate->cache_changed |= CE_ENTRY_CHANGED;
			}
			if (quiet)
				continue;

			if (cache_errno == ENOENT)
				fmt = deleted_fmt;
			else if (ce_intent_to_add(ce))
				fmt = added_fmt; /* must be before other checks */
			else if (changed & TYPE_CHANGED)
				fmt = typechange_fmt;
			else
				fmt = modified_fmt;
			show_file(fmt,
				  ce->name, in_porcelain, &first, header_msg);
			has_errors = 1;
			continue;
		}

		replace_index_entry(istate, i, new_entry);
	}
	trace2_data_intmax("index", NULL, "refresh/sum_lstat", t2_sum_lstat);
	trace2_data_intmax("index", NULL, "refresh/sum_scan", t2_sum_scan);
	trace2_region_leave("index", "refresh", NULL);
	display_progress(progress, istate->cache_nr);
	stop_progress(&progress);
	trace_performance_leave("refresh index");
	return has_errors;
}
struct cache_entry *refresh_cache_entry(struct index_state *istate,
					struct cache_entry *ce,
					unsigned int options)
{
	return refresh_cache_ent(istate, ce, options, NULL, NULL, NULL, NULL);
}
/*****************************************************************
 * Index File I/O
 *****************************************************************/

#define INDEX_FORMAT_DEFAULT 3
static unsigned int get_index_format_default(struct repository *r)
{
	char *envversion = getenv("GIT_INDEX_VERSION");
	char *endp;
	unsigned int version = INDEX_FORMAT_DEFAULT;

	if (!envversion) {
		prepare_repo_settings(r);

		if (r->settings.index_version >= 0)
			version = r->settings.index_version;
		if (version < INDEX_FORMAT_LB || INDEX_FORMAT_UB < version) {
			warning(_("index.version set, but the value is invalid.\n"
				  "Using version %i"), INDEX_FORMAT_DEFAULT);
			return INDEX_FORMAT_DEFAULT;
		}
		return version;
	}

	version = strtoul(envversion, &endp, 10);
	if (*endp ||
	    version < INDEX_FORMAT_LB || INDEX_FORMAT_UB < version) {
		warning(_("GIT_INDEX_VERSION set, but the value is invalid.\n"
			  "Using version %i"), INDEX_FORMAT_DEFAULT);
		version = INDEX_FORMAT_DEFAULT;
	}
	return version;
}
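
/*
 * For illustration (editor-added): the on-disk format can be chosen
 * per repository ("git config index.version 4") or, when a new index
 * is written, forced for one command via the environment, e.g.
 *
 *	GIT_INDEX_VERSION=4 git update-index --really-refresh
 *
 * Out-of-range values fall back to INDEX_FORMAT_DEFAULT with the
 * warnings above.
 */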
/*
 * dev/ino/uid/gid/size are also just tracked to the low 32 bits
 * Again - this is just a (very strong in practice) heuristic that
 * the inode hasn't changed.
 *
 * We save the fields in big-endian order to allow using the
 * index file over NFS transparently.
 */
struct ondisk_cache_entry {
	struct cache_time ctime;
	struct cache_time mtime;
	uint32_t dev;
	uint32_t ino;
	uint32_t mode;
	uint32_t uid;
	uint32_t gid;
	uint32_t size;
	/*
	 * unsigned char hash[hashsz];
	 * uint16_t flags;
	 * if (flags & CE_EXTENDED)
	 *	uint16_t flags2;
	 */
	unsigned char data[GIT_MAX_RAWSZ + 2 * sizeof(uint16_t)];
	char name[FLEX_ARRAY];
};
/* These are only used for v3 or lower */
#define align_padding_size(size, len) ((size + (len) + 8) & ~7) - (size + len)
#define align_flex_name(STRUCT,len) ((offsetof(struct STRUCT,data) + (len) + 8) & ~7)
#define ondisk_cache_entry_size(len) align_flex_name(ondisk_cache_entry,len)
#define ondisk_data_size(flags, len) (the_hash_algo->rawsz + \
				      ((flags & CE_EXTENDED) ? 2 : 1) * sizeof(uint16_t) + len)
#define ondisk_data_size_max(len) (ondisk_data_size(CE_EXTENDED, len))
#define ondisk_ce_size(ce) (ondisk_cache_entry_size(ondisk_data_size((ce)->ce_flags, ce_namelen(ce))))
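
/*
 * A worked size example (editor-added, assuming SHA-1 with rawsz == 20
 * and a v2 entry without CE_EXTENDED): the stat fields above occupy 40
 * bytes, so "data" starts at offset 40, and for the 9-byte name
 * "hello.txt":
 *
 *	ondisk_data_size = 20 + 2 + 9           = 31
 *	ondisk_ce_size   = (40 + 31 + 8) & ~7   = 72
 *
 * i.e. the entry is NUL-padded up to a multiple of 8 bytes, with at
 * least one terminating NUL (40 + 31 = 71, so one padding byte here).
 */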
/* Allow fsck to force verification of the index checksum. */
int verify_index_checksum;

/* Allow fsck to force verification of the cache entry order. */
int verify_ce_order;
static int verify_hdr(const struct cache_header *hdr, unsigned long size)
{
	git_hash_ctx c;
	unsigned char hash[GIT_MAX_RAWSZ];
	int hdr_version;

	if (hdr->hdr_signature != htonl(CACHE_SIGNATURE))
		return error(_("bad signature 0x%08x"), hdr->hdr_signature);
	hdr_version = ntohl(hdr->hdr_version);
	if (hdr_version < INDEX_FORMAT_LB || INDEX_FORMAT_UB < hdr_version)
		return error(_("bad index version %d"), hdr_version);

	if (!verify_index_checksum)
		return 0;

	the_hash_algo->init_fn(&c);
	the_hash_algo->update_fn(&c, hdr, size - the_hash_algo->rawsz);
	the_hash_algo->final_fn(hash, &c);
	if (!hasheq(hash, (unsigned char *)hdr + size - the_hash_algo->rawsz))
		return error(_("bad index file sha1 signature"));
	return 0;
}
static int read_index_extension(struct index_state *istate,
				const char *ext, const char *data, unsigned long sz)
{
	switch (CACHE_EXT(ext)) {
	case CACHE_EXT_TREE:
		istate->cache_tree = cache_tree_read(data, sz);
		break;
	case CACHE_EXT_RESOLVE_UNDO:
		istate->resolve_undo = resolve_undo_read(data, sz);
		break;
	case CACHE_EXT_LINK:
		if (read_link_extension(istate, data, sz))
			return -1;
		break;
	case CACHE_EXT_UNTRACKED:
		istate->untracked = read_untracked_extension(data, sz);
		break;
	case CACHE_EXT_FSMONITOR:
		read_fsmonitor_extension(istate, data, sz);
		break;
	case CACHE_EXT_ENDOFINDEXENTRIES:
	case CACHE_EXT_INDEXENTRYOFFSETTABLE:
		/* already handled in do_read_index() */
		break;
	case CACHE_EXT_SPARSE_DIRECTORIES:
		/* no content, only an indicator */
		istate->sparse_index = 1;
		break;
	default:
		if (*ext < 'A' || 'Z' < *ext)
			return error(_("index uses %.4s extension, which we do not understand"),
				     ext);
		fprintf_ln(stderr, _("ignoring %.4s extension"), ext);
		break;
	}
	return 0;
}
static struct cache_entry *create_from_disk(struct mem_pool *ce_mem_pool,
					    unsigned int version,
					    struct ondisk_cache_entry *ondisk,
					    unsigned long *ent_size,
					    const struct cache_entry *previous_ce)
{
	struct cache_entry *ce;
	size_t len;
	const char *name;
	const unsigned hashsz = the_hash_algo->rawsz;
	const uint16_t *flagsp = (const uint16_t *)(ondisk->data + hashsz);
	unsigned int flags;
	size_t copy_len = 0;
	/*
	 * Adjacent cache entries tend to share the leading paths, so it makes
	 * sense to only store the differences in later entries.  In the v4
	 * on-disk format of the index, each on-disk cache entry stores the
	 * number of bytes to be stripped from the end of the previous name,
	 * and the bytes to append to the result, to come up with its name.
	 */
	int expand_name_field = version == 4;

	/* On-disk flags are just 16 bits */
	flags = get_be16(flagsp);
	len = flags & CE_NAMEMASK;

	if (flags & CE_EXTENDED) {
		int extended_flags;
		extended_flags = get_be16(flagsp + 1) << 16;
		/* We do not yet understand any bit out of CE_EXTENDED_FLAGS */
		if (extended_flags & ~CE_EXTENDED_FLAGS)
			die(_("unknown index entry format 0x%08x"), extended_flags);
		flags |= extended_flags;
		name = (const char *)(flagsp + 2);
	}
	else
		name = (const char *)(flagsp + 1);

	if (expand_name_field) {
		const unsigned char *cp = (const unsigned char *)name;
		size_t strip_len, previous_len;

		/* If we're at the beginning of a block, ignore the previous name */
		strip_len = decode_varint(&cp);
		if (previous_ce) {
			previous_len = previous_ce->ce_namelen;
			if (previous_len < strip_len)
				die(_("malformed name field in the index, near path '%s'"),
				    previous_ce->name);
			copy_len = previous_len - strip_len;
		}
		name = (const char *)cp;
	}

	if (len == CE_NAMEMASK) {
		len = strlen(name);
		if (expand_name_field)
			len += copy_len;
	}

	ce = mem_pool__ce_alloc(ce_mem_pool, len);

	ce->ce_stat_data.sd_ctime.sec = get_be32(&ondisk->ctime.sec);
	ce->ce_stat_data.sd_mtime.sec = get_be32(&ondisk->mtime.sec);
	ce->ce_stat_data.sd_ctime.nsec = get_be32(&ondisk->ctime.nsec);
	ce->ce_stat_data.sd_mtime.nsec = get_be32(&ondisk->mtime.nsec);
	ce->ce_stat_data.sd_dev = get_be32(&ondisk->dev);
	ce->ce_stat_data.sd_ino = get_be32(&ondisk->ino);
	ce->ce_mode = get_be32(&ondisk->mode);
	ce->ce_stat_data.sd_uid = get_be32(&ondisk->uid);
	ce->ce_stat_data.sd_gid = get_be32(&ondisk->gid);
	ce->ce_stat_data.sd_size = get_be32(&ondisk->size);
	ce->ce_flags = flags & ~CE_NAMEMASK;
	ce->ce_namelen = len;
	ce->index = 0;
	oidread(&ce->oid, ondisk->data);

	if (expand_name_field) {
		if (copy_len)
			memcpy(ce->name, previous_ce->name, copy_len);
		memcpy(ce->name + copy_len, name, len + 1 - copy_len);
		*ent_size = (name - ((char *)ondisk)) + len + 1 - copy_len;
	} else {
		memcpy(ce->name, name, len + 1);
		*ent_size = ondisk_ce_size(ce);
	}
	return ce;
}
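
/*
 * A v4 prefix-compression example (editor-added, illustrative values):
 * if the previous entry's name is "foo/bar.c" (9 bytes) and this entry
 * is "foo/baz.c", the on-disk entry stores the varint 5 ("strip five
 * bytes from the end of the previous name", leaving "foo/") followed
 * by the NUL-terminated suffix "baz.c"; create_from_disk() above then
 * rebuilds the full name with copy_len == 4.
 */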
static void check_ce_order(struct index_state *istate)
{
	unsigned int i;

	if (!verify_ce_order)
		return;

	for (i = 1; i < istate->cache_nr; i++) {
		struct cache_entry *ce = istate->cache[i - 1];
		struct cache_entry *next_ce = istate->cache[i];
		int name_compare = strcmp(ce->name, next_ce->name);

		if (0 < name_compare)
			die(_("unordered stage entries in index"));
		if (!name_compare) {
			if (!ce_stage(ce))
				die(_("multiple stage entries for merged file '%s'"),
				    ce->name);
			if (ce_stage(ce) > ce_stage(next_ce))
				die(_("unordered stage entries for '%s'"),
				    ce->name);
		}
	}
}
static void tweak_untracked_cache(struct index_state *istate)
{
	struct repository *r = the_repository;

	prepare_repo_settings(r);

	if (r->settings.core_untracked_cache == UNTRACKED_CACHE_REMOVE) {
		remove_untracked_cache(istate);
		return;
	}

	if (r->settings.core_untracked_cache == UNTRACKED_CACHE_WRITE)
		add_untracked_cache(istate);
}
static void tweak_split_index(struct index_state *istate)
{
	switch (git_config_get_split_index()) {
	case -1: /* unset: do nothing */
		break;
	case 0: /* false */
		remove_split_index(istate);
		break;
	case 1: /* true */
		add_split_index(istate);
		break;
	default: /* unknown value: do nothing */
		break;
	}
}
static void post_read_index_from(struct index_state *istate)
{
	check_ce_order(istate);
	tweak_untracked_cache(istate);
	tweak_split_index(istate);
	tweak_fsmonitor(istate);
}
static size_t estimate_cache_size_from_compressed(unsigned int entries)
{
	return entries * (sizeof(struct cache_entry) + CACHE_ENTRY_PATH_LENGTH);
}

static size_t estimate_cache_size(size_t ondisk_size, unsigned int entries)
{
	long per_entry = sizeof(struct cache_entry) - sizeof(struct ondisk_cache_entry);

	/*
	 * Account for potential alignment differences.
	 */
	per_entry += align_padding_size(per_entry, 0);
	return ondisk_size + entries * per_entry;
}
struct index_entry_offset
{
	/* starting byte offset into index file, count of index entries in this block */
	int offset, nr;
};

struct index_entry_offset_table
{
	int nr;
	struct index_entry_offset entries[FLEX_ARRAY];
};

static struct index_entry_offset_table *read_ieot_extension(const char *mmap, size_t mmap_size, size_t offset);
static void write_ieot_extension(struct strbuf *sb, struct index_entry_offset_table *ieot);

static size_t read_eoie_extension(const char *mmap, size_t mmap_size);
static void write_eoie_extension(struct strbuf *sb, git_hash_ctx *eoie_context, size_t offset);
struct load_index_extensions
{
	pthread_t pthread;
	struct index_state *istate;
	const char *mmap;
	size_t mmap_size;
	unsigned long src_offset;
};
static void *load_index_extensions(void *_data)
{
	struct load_index_extensions *p = _data;
	unsigned long src_offset = p->src_offset;

	while (src_offset <= p->mmap_size - the_hash_algo->rawsz - 8) {
		/* After an array of active_nr index entries,
		 * there can be an arbitrary number of extended
		 * sections, each of which is prefixed with
		 * extension name (4-byte) and section length
		 * in 4-byte network byte order.
		 */
		uint32_t extsize = get_be32(p->mmap + src_offset + 4);
		if (read_index_extension(p->istate,
					 p->mmap + src_offset,
					 p->mmap + src_offset + 8,
					 extsize) < 0) {
			munmap((void *)p->mmap, p->mmap_size);
			die(_("index file corrupt"));
		}
		src_offset += 8;
		src_offset += extsize;
	}

	return NULL;
}
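
/*
 * For illustration (editor-added), an extension block as it appears in
 * the mapped file, here a 300-byte cache-tree extension (length in
 * network byte order):
 *
 *	'T' 'R' 'E' 'E'  00 00 01 2c  <300 bytes of payload>
 *
 * The loop above stops before the trailing checksum, which occupies
 * the final the_hash_algo->rawsz bytes of the file.
 */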
/*
 * A helper function that will load the specified range of cache entries
 * from the memory mapped file and add them to the given index.
 */
static unsigned long load_cache_entry_block(struct index_state *istate,
			struct mem_pool *ce_mem_pool, int offset, int nr, const char *mmap,
			unsigned long start_offset, const struct cache_entry *previous_ce)
{
	int i;
	unsigned long src_offset = start_offset;

	for (i = offset; i < offset + nr; i++) {
		struct ondisk_cache_entry *disk_ce;
		struct cache_entry *ce;
		unsigned long consumed;

		disk_ce = (struct ondisk_cache_entry *)(mmap + src_offset);
		ce = create_from_disk(ce_mem_pool, istate->version, disk_ce, &consumed, previous_ce);
		set_index_entry(istate, i, ce);

		src_offset += consumed;
		previous_ce = ce;
	}
	return src_offset - start_offset;
}
static unsigned long load_all_cache_entries(struct index_state *istate,
			const char *mmap, size_t mmap_size, unsigned long src_offset)
{
	unsigned long consumed;

	istate->ce_mem_pool = xmalloc(sizeof(*istate->ce_mem_pool));
	if (istate->version == 4) {
		mem_pool_init(istate->ce_mem_pool,
			      estimate_cache_size_from_compressed(istate->cache_nr));
	} else {
		mem_pool_init(istate->ce_mem_pool,
			      estimate_cache_size(mmap_size, istate->cache_nr));
	}

	consumed = load_cache_entry_block(istate, istate->ce_mem_pool,
					  0, istate->cache_nr, mmap, src_offset, NULL);
	return consumed;
}
/*
 * Mostly randomly chosen maximum thread counts: we
 * cap the parallelism to online_cpus() threads, and we want
 * to have at least 10000 cache entries per thread for it to
 * be worth starting a thread.
 */

#define THREAD_COST		(10000)
struct load_cache_entries_thread_data
{
	pthread_t pthread;
	struct index_state *istate;
	struct mem_pool *ce_mem_pool;
	int offset;
	const char *mmap;
	struct index_entry_offset_table *ieot;
	int ieot_start;		/* starting index into the ieot array */
	int ieot_blocks;	/* count of ieot entries to process */
	unsigned long consumed;	/* return # of bytes in index file processed */
};
/*
 * A thread proc to run the load_cache_entries() computation
 * across multiple background threads.
 */
static void *load_cache_entries_thread(void *_data)
{
	struct load_cache_entries_thread_data *p = _data;
	int i;

	/* iterate across all ieot blocks assigned to this thread */
	for (i = p->ieot_start; i < p->ieot_start + p->ieot_blocks; i++) {
		p->consumed += load_cache_entry_block(p->istate, p->ce_mem_pool,
						      p->offset, p->ieot->entries[i].nr, p->mmap, p->ieot->entries[i].offset, NULL);
		p->offset += p->ieot->entries[i].nr;
	}
	return NULL;
}
static unsigned long load_cache_entries_threaded(struct index_state *istate, const char *mmap, size_t mmap_size,
						 int nr_threads, struct index_entry_offset_table *ieot)
{
	int i, offset, ieot_blocks, ieot_start, err;
	struct load_cache_entries_thread_data *data;
	unsigned long consumed = 0;

	/* a little sanity checking */
	if (istate->name_hash_initialized)
		BUG("the name hash isn't thread safe");

	istate->ce_mem_pool = xmalloc(sizeof(*istate->ce_mem_pool));
	mem_pool_init(istate->ce_mem_pool, 0);

	/* ensure we have no more threads than we have blocks to process */
	if (nr_threads > ieot->nr)
		nr_threads = ieot->nr;
	CALLOC_ARRAY(data, nr_threads);

	offset = ieot_start = 0;
	ieot_blocks = DIV_ROUND_UP(ieot->nr, nr_threads);
	for (i = 0; i < nr_threads; i++) {
		struct load_cache_entries_thread_data *p = &data[i];
		int nr, j;

		if (ieot_start + ieot_blocks > ieot->nr)
			ieot_blocks = ieot->nr - ieot_start;

		p->istate = istate;
		p->offset = offset;
		p->mmap = mmap;
		p->ieot = ieot;
		p->ieot_start = ieot_start;
		p->ieot_blocks = ieot_blocks;

		/* create a mem_pool for each thread */
		nr = 0;
		for (j = p->ieot_start; j < p->ieot_start + p->ieot_blocks; j++)
			nr += p->ieot->entries[j].nr;
		p->ce_mem_pool = xmalloc(sizeof(*istate->ce_mem_pool));
		if (istate->version == 4) {
			mem_pool_init(p->ce_mem_pool,
				      estimate_cache_size_from_compressed(nr));
		} else {
			mem_pool_init(p->ce_mem_pool,
				      estimate_cache_size(mmap_size, nr));
		}

		err = pthread_create(&p->pthread, NULL, load_cache_entries_thread, p);
		if (err)
			die(_("unable to create load_cache_entries thread: %s"), strerror(err));

		/* increment by the number of cache entries in the ieot block being processed */
		for (j = 0; j < ieot_blocks; j++)
			offset += ieot->entries[ieot_start + j].nr;
		ieot_start += ieot_blocks;
	}

	for (i = 0; i < nr_threads; i++) {
		struct load_cache_entries_thread_data *p = &data[i];

		err = pthread_join(p->pthread, NULL);
		if (err)
			die(_("unable to join load_cache_entries thread: %s"), strerror(err));
		mem_pool_combine(istate->ce_mem_pool, p->ce_mem_pool);
		consumed += p->consumed;
	}

	free(data);

	return consumed;
}
/* remember to discard_cache() before reading a different cache! */
int do_read_index(struct index_state *istate, const char *path, int must_exist)
{
	int fd;
	struct stat st;
	unsigned long src_offset;
	const struct cache_header *hdr;
	const char *mmap;
	size_t mmap_size;
	struct load_index_extensions p;
	size_t extension_offset = 0;
	int nr_threads, cpus;
	struct index_entry_offset_table *ieot = NULL;

	if (istate->initialized)
		return istate->cache_nr;

	istate->timestamp.sec = 0;
	istate->timestamp.nsec = 0;
	fd = open(path, O_RDONLY);
	if (fd < 0) {
		if (!must_exist && errno == ENOENT)
			return 0;
		die_errno(_("%s: index file open failed"), path);
	}

	if (fstat(fd, &st))
		die_errno(_("%s: cannot stat the open index"), path);

	mmap_size = xsize_t(st.st_size);
	if (mmap_size < sizeof(struct cache_header) + the_hash_algo->rawsz)
		die(_("%s: index file smaller than expected"), path);

	mmap = xmmap_gently(NULL, mmap_size, PROT_READ, MAP_PRIVATE, fd, 0);
	if (mmap == MAP_FAILED)
		die_errno(_("%s: unable to map index file"), path);
	close(fd);

	hdr = (const struct cache_header *)mmap;
	if (verify_hdr(hdr, mmap_size) < 0)
		goto unmap;

	oidread(&istate->oid, (const unsigned char *)hdr + mmap_size - the_hash_algo->rawsz);
	istate->version = ntohl(hdr->hdr_version);
	istate->cache_nr = ntohl(hdr->hdr_entries);
	istate->cache_alloc = alloc_nr(istate->cache_nr);
	CALLOC_ARRAY(istate->cache, istate->cache_alloc);
	istate->initialized = 1;

	p.istate = istate;
	p.mmap = mmap;
	p.mmap_size = mmap_size;

	src_offset = sizeof(*hdr);

	if (git_config_get_index_threads(&nr_threads))
		nr_threads = 1;

	/* TODO: does creating more threads than cores help? */
	if (!nr_threads) {
		nr_threads = istate->cache_nr / THREAD_COST;
		cpus = online_cpus();
		if (nr_threads > cpus)
			nr_threads = cpus;
	}

	if (!HAVE_THREADS)
		nr_threads = 1;

	if (nr_threads > 1) {
		extension_offset = read_eoie_extension(mmap, mmap_size);
		if (extension_offset) {
			int err;

			/* allocate a thread to load the index extensions */
			p.src_offset = extension_offset;
			err = pthread_create(&p.pthread, NULL, load_index_extensions, &p);
			if (err)
				die(_("unable to create load_index_extensions thread: %s"), strerror(err));

			nr_threads--;
		}
	}

	/*
	 * Locate and read the index entry offset table so that we can use it
	 * to multi-thread the reading of the cache entries.
	 */
	if (extension_offset && nr_threads > 1)
		ieot = read_ieot_extension(mmap, mmap_size, extension_offset);

	if (ieot) {
		src_offset += load_cache_entries_threaded(istate, mmap, mmap_size, nr_threads, ieot);
		free(ieot);
	} else {
		src_offset += load_all_cache_entries(istate, mmap, mmap_size, src_offset);
	}

	istate->timestamp.sec = st.st_mtime;
	istate->timestamp.nsec = ST_MTIME_NSEC(st);

	/* if we created a thread, join it; otherwise load the extensions on the primary thread */
	if (extension_offset) {
		int ret = pthread_join(p.pthread, NULL);
		if (ret)
			die(_("unable to join load_index_extensions thread: %s"), strerror(ret));
	} else {
		p.src_offset = src_offset;
		load_index_extensions(&p);
	}
	munmap((void *)mmap, mmap_size);

	/*
	 * TODO trace2: replace "the_repository" with the actual repo instance
	 * that is associated with the given "istate".
	 */
	trace2_data_intmax("index", the_repository, "read/version",
			   istate->version);
	trace2_data_intmax("index", the_repository, "read/cache_nr",
			   istate->cache_nr);

	if (!istate->repo)
		istate->repo = the_repository;
	prepare_repo_settings(istate->repo);
	if (istate->repo->settings.command_requires_full_index)
		ensure_full_index(istate);

	return istate->cache_nr;

unmap:
	munmap((void *)mmap, mmap_size);
	die(_("index file corrupt"));
}
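/*
 * For illustration: a fixed thread count can be requested with
 * "git config index.threads 4", while "index.threads true" (the 0/auto
 * case above) sizes the pool at roughly cache_nr / THREAD_COST, capped
 * at online_cpus().
 */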
/*
 * Signal that the shared index is used by updating its mtime.
 *
 * This way, shared indexes can be removed if they have not been used
 * for some time.
 */
static void freshen_shared_index(const char *shared_index, int warn)
{
	if (!check_and_freshen_file(shared_index, 1) && warn)
		warning(_("could not freshen shared index '%s'"), shared_index);
}
int read_index_from(struct index_state *istate, const char *path,
		    const char *gitdir)
{
	struct split_index *split_index;
	int ret;
	char *base_oid_hex;
	char *base_path;

	/* istate->initialized covers both .git/index and .git/sharedindex.xxx */
	if (istate->initialized)
		return istate->cache_nr;

	/*
	 * TODO trace2: replace "the_repository" with the actual repo instance
	 * that is associated with the given "istate".
	 */
	trace2_region_enter_printf("index", "do_read_index", the_repository,
				   "%s", path);
	trace_performance_enter();
	ret = do_read_index(istate, path, 0);
	trace_performance_leave("read cache %s", path);
	trace2_region_leave_printf("index", "do_read_index", the_repository,
				   "%s", path);

	split_index = istate->split_index;
	if (!split_index || is_null_oid(&split_index->base_oid)) {
		post_read_index_from(istate);
		return ret;
	}

	trace_performance_enter();
	if (split_index->base)
		discard_index(split_index->base);
	else
		CALLOC_ARRAY(split_index->base, 1);

	base_oid_hex = oid_to_hex(&split_index->base_oid);
	base_path = xstrfmt("%s/sharedindex.%s", gitdir, base_oid_hex);
	trace2_region_enter_printf("index", "shared/do_read_index",
				   the_repository, "%s", base_path);
	ret = do_read_index(split_index->base, base_path, 1);
	trace2_region_leave_printf("index", "shared/do_read_index",
				   the_repository, "%s", base_path);
	if (!oideq(&split_index->base_oid, &split_index->base->oid))
		die(_("broken index, expect %s in %s, got %s"),
		    base_oid_hex, base_path,
		    oid_to_hex(&split_index->base->oid));

	freshen_shared_index(base_path, 0);
	merge_base_index(istate);
	post_read_index_from(istate);
	trace_performance_leave("read cache %s", base_path);
	free(base_path);
	return ret;
}
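/*
 * Illustrative split-index layout: .git/index carries a "link"
 * extension naming a base OID; the bulk of the entries live in
 * .git/sharedindex.<base-OID>, which is read into split_index->base
 * above and then merged with the deltas from .git/index by
 * merge_base_index().
 */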
int is_index_unborn(struct index_state *istate)
{
	return (!istate->cache_nr && !istate->timestamp.sec);
}
int discard_index(struct index_state *istate)
{
	/*
	 * Cache entries in istate->cache[] should have been allocated
	 * from the memory pool associated with this index, or from an
	 * associated split_index. There is no need to free individual
	 * cache entries. validate_cache_entries can detect when this
	 * assertion does not hold.
	 */
	validate_cache_entries(istate);

	resolve_undo_clear_index(istate);
	istate->cache_nr = 0;
	istate->cache_changed = 0;
	istate->timestamp.sec = 0;
	istate->timestamp.nsec = 0;
	free_name_hash(istate);
	cache_tree_free(&(istate->cache_tree));
	istate->initialized = 0;
	istate->fsmonitor_has_run_once = 0;
	FREE_AND_NULL(istate->fsmonitor_last_update);
	FREE_AND_NULL(istate->cache);
	istate->cache_alloc = 0;
	discard_split_index(istate);
	free_untracked_cache(istate->untracked);
	istate->untracked = NULL;

	if (istate->ce_mem_pool) {
		mem_pool_discard(istate->ce_mem_pool, should_validate_cache_entries());
		FREE_AND_NULL(istate->ce_mem_pool);
	}

	return 0;
}
/*
 * Validate the cache entries of this index.
 * All cache entries associated with this index
 * should have been allocated by the memory pool
 * associated with this index, or by a referenced
 * split index.
 */
void validate_cache_entries(const struct index_state *istate)
{
	int i;

	if (!should_validate_cache_entries() || !istate || !istate->initialized)
		return;

	for (i = 0; i < istate->cache_nr; i++) {
		if (!istate) {
			BUG("cache entry is not allocated from expected memory pool");
		} else if (!istate->ce_mem_pool ||
			!mem_pool_contains(istate->ce_mem_pool, istate->cache[i])) {
			if (!istate->split_index ||
				!istate->split_index->base ||
				!istate->split_index->base->ce_mem_pool ||
				!mem_pool_contains(istate->split_index->base->ce_mem_pool, istate->cache[i])) {
				BUG("cache entry is not allocated from expected memory pool");
			}
		}
	}

	if (istate->split_index)
		validate_cache_entries(istate->split_index->base);
}
int unmerged_index(const struct index_state *istate)
{
	int i;

	for (i = 0; i < istate->cache_nr; i++) {
		if (ce_stage(istate->cache[i]))
			return 1;
	}
	return 0;
}
int repo_index_has_changes(struct repository *repo,
			   struct tree *tree,
			   struct strbuf *sb)
{
	struct index_state *istate = repo->index;
	struct object_id cmp;
	int i;

	if (tree)
		cmp = tree->object.oid;
	if (tree || !get_oid_tree("HEAD", &cmp)) {
		struct diff_options opt;

		repo_diff_setup(repo, &opt);
		opt.flags.exit_with_status = 1;
		if (!sb)
			opt.flags.quick = 1;
		diff_setup_done(&opt);
		do_diff_cache(&cmp, &opt);
		diffcore_std(&opt);
		for (i = 0; sb && i < diff_queued_diff.nr; i++) {
			if (i)
				strbuf_addch(sb, ' ');
			strbuf_addstr(sb, diff_queued_diff.queue[i]->two->path);
		}
		diff_flush(&opt);
		return opt.flags.has_changes != 0;
	} else {
		/* TODO: audit for interaction with sparse-index. */
		ensure_full_index(istate);
		for (i = 0; sb && i < istate->cache_nr; i++) {
			if (i)
				strbuf_addch(sb, ' ');
			strbuf_addstr(sb, istate->cache[i]->name);
		}
		return !!istate->cache_nr;
	}
}
static int write_index_ext_header(struct hashfile *f,
				  git_hash_ctx *eoie_f,
				  unsigned int ext,
				  unsigned int sz)
{
	hashwrite_be32(f, ext);
	hashwrite_be32(f, sz);

	if (eoie_f) {
		ext = htonl(ext);
		sz = htonl(sz);
		the_hash_algo->update_fn(eoie_f, &ext, sizeof(ext));
		the_hash_algo->update_fn(eoie_f, &sz, sizeof(sz));
	}
	return 0;
}
static void ce_smudge_racily_clean_entry(struct index_state *istate,
					 struct cache_entry *ce)
{
	/*
	 * The only thing we care about in this function is to smudge the
	 * falsely clean entry due to the touch-update-touch race, so we leave
	 * everything else as it is.  We are called for entries whose
	 * ce_stat_data.sd_mtime matches the index file mtime.
	 *
	 * Note that this actually does not do much for gitlinks, for
	 * which ce_match_stat_basic() always goes to the actual
	 * contents.  The caller checks with is_racy_timestamp() which
	 * always says "no" for gitlinks, so we are not called for them ;-)
	 */
	struct stat st;

	if (lstat(ce->name, &st) < 0)
		return;
	if (ce_match_stat_basic(ce, &st))
		return;
	if (ce_modified_check_fs(istate, ce, &st)) {
		/* This is "racily clean"; smudge it.  Note that this
		 * is tricky code.  At first glance, it may appear
		 * that it can break with this sequence:
		 *
		 * $ echo xyzzy >frotz
		 * $ git-update-index --add frotz
		 * $ : >frotz
		 * $ sleep 3
		 * $ echo filfre >nitfol
		 * $ git-update-index --add nitfol
		 *
		 * but it does not.  When the second update-index runs,
		 * it notices that the entry "frotz" has the same timestamp
		 * as index, and if we were to smudge it by resetting its
		 * size to zero here, then the object name recorded
		 * in index is the 6-byte file but the cached stat information
		 * becomes zero --- which would then match what we would
		 * obtain from the filesystem next time we stat("frotz").
		 *
		 * However, the second update-index, before calling
		 * this function, notices that the cached size is 6
		 * bytes and what is on the filesystem is an empty
		 * file, and never calls us, so the cached size information
		 * for "frotz" stays 6 which does not match the filesystem.
		 */
		ce->ce_stat_data.sd_size = 0;
	}
}
/* Copy miscellaneous fields but not the name */
static void copy_cache_entry_to_ondisk(struct ondisk_cache_entry *ondisk,
				       struct cache_entry *ce)
{
	short flags;
	const unsigned hashsz = the_hash_algo->rawsz;
	uint16_t *flagsp = (uint16_t *)(ondisk->data + hashsz);

	ondisk->ctime.sec = htonl(ce->ce_stat_data.sd_ctime.sec);
	ondisk->mtime.sec = htonl(ce->ce_stat_data.sd_mtime.sec);
	ondisk->ctime.nsec = htonl(ce->ce_stat_data.sd_ctime.nsec);
	ondisk->mtime.nsec = htonl(ce->ce_stat_data.sd_mtime.nsec);
	ondisk->dev  = htonl(ce->ce_stat_data.sd_dev);
	ondisk->ino  = htonl(ce->ce_stat_data.sd_ino);
	ondisk->mode = htonl(ce->ce_mode);
	ondisk->uid  = htonl(ce->ce_stat_data.sd_uid);
	ondisk->gid  = htonl(ce->ce_stat_data.sd_gid);
	ondisk->size = htonl(ce->ce_stat_data.sd_size);
	hashcpy(ondisk->data, ce->oid.hash);

	flags = ce->ce_flags & ~CE_NAMEMASK;
	flags |= (ce_namelen(ce) >= CE_NAMEMASK ? CE_NAMEMASK : ce_namelen(ce));
	flagsp[0] = htons(flags);
	if (ce->ce_flags & CE_EXTENDED) {
		flagsp[1] = htons((ce->ce_flags & CE_EXTENDED_FLAGS) >> 16);
	}
}
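/*
 * Layout of the 16-bit flags word written above, per the index-format
 * documentation:
 *
 *   1 bit   assume-valid
 *   1 bit   extended (must be zero in version 2)
 *   2 bits  stage (during merge)
 *   12 bits name length (CE_NAMEMASK, i.e. 0xFFF, if the name is longer)
 */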
static int ce_write_entry(struct hashfile *f, struct cache_entry *ce,
			  struct strbuf *previous_name, struct ondisk_cache_entry *ondisk)
{
	int size;
	unsigned int saved_namelen;
	int stripped_name = 0;
	static unsigned char padding[8] = { 0x00 };

	if (ce->ce_flags & CE_STRIP_NAME) {
		saved_namelen = ce_namelen(ce);
		ce->ce_namelen = 0;
		stripped_name = 1;
	}

	size = offsetof(struct ondisk_cache_entry,data) + ondisk_data_size(ce->ce_flags, 0);

	if (!previous_name) {
		int len = ce_namelen(ce);
		copy_cache_entry_to_ondisk(ondisk, ce);
		hashwrite(f, ondisk, size);
		hashwrite(f, ce->name, len);
		hashwrite(f, padding, align_padding_size(size, len));
	} else {
		int common, to_remove, prefix_size;
		unsigned char to_remove_vi[16];
		for (common = 0;
		     (ce->name[common] &&
		      common < previous_name->len &&
		      ce->name[common] == previous_name->buf[common]);
		     common++)
			; /* still matching */
		to_remove = previous_name->len - common;
		prefix_size = encode_varint(to_remove, to_remove_vi);

		copy_cache_entry_to_ondisk(ondisk, ce);
		hashwrite(f, ondisk, size);
		hashwrite(f, to_remove_vi, prefix_size);
		hashwrite(f, ce->name + common, ce_namelen(ce) - common);
		hashwrite(f, padding, 1);

		strbuf_splice(previous_name, common, to_remove,
			      ce->name + common, ce_namelen(ce) - common);
	}
	if (stripped_name) {
		ce->ce_namelen = saved_namelen;
		ce->ce_flags &= ~CE_STRIP_NAME;
	}

	return 0;
}
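/*
 * Worked example of the V4 prefix compression above: if the previous
 * entry's name is "foo/bar" and the current name is "foo/baz", the
 * common prefix is "foo/ba" (6 bytes), so to_remove = 7 - 6 = 1.  We
 * emit varint(1), the suffix "z", and a NUL terminator; a reader
 * reconstructs the name by chopping one byte off the previous name and
 * appending the suffix.
 */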
/*
 * This function verifies that the index_state has the correct hash of
 * the index file.  Don't die if we have any other failure; just return 0.
 */
static int verify_index_from(const struct index_state *istate, const char *path)
{
	int fd;
	ssize_t n;
	struct stat st;
	unsigned char hash[GIT_MAX_RAWSZ];

	if (!istate->initialized)
		return 0;

	fd = open(path, O_RDONLY);
	if (fd < 0)
		return 0;

	if (fstat(fd, &st))
		goto out;

	if (st.st_size < sizeof(struct cache_header) + the_hash_algo->rawsz)
		goto out;

	n = pread_in_full(fd, hash, the_hash_algo->rawsz, st.st_size - the_hash_algo->rawsz);
	if (n != the_hash_algo->rawsz)
		goto out;

	if (!hasheq(istate->oid.hash, hash))
		goto out;

	close(fd);
	return 1;

out:
	close(fd);
	return 0;
}
static int repo_verify_index(struct repository *repo)
{
	return verify_index_from(repo->index, repo->index_file);
}

static int has_racy_timestamp(struct index_state *istate)
{
	int entries = istate->cache_nr;
	int i;

	for (i = 0; i < entries; i++) {
		struct cache_entry *ce = istate->cache[i];
		if (is_racy_timestamp(istate, ce))
			return 1;
	}
	return 0;
}
void repo_update_index_if_able(struct repository *repo,
			       struct lock_file *lockfile)
{
	if ((repo->index->cache_changed ||
	     has_racy_timestamp(repo->index)) &&
	    repo_verify_index(repo))
		write_locked_index(repo->index, lockfile, COMMIT_LOCK);
	else
		rollback_lock_file(lockfile);
}
static int record_eoie(void)
{
	int val;

	if (!git_config_get_bool("index.recordendofindexentries", &val))
		return val;

	/*
	 * As a convenience, the end of index entries extension
	 * used for threading is written by default if the user
	 * explicitly requested threaded index reads.
	 */
	return !git_config_get_index_threads(&val) && val != 1;
}

static int record_ieot(void)
{
	int val;

	if (!git_config_get_bool("index.recordoffsettable", &val))
		return val;

	/*
	 * As a convenience, the offset table used for threading is
	 * written by default if the user explicitly requested
	 * threaded index reads.
	 */
	return !git_config_get_index_threads(&val) && val != 1;
}
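/*
 * In other words, either extension can be forced on or off with e.g.
 * "git config index.recordendofindexentries true" or
 * "git config index.recordoffsettable false"; absent an explicit
 * setting, both are written roughly whenever index.threads enables
 * multithreaded reads.
 */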
/*
 * On success, `tempfile` is closed. If it is the temporary file
 * of a `struct lock_file`, we will therefore effectively perform
 * a 'close_lock_file_gently()`. Since that is an implementation
 * detail of lockfiles, callers of `do_write_index()` should not
 * rely on it.
 */
static int do_write_index(struct index_state *istate, struct tempfile *tempfile,
			  int strip_extensions)
{
	uint64_t start = getnanotime();
	struct hashfile *f;
	git_hash_ctx *eoie_c = NULL;
	struct cache_header hdr;
	int i, err = 0, removed, extended, hdr_version;
	struct cache_entry **cache = istate->cache;
	int entries = istate->cache_nr;
	struct stat st;
	struct ondisk_cache_entry ondisk;
	struct strbuf previous_name_buf = STRBUF_INIT, *previous_name;
	int drop_cache_tree = istate->drop_cache_tree;
	off_t offset;
	int ieot_entries = 1;
	struct index_entry_offset_table *ieot = NULL;
	int nr, nr_threads;

	f = hashfd(tempfile->fd, tempfile->filename.buf);

	for (i = removed = extended = 0; i < entries; i++) {
		if (cache[i]->ce_flags & CE_REMOVE)
			removed++;

		/* reduce extended entries if possible */
		cache[i]->ce_flags &= ~CE_EXTENDED;
		if (cache[i]->ce_flags & CE_EXTENDED_FLAGS) {
			extended++;
			cache[i]->ce_flags |= CE_EXTENDED;
		}
	}

	if (!istate->version) {
		istate->version = get_index_format_default(the_repository);
		if (git_env_bool("GIT_TEST_SPLIT_INDEX", 0))
			init_split_index(istate);
	}

	/* demote version 3 to version 2 when the latter suffices */
	if (istate->version == 3 || istate->version == 2)
		istate->version = extended ? 3 : 2;

	hdr_version = istate->version;

	hdr.hdr_signature = htonl(CACHE_SIGNATURE);
	hdr.hdr_version = htonl(hdr_version);
	hdr.hdr_entries = htonl(entries - removed);

	hashwrite(f, &hdr, sizeof(hdr));

	if (!HAVE_THREADS || git_config_get_index_threads(&nr_threads))
		nr_threads = 1;

	if (nr_threads != 1 && record_ieot()) {
		int ieot_blocks, cpus;

		/*
		 * ensure the default number of ieot blocks maps evenly to the
		 * default number of threads that will process them, leaving
		 * room for the thread to load the index extensions.
		 */
		if (!nr_threads) {
			ieot_blocks = istate->cache_nr / THREAD_COST;
			cpus = online_cpus();
			if (ieot_blocks > cpus - 1)
				ieot_blocks = cpus - 1;
		} else {
			ieot_blocks = nr_threads;
			if (ieot_blocks > istate->cache_nr)
				ieot_blocks = istate->cache_nr;
		}

		/*
		 * no reason to write out the IEOT extension if we don't
		 * have enough blocks to utilize multi-threading
		 */
		if (ieot_blocks > 1) {
			ieot = xcalloc(1, sizeof(struct index_entry_offset_table)
				+ (ieot_blocks * sizeof(struct index_entry_offset)));
			ieot_entries = DIV_ROUND_UP(entries, ieot_blocks);
		}
	}

	offset = hashfile_total(f);

	nr = 0;
	previous_name = (hdr_version == 4) ? &previous_name_buf : NULL;

	for (i = 0; i < entries; i++) {
		struct cache_entry *ce = cache[i];
		if (ce->ce_flags & CE_REMOVE)
			continue;
		if (!ce_uptodate(ce) && is_racy_timestamp(istate, ce))
			ce_smudge_racily_clean_entry(istate, ce);
		if (is_null_oid(&ce->oid)) {
			static const char msg[] = "cache entry has null sha1: %s";
			static int allow = -1;

			if (allow < 0)
				allow = git_env_bool("GIT_ALLOW_NULL_SHA1", 0);
			if (allow)
				warning(msg, ce->name);
			else
				err = error(msg, ce->name);

			drop_cache_tree = 1;
		}
		if (ieot && i && (i % ieot_entries == 0)) {
			ieot->entries[ieot->nr].nr = nr;
			ieot->entries[ieot->nr].offset = offset;
			ieot->nr++;
			/*
			 * If we have a V4 index, set the first byte to an invalid
			 * character to ensure there is nothing common with the previous
			 * entry
			 */
			if (previous_name)
				previous_name->buf[0] = 0;
			nr = 0;
			offset = hashfile_total(f);
		}
		if (ce_write_entry(f, ce, previous_name, (struct ondisk_cache_entry *)&ondisk) < 0)
			err = -1;

		if (err)
			break;
		nr++;
	}
	if (ieot && nr) {
		ieot->entries[ieot->nr].nr = nr;
		ieot->entries[ieot->nr].offset = offset;
		ieot->nr++;
	}
	strbuf_release(&previous_name_buf);

	if (err) {
		free(ieot);
		return err;
	}

	offset = hashfile_total(f);

	/*
	 * The extension headers must be hashed on their own for the
	 * EOIE extension. Create a hashfile here to compute that hash.
	 */
	if (offset && record_eoie()) {
		CALLOC_ARRAY(eoie_c, 1);
		the_hash_algo->init_fn(eoie_c);
	}

	/*
	 * Let's write out CACHE_EXT_INDEXENTRYOFFSETTABLE first so that we
	 * can minimize the number of extensions we have to scan through to
	 * find it during load.  Write it out regardless of the
	 * strip_extensions parameter as we need it when loading the shared
	 * index.
	 */
	if (ieot) {
		struct strbuf sb = STRBUF_INIT;

		write_ieot_extension(&sb, ieot);
		err = write_index_ext_header(f, eoie_c, CACHE_EXT_INDEXENTRYOFFSETTABLE, sb.len) < 0;
		hashwrite(f, sb.buf, sb.len);
		strbuf_release(&sb);
		free(ieot);
		if (err)
			return -1;
	}

	if (!strip_extensions && istate->split_index &&
	    !is_null_oid(&istate->split_index->base_oid)) {
		struct strbuf sb = STRBUF_INIT;

		err = write_link_extension(&sb, istate) < 0 ||
			write_index_ext_header(f, eoie_c, CACHE_EXT_LINK,
					       sb.len) < 0;
		hashwrite(f, sb.buf, sb.len);
		strbuf_release(&sb);
		if (err)
			return -1;
	}
	if (!strip_extensions && !drop_cache_tree && istate->cache_tree) {
		struct strbuf sb = STRBUF_INIT;

		cache_tree_write(&sb, istate->cache_tree);
		err = write_index_ext_header(f, eoie_c, CACHE_EXT_TREE, sb.len) < 0;
		hashwrite(f, sb.buf, sb.len);
		strbuf_release(&sb);
		if (err)
			return -1;
	}
	if (!strip_extensions && istate->resolve_undo) {
		struct strbuf sb = STRBUF_INIT;

		resolve_undo_write(&sb, istate->resolve_undo);
		err = write_index_ext_header(f, eoie_c, CACHE_EXT_RESOLVE_UNDO,
					     sb.len) < 0;
		hashwrite(f, sb.buf, sb.len);
		strbuf_release(&sb);
		if (err)
			return -1;
	}
	if (!strip_extensions && istate->untracked) {
		struct strbuf sb = STRBUF_INIT;

		write_untracked_extension(&sb, istate->untracked);
		err = write_index_ext_header(f, eoie_c, CACHE_EXT_UNTRACKED,
					     sb.len) < 0;
		hashwrite(f, sb.buf, sb.len);
		strbuf_release(&sb);
		if (err)
			return -1;
	}
	if (!strip_extensions && istate->fsmonitor_last_update) {
		struct strbuf sb = STRBUF_INIT;

		write_fsmonitor_extension(&sb, istate);
		err = write_index_ext_header(f, eoie_c, CACHE_EXT_FSMONITOR, sb.len) < 0;
		hashwrite(f, sb.buf, sb.len);
		strbuf_release(&sb);
		if (err)
			return -1;
	}
	if (istate->sparse_index) {
		if (write_index_ext_header(f, eoie_c, CACHE_EXT_SPARSE_DIRECTORIES, 0) < 0)
			return -1;
	}

	/*
	 * CACHE_EXT_ENDOFINDEXENTRIES must be written as the last extension
	 * before the trailing hash so that it can be found and processed
	 * before all the index entries are read.  Write it out regardless of
	 * the strip_extensions parameter as we need it when loading the
	 * shared index.
	 */
	if (eoie_c) {
		struct strbuf sb = STRBUF_INIT;

		write_eoie_extension(&sb, eoie_c, offset);
		err = write_index_ext_header(f, NULL, CACHE_EXT_ENDOFINDEXENTRIES, sb.len) < 0;
		hashwrite(f, sb.buf, sb.len);
		strbuf_release(&sb);
		if (err)
			return -1;
	}

	finalize_hashfile(f, istate->oid.hash, CSUM_HASH_IN_STREAM);
	if (close_tempfile_gently(tempfile)) {
		error(_("could not close '%s'"), get_tempfile_path(tempfile));
		return -1;
	}
	if (stat(get_tempfile_path(tempfile), &st))
		return -1;
	istate->timestamp.sec = (unsigned int)st.st_mtime;
	istate->timestamp.nsec = ST_MTIME_NSEC(st);
	trace_performance_since(start, "write index, changed mask = %x", istate->cache_changed);

	/*
	 * TODO trace2: replace "the_repository" with the actual repo instance
	 * that is associated with the given "istate".
	 */
	trace2_data_intmax("index", the_repository, "write/version",
			   istate->version);
	trace2_data_intmax("index", the_repository, "write/cache_nr",
			   istate->cache_nr);

	return 0;
}
void set_alternate_index_output(const char *name)
{
	alternate_index_output = name;
}

static int commit_locked_index(struct lock_file *lk)
{
	if (alternate_index_output)
		return commit_lock_file_to(lk, alternate_index_output);
	else
		return commit_lock_file(lk);
}
static int do_write_locked_index(struct index_state *istate, struct lock_file *lock,
				 unsigned flags)
{
	int ret;
	int was_full = !istate->sparse_index;
	struct run_hooks_opt hook_opt = RUN_HOOKS_OPT_INIT;

	ret = convert_to_sparse(istate);
	if (ret) {
		warning(_("failed to convert to a sparse-index"));
		return ret;
	}

	/*
	 * TODO trace2: replace "the_repository" with the actual repo instance
	 * that is associated with the given "istate".
	 */
	trace2_region_enter_printf("index", "do_write_index", the_repository,
				   "%s", get_lock_file_path(lock));
	ret = do_write_index(istate, lock->tempfile, 0);
	trace2_region_leave_printf("index", "do_write_index", the_repository,
				   "%s", get_lock_file_path(lock));

	if (was_full)
		ensure_full_index(istate);

	if (ret)
		return ret;
	if (flags & COMMIT_LOCK)
		ret = commit_locked_index(lock);
	else
		ret = close_lock_file_gently(lock);

	strvec_pushl(&hook_opt.args,
		     istate->updated_workdir ? "1" : "0",
		     istate->updated_skipworktree ? "1" : "0",
		     NULL);
	run_hooks("post-index-change", &hook_opt);
	run_hooks_opt_clear(&hook_opt);

	istate->updated_workdir = 0;
	istate->updated_skipworktree = 0;

	return ret;
}
static int write_split_index(struct index_state *istate,
			     struct lock_file *lock,
			     unsigned flags)
{
	int ret;

	prepare_to_write_split_index(istate);
	ret = do_write_locked_index(istate, lock, flags);
	finish_writing_split_index(istate);
	return ret;
}
static const char *shared_index_expire = "2.weeks.ago";

static unsigned long get_shared_index_expire_date(void)
{
	static unsigned long shared_index_expire_date;
	static int shared_index_expire_date_prepared;

	if (!shared_index_expire_date_prepared) {
		git_config_get_expiry("splitindex.sharedindexexpire",
				      &shared_index_expire);
		shared_index_expire_date = approxidate(shared_index_expire);
		shared_index_expire_date_prepared = 1;
	}

	return shared_index_expire_date;
}
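/*
 * splitindex.sharedindexexpire takes an approxidate expression, so
 * values such as "4.days.ago" or "now" (expire everything not in use)
 * should be acceptable, with the "2.weeks.ago" fallback above applying
 * when nothing is configured.
 */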
static int should_delete_shared_index(const char *shared_index_path)
{
	struct stat st;
	unsigned long expiration;

	/* Check timestamp */
	expiration = get_shared_index_expire_date();
	if (!expiration)
		return 0;
	if (stat(shared_index_path, &st))
		return error_errno(_("could not stat '%s'"), shared_index_path);
	if (st.st_mtime > expiration)
		return 0;

	return 1;
}
static int clean_shared_index_files(const char *current_hex)
{
	struct dirent *de;
	DIR *dir = opendir(get_git_dir());

	if (!dir)
		return error_errno(_("unable to open git dir: %s"), get_git_dir());

	while ((de = readdir(dir)) != NULL) {
		const char *sha1_hex;
		const char *shared_index_path;
		if (!skip_prefix(de->d_name, "sharedindex.", &sha1_hex))
			continue;
		if (!strcmp(sha1_hex, current_hex))
			continue;
		shared_index_path = git_path("%s", de->d_name);
		if (should_delete_shared_index(shared_index_path) > 0 &&
		    unlink(shared_index_path))
			warning_errno(_("unable to unlink: %s"), shared_index_path);
	}
	closedir(dir);

	return 0;
}
static int write_shared_index(struct index_state *istate,
			      struct tempfile **temp)
{
	struct split_index *si = istate->split_index;
	int ret, was_full = !istate->sparse_index;

	move_cache_to_base_index(istate);
	convert_to_sparse(istate);

	trace2_region_enter_printf("index", "shared/do_write_index",
				   the_repository, "%s", get_tempfile_path(*temp));
	ret = do_write_index(si->base, *temp, 1);
	trace2_region_leave_printf("index", "shared/do_write_index",
				   the_repository, "%s", get_tempfile_path(*temp));

	if (was_full)
		ensure_full_index(istate);

	if (ret)
		return ret;
	ret = adjust_shared_perm(get_tempfile_path(*temp));
	if (ret) {
		error(_("cannot fix permission bits on '%s'"), get_tempfile_path(*temp));
		return ret;
	}
	ret = rename_tempfile(temp,
			      git_path("sharedindex.%s", oid_to_hex(&si->base->oid)));
	if (!ret) {
		oidcpy(&si->base_oid, &si->base->oid);
		clean_shared_index_files(oid_to_hex(&si->base->oid));
	}

	return ret;
}
static const int default_max_percent_split_change = 20;

static int too_many_not_shared_entries(struct index_state *istate)
{
	int i, not_shared = 0;
	int max_split = git_config_get_max_percent_split_change();

	switch (max_split) {
	case -1:
		/* not or badly configured: use the default value */
		max_split = default_max_percent_split_change;
		break;
	case 0:
		return 1; /* 0% means always write a new shared index */
	case 100:
		return 0; /* 100% means never write a new shared index */
	default:
		break; /* just use the configured value */
	}

	/* Count not shared entries */
	for (i = 0; i < istate->cache_nr; i++) {
		struct cache_entry *ce = istate->cache[i];

		if (!ce->index)
			not_shared++;
	}

	return (int64_t)istate->cache_nr * max_split < (int64_t)not_shared * 100;
}
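/*
 * E.g. with the default max_split of 20 and cache_nr == 1000, the
 * comparison above becomes 1000 * 20 < not_shared * 100, so a new
 * shared index is requested once more than 200 entries (20%) live
 * outside the shared index.
 */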
int write_locked_index(struct index_state *istate, struct lock_file *lock,
		       unsigned flags)
{
	int new_shared_index, ret;
	struct split_index *si = istate->split_index;

	if (git_env_bool("GIT_TEST_CHECK_CACHE_TREE", 0))
		cache_tree_verify(the_repository, istate);

	if ((flags & SKIP_IF_UNCHANGED) && !istate->cache_changed) {
		if (flags & COMMIT_LOCK)
			rollback_lock_file(lock);
		return 0;
	}

	if (istate->fsmonitor_last_update)
		fill_fsmonitor_bitmap(istate);

	if (!si || alternate_index_output ||
	    (istate->cache_changed & ~EXTMASK)) {
		if (si)
			oidclr(&si->base_oid);
		ret = do_write_locked_index(istate, lock, flags);
		goto out;
	}

	if (git_env_bool("GIT_TEST_SPLIT_INDEX", 0)) {
		int v = si->base_oid.hash[0];
		if ((v & 15) < 6)
			istate->cache_changed |= SPLIT_INDEX_ORDERED;
	}
	if (too_many_not_shared_entries(istate))
		istate->cache_changed |= SPLIT_INDEX_ORDERED;

	new_shared_index = istate->cache_changed & SPLIT_INDEX_ORDERED;

	if (new_shared_index) {
		struct tempfile *temp;
		int saved_errno;

		/* Same initial permissions as the main .git/index file */
		temp = mks_tempfile_sm(git_path("sharedindex_XXXXXX"), 0, 0666);
		if (!temp) {
			oidclr(&si->base_oid);
			ret = do_write_locked_index(istate, lock, flags);
			goto out;
		}
		ret = write_shared_index(istate, &temp);

		saved_errno = errno;
		if (is_tempfile_active(temp))
			delete_tempfile(&temp);
		errno = saved_errno;

		if (ret)
			goto out;
	}

	ret = write_split_index(istate, lock, flags);

	/* Freshen the shared index only if the split-index was written */
	if (!ret && !new_shared_index && !is_null_oid(&si->base_oid)) {
		const char *shared_index = git_path("sharedindex.%s",
						    oid_to_hex(&si->base_oid));
		freshen_shared_index(shared_index, 1);
	}

out:
	if (flags & COMMIT_LOCK)
		rollback_lock_file(lock);
	return ret;
}
/*
 * Read the index file that is potentially unmerged into the given
 * index_state, dropping any unmerged entries to stage #0 (potentially
 * resulting in a path appearing as both a file and a directory in the
 * index; the caller is responsible for clearing out the extra entries
 * before writing the index to a tree).  Returns true if the index is
 * unmerged.  Callers who want to refuse to work from an unmerged
 * state can call this and check its return value, instead of calling
 * read_cache().
 */
int repo_read_index_unmerged(struct repository *repo)
{
	struct index_state *istate;
	int i;
	int unmerged = 0;

	repo_read_index(repo);
	istate = repo->index;
	for (i = 0; i < istate->cache_nr; i++) {
		struct cache_entry *ce = istate->cache[i];
		struct cache_entry *new_ce;
		int len;

		if (!ce_stage(ce))
			continue;
		unmerged = 1;
		len = ce_namelen(ce);
		new_ce = make_empty_cache_entry(istate, len);
		memcpy(new_ce->name, ce->name, len);
		new_ce->ce_flags = create_ce_flags(0) | CE_CONFLICTED;
		new_ce->ce_namelen = len;
		new_ce->ce_mode = ce->ce_mode;
		if (add_index_entry(istate, new_ce, ADD_CACHE_SKIP_DFCHECK))
			return error(_("%s: cannot drop to stage #0"),
				     ce->name);
	}
	return unmerged;
}
/*
 * Returns 1 if the path is an "other" path with respect to
 * the index; that is, the path is not mentioned in the index at all,
 * either as a file, a directory with some files in the index,
 * or as an unmerged entry.
 *
 * We helpfully remove a trailing "/" from directories so that
 * the output of read_directory can be used as-is.
 */
int index_name_is_other(struct index_state *istate, const char *name,
			int namelen)
{
	int pos;
	if (namelen && name[namelen - 1] == '/')
		namelen--;
	pos = index_name_pos(istate, name, namelen);
	if (0 <= pos)
		return 0;	/* exact match */
	pos = -pos - 1;
	if (pos < istate->cache_nr) {
		struct cache_entry *ce = istate->cache[pos];
		if (ce_namelen(ce) == namelen &&
		    !memcmp(ce->name, name, namelen))
			return 0; /* Yup, this one exists unmerged */
	}
	return 1;
}
void *read_blob_data_from_index(struct index_state *istate,
				const char *path, unsigned long *size)
{
	int pos, len;
	unsigned long sz;
	enum object_type type;
	void *data;

	len = strlen(path);
	pos = index_name_pos(istate, path, len);
	if (pos < 0) {
		/*
		 * We might be in the middle of a merge, in which
		 * case we would read stage #2 (ours).
		 */
		int i;
		for (i = -pos - 1;
		     (pos < 0 && i < istate->cache_nr &&
		      !strcmp(istate->cache[i]->name, path));
		     i++)
			if (ce_stage(istate->cache[i]) == 2)
				pos = i;
	}
	if (pos < 0)
		return NULL;
	data = read_object_file(&istate->cache[pos]->oid, &type, &sz);
	if (!data || type != OBJ_BLOB) {
		free(data);
		return NULL;
	}
	if (size)
		*size = sz;
	return data;
}
void stat_validity_clear(struct stat_validity *sv)
{
	FREE_AND_NULL(sv->sd);
}

int stat_validity_check(struct stat_validity *sv, const char *path)
{
	struct stat st;

	if (stat(path, &st) < 0)
		return sv->sd == NULL;
	if (!sv->sd)
		return 0;
	return S_ISREG(st.st_mode) && !match_stat_data(sv->sd, &st);
}

void stat_validity_update(struct stat_validity *sv, int fd)
{
	struct stat st;

	if (fstat(fd, &st) < 0 || !S_ISREG(st.st_mode))
		stat_validity_clear(sv);
	else {
		if (!sv->sd)
			CALLOC_ARRAY(sv->sd, 1);
		fill_stat_data(sv->sd, &st);
	}
}
void move_index_extensions(struct index_state *dst, struct index_state *src)
{
	dst->untracked = src->untracked;
	src->untracked = NULL;
	dst->cache_tree = src->cache_tree;
	src->cache_tree = NULL;
}
struct cache_entry *dup_cache_entry(const struct cache_entry *ce,
				    struct index_state *istate)
{
	unsigned int size = ce_size(ce);
	int mem_pool_allocated;
	struct cache_entry *new_entry = make_empty_cache_entry(istate, ce_namelen(ce));
	mem_pool_allocated = new_entry->mem_pool_allocated;

	memcpy(new_entry, ce, size);
	new_entry->mem_pool_allocated = mem_pool_allocated;

	return new_entry;
}
void discard_cache_entry(struct cache_entry *ce)
{
	if (ce && should_validate_cache_entries())
		memset(ce, 0xCD, cache_entry_size(ce->ce_namelen));

	if (ce && ce->mem_pool_allocated)
		return;

	free(ce);
}
int should_validate_cache_entries(void)
{
	static int validate_index_cache_entries = -1;

	if (validate_index_cache_entries < 0) {
		if (getenv("GIT_TEST_VALIDATE_INDEX_CACHE_ENTRIES"))
			validate_index_cache_entries = 1;
		else
			validate_index_cache_entries = 0;
	}

	return validate_index_cache_entries;
}
#define EOIE_SIZE (4 + GIT_SHA1_RAWSZ) /* <4-byte offset> + <20-byte hash> */
#define EOIE_SIZE_WITH_HEADER (4 + 4 + EOIE_SIZE) /* <4-byte signature> + <4-byte length> + EOIE_SIZE */
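/*
 * Sketch of the on-disk EOIE extension implied by the sizes above
 * (hash shown at its SHA-1 width):
 *
 *   "EOIE" | 32-bit length (EOIE_SIZE) | 32-bit offset of the first
 *   extension | hash over the <name, length> pairs of the other
 *   extensions
 */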
static size_t read_eoie_extension(const char *mmap, size_t mmap_size)
{
	/*
	 * The end of index entries (EOIE) extension is guaranteed to be last
	 * so that it can be found by scanning backwards from the EOF.
	 *
	 * "EOIE"
	 * <4-byte length>
	 * <4-byte offset>
	 * <20-byte hash>
	 */
	const char *index, *eoie;
	uint32_t extsize;
	size_t offset, src_offset;
	unsigned char hash[GIT_MAX_RAWSZ];
	git_hash_ctx c;

	/* ensure we have an index big enough to contain an EOIE extension */
	if (mmap_size < sizeof(struct cache_header) + EOIE_SIZE_WITH_HEADER + the_hash_algo->rawsz)
		return 0;

	/* validate the extension signature */
	index = eoie = mmap + mmap_size - EOIE_SIZE_WITH_HEADER - the_hash_algo->rawsz;
	if (CACHE_EXT(index) != CACHE_EXT_ENDOFINDEXENTRIES)
		return 0;
	index += sizeof(uint32_t);

	/* validate the extension size */
	extsize = get_be32(index);
	if (extsize != EOIE_SIZE)
		return 0;
	index += sizeof(uint32_t);

	/*
	 * Validate that the offset we're going to look for the first extension
	 * signature at is after the index header and before the eoie extension.
	 */
	offset = get_be32(index);
	if (mmap + offset < mmap + sizeof(struct cache_header))
		return 0;
	if (mmap + offset >= eoie)
		return 0;
	index += sizeof(uint32_t);

	/*
	 * The hash is computed over extension types and their sizes (but not
	 * their contents).  E.g. if we have "TREE" extension that is N-bytes
	 * long, "REUC" extension that is M-bytes long, followed by "EOIE",
	 * then the hash would be:
	 *
	 * SHA-1("TREE" + <binary representation of N> +
	 *	 "REUC" + <binary representation of M>)
	 */
	src_offset = offset;
	the_hash_algo->init_fn(&c);
	while (src_offset < mmap_size - the_hash_algo->rawsz - EOIE_SIZE_WITH_HEADER) {
		/* After an array of active_nr index entries,
		 * there can be an arbitrary number of extended
		 * sections, each of which is prefixed with an
		 * extension name (4 bytes) and section length
		 * in 4-byte network byte order.
		 */
		memcpy(&extsize, mmap + src_offset + 4, 4);
		extsize = ntohl(extsize);

		/* verify the extension size isn't so large it will wrap around */
		if (src_offset + 8 + extsize < src_offset)
			return 0;

		the_hash_algo->update_fn(&c, mmap + src_offset, 8);

		src_offset += 8;
		src_offset += extsize;
	}
	the_hash_algo->final_fn(hash, &c);
	if (!hasheq(hash, (const unsigned char *)index))
		return 0;

	/* Validate that the extension offsets returned us back to the eoie extension. */
	if (src_offset != mmap_size - the_hash_algo->rawsz - EOIE_SIZE_WITH_HEADER)
		return 0;

	return offset;
}
static void write_eoie_extension(struct strbuf *sb, git_hash_ctx *eoie_context, size_t offset)
{
	uint32_t buffer;
	unsigned char hash[GIT_MAX_RAWSZ];

	/* offset */
	put_be32(&buffer, offset);
	strbuf_add(sb, &buffer, sizeof(uint32_t));

	/* hash */
	the_hash_algo->final_fn(hash, eoie_context);
	strbuf_add(sb, hash, the_hash_algo->rawsz);
}
#define IEOT_VERSION (1)
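/*
 * Sketch of the IEOT extension payload read and written below:
 *
 *   32-bit version (IEOT_VERSION)
 *   N entries of: 32-bit offset (from the start of the file to the
 *   first cache entry in the block) + 32-bit count of entries in
 *   the block
 */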
static struct index_entry_offset_table *read_ieot_extension(const char *mmap, size_t mmap_size, size_t offset)
{
	const char *index = NULL;
	uint32_t extsize, ext_version;
	struct index_entry_offset_table *ieot;
	int i, nr;

	/* find the IEOT extension */
	if (!offset)
		return NULL;
	while (offset <= mmap_size - the_hash_algo->rawsz - 8) {
		extsize = get_be32(mmap + offset + 4);
		if (CACHE_EXT((mmap + offset)) == CACHE_EXT_INDEXENTRYOFFSETTABLE) {
			index = mmap + offset + 4 + 4;
			break;
		}
		offset += 8;
		offset += extsize;
	}
	if (!index)
		return NULL;

	/* validate the version is IEOT_VERSION */
	ext_version = get_be32(index);
	if (ext_version != IEOT_VERSION) {
		error("invalid IEOT version %u", ext_version);
		return NULL;
	}
	index += sizeof(uint32_t);

	/* extension size - version bytes / bytes per entry */
	nr = (extsize - sizeof(uint32_t)) / (sizeof(uint32_t) + sizeof(uint32_t));
	if (!nr) {
		error("invalid number of IEOT entries %d", nr);
		return NULL;
	}
	ieot = xmalloc(sizeof(struct index_entry_offset_table)
		       + (nr * sizeof(struct index_entry_offset)));
	ieot->nr = nr;
	for (i = 0; i < nr; i++) {
		ieot->entries[i].offset = get_be32(index);
		index += sizeof(uint32_t);
		ieot->entries[i].nr = get_be32(index);
		index += sizeof(uint32_t);
	}

	return ieot;
}
static void write_ieot_extension(struct strbuf *sb, struct index_entry_offset_table *ieot)
{
	uint32_t buffer;
	int i;

	/* version */
	put_be32(&buffer, IEOT_VERSION);
	strbuf_add(sb, &buffer, sizeof(uint32_t));

	/* ieot */
	for (i = 0; i < ieot->nr; i++) {

		/* offset */
		put_be32(&buffer, ieot->entries[i].offset);
		strbuf_add(sb, &buffer, sizeof(uint32_t));

		/* nr */
		put_be32(&buffer, ieot->entries[i].nr);
		strbuf_add(sb, &buffer, sizeof(uint32_t));
	}
}