1 #define NO_THE_INDEX_COMPATIBILITY_MACROS
3 #include "cache-tree.h"
/* Canonical type-name string for tree objects ("tree"). */
10 const char *tree_type = "tree";
/*
 * Build a cache entry for one tree entry (oid, mode, base+pathname) and
 * add it to 'istate' at merge stage 'stage', passing 'opt' through as the
 * add_index_entry() flags.  Returns add_index_entry()'s result, or
 * READ_TREE_RECURSIVE to ask the tree walker to descend into a subtree.
 *
 * NOTE(review): several lines appear elided from this excerpt — the
 * opening brace, the 'pathname' parameter, the 'len' declaration, and
 * the condition (presumably S_ISDIR(mode)) guarding the
 * READ_TREE_RECURSIVE return.  Confirm against the complete file.
 */
12 static int read_one_entry_opt(struct index_state *istate,
13 const struct object_id *oid,
14 const char *base, int baselen,
16 unsigned mode, int stage, int opt)
19 struct cache_entry *ce;
/* Directories are not stored in the index; have the walker recurse. */
22 return READ_TREE_RECURSIVE;
24 len = strlen(pathname);
/* Entry must hold the full path "<base><pathname>" plus its NUL. */
25 ce = make_empty_cache_entry(istate, baselen + len);
27 ce->ce_mode = create_ce_mode(mode);
28 ce->ce_flags = create_ce_flags(stage);
29 ce->ce_namelen = baselen + len;
/* Full name = leading directory prefix followed by this entry's name. */
30 memcpy(ce->name, base, baselen);
31 memcpy(ce->name + baselen, pathname, len+1)
32 oidcpy(&ce->oid, oid);
33 return add_index_entry(istate, ce, opt);
/*
 * read_tree_fn_t callback for the "slow" path: add one tree entry to
 * the index passed in 'context', permitting replacement of existing
 * entries (ADD_CACHE_OK_TO_ADD) and skipping the directory/file
 * conflict check (ADD_CACHE_SKIP_DFCHECK).
 *
 * NOTE(review): the trailing 'void *context' parameter line and the
 * mode/stage argument line appear elided from this excerpt.
 */
36 static int read_one_entry(const struct object_id *oid, struct strbuf *base,
37 const char *pathname, unsigned mode, int stage,
40 struct index_state *istate = context;
41 return read_one_entry_opt(istate, oid, base->buf, base->len, pathname,
43 ADD_CACHE_OK_TO_ADD|ADD_CACHE_SKIP_DFCHECK);
47 * This is used when the caller knows there is no existing entries at
48 * the stage that will conflict with the entry being added.
/*
 * read_tree_fn_t callback for the fast path: append the entry without
 * searching for duplicates (ADD_CACHE_JUST_APPEND).  The caller is
 * responsible for re-sorting the index afterwards.
 *
 * NOTE(review): the opener of the comment above and this function's
 * braces appear elided from this excerpt.
 */
50 static int read_one_entry_quick(const struct object_id *oid, struct strbuf *base,
51 const char *pathname, unsigned mode, int stage,
54 struct index_state *istate = context;
55 return read_one_entry_opt(istate, oid, base->buf, base->len, pathname,
57 ADD_CACHE_JUST_APPEND);
/*
 * Recursively walk 'tree', invoking 'fn' on every entry that matches
 * 'pathspec'.  'base' carries the accumulated path prefix for this
 * level and is truncated back to 'oldlen' after each recursive step.
 *
 * NOTE(review): a number of lines appear elided from this excerpt —
 * the opening brace, a 'struct object_id oid' declaration, parse/error
 * checks, the break/continue statements after the pathspec tests, and
 * the switch/while closers.  Confirm exact control flow against the
 * complete file.
 */
60 static int read_tree_1(struct tree *tree, struct strbuf *base,
61 int stage, const struct pathspec *pathspec,
62 read_tree_fn_t fn, void *context)
64 struct tree_desc desc;
65 struct name_entry entry;
67 int len, oldlen = base->len;
/* Once a subtree is known all-interesting, skip per-entry matching. */
68 enum interesting retval = entry_not_interesting;
73 init_tree_desc(&desc, tree->buffer, tree->size);
75 while (tree_entry(&desc, &entry)) {
76 if (retval != all_entries_interesting) {
77 retval = tree_entry_interesting(&entry, base, 0, pathspec);
/* Trees are sorted: nothing later can match, so stop early. */
78 if (retval == all_entries_not_interesting)
80 if (retval == entry_not_interesting)
/* Hand the entry to the callback; it may ask us to recurse. */
84 switch (fn(entry.oid, base,
85 entry.path, entry.mode, stage, context)) {
88 case READ_TREE_RECURSIVE:
/*
 * A subtree is descended into directly; a gitlink is resolved
 * to the submodule commit's root tree before descending.
 */
94 if (S_ISDIR(entry.mode))
95 oidcpy(&oid, entry.oid);
96 else if (S_ISGITLINK(entry.mode)) {
97 struct commit *commit;
99 commit = lookup_commit(entry.oid);
101 die("Commit %s in submodule path %s%s not found",
102 oid_to_hex(entry.oid),
103 base->buf, entry.path);
105 if (parse_commit(commit))
106 die("Invalid commit %s in submodule path %s%s",
107 oid_to_hex(entry.oid),
108 base->buf, entry.path);
110 oidcpy(&oid, get_commit_tree_oid(commit));
/* Extend the prefix with "<entry>/", recurse, then restore it. */
115 len = tree_entry_len(&entry);
116 strbuf_add(base, entry.path, len);
117 strbuf_addch(base, '/');
118 retval = read_tree_1(lookup_tree(&oid),
119 base, stage, pathspec,
121 strbuf_setlen(base, oldlen);
/*
 * Public entry point: walk 'tree' with the path prefix base[0..baselen),
 * calling 'fn' for entries matching 'pathspec'.  Returns the result of
 * the underlying read_tree_1() walk.
 *
 * NOTE(review): the 'int ret' declaration, strbuf_release(), return
 * statement, and braces appear elided from this excerpt.
 */
128 int read_tree_recursive(struct tree *tree,
129 const char *base, int baselen,
130 int stage, const struct pathspec *pathspec,
131 read_tree_fn_t fn, void *context)
133 struct strbuf sb = STRBUF_INIT;
136 strbuf_add(&sb, base, baselen);
137 ret = read_tree_1(tree, &sb, stage, pathspec, fn, context);
/*
 * qsort()-style comparator: orders cache entries by name and then by
 * merge stage, i.e. the index's canonical sort order.
 */
142 static int cmp_cache_name_compare(const void *a_, const void *b_)
144 const struct cache_entry *ce1, *ce2;
146 ce1 = *((const struct cache_entry **)a_);
147 ce2 = *((const struct cache_entry **)b_);
148 return cache_name_stage_compare(ce1->name, ce1->ce_namelen, ce_stage(ce1),
149 ce2->name, ce2->ce_namelen, ce_stage(ce2));
/*
 * Read the contents of 'tree' into 'istate' at merge stage 'stage',
 * restricted to paths matching 'match'.  Uses the slow duplicate-aware
 * callback only when entries already exist at that stage; otherwise it
 * appends entries quickly and re-sorts the index afterwards.
 *
 * NOTE(review): declarations of the loop index and 'err', the
 * assignment selecting the slow callback, the early return, and
 * several braces appear elided from this excerpt.
 */
152 int read_tree(struct tree *tree, int stage, struct pathspec *match,
153 struct index_state *istate)
155 read_tree_fn_t fn = NULL;
159 * Currently the only existing callers of this function all
160 * call it with stage=1 and after making sure there is nothing
161 * at that stage; we could always use read_one_entry_quick().
163 * But when we decide to straighten out git-read-tree not to
164 * use unpack_trees() in some cases, this will probably start
169 * See if we have cache entry at the stage. If so,
170 * do it the original slow way, otherwise, append and then
/* Scan for an existing entry at 'stage'; stop once 'fn' is decided. */
173 for (i = 0; !fn && i < istate->cache_nr; i++) {
174 const struct cache_entry *ce = istate->cache[i];
175 if (ce_stage(ce) == stage)
/* No conflicting entries found: safe to append without searching. */
180 fn = read_one_entry_quick;
181 err = read_tree_recursive(tree, "", 0, stage, match, fn, istate);
182 if (fn == read_one_entry || err)
186 * Sort the cache entry -- we need to nuke the cache tree, though.
/* The bulk append invalidated the cache-tree; drop it, then restore
 * canonical sorted order. */
188 cache_tree_free(&istate->cache_tree);
189 QSORT(istate->cache, istate->cache_nr, cmp_cache_name_compare);
/*
 * Look up — or lazily create — the in-core tree object for 'oid'.
 *
 * NOTE(review): the NULL check on 'obj' that should guard the
 * create_object() call appears elided from this excerpt.
 */
193 struct tree *lookup_tree(const struct object_id *oid)
195 struct object *obj = lookup_object(oid->hash);
197 return create_object(oid->hash, alloc_tree_node());
/* Existing object: verify/convert it to OBJ_TREE (NULL on mismatch). */
198 return object_as_type(obj, OBJ_TREE, 0);
/*
 * Attach 'buffer'/'size' as the parsed contents of 'item', marking it
 * parsed.  A no-op for an already-parsed tree.
 *
 * NOTE(review): the early return for the parsed case, the 'item->size'
 * assignment, and the final 'return 0' appear elided from this excerpt.
 */
201 int parse_tree_buffer(struct tree *item, void *buffer, unsigned long size)
203 if (item->object.parsed)
205 item->object.parsed = 1;
206 item->buffer = buffer;
/*
 * Read the tree's contents from the object store and attach them via
 * parse_tree_buffer().  Returns 0 on success.  When the object is
 * missing, returns -1 quietly if 'quiet_on_missing' is set, otherwise
 * reports through error().
 *
 * NOTE(review): declarations of 'buffer'/'size', the NULL check on
 * 'buffer', and a free(buffer) before the "not a tree" error appear
 * elided from this excerpt.
 */
212 int parse_tree_gently(struct tree *item, int quiet_on_missing)
214 enum object_type type;
/* Already parsed: nothing to do. */
218 if (item->object.parsed)
220 buffer = read_object_file(&item->object.oid, &type, &size);
/* Missing object: optionally stay quiet for probing callers. */
222 return quiet_on_missing ? -1 :
223 error("Could not read %s",
224 oid_to_hex(&item->object.oid));
225 if (type != OBJ_TREE) {
227 return error("Object %s not a tree",
228 oid_to_hex(&item->object.oid));
230 return parse_tree_buffer(item, buffer, size);
/*
 * Release a tree's cached contents and mark it unparsed so it can be
 * re-read from the object store later.
 */
233 void free_tree_buffer(struct tree *tree)
235 FREE_AND_NULL(tree->buffer);
237 tree->object.parsed = 0;
240 struct tree *parse_tree_indirect(const struct object_id *oid)
242 struct object *obj = parse_object(oid);
246 if (obj->type == OBJ_TREE)
247 return (struct tree *) obj;
248 else if (obj->type == OBJ_COMMIT)
249 obj = &(get_commit_tree(((struct commit *)obj))->object);
250 else if (obj->type == OBJ_TAG)
251 obj = ((struct tag *) obj)->tagged;
255 parse_object(&obj->oid);