#define NO_THE_INDEX_COMPATIBILITY_MACROS
#include "cache.h"
#include "cache-tree.h"
#include "tree.h"
#include "object-store.h"
#include "blob.h"
#include "commit.h"
#include "tag.h"
#include "alloc.h"
#include "tree-walk.h"

const char *tree_type = "tree";
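
/*
 * Create a cache entry from one tree entry and add it to the index
 * at the given stage; directory entries are not added, but reported
 * back so that the tree walk recurses into them.
 */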
static int read_one_entry_opt(struct index_state *istate,
			      const struct object_id *oid,
			      const char *base, int baselen,
			      const char *pathname,
			      unsigned mode, int stage, int opt)
{
	int len;
	unsigned int size;
	struct cache_entry *ce;

	if (S_ISDIR(mode))
		return READ_TREE_RECURSIVE;

	len = strlen(pathname);
	size = cache_entry_size(baselen + len);
	ce = xcalloc(1, size);

	ce->ce_mode = create_ce_mode(mode);
	ce->ce_flags = create_ce_flags(stage);
	ce->ce_namelen = baselen + len;
	memcpy(ce->name, base, baselen);
	memcpy(ce->name + baselen, pathname, len + 1);
	oidcpy(&ce->oid, oid);
	return add_index_entry(istate, ce, opt);
}
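
/*
 * read_tree_recursive() callback for the slow path: insert through
 * add_index_entry() with the usual checks, minus the directory/file
 * conflict check (ADD_CACHE_SKIP_DFCHECK).
 */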
static int read_one_entry(const struct object_id *oid, struct strbuf *base,
			  const char *pathname, unsigned mode, int stage,
			  void *context)
{
	struct index_state *istate = context;
	return read_one_entry_opt(istate, oid, base->buf, base->len, pathname,
				  mode, stage,
				  ADD_CACHE_OK_TO_ADD|ADD_CACHE_SKIP_DFCHECK);
}

/*
 * This is used when the caller knows there are no existing entries at
 * the stage that will conflict with the entry being added.
 */
static int read_one_entry_quick(const struct object_id *oid, struct strbuf *base,
				const char *pathname, unsigned mode, int stage,
				void *context)
{
	struct index_state *istate = context;
	return read_one_entry_opt(istate, oid, base->buf, base->len, pathname,
				  mode, stage,
				  ADD_CACHE_JUST_APPEND);
}
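
/*
 * Walk one level of a tree: skip entries that do not match the
 * pathspec, feed each remaining entry to the callback, and descend
 * into subtrees (or the trees of submodule commits) whenever the
 * callback answers READ_TREE_RECURSIVE.
 */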
static int read_tree_1(struct tree *tree, struct strbuf *base,
		       int stage, const struct pathspec *pathspec,
		       read_tree_fn_t fn, void *context)
{
	struct tree_desc desc;
	struct name_entry entry;
	struct object_id oid;
	int len, oldlen = base->len;
	enum interesting retval = entry_not_interesting;

	if (parse_tree(tree))
		return -1;

	init_tree_desc(&desc, tree->buffer, tree->size);

	while (tree_entry(&desc, &entry)) {
		if (retval != all_entries_interesting) {
			retval = tree_entry_interesting(&entry, base, 0, pathspec);
			if (retval == all_entries_not_interesting)
				break;
			if (retval == entry_not_interesting)
				continue;
		}

		switch (fn(entry.oid, base,
			   entry.path, entry.mode, stage, context)) {
		case 0:
			continue;
		case READ_TREE_RECURSIVE:
			break;
		default:
			return -1;
		}

		if (S_ISDIR(entry.mode))
			oidcpy(&oid, entry.oid);
		else if (S_ISGITLINK(entry.mode)) {
			struct commit *commit;

			commit = lookup_commit(entry.oid);
			if (!commit)
				die("Commit %s in submodule path %s%s not found",
				    oid_to_hex(entry.oid),
				    base->buf, entry.path);

			if (parse_commit(commit))
				die("Invalid commit %s in submodule path %s%s",
				    oid_to_hex(entry.oid),
				    base->buf, entry.path);

			oidcpy(&oid, get_commit_tree_oid(commit));
		}
		else
			continue;

		len = tree_entry_len(&entry);
		strbuf_add(base, entry.path, len);
		strbuf_addch(base, '/');
		retval = read_tree_1(lookup_tree(&oid),
				     base, stage, pathspec,
				     fn, context);
		strbuf_setlen(base, oldlen);
		if (retval)
			return -1;
	}
	return 0;
}
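
/*
 * Walk a whole tree, prefixing every path handed to "fn" with the
 * given base.  A minimal (hypothetical) callback that prints each
 * path and recurses into every subtree could look like this:
 *
 *	static int show_path(const struct object_id *oid, struct strbuf *base,
 *			     const char *pathname, unsigned mode, int stage,
 *			     void *context)
 *	{
 *		printf("%s%s\n", base->buf, pathname);
 *		return S_ISDIR(mode) ? READ_TREE_RECURSIVE : 0;
 *	}
 */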
int read_tree_recursive(struct tree *tree,
			const char *base, int baselen,
			int stage, const struct pathspec *pathspec,
			read_tree_fn_t fn, void *context)
{
	struct strbuf sb = STRBUF_INIT;
	int ret;

	strbuf_add(&sb, base, baselen);
	ret = read_tree_1(tree, &sb, stage, pathspec, fn, context);
	strbuf_release(&sb);
	return ret;
}
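
/*
 * QSORT() comparator: canonical index order, by name first and
 * stage second (see cache_name_stage_compare()).
 */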
static int cmp_cache_name_compare(const void *a_, const void *b_)
{
	const struct cache_entry *ce1, *ce2;

	ce1 = *((const struct cache_entry **)a_);
	ce2 = *((const struct cache_entry **)b_);
	return cache_name_stage_compare(ce1->name, ce1->ce_namelen, ce_stage(ce1),
					ce2->name, ce2->ce_namelen, ce_stage(ce2));
}
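
/*
 * Populate the index from a tree at the given stage, appending
 * blindly and sorting afterwards when nothing exists at that stage
 * yet, and falling back to ordinary insertion otherwise.
 */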
int read_tree(struct tree *tree, int stage, struct pathspec *match,
	      struct index_state *istate)
{
	read_tree_fn_t fn = NULL;
	int i, err;

	/*
	 * Currently the only existing callers of this function all
	 * call it with stage=1 and after making sure there is nothing
	 * at that stage; we could always use read_one_entry_quick().
	 *
	 * But when we decide to straighten out git-read-tree not to
	 * use unpack_trees() in some cases, this will probably start
	 * to matter.
	 */

	/*
	 * See if we have a cache entry at the stage.  If so,
	 * do it the original slow way; otherwise, append and then
	 * sort at the end.
	 */
	for (i = 0; !fn && i < istate->cache_nr; i++) {
		const struct cache_entry *ce = istate->cache[i];
		if (ce_stage(ce) == stage)
			fn = read_one_entry;
	}

	if (!fn)
		fn = read_one_entry_quick;
	err = read_tree_recursive(tree, "", 0, stage, match, fn, istate);
	if (fn == read_one_entry || err)
		return err;

	/*
	 * Sort the cache entries -- we need to nuke the cache tree, though.
	 */
	cache_tree_free(&istate->cache_tree);
	QSORT(istate->cache, istate->cache_nr, cmp_cache_name_compare);
	return 0;
}
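
/*
 * Return the (possibly not yet parsed) tree object for the given ID,
 * creating it if needed; NULL if an object with this ID exists but
 * is of a different type.
 */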
struct tree *lookup_tree(const struct object_id *oid)
{
	struct object *obj = lookup_object(oid->hash);
	if (!obj)
		return create_object(oid->hash, alloc_tree_node());
	return object_as_type(obj, OBJ_TREE, 0);
}
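
/*
 * Attach an already-loaded buffer to the tree and mark it parsed;
 * the tree takes ownership of the buffer.
 */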
int parse_tree_buffer(struct tree *item, void *buffer, unsigned long size)
{
	if (item->object.parsed)
		return 0;
	item->object.parsed = 1;
	item->buffer = buffer;
	item->size = size;
	return 0;
}
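
/*
 * Read the tree from the object store and parse it; returns 0 on
 * success and a negative value on error.  With quiet_on_missing, a
 * missing object fails silently instead of printing an error.
 */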
int parse_tree_gently(struct tree *item, int quiet_on_missing)
{
	enum object_type type;
	void *buffer;
	unsigned long size;

	if (item->object.parsed)
		return 0;
	buffer = read_object_file(&item->object.oid, &type, &size);
	if (!buffer)
		return quiet_on_missing ? -1 :
			error("Could not read %s",
			      oid_to_hex(&item->object.oid));
	if (type != OBJ_TREE) {
		free(buffer);
		return error("Object %s not a tree",
			     oid_to_hex(&item->object.oid));
	}
	return parse_tree_buffer(item, buffer, size);
}
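
/*
 * Discard the tree's buffer so that it can be re-read and re-parsed
 * later if needed.
 */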
void free_tree_buffer(struct tree *tree)
{
	FREE_AND_NULL(tree->buffer);
	tree->size = 0;
	tree->object.parsed = 0;
}
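
/*
 * Peel the given object down to a tree, following commits to their
 * trees and tags to whatever they point at; returns NULL when the
 * chain does not end at a tree.
 */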
struct tree *parse_tree_indirect(const struct object_id *oid)
{
	struct object *obj = parse_object(oid);
	do {
		if (!obj)
			return NULL;
		if (obj->type == OBJ_TREE)
			return (struct tree *) obj;
		else if (obj->type == OBJ_COMMIT)
			obj = &(get_commit_tree(((struct commit *)obj))->object);
		else if (obj->type == OBJ_TAG)
			obj = ((struct tag *) obj)->tagged;
		else
			return NULL;
		if (!obj->parsed)
			parse_object(&obj->oid);
	} while (1);
}