#include "cache.h"
#include "cache-tree.h"
#include "tree.h"
#include "commit.h"
#include "tag.h"
#include "tree-walk.h"

const char *tree_type = "tree";
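/*
 * Turn one tree entry into an in-core cache entry and add it to the
 * index with the given add_cache_entry() options; directories are left
 * to the caller, which recurses into them.
 */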
static int read_one_entry_opt(const unsigned char *sha1, const char *base, int baselen, const char *pathname, unsigned mode, int stage, int opt)
{
	int len;
	unsigned int size;
	struct cache_entry *ce;

	if (S_ISDIR(mode))
		return READ_TREE_RECURSIVE;

	len = strlen(pathname);
	size = cache_entry_size(baselen + len);
	ce = xcalloc(1, size);

	ce->ce_mode = create_ce_mode(mode);
	ce->ce_flags = create_ce_flags(stage);
	ce->ce_namelen = baselen + len;
	memcpy(ce->name, base, baselen);
	memcpy(ce->name + baselen, pathname, len+1);
	hashcpy(ce->sha1, sha1);
	return add_cache_entry(ce, opt);
}
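/*
 * Callback used when entries may already exist at this stage: allow the
 * add but skip the directory/file conflict check.
 */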
static int read_one_entry(const unsigned char *sha1, const char *base, int baselen, const char *pathname, unsigned mode, int stage, void *context)
{
	return read_one_entry_opt(sha1, base, baselen, pathname, mode, stage,
				  ADD_CACHE_OK_TO_ADD|ADD_CACHE_SKIP_DFCHECK);
}
/*
 * This is used when the caller knows there are no existing entries at
 * the stage that will conflict with the entry being added.
 */
static int read_one_entry_quick(const unsigned char *sha1, const char *base, int baselen, const char *pathname, unsigned mode, int stage, void *context)
{
	return read_one_entry_opt(sha1, base, baselen, pathname, mode, stage,
				  ADD_CACHE_JUST_APPEND);
}
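/*
 * Walk one tree, calling fn on every entry that matches the pathspec and
 * recursing into subtrees (and submodule commits) when fn asks for it by
 * returning READ_TREE_RECURSIVE.
 */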
static int read_tree_1(struct tree *tree, struct strbuf *base,
		       int stage, const struct pathspec *pathspec,
		       read_tree_fn_t fn, void *context)
{
	struct tree_desc desc;
	struct name_entry entry;
	unsigned char sha1[20];
	int len, oldlen = base->len;
	enum interesting retval = entry_not_interesting;

	if (parse_tree(tree))
		return -1;

	init_tree_desc(&desc, tree->buffer, tree->size);

	while (tree_entry(&desc, &entry)) {
		if (retval != all_entries_interesting) {
			retval = tree_entry_interesting(&entry, base, 0, pathspec);
			if (retval == all_entries_not_interesting)
				break;
			if (retval == entry_not_interesting)
				continue;
		}

		switch (fn(entry.sha1, base->buf, base->len,
			   entry.path, entry.mode, stage, context)) {
		case 0:
			continue;
		case READ_TREE_RECURSIVE:
			break;
		default:
			return -1;
		}

		if (S_ISDIR(entry.mode))
			hashcpy(sha1, entry.sha1);
		else if (S_ISGITLINK(entry.mode)) {
			/* A gitlink names a commit; descend into that commit's root tree. */
			struct commit *commit;

			commit = lookup_commit(entry.sha1);
			if (!commit)
				die("Commit %s in submodule path %s%s not found",
				    sha1_to_hex(entry.sha1),
				    base->buf, entry.path);

			if (parse_commit(commit))
				die("Invalid commit %s in submodule path %s%s",
				    sha1_to_hex(entry.sha1),
				    base->buf, entry.path);

			hashcpy(sha1, commit->tree->object.sha1);
		}
		else
			continue;

		len = tree_entry_len(&entry);
		strbuf_add(base, entry.path, len);
		strbuf_addch(base, '/');
		retval = read_tree_1(lookup_tree(sha1),
				     base, stage, pathspec,
				     fn, context);
		strbuf_setlen(base, oldlen);
		if (retval)
			return -1;
	}
	return 0;
}
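/*
 * Public entry point: seed the path buffer with "base" and walk the tree.
 */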
int read_tree_recursive(struct tree *tree,
			const char *base, int baselen,
			int stage, const struct pathspec *pathspec,
			read_tree_fn_t fn, void *context)
{
	struct strbuf sb = STRBUF_INIT;
	int ret;

	strbuf_add(&sb, base, baselen);
	ret = read_tree_1(tree, &sb, stage, pathspec, fn, context);
	strbuf_release(&sb);
	return ret;
}
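/* qsort() comparator: order index entries by name, then by stage. */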
static int cmp_cache_name_compare(const void *a_, const void *b_)
{
	const struct cache_entry *ce1, *ce2;

	ce1 = *((const struct cache_entry **)a_);
	ce2 = *((const struct cache_entry **)b_);
	return cache_name_stage_compare(ce1->name, ce1->ce_namelen, ce_stage(ce1),
					ce2->name, ce2->ce_namelen, ce_stage(ce2));
}
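/*
 * Read a whole tree into the index at the given stage: append quickly and
 * sort afterwards when nothing already exists at that stage, otherwise add
 * entries one by one the slow way.
 */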
int read_tree(struct tree *tree, int stage, struct pathspec *match)
{
	read_tree_fn_t fn = NULL;
	int i, err;

	/*
	 * Currently the only existing callers of this function all
	 * call it with stage=1 and after making sure there is nothing
	 * at that stage; we could always use read_one_entry_quick().
	 *
	 * But when we decide to straighten out git-read-tree not to
	 * use unpack_trees() in some cases, this will probably start
	 * to matter.
	 */

	/*
	 * See if we have a cache entry at the stage.  If so,
	 * do it the original slow way; otherwise, append and then
	 * sort at the end.
	 */
	for (i = 0; !fn && i < active_nr; i++) {
		const struct cache_entry *ce = active_cache[i];
		if (ce_stage(ce) == stage)
			fn = read_one_entry;
	}

	if (!fn)
		fn = read_one_entry_quick;
	err = read_tree_recursive(tree, "", 0, stage, match, fn, NULL);
	if (fn == read_one_entry || err)
		return err;

	/*
	 * Sort the cache entries -- we need to nuke the cache tree, though.
	 */
	cache_tree_free(&active_cache_tree);
	qsort(active_cache, active_nr, sizeof(active_cache[0]),
	      cmp_cache_name_compare);
	return 0;
}
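/*
 * Find or create the in-core object for the tree with the given sha1;
 * returns NULL if the object exists but is not a tree.
 */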
struct tree *lookup_tree(const unsigned char *sha1)
{
	struct object *obj = lookup_object(sha1);
	if (!obj)
		return create_object(sha1, OBJ_TREE, alloc_tree_node());
	if (!obj->type)
		obj->type = OBJ_TREE;
	if (obj->type != OBJ_TREE) {
		error("Object %s is a %s, not a tree",
		      sha1_to_hex(sha1), typename(obj->type));
		return NULL;
	}
	return (struct tree *) obj;
}
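/* Attach an already-read tree buffer to the in-core object. */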
int parse_tree_buffer(struct tree *item, void *buffer, unsigned long size)
{
	if (item->object.parsed)
		return 0;
	item->object.parsed = 1;
	item->buffer = buffer;
	item->size = size;

	return 0;
}
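/* Read the tree object from the object database and attach its buffer. */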
int parse_tree(struct tree *item)
{
	enum object_type type;
	void *buffer;
	unsigned long size;

	if (item->object.parsed)
		return 0;
	buffer = read_sha1_file(item->object.sha1, &type, &size);
	if (!buffer)
		return error("Could not read %s",
			     sha1_to_hex(item->object.sha1));
	if (type != OBJ_TREE) {
		free(buffer);
		return error("Object %s not a tree",
			     sha1_to_hex(item->object.sha1));
	}
	return parse_tree_buffer(item, buffer, size);
}
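/* Drop the tree's buffer and mark it unparsed so it can be re-read later. */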
void free_tree_buffer(struct tree *tree)
{
	free(tree->buffer);
	tree->buffer = NULL;
	tree->size = 0;
	tree->object.parsed = 0;
}
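/*
 * Peel commits and tags until a tree is reached; returns NULL if the
 * object cannot be resolved to a tree.
 */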
struct tree *parse_tree_indirect(const unsigned char *sha1)
{
	struct object *obj = parse_object(sha1);
	do {
		if (!obj)
			return NULL;
		if (obj->type == OBJ_TREE)
			return (struct tree *) obj;
		else if (obj->type == OBJ_COMMIT)
			obj = &(((struct commit *) obj)->tree->object);
		else if (obj->type == OBJ_TAG)
			obj = ((struct tag *) obj)->tagged;
		else
			return NULL;
		if (!obj->parsed)
			parse_object(obj->sha1);
	} while (1);
}