2 #include "cache-tree.h"
/* Canonical type name for tree objects, shared across the object code. */
const char *tree_type = "tree";
11 static int read_one_entry_opt(const unsigned char *sha1, const char *base, int baselen, const char *pathname, unsigned mode, int stage, int opt)
15 struct cache_entry *ce;
18 return READ_TREE_RECURSIVE;
20 len = strlen(pathname);
21 size = cache_entry_size(baselen + len);
22 ce = xcalloc(1, size);
24 ce->ce_mode = create_ce_mode(mode);
25 ce->ce_flags = create_ce_flags(baselen + len, stage);
26 memcpy(ce->name, base, baselen);
27 memcpy(ce->name + baselen, pathname, len+1);
28 hashcpy(ce->sha1, sha1);
29 return add_cache_entry(ce, opt);
32 static int read_one_entry(const unsigned char *sha1, const char *base, int baselen, const char *pathname, unsigned mode, int stage, void *context)
34 return read_one_entry_opt(sha1, base, baselen, pathname, mode, stage,
35 ADD_CACHE_OK_TO_ADD|ADD_CACHE_SKIP_DFCHECK);
39 * This is used when the caller knows there is no existing entries at
40 * the stage that will conflict with the entry being added.
42 static int read_one_entry_quick(const unsigned char *sha1, const char *base, int baselen, const char *pathname, unsigned mode, int stage, void *context)
44 return read_one_entry_opt(sha1, base, baselen, pathname, mode, stage,
45 ADD_CACHE_JUST_APPEND);
/*
 * Decide whether the tree entry "path" (with mode "mode"), living under
 * the directory prefix "base" (of length "baselen", '/'-terminated when
 * non-empty), is selected by the NULL-terminated pathspec list "paths".
 *
 * A NULL "paths" selects everything.  Returns 1 when the entry matches
 * (or is a directory that a deeper pathspec will need us to descend
 * into), 0 otherwise.
 */
static int match_tree_entry(const char *base, int baselen, const char *path, unsigned int mode, const char **paths)
{
	const char *match;
	int pathlen;

	if (!paths)
		return 1;
	pathlen = strlen(path);
	while ((match = *paths++) != NULL) {
		int matchlen = strlen(match);

		if (baselen >= matchlen) {
			/* If it doesn't match, move along... */
			if (strncmp(base, match, matchlen))
				continue;
			/* The base is a subdirectory of a path which was specified. */
			return 1;
		}

		/* Does the base match? */
		if (strncmp(base, match, baselen))
			continue;

		match += baselen;
		matchlen -= baselen;

		if (pathlen > matchlen)
			continue;

		if (matchlen > pathlen) {
			if (match[pathlen] != '/')
				continue;
			/* Only a directory can contain the rest of the pathspec. */
			if (!S_ISDIR(mode))
				continue;
		}

		if (strncmp(path, match, pathlen))
			continue;

		return 1;
	}
	return 0;
}
92 int read_tree_recursive(struct tree *tree,
93 const char *base, int baselen,
94 int stage, const char **match,
95 read_tree_fn_t fn, void *context)
97 struct tree_desc desc;
98 struct name_entry entry;
100 if (parse_tree(tree))
103 init_tree_desc(&desc, tree->buffer, tree->size);
105 while (tree_entry(&desc, &entry)) {
106 if (!match_tree_entry(base, baselen, entry.path, entry.mode, match))
109 switch (fn(entry.sha1, base, baselen, entry.path, entry.mode, stage, context)) {
112 case READ_TREE_RECURSIVE:
117 if (S_ISDIR(entry.mode)) {
120 unsigned int pathlen = tree_entry_len(entry.path, entry.sha1);
122 newbase = xmalloc(baselen + 1 + pathlen);
123 memcpy(newbase, base, baselen);
124 memcpy(newbase + baselen, entry.path, pathlen);
125 newbase[baselen + pathlen] = '/';
126 retval = read_tree_recursive(lookup_tree(entry.sha1),
128 baselen + pathlen + 1,
129 stage, match, fn, context);
134 } else if (S_ISGITLINK(entry.mode)) {
137 unsigned int entrylen;
138 struct commit *commit;
140 entrylen = tree_entry_len(entry.path, entry.sha1);
141 strbuf_init(&path, baselen + entrylen + 1);
142 strbuf_add(&path, base, baselen);
143 strbuf_add(&path, entry.path, entrylen);
144 strbuf_addch(&path, '/');
146 commit = lookup_commit(entry.sha1);
148 die("Commit %s in submodule path %s not found",
149 sha1_to_hex(entry.sha1), path.buf);
151 if (parse_commit(commit))
152 die("Invalid commit %s in submodule path %s",
153 sha1_to_hex(entry.sha1), path.buf);
155 retval = read_tree_recursive(commit->tree,
157 stage, match, fn, context);
158 strbuf_release(&path);
167 static int cmp_cache_name_compare(const void *a_, const void *b_)
169 const struct cache_entry *ce1, *ce2;
171 ce1 = *((const struct cache_entry **)a_);
172 ce2 = *((const struct cache_entry **)b_);
173 return cache_name_compare(ce1->name, ce1->ce_flags,
174 ce2->name, ce2->ce_flags);
177 int read_tree(struct tree *tree, int stage, const char **match)
179 read_tree_fn_t fn = NULL;
183 * Currently the only existing callers of this function all
184 * call it with stage=1 and after making sure there is nothing
185 * at that stage; we could always use read_one_entry_quick().
187 * But when we decide to straighten out git-read-tree not to
188 * use unpack_trees() in some cases, this will probably start
193 * See if we have cache entry at the stage. If so,
194 * do it the original slow way, otherwise, append and then
197 for (i = 0; !fn && i < active_nr; i++) {
198 struct cache_entry *ce = active_cache[i];
199 if (ce_stage(ce) == stage)
204 fn = read_one_entry_quick;
205 err = read_tree_recursive(tree, "", 0, stage, match, fn, NULL);
206 if (fn == read_one_entry || err)
210 * Sort the cache entry -- we need to nuke the cache tree, though.
212 cache_tree_free(&active_cache_tree);
213 qsort(active_cache, active_nr, sizeof(active_cache[0]),
214 cmp_cache_name_compare);
218 struct tree *lookup_tree(const unsigned char *sha1)
220 struct object *obj = lookup_object(sha1);
222 return create_object(sha1, OBJ_TREE, alloc_tree_node());
224 obj->type = OBJ_TREE;
225 if (obj->type != OBJ_TREE) {
226 error("Object %s is a %s, not a tree",
227 sha1_to_hex(sha1), typename(obj->type));
230 return (struct tree *) obj;
233 int parse_tree_buffer(struct tree *item, void *buffer, unsigned long size)
235 if (item->object.parsed)
237 item->object.parsed = 1;
238 item->buffer = buffer;
244 int parse_tree(struct tree *item)
246 enum object_type type;
250 if (item->object.parsed)
252 buffer = read_sha1_file(item->object.sha1, &type, &size);
254 return error("Could not read %s",
255 sha1_to_hex(item->object.sha1));
256 if (type != OBJ_TREE) {
258 return error("Object %s not a tree",
259 sha1_to_hex(item->object.sha1));
261 return parse_tree_buffer(item, buffer, size);
264 struct tree *parse_tree_indirect(const unsigned char *sha1)
266 struct object *obj = parse_object(sha1);
270 if (obj->type == OBJ_TREE)
271 return (struct tree *) obj;
272 else if (obj->type == OBJ_COMMIT)
273 obj = &(((struct commit *) obj)->tree->object);
274 else if (obj->type == OBJ_TAG)
275 obj = ((struct tag *) obj)->tagged;
279 parse_object(obj->sha1);