block alloc: add lifecycle APIs for cache_entry structs
[git] / tree.c
1 #define NO_THE_INDEX_COMPATIBILITY_MACROS
2 #include "cache.h"
3 #include "cache-tree.h"
4 #include "tree.h"
5 #include "blob.h"
6 #include "commit.h"
7 #include "tag.h"
8 #include "tree-walk.h"
9
/* Canonical type-name string for tree objects. */
const char *tree_type = "tree";
11
12 static int read_one_entry_opt(struct index_state *istate,
13                               const struct object_id *oid,
14                               const char *base, int baselen,
15                               const char *pathname,
16                               unsigned mode, int stage, int opt)
17 {
18         int len;
19         struct cache_entry *ce;
20
21         if (S_ISDIR(mode))
22                 return READ_TREE_RECURSIVE;
23
24         len = strlen(pathname);
25         ce = make_empty_cache_entry(istate, baselen + len);
26
27         ce->ce_mode = create_ce_mode(mode);
28         ce->ce_flags = create_ce_flags(stage);
29         ce->ce_namelen = baselen + len;
30         memcpy(ce->name, base, baselen);
31         memcpy(ce->name + baselen, pathname, len+1);
32         oidcpy(&ce->oid, oid);
33         return add_index_entry(istate, ce, opt);
34 }
35
36 static int read_one_entry(const struct object_id *oid, struct strbuf *base,
37                           const char *pathname, unsigned mode, int stage,
38                           void *context)
39 {
40         struct index_state *istate = context;
41         return read_one_entry_opt(istate, oid, base->buf, base->len, pathname,
42                                   mode, stage,
43                                   ADD_CACHE_OK_TO_ADD|ADD_CACHE_SKIP_DFCHECK);
44 }
45
/*
 * This is used when the caller knows there are no existing entries at
 * the stage that will conflict with the entry being added.
 */
50 static int read_one_entry_quick(const struct object_id *oid, struct strbuf *base,
51                                 const char *pathname, unsigned mode, int stage,
52                                 void *context)
53 {
54         struct index_state *istate = context;
55         return read_one_entry_opt(istate, oid, base->buf, base->len, pathname,
56                                   mode, stage,
57                                   ADD_CACHE_JUST_APPEND);
58 }
59
/*
 * Walk one tree object, calling "fn" for every entry that matches
 * "pathspec", and recursing into subtrees (and submodule commits)
 * when "fn" asks for it with READ_TREE_RECURSIVE.
 *
 * "base" holds the '/'-terminated path of this tree relative to the
 * start of the traversal; it is extended in place before recursing
 * and truncated back afterwards.
 *
 * Returns 0 on success, -1 if the tree cannot be parsed or a callback
 * fails.
 */
static int read_tree_1(struct tree *tree, struct strbuf *base,
		       int stage, const struct pathspec *pathspec,
		       read_tree_fn_t fn, void *context)
{
	struct tree_desc desc;
	struct name_entry entry;
	struct object_id oid;
	int len, oldlen = base->len;
	enum interesting retval = entry_not_interesting;

	if (parse_tree(tree))
		return -1;

	init_tree_desc(&desc, tree->buffer, tree->size);

	while (tree_entry(&desc, &entry)) {
		/*
		 * Once a pathspec says "everything below here is
		 * interesting", stop re-evaluating it per entry.
		 */
		if (retval != all_entries_interesting) {
			retval = tree_entry_interesting(&entry, base, 0, pathspec);
			if (retval == all_entries_not_interesting)
				break;
			if (retval == entry_not_interesting)
				continue;
		}

		/*
		 * 0 means the callback consumed the entry;
		 * READ_TREE_RECURSIVE falls through to the recursion
		 * handling below; anything else aborts the walk.
		 */
		switch (fn(entry.oid, base,
			   entry.path, entry.mode, stage, context)) {
		case 0:
			continue;
		case READ_TREE_RECURSIVE:
			break;
		default:
			return -1;
		}

		if (S_ISDIR(entry.mode))
			oidcpy(&oid, entry.oid);
		else if (S_ISGITLINK(entry.mode)) {
			struct commit *commit;

			commit = lookup_commit(entry.oid);
			if (!commit)
				die("Commit %s in submodule path %s%s not found",
				    oid_to_hex(entry.oid),
				    base->buf, entry.path);

			if (parse_commit(commit))
				die("Invalid commit %s in submodule path %s%s",
				    oid_to_hex(entry.oid),
				    base->buf, entry.path);

			/* Recurse into the submodule commit's tree. */
			oidcpy(&oid, get_commit_tree_oid(commit));
		}
		else
			continue;

		/* Extend "base" with "<entry>/" for the recursive walk. */
		len = tree_entry_len(&entry);
		strbuf_add(base, entry.path, len);
		strbuf_addch(base, '/');
		/*
		 * NOTE(review): "retval" (enum interesting) also receives
		 * the recursion's int result here.  The recursion returns
		 * only 0 or -1, and 0 == entry_not_interesting merely makes
		 * the next iteration re-run the pathspec check, so this is
		 * safe — but the dual use is worth knowing about.
		 */
		retval = read_tree_1(lookup_tree(&oid),
				     base, stage, pathspec,
				     fn, context);
		strbuf_setlen(base, oldlen);
		if (retval)
			return -1;
	}
	return 0;
}
127
128 int read_tree_recursive(struct tree *tree,
129                         const char *base, int baselen,
130                         int stage, const struct pathspec *pathspec,
131                         read_tree_fn_t fn, void *context)
132 {
133         struct strbuf sb = STRBUF_INIT;
134         int ret;
135
136         strbuf_add(&sb, base, baselen);
137         ret = read_tree_1(tree, &sb, stage, pathspec, fn, context);
138         strbuf_release(&sb);
139         return ret;
140 }
141
142 static int cmp_cache_name_compare(const void *a_, const void *b_)
143 {
144         const struct cache_entry *ce1, *ce2;
145
146         ce1 = *((const struct cache_entry **)a_);
147         ce2 = *((const struct cache_entry **)b_);
148         return cache_name_stage_compare(ce1->name, ce1->ce_namelen, ce_stage(ce1),
149                                   ce2->name, ce2->ce_namelen, ce_stage(ce2));
150 }
151
152 int read_tree(struct tree *tree, int stage, struct pathspec *match,
153               struct index_state *istate)
154 {
155         read_tree_fn_t fn = NULL;
156         int i, err;
157
158         /*
159          * Currently the only existing callers of this function all
160          * call it with stage=1 and after making sure there is nothing
161          * at that stage; we could always use read_one_entry_quick().
162          *
163          * But when we decide to straighten out git-read-tree not to
164          * use unpack_trees() in some cases, this will probably start
165          * to matter.
166          */
167
168         /*
169          * See if we have cache entry at the stage.  If so,
170          * do it the original slow way, otherwise, append and then
171          * sort at the end.
172          */
173         for (i = 0; !fn && i < istate->cache_nr; i++) {
174                 const struct cache_entry *ce = istate->cache[i];
175                 if (ce_stage(ce) == stage)
176                         fn = read_one_entry;
177         }
178
179         if (!fn)
180                 fn = read_one_entry_quick;
181         err = read_tree_recursive(tree, "", 0, stage, match, fn, istate);
182         if (fn == read_one_entry || err)
183                 return err;
184
185         /*
186          * Sort the cache entry -- we need to nuke the cache tree, though.
187          */
188         cache_tree_free(&istate->cache_tree);
189         QSORT(istate->cache, istate->cache_nr, cmp_cache_name_compare);
190         return 0;
191 }
192
193 struct tree *lookup_tree(const struct object_id *oid)
194 {
195         struct object *obj = lookup_object(oid->hash);
196         if (!obj)
197                 return create_object(oid->hash, alloc_tree_node());
198         return object_as_type(obj, OBJ_TREE, 0);
199 }
200
201 int parse_tree_buffer(struct tree *item, void *buffer, unsigned long size)
202 {
203         if (item->object.parsed)
204                 return 0;
205         item->object.parsed = 1;
206         item->buffer = buffer;
207         item->size = size;
208
209         return 0;
210 }
211
212 int parse_tree_gently(struct tree *item, int quiet_on_missing)
213 {
214          enum object_type type;
215          void *buffer;
216          unsigned long size;
217
218         if (item->object.parsed)
219                 return 0;
220         buffer = read_object_file(&item->object.oid, &type, &size);
221         if (!buffer)
222                 return quiet_on_missing ? -1 :
223                         error("Could not read %s",
224                              oid_to_hex(&item->object.oid));
225         if (type != OBJ_TREE) {
226                 free(buffer);
227                 return error("Object %s not a tree",
228                              oid_to_hex(&item->object.oid));
229         }
230         return parse_tree_buffer(item, buffer, size);
231 }
232
233 void free_tree_buffer(struct tree *tree)
234 {
235         FREE_AND_NULL(tree->buffer);
236         tree->size = 0;
237         tree->object.parsed = 0;
238 }
239
240 struct tree *parse_tree_indirect(const struct object_id *oid)
241 {
242         struct object *obj = parse_object(oid);
243         do {
244                 if (!obj)
245                         return NULL;
246                 if (obj->type == OBJ_TREE)
247                         return (struct tree *) obj;
248                 else if (obj->type == OBJ_COMMIT)
249                         obj = &(get_commit_tree(((struct commit *)obj))->object);
250                 else if (obj->type == OBJ_TAG)
251                         obj = ((struct tag *) obj)->tagged;
252                 else
253                         return NULL;
254                 if (!obj->parsed)
255                         parse_object(&obj->oid);
256         } while (1);
257 }