builtin/pack-objects.c

#include "builtin.h"
#include "cache.h"
#include "attr.h"
#include "object.h"
#include "blob.h"
#include "commit.h"
#include "tag.h"
#include "tree.h"
#include "delta.h"
#include "pack.h"
#include "pack-revindex.h"
#include "csum-file.h"
#include "tree-walk.h"
#include "diff.h"
#include "revision.h"
#include "list-objects.h"
#include "progress.h"
#include "refs.h"
#include "thread-utils.h"

static const char *pack_usage[] = {
        "git pack-objects --stdout [options...] [< ref-list | < object-list]",
        "git pack-objects [options...] base-name [< ref-list | < object-list]",
        NULL
};

struct object_entry {
        struct pack_idx_entry idx;
        unsigned long size;     /* uncompressed size */
        struct packed_git *in_pack;     /* already in pack */
        off_t in_pack_offset;
        struct object_entry *delta;     /* delta base object */
        struct object_entry *delta_child; /* deltified objects whose base is me */
        struct object_entry *delta_sibling; /* other deltified objects that
                                             * use the same base as me
                                             */
        void *delta_data;       /* cached delta (uncompressed) */
        unsigned long delta_size;       /* delta data size (uncompressed) */
        unsigned long z_delta_size;     /* delta data size (compressed) */
        unsigned int hash;      /* name hint hash */
        enum object_type type;
        enum object_type in_pack_type;  /* could be delta */
        unsigned char in_pack_header_size;
        unsigned char preferred_base; /* we do not pack this, but it is
                                       * available to be used as the base
                                       * object to delta other objects against.
                                       */
        unsigned char no_try_delta;
        unsigned char tagged; /* near the very tip of refs */
        unsigned char filled; /* assigned write-order */
};

/*
 * Objects we are going to pack are collected in the objects array
 * (dynamically expanded).  nr_objects & nr_alloc control this array.
 * They are stored in the order we see them -- typically rev-list
 * --objects order, which gives us a nice "minimum seek" order.
 */
static struct object_entry *objects;
static struct pack_idx_entry **written_list;
static uint32_t nr_objects, nr_alloc, nr_result, nr_written;

static int non_empty;
static int reuse_delta = 1, reuse_object = 1;
static int keep_unreachable, unpack_unreachable, include_tag;
static int local;
static int incremental;
static int ignore_packed_keep;
static int allow_ofs_delta;
static struct pack_idx_option pack_idx_opts;
static const char *base_name;
static int progress = 1;
static int window = 10;
static unsigned long pack_size_limit;
static int depth = 50;
static int delta_search_threads;
static int pack_to_stdout;
static int num_preferred_base;
static struct progress *progress_state;
static int pack_compression_level = Z_DEFAULT_COMPRESSION;
static int pack_compression_seen;

static unsigned long delta_cache_size = 0;
static unsigned long max_delta_cache_size = 256 * 1024 * 1024;
static unsigned long cache_max_small_delta_size = 1000;

static unsigned long window_memory_limit = 0;

/*
 * The object names in the objects array are hashed with this hashtable,
 * to help look up an entry by object name.
 * This hashtable is built after all the objects are seen.
 */
static int *object_ix;
static int object_ix_hashsz;
static struct object_entry *locate_object_entry(const unsigned char *sha1);

/*
 * stats
 */
static uint32_t written, written_delta;
static uint32_t reused, reused_delta;


static void *get_delta(struct object_entry *entry)
{
        unsigned long size, base_size, delta_size;
        void *buf, *base_buf, *delta_buf;
        enum object_type type;

        buf = read_sha1_file(entry->idx.sha1, &type, &size);
        if (!buf)
                die("unable to read %s", sha1_to_hex(entry->idx.sha1));
        base_buf = read_sha1_file(entry->delta->idx.sha1, &type, &base_size);
        if (!base_buf)
                die("unable to read %s", sha1_to_hex(entry->delta->idx.sha1));
        delta_buf = diff_delta(base_buf, base_size,
                               buf, size, &delta_size, 0);
        if (!delta_buf || delta_size != entry->delta_size)
                die("delta size changed");
        free(buf);
        free(base_buf);
        return delta_buf;
}

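/*
 * Deflate the buffer at *pptr in place: the original buffer is freed and
 * *pptr is replaced with a newly allocated buffer holding the compressed
 * data.  Returns the compressed length.
 */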
static unsigned long do_compress(void **pptr, unsigned long size)
{
        git_zstream stream;
        void *in, *out;
        unsigned long maxsize;

        memset(&stream, 0, sizeof(stream));
        git_deflate_init(&stream, pack_compression_level);
        maxsize = git_deflate_bound(&stream, size);

        in = *pptr;
        out = xmalloc(maxsize);
        *pptr = out;

        stream.next_in = in;
        stream.avail_in = size;
        stream.next_out = out;
        stream.avail_out = maxsize;
        while (git_deflate(&stream, Z_FINISH) == Z_OK)
                ; /* nothing */
        git_deflate_end(&stream);

        free(in);
        return stream.total_out;
}

/*
 * We are going to reuse the existing object data as-is.  Make
 * sure it is not corrupt.
 */
static int check_pack_inflate(struct packed_git *p,
                struct pack_window **w_curs,
                off_t offset,
                off_t len,
                unsigned long expect)
{
        git_zstream stream;
        unsigned char fakebuf[4096], *in;
        int st;

        memset(&stream, 0, sizeof(stream));
        git_inflate_init(&stream);
        do {
                in = use_pack(p, w_curs, offset, &stream.avail_in);
                stream.next_in = in;
                stream.next_out = fakebuf;
                stream.avail_out = sizeof(fakebuf);
                st = git_inflate(&stream, Z_FINISH);
                offset += stream.next_in - in;
        } while (st == Z_OK || st == Z_BUF_ERROR);
        git_inflate_end(&stream);
        return (st == Z_STREAM_END &&
                stream.total_out == expect &&
                stream.total_in == len) ? 0 : -1;
}

static void copy_pack_data(struct sha1file *f,
                struct packed_git *p,
                struct pack_window **w_curs,
                off_t offset,
                off_t len)
{
        unsigned char *in;
        unsigned long avail;

        while (len) {
                in = use_pack(p, w_curs, offset, &avail);
                if (avail > len)
                        avail = (unsigned long)len;
                sha1write(f, in, avail);
                offset += avail;
                len -= avail;
        }
}

/* Return 0 if we will bust the pack-size limit */
static unsigned long write_object(struct sha1file *f,
                                  struct object_entry *entry,
                                  off_t write_offset)
{
        unsigned long size, limit, datalen;
        void *buf;
        unsigned char header[10], dheader[10];
        unsigned hdrlen;
        enum object_type type;
        int usable_delta, to_reuse;

        if (!pack_to_stdout)
                crc32_begin(f);

        type = entry->type;

        /* apply size limit if limited packsize and not first object */
        if (!pack_size_limit || !nr_written)
                limit = 0;
        else if (pack_size_limit <= write_offset)
                /*
                 * the earlier object did not fit the limit; avoid
                 * mistaking this with unlimited (i.e. limit = 0).
                 */
                limit = 1;
        else
                limit = pack_size_limit - write_offset;

        if (!entry->delta)
                usable_delta = 0;       /* no delta */
        else if (!pack_size_limit)
                usable_delta = 1;       /* unlimited packfile */
        else if (entry->delta->idx.offset == (off_t)-1)
                usable_delta = 0;       /* base was written to another pack */
        else if (entry->delta->idx.offset)
                usable_delta = 1;       /* base already exists in this pack */
        else
                usable_delta = 0;       /* base could end up in another pack */

        if (!reuse_object)
                to_reuse = 0;   /* explicit */
        else if (!entry->in_pack)
                to_reuse = 0;   /* can't reuse what we don't have */
        else if (type == OBJ_REF_DELTA || type == OBJ_OFS_DELTA)
                                /* check_object() decided it for us ... */
                to_reuse = usable_delta;
                                /* ... but pack split may override that */
        else if (type != entry->in_pack_type)
                to_reuse = 0;   /* pack has delta which is unusable */
        else if (entry->delta)
                to_reuse = 0;   /* we want to pack afresh */
        else
                to_reuse = 1;   /* we have it in-pack undeltified,
                                 * and we do not need to deltify it.
                                 */

        if (!to_reuse) {
                no_reuse:
                if (!usable_delta) {
                        buf = read_sha1_file(entry->idx.sha1, &type, &size);
                        if (!buf)
                                die("unable to read %s", sha1_to_hex(entry->idx.sha1));
                        /*
                         * make sure no cached delta data remains from a
                         * previous attempt before a pack split occurred.
                         */
                        free(entry->delta_data);
                        entry->delta_data = NULL;
                        entry->z_delta_size = 0;
                } else if (entry->delta_data) {
                        size = entry->delta_size;
                        buf = entry->delta_data;
                        entry->delta_data = NULL;
                        type = (allow_ofs_delta && entry->delta->idx.offset) ?
                                OBJ_OFS_DELTA : OBJ_REF_DELTA;
                } else {
                        buf = get_delta(entry);
                        size = entry->delta_size;
                        type = (allow_ofs_delta && entry->delta->idx.offset) ?
                                OBJ_OFS_DELTA : OBJ_REF_DELTA;
                }

                if (entry->z_delta_size)
                        datalen = entry->z_delta_size;
                else
                        datalen = do_compress(&buf, size);

                /*
                 * The object header is a byte of 'type' followed by zero or
                 * more bytes of length.
                 */
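                /*
                 * Concretely, the low four bits of the first byte hold the
                 * low bits of the size, bits 4-6 hold the type, and the high
                 * bit of each byte says "more size bits follow".
                 */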
                hdrlen = encode_in_pack_object_header(type, size, header);

                if (type == OBJ_OFS_DELTA) {
                        /*
                         * Deltas with relative base contain an additional
                         * encoding of the relative offset for the delta
                         * base from this object's position in the pack.
                         */
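                        /*
                         * The offset is written as a big-endian base-128
                         * number; each continuation step subtracts one so
                         * that every byte sequence decodes to a distinct
                         * offset (no redundant encodings).
                         */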
                        off_t ofs = entry->idx.offset - entry->delta->idx.offset;
                        unsigned pos = sizeof(dheader) - 1;
                        dheader[pos] = ofs & 127;
                        while (ofs >>= 7)
                                dheader[--pos] = 128 | (--ofs & 127);
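                        /*
                         * The extra 20 in the limit check below reserves room
                         * for the 20-byte SHA-1 trailer that ends every pack.
                         */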
                        if (limit && hdrlen + sizeof(dheader) - pos + datalen + 20 >= limit) {
                                free(buf);
                                return 0;
                        }
                        sha1write(f, header, hdrlen);
                        sha1write(f, dheader + pos, sizeof(dheader) - pos);
                        hdrlen += sizeof(dheader) - pos;
                } else if (type == OBJ_REF_DELTA) {
                        /*
                         * Deltas with a base reference contain
                         * an additional 20 bytes for the base sha1.
                         */
                        if (limit && hdrlen + 20 + datalen + 20 >= limit) {
                                free(buf);
                                return 0;
                        }
                        sha1write(f, header, hdrlen);
                        sha1write(f, entry->delta->idx.sha1, 20);
                        hdrlen += 20;
                } else {
                        if (limit && hdrlen + datalen + 20 >= limit) {
                                free(buf);
                                return 0;
                        }
                        sha1write(f, header, hdrlen);
                }
                sha1write(f, buf, datalen);
                free(buf);
        }
        else {
                struct packed_git *p = entry->in_pack;
                struct pack_window *w_curs = NULL;
                struct revindex_entry *revidx;
                off_t offset;

                if (entry->delta)
                        type = (allow_ofs_delta && entry->delta->idx.offset) ?
                                OBJ_OFS_DELTA : OBJ_REF_DELTA;
                hdrlen = encode_in_pack_object_header(type, entry->size, header);

                offset = entry->in_pack_offset;
                revidx = find_pack_revindex(p, offset);
                datalen = revidx[1].offset - offset;
                if (!pack_to_stdout && p->index_version > 1 &&
                    check_pack_crc(p, &w_curs, offset, datalen, revidx->nr)) {
                        error("bad packed object CRC for %s", sha1_to_hex(entry->idx.sha1));
                        unuse_pack(&w_curs);
                        goto no_reuse;
                }

                offset += entry->in_pack_header_size;
                datalen -= entry->in_pack_header_size;
                if (!pack_to_stdout && p->index_version == 1 &&
                    check_pack_inflate(p, &w_curs, offset, datalen, entry->size)) {
                        error("corrupt packed object for %s", sha1_to_hex(entry->idx.sha1));
                        unuse_pack(&w_curs);
                        goto no_reuse;
                }

                if (type == OBJ_OFS_DELTA) {
                        off_t ofs = entry->idx.offset - entry->delta->idx.offset;
                        unsigned pos = sizeof(dheader) - 1;
                        dheader[pos] = ofs & 127;
                        while (ofs >>= 7)
                                dheader[--pos] = 128 | (--ofs & 127);
                        if (limit && hdrlen + sizeof(dheader) - pos + datalen + 20 >= limit) {
                                unuse_pack(&w_curs);
                                return 0;
                        }
                        sha1write(f, header, hdrlen);
                        sha1write(f, dheader + pos, sizeof(dheader) - pos);
                        hdrlen += sizeof(dheader) - pos;
                        reused_delta++;
                } else if (type == OBJ_REF_DELTA) {
                        if (limit && hdrlen + 20 + datalen + 20 >= limit) {
                                unuse_pack(&w_curs);
                                return 0;
                        }
                        sha1write(f, header, hdrlen);
                        sha1write(f, entry->delta->idx.sha1, 20);
                        hdrlen += 20;
                        reused_delta++;
                } else {
                        if (limit && hdrlen + datalen + 20 >= limit) {
                                unuse_pack(&w_curs);
                                return 0;
                        }
                        sha1write(f, header, hdrlen);
                }
                copy_pack_data(f, p, &w_curs, offset, datalen);
                unuse_pack(&w_curs);
                reused++;
        }
        if (usable_delta)
                written_delta++;
        written++;
        if (!pack_to_stdout)
                entry->idx.crc32 = crc32_end(f);
        return hdrlen + datalen;
}

enum write_one_status {
        WRITE_ONE_SKIP = -1, /* already written */
        WRITE_ONE_BREAK = 0, /* writing this will bust the limit; not written */
        WRITE_ONE_WRITTEN = 1, /* normal */
        WRITE_ONE_RECURSIVE = 2 /* already scheduled to be written */
};

static enum write_one_status write_one(struct sha1file *f,
                                       struct object_entry *e,
                                       off_t *offset)
{
        unsigned long size;
        int recursing;

        /*
         * we set offset to 1 (which is an impossible value) to mark
         * the fact that this object is involved in "write its base
         * first before writing a deltified object" recursion.
         */
        recursing = (e->idx.offset == 1);
        if (recursing) {
                warning("recursive delta detected for object %s",
                        sha1_to_hex(e->idx.sha1));
                return WRITE_ONE_RECURSIVE;
        } else if (e->idx.offset || e->preferred_base) {
                /* offset is non-zero if object is written already. */
                return WRITE_ONE_SKIP;
        }

        /* if we are deltified, write out base object first. */
        if (e->delta) {
                e->idx.offset = 1; /* now recurse */
                switch (write_one(f, e->delta, offset)) {
                case WRITE_ONE_RECURSIVE:
                        /* we cannot depend on this one */
                        e->delta = NULL;
                        break;
                case WRITE_ONE_BREAK:
                        e->idx.offset = recursing;
                        return WRITE_ONE_BREAK;
                default:
                        break;
                }
        }

        e->idx.offset = *offset;
        size = write_object(f, e, *offset);
        if (!size) {
                e->idx.offset = recursing;
                return WRITE_ONE_BREAK;
        }
        written_list[nr_written++] = &e->idx;

        /* make sure off_t is sufficiently large not to wrap */
        if (signed_add_overflows(*offset, size))
                die("pack too large for current definition of off_t");
        *offset += size;
        return WRITE_ONE_WRITTEN;
}

static int mark_tagged(const char *path, const unsigned char *sha1, int flag,
                       void *cb_data)
{
        unsigned char peeled[20];
        struct object_entry *entry = locate_object_entry(sha1);

        if (entry)
                entry->tagged = 1;
        if (!peel_ref(path, peeled)) {
                entry = locate_object_entry(peeled);
                if (entry)
                        entry->tagged = 1;
        }
        return 0;
}

static inline void add_to_write_order(struct object_entry **wo,
                               unsigned int *endp,
                               struct object_entry *e)
{
        if (e->filled)
                return;
        wo[(*endp)++] = e;
        e->filled = 1;
}

static void add_descendants_to_write_order(struct object_entry **wo,
                                           unsigned int *endp,
                                           struct object_entry *e)
{
        int add_to_order = 1;
        while (e) {
                if (add_to_order) {
                        struct object_entry *s;
                        /* add this node... */
                        add_to_write_order(wo, endp, e);
                        /* all its siblings... */
                        for (s = e->delta_sibling; s; s = s->delta_sibling) {
                                add_to_write_order(wo, endp, s);
                        }
                }
                /* drop down a level to add left subtree nodes if possible */
                if (e->delta_child) {
                        add_to_order = 1;
                        e = e->delta_child;
                } else {
                        add_to_order = 0;
                        /* our sibling might have some children; it is next */
                        if (e->delta_sibling) {
                                e = e->delta_sibling;
                                continue;
                        }
                        /* go back to our parent node */
                        e = e->delta;
                        while (e && !e->delta_sibling) {
                                /* we're on the right side of a subtree, keep
                                 * going up until we can go right again */
                                e = e->delta;
                        }
                        if (!e) {
                                /* done -- we hit our original root node */
                                return;
                        }
                        /* pass it off to sibling at this level */
                        e = e->delta_sibling;
                }
        }
}

static void add_family_to_write_order(struct object_entry **wo,
                                      unsigned int *endp,
                                      struct object_entry *e)
{
        struct object_entry *root;

        for (root = e; root->delta; root = root->delta)
                ; /* nothing */
        add_descendants_to_write_order(wo, endp, root);
}

static struct object_entry **compute_write_order(void)
{
        unsigned int i, wo_end, last_untagged;

        struct object_entry **wo = xmalloc(nr_objects * sizeof(*wo));

        for (i = 0; i < nr_objects; i++) {
                objects[i].tagged = 0;
                objects[i].filled = 0;
                objects[i].delta_child = NULL;
                objects[i].delta_sibling = NULL;
        }

        /*
         * Fully connect delta_child/delta_sibling network.
         * Make sure delta_sibling is sorted in the original
         * recency order.
         */
        for (i = nr_objects; i > 0;) {
                struct object_entry *e = &objects[--i];
                if (!e->delta)
                        continue;
                /* Mark me as the first child */
                e->delta_sibling = e->delta->delta_child;
                e->delta->delta_child = e;
        }

        /*
         * Mark objects that are at the tip of tags.
         */
        for_each_tag_ref(mark_tagged, NULL);

        /*
         * Give the objects in the original recency order until
         * we see a tagged tip.
         */
        for (i = wo_end = 0; i < nr_objects; i++) {
                if (objects[i].tagged)
                        break;
                add_to_write_order(wo, &wo_end, &objects[i]);
        }
        last_untagged = i;

        /*
         * Then fill all the tagged tips.
         */
        for (; i < nr_objects; i++) {
                if (objects[i].tagged)
                        add_to_write_order(wo, &wo_end, &objects[i]);
        }

        /*
         * And then all remaining commits and tags.
         */
        for (i = last_untagged; i < nr_objects; i++) {
                if (objects[i].type != OBJ_COMMIT &&
                    objects[i].type != OBJ_TAG)
                        continue;
                add_to_write_order(wo, &wo_end, &objects[i]);
        }

        /*
         * And then all the trees.
         */
        for (i = last_untagged; i < nr_objects; i++) {
                if (objects[i].type != OBJ_TREE)
                        continue;
                add_to_write_order(wo, &wo_end, &objects[i]);
        }

        /*
         * Finally all the rest in really tight order
         */
        for (i = last_untagged; i < nr_objects; i++) {
                if (!objects[i].filled)
                        add_family_to_write_order(wo, &wo_end, &objects[i]);
        }

        if (wo_end != nr_objects)
                die("ordered %u objects, expected %"PRIu32, wo_end, nr_objects);

        return wo;
}

static void write_pack_file(void)
{
        uint32_t i = 0, j;
        struct sha1file *f;
        off_t offset;
        uint32_t nr_remaining = nr_result;
        time_t last_mtime = 0;
        struct object_entry **write_order;

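        /*
         * progress defaults to 1, so writing to a file shows a meter by
         * default; when streaming to stdout it is shown only if progress
         * was raised above 1 (e.g. with --all-progress).
         */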
        if (progress > pack_to_stdout)
                progress_state = start_progress("Writing objects", nr_result);
        written_list = xmalloc(nr_objects * sizeof(*written_list));
        write_order = compute_write_order();

        do {
                unsigned char sha1[20];
                char *pack_tmp_name = NULL;

                if (pack_to_stdout)
                        f = sha1fd_throughput(1, "<stdout>", progress_state);
                else
                        f = create_tmp_packfile(&pack_tmp_name);

                offset = write_pack_header(f, nr_remaining);
                if (!offset)
                        die_errno("unable to write pack header");
                nr_written = 0;
                for (; i < nr_objects; i++) {
                        struct object_entry *e = write_order[i];
                        if (write_one(f, e, &offset) == WRITE_ONE_BREAK)
                                break;
                        display_progress(progress_state, written);
                }

                /*
                 * Did we write the wrong number of entries in the header?
                 * If so, rewrite it like in fast-import.
                 */
                if (pack_to_stdout) {
                        sha1close(f, sha1, CSUM_CLOSE);
                } else if (nr_written == nr_remaining) {
                        sha1close(f, sha1, CSUM_FSYNC);
                } else {
                        int fd = sha1close(f, sha1, 0);
                        fixup_pack_header_footer(fd, sha1, pack_tmp_name,
                                                 nr_written, sha1, offset);
                        close(fd);
                }

                if (!pack_to_stdout) {
                        struct stat st;
                        char tmpname[PATH_MAX];

                        /*
                         * Packs are accessed at runtime in their mtime
                         * order, since newer packs are more likely to contain
                         * younger objects.  So if we are creating multiple
                         * packs then we should modify the mtime of later ones
                         * to preserve this property.
                         */
                        if (stat(pack_tmp_name, &st) < 0) {
                                warning("failed to stat %s: %s",
                                        pack_tmp_name, strerror(errno));
                        } else if (!last_mtime) {
                                last_mtime = st.st_mtime;
                        } else {
                                struct utimbuf utb;
                                utb.actime = st.st_atime;
                                utb.modtime = --last_mtime;
                                if (utime(pack_tmp_name, &utb) < 0)
                                        warning("failed utime() on %s: %s",
                                                pack_tmp_name, strerror(errno));
                        }

                        /* Enough space for "-<sha-1>.pack"? */
                        if (sizeof(tmpname) <= strlen(base_name) + 50)
                                die("pack base name '%s' too long", base_name);
                        snprintf(tmpname, sizeof(tmpname), "%s-", base_name);
                        finish_tmp_packfile(tmpname, pack_tmp_name,
                                            written_list, nr_written,
                                            &pack_idx_opts, sha1);
                        free(pack_tmp_name);
                        puts(sha1_to_hex(sha1));
                }

                /* mark written objects as written to previous pack */
                for (j = 0; j < nr_written; j++) {
                        written_list[j]->offset = (off_t)-1;
                }
                nr_remaining -= nr_written;
        } while (nr_remaining && i < nr_objects);

        free(written_list);
        free(write_order);
        stop_progress(&progress_state);
        if (written != nr_result)
                die("wrote %"PRIu32" objects while expecting %"PRIu32,
                        written, nr_result);
}

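/*
 * Open-addressed lookup with linear probing.  A slot in object_ix[]
 * holds the object's index into objects[] plus one, so 0 means "empty".
 * Returns the slot index on a hit, or -1 - <first free slot> on a miss
 * so the caller can insert without probing again.
 */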
static int locate_object_entry_hash(const unsigned char *sha1)
{
        int i;
        unsigned int ui;
        memcpy(&ui, sha1, sizeof(unsigned int));
        i = ui % object_ix_hashsz;
        while (0 < object_ix[i]) {
                if (!hashcmp(sha1, objects[object_ix[i] - 1].idx.sha1))
                        return i;
                if (++i == object_ix_hashsz)
                        i = 0;
        }
        return -1 - i;
}

static struct object_entry *locate_object_entry(const unsigned char *sha1)
{
        int i;

        if (!object_ix_hashsz)
                return NULL;

        i = locate_object_entry_hash(sha1);
        if (0 <= i)
                return &objects[object_ix[i]-1];
        return NULL;
}

static void rehash_objects(void)
{
        uint32_t i;
        struct object_entry *oe;

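        /* size the table at 3x the entry count to keep probe chains short */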
        object_ix_hashsz = nr_objects * 3;
        if (object_ix_hashsz < 1024)
                object_ix_hashsz = 1024;
        object_ix = xrealloc(object_ix, sizeof(int) * object_ix_hashsz);
        memset(object_ix, 0, sizeof(int) * object_ix_hashsz);
        for (i = 0, oe = objects; i < nr_objects; i++, oe++) {
                int ix = locate_object_entry_hash(oe->idx.sha1);
                if (0 <= ix)
                        continue;
                ix = -1 - ix;
                object_ix[ix] = i + 1;
        }
}

static unsigned name_hash(const char *name)
{
        unsigned c, hash = 0;

        if (!name)
                return 0;

        /*
         * This effectively just creates a sortable number from the
         * last sixteen non-whitespace characters. Last characters
         * count "most", so things that end in ".c" sort together.
         */
        while ((c = *name++) != 0) {
                if (isspace(c))
                        continue;
                hash = (hash >> 2) + (c << 24);
        }
        return hash;
}

static void setup_delta_attr_check(struct git_attr_check *check)
{
        static struct git_attr *attr_delta;

        if (!attr_delta)
                attr_delta = git_attr("delta");

        check[0].attr = attr_delta;
}

static int no_try_delta(const char *path)
{
        struct git_attr_check check[1];

        setup_delta_attr_check(check);
        if (git_check_attr(path, ARRAY_SIZE(check), check))
                return 0;
        if (ATTR_FALSE(check->value))
                return 1;
        return 0;
}

static int add_object_entry(const unsigned char *sha1, enum object_type type,
                            const char *name, int exclude)
{
        struct object_entry *entry;
        struct packed_git *p, *found_pack = NULL;
        off_t found_offset = 0;
        int ix;
        unsigned hash = name_hash(name);

        ix = nr_objects ? locate_object_entry_hash(sha1) : -1;
        if (ix >= 0) {
                if (exclude) {
                        entry = objects + object_ix[ix] - 1;
                        if (!entry->preferred_base)
                                nr_result--;
                        entry->preferred_base = 1;
                }
                return 0;
        }

        if (!exclude && local && has_loose_object_nonlocal(sha1))
                return 0;

        for (p = packed_git; p; p = p->next) {
                off_t offset = find_pack_entry_one(sha1, p);
                if (offset) {
                        if (!found_pack) {
                                if (!is_pack_valid(p)) {
                                        warning("packfile %s cannot be accessed", p->pack_name);
                                        continue;
                                }
                                found_offset = offset;
                                found_pack = p;
                        }
                        if (exclude)
                                break;
                        if (incremental)
                                return 0;
                        if (local && !p->pack_local)
                                return 0;
                        if (ignore_packed_keep && p->pack_local && p->pack_keep)
                                return 0;
                }
        }

        if (nr_objects >= nr_alloc) {
                nr_alloc = (nr_alloc + 1024) * 3 / 2;
                objects = xrealloc(objects, nr_alloc * sizeof(*entry));
        }

        entry = objects + nr_objects++;
        memset(entry, 0, sizeof(*entry));
        hashcpy(entry->idx.sha1, sha1);
        entry->hash = hash;
        if (type)
                entry->type = type;
        if (exclude)
                entry->preferred_base = 1;
        else
                nr_result++;
        if (found_pack) {
                entry->in_pack = found_pack;
                entry->in_pack_offset = found_offset;
        }

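        /* grow the hash table once it becomes more than 3/4 full */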
        if (object_ix_hashsz * 3 <= nr_objects * 4)
                rehash_objects();
        else
                object_ix[-1 - ix] = nr_objects;

        display_progress(progress_state, nr_objects);

        if (name && no_try_delta(name))
                entry->no_try_delta = 1;

        return 1;
}

struct pbase_tree_cache {
        unsigned char sha1[20];
        int ref;
        int temporary;
        void *tree_data;
        unsigned long tree_size;
};

static struct pbase_tree_cache *(pbase_tree_cache[256]);
static int pbase_tree_cache_ix(const unsigned char *sha1)
{
        return sha1[0] % ARRAY_SIZE(pbase_tree_cache);
}
static int pbase_tree_cache_ix_incr(int ix)
{
        return (ix+1) % ARRAY_SIZE(pbase_tree_cache);
}

static struct pbase_tree {
        struct pbase_tree *next;
        /* This is a phony "cache" entry; we are not
         * going to evict it nor find it through the _get()
         * mechanism -- this is for the toplevel node that
         * would almost always change with any commit.
         */
        struct pbase_tree_cache pcache;
} *pbase_tree;

static struct pbase_tree_cache *pbase_tree_get(const unsigned char *sha1)
{
        struct pbase_tree_cache *ent, *nent;
        void *data;
        unsigned long size;
        enum object_type type;
        int neigh;
        int my_ix = pbase_tree_cache_ix(sha1);
        int available_ix = -1;

        /* pbase_tree_cache acts as a limited hashtable; if an
         * object is cached, it will be found at its index or within
         * a few slots after that.
         */
        for (neigh = 0; neigh < 8; neigh++) {
                ent = pbase_tree_cache[my_ix];
                if (ent && !hashcmp(ent->sha1, sha1)) {
                        ent->ref++;
                        return ent;
                }
                else if (((available_ix < 0) && (!ent || !ent->ref)) ||
                         ((0 <= available_ix) &&
                          (!ent && pbase_tree_cache[available_ix])))
                        available_ix = my_ix;
                if (!ent)
                        break;
                my_ix = pbase_tree_cache_ix_incr(my_ix);
        }

        /* Did not find one.  Either we got a bogus request or
         * we need to read and perhaps cache.
         */
        data = read_sha1_file(sha1, &type, &size);
        if (!data)
                return NULL;
        if (type != OBJ_TREE) {
                free(data);
                return NULL;
        }

        /* We need to either cache or return a throwaway copy */

        if (available_ix < 0)
                ent = NULL;
        else {
                ent = pbase_tree_cache[available_ix];
                my_ix = available_ix;
        }

        if (!ent) {
                nent = xmalloc(sizeof(*nent));
                nent->temporary = (available_ix < 0);
        }
        else {
                /* evict and reuse */
                free(ent->tree_data);
                nent = ent;
        }
        hashcpy(nent->sha1, sha1);
        nent->tree_data = data;
        nent->tree_size = size;
        nent->ref = 1;
        if (!nent->temporary)
                pbase_tree_cache[my_ix] = nent;
        return nent;
}

static void pbase_tree_put(struct pbase_tree_cache *cache)
{
        if (!cache->temporary) {
                cache->ref--;
                return;
        }
        free(cache->tree_data);
        free(cache);
}

static int name_cmp_len(const char *name)
{
        int i;
        for (i = 0; name[i] && name[i] != '\n' && name[i] != '/'; i++)
                ;
        return i;
}

static void add_pbase_object(struct tree_desc *tree,
                             const char *name,
                             int cmplen,
                             const char *fullname)
{
        struct name_entry entry;
        int cmp;

        while (tree_entry(tree, &entry)) {
                if (S_ISGITLINK(entry.mode))
                        continue;
                cmp = tree_entry_len(&entry) != cmplen ? 1 :
                      memcmp(name, entry.path, cmplen);
                if (cmp > 0)
                        continue;
                if (cmp < 0)
                        return;
                if (name[cmplen] != '/') {
                        add_object_entry(entry.sha1,
                                         object_type(entry.mode),
                                         fullname, 1);
                        return;
                }
                if (S_ISDIR(entry.mode)) {
                        struct tree_desc sub;
                        struct pbase_tree_cache *tree;
                        const char *down = name+cmplen+1;
                        int downlen = name_cmp_len(down);

                        tree = pbase_tree_get(entry.sha1);
                        if (!tree)
                                return;
                        init_tree_desc(&sub, tree->tree_data, tree->tree_size);

                        add_pbase_object(&sub, down, downlen, fullname);
                        pbase_tree_put(tree);
                }
        }
}

static unsigned *done_pbase_paths;
static int done_pbase_paths_num;
static int done_pbase_paths_alloc;
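/*
 * Binary search in done_pbase_paths[], which is kept sorted in
 * descending hash order; returns the position on a hit, or
 * -<insertion point> - 1 on a miss.
 */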
static int done_pbase_path_pos(unsigned hash)
{
        int lo = 0;
        int hi = done_pbase_paths_num;
        while (lo < hi) {
                int mi = (hi + lo) / 2;
                if (done_pbase_paths[mi] == hash)
                        return mi;
                if (done_pbase_paths[mi] < hash)
                        hi = mi;
                else
                        lo = mi + 1;
        }
        return -lo-1;
}

static int check_pbase_path(unsigned hash)
{
        int pos = (!done_pbase_paths) ? -1 : done_pbase_path_pos(hash);
        if (0 <= pos)
                return 1;
        pos = -pos - 1;
        if (done_pbase_paths_alloc <= done_pbase_paths_num) {
                done_pbase_paths_alloc = alloc_nr(done_pbase_paths_alloc);
                done_pbase_paths = xrealloc(done_pbase_paths,
                                            done_pbase_paths_alloc *
                                            sizeof(unsigned));
        }
        done_pbase_paths_num++;
        if (pos < done_pbase_paths_num)
                memmove(done_pbase_paths + pos + 1,
                        done_pbase_paths + pos,
                        (done_pbase_paths_num - pos - 1) * sizeof(unsigned));
        done_pbase_paths[pos] = hash;
        return 0;
}

static void add_preferred_base_object(const char *name)
{
        struct pbase_tree *it;
        int cmplen;
        unsigned hash = name_hash(name);

        if (!num_preferred_base || check_pbase_path(hash))
                return;

        cmplen = name_cmp_len(name);
        for (it = pbase_tree; it; it = it->next) {
                if (cmplen == 0) {
                        add_object_entry(it->pcache.sha1, OBJ_TREE, NULL, 1);
                }
                else {
                        struct tree_desc tree;
                        init_tree_desc(&tree, it->pcache.tree_data, it->pcache.tree_size);
                        add_pbase_object(&tree, name, cmplen, name);
                }
        }
}

static void add_preferred_base(unsigned char *sha1)
{
        struct pbase_tree *it;
        void *data;
        unsigned long size;
        unsigned char tree_sha1[20];

        if (window <= num_preferred_base++)
                return;

        data = read_object_with_reference(sha1, tree_type, &size, tree_sha1);
        if (!data)
                return;

        for (it = pbase_tree; it; it = it->next) {
                if (!hashcmp(it->pcache.sha1, tree_sha1)) {
                        free(data);
                        return;
                }
        }

        it = xcalloc(1, sizeof(*it));
        it->next = pbase_tree;
        pbase_tree = it;

        hashcpy(it->pcache.sha1, tree_sha1);
        it->pcache.tree_data = data;
        it->pcache.tree_size = size;
}

static void cleanup_preferred_base(void)
{
        struct pbase_tree *it;
        unsigned i;

        it = pbase_tree;
        pbase_tree = NULL;
        while (it) {
                struct pbase_tree *this = it;
                it = this->next;
                free(this->pcache.tree_data);
                free(this);
        }

        for (i = 0; i < ARRAY_SIZE(pbase_tree_cache); i++) {
                if (!pbase_tree_cache[i])
                        continue;
                free(pbase_tree_cache[i]->tree_data);
                free(pbase_tree_cache[i]);
                pbase_tree_cache[i] = NULL;
        }

        free(done_pbase_paths);
        done_pbase_paths = NULL;
        done_pbase_paths_num = done_pbase_paths_alloc = 0;
}

static void check_object(struct object_entry *entry)
{
        if (entry->in_pack) {
                struct packed_git *p = entry->in_pack;
                struct pack_window *w_curs = NULL;
                const unsigned char *base_ref = NULL;
                struct object_entry *base_entry;
                unsigned long used, used_0;
                unsigned long avail;
                off_t ofs;
                unsigned char *buf, c;

                buf = use_pack(p, &w_curs, entry->in_pack_offset, &avail);

                /*
                 * We want in_pack_type even if we do not reuse delta
                 * since non-delta representations could still be reused.
                 */
                used = unpack_object_header_buffer(buf, avail,
                                                   &entry->in_pack_type,
                                                   &entry->size);
                if (used == 0)
                        goto give_up;

                /*
                 * Determine if this is a delta and if so whether we can
                 * reuse it or not.  Otherwise let's find out as cheaply as
                 * possible what the actual type and size for this object is.
                 */
                switch (entry->in_pack_type) {
                default:
                        /* Not a delta, hence we've already got all we need. */
                        entry->type = entry->in_pack_type;
                        entry->in_pack_header_size = used;
                        if (entry->type < OBJ_COMMIT || entry->type > OBJ_BLOB)
                                goto give_up;
                        unuse_pack(&w_curs);
                        return;
                case OBJ_REF_DELTA:
                        if (reuse_delta && !entry->preferred_base)
                                base_ref = use_pack(p, &w_curs,
                                                entry->in_pack_offset + used, NULL);
                        entry->in_pack_header_size = used + 20;
                        break;
                case OBJ_OFS_DELTA:
                        buf = use_pack(p, &w_curs,
                                       entry->in_pack_offset + used, NULL);
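                        /*
                         * Decode the delta base offset: the inverse of the
                         * base-128 encoding written by write_object(), adding
                         * one back at each continuation byte.
                         */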
                        used_0 = 0;
                        c = buf[used_0++];
                        ofs = c & 127;
                        while (c & 128) {
                                ofs += 1;
                                if (!ofs || MSB(ofs, 7)) {
                                        error("delta base offset overflow in pack for %s",
                                              sha1_to_hex(entry->idx.sha1));
                                        goto give_up;
                                }
                                c = buf[used_0++];
                                ofs = (ofs << 7) + (c & 127);
                        }
                        ofs = entry->in_pack_offset - ofs;
                        if (ofs <= 0 || ofs >= entry->in_pack_offset) {
                                error("delta base offset out of bounds for %s",
                                      sha1_to_hex(entry->idx.sha1));
                                goto give_up;
                        }
                        if (reuse_delta && !entry->preferred_base) {
                                struct revindex_entry *revidx;
                                revidx = find_pack_revindex(p, ofs);
                                if (!revidx)
                                        goto give_up;
                                base_ref = nth_packed_object_sha1(p, revidx->nr);
                        }
                        entry->in_pack_header_size = used + used_0;
                        break;
                }

                if (base_ref && (base_entry = locate_object_entry(base_ref))) {
                        /*
                         * If base_ref was set above that means we wish to
                         * reuse delta data, and we even found that base
                         * in the list of objects we want to pack. Goodie!
                         *
                         * Depth value does not matter - find_deltas() will
                         * never consider reused delta as the base object to
                         * deltify other objects against, in order to avoid
                         * circular deltas.
                         */
                        entry->type = entry->in_pack_type;
                        entry->delta = base_entry;
                        entry->delta_size = entry->size;
                        entry->delta_sibling = base_entry->delta_child;
                        base_entry->delta_child = entry;
                        unuse_pack(&w_curs);
                        return;
                }

                if (entry->type) {
                        /*
                         * This must be a delta and we already know what the
                         * final object type is.  Let's extract the actual
                         * object size from the delta header.
                         */
                        entry->size = get_size_from_delta(p, &w_curs,
                                        entry->in_pack_offset + entry->in_pack_header_size);
                        if (entry->size == 0)
                                goto give_up;
                        unuse_pack(&w_curs);
                        return;
                }

                /*
                 * No choice but to fall back to the recursive delta walk
                 * with sha1_object_info() to find out the object type
                 * at this point...
                 */
                give_up:
                unuse_pack(&w_curs);
        }

        entry->type = sha1_object_info(entry->idx.sha1, &entry->size);
        /*
         * The error condition is checked in prepare_pack().  This is
         * to permit a missing preferred base object to be ignored
         * as a preferred base.  Doing so can result in a larger
         * pack file, but the transfer will still take place.
         */
}

static int pack_offset_sort(const void *_a, const void *_b)
{
        const struct object_entry *a = *(struct object_entry **)_a;
        const struct object_entry *b = *(struct object_entry **)_b;

        /* avoid filesystem thrashing with loose objects */
        if (!a->in_pack && !b->in_pack)
                return hashcmp(a->idx.sha1, b->idx.sha1);

        if (a->in_pack < b->in_pack)
                return -1;
        if (a->in_pack > b->in_pack)
                return 1;
        return a->in_pack_offset < b->in_pack_offset ? -1 :
                        (a->in_pack_offset > b->in_pack_offset);
}

static void get_object_details(void)
{
        uint32_t i;
        struct object_entry **sorted_by_offset;

        sorted_by_offset = xcalloc(nr_objects, sizeof(struct object_entry *));
        for (i = 0; i < nr_objects; i++)
                sorted_by_offset[i] = objects + i;
        qsort(sorted_by_offset, nr_objects, sizeof(*sorted_by_offset), pack_offset_sort);

        for (i = 0; i < nr_objects; i++) {
                struct object_entry *entry = sorted_by_offset[i];
                check_object(entry);
                if (big_file_threshold <= entry->size)
                        entry->no_try_delta = 1;
        }

        free(sorted_by_offset);
}

/*
 * We search for deltas in a list sorted by type, by filename hash, and then
 * by size, so that we see progressively smaller and smaller files.
 * That's because we prefer deltas to be from the bigger file
 * to the smaller -- deletes are potentially cheaper, but perhaps
 * more importantly, the bigger file is likely the more recent
 * one.  The deepest deltas therefore belong to the oldest objects,
 * which are the least likely to be accessed often.
 */
static int type_size_sort(const void *_a, const void *_b)
{
        const struct object_entry *a = *(struct object_entry **)_a;
        const struct object_entry *b = *(struct object_entry **)_b;

        if (a->type > b->type)
                return -1;
        if (a->type < b->type)
                return 1;
        if (a->hash > b->hash)
                return -1;
        if (a->hash < b->hash)
                return 1;
        if (a->preferred_base > b->preferred_base)
                return -1;
        if (a->preferred_base < b->preferred_base)
                return 1;
        if (a->size > b->size)
                return -1;
        if (a->size < b->size)
                return 1;
        return a < b ? -1 : (a > b);  /* newest first */
}

struct unpacked {
        struct object_entry *entry;
        void *data;
        struct delta_index *index;
        unsigned depth;
};

static int delta_cacheable(unsigned long src_size, unsigned long trg_size,
                           unsigned long delta_size)
{
        if (max_delta_cache_size && delta_cache_size + delta_size > max_delta_cache_size)
                return 0;

        if (delta_size < cache_max_small_delta_size)
                return 1;

        /* cache delta, if objects are large enough compared to delta size */
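        /* i.e. src_size/1MiB + trg_size/2MiB must exceed delta_size/1KiB */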
        if ((src_size >> 20) + (trg_size >> 21) > (delta_size >> 10))
                return 1;

        return 0;
}

1392 #ifndef NO_PTHREADS
1393
1394 static pthread_mutex_t read_mutex;
1395 #define read_lock()             pthread_mutex_lock(&read_mutex)
1396 #define read_unlock()           pthread_mutex_unlock(&read_mutex)
1397
1398 static pthread_mutex_t cache_mutex;
1399 #define cache_lock()            pthread_mutex_lock(&cache_mutex)
1400 #define cache_unlock()          pthread_mutex_unlock(&cache_mutex)
1401
1402 static pthread_mutex_t progress_mutex;
1403 #define progress_lock()         pthread_mutex_lock(&progress_mutex)
1404 #define progress_unlock()       pthread_mutex_unlock(&progress_mutex)
1405
1406 #else
1407
1408 #define read_lock()             (void)0
1409 #define read_unlock()           (void)0
1410 #define cache_lock()            (void)0
1411 #define cache_unlock()          (void)0
1412 #define progress_lock()         (void)0
1413 #define progress_unlock()       (void)0
1414
1415 #endif
1416
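/*
 * Return contract of try_delta(): -1 means no further window entries
 * can match this target (the candidate's type differs, and the list
 * is sorted by type, so the caller stops scanning the window); 0 means
 * this particular src/trg pairing was skipped; 1 means a new, better
 * delta against src was adopted for trg.
 */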
1417 static int try_delta(struct unpacked *trg, struct unpacked *src,
1418                      unsigned max_depth, unsigned long *mem_usage)
1419 {
1420         struct object_entry *trg_entry = trg->entry;
1421         struct object_entry *src_entry = src->entry;
1422         unsigned long trg_size, src_size, delta_size, sizediff, max_size, sz;
1423         unsigned ref_depth;
1424         enum object_type type;
1425         void *delta_buf;
1426
1427         /* Don't bother doing diffs between different types */
1428         if (trg_entry->type != src_entry->type)
1429                 return -1;
1430
1431         /*
1432          * We do not bother to try a delta that we discarded on an
1433          * earlier try, but only when reusing delta data.  Note that a
1434          * src_entry marked as the preferred_base should always be
1435          * considered: even if we produce a suboptimal delta against
1436          * it, we will still save the transfer cost, since we already know
1437          * the other side has it and we won't send src_entry at all.
1438          */
1439         if (reuse_delta && trg_entry->in_pack &&
1440             trg_entry->in_pack == src_entry->in_pack &&
1441             !src_entry->preferred_base &&
1442             trg_entry->in_pack_type != OBJ_REF_DELTA &&
1443             trg_entry->in_pack_type != OBJ_OFS_DELTA)
1444                 return 0;
1445
1446         /* Let's not bust the allowed depth. */
1447         if (src->depth >= max_depth)
1448                 return 0;
1449
1450         /* Now some size filtering heuristics. */
1451         trg_size = trg_entry->size;
1452         if (!trg_entry->delta) {
1453                 max_size = trg_size/2 - 20;
1454                 ref_depth = 1;
1455         } else {
1456                 max_size = trg_entry->delta_size;
1457                 ref_depth = trg->depth;
1458         }
1459         max_size = (uint64_t)max_size * (max_depth - src->depth) /
1460                                                 (max_depth - ref_depth + 1);
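        /*
         * A worked example with hypothetical numbers: max_depth = 50,
         * src->depth = 10, and a not-yet-deltified target (ref_depth
         * = 1) scale the budget by (50 - 10) / (50 - 1 + 1) = 40/50,
         * so the new delta is accepted only at up to 80% of
         * trg_size/2 - 20: a deeper chain has to pay for itself with
         * a proportionally smaller delta.
         */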
1461         if (max_size == 0)
1462                 return 0;
1463         src_size = src_entry->size;
1464         sizediff = src_size < trg_size ? trg_size - src_size : 0;
1465         if (sizediff >= max_size)
1466                 return 0;
1467         if (trg_size < src_size / 32)
1468                 return 0;
1469
1470         /* Load data if not already done */
1471         if (!trg->data) {
1472                 read_lock();
1473                 trg->data = read_sha1_file(trg_entry->idx.sha1, &type, &sz);
1474                 read_unlock();
1475                 if (!trg->data)
1476                         die("object %s cannot be read",
1477                             sha1_to_hex(trg_entry->idx.sha1));
1478                 if (sz != trg_size)
1479                         die("object %s inconsistent object length (%lu vs %lu)",
1480                             sha1_to_hex(trg_entry->idx.sha1), sz, trg_size);
1481                 *mem_usage += sz;
1482         }
1483         if (!src->data) {
1484                 read_lock();
1485                 src->data = read_sha1_file(src_entry->idx.sha1, &type, &sz);
1486                 read_unlock();
1487                 if (!src->data) {
1488                         if (src_entry->preferred_base) {
1489                                 static int warned = 0;
1490                                 if (!warned++)
1491                                         warning("object %s cannot be read",
1492                                                 sha1_to_hex(src_entry->idx.sha1));
1493                                 /*
1494                                  * Those objects are not included in the
1495                                  * resulting pack.  Be resilient and ignore
1496                                  * them if they can't be read, so that the
1497                                  * pack can still be created.
1498                                  */
1499                                 return 0;
1500                         }
1501                         die("object %s cannot be read",
1502                             sha1_to_hex(src_entry->idx.sha1));
1503                 }
1504                 if (sz != src_size)
1505                         die("object %s inconsistent object length (%lu vs %lu)",
1506                             sha1_to_hex(src_entry->idx.sha1), sz, src_size);
1507                 *mem_usage += sz;
1508         }
1509         if (!src->index) {
1510                 src->index = create_delta_index(src->data, src_size);
1511                 if (!src->index) {
1512                         static int warned = 0;
1513                         if (!warned++)
1514                                 warning("suboptimal pack - out of memory");
1515                         return 0;
1516                 }
1517                 *mem_usage += sizeof_delta_index(src->index);
1518         }
1519
1520         delta_buf = create_delta(src->index, trg->data, trg_size, &delta_size, max_size);
1521         if (!delta_buf)
1522                 return 0;
1523
1524         if (trg_entry->delta) {
1525                 /* Prefer only shallower same-sized deltas. */
1526                 if (delta_size == trg_entry->delta_size &&
1527                     src->depth + 1 >= trg->depth) {
1528                         free(delta_buf);
1529                         return 0;
1530                 }
1531         }
1532
1533         /*
1534          * Handle memory allocation outside of the cache
1535          * accounting lock.  The compiler will optimize the strangeness
1536          * away when NO_PTHREADS is defined.
1537          */
1538         free(trg_entry->delta_data);
1539         cache_lock();
1540         if (trg_entry->delta_data) {
1541                 delta_cache_size -= trg_entry->delta_size;
1542                 trg_entry->delta_data = NULL;
1543         }
1544         if (delta_cacheable(src_size, trg_size, delta_size)) {
1545                 delta_cache_size += delta_size;
1546                 cache_unlock();
1547                 trg_entry->delta_data = xrealloc(delta_buf, delta_size);
1548         } else {
1549                 cache_unlock();
1550                 free(delta_buf);
1551         }
1552
1553         trg_entry->delta = src_entry;
1554         trg_entry->delta_size = delta_size;
1555         trg->depth = src->depth + 1;
1556
1557         return 1;
1558 }
1559
1560 static unsigned int check_delta_limit(struct object_entry *me, unsigned int n)
1561 {
1562         struct object_entry *child = me->delta_child;
1563         unsigned int m = n;
1564         while (child) {
1565                 unsigned int c = check_delta_limit(child, n + 1);
1566                 if (m < c)
1567                         m = c;
1568                 child = child->delta_sibling;
1569         }
1570         return m;
1571 }
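/*
 * For illustration, if me's delta children form the (hypothetical)
 * tree
 *
 *         me
 *        /  \
 *       B    C              check_delta_limit(me, 0) == 3
 *             \
 *              D --- E
 *
 * the recursion returns the longest chain below me (me->C->D->E),
 * i.e. how much of the depth budget the existing children already
 * consume.
 */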
1572
1573 static unsigned long free_unpacked(struct unpacked *n)
1574 {
1575         unsigned long freed_mem = sizeof_delta_index(n->index);
1576         free_delta_index(n->index);
1577         n->index = NULL;
1578         if (n->data) {
1579                 freed_mem += n->entry->size;
1580                 free(n->data);
1581                 n->data = NULL;
1582         }
1583         n->entry = NULL;
1584         n->depth = 0;
1585         return freed_mem;
1586 }
1587
1588 static void find_deltas(struct object_entry **list, unsigned *list_size,
1589                         int window, int depth, unsigned *processed)
1590 {
1591         uint32_t i, idx = 0, count = 0;
1592         struct unpacked *array;
1593         unsigned long mem_usage = 0;
1594
1595         array = xcalloc(window, sizeof(struct unpacked));
1596
1597         for (;;) {
1598                 struct object_entry *entry;
1599                 struct unpacked *n = array + idx;
1600                 int j, max_depth, best_base = -1;
1601
1602                 progress_lock();
1603                 if (!*list_size) {
1604                         progress_unlock();
1605                         break;
1606                 }
1607                 entry = *list++;
1608                 (*list_size)--;
1609                 if (!entry->preferred_base) {
1610                         (*processed)++;
1611                         display_progress(progress_state, *processed);
1612                 }
1613                 progress_unlock();
1614
1615                 mem_usage -= free_unpacked(n);
1616                 n->entry = entry;
1617
1618                 while (window_memory_limit &&
1619                        mem_usage > window_memory_limit &&
1620                        count > 1) {
1621                         uint32_t tail = (idx + window - count) % window;
1622                         mem_usage -= free_unpacked(array + tail);
1623                         count--;
1624                 }
1625
1626                 /* We do not compute deltas to *create* objects we are not
1627                  * going to pack; they only serve as delta bases.
1628                  */
1629                 if (entry->preferred_base)
1630                         goto next;
1631
1632                 /*
1633                  * If the current object is at the pack edge, take the depth
1634                  * of the objects that depend on it into account; otherwise
1635                  * their delta chains would become too deep.
1636                  */
1637                 max_depth = depth;
1638                 if (entry->delta_child) {
1639                         max_depth -= check_delta_limit(entry, 0);
1640                         if (max_depth <= 0)
1641                                 goto next;
1642                 }
1643
1644                 j = window;
1645                 while (--j > 0) {
1646                         int ret;
1647                         uint32_t other_idx = idx + j;
1648                         struct unpacked *m;
1649                         if (other_idx >= window)
1650                                 other_idx -= window;
1651                         m = array + other_idx;
1652                         if (!m->entry)
1653                                 break;
1654                         ret = try_delta(n, m, max_depth, &mem_usage);
1655                         if (ret < 0)
1656                                 break;
1657                         else if (ret > 0)
1658                                 best_base = other_idx;
1659                 }
1660
1661                 /*
1662                  * If we decided to cache the delta data, then it is best
1663                  * to compress it right away.  First because we have to do
1664                  * it anyway, and doing it here while we're threaded will
1665                  * save a lot of time in the non-threaded write phase,
1666                  * as well as allow for caching more deltas within
1667                  * the same cache size limit.
1668                  * ...
1669                  * But only if not writing to stdout, since in that case
1670                  * the network is most likely throttling writes anyway,
1671                  * and therefore it is best to go to the write phase ASAP
1672                  * instead, as we can afford to spend more time compressing
1673                  * between writes at that moment.
1674                  */
1675                 if (entry->delta_data && !pack_to_stdout) {
1676                         entry->z_delta_size = do_compress(&entry->delta_data,
1677                                                           entry->delta_size);
1678                         cache_lock();
1679                         delta_cache_size -= entry->delta_size;
1680                         delta_cache_size += entry->z_delta_size;
1681                         cache_unlock();
1682                 }
1683
1684                 /* If we made n a delta, and n is already at max
1685                  * depth, leaving it in the window is pointless.  We
1686                  * should evict it first.
1687                  */
1688                 if (entry->delta && max_depth <= n->depth)
1689                         continue;
1690
1691                 /*
1692                  * Move the best delta base up in the window, after the
1693                  * currently deltified object, to keep it longer.  It will
1694                  * be the first base object to be attempted next.
1695                  */
1696                 if (entry->delta) {
1697                         struct unpacked swap = array[best_base];
1698                         int dist = (window + idx - best_base) % window;
1699                         int dst = best_base;
1700                         while (dist--) {
1701                                 int src = (dst + 1) % window;
1702                                 array[dst] = array[src];
1703                                 dst = src;
1704                         }
1705                         array[dst] = swap;
1706                 }
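                /*
                 * E.g. (hypothetical state): window = 4, idx = 2,
                 * best_base = 0 gives dist = (4 + 2 - 0) % 4 = 2;
                 * slots 1..2 shift down one place and the old
                 * array[0] lands in array[2], immediately behind the
                 * slot idx advances to -- so it becomes the
                 * j = window - 1 candidate, the first one tried, for
                 * the next object.
                 */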
1707
1708                 next:
1709                 idx++;
1710                 if (count + 1 < window)
1711                         count++;
1712                 if (idx >= window)
1713                         idx = 0;
1714         }
1715
1716         for (i = 0; i < window; ++i) {
1717                 free_delta_index(array[i].index);
1718                 free(array[i].data);
1719         }
1720         free(array);
1721 }
1722
1723 #ifndef NO_PTHREADS
1724
1725 static void try_to_free_from_threads(size_t size)
1726 {
1727         read_lock();
1728         release_pack_memory(size, -1);
1729         read_unlock();
1730 }
1731
1732 static try_to_free_t old_try_to_free_routine;
1733
1734 /*
1735  * The main thread waits on the condition that (at least) one of the workers
1736  * has stopped working (which is indicated in the .working member of
1737  * struct thread_params).
1738  * When a worker thread has completed its work, it sets .working to 0,
1739  * signals the main thread, and waits on the condition that .data_ready
1740  * becomes 1.
1741  */
1742
1743 struct thread_params {
1744         pthread_t thread;
1745         struct object_entry **list;
1746         unsigned list_size;
1747         unsigned remaining;
1748         int window;
1749         int depth;
1750         int working;
1751         int data_ready;
1752         pthread_mutex_t mutex;
1753         pthread_cond_t cond;
1754         unsigned *processed;
1755 };
1756
1757 static pthread_cond_t progress_cond;
1758
1759 /*
1760  * A mutex and a condition variable can't be statically initialized on Windows.
1761  */
1762 static void init_threaded_search(void)
1763 {
1764         init_recursive_mutex(&read_mutex);
1765         pthread_mutex_init(&cache_mutex, NULL);
1766         pthread_mutex_init(&progress_mutex, NULL);
1767         pthread_cond_init(&progress_cond, NULL);
1768         old_try_to_free_routine = set_try_to_free_routine(try_to_free_from_threads);
1769 }
1770
1771 static void cleanup_threaded_search(void)
1772 {
1773         set_try_to_free_routine(old_try_to_free_routine);
1774         pthread_cond_destroy(&progress_cond);
1775         pthread_mutex_destroy(&read_mutex);
1776         pthread_mutex_destroy(&cache_mutex);
1777         pthread_mutex_destroy(&progress_mutex);
1778 }
1779
1780 static void *threaded_find_deltas(void *arg)
1781 {
1782         struct thread_params *me = arg;
1783
1784         while (me->remaining) {
1785                 find_deltas(me->list, &me->remaining,
1786                             me->window, me->depth, me->processed);
1787
1788                 progress_lock();
1789                 me->working = 0;
1790                 pthread_cond_signal(&progress_cond);
1791                 progress_unlock();
1792
1793                 /*
1794                  * We must not set ->data_ready before we wait on the
1795                  * condition because the main thread may have set it to 1
1796                  * before we get here. In order to be sure that new
1797                  * work is available if we see 1 in ->data_ready, it
1798                  * was initialized to 0 before this thread was spawned
1799                  * and we reset it to 0 right away.
1800                  */
1801                 pthread_mutex_lock(&me->mutex);
1802                 while (!me->data_ready)
1803                         pthread_cond_wait(&me->cond, &me->mutex);
1804                 me->data_ready = 0;
1805                 pthread_mutex_unlock(&me->mutex);
1806         }
1807         /* leave ->working 1 so that this doesn't get more work assigned */
1808         return NULL;
1809 }
1810
1811 static void ll_find_deltas(struct object_entry **list, unsigned list_size,
1812                            int window, int depth, unsigned *processed)
1813 {
1814         struct thread_params *p;
1815         int i, ret, active_threads = 0;
1816
1817         init_threaded_search();
1818
1819         if (!delta_search_threads)      /* --threads=0 means autodetect */
1820                 delta_search_threads = online_cpus();
1821         if (delta_search_threads <= 1) {
1822                 find_deltas(list, &list_size, window, depth, processed);
1823                 cleanup_threaded_search();
1824                 return;
1825         }
1826         if (progress > pack_to_stdout)
1827                 fprintf(stderr, "Delta compression using up to %d threads.\n",
1828                                 delta_search_threads);
1829         p = xcalloc(delta_search_threads, sizeof(*p));
1830
1831         /* Partition the work amongst work threads. */
1832         for (i = 0; i < delta_search_threads; i++) {
1833                 unsigned sub_size = list_size / (delta_search_threads - i);
1834
1835                 /* don't use segments that are too small, or no deltas will be found */
1836                 if (sub_size < 2*window && i+1 < delta_search_threads)
1837                         sub_size = 0;
1838
1839                 p[i].window = window;
1840                 p[i].depth = depth;
1841                 p[i].processed = processed;
1842                 p[i].working = 1;
1843                 p[i].data_ready = 0;
1844
1845                 /* try to split chunks on "path" boundaries */
1846                 while (sub_size && sub_size < list_size &&
1847                        list[sub_size]->hash &&
1848                        list[sub_size]->hash == list[sub_size-1]->hash)
1849                         sub_size++;
1850
1851                 p[i].list = list;
1852                 p[i].list_size = sub_size;
1853                 p[i].remaining = sub_size;
1854
1855                 list += sub_size;
1856                 list_size -= sub_size;
1857         }
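        /*
         * A worked example with hypothetical numbers: list_size = 1000
         * and 4 threads give sub_size = 1000/4 = 250 for the first
         * chunk, then 750/3 = 250, and so on -- except that each
         * boundary is pushed forward while neighbouring entries share
         * a name hash, so objects from one "path" never straddle two
         * threads.
         */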
1858
1859         /* Start work threads. */
1860         for (i = 0; i < delta_search_threads; i++) {
1861                 if (!p[i].list_size)
1862                         continue;
1863                 pthread_mutex_init(&p[i].mutex, NULL);
1864                 pthread_cond_init(&p[i].cond, NULL);
1865                 ret = pthread_create(&p[i].thread, NULL,
1866                                      threaded_find_deltas, &p[i]);
1867                 if (ret)
1868                         die("unable to create thread: %s", strerror(ret));
1869                 active_threads++;
1870         }
1871
1872         /*
1873          * Now let's wait for work completion.  Each time a thread is done
1874          * with its work, we steal half of the remaining work from the
1875          * thread with the largest number of unprocessed objects and give
1876          * it to that newly idle thread.  This ensures good load balancing
1877          * until the remaining object list segments are simply too short
1878          * to be worth splitting anymore.
1879          */
1880         while (active_threads) {
1881                 struct thread_params *target = NULL;
1882                 struct thread_params *victim = NULL;
1883                 unsigned sub_size = 0;
1884
1885                 progress_lock();
1886                 for (;;) {
1887                         for (i = 0; !target && i < delta_search_threads; i++)
1888                                 if (!p[i].working)
1889                                         target = &p[i];
1890                         if (target)
1891                                 break;
1892                         pthread_cond_wait(&progress_cond, &progress_mutex);
1893                 }
1894
1895                 for (i = 0; i < delta_search_threads; i++)
1896                         if (p[i].remaining > 2*window &&
1897                             (!victim || victim->remaining < p[i].remaining))
1898                                 victim = &p[i];
1899                 if (victim) {
1900                         sub_size = victim->remaining / 2;
1901                         list = victim->list + victim->list_size - sub_size;
1902                         while (sub_size && list[0]->hash &&
1903                                list[0]->hash == list[-1]->hash) {
1904                                 list++;
1905                                 sub_size--;
1906                         }
1907                         if (!sub_size) {
1908                                 /*
1909                                  * It is possible for some "paths" to have
1910                                  * so many objects that no hash boundary
1911                                  * might be found.  Let's just steal the
1912                                  * exact half in that case.
1913                                  */
1914                                 sub_size = victim->remaining / 2;
1915                                 list -= sub_size;
1916                         }
1917                         target->list = list;
1918                         victim->list_size -= sub_size;
1919                         victim->remaining -= sub_size;
1920                 }
1921                 target->list_size = sub_size;
1922                 target->remaining = sub_size;
1923                 target->working = 1;
1924                 progress_unlock();
1925
1926                 pthread_mutex_lock(&target->mutex);
1927                 target->data_ready = 1;
1928                 pthread_cond_signal(&target->cond);
1929                 pthread_mutex_unlock(&target->mutex);
1930
1931                 if (!sub_size) {
1932                         pthread_join(target->thread, NULL);
1933                         pthread_cond_destroy(&target->cond);
1934                         pthread_mutex_destroy(&target->mutex);
1935                         active_threads--;
1936                 }
1937         }
1938         cleanup_threaded_search();
1939         free(p);
1940 }
1941
1942 #else
1943 #define ll_find_deltas(l, s, w, d, p)   find_deltas(l, &s, w, d, p)
1944 #endif
1945
1946 static int add_ref_tag(const char *path, const unsigned char *sha1, int flag, void *cb_data)
1947 {
1948         unsigned char peeled[20];
1949
1950         if (!prefixcmp(path, "refs/tags/") && /* is a tag? */
1951             !peel_ref(path, peeled)        && /* peelable? */
1952             !is_null_sha1(peeled)          && /* annotated tag? */
1953             locate_object_entry(peeled))      /* object packed? */
1954                 add_object_entry(sha1, OBJ_TAG, NULL, 0);
1955         return 0;
1956 }
1957
1958 static void prepare_pack(int window, int depth)
1959 {
1960         struct object_entry **delta_list;
1961         uint32_t i, nr_deltas;
1962         unsigned n;
1963
1964         get_object_details();
1965
1966         /*
1967          * If we're locally repacking then we need to be doubly careful
1968          * from now on in order to make sure no stealth corruption gets
1969          * propagated to the new pack.  Clients receiving streamed packs
1970          * should validate everything they get anyway so no need to incur
1971          * the additional cost here in that case.
1972          */
1973         if (!pack_to_stdout)
1974                 do_check_packed_object_crc = 1;
1975
1976         if (!nr_objects || !window || !depth)
1977                 return;
1978
1979         delta_list = xmalloc(nr_objects * sizeof(*delta_list));
1980         nr_deltas = n = 0;
1981
1982         for (i = 0; i < nr_objects; i++) {
1983                 struct object_entry *entry = objects + i;
1984
1985                 if (entry->delta)
1986                         /* This happens if we decided to reuse existing
1987                         /* This happens if we decided to reuse an existing
1988                          */
1989                         continue;
1990
1991                 if (entry->size < 50)
1992                         continue;
1993
1994                 if (entry->no_try_delta)
1995                         continue;
1996
1997                 if (!entry->preferred_base) {
1998                         nr_deltas++;
1999                         if (entry->type < 0)
2000                                 die("unable to get type of object %s",
2001                                     sha1_to_hex(entry->idx.sha1));
2002                 } else {
2003                         if (entry->type < 0) {
2004                                 /*
2005                                  * This object is not found, but we
2006                                  * This object could not be found, but we
2007                                  */
2008                                 continue;
2009                         }
2010                 }
2011
2012                 delta_list[n++] = entry;
2013         }
2014
2015         if (nr_deltas && n > 1) {
2016                 unsigned nr_done = 0;
2017                 if (progress)
2018                         progress_state = start_progress("Compressing objects",
2019                                                         nr_deltas);
2020                 qsort(delta_list, n, sizeof(*delta_list), type_size_sort);
2021                 ll_find_deltas(delta_list, n, window+1, depth, &nr_done);
2022                 stop_progress(&progress_state);
2023                 if (nr_done != nr_deltas)
2024                         die("inconsistency with delta count");
2025         }
2026         free(delta_list);
2027 }
2028
2029 static int git_pack_config(const char *k, const char *v, void *cb)
2030 {
2031         if (!strcmp(k, "pack.window")) {
2032                 window = git_config_int(k, v);
2033                 return 0;
2034         }
2035         if (!strcmp(k, "pack.windowmemory")) {
2036                 window_memory_limit = git_config_ulong(k, v);
2037                 return 0;
2038         }
2039         if (!strcmp(k, "pack.depth")) {
2040                 depth = git_config_int(k, v);
2041                 return 0;
2042         }
2043         if (!strcmp(k, "pack.compression")) {
2044                 int level = git_config_int(k, v);
2045                 if (level == -1)
2046                         level = Z_DEFAULT_COMPRESSION;
2047                 else if (level < 0 || level > Z_BEST_COMPRESSION)
2048                         die("bad pack compression level %d", level);
2049                 pack_compression_level = level;
2050                 pack_compression_seen = 1;
2051                 return 0;
2052         }
2053         if (!strcmp(k, "pack.deltacachesize")) {
2054                 max_delta_cache_size = git_config_int(k, v);
2055                 return 0;
2056         }
2057         if (!strcmp(k, "pack.deltacachelimit")) {
2058                 cache_max_small_delta_size = git_config_int(k, v);
2059                 return 0;
2060         }
2061         if (!strcmp(k, "pack.threads")) {
2062                 delta_search_threads = git_config_int(k, v);
2063                 if (delta_search_threads < 0)
2064                         die("invalid number of threads specified (%d)",
2065                             delta_search_threads);
2066 #ifdef NO_PTHREADS
2067                 if (delta_search_threads != 1)
2068                         warning("no threads support, ignoring %s", k);
2069 #endif
2070                 return 0;
2071         }
2072         if (!strcmp(k, "pack.indexversion")) {
2073                 pack_idx_opts.version = git_config_int(k, v);
2074                 if (pack_idx_opts.version > 2)
2075                         die("bad pack.indexversion=%"PRIu32,
2076                             pack_idx_opts.version);
2077                 return 0;
2078         }
2079         return git_default_config(k, v, cb);
2080 }
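/*
 * The keys above live in a [pack] section of the configuration file;
 * for example (values are illustrative):
 *
 *   [pack]
 *           window = 10
 *           depth = 50
 *           threads = 0          # 0 = autodetect, see ll_find_deltas()
 *           compression = 9
 *           deltaCacheSize = 256m
 */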
2081
2082 static void read_object_list_from_stdin(void)
2083 {
2084         char line[40 + 1 + PATH_MAX + 2];
2085         unsigned char sha1[20];
2086
2087         for (;;) {
2088                 if (!fgets(line, sizeof(line), stdin)) {
2089                         if (feof(stdin))
2090                                 break;
2091                         if (!ferror(stdin))
2092                                 die("fgets returned NULL, not EOF, not error!");
2093                         if (errno != EINTR)
2094                                 die_errno("fgets");
2095                         clearerr(stdin);
2096                         continue;
2097                 }
2098                 if (line[0] == '-') {
2099                         if (get_sha1_hex(line+1, sha1))
2100                                 die("expected edge sha1, got garbage:\n %s",
2101                                     line);
2102                         add_preferred_base(sha1);
2103                         continue;
2104                 }
2105                 if (get_sha1_hex(line, sha1))
2106                         die("expected sha1, got garbage:\n %s", line);
2107
2108                 add_preferred_base_object(line+41);
2109                 add_object_entry(sha1, 0, line+41, 0);
2110         }
2111 }
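/*
 * The expected stdin format, e.g. as produced by "git rev-list
 * --objects --objects-edge" (object names below are made up):
 *
 *   -c0ffee...                   edge commit: becomes a preferred base
 *   1a2b3c... path/to/file.c     object to pack, with its name hint
 *
 * i.e. 40 hex digits, then a space and an optional name starting at
 * offset 41, with a leading '-' marking an edge object instead.
 */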
2112
2113 #define OBJECT_ADDED (1u<<20)
2114
2115 static void show_commit(struct commit *commit, void *data)
2116 {
2117         add_object_entry(commit->object.sha1, OBJ_COMMIT, NULL, 0);
2118         commit->object.flags |= OBJECT_ADDED;
2119 }
2120
2121 static void show_object(struct object *obj,
2122                         const struct name_path *path, const char *last,
2123                         void *data)
2124 {
2125         char *name = path_name(path, last);
2126
2127         add_preferred_base_object(name);
2128         add_object_entry(obj->sha1, obj->type, name, 0);
2129         obj->flags |= OBJECT_ADDED;
2130
2131         /*
2132          * We will have generated the hash from the name,
2133          * but not saved a pointer to it, so we can free it.
2134          */
2135         free((char *)name);
2136 }
2137
2138 static void show_edge(struct commit *commit)
2139 {
2140         add_preferred_base(commit->object.sha1);
2141 }
2142
2143 struct in_pack_object {
2144         off_t offset;
2145         struct object *object;
2146 };
2147
2148 struct in_pack {
2149         int alloc;
2150         int nr;
2151         struct in_pack_object *array;
2152 };
2153
2154 static void mark_in_pack_object(struct object *object, struct packed_git *p, struct in_pack *in_pack)
2155 {
2156         in_pack->array[in_pack->nr].offset = find_pack_entry_one(object->sha1, p);
2157         in_pack->array[in_pack->nr].object = object;
2158         in_pack->nr++;
2159 }
2160
2161 /*
2162  * Compare the objects in offset order, to emulate the
2163  * "git rev-list --objects" output that produced the pack originally.
2164  */
2165 static int ofscmp(const void *a_, const void *b_)
2166 {
2167         struct in_pack_object *a = (struct in_pack_object *)a_;
2168         struct in_pack_object *b = (struct in_pack_object *)b_;
2169
2170         if (a->offset < b->offset)
2171                 return -1;
2172         else if (a->offset > b->offset)
2173                 return 1;
2174         else
2175                 return hashcmp(a->object->sha1, b->object->sha1);
2176 }
2177
2178 static void add_objects_in_unpacked_packs(struct rev_info *revs)
2179 {
2180         struct packed_git *p;
2181         struct in_pack in_pack;
2182         uint32_t i;
2183
2184         memset(&in_pack, 0, sizeof(in_pack));
2185
2186         for (p = packed_git; p; p = p->next) {
2187                 const unsigned char *sha1;
2188                 struct object *o;
2189
2190                 if (!p->pack_local || p->pack_keep)
2191                         continue;
2192                 if (open_pack_index(p))
2193                         die("cannot open pack index");
2194
2195                 ALLOC_GROW(in_pack.array,
2196                            in_pack.nr + p->num_objects,
2197                            in_pack.alloc);
2198
2199                 for (i = 0; i < p->num_objects; i++) {
2200                         sha1 = nth_packed_object_sha1(p, i);
2201                         o = lookup_unknown_object(sha1);
2202                         if (!(o->flags & OBJECT_ADDED))
2203                                 mark_in_pack_object(o, p, &in_pack);
2204                         o->flags |= OBJECT_ADDED;
2205                 }
2206         }
2207
2208         if (in_pack.nr) {
2209                 qsort(in_pack.array, in_pack.nr, sizeof(in_pack.array[0]),
2210                       ofscmp);
2211                 for (i = 0; i < in_pack.nr; i++) {
2212                         struct object *o = in_pack.array[i].object;
2213                         add_object_entry(o->sha1, o->type, "", 0);
2214                 }
2215         }
2216         free(in_pack.array);
2217 }
2218
2219 static int has_sha1_pack_kept_or_nonlocal(const unsigned char *sha1)
2220 {
2221         static struct packed_git *last_found = (void *)1;
2222         struct packed_git *p;
2223
2224         p = (last_found != (void *)1) ? last_found : packed_git;
2225
2226         while (p) {
2227                 if ((!p->pack_local || p->pack_keep) &&
2228                         find_pack_entry_one(sha1, p)) {
2229                         last_found = p;
2230                         return 1;
2231                 }
2232                 if (p == last_found)
2233                         p = packed_git;
2234                 else
2235                         p = p->next;
2236                 if (p == last_found)
2237                         p = p->next;
2238         }
2239         return 0;
2240 }
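/*
 * For illustration, with a (hypothetical) pack list A -> B -> C and
 * last_found == B, the probes above run in the order B, A, C: start
 * at the cached hit, restart from the head of the list, and skip the
 * cached pack when it comes around again.
 */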
2241
2242 static void loosen_unused_packed_objects(struct rev_info *revs)
2243 {
2244         struct packed_git *p;
2245         uint32_t i;
2246         const unsigned char *sha1;
2247
2248         for (p = packed_git; p; p = p->next) {
2249                 if (!p->pack_local || p->pack_keep)
2250                         continue;
2251
2252                 if (open_pack_index(p))
2253                         die("cannot open pack index");
2254
2255                 for (i = 0; i < p->num_objects; i++) {
2256                         sha1 = nth_packed_object_sha1(p, i);
2257                         if (!locate_object_entry(sha1) &&
2258                                 !has_sha1_pack_kept_or_nonlocal(sha1))
2259                                 if (force_object_loose(sha1, p->mtime))
2260                                         die("unable to force loose object");
2261                 }
2262         }
2263 }
2264
2265 static void get_object_list(int ac, const char **av)
2266 {
2267         struct rev_info revs;
2268         char line[1000];
2269         int flags = 0;
2270
2271         init_revisions(&revs, NULL);
2272         save_commit_buffer = 0;
2273         setup_revisions(ac, av, &revs, NULL);
2274
2275         while (fgets(line, sizeof(line), stdin) != NULL) {
2276                 int len = strlen(line);
2277                 if (len && line[len - 1] == '\n')
2278                         line[--len] = 0;
2279                 if (!len)
2280                         break;
2281                 if (*line == '-') {
2282                         if (!strcmp(line, "--not")) {
2283                                 flags ^= UNINTERESTING;
2284                                 continue;
2285                         }
2286                         die("not a rev '%s'", line);
2287                 }
2288                 if (handle_revision_arg(line, &revs, flags, 1))
2289                         die("bad revision '%s'", line);
2290         }
2291
2292         if (prepare_revision_walk(&revs))
2293                 die("revision walk setup failed");
2294         mark_edges_uninteresting(revs.commits, &revs, show_edge);
2295         traverse_commit_list(&revs, show_commit, show_object, NULL);
2296
2297         if (keep_unreachable)
2298                 add_objects_in_unpacked_packs(&revs);
2299         if (unpack_unreachable)
2300                 loosen_unused_packed_objects(&revs);
2301 }
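/*
 * With --revs, the revision arguments arrive on stdin, one per line,
 * terminated by an empty line or EOF; for example (refs are
 * illustrative):
 *
 *   v1.0.0
 *   --not
 *   v0.9.0
 *
 * which packs what is reachable from v1.0.0 but not from v0.9.0.
 */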
2302
2303 static int option_parse_index_version(const struct option *opt,
2304                                       const char *arg, int unset)
2305 {
2306         char *c;
2307         const char *val = arg;
2308         pack_idx_opts.version = strtoul(val, &c, 10);
2309         if (pack_idx_opts.version > 2)
2310                 die(_("unsupported index version %s"), val);
2311         if (*c == ',' && c[1])
2312                 pack_idx_opts.off32_limit = strtoul(c+1, &c, 0);
2313         if (*c || pack_idx_opts.off32_limit & 0x80000000)
2314                 die(_("bad index version '%s'"), val);
2315         return 0;
2316 }
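/*
 * E.g. "--index-version=2,0x10000000" (hypothetical values) selects
 * version 2 of the .idx format; the optional offset, parsed with
 * base 0 so hex works, lowers the pack offset above which entries
 * are stored in the 64-bit offset table.
 */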
2317
2318 static int option_parse_ulong(const struct option *opt,
2319                               const char *arg, int unset)
2320 {
2321         if (unset)
2322                 die(_("option %s does not accept negative form"),
2323                     opt->long_name);
2324
2325         if (!git_parse_ulong(arg, opt->value))
2326                 die(_("unable to parse value '%s' for option %s"),
2327                     arg, opt->long_name);
2328         return 0;
2329 }
2330
2331 #define OPT_ULONG(s, l, v, h) \
2332         { OPTION_CALLBACK, (s), (l), (v), "n", (h),     \
2333           PARSE_OPT_NONEG, option_parse_ulong }
2334
2335 int cmd_pack_objects(int argc, const char **argv, const char *prefix)
2336 {
2337         int use_internal_rev_list = 0;
2338         int thin = 0;
2339         int all_progress_implied = 0;
2340         const char *rp_av[6];
2341         int rp_ac = 0;
2342         int rev_list_unpacked = 0, rev_list_all = 0, rev_list_reflog = 0;
2343         struct option pack_objects_options[] = {
2344                 OPT_SET_INT('q', "quiet", &progress,
2345                             "do not show progress meter", 0),
2346                 OPT_SET_INT(0, "progress", &progress,
2347                             "show progress meter", 1),
2348                 OPT_SET_INT(0, "all-progress", &progress,
2349                             "show progress meter during object writing phase", 2),
2350                 OPT_BOOL(0, "all-progress-implied",
2351                          &all_progress_implied,
2352                          "similar to --all-progress when progress meter is shown"),
2353                 { OPTION_CALLBACK, 0, "index-version", NULL, "version[,offset]",
2354                   "write the pack index file in the specified idx format version",
2355                   0, option_parse_index_version },
2356                 OPT_ULONG(0, "max-pack-size", &pack_size_limit,
2357                           "maximum size of each output pack file"),
2358                 OPT_BOOL(0, "local", &local,
2359                          "ignore borrowed objects from alternate object store"),
2360                 OPT_BOOL(0, "incremental", &incremental,
2361                          "ignore packed objects"),
2362                 OPT_INTEGER(0, "window", &window,
2363                             "limit pack window by objects"),
2364                 OPT_ULONG(0, "window-memory", &window_memory_limit,
2365                           "limit pack window by memory in addition to object limit"),
2366                 OPT_INTEGER(0, "depth", &depth,
2367                             "maximum length of delta chain allowed in the resulting pack"),
2368                 OPT_BOOL(0, "reuse-delta", &reuse_delta,
2369                          "reuse existing deltas"),
2370                 OPT_BOOL(0, "reuse-object", &reuse_object,
2371                          "reuse existing objects"),
2372                 OPT_BOOL(0, "delta-base-offset", &allow_ofs_delta,
2373                          "use OFS_DELTA objects"),
2374                 OPT_INTEGER(0, "threads", &delta_search_threads,
2375                             "use threads when searching for best delta matches"),
2376                 OPT_BOOL(0, "non-empty", &non_empty,
2377                          "do not create an empty pack output"),
2378                 OPT_BOOL(0, "revs", &use_internal_rev_list,
2379                          "read revision arguments from standard input"),
2380                 { OPTION_SET_INT, 0, "unpacked", &rev_list_unpacked, NULL,
2381                   "limit the objects to those that are not yet packed",
2382                   PARSE_OPT_NOARG | PARSE_OPT_NONEG, NULL, 1 },
2383                 { OPTION_SET_INT, 0, "all", &rev_list_all, NULL,
2384                   "include objects reachable from any reference",
2385                   PARSE_OPT_NOARG | PARSE_OPT_NONEG, NULL, 1 },
2386                 { OPTION_SET_INT, 0, "reflog", &rev_list_reflog, NULL,
2387                   "include objects referred by reflog entries",
2388                   PARSE_OPT_NOARG | PARSE_OPT_NONEG, NULL, 1 },
2389                 OPT_BOOL(0, "stdout", &pack_to_stdout,
2390                          "output pack to stdout"),
2391                 OPT_BOOL(0, "include-tag", &include_tag,
2392                          "include tag objects that refer to objects to be packed"),
2393                 OPT_BOOL(0, "keep-unreachable", &keep_unreachable,
2394                          "keep unreachable objects"),
2395                 OPT_BOOL(0, "unpack-unreachable", &unpack_unreachable,
2396                          "unpack unreachable objects"),
2397                 OPT_BOOL(0, "thin", &thin,
2398                          "create thin packs"),
2399                 OPT_BOOL(0, "honor-pack-keep", &ignore_packed_keep,
2400                          "ignore packs that have companion .keep file"),
2401                 OPT_INTEGER(0, "compression", &pack_compression_level,
2402                             "pack compression level"),
2403                 OPT_SET_INT(0, "keep-true-parents", &grafts_replace_parents,
2404                             "do not hide commits by grafts", 0),
2405                 OPT_END(),
2406         };
2407
2408         read_replace_refs = 0;
2409
2410         reset_pack_idx_option(&pack_idx_opts);
2411         git_config(git_pack_config, NULL);
2412         if (!pack_compression_seen && core_compression_seen)
2413                 pack_compression_level = core_compression_level;
2414
2415         progress = isatty(2);
2416         argc = parse_options(argc, argv, prefix, pack_objects_options,
2417                              pack_usage, 0);
2418
2419         if (argc) {
2420                 base_name = argv[0];
2421                 argc--;
2422         }
2423         if (pack_to_stdout != !base_name || argc)
2424                 usage_with_options(pack_usage, pack_objects_options);
2425
2426         rp_av[rp_ac++] = "pack-objects";
2427         if (thin) {
2428                 use_internal_rev_list = 1;
2429                 rp_av[rp_ac++] = "--objects-edge";
2430         } else
2431                 rp_av[rp_ac++] = "--objects";
2432
2433         if (rev_list_all) {
2434                 use_internal_rev_list = 1;
2435                 rp_av[rp_ac++] = "--all";
2436         }
2437         if (rev_list_reflog) {
2438                 use_internal_rev_list = 1;
2439                 rp_av[rp_ac++] = "--reflog";
2440         }
2441         if (rev_list_unpacked) {
2442                 use_internal_rev_list = 1;
2443                 rp_av[rp_ac++] = "--unpacked";
2444         }
2445
2446         if (!reuse_object)
2447                 reuse_delta = 0;
2448         if (pack_compression_level == -1)
2449                 pack_compression_level = Z_DEFAULT_COMPRESSION;
2450         else if (pack_compression_level < 0 || pack_compression_level > Z_BEST_COMPRESSION)
2451                 die("bad pack compression level %d", pack_compression_level);
2452 #ifdef NO_PTHREADS
2453         if (delta_search_threads != 1)
2454                 warning("no threads support, ignoring --threads");
2455 #endif
2456         if (!pack_to_stdout && !pack_size_limit)
2457                 pack_size_limit = pack_size_limit_cfg;
2458         if (pack_to_stdout && pack_size_limit)
2459                 die("--max-pack-size cannot be used to build a pack for transfer.");
2460         if (pack_size_limit && pack_size_limit < 1024*1024) {
2461                 warning("minimum pack size limit is 1 MiB");
2462                 pack_size_limit = 1024*1024;
2463         }
2464
2465         if (!pack_to_stdout && thin)
2466                 die("--thin cannot be used to build an indexable pack.");
2467
2468         if (keep_unreachable && unpack_unreachable)
2469                 die("--keep-unreachable and --unpack-unreachable are incompatible.");
2470
2471         if (progress && all_progress_implied)
2472                 progress = 2;
2473
2474         prepare_packed_git();
2475
2476         if (progress)
2477                 progress_state = start_progress("Counting objects", 0);
2478         if (!use_internal_rev_list)
2479                 read_object_list_from_stdin();
2480         else {
2481                 rp_av[rp_ac] = NULL;
2482                 get_object_list(rp_ac, rp_av);
2483         }
2484         cleanup_preferred_base();
2485         if (include_tag && nr_result)
2486                 for_each_ref(add_ref_tag, NULL);
2487         stop_progress(&progress_state);
2488
2489         if (non_empty && !nr_result)
2490                 return 0;
2491         if (nr_result)
2492                 prepare_pack(window, depth);
2493         write_pack_file();
2494         if (progress)
2495                 fprintf(stderr, "Total %"PRIu32" (delta %"PRIu32"),"
2496                         " reused %"PRIu32" (delta %"PRIu32")\n",
2497                         written, written_delta, reused, reused_delta);
2498         return 0;
2499 }
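/*
 * Two typical invocations, matching pack_usage at the top of the
 * file (names are illustrative):
 *
 *   # stream a pack for transfer, revs on stdin:
 *   echo HEAD | git pack-objects --revs --stdout >transfer.pack
 *
 *   # write mypack-<sha1>.pack and mypack-<sha1>.idx from a list:
 *   git rev-list --objects HEAD | git pack-objects mypack
 */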