/* builtin/pack-objects.c */
#include "builtin.h"
#include "cache.h"
#include "config.h"
#include "attr.h"
#include "object.h"
#include "blob.h"
#include "commit.h"
#include "tag.h"
#include "tree.h"
#include "delta.h"
#include "pack.h"
#include "pack-revindex.h"
#include "csum-file.h"
#include "tree-walk.h"
#include "diff.h"
#include "revision.h"
#include "list-objects.h"
#include "list-objects-filter.h"
#include "list-objects-filter-options.h"
#include "pack-objects.h"
#include "progress.h"
#include "refs.h"
#include "streaming.h"
#include "thread-utils.h"
#include "pack-bitmap.h"
#include "reachable.h"
#include "sha1-array.h"
#include "argv-array.h"
#include "mru.h"
#include "packfile.h"

static const char *pack_usage[] = {
        N_("git pack-objects --stdout [<options>...] [< <ref-list> | < <object-list>]"),
        N_("git pack-objects [<options>...] <base-name> [< <ref-list> | < <object-list>]"),
        NULL
};

/*
 * Objects we are going to pack are collected in the `to_pack` structure.
 * It contains an array (dynamically expanded) of the object data, and a map
 * that can resolve SHA1s to their position in the array.
 */
static struct packing_data to_pack;

static struct pack_idx_entry **written_list;
static uint32_t nr_result, nr_written;

static int non_empty;
static int reuse_delta = 1, reuse_object = 1;
static int keep_unreachable, unpack_unreachable, include_tag;
static timestamp_t unpack_unreachable_expiration;
static int pack_loose_unreachable;
static int local;
static int have_non_local_packs;
static int incremental;
static int ignore_packed_keep;
static int allow_ofs_delta;
static struct pack_idx_option pack_idx_opts;
static const char *base_name;
static int progress = 1;
static int window = 10;
static unsigned long pack_size_limit;
static int depth = 50;
static int delta_search_threads;
static int pack_to_stdout;
static int num_preferred_base;
static struct progress *progress_state;

static struct packed_git *reuse_packfile;
static uint32_t reuse_packfile_objects;
static off_t reuse_packfile_offset;

static int use_bitmap_index_default = 1;
static int use_bitmap_index = -1;
static int write_bitmap_index;
static uint16_t write_bitmap_options;

static unsigned long delta_cache_size = 0;
static unsigned long max_delta_cache_size = 256 * 1024 * 1024;
static unsigned long cache_max_small_delta_size = 1000;

static unsigned long window_memory_limit = 0;

static struct list_objects_filter_options filter_options;

enum missing_action {
        MA_ERROR = 0,    /* fail if any missing objects are encountered */
        MA_ALLOW_ANY,    /* silently allow ALL missing objects */
};
static enum missing_action arg_missing_action;
static show_object_fn fn_show_object;

/*
 * stats
 */
static uint32_t written, written_delta;
static uint32_t reused, reused_delta;

/*
 * Indexed commits
 */
static struct commit **indexed_commits;
static unsigned int indexed_commits_nr;
static unsigned int indexed_commits_alloc;

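/*
 * Remember each commit so a bitmap index can be built for it later.
 * The "(alloc + 32) * 2" growth policy starts from a small floor and
 * then doubles, so repeated appends stay amortized O(1).
 */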
static void index_commit_for_bitmap(struct commit *commit)
{
        if (indexed_commits_nr >= indexed_commits_alloc) {
                indexed_commits_alloc = (indexed_commits_alloc + 32) * 2;
                REALLOC_ARRAY(indexed_commits, indexed_commits_alloc);
        }

        indexed_commits[indexed_commits_nr++] = commit;
}

static void *get_delta(struct object_entry *entry)
{
        unsigned long size, base_size, delta_size;
        void *buf, *base_buf, *delta_buf;
        enum object_type type;

        buf = read_sha1_file(entry->idx.oid.hash, &type, &size);
        if (!buf)
                die("unable to read %s", oid_to_hex(&entry->idx.oid));
        base_buf = read_sha1_file(entry->delta->idx.oid.hash, &type,
                                  &base_size);
        if (!base_buf)
                die("unable to read %s",
                    oid_to_hex(&entry->delta->idx.oid));
        delta_buf = diff_delta(base_buf, base_size,
                               buf, size, &delta_size, 0);
        if (!delta_buf || delta_size != entry->delta_size)
                die("delta size changed");
        free(buf);
        free(base_buf);
        return delta_buf;
}

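/*
 * Deflate the buffer at *pptr in one shot.  On return, *pptr points at
 * a freshly allocated buffer holding the compressed bytes (the input
 * buffer is freed), and the compressed length is returned.
 */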
static unsigned long do_compress(void **pptr, unsigned long size)
{
        git_zstream stream;
        void *in, *out;
        unsigned long maxsize;

        git_deflate_init(&stream, pack_compression_level);
        maxsize = git_deflate_bound(&stream, size);

        in = *pptr;
        out = xmalloc(maxsize);
        *pptr = out;

        stream.next_in = in;
        stream.avail_in = size;
        stream.next_out = out;
        stream.avail_out = maxsize;
        while (git_deflate(&stream, Z_FINISH) == Z_OK)
                ; /* nothing */
        git_deflate_end(&stream);

        free(in);
        return stream.total_out;
}

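/*
 * Stream a large blob from the object store into the pack, deflating
 * it 16kB at a time, so the whole blob never has to be held in memory.
 * Returns the number of compressed bytes written to the pack.
 */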
static unsigned long write_large_blob_data(struct git_istream *st, struct sha1file *f,
                                           const struct object_id *oid)
{
        git_zstream stream;
        unsigned char ibuf[1024 * 16];
        unsigned char obuf[1024 * 16];
        unsigned long olen = 0;

        git_deflate_init(&stream, pack_compression_level);

        for (;;) {
                ssize_t readlen;
                int zret = Z_OK;
                readlen = read_istream(st, ibuf, sizeof(ibuf));
                if (readlen == -1)
                        die(_("unable to read %s"), oid_to_hex(oid));

                stream.next_in = ibuf;
                stream.avail_in = readlen;
                while ((stream.avail_in || readlen == 0) &&
                       (zret == Z_OK || zret == Z_BUF_ERROR)) {
                        stream.next_out = obuf;
                        stream.avail_out = sizeof(obuf);
                        zret = git_deflate(&stream, readlen ? 0 : Z_FINISH);
                        sha1write(f, obuf, stream.next_out - obuf);
                        olen += stream.next_out - obuf;
                }
                if (stream.avail_in)
                        die(_("deflate error (%d)"), zret);
                if (readlen == 0) {
                        if (zret != Z_STREAM_END)
                                die(_("deflate error (%d)"), zret);
                        break;
                }
        }
        git_deflate_end(&stream);
        return olen;
}

/*
 * we are going to reuse the existing object data as is.  make
 * sure it is not corrupt.
 */
static int check_pack_inflate(struct packed_git *p,
                struct pack_window **w_curs,
                off_t offset,
                off_t len,
                unsigned long expect)
{
        git_zstream stream;
        unsigned char fakebuf[4096], *in;
        int st;

        memset(&stream, 0, sizeof(stream));
        git_inflate_init(&stream);
        do {
                in = use_pack(p, w_curs, offset, &stream.avail_in);
                stream.next_in = in;
                stream.next_out = fakebuf;
                stream.avail_out = sizeof(fakebuf);
                st = git_inflate(&stream, Z_FINISH);
                offset += stream.next_in - in;
        } while (st == Z_OK || st == Z_BUF_ERROR);
        git_inflate_end(&stream);
        return (st == Z_STREAM_END &&
                stream.total_out == expect &&
                stream.total_in == len) ? 0 : -1;
}

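/*
 * Copy len bytes of already-compressed object data verbatim from pack
 * p into the output, windowing through the mapped pack via use_pack()
 * so only a bounded amount is paged in at a time.
 */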
static void copy_pack_data(struct sha1file *f,
                struct packed_git *p,
                struct pack_window **w_curs,
                off_t offset,
                off_t len)
{
        unsigned char *in;
        unsigned long avail;

        while (len) {
                in = use_pack(p, w_curs, offset, &avail);
                if (avail > len)
                        avail = (unsigned long)len;
                sha1write(f, in, avail);
                offset += avail;
                len -= avail;
        }
}

/* Return 0 if we will bust the pack-size limit */
static unsigned long write_no_reuse_object(struct sha1file *f, struct object_entry *entry,
                                           unsigned long limit, int usable_delta)
{
        unsigned long size, datalen;
        unsigned char header[MAX_PACK_OBJECT_HEADER],
                      dheader[MAX_PACK_OBJECT_HEADER];
        unsigned hdrlen;
        enum object_type type;
        void *buf;
        struct git_istream *st = NULL;

        if (!usable_delta) {
                if (entry->type == OBJ_BLOB &&
                    entry->size > big_file_threshold &&
                    (st = open_istream(entry->idx.oid.hash, &type, &size, NULL)) != NULL)
                        buf = NULL;
                else {
                        buf = read_sha1_file(entry->idx.oid.hash, &type,
                                             &size);
                        if (!buf)
                                die(_("unable to read %s"),
                                    oid_to_hex(&entry->idx.oid));
                }
                /*
                 * make sure no cached delta data remains from a
                 * previous attempt before a pack split occurred.
                 */
                FREE_AND_NULL(entry->delta_data);
                entry->z_delta_size = 0;
        } else if (entry->delta_data) {
                size = entry->delta_size;
                buf = entry->delta_data;
                entry->delta_data = NULL;
                type = (allow_ofs_delta && entry->delta->idx.offset) ?
                        OBJ_OFS_DELTA : OBJ_REF_DELTA;
        } else {
                buf = get_delta(entry);
                size = entry->delta_size;
                type = (allow_ofs_delta && entry->delta->idx.offset) ?
                        OBJ_OFS_DELTA : OBJ_REF_DELTA;
        }

        if (st) /* large blob case, just assume we don't compress well */
                datalen = size;
        else if (entry->z_delta_size)
                datalen = entry->z_delta_size;
        else
                datalen = do_compress(&buf, size);

        /*
         * The object header is a byte of 'type' followed by zero or
         * more bytes of length.
         */
        hdrlen = encode_in_pack_object_header(header, sizeof(header),
                                              type, size);

        if (type == OBJ_OFS_DELTA) {
                /*
                 * Deltas with relative base contain an additional
                 * encoding of the relative offset for the delta
                 * base from this object's position in the pack.
                 */
                off_t ofs = entry->idx.offset - entry->delta->idx.offset;
                unsigned pos = sizeof(dheader) - 1;
                dheader[pos] = ofs & 127;
                while (ofs >>= 7)
                        dheader[--pos] = 128 | (--ofs & 127);
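                /*
                 * Illustrative example: ofs = 128 encodes as the two
                 * bytes 0x80 0x00.  The "--ofs" above biases every
                 * continuation byte by one, keeping the encoding
                 * bijective (0x80 0x00 cannot also mean 0); the
                 * matching decoder is in check_object() below.
                 */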
                if (limit && hdrlen + sizeof(dheader) - pos + datalen + 20 >= limit) {
                        if (st)
                                close_istream(st);
                        free(buf);
                        return 0;
                }
                sha1write(f, header, hdrlen);
                sha1write(f, dheader + pos, sizeof(dheader) - pos);
                hdrlen += sizeof(dheader) - pos;
        } else if (type == OBJ_REF_DELTA) {
                /*
                 * Deltas with a base reference contain
                 * an additional 20 bytes for the base sha1.
                 */
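                /* the trailing "+ 20" in these limit checks leaves
                 * room for the pack's 20-byte SHA-1 trailer */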
                if (limit && hdrlen + 20 + datalen + 20 >= limit) {
                        if (st)
                                close_istream(st);
                        free(buf);
                        return 0;
                }
                sha1write(f, header, hdrlen);
                sha1write(f, entry->delta->idx.oid.hash, 20);
                hdrlen += 20;
        } else {
                if (limit && hdrlen + datalen + 20 >= limit) {
                        if (st)
                                close_istream(st);
                        free(buf);
                        return 0;
                }
                sha1write(f, header, hdrlen);
        }
        if (st) {
                datalen = write_large_blob_data(st, f, &entry->idx.oid);
                close_istream(st);
        } else {
                sha1write(f, buf, datalen);
                free(buf);
        }

        return hdrlen + datalen;
}

/* Return 0 if we will bust the pack-size limit */
static off_t write_reuse_object(struct sha1file *f, struct object_entry *entry,
                                unsigned long limit, int usable_delta)
{
        struct packed_git *p = entry->in_pack;
        struct pack_window *w_curs = NULL;
        struct revindex_entry *revidx;
        off_t offset;
        enum object_type type = entry->type;
        off_t datalen;
        unsigned char header[MAX_PACK_OBJECT_HEADER],
                      dheader[MAX_PACK_OBJECT_HEADER];
        unsigned hdrlen;

        if (entry->delta)
                type = (allow_ofs_delta && entry->delta->idx.offset) ?
                        OBJ_OFS_DELTA : OBJ_REF_DELTA;
        hdrlen = encode_in_pack_object_header(header, sizeof(header),
                                              type, entry->size);

        offset = entry->in_pack_offset;
        revidx = find_pack_revindex(p, offset);
        datalen = revidx[1].offset - offset;
        if (!pack_to_stdout && p->index_version > 1 &&
            check_pack_crc(p, &w_curs, offset, datalen, revidx->nr)) {
                error("bad packed object CRC for %s",
                      oid_to_hex(&entry->idx.oid));
                unuse_pack(&w_curs);
                return write_no_reuse_object(f, entry, limit, usable_delta);
        }

        offset += entry->in_pack_header_size;
        datalen -= entry->in_pack_header_size;

        if (!pack_to_stdout && p->index_version == 1 &&
            check_pack_inflate(p, &w_curs, offset, datalen, entry->size)) {
                error("corrupt packed object for %s",
                      oid_to_hex(&entry->idx.oid));
                unuse_pack(&w_curs);
                return write_no_reuse_object(f, entry, limit, usable_delta);
        }

        if (type == OBJ_OFS_DELTA) {
                off_t ofs = entry->idx.offset - entry->delta->idx.offset;
                unsigned pos = sizeof(dheader) - 1;
                dheader[pos] = ofs & 127;
                while (ofs >>= 7)
                        dheader[--pos] = 128 | (--ofs & 127);
                if (limit && hdrlen + sizeof(dheader) - pos + datalen + 20 >= limit) {
                        unuse_pack(&w_curs);
                        return 0;
                }
                sha1write(f, header, hdrlen);
                sha1write(f, dheader + pos, sizeof(dheader) - pos);
                hdrlen += sizeof(dheader) - pos;
                reused_delta++;
        } else if (type == OBJ_REF_DELTA) {
                if (limit && hdrlen + 20 + datalen + 20 >= limit) {
                        unuse_pack(&w_curs);
                        return 0;
                }
                sha1write(f, header, hdrlen);
                sha1write(f, entry->delta->idx.oid.hash, 20);
                hdrlen += 20;
                reused_delta++;
        } else {
                if (limit && hdrlen + datalen + 20 >= limit) {
                        unuse_pack(&w_curs);
                        return 0;
                }
                sha1write(f, header, hdrlen);
        }
        copy_pack_data(f, p, &w_curs, offset, datalen);
        unuse_pack(&w_curs);
        reused++;
        return hdrlen + datalen;
}

/* Return 0 if we will bust the pack-size limit */
static off_t write_object(struct sha1file *f,
                          struct object_entry *entry,
                          off_t write_offset)
{
        unsigned long limit;
        off_t len;
        int usable_delta, to_reuse;

        if (!pack_to_stdout)
                crc32_begin(f);

        /* apply size limit if limited packsize and not first object */
        if (!pack_size_limit || !nr_written)
                limit = 0;
        else if (pack_size_limit <= write_offset)
                /*
                 * the earlier object did not fit the limit; avoid
                 * mistaking this with unlimited (i.e. limit = 0).
                 */
                limit = 1;
        else
                limit = pack_size_limit - write_offset;

        if (!entry->delta)
                usable_delta = 0;       /* no delta */
        else if (!pack_size_limit)
                usable_delta = 1;       /* unlimited packfile */
        else if (entry->delta->idx.offset == (off_t)-1)
                usable_delta = 0;       /* base was written to another pack */
        else if (entry->delta->idx.offset)
                usable_delta = 1;       /* base already exists in this pack */
        else
                usable_delta = 0;       /* base could end up in another pack */

        if (!reuse_object)
                to_reuse = 0;   /* explicit */
        else if (!entry->in_pack)
                to_reuse = 0;   /* can't reuse what we don't have */
        else if (entry->type == OBJ_REF_DELTA || entry->type == OBJ_OFS_DELTA)
                                /* check_object() decided it for us ... */
                to_reuse = usable_delta;
                                /* ... but pack split may override that */
        else if (entry->type != entry->in_pack_type)
                to_reuse = 0;   /* pack has delta which is unusable */
        else if (entry->delta)
                to_reuse = 0;   /* we want to pack afresh */
        else
                to_reuse = 1;   /* we have it in-pack undeltified,
                                 * and we do not need to deltify it.
                                 */

        if (!to_reuse)
                len = write_no_reuse_object(f, entry, limit, usable_delta);
        else
                len = write_reuse_object(f, entry, limit, usable_delta);
        if (!len)
                return 0;

        if (usable_delta)
                written_delta++;
        written++;
        if (!pack_to_stdout)
                entry->idx.crc32 = crc32_end(f);
        return len;
}

enum write_one_status {
        WRITE_ONE_SKIP = -1, /* already written */
        WRITE_ONE_BREAK = 0, /* writing this will bust the limit; not written */
        WRITE_ONE_WRITTEN = 1, /* normal */
        WRITE_ONE_RECURSIVE = 2 /* already scheduled to be written */
};

static enum write_one_status write_one(struct sha1file *f,
                                       struct object_entry *e,
                                       off_t *offset)
{
        off_t size;
        int recursing;

        /*
         * we set offset to 1 (which is an impossible value) to mark
         * the fact that this object is involved in "write its base
         * first before writing a deltified object" recursion.
         */
        recursing = (e->idx.offset == 1);
        if (recursing) {
                warning("recursive delta detected for object %s",
                        oid_to_hex(&e->idx.oid));
                return WRITE_ONE_RECURSIVE;
        } else if (e->idx.offset || e->preferred_base) {
                /* offset is non-zero if the object is already written. */
                return WRITE_ONE_SKIP;
        }

        /* if we are deltified, write out base object first. */
        if (e->delta) {
                e->idx.offset = 1; /* now recurse */
                switch (write_one(f, e->delta, offset)) {
                case WRITE_ONE_RECURSIVE:
                        /* we cannot depend on this one */
                        e->delta = NULL;
                        break;
                default:
                        break;
                case WRITE_ONE_BREAK:
                        e->idx.offset = recursing;
                        return WRITE_ONE_BREAK;
                }
        }

        e->idx.offset = *offset;
        size = write_object(f, e, *offset);
        if (!size) {
                e->idx.offset = recursing;
                return WRITE_ONE_BREAK;
        }
        written_list[nr_written++] = &e->idx;

        /* make sure off_t is sufficiently large not to wrap */
        if (signed_add_overflows(*offset, size))
                die("pack too large for current definition of off_t");
        *offset += size;
        return WRITE_ONE_WRITTEN;
}

static int mark_tagged(const char *path, const struct object_id *oid, int flag,
                       void *cb_data)
{
        struct object_id peeled;
        struct object_entry *entry = packlist_find(&to_pack, oid->hash, NULL);

        if (entry)
                entry->tagged = 1;
        if (!peel_ref(path, &peeled)) {
                entry = packlist_find(&to_pack, peeled.hash, NULL);
                if (entry)
                        entry->tagged = 1;
        }
        return 0;
}

static inline void add_to_write_order(struct object_entry **wo,
                               unsigned int *endp,
                               struct object_entry *e)
{
        if (e->filled)
                return;
        wo[(*endp)++] = e;
        e->filled = 1;
}

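/*
 * An iterative preorder walk over one delta family: "delta" points at
 * the parent (base), "delta_child" at the first child, and
 * "delta_sibling" at the next child of the same base, so the whole
 * tree can be traversed without recursion or an explicit stack.
 */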
static void add_descendants_to_write_order(struct object_entry **wo,
                                           unsigned int *endp,
                                           struct object_entry *e)
{
        int add_to_order = 1;
        while (e) {
                if (add_to_order) {
                        struct object_entry *s;
                        /* add this node... */
                        add_to_write_order(wo, endp, e);
                        /* all its siblings... */
                        for (s = e->delta_sibling; s; s = s->delta_sibling) {
                                add_to_write_order(wo, endp, s);
                        }
                }
                /* drop down a level to add left subtree nodes if possible */
                if (e->delta_child) {
                        add_to_order = 1;
                        e = e->delta_child;
                } else {
                        add_to_order = 0;
                        /* our sibling might have some children, it is next */
                        if (e->delta_sibling) {
                                e = e->delta_sibling;
                                continue;
                        }
                        /* go back to our parent node */
                        e = e->delta;
                        while (e && !e->delta_sibling) {
                                /* we're on the right side of a subtree, keep
                                 * going up until we can go right again */
                                e = e->delta;
                        }
                        if (!e) {
                                /* done -- we hit our original root node */
                                return;
                        }
                        /* pass it off to sibling at this level */
                        e = e->delta_sibling;
                }
        }
}

static void add_family_to_write_order(struct object_entry **wo,
                                      unsigned int *endp,
                                      struct object_entry *e)
{
        struct object_entry *root;

        for (root = e; root->delta; root = root->delta)
                ; /* nothing */
        add_descendants_to_write_order(wo, endp, root);
}

static struct object_entry **compute_write_order(void)
{
        unsigned int i, wo_end, last_untagged;

        struct object_entry **wo;
        struct object_entry *objects = to_pack.objects;

        for (i = 0; i < to_pack.nr_objects; i++) {
                objects[i].tagged = 0;
                objects[i].filled = 0;
                objects[i].delta_child = NULL;
                objects[i].delta_sibling = NULL;
        }

        /*
         * Fully connect delta_child/delta_sibling network.
         * Make sure delta_sibling is sorted in the original
         * recency order.
         */
        for (i = to_pack.nr_objects; i > 0;) {
                struct object_entry *e = &objects[--i];
                if (!e->delta)
                        continue;
                /* Mark me as the first child */
                e->delta_sibling = e->delta->delta_child;
                e->delta->delta_child = e;
        }

        /*
         * Mark objects that are at the tip of tags.
         */
        for_each_tag_ref(mark_tagged, NULL);

        /*
         * Give the objects in the original recency order until
         * we see a tagged tip.
         */
        ALLOC_ARRAY(wo, to_pack.nr_objects);
        for (i = wo_end = 0; i < to_pack.nr_objects; i++) {
                if (objects[i].tagged)
                        break;
                add_to_write_order(wo, &wo_end, &objects[i]);
        }
        last_untagged = i;

        /*
         * Then fill all the tagged tips.
         */
        for (; i < to_pack.nr_objects; i++) {
                if (objects[i].tagged)
                        add_to_write_order(wo, &wo_end, &objects[i]);
        }

        /*
         * And then all remaining commits and tags.
         */
        for (i = last_untagged; i < to_pack.nr_objects; i++) {
                if (objects[i].type != OBJ_COMMIT &&
                    objects[i].type != OBJ_TAG)
                        continue;
                add_to_write_order(wo, &wo_end, &objects[i]);
        }

        /*
         * And then all the trees.
         */
        for (i = last_untagged; i < to_pack.nr_objects; i++) {
                if (objects[i].type != OBJ_TREE)
                        continue;
                add_to_write_order(wo, &wo_end, &objects[i]);
        }

        /*
         * Finally all the rest in really tight order
         */
        for (i = last_untagged; i < to_pack.nr_objects; i++) {
                if (!objects[i].filled)
                        add_family_to_write_order(wo, &wo_end, &objects[i]);
        }

        if (wo_end != to_pack.nr_objects)
                die("ordered %u objects, expected %"PRIu32, wo_end, to_pack.nr_objects);

        return wo;
}

static off_t write_reused_pack(struct sha1file *f)
{
        unsigned char buffer[8192];
        off_t to_write, total;
        int fd;

        if (!is_pack_valid(reuse_packfile))
                die("packfile is invalid: %s", reuse_packfile->pack_name);

        fd = git_open(reuse_packfile->pack_name);
        if (fd < 0)
                die_errno("unable to open packfile for reuse: %s",
                          reuse_packfile->pack_name);

        if (lseek(fd, sizeof(struct pack_header), SEEK_SET) == -1)
                die_errno("unable to seek in reused packfile");

        if (reuse_packfile_offset < 0)
                reuse_packfile_offset = reuse_packfile->pack_size - 20;

        total = to_write = reuse_packfile_offset - sizeof(struct pack_header);

        while (to_write) {
                int read_pack = xread(fd, buffer, sizeof(buffer));

                if (read_pack <= 0)
                        die_errno("unable to read from reused packfile");

                if (read_pack > to_write)
                        read_pack = to_write;

                sha1write(f, buffer, read_pack);
                to_write -= read_pack;

                /*
                 * We don't know the actual number of objects written,
                 * only how many bytes written, how many bytes total, and
                 * how many objects total. So we can fake it by pretending all
                 * objects we are writing are the same size. This gives us a
                 * smooth progress meter, and at the end it matches the true
                 * answer.
                 */
                written = reuse_packfile_objects *
                                (((double)(total - to_write)) / total);
                display_progress(progress_state, written);
        }

        close(fd);
        written = reuse_packfile_objects;
        display_progress(progress_state, written);
        return reuse_packfile_offset - sizeof(struct pack_header);
}

static const char no_split_warning[] = N_(
"disabling bitmap writing, packs are split due to pack.packSizeLimit"
);

static void write_pack_file(void)
{
        uint32_t i = 0, j;
        struct sha1file *f;
        off_t offset;
        uint32_t nr_remaining = nr_result;
        time_t last_mtime = 0;
        struct object_entry **write_order;

        if (progress > pack_to_stdout)
                progress_state = start_progress(_("Writing objects"), nr_result);
        ALLOC_ARRAY(written_list, to_pack.nr_objects);
        write_order = compute_write_order();

        do {
                struct object_id oid;
                char *pack_tmp_name = NULL;

                if (pack_to_stdout)
                        f = sha1fd_throughput(1, "<stdout>", progress_state);
                else
                        f = create_tmp_packfile(&pack_tmp_name);

                offset = write_pack_header(f, nr_remaining);

                if (reuse_packfile) {
                        off_t packfile_size;
                        assert(pack_to_stdout);

                        packfile_size = write_reused_pack(f);
                        offset += packfile_size;
                }

                nr_written = 0;
                for (; i < to_pack.nr_objects; i++) {
                        struct object_entry *e = write_order[i];
                        if (write_one(f, e, &offset) == WRITE_ONE_BREAK)
                                break;
                        display_progress(progress_state, written);
                }

                /*
                 * Did we write the wrong # entries in the header?
                 * If so, rewrite it like in fast-import
                 */
                if (pack_to_stdout) {
                        sha1close(f, oid.hash, CSUM_CLOSE);
                } else if (nr_written == nr_remaining) {
                        sha1close(f, oid.hash, CSUM_FSYNC);
                } else {
                        int fd = sha1close(f, oid.hash, 0);
                        fixup_pack_header_footer(fd, oid.hash, pack_tmp_name,
                                                 nr_written, oid.hash, offset);
                        close(fd);
                        if (write_bitmap_index) {
                                warning(_(no_split_warning));
                                write_bitmap_index = 0;
                        }
                }

                if (!pack_to_stdout) {
                        struct stat st;
                        struct strbuf tmpname = STRBUF_INIT;

                        /*
                         * Packs are runtime accessed in their mtime
                         * order since newer packs are more likely to contain
                         * younger objects.  So if we are creating multiple
                         * packs then we should modify the mtime of later ones
                         * to preserve this property.
                         */
                        if (stat(pack_tmp_name, &st) < 0) {
                                warning_errno("failed to stat %s", pack_tmp_name);
                        } else if (!last_mtime) {
                                last_mtime = st.st_mtime;
                        } else {
                                struct utimbuf utb;
                                utb.actime = st.st_atime;
                                utb.modtime = --last_mtime;
                                if (utime(pack_tmp_name, &utb) < 0)
                                        warning_errno("failed utime() on %s", pack_tmp_name);
                        }

                        strbuf_addf(&tmpname, "%s-", base_name);

                        if (write_bitmap_index) {
                                bitmap_writer_set_checksum(oid.hash);
                                bitmap_writer_build_type_index(written_list, nr_written);
                        }

                        finish_tmp_packfile(&tmpname, pack_tmp_name,
                                            written_list, nr_written,
                                            &pack_idx_opts, oid.hash);

                        if (write_bitmap_index) {
                                strbuf_addf(&tmpname, "%s.bitmap", oid_to_hex(&oid));

                                stop_progress(&progress_state);

                                bitmap_writer_show_progress(progress);
                                bitmap_writer_reuse_bitmaps(&to_pack);
                                bitmap_writer_select_commits(indexed_commits, indexed_commits_nr, -1);
                                bitmap_writer_build(&to_pack);
                                bitmap_writer_finish(written_list, nr_written,
                                                     tmpname.buf, write_bitmap_options);
                                write_bitmap_index = 0;
                        }

                        strbuf_release(&tmpname);
                        free(pack_tmp_name);
                        puts(oid_to_hex(&oid));
                }

                /* mark written objects as written to previous pack */
                for (j = 0; j < nr_written; j++) {
                        written_list[j]->offset = (off_t)-1;
                }
                nr_remaining -= nr_written;
        } while (nr_remaining && i < to_pack.nr_objects);

        free(written_list);
        free(write_order);
        stop_progress(&progress_state);
        if (written != nr_result)
                die("wrote %"PRIu32" objects while expecting %"PRIu32,
                        written, nr_result);
}

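/*
 * Returns 1 if the path's "delta" gitattribute is set to false (for
 * example, a .gitattributes line such as "*.zip -delta"), telling us
 * not to waste effort trying to deltify this object.
 */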
static int no_try_delta(const char *path)
{
        static struct attr_check *check;

        if (!check)
                check = attr_check_initl("delta", NULL);
        if (git_check_attr(path, check))
                return 0;
        if (ATTR_FALSE(check->items[0].value))
                return 1;
        return 0;
}

/*
 * When adding an object, check whether we have already added it
 * to our packing list. If so, we can skip. However, if we are
 * being asked to exclude it, but the previous mention was to include
 * it, make sure to adjust its flags and tweak our numbers accordingly.
 *
 * As an optimization, we pass out the index position where we would have
 * found the item, since that saves us from having to look it up again a
 * few lines later when we want to add the new entry.
 */
static int have_duplicate_entry(const struct object_id *oid,
                                int exclude,
                                uint32_t *index_pos)
{
        struct object_entry *entry;

        entry = packlist_find(&to_pack, oid->hash, index_pos);
        if (!entry)
                return 0;

        if (exclude) {
                if (!entry->preferred_base)
                        nr_result--;
                entry->preferred_base = 1;
        }

        return 1;
}

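/*
 * Decide whether an object found in pack p should go into the output
 * pack: returns 1 (want it), 0 (do not want it), or -1 (cannot tell
 * from this pack alone; the caller must keep scanning other packs).
 */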
static int want_found_object(int exclude, struct packed_git *p)
{
        if (exclude)
                return 1;
        if (incremental)
                return 0;

        /*
         * When asked to do --local (do not include an object that appears in a
         * pack we borrow from elsewhere) or --honor-pack-keep (do not include
         * an object that appears in a pack marked with .keep), finding a pack
         * that matches the criteria is sufficient for us to decide to omit it.
         * However, even if this pack does not satisfy the criteria, we need to
         * make sure no copy of this object appears in _any_ pack that makes us
         * omit the object, so we need to check all the packs.
         *
         * We can however first check whether these options can possibly matter;
         * if they do not matter we know we want the object in the generated pack.
         * Otherwise, we signal "-1" at the end to tell the caller that we do
         * not know either way, and it needs to check more packs.
         */
        if (!ignore_packed_keep &&
            (!local || !have_non_local_packs))
                return 1;

        if (local && !p->pack_local)
                return 0;
        if (ignore_packed_keep && p->pack_local && p->pack_keep)
                return 0;

        /* we don't know yet; keep looking for more packs */
        return -1;
}

/*
 * Check whether we want the object in the pack (e.g., we do not want
 * objects found in non-local stores if the "--local" option was used).
 *
 * If the caller already knows an existing pack it wants to take the object
 * from, that is passed in *found_pack and *found_offset; otherwise this
 * function finds if there is any pack that has the object and returns the pack
 * and its offset in these variables.
 */
static int want_object_in_pack(const struct object_id *oid,
                               int exclude,
                               struct packed_git **found_pack,
                               off_t *found_offset)
{
        struct mru_entry *entry;
        int want;

        if (!exclude && local && has_loose_object_nonlocal(oid->hash))
                return 0;

        /*
         * If we already know the pack the object lives in, start checks from
         * that pack - in the usual case when neither --local was given nor
         * .keep files are present we will determine the answer right now.
         */
        if (*found_pack) {
                want = want_found_object(exclude, *found_pack);
                if (want != -1)
                        return want;
        }

        for (entry = packed_git_mru.head; entry; entry = entry->next) {
                struct packed_git *p = entry->item;
                off_t offset;

                if (p == *found_pack)
                        offset = *found_offset;
                else
                        offset = find_pack_entry_one(oid->hash, p);

                if (offset) {
                        if (!*found_pack) {
                                if (!is_pack_valid(p))
                                        continue;
                                *found_offset = offset;
                                *found_pack = p;
                        }
                        want = want_found_object(exclude, p);
                        if (!exclude && want > 0)
                                mru_mark(&packed_git_mru, entry);
                        if (want != -1)
                                return want;
                }
        }

        return 1;
}

static void create_object_entry(const struct object_id *oid,
                                enum object_type type,
                                uint32_t hash,
                                int exclude,
                                int no_try_delta,
                                uint32_t index_pos,
                                struct packed_git *found_pack,
                                off_t found_offset)
{
        struct object_entry *entry;

        entry = packlist_alloc(&to_pack, oid->hash, index_pos);
        entry->hash = hash;
        if (type)
                entry->type = type;
        if (exclude)
                entry->preferred_base = 1;
        else
                nr_result++;
        if (found_pack) {
                entry->in_pack = found_pack;
                entry->in_pack_offset = found_offset;
        }

        entry->no_try_delta = no_try_delta;
}

static const char no_closure_warning[] = N_(
"disabling bitmap writing, as some objects are not being packed"
);

static int add_object_entry(const struct object_id *oid, enum object_type type,
                            const char *name, int exclude)
{
        struct packed_git *found_pack = NULL;
        off_t found_offset = 0;
        uint32_t index_pos;

        if (have_duplicate_entry(oid, exclude, &index_pos))
                return 0;

        if (!want_object_in_pack(oid, exclude, &found_pack, &found_offset)) {
                /* The pack is missing an object, so it will not have closure */
                if (write_bitmap_index) {
                        warning(_(no_closure_warning));
                        write_bitmap_index = 0;
                }
                return 0;
        }

        create_object_entry(oid, type, pack_name_hash(name),
                            exclude, name && no_try_delta(name),
                            index_pos, found_pack, found_offset);

        display_progress(progress_state, nr_result);
        return 1;
}

static int add_object_entry_from_bitmap(const struct object_id *oid,
                                        enum object_type type,
                                        int flags, uint32_t name_hash,
                                        struct packed_git *pack, off_t offset)
{
        uint32_t index_pos;

        if (have_duplicate_entry(oid, 0, &index_pos))
                return 0;

        if (!want_object_in_pack(oid, 0, &pack, &offset))
                return 0;

        create_object_entry(oid, type, name_hash, 0, 0, index_pos, pack, offset);

        display_progress(progress_state, nr_result);
        return 1;
}

struct pbase_tree_cache {
        struct object_id oid;
        int ref;
        int temporary;
        void *tree_data;
        unsigned long tree_size;
};

static struct pbase_tree_cache *(pbase_tree_cache[256]);
static int pbase_tree_cache_ix(const struct object_id *oid)
{
        return oid->hash[0] % ARRAY_SIZE(pbase_tree_cache);
}
static int pbase_tree_cache_ix_incr(int ix)
{
        return (ix+1) % ARRAY_SIZE(pbase_tree_cache);
}

static struct pbase_tree {
        struct pbase_tree *next;
        /* This is a phony "cache" entry; we are not
         * going to evict it or find it through _get()
         * mechanism -- this is for the toplevel node that
         * would almost always change with any commit.
         */
        struct pbase_tree_cache pcache;
} *pbase_tree;

static struct pbase_tree_cache *pbase_tree_get(const struct object_id *oid)
{
        struct pbase_tree_cache *ent, *nent;
        void *data;
        unsigned long size;
        enum object_type type;
        int neigh;
        int my_ix = pbase_tree_cache_ix(oid);
        int available_ix = -1;

        /* pbase-tree-cache acts as a limited hashtable.
         * your object will be found at your index or within a few
         * slots after that slot if it is cached.
         */
        for (neigh = 0; neigh < 8; neigh++) {
                ent = pbase_tree_cache[my_ix];
                if (ent && !oidcmp(&ent->oid, oid)) {
                        ent->ref++;
                        return ent;
                }
                else if (((available_ix < 0) && (!ent || !ent->ref)) ||
                         ((0 <= available_ix) &&
                          (!ent && pbase_tree_cache[available_ix])))
                        available_ix = my_ix;
                if (!ent)
                        break;
                my_ix = pbase_tree_cache_ix_incr(my_ix);
        }

        /* Did not find one.  Either we got a bogus request or
         * we need to read and perhaps cache.
         */
        data = read_sha1_file(oid->hash, &type, &size);
        if (!data)
                return NULL;
        if (type != OBJ_TREE) {
                free(data);
                return NULL;
        }

        /* We need to either cache or return a throwaway copy */

        if (available_ix < 0)
                ent = NULL;
        else {
                ent = pbase_tree_cache[available_ix];
                my_ix = available_ix;
        }

        if (!ent) {
                nent = xmalloc(sizeof(*nent));
                nent->temporary = (available_ix < 0);
        }
        else {
                /* evict and reuse */
                free(ent->tree_data);
                nent = ent;
        }
        oidcpy(&nent->oid, oid);
        nent->tree_data = data;
        nent->tree_size = size;
        nent->ref = 1;
        if (!nent->temporary)
                pbase_tree_cache[my_ix] = nent;
        return nent;
}

static void pbase_tree_put(struct pbase_tree_cache *cache)
{
        if (!cache->temporary) {
                cache->ref--;
                return;
        }
        free(cache->tree_data);
        free(cache);
}

static int name_cmp_len(const char *name)
{
        int i;
        for (i = 0; name[i] && name[i] != '\n' && name[i] != '/'; i++)
                ;
        return i;
}

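/*
 * Scan one tree level for the entry matching the first path component
 * of "name"; leaf matches are added as preferred-base objects, and for
 * "dir/..." components we recurse into the subtree with the remainder
 * of the path.
 */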
static void add_pbase_object(struct tree_desc *tree,
                             const char *name,
                             int cmplen,
                             const char *fullname)
{
        struct name_entry entry;
        int cmp;

        while (tree_entry(tree, &entry)) {
                if (S_ISGITLINK(entry.mode))
                        continue;
                cmp = tree_entry_len(&entry) != cmplen ? 1 :
                      memcmp(name, entry.path, cmplen);
                if (cmp > 0)
                        continue;
                if (cmp < 0)
                        return;
                if (name[cmplen] != '/') {
                        add_object_entry(entry.oid,
                                         object_type(entry.mode),
                                         fullname, 1);
                        return;
                }
                if (S_ISDIR(entry.mode)) {
                        struct tree_desc sub;
                        struct pbase_tree_cache *tree;
                        const char *down = name+cmplen+1;
                        int downlen = name_cmp_len(down);

                        tree = pbase_tree_get(entry.oid);
                        if (!tree)
                                return;
                        init_tree_desc(&sub, tree->tree_data, tree->tree_size);

                        add_pbase_object(&sub, down, downlen, fullname);
                        pbase_tree_put(tree);
                }
        }
}

static unsigned *done_pbase_paths;
static int done_pbase_paths_num;
static int done_pbase_paths_alloc;
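/*
 * Binary search in done_pbase_paths, which is kept sorted in
 * descending order.  Returns the index of hash when found; otherwise
 * returns -(insertion point)-1, so a negative result both signals
 * "absent" and tells check_pbase_path() where to insert (pos = -pos - 1).
 */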
1286 static int done_pbase_path_pos(unsigned hash)
1287 {
1288         int lo = 0;
1289         int hi = done_pbase_paths_num;
1290         while (lo < hi) {
1291                 int mi = lo + (hi - lo) / 2;
1292                 if (done_pbase_paths[mi] == hash)
1293                         return mi;
1294                 if (done_pbase_paths[mi] < hash)
1295                         hi = mi;
1296                 else
1297                         lo = mi + 1;
1298         }
1299         return -lo-1;
1300 }
1301
1302 static int check_pbase_path(unsigned hash)
1303 {
1304         int pos = done_pbase_path_pos(hash);
1305         if (0 <= pos)
1306                 return 1;
1307         pos = -pos - 1;
1308         ALLOC_GROW(done_pbase_paths,
1309                    done_pbase_paths_num + 1,
1310                    done_pbase_paths_alloc);
1311         done_pbase_paths_num++;
1312         if (pos < done_pbase_paths_num)
1313                 MOVE_ARRAY(done_pbase_paths + pos + 1, done_pbase_paths + pos,
1314                            done_pbase_paths_num - pos - 1);
1315         done_pbase_paths[pos] = hash;
1316         return 0;
1317 }
1318
1319 static void add_preferred_base_object(const char *name)
1320 {
1321         struct pbase_tree *it;
1322         int cmplen;
1323         unsigned hash = pack_name_hash(name);
1324
1325         if (!num_preferred_base || check_pbase_path(hash))
1326                 return;
1327
1328         cmplen = name_cmp_len(name);
1329         for (it = pbase_tree; it; it = it->next) {
1330                 if (cmplen == 0) {
1331                         add_object_entry(&it->pcache.oid, OBJ_TREE, NULL, 1);
1332                 }
1333                 else {
1334                         struct tree_desc tree;
1335                         init_tree_desc(&tree, it->pcache.tree_data, it->pcache.tree_size);
1336                         add_pbase_object(&tree, name, cmplen, name);
1337                 }
1338         }
1339 }
1340
1341 static void add_preferred_base(struct object_id *oid)
1342 {
1343         struct pbase_tree *it;
1344         void *data;
1345         unsigned long size;
1346         struct object_id tree_oid;
1347
1348         if (window <= num_preferred_base++)
1349                 return;
1350
1351         data = read_object_with_reference(oid->hash, tree_type, &size, tree_oid.hash);
1352         if (!data)
1353                 return;
1354
1355         for (it = pbase_tree; it; it = it->next) {
1356                 if (!oidcmp(&it->pcache.oid, &tree_oid)) {
1357                         free(data);
1358                         return;
1359                 }
1360         }
1361
1362         it = xcalloc(1, sizeof(*it));
1363         it->next = pbase_tree;
1364         pbase_tree = it;
1365
1366         oidcpy(&it->pcache.oid, &tree_oid);
1367         it->pcache.tree_data = data;
1368         it->pcache.tree_size = size;
1369 }
1370
1371 static void cleanup_preferred_base(void)
1372 {
1373         struct pbase_tree *it;
1374         unsigned i;
1375
1376         it = pbase_tree;
1377         pbase_tree = NULL;
1378         while (it) {
1379                 struct pbase_tree *this = it;
1380                 it = this->next;
1381                 free(this->pcache.tree_data);
1382                 free(this);
1383         }
1384
1385         for (i = 0; i < ARRAY_SIZE(pbase_tree_cache); i++) {
1386                 if (!pbase_tree_cache[i])
1387                         continue;
1388                 free(pbase_tree_cache[i]->tree_data);
1389                 FREE_AND_NULL(pbase_tree_cache[i]);
1390         }
1391
1392         FREE_AND_NULL(done_pbase_paths);
1393         done_pbase_paths_num = done_pbase_paths_alloc = 0;
1394 }
1395
1396 static void check_object(struct object_entry *entry)
1397 {
1398         if (entry->in_pack) {
1399                 struct packed_git *p = entry->in_pack;
1400                 struct pack_window *w_curs = NULL;
1401                 const unsigned char *base_ref = NULL;
1402                 struct object_entry *base_entry;
1403                 unsigned long used, used_0;
1404                 unsigned long avail;
1405                 off_t ofs;
1406                 unsigned char *buf, c;
1407
1408                 buf = use_pack(p, &w_curs, entry->in_pack_offset, &avail);
1409
1410                 /*
1411                  * We want in_pack_type even if we do not reuse delta
1412                  * since non-delta representations could still be reused.
1413                  */
1414                 used = unpack_object_header_buffer(buf, avail,
1415                                                    &entry->in_pack_type,
1416                                                    &entry->size);
1417                 if (used == 0)
1418                         goto give_up;
1419
1420                 /*
1421                  * Determine if this is a delta and if so whether we can
1422                  * reuse it or not.  Otherwise let's find out as cheaply as
1423                  * possible what the actual type and size for this object is.
1424                  */
1425                 switch (entry->in_pack_type) {
1426                 default:
1427                         /* Not a delta hence we've already got all we need. */
1428                         entry->type = entry->in_pack_type;
1429                         entry->in_pack_header_size = used;
1430                         if (entry->type < OBJ_COMMIT || entry->type > OBJ_BLOB)
1431                                 goto give_up;
1432                         unuse_pack(&w_curs);
1433                         return;
1434                 case OBJ_REF_DELTA:
1435                         if (reuse_delta && !entry->preferred_base)
1436                                 base_ref = use_pack(p, &w_curs,
1437                                                 entry->in_pack_offset + used, NULL);
1438                         entry->in_pack_header_size = used + 20;
1439                         break;
1440                 case OBJ_OFS_DELTA:
1441                         buf = use_pack(p, &w_curs,
1442                                        entry->in_pack_offset + used, NULL);
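                        /*
                         * The base offset is stored as a chain of 7-bit
                         * groups, most significant first, and each
                         * continuation step adds one before shifting, so
                         * that no offset has two encodings.  Illustrative
                         * decode (made-up bytes): 0x91 0x2e gives
                         * ofs = 0x11, then ofs = ((0x11 + 1) << 7) + 0x2e
                         * = 2350.
                         */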
1443                         used_0 = 0;
1444                         c = buf[used_0++];
1445                         ofs = c & 127;
1446                         while (c & 128) {
1447                                 ofs += 1;
1448                                 if (!ofs || MSB(ofs, 7)) {
1449                                         error("delta base offset overflow in pack for %s",
1450                                               oid_to_hex(&entry->idx.oid));
1451                                         goto give_up;
1452                                 }
1453                                 c = buf[used_0++];
1454                                 ofs = (ofs << 7) + (c & 127);
1455                         }
1456                         ofs = entry->in_pack_offset - ofs;
1457                         if (ofs <= 0 || ofs >= entry->in_pack_offset) {
1458                                 error("delta base offset out of bound for %s",
1459                                       oid_to_hex(&entry->idx.oid));
1460                                 goto give_up;
1461                         }
1462                         if (reuse_delta && !entry->preferred_base) {
1463                                 struct revindex_entry *revidx;
1464                                 revidx = find_pack_revindex(p, ofs);
1465                                 if (!revidx)
1466                                         goto give_up;
1467                                 base_ref = nth_packed_object_sha1(p, revidx->nr);
1468                         }
1469                         entry->in_pack_header_size = used + used_0;
1470                         break;
1471                 }
1472
1473                 if (base_ref && (base_entry = packlist_find(&to_pack, base_ref, NULL))) {
1474                         /*
1475                          * If base_ref was set above that means we wish to
1476                          * reuse delta data, and we even found that base
1477                          * in the list of objects we want to pack. Goodie!
1478                          *
1479                          * Depth value does not matter - find_deltas() will
1480                          * never consider reused delta as the base object to
1481                          * deltify other objects against, in order to avoid
1482                          * circular deltas.
1483                          */
1484                         entry->type = entry->in_pack_type;
1485                         entry->delta = base_entry;
1486                         entry->delta_size = entry->size;
1487                         entry->delta_sibling = base_entry->delta_child;
1488                         base_entry->delta_child = entry;
1489                         unuse_pack(&w_curs);
1490                         return;
1491                 }
1492
1493                 if (entry->type) {
1494                         /*
1495                          * This must be a delta and we already know what the
1496                          * final object type is.  Let's extract the actual
1497                          * object size from the delta header.
1498                          */
1499                         entry->size = get_size_from_delta(p, &w_curs,
1500                                         entry->in_pack_offset + entry->in_pack_header_size);
1501                         if (entry->size == 0)
1502                                 goto give_up;
1503                         unuse_pack(&w_curs);
1504                         return;
1505                 }
1506
1507                 /*
1508                  * No choice but to fall back to the recursive delta walk
1509          * with sha1_object_info() to find out the object type
1510                  * at this point...
1511                  */
1512                 give_up:
1513                 unuse_pack(&w_curs);
1514         }
1515
1516         entry->type = sha1_object_info(entry->idx.oid.hash, &entry->size);
1517         /*
1518          * The error condition is checked in prepare_pack().  This is
1519          * to permit a missing preferred base object to be ignored
1520          * as a preferred base.  Doing so can result in a larger
1521          * pack file, but the transfer will still take place.
1522          */
1523 }
1524
1525 static int pack_offset_sort(const void *_a, const void *_b)
1526 {
1527         const struct object_entry *a = *(struct object_entry **)_a;
1528         const struct object_entry *b = *(struct object_entry **)_b;
1529
1530         /* avoid filesystem thrashing with loose objects */
1531         if (!a->in_pack && !b->in_pack)
1532                 return oidcmp(&a->idx.oid, &b->idx.oid);
1533
1534         if (a->in_pack < b->in_pack)
1535                 return -1;
1536         if (a->in_pack > b->in_pack)
1537                 return 1;
1538         return a->in_pack_offset < b->in_pack_offset ? -1 :
1539                         (a->in_pack_offset > b->in_pack_offset);
1540 }
1541
1542 /*
1543  * Drop an on-disk delta we were planning to reuse. Naively, this would
1544  * just involve blanking out the "delta" field, but we have to deal
1545  * with some extra book-keeping:
1546  *
1547  *   1. Removing ourselves from the delta_sibling linked list.
1548  *
1549  *   2. Updating our size/type to the non-delta representation. These were
1550  *      either not recorded initially (size) or overwritten with the delta type
1551  *      (type) when check_object() decided to reuse the delta.
1552  *
1553  *   3. Resetting our delta depth, as we are now a base object.
1554  */
1555 static void drop_reused_delta(struct object_entry *entry)
1556 {
1557         struct object_entry **p = &entry->delta->delta_child;
1558         struct object_info oi = OBJECT_INFO_INIT;
1559
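        /*
         * Classic pointer-to-pointer unlink: "p" always points at the
         * link we might rewrite, so removing "entry" from its base's
         * sibling list needs no special case for the list head.
         */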
1560         while (*p) {
1561                 if (*p == entry)
1562                         *p = (*p)->delta_sibling;
1563                 else
1564                         p = &(*p)->delta_sibling;
1565         }
1566         entry->delta = NULL;
1567         entry->depth = 0;
1568
1569         oi.sizep = &entry->size;
1570         oi.typep = &entry->type;
1571         if (packed_object_info(entry->in_pack, entry->in_pack_offset, &oi) < 0) {
1572                 /*
1573                  * We failed to get the info from this pack for some reason;
1574                  * fall back to sha1_object_info, which may find another copy.
1575                  * And if that fails, the error will be recorded in entry->type
1576                  * and dealt with in prepare_pack().
1577                  */
1578                 entry->type = sha1_object_info(entry->idx.oid.hash,
1579                                                &entry->size);
1580         }
1581 }
1582
1583 /*
1584  * Follow the chain of deltas from this entry onward, throwing away any links
1585  * that cause us to hit a cycle (as determined by the DFS state flags in
1586  * the entries).
1587  *
1588  * We also detect too-long reused chains that would violate our --depth
1589  * limit.
1590  */
1591 static void break_delta_chains(struct object_entry *entry)
1592 {
1593         /*
1594          * The actual depth of each object we will write is stored as an int,
1595          * as it cannot exceed our int "depth" limit. But before we break
1596          * anything based on that limit, we may potentially go as deep as the
1597          * number of objects, which is elsewhere bounded to a uint32_t.
1598          */
1599         uint32_t total_depth;
1600         struct object_entry *cur, *next;
1601
1602         for (cur = entry, total_depth = 0;
1603              cur;
1604              cur = cur->delta, total_depth++) {
1605                 if (cur->dfs_state == DFS_DONE) {
1606                         /*
1607                          * We've already seen this object and know it isn't
1608                          * part of a cycle. We do need to append its depth
1609                          * to our count.
1610                          */
1611                         total_depth += cur->depth;
1612                         break;
1613                 }
1614
1615                 /*
1616                  * We break cycles before looping, so an ACTIVE state (or any
1617                  * other cruft which made its way into the state variable)
1618                  * is a bug.
1619                  */
1620                 if (cur->dfs_state != DFS_NONE)
1621                         die("BUG: confusing delta dfs state in first pass: %d",
1622                             cur->dfs_state);
1623
1624                 /*
1625                  * Now we know this is the first time we've seen the object. If
1626                  * it's not a delta, we're done traversing, but we'll mark it
1627                  * done to save time on future traversals.
1628                  */
1629                 if (!cur->delta) {
1630                         cur->dfs_state = DFS_DONE;
1631                         break;
1632                 }
1633
1634                 /*
1635                  * Mark ourselves as active and see if the next step causes
1636                  * us to cycle to another active object. It's important to do
1637                  * this _before_ we loop, because it impacts where we make the
1638                  * cut, and thus how our total_depth counter works.
1639                  * E.g., we may see a partial loop like:
1640                  *
1641                  *   A -> B -> C -> D -> B
1642                  *
1643                  * Cutting B->C breaks the cycle. But now the depth of A is
1644                  * only 1, and our total_depth counter is at 3. The size of the
1645                  * error is always one less than the size of the cycle we
1646                  * broke. Commits C and D were "lost" from A's chain.
1647                  *
1648                  * If we instead cut D->B, then the depth of A is correct at 3.
1649                  * We keep all commits in the chain that we examined.
1650                  */
1651                 cur->dfs_state = DFS_ACTIVE;
1652                 if (cur->delta->dfs_state == DFS_ACTIVE) {
1653                         drop_reused_delta(cur);
1654                         cur->dfs_state = DFS_DONE;
1655                         break;
1656                 }
1657         }
1658
1659         /*
1660          * And now that we've gone all the way to the bottom of the chain, we
1661          * need to clear the active flags and set the depth fields as
1662          * appropriate. Unlike the loop above, which can quit when it drops a
1663          * delta, we need to keep going to look for more depth cuts. So we need
1664          * an extra "next" pointer to keep going after we reset cur->delta.
1665          */
1666         for (cur = entry; cur; cur = next) {
1667                 next = cur->delta;
1668
1669                 /*
1670                  * We should have a chain of zero or more ACTIVE states down to
1671                  * a final DONE. We can quit after the DONE, because either it
1672                  * has no bases, or we've already handled them in a previous
1673                  * call.
1674                  */
1675                 if (cur->dfs_state == DFS_DONE)
1676                         break;
1677                 else if (cur->dfs_state != DFS_ACTIVE)
1678                         die("BUG: confusing delta dfs state in second pass: %d",
1679                             cur->dfs_state);
1680
1681                 /*
1682                  * If the total_depth is more than depth, then we need to snip
1683                  * the chain into two or more smaller chains that don't exceed
1684                  * the maximum depth. Most of the resulting chains will contain
1685                  * (depth + 1) entries (i.e., depth deltas plus one base), and
1686                  * the last chain (i.e., the one containing entry) will contain
1687                  * whatever entries are left over, namely
1688                  * (total_depth % (depth + 1)) of them.
1689                  *
1690                  * Since we are iterating towards decreasing depth, we need to
1691                  * decrement total_depth as we go, and we need to write to the
1692                  * entry what its final depth will be after all of the
1693                  * snipping. Since we're snipping into chains of length (depth
1694                  * + 1) entries, the final depth of an entry will be its
1695                  * original depth modulo (depth + 1). Any time we encounter an
1696                  * entry whose final depth is supposed to be zero, we snip it
1697                  * from its delta base, thereby making it so.
1698                  */
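                /*
                 * Worked example (illustrative numbers): with depth = 3
                 * and total_depth = 9 at this point, successive entries
                 * get depths 9%4 = 1, 8%4 = 0, 7%4 = 3, and so on; every
                 * entry that comes out to 0 is snipped from its base, so
                 * no resulting chain exceeds 3 deltas plus one base.
                 */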
1699                 cur->depth = (total_depth--) % (depth + 1);
1700                 if (!cur->depth)
1701                         drop_reused_delta(cur);
1702
1703                 cur->dfs_state = DFS_DONE;
1704         }
1705 }
1706
1707 static void get_object_details(void)
1708 {
1709         uint32_t i;
1710         struct object_entry **sorted_by_offset;
1711
1712         sorted_by_offset = xcalloc(to_pack.nr_objects, sizeof(struct object_entry *));
1713         for (i = 0; i < to_pack.nr_objects; i++)
1714                 sorted_by_offset[i] = to_pack.objects + i;
1715         QSORT(sorted_by_offset, to_pack.nr_objects, pack_offset_sort);
1716
1717         for (i = 0; i < to_pack.nr_objects; i++) {
1718                 struct object_entry *entry = sorted_by_offset[i];
1719                 check_object(entry);
1720                 if (big_file_threshold < entry->size)
1721                         entry->no_try_delta = 1;
1722         }
1723
1724         /*
1725          * This must happen in a second pass, since we rely on the delta
1726          * information for the whole list being completed.
1727          */
1728         for (i = 0; i < to_pack.nr_objects; i++)
1729                 break_delta_chains(&to_pack.objects[i]);
1730
1731         free(sorted_by_offset);
1732 }
1733
1734 /*
1735  * We search for deltas in a list sorted by type, by filename hash, and then
1736  * by size, so that we see progressively smaller and smaller files.
1737  * That's because we prefer deltas to be from the bigger file
1738  * to the smaller -- deletes are potentially cheaper, but perhaps
1739  * more importantly, the bigger file is likely the more recent
1740  * one.  The deepest deltas are therefore the oldest objects which are
1741  * less likely to be accessed often.
1742  */
1743 static int type_size_sort(const void *_a, const void *_b)
1744 {
1745         const struct object_entry *a = *(struct object_entry **)_a;
1746         const struct object_entry *b = *(struct object_entry **)_b;
1747
1748         if (a->type > b->type)
1749                 return -1;
1750         if (a->type < b->type)
1751                 return 1;
1752         if (a->hash > b->hash)
1753                 return -1;
1754         if (a->hash < b->hash)
1755                 return 1;
1756         if (a->preferred_base > b->preferred_base)
1757                 return -1;
1758         if (a->preferred_base < b->preferred_base)
1759                 return 1;
1760         if (a->size > b->size)
1761                 return -1;
1762         if (a->size < b->size)
1763                 return 1;
1764         return a < b ? -1 : (a > b);  /* newest first */
1765 }
1766
1767 struct unpacked {
1768         struct object_entry *entry;
1769         void *data;
1770         struct delta_index *index;
1771         unsigned depth;
1772 };
1773
1774 static int delta_cacheable(unsigned long src_size, unsigned long trg_size,
1775                            unsigned long delta_size)
1776 {
1777         if (max_delta_cache_size && delta_cache_size + delta_size > max_delta_cache_size)
1778                 return 0;
1779
1780         if (delta_size < cache_max_small_delta_size)
1781                 return 1;
1782
1783         /* cache delta, if objects are large enough compared to delta size */
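        /*
         * E.g. (illustrative sizes): a 4 MiB source and a 2 MiB target
         * give 4 + 1 = 5 on the left-hand side, so deltas up to about
         * 5 KiB would still be cached.
         */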
1784         if ((src_size >> 20) + (trg_size >> 21) > (delta_size >> 10))
1785                 return 1;
1786
1787         return 0;
1788 }
1789
1790 #ifndef NO_PTHREADS
1791
1792 static pthread_mutex_t read_mutex;
1793 #define read_lock()             pthread_mutex_lock(&read_mutex)
1794 #define read_unlock()           pthread_mutex_unlock(&read_mutex)
1795
1796 static pthread_mutex_t cache_mutex;
1797 #define cache_lock()            pthread_mutex_lock(&cache_mutex)
1798 #define cache_unlock()          pthread_mutex_unlock(&cache_mutex)
1799
1800 static pthread_mutex_t progress_mutex;
1801 #define progress_lock()         pthread_mutex_lock(&progress_mutex)
1802 #define progress_unlock()       pthread_mutex_unlock(&progress_mutex)
1803
1804 #else
1805
1806 #define read_lock()             (void)0
1807 #define read_unlock()           (void)0
1808 #define cache_lock()            (void)0
1809 #define cache_unlock()          (void)0
1810 #define progress_lock()         (void)0
1811 #define progress_unlock()       (void)0
1812
1813 #endif
1814
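/*
 * Try to deltify trg against src.  The return value follows a small
 * convention: -1 means this pairing can never work (type mismatch, so
 * the caller stops scanning the window), 0 means this particular pair
 * is not worth it, and 1 means a new delta was recorded on trg_entry.
 */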
1815 static int try_delta(struct unpacked *trg, struct unpacked *src,
1816                      unsigned max_depth, unsigned long *mem_usage)
1817 {
1818         struct object_entry *trg_entry = trg->entry;
1819         struct object_entry *src_entry = src->entry;
1820         unsigned long trg_size, src_size, delta_size, sizediff, max_size, sz;
1821         unsigned ref_depth;
1822         enum object_type type;
1823         void *delta_buf;
1824
1825         /* Don't bother doing diffs between different types */
1826         if (trg_entry->type != src_entry->type)
1827                 return -1;
1828
1829         /*
1830          * We do not bother to try a delta that we discarded on an
1831          * earlier try, but only when reusing delta data.  Note that
1832          * a src_entry marked as the preferred_base should always
1833          * be considered, as even if we produce a suboptimal delta against
1834          * it, we will still save the transfer cost, as we already know
1835          * the other side has it and we won't send src_entry at all.
1836          */
1837         if (reuse_delta && trg_entry->in_pack &&
1838             trg_entry->in_pack == src_entry->in_pack &&
1839             !src_entry->preferred_base &&
1840             trg_entry->in_pack_type != OBJ_REF_DELTA &&
1841             trg_entry->in_pack_type != OBJ_OFS_DELTA)
1842                 return 0;
1843
1844         /* Let's not bust the allowed depth. */
1845         if (src->depth >= max_depth)
1846                 return 0;
1847
1848         /* Now some size filtering heuristics. */
1849         trg_size = trg_entry->size;
1850         if (!trg_entry->delta) {
1851                 max_size = trg_size/2 - 20;
1852                 ref_depth = 1;
1853         } else {
1854                 max_size = trg_entry->delta_size;
1855                 ref_depth = trg->depth;
1856         }
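        /*
         * Scale the size limit down the deeper the base sits: a delta
         * against a deeper object must be proportionally smaller to pay
         * for the longer chain it creates.  E.g. (illustrative) with
         * max_depth = 10, ref_depth = 1 and src->depth = 5, the limit
         * is halved: (10 - 5) / (10 - 1 + 1).
         */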
1857         max_size = (uint64_t)max_size * (max_depth - src->depth) /
1858                                                 (max_depth - ref_depth + 1);
1859         if (max_size == 0)
1860                 return 0;
1861         src_size = src_entry->size;
1862         sizediff = src_size < trg_size ? trg_size - src_size : 0;
1863         if (sizediff >= max_size)
1864                 return 0;
1865         if (trg_size < src_size / 32)
1866                 return 0;
1867
1868         /* Load data if not already done */
1869         if (!trg->data) {
1870                 read_lock();
1871                 trg->data = read_sha1_file(trg_entry->idx.oid.hash, &type,
1872                                            &sz);
1873                 read_unlock();
1874                 if (!trg->data)
1875                         die("object %s cannot be read",
1876                             oid_to_hex(&trg_entry->idx.oid));
1877                 if (sz != trg_size)
1878                         die("object %s inconsistent object length (%lu vs %lu)",
1879                             oid_to_hex(&trg_entry->idx.oid), sz,
1880                             trg_size);
1881                 *mem_usage += sz;
1882         }
1883         if (!src->data) {
1884                 read_lock();
1885                 src->data = read_sha1_file(src_entry->idx.oid.hash, &type,
1886                                            &sz);
1887                 read_unlock();
1888                 if (!src->data) {
1889                         if (src_entry->preferred_base) {
1890                                 static int warned = 0;
1891                                 if (!warned++)
1892                                         warning("object %s cannot be read",
1893                                                 oid_to_hex(&src_entry->idx.oid));
1894                                 /*
1895                                  * Those objects are not included in the
1896                                  * resulting pack.  Be resilient and ignore
1897                                  * them if they can't be read, in case the
1898                                  * pack could be created nevertheless.
1899                                  */
1900                                 return 0;
1901                         }
1902                         die("object %s cannot be read",
1903                             oid_to_hex(&src_entry->idx.oid));
1904                 }
1905                 if (sz != src_size)
1906                         die("object %s inconsistent object length (%lu vs %lu)",
1907                             oid_to_hex(&src_entry->idx.oid), sz,
1908                             src_size);
1909                 *mem_usage += sz;
1910         }
1911         if (!src->index) {
1912                 src->index = create_delta_index(src->data, src_size);
1913                 if (!src->index) {
1914                         static int warned = 0;
1915                         if (!warned++)
1916                                 warning("suboptimal pack - out of memory");
1917                         return 0;
1918                 }
1919                 *mem_usage += sizeof_delta_index(src->index);
1920         }
1921
1922         delta_buf = create_delta(src->index, trg->data, trg_size, &delta_size, max_size);
1923         if (!delta_buf)
1924                 return 0;
1925
1926         if (trg_entry->delta) {
1927                 /* Prefer only shallower same-sized deltas. */
1928                 if (delta_size == trg_entry->delta_size &&
1929                     src->depth + 1 >= trg->depth) {
1930                         free(delta_buf);
1931                         return 0;
1932                 }
1933         }
1934
1935         /*
1936          * Handle memory allocation outside of the cache
1937          * accounting lock.  Compiler will optimize the strangeness
1938          * away when NO_PTHREADS is defined.
1939          */
1940         free(trg_entry->delta_data);
1941         cache_lock();
1942         if (trg_entry->delta_data) {
1943                 delta_cache_size -= trg_entry->delta_size;
1944                 trg_entry->delta_data = NULL;
1945         }
1946         if (delta_cacheable(src_size, trg_size, delta_size)) {
1947                 delta_cache_size += delta_size;
1948                 cache_unlock();
1949                 trg_entry->delta_data = xrealloc(delta_buf, delta_size);
1950         } else {
1951                 cache_unlock();
1952                 free(delta_buf);
1953         }
1954
1955         trg_entry->delta = src_entry;
1956         trg_entry->delta_size = delta_size;
1957         trg->depth = src->depth + 1;
1958
1959         return 1;
1960 }
1961
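/*
 * Return the depth of the deepest delta already hanging off "me",
 * assuming "me" itself sits at depth "n"; find_deltas() subtracts this
 * from the depth budget so existing children never end up too deep.
 */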
1962 static unsigned int check_delta_limit(struct object_entry *me, unsigned int n)
1963 {
1964         struct object_entry *child = me->delta_child;
1965         unsigned int m = n;
1966         while (child) {
1967                 unsigned int c = check_delta_limit(child, n + 1);
1968                 if (m < c)
1969                         m = c;
1970                 child = child->delta_sibling;
1971         }
1972         return m;
1973 }
1974
1975 static unsigned long free_unpacked(struct unpacked *n)
1976 {
1977         unsigned long freed_mem = sizeof_delta_index(n->index);
1978         free_delta_index(n->index);
1979         n->index = NULL;
1980         if (n->data) {
1981                 freed_mem += n->entry->size;
1982                 FREE_AND_NULL(n->data);
1983         }
1984         n->entry = NULL;
1985         n->depth = 0;
1986         return freed_mem;
1987 }
1988
1989 static void find_deltas(struct object_entry **list, unsigned *list_size,
1990                         int window, int depth, unsigned *processed)
1991 {
1992         uint32_t i, idx = 0, count = 0;
1993         struct unpacked *array;
1994         unsigned long mem_usage = 0;
1995
1996         array = xcalloc(window, sizeof(struct unpacked));
1997
1998         for (;;) {
1999                 struct object_entry *entry;
2000                 struct unpacked *n = array + idx;
2001                 int j, max_depth, best_base = -1;
2002
2003                 progress_lock();
2004                 if (!*list_size) {
2005                         progress_unlock();
2006                         break;
2007                 }
2008                 entry = *list++;
2009                 (*list_size)--;
2010                 if (!entry->preferred_base) {
2011                         (*processed)++;
2012                         display_progress(progress_state, *processed);
2013                 }
2014                 progress_unlock();
2015
2016                 mem_usage -= free_unpacked(n);
2017                 n->entry = entry;
2018
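                /*
                 * If a window memory limit is in effect, evict entries
                 * from the tail of the window (the slot "count" steps
                 * behind "idx", wrapping around) until we fit again.
                 */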
2019                 while (window_memory_limit &&
2020                        mem_usage > window_memory_limit &&
2021                        count > 1) {
2022                         uint32_t tail = (idx + window - count) % window;
2023                         mem_usage -= free_unpacked(array + tail);
2024                         count--;
2025                 }
2026
2027                 /* We do not compute deltas to *create* objects we are not
2028                  * going to pack.
2029                  */
2030                 if (entry->preferred_base)
2031                         goto next;
2032
2033                 /*
2034                  * If the current object is at the pack edge, take the depth of
2035                  * the objects that depend on it into account; otherwise they
2036                  * would become too deep.
2037                  */
2038                 max_depth = depth;
2039                 if (entry->delta_child) {
2040                         max_depth -= check_delta_limit(entry, 0);
2041                         if (max_depth <= 0)
2042                                 goto next;
2043                 }
2044
2045                 j = window;
2046                 while (--j > 0) {
2047                         int ret;
2048                         uint32_t other_idx = idx + j;
2049                         struct unpacked *m;
2050                         if (other_idx >= window)
2051                                 other_idx -= window;
2052                         m = array + other_idx;
2053                         if (!m->entry)
2054                                 break;
2055                         ret = try_delta(n, m, max_depth, &mem_usage);
2056                         if (ret < 0)
2057                                 break;
2058                         else if (ret > 0)
2059                                 best_base = other_idx;
2060                 }
2061
2062                 /*
2063                  * If we decided to cache the delta data, then it is best
2064                  * to compress it right away.  First because we have to do
2065                  * it anyway, and doing it here while we're threaded will
2066                  * save a lot of time in the non threaded write phase,
2067                  * as well as allow for caching more deltas within
2068                  * the same cache size limit.
2069                  * ...
2070                  * But only if not writing to stdout, since in that case
2071                  * the network is most likely throttling writes anyway,
2072                  * and therefore it is best to go to the write phase ASAP
2073                  * instead, as we can afford spending more time compressing
2074                  * between writes at that moment.
2075                  */
2076                 if (entry->delta_data && !pack_to_stdout) {
2077                         entry->z_delta_size = do_compress(&entry->delta_data,
2078                                                           entry->delta_size);
2079                         cache_lock();
2080                         delta_cache_size -= entry->delta_size;
2081                         delta_cache_size += entry->z_delta_size;
2082                         cache_unlock();
2083                 }
2084
2085                 /* If we made n a delta, and if n is already at max
2086                  * depth, leaving it in the window is pointless.  We
2087                  * should evict it first.
2088                  */
2089                 if (entry->delta && max_depth <= n->depth)
2090                         continue;
2091
2092                 /*
2093                  * Move the best delta base up in the window, after the
2094                  * currently deltified object, to keep it longer.  It will
2095                  * be the first base object to be attempted next.
2096                  */
2097                 if (entry->delta) {
2098                         struct unpacked swap = array[best_base];
2099                         int dist = (window + idx - best_base) % window;
2100                         int dst = best_base;
2101                         while (dist--) {
2102                                 int src = (dst + 1) % window;
2103                                 array[dst] = array[src];
2104                                 dst = src;
2105                         }
2106                         array[dst] = swap;
2107                 }
2108
2109                 next:
2110                 idx++;
2111                 if (count + 1 < window)
2112                         count++;
2113                 if (idx >= window)
2114                         idx = 0;
2115         }
2116
2117         for (i = 0; i < window; ++i) {
2118                 free_delta_index(array[i].index);
2119                 free(array[i].data);
2120         }
2121         free(array);
2122 }
2123
2124 #ifndef NO_PTHREADS
2125
2126 static void try_to_free_from_threads(size_t size)
2127 {
2128         read_lock();
2129         release_pack_memory(size);
2130         read_unlock();
2131 }
2132
2133 static try_to_free_t old_try_to_free_routine;
2134
2135 /*
2136  * The main thread waits on the condition that (at least) one of the workers
2137  * has stopped working (which is indicated in the .working member of
2138  * struct thread_params).
2139  * When a work thread has completed its work, it sets .working to 0 and
2140  * signals the main thread and waits on the condition that .data_ready
2141  * becomes 1.
2142  */
2143
2144 struct thread_params {
2145         pthread_t thread;
2146         struct object_entry **list;
2147         unsigned list_size;
2148         unsigned remaining;
2149         int window;
2150         int depth;
2151         int working;
2152         int data_ready;
2153         pthread_mutex_t mutex;
2154         pthread_cond_t cond;
2155         unsigned *processed;
2156 };
2157
2158 static pthread_cond_t progress_cond;
2159
2160 /*
2161  * Mutexes and condition variables can't be statically initialized on Windows.
2162  */
2163 static void init_threaded_search(void)
2164 {
2165         init_recursive_mutex(&read_mutex);
2166         pthread_mutex_init(&cache_mutex, NULL);
2167         pthread_mutex_init(&progress_mutex, NULL);
2168         pthread_cond_init(&progress_cond, NULL);
2169         old_try_to_free_routine = set_try_to_free_routine(try_to_free_from_threads);
2170 }
2171
2172 static void cleanup_threaded_search(void)
2173 {
2174         set_try_to_free_routine(old_try_to_free_routine);
2175         pthread_cond_destroy(&progress_cond);
2176         pthread_mutex_destroy(&read_mutex);
2177         pthread_mutex_destroy(&cache_mutex);
2178         pthread_mutex_destroy(&progress_mutex);
2179 }
2180
2181 static void *threaded_find_deltas(void *arg)
2182 {
2183         struct thread_params *me = arg;
2184
2185         progress_lock();
2186         while (me->remaining) {
2187                 progress_unlock();
2188
2189                 find_deltas(me->list, &me->remaining,
2190                             me->window, me->depth, me->processed);
2191
2192                 progress_lock();
2193                 me->working = 0;
2194                 pthread_cond_signal(&progress_cond);
2195                 progress_unlock();
2196
2197                 /*
2198                  * We must not set ->data_ready before we wait on the
2199                  * condition because the main thread may have set it to 1
2200                  * before we get here. In order to be sure that new
2201                  * work is available if we see 1 in ->data_ready, it
2202                  * was initialized to 0 before this thread was spawned
2203                  * and we reset it to 0 right away.
2204                  */
2205                 pthread_mutex_lock(&me->mutex);
2206                 while (!me->data_ready)
2207                         pthread_cond_wait(&me->cond, &me->mutex);
2208                 me->data_ready = 0;
2209                 pthread_mutex_unlock(&me->mutex);
2210
2211                 progress_lock();
2212         }
2213         progress_unlock();
2214         /* leave ->working 1 so that this doesn't get more work assigned */
2215         return NULL;
2216 }
2217
2218 static void ll_find_deltas(struct object_entry **list, unsigned list_size,
2219                            int window, int depth, unsigned *processed)
2220 {
2221         struct thread_params *p;
2222         int i, ret, active_threads = 0;
2223
2224         init_threaded_search();
2225
2226         if (delta_search_threads <= 1) {
2227                 find_deltas(list, &list_size, window, depth, processed);
2228                 cleanup_threaded_search();
2229                 return;
2230         }
2231         if (progress > pack_to_stdout)
2232                 fprintf(stderr, "Delta compression using up to %d threads.\n",
2233                                 delta_search_threads);
2234         p = xcalloc(delta_search_threads, sizeof(*p));
2235
2236         /* Partition the work amongst work threads. */
2237         for (i = 0; i < delta_search_threads; i++) {
2238                 unsigned sub_size = list_size / (delta_search_threads - i);
2239
2240                 /* don't use segments that are too small, or no deltas will be found */
2241                 if (sub_size < 2*window && i+1 < delta_search_threads)
2242                         sub_size = 0;
2243
2244                 p[i].window = window;
2245                 p[i].depth = depth;
2246                 p[i].processed = processed;
2247                 p[i].working = 1;
2248                 p[i].data_ready = 0;
2249
2250                 /* try to split chunks on "path" boundaries */
2251                 while (sub_size && sub_size < list_size &&
2252                        list[sub_size]->hash &&
2253                        list[sub_size]->hash == list[sub_size-1]->hash)
2254                         sub_size++;
2255
2256                 p[i].list = list;
2257                 p[i].list_size = sub_size;
2258                 p[i].remaining = sub_size;
2259
2260                 list += sub_size;
2261                 list_size -= sub_size;
2262         }
2263
2264         /* Start work threads. */
2265         for (i = 0; i < delta_search_threads; i++) {
2266                 if (!p[i].list_size)
2267                         continue;
2268                 pthread_mutex_init(&p[i].mutex, NULL);
2269                 pthread_cond_init(&p[i].cond, NULL);
2270                 ret = pthread_create(&p[i].thread, NULL,
2271                                      threaded_find_deltas, &p[i]);
2272                 if (ret)
2273                         die("unable to create thread: %s", strerror(ret));
2274                 active_threads++;
2275         }
2276
2277         /*
2278          * Now let's wait for work completion.  Each time a thread is done
2279          * with its work, we steal half of the remaining work from the
2280          * thread with the largest number of unprocessed objects and give
2281  * it to that newly idle thread.  This ensures good load balancing
2282          * until the remaining object list segments are simply too short
2283          * to be worth splitting anymore.
2284          */
2285         while (active_threads) {
2286                 struct thread_params *target = NULL;
2287                 struct thread_params *victim = NULL;
2288                 unsigned sub_size = 0;
2289
2290                 progress_lock();
2291                 for (;;) {
2292                         for (i = 0; !target && i < delta_search_threads; i++)
2293                                 if (!p[i].working)
2294                                         target = &p[i];
2295                         if (target)
2296                                 break;
2297                         pthread_cond_wait(&progress_cond, &progress_mutex);
2298                 }
2299
2300                 for (i = 0; i < delta_search_threads; i++)
2301                         if (p[i].remaining > 2*window &&
2302                             (!victim || victim->remaining < p[i].remaining))
2303                                 victim = &p[i];
2304                 if (victim) {
2305                         sub_size = victim->remaining / 2;
2306                         list = victim->list + victim->list_size - sub_size;
2307                         while (sub_size && list[0]->hash &&
2308                                list[0]->hash == list[-1]->hash) {
2309                                 list++;
2310                                 sub_size--;
2311                         }
2312                         if (!sub_size) {
2313                                 /*
2314                                  * It is possible for some "paths" to have
2315                                  * so many objects that no hash boundary
2316                                  * might be found.  Let's just steal the
2317                                  * exact half in that case.
2318                                  */
2319                                 sub_size = victim->remaining / 2;
2320                                 list -= sub_size;
2321                         }
2322                         target->list = list;
2323                         victim->list_size -= sub_size;
2324                         victim->remaining -= sub_size;
2325                 }
2326                 target->list_size = sub_size;
2327                 target->remaining = sub_size;
2328                 target->working = 1;
2329                 progress_unlock();
2330
2331                 pthread_mutex_lock(&target->mutex);
2332                 target->data_ready = 1;
2333                 pthread_cond_signal(&target->cond);
2334                 pthread_mutex_unlock(&target->mutex);
2335
2336                 if (!sub_size) {
2337                         pthread_join(target->thread, NULL);
2338                         pthread_cond_destroy(&target->cond);
2339                         pthread_mutex_destroy(&target->mutex);
2340                         active_threads--;
2341                 }
2342         }
2343         cleanup_threaded_search();
2344         free(p);
2345 }
2346
2347 #else
2348 #define ll_find_deltas(l, s, w, d, p)   find_deltas(l, &s, w, d, p)
2349 #endif
2350
2351 static void add_tag_chain(const struct object_id *oid)
2352 {
2353         struct tag *tag;
2354
2355         /*
2356          * We catch duplicates already in add_object_entry(), but we'd
2357          * prefer to do this extra check to avoid having to parse the
2358          * tag at all if we already know that it's being packed (e.g., if
2359          * it was included via bitmaps, we would not have parsed it
2360          * previously).
2361          */
2362         if (packlist_find(&to_pack, oid->hash, NULL))
2363                 return;
2364
2365         tag = lookup_tag(oid);
2366         while (1) {
2367                 if (!tag || parse_tag(tag) || !tag->tagged)
2368                         die("unable to pack objects reachable from tag %s",
2369                             oid_to_hex(oid));
2370
2371                 add_object_entry(&tag->object.oid, OBJ_TAG, NULL, 0);
2372
2373                 if (tag->tagged->type != OBJ_TAG)
2374                         return;
2375
2376                 tag = (struct tag *)tag->tagged;
2377         }
2378 }
2379
2380 static int add_ref_tag(const char *path, const struct object_id *oid, int flag, void *cb_data)
2381 {
2382         struct object_id peeled;
2383
2384         if (starts_with(path, "refs/tags/") && /* is a tag? */
2385             !peel_ref(path, &peeled)    && /* peelable? */
2386             packlist_find(&to_pack, peeled.hash, NULL))      /* object packed? */
2387                 add_tag_chain(oid);
2388         return 0;
2389 }
2390
2391 static void prepare_pack(int window, int depth)
2392 {
2393         struct object_entry **delta_list;
2394         uint32_t i, nr_deltas;
2395         unsigned n;
2396
2397         get_object_details();
2398
2399         /*
2400          * If we're locally repacking then we need to be doubly careful
2401          * from now on in order to make sure no stealth corruption gets
2402          * propagated to the new pack.  Clients receiving streamed packs
2403          * should validate everything they get anyway so no need to incur
2404          * the additional cost here in that case.
2405          */
2406         if (!pack_to_stdout)
2407                 do_check_packed_object_crc = 1;
2408
2409         if (!to_pack.nr_objects || !window || !depth)
2410                 return;
2411
2412         ALLOC_ARRAY(delta_list, to_pack.nr_objects);
2413         nr_deltas = n = 0;
2414
2415         for (i = 0; i < to_pack.nr_objects; i++) {
2416                 struct object_entry *entry = to_pack.objects + i;
2417
2418                 if (entry->delta)
2419                         /* This happens if we decided to reuse an existing
2420                          * delta from a pack.  "reuse_delta &&" is implied.
2421                          */
2422                         continue;
2423
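                /*
                 * Tiny objects are typically not worth deltifying; the
                 * delta and header overhead would eat most of the savings.
                 */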
2424                 if (entry->size < 50)
2425                         continue;
2426
2427                 if (entry->no_try_delta)
2428                         continue;
2429
2430                 if (!entry->preferred_base) {
2431                         nr_deltas++;
2432                         if (entry->type < 0)
2433                                 die("unable to get type of object %s",
2434                                     oid_to_hex(&entry->idx.oid));
2435                 } else {
2436                         if (entry->type < 0) {
2437                                 /*
2438                                  * This object is not found, but we
2439                                  * don't have to include it anyway.
2440                                  */
2441                                 continue;
2442                         }
2443                 }
2444
2445                 delta_list[n++] = entry;
2446         }
2447
2448         if (nr_deltas && n > 1) {
2449                 unsigned nr_done = 0;
2450                 if (progress)
2451                         progress_state = start_progress(_("Compressing objects"),
2452                                                         nr_deltas);
2453                 QSORT(delta_list, n, type_size_sort);
2454                 ll_find_deltas(delta_list, n, window+1, depth, &nr_done);
2455                 stop_progress(&progress_state);
2456                 if (nr_done != nr_deltas)
2457                         die("inconsistency with delta count");
2458         }
2459         free(delta_list);
2460 }
2461
2462 static int git_pack_config(const char *k, const char *v, void *cb)
2463 {
2464         if (!strcmp(k, "pack.window")) {
2465                 window = git_config_int(k, v);
2466                 return 0;
2467         }
2468         if (!strcmp(k, "pack.windowmemory")) {
2469                 window_memory_limit = git_config_ulong(k, v);
2470                 return 0;
2471         }
2472         if (!strcmp(k, "pack.depth")) {
2473                 depth = git_config_int(k, v);
2474                 return 0;
2475         }
2476         if (!strcmp(k, "pack.deltacachesize")) {
2477                 max_delta_cache_size = git_config_int(k, v);
2478                 return 0;
2479         }
2480         if (!strcmp(k, "pack.deltacachelimit")) {
2481                 cache_max_small_delta_size = git_config_int(k, v);
2482                 return 0;
2483         }
2484         if (!strcmp(k, "pack.writebitmaphashcache")) {
2485                 if (git_config_bool(k, v))
2486                         write_bitmap_options |= BITMAP_OPT_HASH_CACHE;
2487                 else
2488                         write_bitmap_options &= ~BITMAP_OPT_HASH_CACHE;
                return 0;
2489         }
2490         if (!strcmp(k, "pack.usebitmaps")) {
2491                 use_bitmap_index_default = git_config_bool(k, v);
2492                 return 0;
2493         }
2494         if (!strcmp(k, "pack.threads")) {
2495                 delta_search_threads = git_config_int(k, v);
2496                 if (delta_search_threads < 0)
2497                         die("invalid number of threads specified (%d)",
2498                             delta_search_threads);
2499 #ifdef NO_PTHREADS
2500                 if (delta_search_threads != 1) {
2501                         warning("no threads support, ignoring %s", k);
2502                         delta_search_threads = 0;
2503                 }
2504 #endif
2505                 return 0;
2506         }
2507         if (!strcmp(k, "pack.indexversion")) {
2508                 pack_idx_opts.version = git_config_int(k, v);
2509                 if (pack_idx_opts.version > 2)
2510                         die("bad pack.indexversion=%"PRIu32,
2511                             pack_idx_opts.version);
2512                 return 0;
2513         }
2514         return git_default_config(k, v, cb);
2515 }
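
/*
 * The handlers above map one-to-one onto "pack.*" configuration; an
 * illustrative snippet (values are examples only):
 *
 *	[pack]
 *		window = 20
 *		depth = 50
 *		threads = 4
 *		deltaCacheSize = 256m
 *
 * Sizes accept the usual k/m/g suffixes via git_config_int() and
 * git_config_ulong().
 */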
2516
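/*
 * Read the object list that "git rev-list --objects" style callers
 * feed us: one "<oid> <name>" line per object to pack, and lines of
 * the form "-<oid>" naming edge objects to use as preferred bases.
 */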
2517 static void read_object_list_from_stdin(void)
2518 {
2519         char line[GIT_MAX_HEXSZ + 1 + PATH_MAX + 2];
2520         struct object_id oid;
2521         const char *p;
2522
2523         for (;;) {
2524                 if (!fgets(line, sizeof(line), stdin)) {
2525                         if (feof(stdin))
2526                                 break;
2527                         if (!ferror(stdin))
2528                                 die("fgets returned NULL, not EOF, not error!");
2529                         if (errno != EINTR)
2530                                 die_errno("fgets");
2531                         clearerr(stdin);
2532                         continue;
2533                 }
2534                 if (line[0] == '-') {
2535                         if (get_oid_hex(line+1, &oid))
2536                                 die("expected edge object ID, got garbage:\n %s",
2537                                     line);
2538                         add_preferred_base(&oid);
2539                         continue;
2540                 }
2541                 if (parse_oid_hex(line, &oid, &p))
2542                         die("expected object ID, got garbage:\n %s", line);
2543
2544                 add_preferred_base_object(p + 1);
2545                 add_object_entry(&oid, 0, p + 1, 0);
2546         }
2547 }
2548
2549 #define OBJECT_ADDED (1u<<20)
2550
2551 static void show_commit(struct commit *commit, void *data)
2552 {
2553         add_object_entry(&commit->object.oid, OBJ_COMMIT, NULL, 0);
2554         commit->object.flags |= OBJECT_ADDED;
2555
2556         if (write_bitmap_index)
2557                 index_commit_for_bitmap(commit);
2558 }
2559
2560 static void show_object(struct object *obj, const char *name, void *data)
2561 {
2562         add_preferred_base_object(name);
2563         add_object_entry(&obj->oid, obj->type, name, 0);
2564         obj->flags |= OBJECT_ADDED;
2565 }
2566
2567 static void show_object__ma_allow_any(struct object *obj, const char *name, void *data)
2568 {
2569         assert(arg_missing_action == MA_ALLOW_ANY);
2570
2571         /*
2572          * Quietly ignore ALL missing objects.  This avoids problems with
2573          * staging them now and getting an odd error later.
2574          */
2575         if (!has_object_file(&obj->oid))
2576                 return;
2577
2578         show_object(obj, name, data);
2579 }
2580
2581 static int option_parse_missing_action(const struct option *opt,
2582                                        const char *arg, int unset)
2583 {
2584         assert(arg);
2585         assert(!unset);
2586
2587         if (!strcmp(arg, "error")) {
2588                 arg_missing_action = MA_ERROR;
2589                 fn_show_object = show_object;
2590                 return 0;
2591         }
2592
2593         if (!strcmp(arg, "allow-any")) {
2594                 arg_missing_action = MA_ALLOW_ANY;
2595                 fn_show_object = show_object__ma_allow_any;
2596                 return 0;
2597         }
2598
2599         die(_("invalid value for --missing"));
2600         return 0;
2601 }
2602
2603 static void show_edge(struct commit *commit)
2604 {
2605         add_preferred_base(&commit->object.oid);
2606 }
2607
2608 struct in_pack_object {
2609         off_t offset;
2610         struct object *object;
2611 };
2612
2613 struct in_pack {
2614         unsigned int alloc;
2615         unsigned int nr;
2616         struct in_pack_object *array;
2617 };
2618
2619 static void mark_in_pack_object(struct object *object, struct packed_git *p, struct in_pack *in_pack)
2620 {
2621         in_pack->array[in_pack->nr].offset = find_pack_entry_one(object->oid.hash, p);
2622         in_pack->array[in_pack->nr].object = object;
2623         in_pack->nr++;
2624 }
2625
2626 /*
2627  * Compare the objects in offset order, to emulate the
2628  * "git rev-list --objects" output that produced the pack originally.
2629  */
2630 static int ofscmp(const void *a_, const void *b_)
2631 {
2632         struct in_pack_object *a = (struct in_pack_object *)a_;
2633         struct in_pack_object *b = (struct in_pack_object *)b_;
2634
2635         if (a->offset < b->offset)
2636                 return -1;
2637         else if (a->offset > b->offset)
2638                 return 1;
2639         else
2640                 return oidcmp(&a->object->oid, &b->object->oid);
2641 }
2642
2643 static void add_objects_in_unpacked_packs(struct rev_info *revs)
2644 {
2645         struct packed_git *p;
2646         struct in_pack in_pack;
2647         uint32_t i;
2648
2649         memset(&in_pack, 0, sizeof(in_pack));
2650
2651         for (p = packed_git; p; p = p->next) {
2652                 struct object_id oid;
2653                 struct object *o;
2654
2655                 if (!p->pack_local || p->pack_keep)
2656                         continue;
2657                 if (open_pack_index(p))
2658                         die("cannot open pack index");
2659
2660                 ALLOC_GROW(in_pack.array,
2661                            in_pack.nr + p->num_objects,
2662                            in_pack.alloc);
2663
2664                 for (i = 0; i < p->num_objects; i++) {
2665                         nth_packed_object_oid(&oid, p, i);
2666                         o = lookup_unknown_object(oid.hash);
2667                         if (!(o->flags & OBJECT_ADDED))
2668                                 mark_in_pack_object(o, p, &in_pack);
2669                         o->flags |= OBJECT_ADDED;
2670                 }
2671         }
2672
2673         if (in_pack.nr) {
2674                 QSORT(in_pack.array, in_pack.nr, ofscmp);
2675                 for (i = 0; i < in_pack.nr; i++) {
2676                         struct object *o = in_pack.array[i].object;
2677                         add_object_entry(&o->oid, o->type, "", 0);
2678                 }
2679         }
2680         free(in_pack.array);
2681 }
2682
2683 static int add_loose_object(const struct object_id *oid, const char *path,
2684                             void *data)
2685 {
2686         enum object_type type = sha1_object_info(oid->hash, NULL);
2687
2688         if (type < 0) {
2689                 warning("loose object at %s could not be examined", path);
2690                 return 0;
2691         }
2692
2693         add_object_entry(oid, type, "", 0);
2694         return 0;
2695 }
2696
2697 /*
2698  * We actually don't even have to worry about reachability here.
2699  * add_object_entry will weed out duplicates, so we just add every
2700  * loose object we find.
2701  */
2702 static void add_unreachable_loose_objects(void)
2703 {
2704         for_each_loose_file_in_objdir(get_object_directory(),
2705                                       add_loose_object,
2706                                       NULL, NULL, NULL);
2707 }
2708
2709 static int has_sha1_pack_kept_or_nonlocal(const struct object_id *oid)
2710 {
2711         static struct packed_git *last_found = (void *)1;
2712         struct packed_git *p;
2713
2714         p = (last_found != (void *)1) ? last_found : packed_git;
2715
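        /*
         * Start at the pack that answered last time, since hits tend to
         * cluster; once that is exhausted, scan the whole list from the
         * head, skipping last_found when we come across it again.
         */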
2716         while (p) {
2717                 if ((!p->pack_local || p->pack_keep) &&
2718                         find_pack_entry_one(oid->hash, p)) {
2719                         last_found = p;
2720                         return 1;
2721                 }
2722                 if (p == last_found)
2723                         p = packed_git;
2724                 else
2725                         p = p->next;
2726                 if (p == last_found)
2727                         p = p->next;
2728         }
2729         return 0;
2730 }
2731
2732 /*
2733  * Store a list of sha1s that should not be discarded
2734  * because they are either written too recently, or are
2735  * reachable from another object that was.
2736  *
2737  * This is filled by get_object_list.
2738  */
2739 static struct oid_array recent_objects;
2740
2741 static int loosened_object_can_be_discarded(const struct object_id *oid,
2742                                             timestamp_t mtime)
2743 {
2744         if (!unpack_unreachable_expiration)
2745                 return 0;
2746         if (mtime > unpack_unreachable_expiration)
2747                 return 0;
2748         if (oid_array_lookup(&recent_objects, oid) >= 0)
2749                 return 0;
2750         return 1;
2751 }
2752
static void loosen_unused_packed_objects(struct rev_info *revs)
{
        struct packed_git *p;
        uint32_t i;
        struct object_id oid;

        for (p = packed_git; p; p = p->next) {
                if (!p->pack_local || p->pack_keep)
                        continue;

                if (open_pack_index(p))
                        die("cannot open pack index");

                for (i = 0; i < p->num_objects; i++) {
                        nth_packed_object_oid(&oid, p, i);
                        if (!packlist_find(&to_pack, oid.hash, NULL) &&
                            !has_sha1_pack_kept_or_nonlocal(&oid) &&
                            !loosened_object_can_be_discarded(&oid, p->mtime))
                                if (force_object_loose(oid.hash, p->mtime))
                                        die("unable to force loose object");
                }
        }
}

/*
 * This tracks any options which pack-reuse code expects to be on, or which a
 * reader of the pack might not understand, and which would therefore prevent
 * blind reuse of what we have on disk.
 */
static int pack_options_allow_reuse(void)
{
        return pack_to_stdout &&
               allow_ofs_delta &&
               !ignore_packed_keep &&
               (!local || !have_non_local_packs) &&
               !incremental;
}

static int get_object_list_from_bitmap(struct rev_info *revs)
{
        if (prepare_bitmap_walk(revs) < 0)
                return -1;

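        /*
         * A zero return from reuse_partial_packfile_from_bitmap() means a
         * prefix of an existing packfile can be streamed out verbatim;
         * count those objects toward the progress total up front.
         */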
        if (pack_options_allow_reuse() &&
            !reuse_partial_packfile_from_bitmap(
                        &reuse_packfile,
                        &reuse_packfile_objects,
                        &reuse_packfile_offset)) {
                assert(reuse_packfile_objects);
                nr_result += reuse_packfile_objects;
                display_progress(progress_state, nr_result);
        }

        traverse_bitmap_commit_list(&add_object_entry_from_bitmap);
        return 0;
}

static void record_recent_object(struct object *obj,
                                 const char *name,
                                 void *data)
{
        oid_array_append(&recent_objects, &obj->oid);
}

static void record_recent_commit(struct commit *commit, void *data)
{
        oid_array_append(&recent_objects, &commit->object.oid);
}

static void get_object_list(int ac, const char **av)
{
        struct rev_info revs;
        char line[1000];
        int flags = 0;

        init_revisions(&revs, NULL);
        save_commit_buffer = 0;
        setup_revisions(ac, av, &revs, NULL);

        /* make sure shallows are read */
        is_repository_shallow();

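        /*
         * Read one revision argument per line from stdin, up to a blank
         * line or EOF; for example (hypothetical input):
         *
         *   refs/heads/master
         *   --not
         *   refs/heads/maint
         *
         * "--shallow <sha1>" lines register shallow boundaries instead
         * of being treated as revision arguments.
         */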
        while (fgets(line, sizeof(line), stdin) != NULL) {
                int len = strlen(line);
                if (len && line[len - 1] == '\n')
                        line[--len] = 0;
                if (!len)
                        break;
                if (*line == '-') {
                        if (!strcmp(line, "--not")) {
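                                /*
                                 * A bitmap index describes the full
                                 * closure of the refs being packed;
                                 * once negated revs appear the result
                                 * is partial, so do not write one.
                                 */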
                                flags ^= UNINTERESTING;
                                write_bitmap_index = 0;
                                continue;
                        }
                        if (starts_with(line, "--shallow ")) {
                                struct object_id oid;
                                if (get_oid_hex(line + 10, &oid))
                                        die("not an SHA-1 '%s'", line + 10);
                                register_shallow(&oid);
                                use_bitmap_index = 0;
                                continue;
                        }
                        die("not a rev '%s'", line);
                }
                if (handle_revision_arg(line, &revs, flags, REVARG_CANNOT_BE_FILENAME))
                        die("bad revision '%s'", line);
        }

        if (use_bitmap_index && !get_object_list_from_bitmap(&revs))
                return;

        if (prepare_revision_walk(&revs))
                die("revision walk setup failed");
        mark_edges_uninteresting(&revs, show_edge);

        if (!fn_show_object)
                fn_show_object = show_object;
        traverse_commit_list_filtered(&filter_options, &revs,
                                      show_commit, fn_show_object, NULL,
                                      NULL);

        if (unpack_unreachable_expiration) {
                revs.ignore_missing_links = 1;
                if (add_unseen_recent_objects_to_traversal(&revs,
                                unpack_unreachable_expiration))
                        die("unable to add recent objects");
                if (prepare_revision_walk(&revs))
                        die("revision walk setup failed");
                traverse_commit_list(&revs, record_recent_commit,
                                     record_recent_object, NULL);
        }

        if (keep_unreachable)
                add_objects_in_unpacked_packs(&revs);
        if (pack_loose_unreachable)
                add_unreachable_loose_objects();
        if (unpack_unreachable)
                loosen_unused_packed_objects(&revs);

        oid_array_clear(&recent_objects);
}

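/*
 * Parse --index-version=<version>[,<off32-limit>].  For example, a
 * hypothetical "--index-version=2,0x40000000" asks for a v2 index in
 * which entries at pack offsets of 1 GiB and beyond use the 64-bit
 * offset table; the limit must fit in 31 bits.
 */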
static int option_parse_index_version(const struct option *opt,
                                      const char *arg, int unset)
{
        char *c;
        const char *val = arg;
        pack_idx_opts.version = strtoul(val, &c, 10);
        if (pack_idx_opts.version > 2)
                die(_("unsupported index version %s"), val);
        if (*c == ',' && c[1])
                pack_idx_opts.off32_limit = strtoul(c+1, &c, 0);
        if (*c || pack_idx_opts.off32_limit & 0x80000000)
                die(_("bad index version '%s'"), val);
        return 0;
}

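/*
 * --unpack-unreachable takes an optional approxidate argument, e.g. a
 * hypothetical "--unpack-unreachable=2.weeks.ago"; without one, every
 * unreachable object is loosened regardless of age.
 */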
static int option_parse_unpack_unreachable(const struct option *opt,
                                           const char *arg, int unset)
{
        if (unset) {
                unpack_unreachable = 0;
                unpack_unreachable_expiration = 0;
        } else {
                unpack_unreachable = 1;
                if (arg)
                        unpack_unreachable_expiration = approxidate(arg);
        }
        return 0;
}

int cmd_pack_objects(int argc, const char **argv, const char *prefix)
{
        int use_internal_rev_list = 0;
        int thin = 0;
        int shallow = 0;
        int all_progress_implied = 0;
        struct argv_array rp = ARGV_ARRAY_INIT;
        int rev_list_unpacked = 0, rev_list_all = 0, rev_list_reflog = 0;
        int rev_list_index = 0;
        struct option pack_objects_options[] = {
                OPT_SET_INT('q', "quiet", &progress,
                            N_("do not show progress meter"), 0),
                OPT_SET_INT(0, "progress", &progress,
                            N_("show progress meter"), 1),
                OPT_SET_INT(0, "all-progress", &progress,
                            N_("show progress meter during object writing phase"), 2),
                OPT_BOOL(0, "all-progress-implied",
                         &all_progress_implied,
                         N_("similar to --all-progress when progress meter is shown")),
                { OPTION_CALLBACK, 0, "index-version", NULL, N_("version[,offset]"),
                  N_("write the pack index file in the specified idx format version"),
                  0, option_parse_index_version },
                OPT_MAGNITUDE(0, "max-pack-size", &pack_size_limit,
                              N_("maximum size of each output pack file")),
                OPT_BOOL(0, "local", &local,
                         N_("ignore borrowed objects from alternate object store")),
                OPT_BOOL(0, "incremental", &incremental,
                         N_("ignore packed objects")),
                OPT_INTEGER(0, "window", &window,
                            N_("limit pack window by objects")),
                OPT_MAGNITUDE(0, "window-memory", &window_memory_limit,
                              N_("limit pack window by memory in addition to object limit")),
                OPT_INTEGER(0, "depth", &depth,
                            N_("maximum length of delta chain allowed in the resulting pack")),
                OPT_BOOL(0, "reuse-delta", &reuse_delta,
                         N_("reuse existing deltas")),
                OPT_BOOL(0, "reuse-object", &reuse_object,
                         N_("reuse existing objects")),
                OPT_BOOL(0, "delta-base-offset", &allow_ofs_delta,
                         N_("use OFS_DELTA objects")),
                OPT_INTEGER(0, "threads", &delta_search_threads,
                            N_("use threads when searching for best delta matches")),
                OPT_BOOL(0, "non-empty", &non_empty,
                         N_("do not create an empty pack output")),
                OPT_BOOL(0, "revs", &use_internal_rev_list,
                         N_("read revision arguments from standard input")),
                { OPTION_SET_INT, 0, "unpacked", &rev_list_unpacked, NULL,
                  N_("limit the objects to those that are not yet packed"),
                  PARSE_OPT_NOARG | PARSE_OPT_NONEG, NULL, 1 },
                { OPTION_SET_INT, 0, "all", &rev_list_all, NULL,
                  N_("include objects reachable from any reference"),
                  PARSE_OPT_NOARG | PARSE_OPT_NONEG, NULL, 1 },
                { OPTION_SET_INT, 0, "reflog", &rev_list_reflog, NULL,
                  N_("include objects referred by reflog entries"),
                  PARSE_OPT_NOARG | PARSE_OPT_NONEG, NULL, 1 },
                { OPTION_SET_INT, 0, "indexed-objects", &rev_list_index, NULL,
                  N_("include objects referred to by the index"),
                  PARSE_OPT_NOARG | PARSE_OPT_NONEG, NULL, 1 },
                OPT_BOOL(0, "stdout", &pack_to_stdout,
                         N_("output pack to stdout")),
                OPT_BOOL(0, "include-tag", &include_tag,
                         N_("include tag objects that refer to objects to be packed")),
                OPT_BOOL(0, "keep-unreachable", &keep_unreachable,
                         N_("keep unreachable objects")),
                OPT_BOOL(0, "pack-loose-unreachable", &pack_loose_unreachable,
                         N_("pack loose unreachable objects")),
                { OPTION_CALLBACK, 0, "unpack-unreachable", NULL, N_("time"),
                  N_("unpack unreachable objects newer than <time>"),
                  PARSE_OPT_OPTARG, option_parse_unpack_unreachable },
                OPT_BOOL(0, "thin", &thin,
                         N_("create thin packs")),
                OPT_BOOL(0, "shallow", &shallow,
                         N_("create packs suitable for shallow fetches")),
                OPT_BOOL(0, "honor-pack-keep", &ignore_packed_keep,
                         N_("ignore packs that have companion .keep file")),
                OPT_INTEGER(0, "compression", &pack_compression_level,
                            N_("pack compression level")),
                OPT_SET_INT(0, "keep-true-parents", &grafts_replace_parents,
                            N_("do not hide commits by grafts"), 0),
                OPT_BOOL(0, "use-bitmap-index", &use_bitmap_index,
                         N_("use a bitmap index if available to speed up counting objects")),
                OPT_BOOL(0, "write-bitmap-index", &write_bitmap_index,
                         N_("write a bitmap index together with the pack index")),
                OPT_PARSE_LIST_OBJECTS_FILTER(&filter_options),
                { OPTION_CALLBACK, 0, "missing", NULL, N_("action"),
                  N_("handling for missing objects"), PARSE_OPT_NONEG,
                  option_parse_missing_action },
                OPT_END(),
        };

        check_replace_refs = 0;

        reset_pack_idx_option(&pack_idx_opts);
        git_config(git_pack_config, NULL);

        progress = isatty(2);
        argc = parse_options(argc, argv, prefix, pack_objects_options,
                             pack_usage, 0);

        if (argc) {
                base_name = argv[0];
                argc--;
        }
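        /*
         * Exactly one of --stdout and <base-name> may be given, and no
         * other non-option arguments can remain; anything else is a
         * usage error.
         */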
        if (pack_to_stdout != !base_name || argc)
                usage_with_options(pack_usage, pack_objects_options);

        argv_array_push(&rp, "pack-objects");
        if (thin) {
                use_internal_rev_list = 1;
                argv_array_push(&rp, shallow
                                ? "--objects-edge-aggressive"
                                : "--objects-edge");
        } else
                argv_array_push(&rp, "--objects");

        if (rev_list_all) {
                use_internal_rev_list = 1;
                argv_array_push(&rp, "--all");
        }
        if (rev_list_reflog) {
                use_internal_rev_list = 1;
                argv_array_push(&rp, "--reflog");
        }
        if (rev_list_index) {
                use_internal_rev_list = 1;
                argv_array_push(&rp, "--indexed-objects");
        }
        if (rev_list_unpacked) {
                use_internal_rev_list = 1;
                argv_array_push(&rp, "--unpacked");
        }

        if (!reuse_object)
                reuse_delta = 0;
        if (pack_compression_level == -1)
                pack_compression_level = Z_DEFAULT_COMPRESSION;
        else if (pack_compression_level < 0 || pack_compression_level > Z_BEST_COMPRESSION)
                die("bad pack compression level %d", pack_compression_level);

        if (!delta_search_threads)      /* --threads=0 means autodetect */
                delta_search_threads = online_cpus();

#ifdef NO_PTHREADS
        if (delta_search_threads != 1)
                warning("no threads support, ignoring --threads");
#endif
        if (!pack_to_stdout && !pack_size_limit)
                pack_size_limit = pack_size_limit_cfg;
        if (pack_to_stdout && pack_size_limit)
                die("--max-pack-size cannot be used to build a pack for transfer.");
        if (pack_size_limit && pack_size_limit < 1024*1024) {
                warning("minimum pack size limit is 1 MiB");
                pack_size_limit = 1024*1024;
        }

        if (!pack_to_stdout && thin)
                die("--thin cannot be used to build an indexable pack.");

        if (keep_unreachable && unpack_unreachable)
                die("--keep-unreachable and --unpack-unreachable are incompatible.");
        if (!rev_list_all || !rev_list_reflog || !rev_list_index)
                unpack_unreachable_expiration = 0;

        if (filter_options.choice) {
                if (!pack_to_stdout)
                        die("cannot use --filter without --stdout.");
                use_bitmap_index = 0;
        }

        /*
         * "soft" reasons not to use bitmaps - for an on-disk repack we
         * want by default
         *
         * - to produce a good pack (with a bitmap index, not-yet-packed
         *   objects are packed in a suboptimal order).
         *
         * - to use the more robust pack-generation codepath (avoiding
         *   possible bugs in the bitmap code and possible bitmap index
         *   corruption).
         */
        if (!pack_to_stdout)
                use_bitmap_index_default = 0;

        if (use_bitmap_index < 0)
                use_bitmap_index = use_bitmap_index_default;

        /* "hard" reasons not to use bitmaps; these just won't work at all */
        if (!use_internal_rev_list || (!pack_to_stdout && write_bitmap_index) || is_repository_shallow())
                use_bitmap_index = 0;

        if (pack_to_stdout || !rev_list_all)
                write_bitmap_index = 0;

        if (progress && all_progress_implied)
                progress = 2;

        prepare_packed_git();
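        /*
         * --honor-pack-keep has no effect when no local pack actually
         * has a .keep file; dropping the flag in that case also keeps
         * it from needlessly disqualifying on-disk reuse in
         * pack_options_allow_reuse().
         */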
        if (ignore_packed_keep) {
                struct packed_git *p;
                for (p = packed_git; p; p = p->next)
                        if (p->pack_local && p->pack_keep)
                                break;
                if (!p) /* no keep-able packs found */
                        ignore_packed_keep = 0;
        }
        if (local) {
                /*
                 * unlike ignore_packed_keep above, we do not want to
                 * unset "local" based on looking at packs, as it
                 * also covers non-local objects
                 */
                struct packed_git *p;
                for (p = packed_git; p; p = p->next) {
                        if (!p->pack_local) {
                                have_non_local_packs = 1;
                                break;
                        }
                }
        }

        if (progress)
                progress_state = start_progress(_("Counting objects"), 0);
        if (!use_internal_rev_list)
                read_object_list_from_stdin();
        else {
                get_object_list(rp.argc, rp.argv);
                argv_array_clear(&rp);
        }
        cleanup_preferred_base();
        if (include_tag && nr_result)
                for_each_ref(add_ref_tag, NULL);
        stop_progress(&progress_state);

        if (non_empty && !nr_result)
                return 0;
        if (nr_result)
                prepare_pack(window, depth);
        write_pack_file();
        if (progress)
                fprintf(stderr, "Total %"PRIu32" (delta %"PRIu32"),"
                        " reused %"PRIu32" (delta %"PRIu32")\n",
                        written, written_delta, reused, reused_delta);
        return 0;
}