git-compat-util.h: add FLOSS headers for HPE NonStop
[git] / bulk-checkin.c
1 /*
2  * Copyright (c) 2011, Google Inc.
3  */
4 #include "cache.h"
5 #include "bulk-checkin.h"
6 #include "repository.h"
7 #include "csum-file.h"
8 #include "pack.h"
9 #include "strbuf.h"
10 #include "packfile.h"
11 #include "object-store.h"
12
/*
 * All-in-one state for the streaming bulk check-in machinery: one
 * global transaction that accumulates objects into a single temporary
 * packfile until it is flushed by finish_bulk_checkin().
 */
static struct bulk_checkin_state {
	unsigned plugged:1;	/* 1 while a bulk transaction is open (see plug/unplug) */

	char *pack_tmp_name;	/* path of the temporary packfile being written */
	struct hashfile *f;	/* open hashfile for that packfile; NULL when idle */
	off_t offset;		/* current write offset into the packfile */
	struct pack_idx_option pack_idx_opts;	/* options used when writing the .idx */

	struct pack_idx_entry **written;	/* objects written so far (for the index) */
	uint32_t alloc_written;	/* allocated size of the written[] array */
	uint32_t nr_written;	/* number of entries in written[] in use */
} state;
25
26 static void finish_bulk_checkin(struct bulk_checkin_state *state)
27 {
28         struct object_id oid;
29         struct strbuf packname = STRBUF_INIT;
30         int i;
31
32         if (!state->f)
33                 return;
34
35         if (state->nr_written == 0) {
36                 close(state->f->fd);
37                 unlink(state->pack_tmp_name);
38                 goto clear_exit;
39         } else if (state->nr_written == 1) {
40                 finalize_hashfile(state->f, oid.hash, CSUM_HASH_IN_STREAM | CSUM_FSYNC | CSUM_CLOSE);
41         } else {
42                 int fd = finalize_hashfile(state->f, oid.hash, 0);
43                 fixup_pack_header_footer(fd, oid.hash, state->pack_tmp_name,
44                                          state->nr_written, oid.hash,
45                                          state->offset);
46                 close(fd);
47         }
48
49         strbuf_addf(&packname, "%s/pack/pack-", get_object_directory());
50         finish_tmp_packfile(&packname, state->pack_tmp_name,
51                             state->written, state->nr_written,
52                             &state->pack_idx_opts, oid.hash);
53         for (i = 0; i < state->nr_written; i++)
54                 free(state->written[i]);
55
56 clear_exit:
57         free(state->written);
58         memset(state, 0, sizeof(*state));
59
60         strbuf_release(&packname);
61         /* Make objects we just wrote available to ourselves */
62         reprepare_packed_git(the_repository);
63 }
64
65 static int already_written(struct bulk_checkin_state *state, struct object_id *oid)
66 {
67         int i;
68
69         /* The object may already exist in the repository */
70         if (has_sha1_file(oid->hash))
71                 return 1;
72
73         /* Might want to keep the list sorted */
74         for (i = 0; i < state->nr_written; i++)
75                 if (oideq(&state->written[i]->oid, oid))
76                         return 1;
77
78         /* This is a new object we need to keep */
79         return 0;
80 }
81
/*
 * Read the contents from fd for size bytes, streaming it to the
 * packfile in state while updating the hash in ctx. Signal a failure
 * by returning a negative value when the resulting pack would exceed
 * the pack size limit and this is not the first object in the pack,
 * so that the caller can discard what we wrote from the current pack
 * by truncating it and opening a new one. The caller will then call
 * us again after rewinding the input fd.
 *
 * The already_hashed_to pointer is kept untouched by the caller to
 * make sure we do not hash the same byte when we are called
 * again. This way, the caller does not have to checkpoint its hash
 * status before calling us just in case we ask it to call us again
 * with a new pack.
 */
static int stream_to_pack(struct bulk_checkin_state *state,
			  git_hash_ctx *ctx, off_t *already_hashed_to,
			  int fd, size_t size, enum object_type type,
			  const char *path, unsigned flags)
{
	git_zstream s;
	unsigned char obuf[16384];	/* deflated output staging buffer */
	unsigned hdrlen;
	int status = Z_OK;
	int write_object = (flags & HASH_WRITE_OBJECT);
	off_t offset = 0;	/* how many input bytes we have consumed from fd */

	git_deflate_init(&s, pack_compression_level);

	/* The pack object header goes out uncompressed, ahead of the deflated data. */
	hdrlen = encode_in_pack_object_header(obuf, sizeof(obuf), type, size);
	s.next_out = obuf + hdrlen;
	s.avail_out = sizeof(obuf) - hdrlen;

	while (status != Z_STREAM_END) {
		unsigned char ibuf[16384];	/* raw input staging buffer */

		/* Refill the input side of the zlib stream when it runs dry. */
		if (size && !s.avail_in) {
			ssize_t rsize = size < sizeof(ibuf) ? size : sizeof(ibuf);
			ssize_t read_result = read_in_full(fd, ibuf, rsize);
			if (read_result < 0)
				die_errno("failed to read from '%s'", path);
			if (read_result != rsize)
				die("failed to read %d bytes from '%s'",
				    (int)rsize, path);
			offset += rsize;
			/*
			 * Only hash bytes we have not hashed on a previous
			 * attempt (we may be re-reading after a pack split;
			 * see the function comment above).
			 */
			if (*already_hashed_to < offset) {
				size_t hsize = offset - *already_hashed_to;
				if (rsize < hsize)
					hsize = rsize;
				if (hsize)
					the_hash_algo->update_fn(ctx, ibuf, hsize);
				*already_hashed_to = offset;
			}
			s.next_in = ibuf;
			s.avail_in = rsize;
			size -= rsize;
		}

		/* Ask zlib to finish once the last input chunk has been fed. */
		status = git_deflate(&s, size ? 0 : Z_FINISH);

		/* Flush the output buffer when it fills up, or at stream end. */
		if (!s.avail_out || status == Z_STREAM_END) {
			if (write_object) {
				size_t written = s.next_out - obuf;

				/* would we bust the size limit? */
				if (state->nr_written &&
				    pack_size_limit_cfg &&
				    pack_size_limit_cfg < state->offset + written) {
					git_deflate_abort(&s);
					return -1;
				}

				hashwrite(state->f, obuf, written);
				state->offset += written;
			}
			s.next_out = obuf;
			s.avail_out = sizeof(obuf);
		}

		switch (status) {
		case Z_OK:
		case Z_BUF_ERROR:
		case Z_STREAM_END:
			continue;
		default:
			die("unexpected deflate failure: %d", status);
		}
	}
	git_deflate_end(&s);
	return 0;
}
173
174 /* Lazily create backing packfile for the state */
175 static void prepare_to_stream(struct bulk_checkin_state *state,
176                               unsigned flags)
177 {
178         if (!(flags & HASH_WRITE_OBJECT) || state->f)
179                 return;
180
181         state->f = create_tmp_packfile(&state->pack_tmp_name);
182         reset_pack_idx_option(&state->pack_idx_opts);
183
184         /* Pretend we are going to write only one object */
185         state->offset = write_pack_header(state->f, 1);
186         if (!state->offset)
187                 die_errno("unable to write pack header");
188 }
189
/*
 * Hash and (when HASH_WRITE_OBJECT is set) stream one object of the
 * given type/size, read from fd, into the bulk-checkin packfile.
 * Stores the resulting object name in result_oid.  Returns 0 on
 * success, negative on error.
 *
 * If writing the object would push the current pack over the
 * configured size limit, the partial write is truncated away, the
 * current pack is finished, the input fd is rewound, and the object
 * is streamed again into a fresh pack.
 */
static int deflate_to_pack(struct bulk_checkin_state *state,
			   struct object_id *result_oid,
			   int fd, size_t size,
			   enum object_type type, const char *path,
			   unsigned flags)
{
	off_t seekback, already_hashed_to;
	git_hash_ctx ctx;
	unsigned char obuf[16384];
	unsigned header_len;
	struct hashfile_checkpoint checkpoint;
	struct pack_idx_entry *idx = NULL;

	/* Remember where fd is, so we can rewind on a pack split. */
	seekback = lseek(fd, 0, SEEK_CUR);
	if (seekback == (off_t) -1)
		return error("cannot find the current offset");

	/* The object name hashes the loose-object header, not the pack header. */
	header_len = xsnprintf((char *)obuf, sizeof(obuf), "%s %" PRIuMAX,
			       type_name(type), (uintmax_t)size) + 1;
	the_hash_algo->init_fn(&ctx);
	the_hash_algo->update_fn(&ctx, obuf, header_len);

	/* Note: idx is non-NULL when we are writing */
	if ((flags & HASH_WRITE_OBJECT) != 0)
		idx = xcalloc(1, sizeof(*idx));

	already_hashed_to = 0;

	while (1) {
		prepare_to_stream(state, flags);
		if (idx) {
			/* Checkpoint so we can truncate a too-big object away. */
			hashfile_checkpoint(state->f, &checkpoint);
			idx->offset = state->offset;
			crc32_begin(state->f);
		}
		if (!stream_to_pack(state, &ctx, &already_hashed_to,
				    fd, size, type, path, flags))
			break;
		/*
		 * Writing this object to the current pack will make
		 * it too big; we need to truncate it, start a new
		 * pack, and write into it.
		 */
		if (!idx)
			BUG("should not happen");
		hashfile_truncate(state->f, &checkpoint);
		state->offset = checkpoint.offset;
		finish_bulk_checkin(state);
		if (lseek(fd, seekback, SEEK_SET) == (off_t) -1)
			return error("cannot seek back");
	}
	the_hash_algo->final_fn(result_oid->hash, &ctx);
	if (!idx)
		return 0;

	idx->crc32 = crc32_end(state->f);
	if (already_written(state, result_oid)) {
		/* Duplicate object: drop the bytes we just streamed. */
		hashfile_truncate(state->f, &checkpoint);
		state->offset = checkpoint.offset;
		free(idx);
	} else {
		/* New object: record it for the pack index. */
		oidcpy(&idx->oid, result_oid);
		ALLOC_GROW(state->written,
			   state->nr_written + 1,
			   state->alloc_written);
		state->written[state->nr_written++] = idx;
	}
	return 0;
}
259
260 int index_bulk_checkin(struct object_id *oid,
261                        int fd, size_t size, enum object_type type,
262                        const char *path, unsigned flags)
263 {
264         int status = deflate_to_pack(&state, oid, fd, size, type,
265                                      path, flags);
266         if (!state.plugged)
267                 finish_bulk_checkin(&state);
268         return status;
269 }
270
/*
 * Open a bulk-checkin transaction: subsequent index_bulk_checkin()
 * calls accumulate into one packfile instead of flushing per object,
 * until unplug_bulk_checkin() is called.
 */
void plug_bulk_checkin(void)
{
	state.plugged = 1;
}
275
276 void unplug_bulk_checkin(void)
277 {
278         state.plugged = 0;
279         if (state.f)
280                 finish_bulk_checkin(&state);
281 }