/src/git/builtin/unpack-objects.c
Line | Count | Source |
1 | | #include "builtin.h" |
2 | | #include "bulk-checkin.h" |
3 | | #include "config.h" |
4 | | #include "environment.h" |
5 | | #include "gettext.h" |
6 | | #include "git-zlib.h" |
7 | | #include "hex.h" |
8 | | #include "object-store-ll.h" |
9 | | #include "object.h" |
10 | | #include "delta.h" |
11 | | #include "pack.h" |
12 | | #include "blob.h" |
13 | | #include "replace-object.h" |
14 | | #include "strbuf.h" |
15 | | #include "progress.h" |
16 | | #include "decorate.h" |
17 | | #include "fsck.h" |
18 | | |
19 | | static int dry_run, quiet, recover, has_errors, strict; |
20 | | static const char unpack_usage[] = "git unpack-objects [-n] [-q] [-r] [--strict]"; |
21 | | |
22 | | /* We always read in 4kB chunks. */ |
23 | | static unsigned char buffer[4096]; |
24 | | static unsigned int offset, len; |
25 | | static off_t consumed_bytes; |
26 | | static off_t max_input_size; |
27 | | static git_hash_ctx ctx; |
28 | | static struct fsck_options fsck_options = FSCK_OPTIONS_STRICT; |
29 | | static struct progress *progress; |
30 | | |
31 | | /* |
32 | | * When running under --strict mode, objects whose reachability is |
33 | | * suspect are kept in core without being written to the object |
34 | | * store. |
35 | | */ |
36 | | struct obj_buffer { |
37 | | char *buffer; |
38 | | unsigned long size; |
39 | | }; |
40 | | |
41 | | static struct decoration obj_decorate; |
42 | | |
43 | | static struct obj_buffer *lookup_object_buffer(struct object *base) |
44 | 0 | { |
45 | 0 | return lookup_decoration(&obj_decorate, base); |
46 | 0 | } |
47 | | |
48 | | static void add_object_buffer(struct object *object, char *buffer, unsigned long size) |
49 | 0 | { |
50 | 0 | struct obj_buffer *obj; |
51 | 0 | CALLOC_ARRAY(obj, 1); |
52 | 0 | obj->buffer = buffer; |
53 | 0 | obj->size = size; |
54 | 0 | if (add_decoration(&obj_decorate, object, obj)) |
55 | 0 | die("object %s tried to add buffer twice!", oid_to_hex(&object->oid)); |
56 | 0 | } |
57 | | |
58 | | /* |
59 | | * Make sure at least "min" bytes are available in the buffer, and |
60 | | * return the pointer to the buffer. |
61 | | */ |
62 | | static void *fill(int min) |
63 | 0 | { |
64 | 0 | if (min <= len) |
65 | 0 | return buffer + offset; |
66 | 0 | if (min > sizeof(buffer)) |
67 | 0 | die("cannot fill %d bytes", min); |
68 | 0 | if (offset) { |
69 | 0 | the_hash_algo->update_fn(&ctx, buffer, offset); |
70 | 0 | memmove(buffer, buffer + offset, len); |
71 | 0 | offset = 0; |
72 | 0 | } |
73 | 0 | do { |
74 | 0 | ssize_t ret = xread(0, buffer + len, sizeof(buffer) - len); |
75 | 0 | if (ret <= 0) { |
76 | 0 | if (!ret) |
77 | 0 | die("early EOF"); |
78 | 0 | die_errno("read error on input"); |
79 | 0 | } |
80 | 0 | len += ret; |
81 | 0 | } while (len < min); |
82 | 0 | return buffer; |
83 | 0 | } |
84 | | |
85 | | static void use(int bytes) |
86 | 0 | { |
87 | 0 | if (bytes > len) |
88 | 0 | die("used more bytes than were available"); |
89 | 0 | len -= bytes; |
90 | 0 | offset += bytes; |
91 | | |
92 | | /* make sure off_t is sufficiently large not to wrap */ |
93 | 0 | if (signed_add_overflows(consumed_bytes, bytes)) |
94 | 0 | die("pack too large for current definition of off_t"); |
95 | 0 | consumed_bytes += bytes; |
96 | 0 | if (max_input_size && consumed_bytes > max_input_size) |
97 | 0 | die(_("pack exceeds maximum allowed size")); |
98 | 0 | display_throughput(progress, consumed_bytes); |
99 | 0 | } |
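/*
 * Added illustration (not part of the original file): the calling
 * convention for the fill()/use() pair above, as used throughout the
 * parsing code below.  fill(n) guarantees at least n contiguous
 * buffered bytes and use(n) marks them consumed.  The helper name
 * read_one_byte is hypothetical.
 */
static unsigned char read_one_byte(void)
{
	unsigned char *p = fill(1);	/* make sure at least one byte is buffered */
	unsigned char c = *p;		/* peek at it without consuming */
	use(1);				/* advance offset/len and consumed_bytes */
	return c;
}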
100 | | |
101 | | /* |
102 | | * Decompress a zstream from the standard input into a newly |
103 | | * allocated buffer of the specified size and return the buffer. |
104 | | * The caller is responsible for freeing the returned buffer. |
105 | | * |
106 | | * In dry_run mode, however, get_data() is used only to check the |
107 | | * integrity of the data, and the returned buffer is not used at all. |
108 | | * Therefore, in dry_run mode, get_data() releases the small |
109 | | * allocated buffer that is reused to hold temporary zstream output |
110 | | * and returns NULL instead of returning garbage data. |
111 | | */ |
112 | | static void *get_data(unsigned long size) |
113 | 0 | { |
114 | 0 | git_zstream stream; |
115 | 0 | unsigned long bufsize = dry_run && size > 8192 ? 8192 : size; |
116 | 0 | void *buf = xmallocz(bufsize); |
117 | |
118 | 0 | memset(&stream, 0, sizeof(stream)); |
119 | |
120 | 0 | stream.next_out = buf; |
121 | 0 | stream.avail_out = bufsize; |
122 | 0 | stream.next_in = fill(1); |
123 | 0 | stream.avail_in = len; |
124 | 0 | git_inflate_init(&stream); |
125 | |
126 | 0 | for (;;) { |
127 | 0 | int ret = git_inflate(&stream, 0); |
128 | 0 | use(len - stream.avail_in); |
129 | 0 | if (stream.total_out == size && ret == Z_STREAM_END) |
130 | 0 | break; |
131 | 0 | if (ret != Z_OK) { |
132 | 0 | error("inflate returned %d", ret); |
133 | 0 | FREE_AND_NULL(buf); |
134 | 0 | if (!recover) |
135 | 0 | exit(1); |
136 | 0 | has_errors = 1; |
137 | 0 | break; |
138 | 0 | } |
139 | 0 | stream.next_in = fill(1); |
140 | 0 | stream.avail_in = len; |
141 | 0 | if (dry_run) { |
142 | | /* reuse the buffer in dry_run mode */ |
143 | 0 | stream.next_out = buf; |
144 | 0 | stream.avail_out = bufsize > size - stream.total_out ? |
145 | 0 | size - stream.total_out : |
146 | 0 | bufsize; |
147 | 0 | } |
148 | 0 | } |
149 | 0 | git_inflate_end(&stream); |
150 | 0 | if (dry_run) |
151 | 0 | FREE_AND_NULL(buf); |
152 | 0 | return buf; |
153 | 0 | } |
154 | | |
155 | | struct delta_info { |
156 | | struct object_id base_oid; |
157 | | unsigned nr; |
158 | | off_t base_offset; |
159 | | unsigned long size; |
160 | | void *delta; |
161 | | struct delta_info *next; |
162 | | }; |
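/*
 * Note (added for illustration): a delta_info entry records a delta
 * whose base object has not been seen yet.  For OBJ_REF_DELTA entries
 * the base is identified by base_oid (base_offset is 0); for
 * OBJ_OFS_DELTA entries it is identified by base_offset (base_oid is
 * the null oid).  See unpack_delta_entry() below.
 */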
163 | | |
164 | | static struct delta_info *delta_list; |
165 | | |
166 | | static void add_delta_to_list(unsigned nr, const struct object_id *base_oid, |
167 | | off_t base_offset, |
168 | | void *delta, unsigned long size) |
169 | 0 | { |
170 | 0 | struct delta_info *info = xmalloc(sizeof(*info)); |
171 | |
172 | 0 | oidcpy(&info->base_oid, base_oid); |
173 | 0 | info->base_offset = base_offset; |
174 | 0 | info->size = size; |
175 | 0 | info->delta = delta; |
176 | 0 | info->nr = nr; |
177 | 0 | info->next = delta_list; |
178 | 0 | delta_list = info; |
179 | 0 | } |
180 | | |
181 | | struct obj_info { |
182 | | off_t offset; |
183 | | struct object_id oid; |
184 | | struct object *obj; |
185 | | }; |
186 | | |
187 | | /* Remember to update object flag allocation in object.h */ |
188 | 0 | #define FLAG_OPEN (1u<<20) |
189 | 0 | #define FLAG_WRITTEN (1u<<21) |
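/*
 * Note (added for illustration): FLAG_OPEN marks an object whose
 * contents are only buffered in core (the --strict path of
 * write_object()), while FLAG_WRITTEN marks an object that is already
 * present in the object store or has been written out and verified.
 */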
190 | | |
191 | | static struct obj_info *obj_list; |
192 | | static unsigned nr_objects; |
193 | | |
194 | | /* |
195 | | * Called only from check_object() after it has verified this |
196 | | * object is OK. |
197 | | */ |
198 | | static void write_cached_object(struct object *obj, struct obj_buffer *obj_buf) |
199 | 0 | { |
200 | 0 | struct object_id oid; |
201 | |
202 | 0 | if (write_object_file(obj_buf->buffer, obj_buf->size, |
203 | 0 | obj->type, &oid) < 0) |
204 | 0 | die("failed to write object %s", oid_to_hex(&obj->oid)); |
205 | 0 | obj->flags |= FLAG_WRITTEN; |
206 | 0 | } |
207 | | |
208 | | /* |
209 | | * At the very end of processing, write_rest() scans the objects |
210 | | * that have reachability requirements and calls this function, which |
211 | | * recursively verifies each one's reachability and validity and writes it out. |
212 | | */ |
213 | | static int check_object(struct object *obj, enum object_type type, |
214 | | void *data UNUSED, |
215 | | struct fsck_options *options UNUSED) |
216 | 0 | { |
217 | 0 | struct obj_buffer *obj_buf; |
218 | |
219 | 0 | if (!obj) |
220 | 0 | return 1; |
221 | | |
222 | 0 | if (obj->flags & FLAG_WRITTEN) |
223 | 0 | return 0; |
224 | | |
225 | 0 | if (type != OBJ_ANY && obj->type != type) |
226 | 0 | die("object type mismatch"); |
227 | | |
228 | 0 | if (!(obj->flags & FLAG_OPEN)) { |
229 | 0 | unsigned long size; |
230 | 0 | int type = oid_object_info(the_repository, &obj->oid, &size); |
231 | 0 | if (type != obj->type || type <= 0) |
232 | 0 | die("object of unexpected type"); |
233 | 0 | obj->flags |= FLAG_WRITTEN; |
234 | 0 | return 0; |
235 | 0 | } |
236 | | |
237 | 0 | obj_buf = lookup_object_buffer(obj); |
238 | 0 | if (!obj_buf) |
239 | 0 | die("Whoops! Cannot find object '%s'", oid_to_hex(&obj->oid)); |
240 | 0 | if (fsck_object(obj, obj_buf->buffer, obj_buf->size, &fsck_options)) |
241 | 0 | die("fsck error in packed object"); |
242 | 0 | fsck_options.walk = check_object; |
243 | 0 | if (fsck_walk(obj, NULL, &fsck_options)) |
244 | 0 | die("Error on reachable objects of %s", oid_to_hex(&obj->oid)); |
245 | 0 | write_cached_object(obj, obj_buf); |
246 | 0 | return 0; |
247 | 0 | } |
248 | | |
249 | | static void write_rest(void) |
250 | 0 | { |
251 | 0 | unsigned i; |
252 | 0 | for (i = 0; i < nr_objects; i++) { |
253 | 0 | if (obj_list[i].obj) |
254 | 0 | check_object(obj_list[i].obj, OBJ_ANY, NULL, NULL); |
255 | 0 | } |
256 | 0 | } |
257 | | |
258 | | static void added_object(unsigned nr, enum object_type type, |
259 | | void *data, unsigned long size); |
260 | | |
261 | | /* |
262 | | * Write out the nr-th object from the list, now that we know its |
263 | | * contents. Under --strict, this buffers structured objects in-core, |
264 | | * to be checked at the end. |
265 | | */ |
266 | | static void write_object(unsigned nr, enum object_type type, |
267 | | void *buf, unsigned long size) |
268 | 0 | { |
269 | 0 | if (!strict) { |
270 | 0 | if (write_object_file(buf, size, type, |
271 | 0 | &obj_list[nr].oid) < 0) |
272 | 0 | die("failed to write object"); |
273 | 0 | added_object(nr, type, buf, size); |
274 | 0 | free(buf); |
275 | 0 | obj_list[nr].obj = NULL; |
276 | 0 | } else if (type == OBJ_BLOB) { |
277 | 0 | struct blob *blob; |
278 | 0 | if (write_object_file(buf, size, type, |
279 | 0 | &obj_list[nr].oid) < 0) |
280 | 0 | die("failed to write object"); |
281 | 0 | added_object(nr, type, buf, size); |
282 | 0 | free(buf); |
283 | |
284 | 0 | blob = lookup_blob(the_repository, &obj_list[nr].oid); |
285 | 0 | if (blob) |
286 | 0 | blob->object.flags |= FLAG_WRITTEN; |
287 | 0 | else |
288 | 0 | die("invalid blob object"); |
289 | 0 | obj_list[nr].obj = NULL; |
290 | 0 | } else { |
291 | 0 | struct object *obj; |
292 | 0 | int eaten; |
293 | 0 | hash_object_file(the_hash_algo, buf, size, type, |
294 | 0 | &obj_list[nr].oid); |
295 | 0 | added_object(nr, type, buf, size); |
296 | 0 | obj = parse_object_buffer(the_repository, &obj_list[nr].oid, |
297 | 0 | type, size, buf, |
298 | 0 | &eaten); |
299 | 0 | if (!obj) |
300 | 0 | die("invalid %s", type_name(type)); |
301 | 0 | add_object_buffer(obj, buf, size); |
302 | 0 | obj->flags |= FLAG_OPEN; |
303 | 0 | obj_list[nr].obj = obj; |
304 | 0 | } |
305 | 0 | } |
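/*
 * Note (added for illustration): write_object() has three paths.
 * Without --strict every object is written to the object store right
 * away.  With --strict, blobs are still written immediately (they
 * carry no outgoing references to verify), while commits, trees and
 * tags are only hashed and held in core via add_object_buffer() until
 * write_rest()/check_object() fsck them and write them out at the end.
 */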
306 | | |
307 | | static void resolve_delta(unsigned nr, enum object_type type, |
308 | | void *base, unsigned long base_size, |
309 | | void *delta, unsigned long delta_size) |
310 | 0 | { |
311 | 0 | void *result; |
312 | 0 | unsigned long result_size; |
313 | |
314 | 0 | result = patch_delta(base, base_size, |
315 | 0 | delta, delta_size, |
316 | 0 | &result_size); |
317 | 0 | if (!result) |
318 | 0 | die("failed to apply delta"); |
319 | 0 | free(delta); |
320 | 0 | write_object(nr, type, result, result_size); |
321 | 0 | } |
322 | | |
323 | | /* |
324 | | * We now know the contents of an object (the nr-th one in the pack); |
325 | | * resolve all the deltified objects that are based on it. |
326 | | */ |
327 | | static void added_object(unsigned nr, enum object_type type, |
328 | | void *data, unsigned long size) |
329 | 0 | { |
330 | 0 | struct delta_info **p = &delta_list; |
331 | 0 | struct delta_info *info; |
332 | |
333 | 0 | while ((info = *p) != NULL) { |
334 | 0 | if (oideq(&info->base_oid, &obj_list[nr].oid) || |
335 | 0 | info->base_offset == obj_list[nr].offset) { |
336 | 0 | *p = info->next; |
337 | 0 | p = &delta_list; |
338 | 0 | resolve_delta(info->nr, type, data, size, |
339 | 0 | info->delta, info->size); |
340 | 0 | free(info); |
341 | 0 | continue; |
342 | 0 | } |
343 | 0 | p = &info->next; |
344 | 0 | } |
345 | 0 | } |
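/*
 * Note (added for illustration): resolve_delta() ends up calling
 * write_object(), which calls back into added_object() for the newly
 * resolved object; that recursion can unlink further entries from
 * delta_list, so the walk above restarts from the head of the list
 * ("p = &delta_list") after each successful resolution instead of
 * continuing from a possibly stale position.
 */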
346 | | |
347 | | static void unpack_non_delta_entry(enum object_type type, unsigned long size, |
348 | | unsigned nr) |
349 | 0 | { |
350 | 0 | void *buf = get_data(size); |
351 | |
352 | 0 | if (buf) |
353 | 0 | write_object(nr, type, buf, size); |
354 | 0 | } |
355 | | |
356 | | struct input_zstream_data { |
357 | | git_zstream *zstream; |
358 | | unsigned char buf[8192]; |
359 | | int status; |
360 | | }; |
361 | | |
362 | | static const void *feed_input_zstream(struct input_stream *in_stream, |
363 | | unsigned long *readlen) |
364 | 0 | { |
365 | 0 | struct input_zstream_data *data = in_stream->data; |
366 | 0 | git_zstream *zstream = data->zstream; |
367 | 0 | void *in = fill(1); |
368 | |
369 | 0 | if (in_stream->is_finished) { |
370 | 0 | *readlen = 0; |
371 | 0 | return NULL; |
372 | 0 | } |
373 | | |
374 | 0 | zstream->next_out = data->buf; |
375 | 0 | zstream->avail_out = sizeof(data->buf); |
376 | 0 | zstream->next_in = in; |
377 | 0 | zstream->avail_in = len; |
378 | |
379 | 0 | data->status = git_inflate(zstream, 0); |
380 | |
381 | 0 | in_stream->is_finished = data->status != Z_OK; |
382 | 0 | use(len - zstream->avail_in); |
383 | 0 | *readlen = sizeof(data->buf) - zstream->avail_out; |
384 | |
385 | 0 | return data->buf; |
386 | 0 | } |
387 | | |
388 | | static void stream_blob(unsigned long size, unsigned nr) |
389 | 0 | { |
390 | 0 | git_zstream zstream = { 0 }; |
391 | 0 | struct input_zstream_data data = { 0 }; |
392 | 0 | struct input_stream in_stream = { |
393 | 0 | .read = feed_input_zstream, |
394 | 0 | .data = &data, |
395 | 0 | }; |
396 | 0 | struct obj_info *info = &obj_list[nr]; |
397 | |
398 | 0 | data.zstream = &zstream; |
399 | 0 | git_inflate_init(&zstream); |
400 | |
401 | 0 | if (stream_loose_object(&in_stream, size, &info->oid)) |
402 | 0 | die(_("failed to write object in stream")); |
403 | | |
404 | 0 | if (data.status != Z_STREAM_END) |
405 | 0 | die(_("inflate returned (%d)"), data.status); |
406 | 0 | git_inflate_end(&zstream); |
407 | |
408 | 0 | if (strict) { |
409 | 0 | struct blob *blob = lookup_blob(the_repository, &info->oid); |
410 | |
411 | 0 | if (!blob) |
412 | 0 | die(_("invalid blob object from stream")); |
413 | 0 | blob->object.flags |= FLAG_WRITTEN; |
414 | 0 | } |
415 | 0 | info->obj = NULL; |
416 | 0 | } |
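/*
 * Note (added for illustration): unpack_one() routes blobs larger
 * than big_file_threshold here so they are inflated straight into a
 * loose object via stream_loose_object() instead of being held fully
 * in memory; under --strict the resulting blob is simply marked
 * FLAG_WRITTEN, since blobs need no further connectivity checks.
 */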
417 | | |
418 | | static int resolve_against_held(unsigned nr, const struct object_id *base, |
419 | | void *delta_data, unsigned long delta_size) |
420 | 0 | { |
421 | 0 | struct object *obj; |
422 | 0 | struct obj_buffer *obj_buffer; |
423 | 0 | obj = lookup_object(the_repository, base); |
424 | 0 | if (!obj) |
425 | 0 | return 0; |
426 | 0 | obj_buffer = lookup_object_buffer(obj); |
427 | 0 | if (!obj_buffer) |
428 | 0 | return 0; |
429 | 0 | resolve_delta(nr, obj->type, obj_buffer->buffer, |
430 | 0 | obj_buffer->size, delta_data, delta_size); |
431 | 0 | return 1; |
432 | 0 | } |
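/*
 * Note (added for illustration): this can only succeed under --strict,
 * because only the --strict path of write_object() keeps object
 * buffers in core via add_object_buffer(); when it does, the delta is
 * applied directly against the held buffer.
 */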
433 | | |
434 | | static void unpack_delta_entry(enum object_type type, unsigned long delta_size, |
435 | | unsigned nr) |
436 | 0 | { |
437 | 0 | void *delta_data, *base; |
438 | 0 | unsigned long base_size; |
439 | 0 | struct object_id base_oid; |
440 | |
441 | 0 | if (type == OBJ_REF_DELTA) { |
442 | 0 | oidread(&base_oid, fill(the_hash_algo->rawsz), the_repository->hash_algo); |
443 | 0 | use(the_hash_algo->rawsz); |
444 | 0 | delta_data = get_data(delta_size); |
445 | 0 | if (!delta_data) |
446 | 0 | return; |
447 | 0 | if (repo_has_object_file(the_repository, &base_oid)) |
448 | 0 | ; /* Ok we have this one */ |
449 | 0 | else if (resolve_against_held(nr, &base_oid, |
450 | 0 | delta_data, delta_size)) |
451 | 0 | return; /* we are done */ |
452 | 0 | else { |
453 | | /* cannot resolve yet --- queue it */ |
454 | 0 | oidclr(&obj_list[nr].oid, the_repository->hash_algo); |
455 | 0 | add_delta_to_list(nr, &base_oid, 0, delta_data, delta_size); |
456 | 0 | return; |
457 | 0 | } |
458 | 0 | } else { |
459 | 0 | unsigned base_found = 0; |
460 | 0 | unsigned char *pack, c; |
461 | 0 | off_t base_offset; |
462 | 0 | unsigned lo, mid, hi; |
463 | |
464 | 0 | pack = fill(1); |
465 | 0 | c = *pack; |
466 | 0 | use(1); |
467 | 0 | base_offset = c & 127; |
468 | 0 | while (c & 128) { |
469 | 0 | base_offset += 1; |
470 | 0 | if (!base_offset || MSB(base_offset, 7)) |
471 | 0 | die("offset value overflow for delta base object"); |
472 | 0 | pack = fill(1); |
473 | 0 | c = *pack; |
474 | 0 | use(1); |
475 | 0 | base_offset = (base_offset << 7) + (c & 127); |
476 | 0 | } |
477 | 0 | base_offset = obj_list[nr].offset - base_offset; |
478 | 0 | if (base_offset <= 0 || base_offset >= obj_list[nr].offset) |
479 | 0 | die("offset value out of bound for delta base object"); |
480 | | |
481 | 0 | delta_data = get_data(delta_size); |
482 | 0 | if (!delta_data) |
483 | 0 | return; |
484 | 0 | lo = 0; |
485 | 0 | hi = nr; |
486 | 0 | while (lo < hi) { |
487 | 0 | mid = lo + (hi - lo) / 2; |
488 | 0 | if (base_offset < obj_list[mid].offset) { |
489 | 0 | hi = mid; |
490 | 0 | } else if (base_offset > obj_list[mid].offset) { |
491 | 0 | lo = mid + 1; |
492 | 0 | } else { |
493 | 0 | oidcpy(&base_oid, &obj_list[mid].oid); |
494 | 0 | base_found = !is_null_oid(&base_oid); |
495 | 0 | break; |
496 | 0 | } |
497 | 0 | } |
498 | 0 | if (!base_found) { |
499 | | /* |
500 | | * The delta base object is itself a delta that |
501 | | * has not been resolved yet. |
502 | | */ |
503 | 0 | oidclr(&obj_list[nr].oid, the_repository->hash_algo); |
504 | 0 | add_delta_to_list(nr, null_oid(), base_offset, |
505 | 0 | delta_data, delta_size); |
506 | 0 | return; |
507 | 0 | } |
508 | 0 | } |
509 | | |
510 | 0 | if (resolve_against_held(nr, &base_oid, delta_data, delta_size)) |
511 | 0 | return; |
512 | | |
513 | 0 | base = repo_read_object_file(the_repository, &base_oid, &type, |
514 | 0 | &base_size); |
515 | 0 | if (!base) { |
516 | 0 | error("failed to read delta-pack base object %s", |
517 | 0 | oid_to_hex(&base_oid)); |
518 | 0 | if (!recover) |
519 | 0 | exit(1); |
520 | 0 | has_errors = 1; |
521 | 0 | return; |
522 | 0 | } |
523 | 0 | resolve_delta(nr, type, base, base_size, delta_data, delta_size); |
524 | 0 | free(base); |
525 | 0 | } |
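/*
 * Added illustration (not part of the original file): the OBJ_OFS_DELTA
 * base offset read above is a big-endian base-128 number with a +1
 * bias applied before each continuation byte, so multi-byte encodings
 * never overlap shorter ones.  For example, the bytes 0x91 0x2e decode
 * as: 0x91 -> offset 0x11 = 17, MSB set so add 1 (18) and shift;
 * 0x2e -> 18 * 128 + 46 = 2350, MSB clear, done.  The base object then
 * starts 2350 bytes before the current object's offset in the pack.
 */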
526 | | |
527 | | static void unpack_one(unsigned nr) |
528 | 0 | { |
529 | 0 | unsigned shift; |
530 | 0 | unsigned char *pack; |
531 | 0 | unsigned long size, c; |
532 | 0 | enum object_type type; |
533 | |
534 | 0 | obj_list[nr].offset = consumed_bytes; |
535 | |
536 | 0 | pack = fill(1); |
537 | 0 | c = *pack; |
538 | 0 | use(1); |
539 | 0 | type = (c >> 4) & 7; |
540 | 0 | size = (c & 15); |
541 | 0 | shift = 4; |
542 | 0 | while (c & 0x80) { |
543 | 0 | pack = fill(1); |
544 | 0 | c = *pack; |
545 | 0 | use(1); |
546 | 0 | size += (c & 0x7f) << shift; |
547 | 0 | shift += 7; |
548 | 0 | } |
549 | |
550 | 0 | switch (type) { |
551 | 0 | case OBJ_BLOB: |
552 | 0 | if (!dry_run && size > big_file_threshold) { |
553 | 0 | stream_blob(size, nr); |
554 | 0 | return; |
555 | 0 | } |
556 | | /* fallthrough */ |
557 | 0 | case OBJ_COMMIT: |
558 | 0 | case OBJ_TREE: |
559 | 0 | case OBJ_TAG: |
560 | 0 | unpack_non_delta_entry(type, size, nr); |
561 | 0 | return; |
562 | 0 | case OBJ_REF_DELTA: |
563 | 0 | case OBJ_OFS_DELTA: |
564 | 0 | unpack_delta_entry(type, size, nr); |
565 | 0 | return; |
566 | 0 | default: |
567 | 0 | error("bad object type %d", type); |
568 | 0 | has_errors = 1; |
569 | 0 | if (recover) |
570 | 0 | return; |
571 | 0 | exit(1); |
572 | 0 | } |
573 | 0 | } |
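/*
 * Added illustration (not part of the original file): a self-contained
 * sketch of the per-object header decoding performed at the top of
 * unpack_one().  Bits 4-6 of the first byte give the object type and
 * its low 4 bits start the size; while a byte's MSB is set, the next
 * byte contributes 7 more size bits, least-significant group first.
 */
static unsigned long demo_decode_header(const unsigned char *p, int *type)
{
	unsigned char c = *p++;
	unsigned long size = c & 15;
	unsigned shift = 4;

	*type = (c >> 4) & 7;
	while (c & 0x80) {
		c = *p++;
		size += (unsigned long)(c & 0x7f) << shift;
		shift += 7;
	}
	return size;
}
/* Example: the two bytes 0x95 0x0a decode to type 1 (commit) and size 165. */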
574 | | |
575 | | static void unpack_all(void) |
576 | 0 | { |
577 | 0 | int i; |
578 | 0 | struct pack_header *hdr = fill(sizeof(struct pack_header)); |
579 | |
580 | 0 | nr_objects = ntohl(hdr->hdr_entries); |
581 | |
582 | 0 | if (ntohl(hdr->hdr_signature) != PACK_SIGNATURE) |
583 | 0 | die("bad pack file"); |
584 | 0 | if (!pack_version_ok(hdr->hdr_version)) |
585 | 0 | die("unknown pack file version %"PRIu32, |
586 | 0 | ntohl(hdr->hdr_version)); |
587 | 0 | use(sizeof(struct pack_header)); |
588 | |
589 | 0 | if (!quiet) |
590 | 0 | progress = start_progress(_("Unpacking objects"), nr_objects); |
591 | 0 | CALLOC_ARRAY(obj_list, nr_objects); |
592 | 0 | begin_odb_transaction(); |
593 | 0 | for (i = 0; i < nr_objects; i++) { |
594 | 0 | unpack_one(i); |
595 | 0 | display_progress(progress, i + 1); |
596 | 0 | } |
597 | 0 | end_odb_transaction(); |
598 | 0 | stop_progress(&progress); |
599 | |
600 | 0 | if (delta_list) |
601 | 0 | die("unresolved deltas left after unpacking"); |
602 | 0 | } |
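/*
 * Note (added for illustration): struct pack_header (from pack.h) is
 * the fixed 12-byte pack preamble read above -- a 4-byte signature
 * ("PACK", i.e. PACK_SIGNATURE), a 4-byte version number and a 4-byte
 * object count, all stored in network byte order, which is why every
 * field goes through ntohl() before use.
 */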
603 | | |
604 | | int cmd_unpack_objects(int argc, const char **argv, const char *prefix UNUSED) |
605 | 0 | { |
606 | 0 | int i; |
607 | 0 | struct object_id oid; |
608 | 0 | git_hash_ctx tmp_ctx; |
609 | |
610 | 0 | disable_replace_refs(); |
611 | |
612 | 0 | git_config(git_default_config, NULL); |
613 | |
614 | 0 | quiet = !isatty(2); |
615 | |
616 | 0 | for (i = 1 ; i < argc; i++) { |
617 | 0 | const char *arg = argv[i]; |
618 | |
619 | 0 | if (*arg == '-') { |
620 | 0 | if (!strcmp(arg, "-n")) { |
621 | 0 | dry_run = 1; |
622 | 0 | continue; |
623 | 0 | } |
624 | 0 | if (!strcmp(arg, "-q")) { |
625 | 0 | quiet = 1; |
626 | 0 | continue; |
627 | 0 | } |
628 | 0 | if (!strcmp(arg, "-r")) { |
629 | 0 | recover = 1; |
630 | 0 | continue; |
631 | 0 | } |
632 | 0 | if (!strcmp(arg, "--strict")) { |
633 | 0 | strict = 1; |
634 | 0 | continue; |
635 | 0 | } |
636 | 0 | if (skip_prefix(arg, "--strict=", &arg)) { |
637 | 0 | strict = 1; |
638 | 0 | fsck_set_msg_types(&fsck_options, arg); |
639 | 0 | continue; |
640 | 0 | } |
641 | 0 | if (starts_with(arg, "--pack_header=")) { |
642 | 0 | struct pack_header *hdr; |
643 | 0 | char *c; |
644 | |
645 | 0 | hdr = (struct pack_header *)buffer; |
646 | 0 | hdr->hdr_signature = htonl(PACK_SIGNATURE); |
647 | 0 | hdr->hdr_version = htonl(strtoul(arg + 14, &c, 10)); |
648 | 0 | if (*c != ',') |
649 | 0 | die("bad %s", arg); |
650 | 0 | hdr->hdr_entries = htonl(strtoul(c + 1, &c, 10)); |
651 | 0 | if (*c) |
652 | 0 | die("bad %s", arg); |
653 | 0 | len = sizeof(*hdr); |
654 | 0 | continue; |
655 | 0 | } |
656 | 0 | if (skip_prefix(arg, "--max-input-size=", &arg)) { |
657 | 0 | max_input_size = strtoumax(arg, NULL, 10); |
658 | 0 | continue; |
659 | 0 | } |
660 | 0 | usage(unpack_usage); |
661 | 0 | } |
662 | | |
663 | | /* We don't take any non-flag arguments now... Maybe some day. */ |
664 | 0 | usage(unpack_usage); |
665 | 0 | } |
666 | 0 | the_hash_algo->init_fn(&ctx); |
667 | 0 | unpack_all(); |
668 | 0 | the_hash_algo->update_fn(&ctx, buffer, offset); |
669 | 0 | the_hash_algo->init_fn(&tmp_ctx); |
670 | 0 | the_hash_algo->clone_fn(&tmp_ctx, &ctx); |
671 | 0 | the_hash_algo->final_oid_fn(&oid, &tmp_ctx); |
672 | 0 | if (strict) { |
673 | 0 | write_rest(); |
674 | 0 | if (fsck_finish(&fsck_options)) |
675 | 0 | die(_("fsck error in pack objects")); |
676 | 0 | } |
677 | 0 | if (!hasheq(fill(the_hash_algo->rawsz), oid.hash, |
678 | 0 | the_repository->hash_algo)) |
679 | 0 | die("final sha1 did not match"); |
680 | 0 | use(the_hash_algo->rawsz); |
681 | | |
682 | | /* Write the last part of the buffer to stdout */ |
683 | 0 | write_in_full(1, buffer + offset, len); |
684 | | |
685 | | /* All done */ |
686 | 0 | return has_errors; |
687 | 0 | } |