Line | Count | Source |
1 | | #define DISABLE_SIGN_COMPARE_WARNINGS |
2 | | |
3 | | #include "git-compat-util.h" |
4 | | #include "gettext.h" |
5 | | #include "hex.h" |
6 | | #include "object.h" |
7 | | #include "replace-object.h" |
8 | | #include "object-file.h" |
9 | | #include "odb/streaming.h" |
10 | | #include "blob.h" |
11 | | #include "statinfo.h" |
12 | | #include "tree.h" |
13 | | #include "commit.h" |
14 | | #include "tag.h" |
15 | | #include "alloc.h" |
16 | | #include "commit-graph.h" |
17 | | |
18 | | unsigned int get_max_object_index(const struct repository *repo) |
19 | 0 | { |
20 | 0 | return repo->parsed_objects->obj_hash_size; |
21 | 0 | } |
22 | | |
23 | | struct object *get_indexed_object(const struct repository *repo, |
24 | | unsigned int idx) |
25 | 0 | { |
26 | 0 | return repo->parsed_objects->obj_hash[idx]; |
27 | 0 | } |
28 | | |
/* Canonical names for enum object_type values, indexed by type code. */
static const char *object_type_strings[] = {
	NULL,		/* OBJ_NONE = 0 */
	"commit",	/* OBJ_COMMIT = 1 */
	"tree",		/* OBJ_TREE = 2 */
	"blob",		/* OBJ_BLOB = 3 */
	"tag",		/* OBJ_TAG = 4 */
};

/*
 * Translate an object type code into its human-readable name.
 * Returns NULL for OBJ_NONE and for out-of-range codes.
 */
const char *type_name(unsigned int type)
{
	const unsigned int known = sizeof(object_type_strings) / sizeof(object_type_strings[0]);

	return type < known ? object_type_strings[type] : NULL;
}
43 | | |
44 | | int type_from_string_gently(const char *str, ssize_t len, int gentle) |
45 | 0 | { |
46 | 0 | int i; |
47 | |
|
48 | 0 | if (len < 0) |
49 | 0 | len = strlen(str); |
50 | |
|
51 | 0 | for (i = 1; i < ARRAY_SIZE(object_type_strings); i++) |
52 | 0 | if (!xstrncmpz(object_type_strings[i], str, len)) |
53 | 0 | return i; |
54 | | |
55 | 0 | if (gentle) |
56 | 0 | return -1; |
57 | | |
58 | 0 | die(_("invalid object type \"%s\""), str); |
59 | 0 | } |
60 | | |
/*
 * Reduce an object id to a bucket index in [0, n), where n must be a
 * power of 2.  Please note that the return value is *not* consistent
 * across computer architectures.
 */
static unsigned int hash_obj(const struct object_id *oid, unsigned int n)
{
	unsigned int mask = n - 1;

	return oidhash(oid) & mask;
}
70 | | |
71 | | /* |
72 | | * Insert obj into the hash table hash, which has length size (which |
73 | | * must be a power of 2). On collisions, simply overflow to the next |
74 | | * empty bucket. |
75 | | */ |
76 | | static void insert_obj_hash(struct object *obj, struct object **hash, unsigned int size) |
77 | 0 | { |
78 | 0 | unsigned int j = hash_obj(&obj->oid, size); |
79 | |
|
80 | 0 | while (hash[j]) { |
81 | 0 | j++; |
82 | 0 | if (j >= size) |
83 | 0 | j = 0; |
84 | 0 | } |
85 | 0 | hash[j] = obj; |
86 | 0 | } |
87 | | |
88 | | /* |
89 | | * Look up the record for the given sha1 in the hash map stored in |
90 | | * obj_hash. Return NULL if it was not found. |
91 | | */ |
92 | | struct object *lookup_object(struct repository *r, const struct object_id *oid) |
93 | 0 | { |
94 | 0 | unsigned int i, first; |
95 | 0 | struct object *obj; |
96 | |
|
97 | 0 | if (!r->parsed_objects->obj_hash) |
98 | 0 | return NULL; |
99 | | |
100 | 0 | first = i = hash_obj(oid, r->parsed_objects->obj_hash_size); |
101 | 0 | while ((obj = r->parsed_objects->obj_hash[i]) != NULL) { |
102 | 0 | if (oideq(oid, &obj->oid)) |
103 | 0 | break; |
104 | 0 | i++; |
105 | 0 | if (i == r->parsed_objects->obj_hash_size) |
106 | 0 | i = 0; |
107 | 0 | } |
108 | 0 | if (obj && i != first) { |
109 | | /* |
110 | | * Move object to where we started to look for it so |
111 | | * that we do not need to walk the hash table the next |
112 | | * time we look for it. |
113 | | */ |
114 | 0 | SWAP(r->parsed_objects->obj_hash[i], |
115 | 0 | r->parsed_objects->obj_hash[first]); |
116 | 0 | } |
117 | 0 | return obj; |
118 | 0 | } |
119 | | |
120 | | /* |
121 | | * Increase the size of the hash map stored in obj_hash to the next |
122 | | * power of 2 (but at least 32). Copy the existing values to the new |
123 | | * hash map. |
124 | | */ |
125 | | static void grow_object_hash(struct repository *r) |
126 | 0 | { |
127 | 0 | int i; |
128 | | /* |
129 | | * Note that this size must always be power-of-2 to match hash_obj |
130 | | * above. |
131 | | */ |
132 | 0 | int new_hash_size = r->parsed_objects->obj_hash_size < 32 ? 32 : 2 * r->parsed_objects->obj_hash_size; |
133 | 0 | struct object **new_hash; |
134 | |
|
135 | 0 | CALLOC_ARRAY(new_hash, new_hash_size); |
136 | 0 | for (i = 0; i < r->parsed_objects->obj_hash_size; i++) { |
137 | 0 | struct object *obj = r->parsed_objects->obj_hash[i]; |
138 | |
|
139 | 0 | if (!obj) |
140 | 0 | continue; |
141 | 0 | insert_obj_hash(obj, new_hash, new_hash_size); |
142 | 0 | } |
143 | 0 | free(r->parsed_objects->obj_hash); |
144 | 0 | r->parsed_objects->obj_hash = new_hash; |
145 | 0 | r->parsed_objects->obj_hash_size = new_hash_size; |
146 | 0 | } |
147 | | |
148 | | void *create_object(struct repository *r, const struct object_id *oid, void *o) |
149 | 0 | { |
150 | 0 | struct object *obj = o; |
151 | |
|
152 | 0 | obj->parsed = 0; |
153 | 0 | obj->flags = 0; |
154 | 0 | oidcpy(&obj->oid, oid); |
155 | |
|
156 | 0 | if (r->parsed_objects->obj_hash_size - 1 <= r->parsed_objects->nr_objs * 2) |
157 | 0 | grow_object_hash(r); |
158 | |
|
159 | 0 | insert_obj_hash(obj, r->parsed_objects->obj_hash, |
160 | 0 | r->parsed_objects->obj_hash_size); |
161 | 0 | r->parsed_objects->nr_objs++; |
162 | 0 | return obj; |
163 | 0 | } |
164 | | |
165 | | void *object_as_type(struct object *obj, enum object_type type, int quiet) |
166 | 0 | { |
167 | 0 | if (obj->type == type) |
168 | 0 | return obj; |
169 | 0 | else if (obj->type == OBJ_NONE) { |
170 | 0 | if (type == OBJ_COMMIT) |
171 | 0 | init_commit_node((struct commit *) obj); |
172 | 0 | else |
173 | 0 | obj->type = type; |
174 | 0 | return obj; |
175 | 0 | } |
176 | 0 | else { |
177 | 0 | if (!quiet) |
178 | 0 | error(_("object %s is a %s, not a %s"), |
179 | 0 | oid_to_hex(&obj->oid), |
180 | 0 | type_name(obj->type), type_name(type)); |
181 | 0 | return NULL; |
182 | 0 | } |
183 | 0 | } |
184 | | |
/*
 * Return the object for oid, creating a typeless (OBJ_NONE)
 * placeholder if nothing is known about it yet.
 */
struct object *lookup_unknown_object(struct repository *r, const struct object_id *oid)
{
	struct object *obj = lookup_object(r, oid);

	if (obj)
		return obj;
	return create_object(r, oid, alloc_object_node(r));
}
192 | | |
193 | | struct object *lookup_object_by_type(struct repository *r, |
194 | | const struct object_id *oid, |
195 | | enum object_type type) |
196 | 0 | { |
197 | 0 | switch (type) { |
198 | 0 | case OBJ_COMMIT: |
199 | 0 | return (struct object *)lookup_commit(r, oid); |
200 | 0 | case OBJ_TREE: |
201 | 0 | return (struct object *)lookup_tree(r, oid); |
202 | 0 | case OBJ_TAG: |
203 | 0 | return (struct object *)lookup_tag(r, oid); |
204 | 0 | case OBJ_BLOB: |
205 | 0 | return (struct object *)lookup_blob(r, oid); |
206 | 0 | default: |
207 | 0 | BUG("unknown object type %d", type); |
208 | 0 | } |
209 | 0 | } |
210 | | |
/*
 * Peel the object named "name" until something that is not a tag is
 * found, storing the peeled object's id in *oid and its type in
 * *typep.
 *
 * Returns:
 *   PEEL_NON_TAG - the named object is not a tag; *typep is set but
 *                  *oid is left untouched.
 *   PEEL_PEELED  - a tag chain was fully peeled; *oid and *typep
 *                  describe the final non-tag object.
 *   PEEL_INVALID - the object is missing/corrupt, or the chain ends
 *                  in a tag that cannot be parsed.
 *
 * NOTE(review): typep is dereferenced unconditionally, so callers
 * must pass a valid pointer (peel_object() supplies a dummy).
 */
enum peel_status peel_object_ext(struct repository *r,
				 const struct object_id *name,
				 struct object_id *oid,
				 unsigned flags,
				 enum object_type *typep)
{
	struct object *o = lookup_unknown_object(r, name);

	/*
	 * A placeholder object has no type yet; consult the object
	 * database so tags can be told apart from everything else.
	 */
	if (o->type == OBJ_NONE) {
		int type = odb_read_object_info(r->objects, name, NULL);
		if (type < 0 || !object_as_type(o, type, 0))
			return PEEL_INVALID;
	}

	if (o->type != OBJ_TAG) {
		*typep = o->type;
		return PEEL_NON_TAG;
	}

	/* Follow the chain of tags-of-tags until a non-tag is reached. */
	while (o && o->type == OBJ_TAG) {
		o = parse_object(r, &o->oid);
		if (o && o->type == OBJ_TAG && ((struct tag *)o)->tagged) {
			o = ((struct tag *)o)->tagged;

			/*
			 * Optionally verify that the tagged object
			 * really exists with a usable type before
			 * continuing the walk.
			 */
			if (flags & PEEL_OBJECT_VERIFY_TAGGED_OBJECT_TYPE) {
				int type = odb_read_object_info(r->objects, &o->oid, NULL);
				if (type < 0 || !object_as_type(o, type, 0))
					return PEEL_INVALID;
			}
		} else {
			/* Unparseable tag, or a tag with nothing tagged. */
			o = NULL;
		}
	}
	if (!o)
		return PEEL_INVALID;

	oidcpy(oid, &o->oid);
	*typep = o->type;
	return PEEL_PEELED;
}
251 | | |
252 | | enum peel_status peel_object(struct repository *r, |
253 | | const struct object_id *name, |
254 | | struct object_id *oid, |
255 | | unsigned flags) |
256 | 0 | { |
257 | 0 | enum object_type dummy; |
258 | 0 | return peel_object_ext(r, name, oid, flags, &dummy); |
259 | 0 | } |
260 | | |
261 | | struct object *parse_object_buffer(struct repository *r, const struct object_id *oid, enum object_type type, unsigned long size, void *buffer, int *eaten_p) |
262 | 0 | { |
263 | 0 | struct object *obj; |
264 | 0 | *eaten_p = 0; |
265 | |
|
266 | 0 | obj = NULL; |
267 | 0 | if (type == OBJ_BLOB) { |
268 | 0 | struct blob *blob = lookup_blob(r, oid); |
269 | 0 | if (blob) { |
270 | 0 | parse_blob_buffer(blob); |
271 | 0 | obj = &blob->object; |
272 | 0 | } |
273 | 0 | } else if (type == OBJ_TREE) { |
274 | 0 | struct tree *tree = lookup_tree(r, oid); |
275 | 0 | if (tree) { |
276 | 0 | obj = &tree->object; |
277 | 0 | if (!tree->buffer) |
278 | 0 | tree->object.parsed = 0; |
279 | 0 | if (!tree->object.parsed) { |
280 | 0 | if (parse_tree_buffer(tree, buffer, size)) |
281 | 0 | return NULL; |
282 | 0 | *eaten_p = 1; |
283 | 0 | } |
284 | 0 | } |
285 | 0 | } else if (type == OBJ_COMMIT) { |
286 | 0 | struct commit *commit = lookup_commit(r, oid); |
287 | 0 | if (commit) { |
288 | 0 | if (parse_commit_buffer(r, commit, buffer, size, 1)) |
289 | 0 | return NULL; |
290 | 0 | if (save_commit_buffer && |
291 | 0 | !get_cached_commit_buffer(r, commit, NULL)) { |
292 | 0 | set_commit_buffer(r, commit, buffer, size); |
293 | 0 | *eaten_p = 1; |
294 | 0 | } |
295 | 0 | obj = &commit->object; |
296 | 0 | } |
297 | 0 | } else if (type == OBJ_TAG) { |
298 | 0 | struct tag *tag = lookup_tag(r, oid); |
299 | 0 | if (tag) { |
300 | 0 | if (parse_tag_buffer(r, tag, buffer, size)) |
301 | 0 | return NULL; |
302 | 0 | obj = &tag->object; |
303 | 0 | } |
304 | 0 | } else { |
305 | 0 | warning(_("object %s has unknown type id %d"), oid_to_hex(oid), type); |
306 | 0 | obj = NULL; |
307 | 0 | } |
308 | 0 | return obj; |
309 | 0 | } |
310 | | |
/*
 * Like parse_object(), but die with a message naming the object (by
 * the given name, or by its hex id when name is NULL) on failure.
 */
struct object *parse_object_or_die(struct repository *repo,
				   const struct object_id *oid,
				   const char *name)
{
	struct object *obj = parse_object(repo, oid);

	if (!obj)
		die(_("unable to parse object: %s"), name ? name : oid_to_hex(oid));
	return obj;
}
321 | | |
/*
 * Look up and parse the object named by oid, honoring replacement
 * refs and the given flags:
 *
 *   PARSE_OBJECT_SKIP_HASH_CHECK - trust on-disk data; do not re-hash
 *	buffers to verify they match oid.
 *   PARSE_OBJECT_DISCARD_TREE   - the caller does not need tree
 *	contents, so tree buffers may be dropped immediately.
 *
 * Returns the parsed object, or NULL when the object is missing or
 * fails verification/parsing.
 */
struct object *parse_object_with_flags(struct repository *r,
				       const struct object_id *oid,
				       enum parse_object_flags flags)
{
	int skip_hash = !!(flags & PARSE_OBJECT_SKIP_HASH_CHECK);
	int discard_tree = !!(flags & PARSE_OBJECT_DISCARD_TREE);
	unsigned long size;
	enum object_type type;
	int eaten;
	const struct object_id *repl = lookup_replace_object(r, oid);
	void *buffer;
	struct object *obj;

	/* Fast path: already parsed in memory. */
	obj = lookup_object(r, oid);
	if (obj && obj->parsed)
		return obj;

	/*
	 * When hash checking is skipped, the commit-graph can answer
	 * commit lookups without touching the object database.
	 */
	if (skip_hash) {
		struct commit *commit = lookup_commit_in_graph(r, repl);
		if (commit)
			return &commit->object;
	}

	/*
	 * Blobs are verified through the streaming interface so the
	 * whole content need not be held in memory at once.
	 */
	if ((!obj || obj->type == OBJ_NONE || obj->type == OBJ_BLOB) &&
	    odb_read_object_info(r->objects, oid, NULL) == OBJ_BLOB) {
		if (!skip_hash) {
			struct odb_read_stream *stream = odb_read_stream_open(r->objects, oid, NULL);

			if (!stream) {
				error(_("unable to open object stream for %s"), oid_to_hex(oid));
				return NULL;
			}

			if (stream_object_signature(r, stream, repl) < 0) {
				error(_("hash mismatch %s"), oid_to_hex(oid));
				odb_read_stream_close(stream);
				return NULL;
			}

			odb_read_stream_close(stream);
		}
		parse_blob_buffer(lookup_blob(r, oid));
		return lookup_object(r, oid);
	}

	/*
	 * If the caller does not care about the tree buffer and does not
	 * care about checking the hash, we can simply verify that we
	 * have the on-disk object with the correct type.
	 */
	if (skip_hash && discard_tree &&
	    (!obj || obj->type == OBJ_NONE || obj->type == OBJ_TREE) &&
	    odb_read_object_info(r->objects, oid, NULL) == OBJ_TREE) {
		return &lookup_tree(r, oid)->object;
	}

	/* General case: read the whole object, verify, then parse. */
	buffer = odb_read_object(r->objects, oid, &type, &size);
	if (buffer) {
		if (!skip_hash &&
		    check_object_signature(r, repl, buffer, size, type) < 0) {
			free(buffer);
			error(_("hash mismatch %s"), oid_to_hex(repl));
			return NULL;
		}

		/* parse_object_buffer() reports if it kept the buffer. */
		obj = parse_object_buffer(r, oid, type, size,
					  buffer, &eaten);
		if (!eaten)
			free(buffer);
		if (discard_tree && type == OBJ_TREE)
			free_tree_buffer((struct tree *)obj);
		return obj;
	}
	return NULL;
}
397 | | |
/* Parse the object named by oid with default (strict) behavior. */
struct object *parse_object(struct repository *r, const struct object_id *oid)
{
	return parse_object_with_flags(r, oid, 0);
}
402 | | |
403 | | struct object_list *object_list_insert(struct object *item, |
404 | | struct object_list **list_p) |
405 | 0 | { |
406 | 0 | struct object_list *new_list = xmalloc(sizeof(struct object_list)); |
407 | 0 | new_list->item = item; |
408 | 0 | new_list->next = *list_p; |
409 | 0 | *list_p = new_list; |
410 | 0 | return new_list; |
411 | 0 | } |
412 | | |
413 | | int object_list_contains(struct object_list *list, struct object *obj) |
414 | 0 | { |
415 | 0 | while (list) { |
416 | 0 | if (list->item == obj) |
417 | 0 | return 1; |
418 | 0 | list = list->next; |
419 | 0 | } |
420 | 0 | return 0; |
421 | 0 | } |
422 | | |
423 | | void object_list_free(struct object_list **list) |
424 | 0 | { |
425 | 0 | while (*list) { |
426 | 0 | struct object_list *p = *list; |
427 | 0 | *list = p->next; |
428 | 0 | free(p); |
429 | 0 | } |
430 | 0 | } |
431 | | |
432 | | /* |
433 | | * A zero-length string to which object_array_entry::name can be |
434 | | * initialized without requiring a malloc/free. |
435 | | */ |
436 | | static char object_array_slopbuf[1]; |
437 | | |
438 | | void object_array_init(struct object_array *array) |
439 | 0 | { |
440 | 0 | struct object_array blank = OBJECT_ARRAY_INIT; |
441 | 0 | memcpy(array, &blank, sizeof(*array)); |
442 | 0 | } |
443 | | |
444 | | void add_object_array_with_path(struct object *obj, const char *name, |
445 | | struct object_array *array, |
446 | | unsigned mode, const char *path) |
447 | 0 | { |
448 | 0 | unsigned nr = array->nr; |
449 | 0 | unsigned alloc = array->alloc; |
450 | 0 | struct object_array_entry *objects = array->objects; |
451 | 0 | struct object_array_entry *entry; |
452 | |
|
453 | 0 | if (nr >= alloc) { |
454 | 0 | alloc = (alloc + 32) * 2; |
455 | 0 | REALLOC_ARRAY(objects, alloc); |
456 | 0 | array->alloc = alloc; |
457 | 0 | array->objects = objects; |
458 | 0 | } |
459 | 0 | entry = &objects[nr]; |
460 | 0 | entry->item = obj; |
461 | 0 | if (!name) |
462 | 0 | entry->name = NULL; |
463 | 0 | else if (!*name) |
464 | | /* Use our own empty string instead of allocating one: */ |
465 | 0 | entry->name = object_array_slopbuf; |
466 | 0 | else |
467 | 0 | entry->name = xstrdup(name); |
468 | 0 | entry->mode = mode; |
469 | 0 | if (path) |
470 | 0 | entry->path = xstrdup(path); |
471 | 0 | else |
472 | 0 | entry->path = NULL; |
473 | 0 | array->nr = ++nr; |
474 | 0 | } |
475 | | |
476 | | void add_object_array(struct object *obj, const char *name, struct object_array *array) |
477 | 0 | { |
478 | 0 | add_object_array_with_path(obj, name, array, S_IFINVALID, NULL); |
479 | 0 | } |
480 | | |
481 | | /* |
482 | | * Free all memory associated with an entry; the result is |
483 | | * in an unspecified state and should not be examined. |
484 | | */ |
485 | | static void object_array_release_entry(struct object_array_entry *ent) |
486 | 0 | { |
487 | 0 | if (ent->name != object_array_slopbuf) |
488 | 0 | free(ent->name); |
489 | 0 | free(ent->path); |
490 | 0 | } |
491 | | |
492 | | struct object *object_array_pop(struct object_array *array) |
493 | 0 | { |
494 | 0 | struct object *ret; |
495 | |
|
496 | 0 | if (!array->nr) |
497 | 0 | return NULL; |
498 | | |
499 | 0 | ret = array->objects[array->nr - 1].item; |
500 | 0 | object_array_release_entry(&array->objects[array->nr - 1]); |
501 | 0 | array->nr--; |
502 | 0 | return ret; |
503 | 0 | } |
504 | | |
505 | | void object_array_filter(struct object_array *array, |
506 | | object_array_each_func_t want, void *cb_data) |
507 | 0 | { |
508 | 0 | unsigned nr = array->nr, src, dst; |
509 | 0 | struct object_array_entry *objects = array->objects; |
510 | |
|
511 | 0 | for (src = dst = 0; src < nr; src++) { |
512 | 0 | if (want(&objects[src], cb_data)) { |
513 | 0 | if (src != dst) |
514 | 0 | objects[dst] = objects[src]; |
515 | 0 | dst++; |
516 | 0 | } else { |
517 | 0 | object_array_release_entry(&objects[src]); |
518 | 0 | } |
519 | 0 | } |
520 | 0 | array->nr = dst; |
521 | 0 | } |
522 | | |
523 | | void object_array_clear(struct object_array *array) |
524 | 0 | { |
525 | 0 | int i; |
526 | 0 | for (i = 0; i < array->nr; i++) |
527 | 0 | object_array_release_entry(&array->objects[i]); |
528 | 0 | FREE_AND_NULL(array->objects); |
529 | 0 | array->nr = array->alloc = 0; |
530 | 0 | } |
531 | | |
532 | | void clear_object_flags(struct repository *repo, unsigned flags) |
533 | 0 | { |
534 | 0 | int i; |
535 | |
|
536 | 0 | for (i = 0; i < repo->parsed_objects->obj_hash_size; i++) { |
537 | 0 | struct object *obj = repo->parsed_objects->obj_hash[i]; |
538 | 0 | if (obj) |
539 | 0 | obj->flags &= ~flags; |
540 | 0 | } |
541 | 0 | } |
542 | | |
543 | | void repo_clear_commit_marks(struct repository *r, unsigned int flags) |
544 | 0 | { |
545 | 0 | int i; |
546 | |
|
547 | 0 | for (i = 0; i < r->parsed_objects->obj_hash_size; i++) { |
548 | 0 | struct object *obj = r->parsed_objects->obj_hash[i]; |
549 | 0 | if (obj && obj->type == OBJ_COMMIT) |
550 | 0 | obj->flags &= ~flags; |
551 | 0 | } |
552 | 0 | } |
553 | | |
554 | | struct parsed_object_pool *parsed_object_pool_new(struct repository *repo) |
555 | 56 | { |
556 | 56 | struct parsed_object_pool *o = xmalloc(sizeof(*o)); |
557 | 56 | memset(o, 0, sizeof(*o)); |
558 | | |
559 | 56 | o->repo = repo; |
560 | 56 | o->blob_state = alloc_state_alloc(); |
561 | 56 | o->tree_state = alloc_state_alloc(); |
562 | 56 | o->commit_state = alloc_state_alloc(); |
563 | 56 | o->tag_state = alloc_state_alloc(); |
564 | 56 | o->object_state = alloc_state_alloc(); |
565 | 56 | o->is_shallow = -1; |
566 | 56 | CALLOC_ARRAY(o->shallow_stat, 1); |
567 | | |
568 | 56 | o->buffer_slab = allocate_commit_buffer_slab(); |
569 | | |
570 | 56 | return o; |
571 | 56 | } |
572 | | |
573 | | void parsed_object_pool_reset_commit_grafts(struct parsed_object_pool *o) |
574 | 56 | { |
575 | 56 | for (int i = 0; i < o->grafts_nr; i++) { |
576 | 0 | unparse_commit(o->repo, &o->grafts[i]->oid); |
577 | 0 | free(o->grafts[i]); |
578 | 0 | } |
579 | 56 | o->grafts_nr = 0; |
580 | 56 | o->commit_graft_prepared = 0; |
581 | 56 | } |
582 | | |
/*
 * Release everything a parsed_object_pool owns (the pool struct
 * itself is not freed).
 */
void parsed_object_pool_clear(struct parsed_object_pool *o)
{
	/*
	 * As objects are allocated in slabs (see alloc.c), we do
	 * not need to free each object, but each slab instead.
	 *
	 * Before doing so, we need to free any additional memory
	 * the objects may hold.
	 */
	unsigned i;

	for (i = 0; i < o->obj_hash_size; i++) {
		struct object *obj = o->obj_hash[i];

		if (!obj)
			continue;

		/* Type-specific auxiliary buffers hang off the object. */
		if (obj->type == OBJ_TREE)
			free_tree_buffer((struct tree*)obj);
		else if (obj->type == OBJ_COMMIT)
			release_commit_memory(o, (struct commit*)obj);
		else if (obj->type == OBJ_TAG)
			release_tag_memory((struct tag*)obj);
	}

	FREE_AND_NULL(o->obj_hash);
	o->obj_hash_size = 0;

	/* Cached commit buffers live in their own slab. */
	free_commit_buffer_slab(o->buffer_slab);
	o->buffer_slab = NULL;

	parsed_object_pool_reset_commit_grafts(o);
	alloc_state_free_and_null(&o->blob_state);
	alloc_state_free_and_null(&o->tree_state);
	alloc_state_free_and_null(&o->commit_state);
	alloc_state_free_and_null(&o->tag_state);
	alloc_state_free_and_null(&o->object_state);
	stat_validity_clear(o->shallow_stat);
	FREE_AND_NULL(o->shallow_stat);
}