Line | Count | Source |
1 | | #define DISABLE_SIGN_COMPARE_WARNINGS |
2 | | |
3 | | #include "git-compat-util.h" |
4 | | #include "gettext.h" |
5 | | #include "hex.h" |
6 | | #include "object.h" |
7 | | #include "replace-object.h" |
8 | | #include "object-file.h" |
9 | | #include "blob.h" |
10 | | #include "statinfo.h" |
11 | | #include "tree.h" |
12 | | #include "commit.h" |
13 | | #include "tag.h" |
14 | | #include "alloc.h" |
15 | | #include "commit-graph.h" |
16 | | |
17 | | unsigned int get_max_object_index(const struct repository *repo) |
18 | 0 | { |
19 | 0 | return repo->parsed_objects->obj_hash_size; |
20 | 0 | } |
21 | | |
22 | | struct object *get_indexed_object(const struct repository *repo, |
23 | | unsigned int idx) |
24 | 0 | { |
25 | 0 | return repo->parsed_objects->obj_hash[idx]; |
26 | 0 | } |
27 | | |
/*
 * Canonical on-disk names for the object types, indexed by the
 * corresponding "enum object_type" value. Slot 0 (OBJ_NONE) has no
 * textual representation and stays NULL.
 */
static const char *object_type_strings[] = {
	NULL,		/* OBJ_NONE = 0 */
	"commit",	/* OBJ_COMMIT = 1 */
	"tree",		/* OBJ_TREE = 2 */
	"blob",		/* OBJ_BLOB = 3 */
	"tag",		/* OBJ_TAG = 4 */
};
35 | | |
36 | | const char *type_name(unsigned int type) |
37 | 0 | { |
38 | 0 | if (type >= ARRAY_SIZE(object_type_strings)) |
39 | 0 | return NULL; |
40 | 0 | return object_type_strings[type]; |
41 | 0 | } |
42 | | |
43 | | int type_from_string_gently(const char *str, ssize_t len, int gentle) |
44 | 0 | { |
45 | 0 | int i; |
46 | |
|
47 | 0 | if (len < 0) |
48 | 0 | len = strlen(str); |
49 | |
|
50 | 0 | for (i = 1; i < ARRAY_SIZE(object_type_strings); i++) |
51 | 0 | if (!xstrncmpz(object_type_strings[i], str, len)) |
52 | 0 | return i; |
53 | | |
54 | 0 | if (gentle) |
55 | 0 | return -1; |
56 | | |
57 | 0 | die(_("invalid object type \"%s\""), str); |
58 | 0 | } |
59 | | |
/*
 * Reduce an object id to a bucket index in [0, n). "n" must be a
 * power of 2. The value depends on how oidhash() reads the bytes, so
 * it is *not* stable across computer architectures.
 */
static unsigned int hash_obj(const struct object_id *oid, unsigned int n)
{
	unsigned int mask = n - 1;
	return oidhash(oid) & mask;
}
69 | | |
70 | | /* |
71 | | * Insert obj into the hash table hash, which has length size (which |
72 | | * must be a power of 2). On collisions, simply overflow to the next |
73 | | * empty bucket. |
74 | | */ |
75 | | static void insert_obj_hash(struct object *obj, struct object **hash, unsigned int size) |
76 | 0 | { |
77 | 0 | unsigned int j = hash_obj(&obj->oid, size); |
78 | |
|
79 | 0 | while (hash[j]) { |
80 | 0 | j++; |
81 | 0 | if (j >= size) |
82 | 0 | j = 0; |
83 | 0 | } |
84 | 0 | hash[j] = obj; |
85 | 0 | } |
86 | | |
87 | | /* |
88 | | * Look up the record for the given sha1 in the hash map stored in |
89 | | * obj_hash. Return NULL if it was not found. |
90 | | */ |
91 | | struct object *lookup_object(struct repository *r, const struct object_id *oid) |
92 | 0 | { |
93 | 0 | unsigned int i, first; |
94 | 0 | struct object *obj; |
95 | |
|
96 | 0 | if (!r->parsed_objects->obj_hash) |
97 | 0 | return NULL; |
98 | | |
99 | 0 | first = i = hash_obj(oid, r->parsed_objects->obj_hash_size); |
100 | 0 | while ((obj = r->parsed_objects->obj_hash[i]) != NULL) { |
101 | 0 | if (oideq(oid, &obj->oid)) |
102 | 0 | break; |
103 | 0 | i++; |
104 | 0 | if (i == r->parsed_objects->obj_hash_size) |
105 | 0 | i = 0; |
106 | 0 | } |
107 | 0 | if (obj && i != first) { |
108 | | /* |
109 | | * Move object to where we started to look for it so |
110 | | * that we do not need to walk the hash table the next |
111 | | * time we look for it. |
112 | | */ |
113 | 0 | SWAP(r->parsed_objects->obj_hash[i], |
114 | 0 | r->parsed_objects->obj_hash[first]); |
115 | 0 | } |
116 | 0 | return obj; |
117 | 0 | } |
118 | | |
119 | | /* |
120 | | * Increase the size of the hash map stored in obj_hash to the next |
121 | | * power of 2 (but at least 32). Copy the existing values to the new |
122 | | * hash map. |
123 | | */ |
124 | | static void grow_object_hash(struct repository *r) |
125 | 0 | { |
126 | 0 | int i; |
127 | | /* |
128 | | * Note that this size must always be power-of-2 to match hash_obj |
129 | | * above. |
130 | | */ |
131 | 0 | int new_hash_size = r->parsed_objects->obj_hash_size < 32 ? 32 : 2 * r->parsed_objects->obj_hash_size; |
132 | 0 | struct object **new_hash; |
133 | |
|
134 | 0 | CALLOC_ARRAY(new_hash, new_hash_size); |
135 | 0 | for (i = 0; i < r->parsed_objects->obj_hash_size; i++) { |
136 | 0 | struct object *obj = r->parsed_objects->obj_hash[i]; |
137 | |
|
138 | 0 | if (!obj) |
139 | 0 | continue; |
140 | 0 | insert_obj_hash(obj, new_hash, new_hash_size); |
141 | 0 | } |
142 | 0 | free(r->parsed_objects->obj_hash); |
143 | 0 | r->parsed_objects->obj_hash = new_hash; |
144 | 0 | r->parsed_objects->obj_hash_size = new_hash_size; |
145 | 0 | } |
146 | | |
147 | | void *create_object(struct repository *r, const struct object_id *oid, void *o) |
148 | 0 | { |
149 | 0 | struct object *obj = o; |
150 | |
|
151 | 0 | obj->parsed = 0; |
152 | 0 | obj->flags = 0; |
153 | 0 | oidcpy(&obj->oid, oid); |
154 | |
|
155 | 0 | if (r->parsed_objects->obj_hash_size - 1 <= r->parsed_objects->nr_objs * 2) |
156 | 0 | grow_object_hash(r); |
157 | |
|
158 | 0 | insert_obj_hash(obj, r->parsed_objects->obj_hash, |
159 | 0 | r->parsed_objects->obj_hash_size); |
160 | 0 | r->parsed_objects->nr_objs++; |
161 | 0 | return obj; |
162 | 0 | } |
163 | | |
164 | | void *object_as_type(struct object *obj, enum object_type type, int quiet) |
165 | 0 | { |
166 | 0 | if (obj->type == type) |
167 | 0 | return obj; |
168 | 0 | else if (obj->type == OBJ_NONE) { |
169 | 0 | if (type == OBJ_COMMIT) |
170 | 0 | init_commit_node((struct commit *) obj); |
171 | 0 | else |
172 | 0 | obj->type = type; |
173 | 0 | return obj; |
174 | 0 | } |
175 | 0 | else { |
176 | 0 | if (!quiet) |
177 | 0 | error(_("object %s is a %s, not a %s"), |
178 | 0 | oid_to_hex(&obj->oid), |
179 | 0 | type_name(obj->type), type_name(type)); |
180 | 0 | return NULL; |
181 | 0 | } |
182 | 0 | } |
183 | | |
/*
 * Return the in-core object for "oid", creating a typeless
 * (OBJ_NONE) placeholder if it has not been seen before.
 */
struct object *lookup_unknown_object(struct repository *r, const struct object_id *oid)
{
	struct object *obj = lookup_object(r, oid);

	if (obj)
		return obj;
	return create_object(r, oid, alloc_object_node(r));
}
191 | | |
192 | | struct object *lookup_object_by_type(struct repository *r, |
193 | | const struct object_id *oid, |
194 | | enum object_type type) |
195 | 0 | { |
196 | 0 | switch (type) { |
197 | 0 | case OBJ_COMMIT: |
198 | 0 | return (struct object *)lookup_commit(r, oid); |
199 | 0 | case OBJ_TREE: |
200 | 0 | return (struct object *)lookup_tree(r, oid); |
201 | 0 | case OBJ_TAG: |
202 | 0 | return (struct object *)lookup_tag(r, oid); |
203 | 0 | case OBJ_BLOB: |
204 | 0 | return (struct object *)lookup_blob(r, oid); |
205 | 0 | default: |
206 | 0 | BUG("unknown object type %d", type); |
207 | 0 | } |
208 | 0 | } |
209 | | |
/*
 * Recursively dereference ("peel") the tag object named by "name"
 * until a non-tag object is reached, storing that object's id in
 * "oid".
 *
 * Returns PEEL_NON_TAG when "name" is not a tag at all ("oid" is left
 * untouched), PEEL_PEELED on success, and PEEL_INVALID when any link
 * in the chain cannot be parsed or fails a type check. With
 * PEEL_OBJECT_VERIFY_TAGGED_OBJECT_TYPE in "flags", each tagged
 * object's recorded type is cross-checked against the object store.
 */
enum peel_status peel_object(struct repository *r,
			     const struct object_id *name,
			     struct object_id *oid,
			     unsigned flags)
{
	struct object *o = lookup_unknown_object(r, name);

	if (o->type == OBJ_NONE) {
		/* typeless placeholder: learn its real type from the odb */
		int type = odb_read_object_info(r->objects, name, NULL);
		if (type < 0 || !object_as_type(o, type, 0))
			return PEEL_INVALID;
	}

	if (o->type != OBJ_TAG)
		return PEEL_NON_TAG;

	/* follow nested tags until something other than a tag appears */
	while (o && o->type == OBJ_TAG) {
		o = parse_object(r, &o->oid);
		if (o && o->type == OBJ_TAG && ((struct tag *)o)->tagged) {
			o = ((struct tag *)o)->tagged;

			if (flags & PEEL_OBJECT_VERIFY_TAGGED_OBJECT_TYPE) {
				int type = odb_read_object_info(r->objects, &o->oid, NULL);
				if (type < 0 || !object_as_type(o, type, 0))
					return PEEL_INVALID;
			}
		} else {
			/* unparseable tag, or tag without a tagged object */
			o = NULL;
		}
	}
	if (!o)
		return PEEL_INVALID;

	oidcpy(oid, &o->oid);
	return PEEL_PEELED;
}
246 | | |
/*
 * Turn the raw object "buffer" of the given type and size into a
 * parsed in-core object for "oid", returning it (or NULL on parse
 * failure or unknown type).
 *
 * Buffer ownership: *eaten_p is set to 1 when this function has
 * taken over "buffer" (a tree keeping its contents, or a commit
 * buffer that was cached); the caller must then NOT free it.
 * Otherwise *eaten_p stays 0 and the buffer remains the caller's to
 * release.
 */
struct object *parse_object_buffer(struct repository *r, const struct object_id *oid, enum object_type type, unsigned long size, void *buffer, int *eaten_p)
{
	struct object *obj;
	*eaten_p = 0;

	obj = NULL;
	if (type == OBJ_BLOB) {
		/* blob contents are never retained in-core */
		struct blob *blob = lookup_blob(r, oid);
		if (blob) {
			parse_blob_buffer(blob);
			obj = &blob->object;
		}
	} else if (type == OBJ_TREE) {
		struct tree *tree = lookup_tree(r, oid);
		if (tree) {
			obj = &tree->object;
			/* buffer was discarded earlier: force a re-parse */
			if (!tree->buffer)
				tree->object.parsed = 0;
			if (!tree->object.parsed) {
				if (parse_tree_buffer(tree, buffer, size))
					return NULL;
				/* the tree now owns the buffer */
				*eaten_p = 1;
			}
		}
	} else if (type == OBJ_COMMIT) {
		struct commit *commit = lookup_commit(r, oid);
		if (commit) {
			if (parse_commit_buffer(r, commit, buffer, size, 1))
				return NULL;
			/* optionally cache the raw buffer for later reuse */
			if (save_commit_buffer &&
			    !get_cached_commit_buffer(r, commit, NULL)) {
				set_commit_buffer(r, commit, buffer, size);
				*eaten_p = 1;
			}
			obj = &commit->object;
		}
	} else if (type == OBJ_TAG) {
		struct tag *tag = lookup_tag(r, oid);
		if (tag) {
			if (parse_tag_buffer(r, tag, buffer, size))
				return NULL;
			obj = &tag->object;
		}
	} else {
		warning(_("object %s has unknown type id %d"), oid_to_hex(oid), type);
		obj = NULL;
	}
	return obj;
}
296 | | |
/*
 * Like parse_object(), but die() with a message naming the object
 * (falling back to its hex id when "name" is NULL) instead of
 * returning NULL.
 */
struct object *parse_object_or_die(struct repository *repo,
				   const struct object_id *oid,
				   const char *name)
{
	struct object *o = parse_object(repo, oid);

	if (!o)
		die(_("unable to parse object: %s"), name ? name : oid_to_hex(oid));
	return o;
}
307 | | |
/*
 * Return the parsed in-core object for "oid", reading and parsing it
 * from the object store if needed; NULL when the object is missing
 * or fails verification.
 *
 * Flags:
 *   PARSE_OBJECT_SKIP_HASH_CHECK - trust the stored data and skip
 *       content re-hashing (also enables the commit-graph fast path).
 *   PARSE_OBJECT_DISCARD_TREE    - do not keep tree buffers in
 *       memory after parsing.
 */
struct object *parse_object_with_flags(struct repository *r,
				       const struct object_id *oid,
				       enum parse_object_flags flags)
{
	int skip_hash = !!(flags & PARSE_OBJECT_SKIP_HASH_CHECK);
	int discard_tree = !!(flags & PARSE_OBJECT_DISCARD_TREE);
	unsigned long size;
	enum object_type type;
	int eaten;
	const struct object_id *repl = lookup_replace_object(r, oid);
	void *buffer;
	struct object *obj;

	obj = lookup_object(r, oid);
	if (obj && obj->parsed)
		return obj;

	/* fast path: a commit found in the commit-graph needs no odb read */
	if (skip_hash) {
		struct commit *commit = lookup_commit_in_graph(r, repl);
		if (commit)
			return &commit->object;
	}

	/* blobs can be hash-checked by streaming, avoiding a full read */
	if ((!obj || obj->type == OBJ_NONE || obj->type == OBJ_BLOB) &&
	    odb_read_object_info(r->objects, oid, NULL) == OBJ_BLOB) {
		if (!skip_hash && stream_object_signature(r, repl) < 0) {
			error(_("hash mismatch %s"), oid_to_hex(oid));
			return NULL;
		}
		parse_blob_buffer(lookup_blob(r, oid));
		return lookup_object(r, oid);
	}

	/*
	 * If the caller does not care about the tree buffer and does not
	 * care about checking the hash, we can simply verify that we
	 * have the on-disk object with the correct type.
	 */
	if (skip_hash && discard_tree &&
	    (!obj || obj->type == OBJ_NONE || obj->type == OBJ_TREE) &&
	    odb_read_object_info(r->objects, oid, NULL) == OBJ_TREE) {
		return &lookup_tree(r, oid)->object;
	}

	buffer = odb_read_object(r->objects, oid, &type, &size);
	if (buffer) {
		if (!skip_hash &&
		    check_object_signature(r, repl, buffer, size, type) < 0) {
			free(buffer);
			error(_("hash mismatch %s"), oid_to_hex(repl));
			return NULL;
		}

		/* parse_object_buffer() sets "eaten" if it kept the buffer */
		obj = parse_object_buffer(r, oid, type, size,
					  buffer, &eaten);
		if (!eaten)
			free(buffer);
		if (discard_tree && type == OBJ_TREE)
			free_tree_buffer((struct tree *)obj);
		return obj;
	}
	return NULL;
}
371 | | |
/*
 * Convenience wrapper for parse_object_with_flags() with default
 * behavior: verify the hash, retain tree buffers.
 */
struct object *parse_object(struct repository *r, const struct object_id *oid)
{
	return parse_object_with_flags(r, oid, 0);
}
376 | | |
377 | | struct object_list *object_list_insert(struct object *item, |
378 | | struct object_list **list_p) |
379 | 0 | { |
380 | 0 | struct object_list *new_list = xmalloc(sizeof(struct object_list)); |
381 | 0 | new_list->item = item; |
382 | 0 | new_list->next = *list_p; |
383 | 0 | *list_p = new_list; |
384 | 0 | return new_list; |
385 | 0 | } |
386 | | |
387 | | int object_list_contains(struct object_list *list, struct object *obj) |
388 | 0 | { |
389 | 0 | while (list) { |
390 | 0 | if (list->item == obj) |
391 | 0 | return 1; |
392 | 0 | list = list->next; |
393 | 0 | } |
394 | 0 | return 0; |
395 | 0 | } |
396 | | |
397 | | void object_list_free(struct object_list **list) |
398 | 0 | { |
399 | 0 | while (*list) { |
400 | 0 | struct object_list *p = *list; |
401 | 0 | *list = p->next; |
402 | 0 | free(p); |
403 | 0 | } |
404 | 0 | } |
405 | | |
/*
 * A shared zero-length string to which object_array_entry::name can
 * be initialized without requiring a malloc/free. Entries using it
 * are recognized by pointer identity and must never be free()d.
 */
static char object_array_slopbuf[1];
411 | | |
412 | | void object_array_init(struct object_array *array) |
413 | 0 | { |
414 | 0 | struct object_array blank = OBJECT_ARRAY_INIT; |
415 | 0 | memcpy(array, &blank, sizeof(*array)); |
416 | 0 | } |
417 | | |
418 | | void add_object_array_with_path(struct object *obj, const char *name, |
419 | | struct object_array *array, |
420 | | unsigned mode, const char *path) |
421 | 0 | { |
422 | 0 | unsigned nr = array->nr; |
423 | 0 | unsigned alloc = array->alloc; |
424 | 0 | struct object_array_entry *objects = array->objects; |
425 | 0 | struct object_array_entry *entry; |
426 | |
|
427 | 0 | if (nr >= alloc) { |
428 | 0 | alloc = (alloc + 32) * 2; |
429 | 0 | REALLOC_ARRAY(objects, alloc); |
430 | 0 | array->alloc = alloc; |
431 | 0 | array->objects = objects; |
432 | 0 | } |
433 | 0 | entry = &objects[nr]; |
434 | 0 | entry->item = obj; |
435 | 0 | if (!name) |
436 | 0 | entry->name = NULL; |
437 | 0 | else if (!*name) |
438 | | /* Use our own empty string instead of allocating one: */ |
439 | 0 | entry->name = object_array_slopbuf; |
440 | 0 | else |
441 | 0 | entry->name = xstrdup(name); |
442 | 0 | entry->mode = mode; |
443 | 0 | if (path) |
444 | 0 | entry->path = xstrdup(path); |
445 | 0 | else |
446 | 0 | entry->path = NULL; |
447 | 0 | array->nr = ++nr; |
448 | 0 | } |
449 | | |
450 | | void add_object_array(struct object *obj, const char *name, struct object_array *array) |
451 | 0 | { |
452 | 0 | add_object_array_with_path(obj, name, array, S_IFINVALID, NULL); |
453 | 0 | } |
454 | | |
/*
 * Free all memory associated with an entry; the result is
 * in an unspecified state and should not be examined.
 *
 * Note that ent->name may point at the shared object_array_slopbuf,
 * which must never be passed to free().
 */
static void object_array_release_entry(struct object_array_entry *ent)
{
	if (ent->name != object_array_slopbuf)
		free(ent->name);
	free(ent->path);
}
465 | | |
466 | | struct object *object_array_pop(struct object_array *array) |
467 | 0 | { |
468 | 0 | struct object *ret; |
469 | |
|
470 | 0 | if (!array->nr) |
471 | 0 | return NULL; |
472 | | |
473 | 0 | ret = array->objects[array->nr - 1].item; |
474 | 0 | object_array_release_entry(&array->objects[array->nr - 1]); |
475 | 0 | array->nr--; |
476 | 0 | return ret; |
477 | 0 | } |
478 | | |
479 | | void object_array_filter(struct object_array *array, |
480 | | object_array_each_func_t want, void *cb_data) |
481 | 0 | { |
482 | 0 | unsigned nr = array->nr, src, dst; |
483 | 0 | struct object_array_entry *objects = array->objects; |
484 | |
|
485 | 0 | for (src = dst = 0; src < nr; src++) { |
486 | 0 | if (want(&objects[src], cb_data)) { |
487 | 0 | if (src != dst) |
488 | 0 | objects[dst] = objects[src]; |
489 | 0 | dst++; |
490 | 0 | } else { |
491 | 0 | object_array_release_entry(&objects[src]); |
492 | 0 | } |
493 | 0 | } |
494 | 0 | array->nr = dst; |
495 | 0 | } |
496 | | |
497 | | void object_array_clear(struct object_array *array) |
498 | 0 | { |
499 | 0 | int i; |
500 | 0 | for (i = 0; i < array->nr; i++) |
501 | 0 | object_array_release_entry(&array->objects[i]); |
502 | 0 | FREE_AND_NULL(array->objects); |
503 | 0 | array->nr = array->alloc = 0; |
504 | 0 | } |
505 | | |
506 | | void clear_object_flags(struct repository *repo, unsigned flags) |
507 | 0 | { |
508 | 0 | int i; |
509 | |
|
510 | 0 | for (i = 0; i < repo->parsed_objects->obj_hash_size; i++) { |
511 | 0 | struct object *obj = repo->parsed_objects->obj_hash[i]; |
512 | 0 | if (obj) |
513 | 0 | obj->flags &= ~flags; |
514 | 0 | } |
515 | 0 | } |
516 | | |
517 | | void repo_clear_commit_marks(struct repository *r, unsigned int flags) |
518 | 0 | { |
519 | 0 | int i; |
520 | |
|
521 | 0 | for (i = 0; i < r->parsed_objects->obj_hash_size; i++) { |
522 | 0 | struct object *obj = r->parsed_objects->obj_hash[i]; |
523 | 0 | if (obj && obj->type == OBJ_COMMIT) |
524 | 0 | obj->flags &= ~flags; |
525 | 0 | } |
526 | 0 | } |
527 | | |
528 | | struct parsed_object_pool *parsed_object_pool_new(struct repository *repo) |
529 | 59 | { |
530 | 59 | struct parsed_object_pool *o = xmalloc(sizeof(*o)); |
531 | 59 | memset(o, 0, sizeof(*o)); |
532 | | |
533 | 59 | o->repo = repo; |
534 | 59 | o->blob_state = alloc_state_alloc(); |
535 | 59 | o->tree_state = alloc_state_alloc(); |
536 | 59 | o->commit_state = alloc_state_alloc(); |
537 | 59 | o->tag_state = alloc_state_alloc(); |
538 | 59 | o->object_state = alloc_state_alloc(); |
539 | 59 | o->is_shallow = -1; |
540 | 59 | CALLOC_ARRAY(o->shallow_stat, 1); |
541 | | |
542 | 59 | o->buffer_slab = allocate_commit_buffer_slab(); |
543 | | |
544 | 59 | return o; |
545 | 59 | } |
546 | | |
547 | | void parsed_object_pool_reset_commit_grafts(struct parsed_object_pool *o) |
548 | 59 | { |
549 | 59 | for (int i = 0; i < o->grafts_nr; i++) { |
550 | 0 | unparse_commit(o->repo, &o->grafts[i]->oid); |
551 | 0 | free(o->grafts[i]); |
552 | 0 | } |
553 | 59 | o->grafts_nr = 0; |
554 | 59 | o->commit_graft_prepared = 0; |
555 | 59 | } |
556 | | |
/*
 * Release every allocation owned by the pool: per-object auxiliary
 * buffers, the object hash table, the commit-buffer slab, commit
 * grafts, the per-type allocator slabs, and the shallow
 * bookkeeping. The pool struct itself is not freed here.
 */
void parsed_object_pool_clear(struct parsed_object_pool *o)
{
	/*
	 * As objects are allocated in slabs (see alloc.c), we do
	 * not need to free each object, but each slab instead.
	 *
	 * Before doing so, we need to free any additional memory
	 * the objects may hold.
	 */
	unsigned i;

	for (i = 0; i < o->obj_hash_size; i++) {
		struct object *obj = o->obj_hash[i];

		if (!obj)
			continue;

		/* per-type auxiliary memory hanging off the object */
		if (obj->type == OBJ_TREE)
			free_tree_buffer((struct tree*)obj);
		else if (obj->type == OBJ_COMMIT)
			release_commit_memory(o, (struct commit*)obj);
		else if (obj->type == OBJ_TAG)
			release_tag_memory((struct tag*)obj);
	}

	FREE_AND_NULL(o->obj_hash);
	o->obj_hash_size = 0;

	free_commit_buffer_slab(o->buffer_slab);
	o->buffer_slab = NULL;

	/* grafts before the allocator slabs: unparse_commit touches objects */
	parsed_object_pool_reset_commit_grafts(o);
	alloc_state_free_and_null(&o->blob_state);
	alloc_state_free_and_null(&o->tree_state);
	alloc_state_free_and_null(&o->commit_state);
	alloc_state_free_and_null(&o->tag_state);
	alloc_state_free_and_null(&o->object_state);
	/* clear before free: stat_validity_clear reads through the pointer */
	stat_validity_clear(o->shallow_stat);
	FREE_AND_NULL(o->shallow_stat);
}