/*
 * alloc.c - specialized allocator for internal objects
 *
 * Copyright (C) 2006 Linus Torvalds
 *
 * The standard malloc/free wastes too much space for objects, partly
 * because it maintains all the allocation infrastructure, but even more
 * because it ends up with maximal alignment, since it does not know
 * what alignment the new allocation actually needs.
 */
#include "git-compat-util.h"
#include "object.h"
#include "blob.h"
#include "tree.h"
#include "commit.h"
#include "repository.h"
#include "tag.h"
#include "alloc.h"

#define BLOCKING 1024
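
/*
 * alloc_node() below hands nodes out BLOCKING at a time from larger
 * slabs, so the underlying allocator's bookkeeping and alignment
 * overhead is paid once per slab rather than once per object.
 */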

union any_object {
	struct object object;
	struct blob blob;
	struct tree tree;
	struct commit commit;
	struct tag tag;
};
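
/*
 * One alloc_state per object type: the per-repository instances hang
 * off r->parsed_objects and are created with allocate_alloc_state().
 */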
struct alloc_state {
	int nr;    /* number of nodes left in current allocation */
	void *p;   /* first free node in current allocation */

	/* bookkeeping of allocations */
	void **slabs;
	int slab_nr, slab_alloc;
};

struct alloc_state *allocate_alloc_state(void)
{
	return xcalloc(1, sizeof(struct alloc_state));
}
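
/*
 * Free every slab at once. Individual nodes are never freed, so all
 * objects handed out from this state become invalid together.
 */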
void clear_alloc_state(struct alloc_state *s)
{
	while (s->slab_nr > 0) {
		s->slab_nr--;
		free(s->slabs[s->slab_nr]);
	}

	FREE_AND_NULL(s->slabs);
}
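
/*
 * Bump-pointer allocation: carve the next node off the current slab,
 * starting a new slab of BLOCKING nodes when the current one runs out,
 * and return it zeroed.
 */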
static inline void *alloc_node(struct alloc_state *s, size_t node_size)
{
	void *ret;

	if (!s->nr) {
		s->nr = BLOCKING;
		s->p = xmalloc(BLOCKING * node_size);

		ALLOC_GROW(s->slabs, s->slab_nr + 1, s->slab_alloc);
		s->slabs[s->slab_nr++] = s->p;
	}
	s->nr--;
	ret = s->p;
	s->p = (char *)s->p + node_size;
	memset(ret, 0, node_size);

	return ret;
}
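
/*
 * Typed wrappers: each pairs the per-type alloc_state from
 * r->parsed_objects with the matching struct size and stamps the
 * object type on the freshly zeroed node.
 */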
void *alloc_blob_node(struct repository *r)
{
	struct blob *b = alloc_node(r->parsed_objects->blob_state, sizeof(struct blob));
	b->object.type = OBJ_BLOB;
	return b;
}

void *alloc_tree_node(struct repository *r)
{
	struct tree *t = alloc_node(r->parsed_objects->tree_state, sizeof(struct tree));
	t->object.type = OBJ_TREE;
	return t;
}

void *alloc_tag_node(struct repository *r)
{
	struct tag *t = alloc_node(r->parsed_objects->tag_state, sizeof(struct tag));
	t->object.type = OBJ_TAG;
	return t;
}
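
/*
 * A "blank" object node is sized as union any_object, large enough for
 * any concrete type, so a node allocated as OBJ_NONE can later become
 * whichever type parsing reveals.
 */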
void *alloc_object_node(struct repository *r)
{
	struct object *obj = alloc_node(r->parsed_objects->object_state, sizeof(union any_object));
	obj->type = OBJ_NONE;
	return obj;
}

/*
 * The returned count is used as an index into commit slabs, which are
 * *NOT* maintained per repository; that is why a single global counter
 * is used.
 */
static unsigned int alloc_commit_index(void)
{
	static unsigned int parsed_commits_count;
	return parsed_commits_count++;
}
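
/*
 * Exposed separately from alloc_commit_node(), presumably so that
 * commit structs obtained outside this allocator can be initialized
 * the same way.
 */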
void init_commit_node(struct commit *c)
{
	c->object.type = OBJ_COMMIT;
	c->index = alloc_commit_index();
}

void *alloc_commit_node(struct repository *r)
{
	struct commit *c = alloc_node(r->parsed_objects->commit_state, sizeof(struct commit));
	init_commit_node(c);
	return c;
}
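
/*
 * A minimal usage sketch (a hypothetical caller; the object-hash
 * bookkeeping that object.c layers on top of these helpers is
 * omitted):
 *
 *	struct commit *c = alloc_commit_node(the_repository);
 *	struct blob *b = alloc_blob_node(the_repository);
 *	...
 *	// later, e.g. when tearing down the parsed-object pool:
 *	clear_alloc_state(the_repository->parsed_objects->commit_state);
 *	clear_alloc_state(the_repository->parsed_objects->blob_state);
 */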