Line | Count | Source |
1 | | /* |
2 | | * alloc.c - specialized allocator for internal objects |
3 | | * |
4 | | * Copyright (C) 2006 Linus Torvalds |
5 | | * |
6 | | * The standard malloc/free wastes too much space for objects, partly because |
7 | | * it maintains all the allocation infrastructure, but even more because it ends |
8 | | * up with maximal alignment because it doesn't know what the object alignment |
9 | | * for the new allocation is. |
10 | | */ |
11 | | #include "git-compat-util.h" |
12 | | #include "object.h" |
13 | | #include "blob.h" |
14 | | #include "tree.h" |
15 | | #include "commit.h" |
16 | | #include "repository.h" |
17 | | #include "tag.h" |
18 | | #include "alloc.h" |
19 | | |
/* Number of nodes carved out of each freshly xmalloc'd slab. */
#define BLOCKING 1024
21 | | |
/*
 * Union of every parsed-object representation; sizeof(union any_object)
 * is therefore large enough to hold any of them, which is what
 * alloc_object_node() allocates when the concrete type is not yet known.
 */
union any_object {
	struct object object;
	struct blob blob;
	struct tree tree;
	struct commit commit;
	struct tag tag;
};
29 | | |
/*
 * Per-object-type allocator state: nodes are handed out sequentially
 * from the current slab; every slab ever allocated is remembered in
 * "slabs" so alloc_state_free_and_null() can release them.
 */
struct alloc_state {
	int nr; /* number of nodes left in current allocation */
	void *p; /* first free node in current allocation */

	/* bookkeeping of allocations */
	void **slabs;
	int slab_nr, slab_alloc;
};
38 | | |
39 | | struct alloc_state *alloc_state_alloc(void) |
40 | 0 | { |
41 | 0 | return xcalloc(1, sizeof(struct alloc_state)); |
42 | 0 | } |
43 | | |
44 | | void alloc_state_free_and_null(struct alloc_state **s_) |
45 | 0 | { |
46 | 0 | struct alloc_state *s = *s_; |
47 | |
|
48 | 0 | if (!s) |
49 | 0 | return; |
50 | | |
51 | 0 | while (s->slab_nr > 0) { |
52 | 0 | s->slab_nr--; |
53 | 0 | free(s->slabs[s->slab_nr]); |
54 | 0 | } |
55 | |
|
56 | 0 | FREE_AND_NULL(s->slabs); |
57 | 0 | FREE_AND_NULL(*s_); |
58 | 0 | } |
59 | | |
60 | | static inline void *alloc_node(struct alloc_state *s, size_t node_size) |
61 | 0 | { |
62 | 0 | void *ret; |
63 | |
|
64 | 0 | if (!s->nr) { |
65 | 0 | s->nr = BLOCKING; |
66 | 0 | s->p = xmalloc(BLOCKING * node_size); |
67 | |
|
68 | 0 | ALLOC_GROW(s->slabs, s->slab_nr + 1, s->slab_alloc); |
69 | 0 | s->slabs[s->slab_nr++] = s->p; |
70 | 0 | } |
71 | 0 | s->nr--; |
72 | 0 | ret = s->p; |
73 | 0 | s->p = (char *)s->p + node_size; |
74 | 0 | memset(ret, 0, node_size); |
75 | |
|
76 | 0 | return ret; |
77 | 0 | } |
78 | | |
79 | | void *alloc_blob_node(struct repository *r) |
80 | 0 | { |
81 | 0 | struct blob *b = alloc_node(r->parsed_objects->blob_state, sizeof(struct blob)); |
82 | 0 | b->object.type = OBJ_BLOB; |
83 | 0 | return b; |
84 | 0 | } |
85 | | |
86 | | void *alloc_tree_node(struct repository *r) |
87 | 0 | { |
88 | 0 | struct tree *t = alloc_node(r->parsed_objects->tree_state, sizeof(struct tree)); |
89 | 0 | t->object.type = OBJ_TREE; |
90 | 0 | return t; |
91 | 0 | } |
92 | | |
93 | | void *alloc_tag_node(struct repository *r) |
94 | 0 | { |
95 | 0 | struct tag *t = alloc_node(r->parsed_objects->tag_state, sizeof(struct tag)); |
96 | 0 | t->object.type = OBJ_TAG; |
97 | 0 | return t; |
98 | 0 | } |
99 | | |
100 | | void *alloc_object_node(struct repository *r) |
101 | 0 | { |
102 | 0 | struct object *obj = alloc_node(r->parsed_objects->object_state, sizeof(union any_object)); |
103 | 0 | obj->type = OBJ_NONE; |
104 | 0 | return obj; |
105 | 0 | } |
106 | | |
/*
 * Hand out the next commit index. The counter is deliberately a single
 * process-wide global (not per repository), because the indices are used
 * with commit slabs, which are themselves not maintained per repository.
 */
static unsigned int alloc_commit_index(void)
{
	static unsigned int parsed_commits_count;
	unsigned int index = parsed_commits_count;

	parsed_commits_count++;
	return index;
}
117 | | |
118 | | void init_commit_node(struct commit *c) |
119 | 0 | { |
120 | 0 | c->object.type = OBJ_COMMIT; |
121 | 0 | c->index = alloc_commit_index(); |
122 | 0 | } |
123 | | |
124 | | void *alloc_commit_node(struct repository *r) |
125 | 0 | { |
126 | 0 | struct commit *c = alloc_node(r->parsed_objects->commit_state, sizeof(struct commit)); |
127 | 0 | init_commit_node(c); |
128 | 0 | return c; |
129 | 0 | } |