/src/lvm2/libdm/mm/pool-fast.c
/*
 * Copyright (C) 2001-2004 Sistina Software, Inc. All rights reserved.
 * Copyright (C) 2004-2011 Red Hat, Inc. All rights reserved.
 *
 * This file is part of the device-mapper userspace tools.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU Lesser General Public License v.2.1.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#ifdef VALGRIND_POOL
#include "memcheck.h"
#endif

#include "libdm/misc/dmlib.h"
#include <stddef.h> /* For musl libc */
#include <malloc.h>

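/*
 * Note: this file is not compiled on its own; libdm/mm/pool.c #includes
 * it and provides the shared state referenced below (_dm_pools,
 * _dm_pools_mutex and, for DEBUG_ENFORCE_POOL_LOCKING builds,
 * _pagesize/_pagesize_mask).
 */

/*
 * A chunk is a single allocation: this header followed by its data
 * area.  'begin' points at the first free byte, 'end' one past the last
 * usable byte, and 'prev' links the pool's chunks newest-first.
 */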
struct chunk {
	char *begin, *end;
	struct chunk *prev;
} __attribute__((aligned(8)));

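/*
 * 'list' links the pool into the global _dm_pools list; 'chunk' is the
 * current (newest) chunk; 'object_len'/'object_alignment' track an
 * object being built by dm_pool_begin_object(); 'locked'/'crc' back the
 * pool-locking debug support in pool.c.
 */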
struct dm_pool {
	struct dm_list list;
	struct chunk *chunk, *spare_chunk;	/* spare_chunk is a one entry free
						   list to stop 'bobbling' */
	const char *name;
	size_t chunk_size;
	size_t object_len;
	unsigned object_alignment;
	int locked;
	long crc;
};

static void _align_chunk(struct chunk *c, unsigned alignment);
static struct chunk *_new_chunk(struct dm_pool *p, size_t s);
static void _free_chunk(struct chunk *c);

/* by default things come out aligned for doubles */
#define DEFAULT_ALIGNMENT __alignof__ (double)

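/*
 * A minimal usage sketch (error handling beyond the NULL check is
 * elided; every call used here is defined in this file):
 *
 *	struct dm_pool *mem = dm_pool_create("example", 1024);
 *
 *	if (mem) {
 *		char *buf = dm_pool_alloc(mem, 64);
 *		...
 *		dm_pool_destroy(mem);	 (frees every allocation at once)
 *	}
 */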
struct dm_pool *dm_pool_create(const char *name, size_t chunk_hint)
{
	size_t new_size = 1024;
	struct dm_pool *p = dm_zalloc(sizeof(*p));

	if (!p) {
		log_error("Couldn't create memory pool %s (size %"
			  PRIsize_t ")", name, sizeof(*p));
		return NULL;
	}

	p->name = name;
	/* round the chunk size (hint + chunk header) up to the next
	   power of 2, with a floor of 1024 bytes */
	p->chunk_size = chunk_hint + sizeof(struct chunk);
	while (new_size < p->chunk_size)
		new_size <<= 1;
	p->chunk_size = new_size;
	pthread_mutex_lock(&_dm_pools_mutex);
	dm_list_add(&_dm_pools, &p->list);
	pthread_mutex_unlock(&_dm_pools_mutex);
	return p;
}

void dm_pool_destroy(struct dm_pool *p)
{
	struct chunk *c, *pr;
	_free_chunk(p->spare_chunk);
	c = p->chunk;
	while (c) {
		pr = c->prev;
		_free_chunk(c);
		c = pr;
	}

	pthread_mutex_lock(&_dm_pools_mutex);
	dm_list_del(&p->list);
	pthread_mutex_unlock(&_dm_pools_mutex);
	dm_free(p);
}

void *dm_pool_alloc(struct dm_pool *p, size_t s)
{
	return dm_pool_alloc_aligned(p, s, DEFAULT_ALIGNMENT);
}

void *dm_pool_alloc_aligned(struct dm_pool *p, size_t s, unsigned alignment)
{
	struct chunk *c = p->chunk;
	void *r;

	/* realign begin */
	if (c)
		_align_chunk(c, alignment);

	/* have we got room ? */
	if (!c || (c->begin > c->end) || ((c->end - c->begin) < (int) s)) {
		/* allocate new chunk */
		size_t needed = s + alignment + sizeof(struct chunk);
		c = _new_chunk(p, (needed > p->chunk_size) ?
			       needed : p->chunk_size);

		if (!c)
			return_NULL;

		_align_chunk(c, alignment);
	}

	r = c->begin;
	c->begin += s;

#ifdef VALGRIND_POOL
	VALGRIND_MAKE_MEM_UNDEFINED(r, s);
#endif

	return r;
}

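/*
 * Reset the pool without returning memory to the system: walk back to
 * the oldest chunk and free from its first data byte, so the oldest
 * chunk is rewound to empty and newer chunks are released (one is kept
 * as the spare).
 */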
void dm_pool_empty(struct dm_pool *p)
{
	struct chunk *c;

	for (c = p->chunk; c && c->prev; c = c->prev)
		;

	if (c)
		dm_pool_free(p, (char *) (c + 1));
}

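/*
 * Free in LIFO order: 'ptr' and everything allocated after it are
 * discarded.  Chunks newer than the one containing 'ptr' are released,
 * with one retained as 'spare_chunk' to avoid malloc/free churn on the
 * next allocation.
 */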
void dm_pool_free(struct dm_pool *p, void *ptr)
{
	struct chunk *c = p->chunk;

	while (c) {
		if (((char *) c < (char *) ptr) &&
		    ((char *) c->end > (char *) ptr)) {
			c->begin = ptr;
#ifdef VALGRIND_POOL
			VALGRIND_MAKE_MEM_NOACCESS(c->begin, c->end - c->begin);
#endif
			break;
		}

		if (p->spare_chunk)
			_free_chunk(p->spare_chunk);

		c->begin = (char *) (c + 1);
#ifdef VALGRIND_POOL
		VALGRIND_MAKE_MEM_NOACCESS(c->begin, c->end - c->begin);
#endif

		p->spare_chunk = c;
		c = c->prev;
	}

	if (!c)
		log_error(INTERNAL_ERROR "pool_free asked to free pointer "
			  "not in pool");
	else
		p->chunk = c;
}

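/*
 * Object building: dm_pool_begin_object() reserves space, each
 * dm_pool_grow_object() appends bytes (delta == 0 means treat 'extra'
 * as a NUL-terminated string), and dm_pool_end_object() commits the
 * buffer and returns it.  A sketch of the usual idiom (return values
 * unchecked here for brevity; real callers must test each one):
 *
 *	dm_pool_begin_object(mem, 32);
 *	dm_pool_grow_object(mem, "hello, ", 0);
 *	dm_pool_grow_object(mem, "world", 0);
 *	dm_pool_grow_object(mem, "\0", 1);
 *	str = dm_pool_end_object(mem);
 */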
int dm_pool_begin_object(struct dm_pool *p, size_t hint)
{
	struct chunk *c = p->chunk;
	const size_t align = DEFAULT_ALIGNMENT;

	p->object_len = 0;
	p->object_alignment = align;

	if (c)
		_align_chunk(c, align);

	if (!c || (c->begin > c->end) || ((c->end - c->begin) < (int) hint)) {
		/* allocate a new chunk */
		c = _new_chunk(p,
			       hint > (p->chunk_size - sizeof(struct chunk)) ?
			       hint + sizeof(struct chunk) + align :
			       p->chunk_size);

		if (!c)
			return 0;

		_align_chunk(c, align);
	}

	return 1;
}

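/*
 * If the growing object no longer fits its chunk, the partial object is
 * copied into a fresh chunk: twice the object's new size when it already
 * dominates the chunk, otherwise a default-sized chunk.  The old copy is
 * abandoned in place and reclaimed when the pool is freed or emptied.
 */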
int dm_pool_grow_object(struct dm_pool *p, const void *extra, size_t delta)
{
	struct chunk *c = p->chunk, *nc;

	if (!delta)
		delta = strlen(extra);

	if ((c->end - (c->begin + p->object_len)) < (int) delta) {
		/* move into a new chunk */
		if (p->object_len + delta > (p->chunk_size / 2))
			nc = _new_chunk(p, (p->object_len + delta) * 2);
		else
			nc = _new_chunk(p, p->chunk_size);

		if (!nc)
			return 0;

		_align_chunk(p->chunk, p->object_alignment);

#ifdef VALGRIND_POOL
		VALGRIND_MAKE_MEM_UNDEFINED(p->chunk->begin, p->object_len);
#endif

		memcpy(p->chunk->begin, c->begin, p->object_len);

#ifdef VALGRIND_POOL
		VALGRIND_MAKE_MEM_NOACCESS(c->begin, p->object_len);
#endif

		c = p->chunk;
	}

#ifdef VALGRIND_POOL
	VALGRIND_MAKE_MEM_UNDEFINED(p->chunk->begin + p->object_len, delta);
#endif

	memcpy(c->begin + p->object_len, extra, delta);
	p->object_len += delta;
	return 1;
}

void *dm_pool_end_object(struct dm_pool *p)
{
	struct chunk *c = p->chunk;
	void *r = c->begin;
	c->begin += p->object_len;
	p->object_len = 0u;
	p->object_alignment = DEFAULT_ALIGNMENT;
	return r;
}

void dm_pool_abandon_object(struct dm_pool *p)
{
#ifdef VALGRIND_POOL
	VALGRIND_MAKE_MEM_NOACCESS(p->chunk, p->object_len);
#endif
	p->object_len = 0;
	p->object_alignment = DEFAULT_ALIGNMENT;
}

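/*
 * Round 'begin' up to 'alignment' (a power of two).  Note this always
 * advances 'begin' by at least one byte, even when it is already
 * aligned; callers allow for that by over-reserving 'alignment' extra
 * bytes when sizing a new chunk.
 */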
static void _align_chunk(struct chunk *c, unsigned alignment)
{
	c->begin += alignment - ((unsigned long) c->begin & (alignment - 1));
}

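/*
 * Grab a chunk of at least 's' bytes: reuse the spare chunk when it is
 * big enough, otherwise allocate.  Under DEBUG_ENFORCE_POOL_LOCKING the
 * allocation is page-aligned and page-sized so _pool_protect() can
 * mprotect() whole chunks.
 */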
static struct chunk *_new_chunk(struct dm_pool *p, size_t s)
{
	struct chunk *c;

	if (p->spare_chunk &&
	    ((p->spare_chunk->end - p->spare_chunk->begin) >= (ptrdiff_t) s)) {
		/* reuse old chunk */
		c = p->spare_chunk;
		p->spare_chunk = 0;
	} else {
#ifdef DEBUG_ENFORCE_POOL_LOCKING
		if (!_pagesize) {
			_pagesize = getpagesize(); /* lvm_pagesize(); */
			_pagesize_mask = _pagesize - 1;
		}
		/*
		 * Allocate a page-aligned block rounded up to whole pages,
		 * so that mprotect()ing the chunk cannot cover memory that
		 * malloc's internal bookkeeping still writes to.
		 */
# define aligned_malloc(s)	(posix_memalign((void **) &c, _pagesize, \
					ALIGN_ON_PAGE(s)) == 0)
#else
# define aligned_malloc(s)	(c = dm_malloc(s))
#endif /* DEBUG_ENFORCE_POOL_LOCKING */
		if (!aligned_malloc(s)) {
#undef aligned_malloc
			log_error("Out of memory. Requested %" PRIsize_t
				  " bytes.", s);
			return NULL;
		}

		c->begin = (char *) (c + 1);
		c->end = (char *) c + s;

#ifdef VALGRIND_POOL
		VALGRIND_MAKE_MEM_NOACCESS(c->begin, c->end - c->begin);
#endif
	}

	c->prev = p->chunk;
	p->chunk = c;
	return c;
}

static void _free_chunk(struct chunk *c)
{
#ifdef VALGRIND_POOL
# ifdef DEBUG_MEM
	if (c)
		VALGRIND_MAKE_MEM_UNDEFINED(c + 1, c->end - (char *) (c + 1));
# endif
#endif
#ifdef DEBUG_ENFORCE_POOL_LOCKING
	/* since DEBUG_MEM is using own memory list */
	free(c); /* for posix_memalign() */
#else
	dm_free(c);
#endif
}

/**
 * Calculate a simple hash over the pool's memory chunks (chunk headers
 * and internal pointers included); used to detect modification of a
 * locked pool.
 */
static long _pool_crc(const struct dm_pool *p)
{
	long crc_hash = 0;
#ifndef DEBUG_ENFORCE_POOL_LOCKING
	const struct chunk *c;
	const long *ptr, *end;

	for (c = p->chunk; c; c = c->prev) {
		end = (const long *) (c->begin < c->end ? (long) c->begin & ~7 : (long) c->end);
		ptr = (const long *) c;
#ifdef VALGRIND_POOL
		VALGRIND_MAKE_MEM_DEFINED(ptr, (end - ptr) * sizeof(*end));
#endif
		while (ptr < end) {
			crc_hash += *ptr++;
			crc_hash += (crc_hash << 10);
			crc_hash ^= (crc_hash >> 6);
		}
	}
#endif /* DEBUG_ENFORCE_POOL_LOCKING */

	return crc_hash;
}

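/*
 * With DEBUG_ENFORCE_POOL_LOCKING, apply 'prot' (e.g. PROT_READ or
 * PROT_READ | PROT_WRITE) to every chunk so stray writes to a locked
 * pool fault immediately; a no-op in normal builds.
 */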
350 | | static int _pool_protect(struct dm_pool *p, int prot) |
351 | 0 | { |
352 | | #ifdef DEBUG_ENFORCE_POOL_LOCKING |
353 | | struct chunk *c; |
354 | | |
355 | | for (c = p->chunk; c; c = c->prev) { |
356 | | if (mprotect(c, (size_t) ((c->end - (char *) c) - 1), prot) != 0) { |
357 | | log_sys_error("mprotect", ""); |
358 | | return 0; |
359 | | } |
360 | | } |
361 | | #endif |
362 | 0 | return 1; |
363 | 0 | } |