/* Copyright (c) 2002-2018 Dovecot authors, see the included COPYING file */

/* @UNSAFE: whole file */
#include "lib.h"
#include "safe-memset.h"
#include "mempool.h"

/*
 * As the name implies, alloconly pools support only allocating memory.
 * Memory freeing is not supported, except as a special case - the pool's
 * last allocation can be freed. Additionally, p_realloc() tries to grow
 * an existing allocation if and only if it is the last allocation;
 * otherwise it just allocates a new memory area and copies the data there.
 *
 * Alloconly pools are commonly used for an object that builds its state
 * from many memory allocations, but doesn't change (much of) its state
 * afterwards. It is simpler to free such an object by destroying the
 * entire memory pool.
 *
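 * A minimal usage sketch (the struct, pool name and sizes below are
 * illustrative, not part of this file):
 *
 *     struct user_ctx {
 *         const char *username;
 *         unsigned int uid;
 *     };
 *
 *     pool_t pool = pool_alloconly_create("user ctx", 512);
 *     struct user_ctx *ctx = p_new(pool, struct user_ctx, 1);
 *     ctx->username = p_strdup(pool, "jane");
 *     ctx->uid = 1000;
 *     ...use ctx...
 *     pool_unref(&pool);  - frees ctx and its strings in one step
 *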
 * Implementation
 * ==============
 *
 * Each alloconly pool contains a pool structure (struct alloconly_pool) to
 * keep track of alloconly-specific pool information and one or more blocks
 * (struct pool_block) that keep track of ranges of memory used to back the
 * allocations. The blocks are kept in a linked list implementing a stack.
 * The block size decreases the further down the stack one goes.
 *
 *   +-----------+
 *   | alloconly |
 *   |   pool    |
 *   +-----+-----+
 *         |
 *         | block   +------------+  next  +------------+  next
 *         \-------->| pool block |------->| pool block |------->...
 *                   +------------+        +------------+
 *                   |   <data>   |        |   <data>   |
 *                   .            .        .            .
 *                   .            .        .            .
 *                   .            .        |   <data>   |
 *                   .            .        +------------+
 *                   |   <data>   |
 *                   +------------+
 *
 * Creation
 * --------
 *
 * When an alloconly pool is created, one block is allocated. This block is
 * large enough to hold the necessary internal structures (struct
 * alloconly_pool and struct pool_block) and still have enough space to
 * satisfy allocations for at least the amount of space requested by the
 * consumer via the size argument to pool_alloconly_create().
 *
 * Allocation
 * ----------
 *
 * Each allocation (via p_malloc()) checks the top-most block to see whether
 * or not it has enough space to satisfy the allocation. If there is not
 * enough space, it allocates a new block (via block_alloc()) to serve as
 * the new top-most block. This newly-allocated block is guaranteed to have
 * enough space for the allocation. Then, regardless of whether or not a
 * new block was allocated, the allocation code reserves enough space in the
 * top-most block for the allocation and returns a pointer to it to the
 * caller.
 *
 * The free space tracking within each block is very simple. In addition to
 * keeping track of the size of the block, the block header contains a
 * "pointer" to the beginning of free space. A new allocation simply moves
 * this pointer by the number of bytes allocated.
 *
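 * In outline (a sketch using the field names of struct pool_block defined
 * below; "left" counts the free bytes, so the free-space pointer is
 * implicit):
 *
 *     used = block->size - block->left;
 *     mem = POOL_BLOCK_DATA(block) + used;    - the returned allocation
 *     block->left -= alloc_size;              - "moves the pointer"
 *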
 * Reallocation
 * ------------
 *
 * If the passed-in allocation is the last allocation in a block and there
 * is enough space after it, the allocation is resized in place. Otherwise,
 * a new buffer is allocated (see Allocation above) and the contents are
 * copied over.
 *
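 * For example (sizes illustrative; p_realloc() must be given the old size):
 *
 *     char *buf = p_malloc(pool, 10);     - this is the last allocation
 *     buf = p_realloc(pool, buf, 10, 20); - grown in place if the block
 *                                           still has room, else copied
 *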
 * Freeing
 * -------
 *
 * Freeing of the last allocation moves the "pointer" to free space back by
 * the size of the last allocation.
 *
 * Freeing of any other allocation is a no-op.
 *
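 * For example (a sketch):
 *
 *     void *a = p_malloc(pool, 64);
 *     void *b = p_malloc(pool, 64);
 *     p_free(pool, b);  - b was the last allocation: its space is reclaimed
 *     p_free(pool, a);  - no-op; a's memory is returned only with the pool
 *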
 * Clearing
 * --------
 *
 * Clearing the pool is supposed to return the pool to the same state it was
 * in when it was first created. To that end, the alloconly pool frees all
 * the blocks allocated since the pool's creation. The remaining block
 * (allocated during creation) is reset to consider all the space for
 * allocations as available.
 *
 * In other words, the per-block free space tracking variables are set to
 * indicate that the full block is available and that there have been no
 * allocations.
 *
 * Finally, if the pool was created via pool_alloconly_create_clean(), the
 * freed blocks are safe_memset() to CLEAR_CHR (zero in non-debug builds)
 * before being free()d.
 *
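 * For example (a sketch):
 *
 *     pool_t pool = pool_alloconly_create("scratch", 256);
 *     ...many p_malloc() calls...
 *     p_clear(pool);  - back to the freshly-created state, one block kept
 *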
 * Destruction
 * -----------
 *
 * Destroying a pool first clears it (see above). The clearing leaves the
 * pool in a minimal state with only one block allocated. This remaining
 * block may be safe_memset() to zero if the pool was created with
 * pool_alloconly_create_clean().
 *
 * Since the pool structure itself is allocated from the first block, this
 * final call to free() will release the memory allocated for struct
 * alloconly_pool and struct pool.
 */

#ifndef DEBUG
#  define POOL_ALLOCONLY_MAX_EXTRA MEM_ALIGN(1)
#else
#  define POOL_ALLOCONLY_MAX_EXTRA \
        (MEM_ALIGN(sizeof(size_t)) + MEM_ALIGN(1) + MEM_ALIGN(SENTRY_COUNT))
#endif

struct alloconly_pool {
        struct pool pool;
        int refcount;

        struct pool_block *block;
#ifdef DEBUG
        const char *name;
        size_t base_size;
        bool disable_warning;
#endif
        bool clean_frees;
};

struct pool_block {
        struct pool_block *prev;

        size_t size;
        size_t left;
        size_t last_alloc_size;

        /* unsigned char data[]; */
};
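/* A block in memory, as a sketch (block->size counts only the data area
 * that follows the aligned header; block->left is the free remainder):
 *
 *   block                      POOL_BLOCK_DATA(block)
 *   |                          |
 *   [ struct pool_block ...... ][ used: size - left ][ free: left ]
 *   <--- SIZEOF_POOLBLOCK ----> <---------- block->size ---------->
 */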
#define SIZEOF_POOLBLOCK (MEM_ALIGN(sizeof(struct pool_block)))

#define POOL_BLOCK_DATA(block) \
        ((unsigned char *) (block) + SIZEOF_POOLBLOCK)

#define DEFAULT_BASE_SIZE MEM_ALIGN(sizeof(struct alloconly_pool))

#ifdef DEBUG
#  define CLEAR_CHR 0xde
#  define SENTRY_COUNT 8
#else
#  define SENTRY_COUNT 0
#  define CLEAR_CHR 0
#endif

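/* In DEBUG builds each allocation inside a block is laid out as
 *
 *   [ MEM_ALIGN(sizeof(size_t)) saved size ][ data ][ sentry bytes ]
 *
 * with the sentry bytes filled with CLEAR_CHR (a sketch; the code is in
 * pool_alloconly_malloc() below). check_sentries() walks this layout to
 * detect buffer overflows. */
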
static const char *pool_alloconly_get_name(pool_t pool);
static void pool_alloconly_ref(pool_t pool);
static void pool_alloconly_unref(pool_t *pool);
static void *pool_alloconly_malloc(pool_t pool, size_t size);
static void pool_alloconly_free(pool_t pool, void *mem);
static void *pool_alloconly_realloc(pool_t pool, void *mem,
                                    size_t old_size, size_t new_size);
static void pool_alloconly_clear(pool_t pool);
static size_t pool_alloconly_get_max_easy_alloc_size(pool_t pool);

static void block_alloc(struct alloconly_pool *pool, size_t size);

static const struct pool_vfuncs static_alloconly_pool_vfuncs = {
        pool_alloconly_get_name,

        pool_alloconly_ref,
        pool_alloconly_unref,

        pool_alloconly_malloc,
        pool_alloconly_free,

        pool_alloconly_realloc,

        pool_alloconly_clear,
        pool_alloconly_get_max_easy_alloc_size
};

static const struct pool static_alloconly_pool = {
        .v = &static_alloconly_pool_vfuncs,

        .alloconly_pool = TRUE,
        .datastack_pool = FALSE
};

#ifdef DEBUG
static void check_sentries(struct pool_block *block)
{
        const unsigned char *data = POOL_BLOCK_DATA(block);
        size_t i, max_pos, alloc_size, used_size;

        used_size = block->size - block->left;
        for (i = 0; i < used_size; ) {
                alloc_size = *(size_t *)(data + i);
                if (alloc_size == 0 || used_size - i < alloc_size)
                        i_panic("mempool-alloconly: saved alloc size broken");
                i += MEM_ALIGN(sizeof(alloc_size));
                max_pos = i + MEM_ALIGN(alloc_size + SENTRY_COUNT);
                i += alloc_size;

                for (; i < max_pos; i++) {
                        if (data[i] != CLEAR_CHR)
                                i_panic("mempool-alloconly: buffer overflow");
                }
        }

        if (i != used_size)
                i_panic("mempool-alloconly: used_size wrong");

        /* The unused data must be NULs */
        for (; i < block->size; i++) {
                if (data[i] != '\0')
                        i_unreached();
        }
        if (block->prev != NULL)
                check_sentries(block->prev);
}
#endif

pool_t pool_alloconly_create(const char *name ATTR_UNUSED, size_t size)
{
        struct alloconly_pool apool, *new_apool;
        size_t min_alloc = SIZEOF_POOLBLOCK +
                MEM_ALIGN(sizeof(struct alloconly_pool) + SENTRY_COUNT);

        (void) COMPILE_ERROR_IF_TRUE(POOL_ALLOCONLY_MAX_EXTRA >
                                     (SSIZE_T_MAX - POOL_MAX_ALLOC_SIZE));

#ifdef DEBUG
        min_alloc += MEM_ALIGN(strlen(name) + 1 + SENTRY_COUNT) +
                sizeof(size_t)*2;
#endif

        /* create a fake alloconly_pool so we can call block_alloc() */
        i_zero(&apool);
        apool.pool = static_alloconly_pool;
        apool.refcount = 1;

        if (size < min_alloc)
                size = nearest_power(size + min_alloc);
        block_alloc(&apool, size);

        /* now allocate the actual alloconly_pool from the created block */
        new_apool = p_new(&apool.pool, struct alloconly_pool, 1);
        *new_apool = apool;
#ifdef DEBUG
        if (str_begins(name, MEMPOOL_GROWING, &name) ||
            getenv("DEBUG_SILENT") != NULL)
                new_apool->disable_warning = TRUE;
        new_apool->name = p_strdup(&new_apool->pool, name);

        /* set base_size so p_clear() doesn't trash alloconly_pool structure. */
        new_apool->base_size = new_apool->block->size - new_apool->block->left;
        new_apool->block->last_alloc_size = 0;
#endif
        /* the first pool allocations must be from the first block */
        i_assert(new_apool->block->prev == NULL);

        return &new_apool->pool;
}

pool_t pool_alloconly_create_clean(const char *name, size_t size)
{
        struct alloconly_pool *apool;
        pool_t pool;

        pool = pool_alloconly_create(name, size);
        apool = container_of(pool, struct alloconly_pool, pool);
        apool->clean_frees = TRUE;
        return pool;
}

static void pool_alloconly_free_block(struct alloconly_pool *apool ATTR_UNUSED,
                                      struct pool_block *block)
{
#ifdef DEBUG
        safe_memset(block, CLEAR_CHR, SIZEOF_POOLBLOCK + block->size);
#else
        if (apool->clean_frees) {
                safe_memset(block, CLEAR_CHR,
                            SIZEOF_POOLBLOCK + block->size);
        }
#endif
        free(block);
}

static void
pool_alloconly_free_blocks_until_last(struct alloconly_pool *apool)
{
        struct pool_block *block;

        /* destroy all blocks but the oldest, which contains the
           struct alloconly_pool allocation. */
        while (apool->block->prev != NULL) {
                block = apool->block;
                apool->block = block->prev;

                pool_alloconly_free_block(apool, block);
        }
}

static void pool_alloconly_destroy(struct alloconly_pool *apool)
{
        /* destroy all but the last block */
        pool_alloconly_free_blocks_until_last(apool);

        /* destroy the last block */
        pool_alloconly_free_block(apool, apool->block);
}

static const char *pool_alloconly_get_name(pool_t pool ATTR_UNUSED)
{
#ifdef DEBUG
        struct alloconly_pool *apool =
                container_of(pool, struct alloconly_pool, pool);

        return apool->name;
#else
        return "alloconly";
#endif
}

static void pool_alloconly_ref(pool_t pool)
{
        struct alloconly_pool *apool =
                container_of(pool, struct alloconly_pool, pool);

        apool->refcount++;
}

static void pool_alloconly_unref(pool_t *pool)
{
        struct alloconly_pool *apool =
                container_of(*pool, struct alloconly_pool, pool);

        /* erase the pointer before freeing anything, as the pointer may
           exist inside the pool's memory area */
        *pool = NULL;

        if (--apool->refcount > 0)
                return;

        pool_external_refs_unref(&apool->pool);
        pool_alloconly_destroy(apool);
}

static void block_alloc(struct alloconly_pool *apool, size_t size)
{
        struct pool_block *block;

        i_assert(size > SIZEOF_POOLBLOCK);
        i_assert(size <= SSIZE_T_MAX);

        if (apool->block != NULL) {
                /* each block is at least twice the size of the previous one */
                if (size <= apool->block->size)
                        size += apool->block->size;

                /* avoid crashing in nearest_power() if size is too large */
                size = I_MIN(size, SSIZE_T_MAX);
                size = nearest_power(size);
                /* nearest_power() could have grown size to SSIZE_T_MAX+1 */
                size = I_MIN(size, SSIZE_T_MAX);
#ifdef DEBUG
                if (!apool->disable_warning) {
                        /* i_debug() overwrites unallocated data in data
                           stack, so make sure everything is allocated before
                           calling it. */
                        t_buffer_alloc_last_full();
                        i_debug("Growing pool '%s' with: %zu",
                                apool->name, size);
                }
#endif
        }

        block = calloc(size, 1);
        if (unlikely(block == NULL)) {
                i_fatal_status(FATAL_OUTOFMEM, "block_alloc(%zu"
                               "): Out of memory", size);
        }
        block->prev = apool->block;
        apool->block = block;

        block->size = size - SIZEOF_POOLBLOCK;
        block->left = block->size;
}

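/* For example: with a newest block holding 1024 data bytes, an allocation
 * that doesn't fit makes block_alloc() above request at least 1024 + needed
 * bytes, which nearest_power() rounds up, so block sizes grow roughly
 * 1024 -> 2048 -> 4096 -> ... (a sketch; the exact totals also include
 * SIZEOF_POOLBLOCK). */
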
396 | | static void *pool_alloconly_malloc(pool_t pool, size_t size) |
397 | 590k | { |
398 | 590k | struct alloconly_pool *apool = |
399 | 590k | container_of(pool, struct alloconly_pool, pool); |
400 | 590k | void *mem; |
401 | 590k | size_t alloc_size; |
402 | | |
403 | 590k | #ifndef DEBUG |
404 | 590k | alloc_size = MEM_ALIGN(size); |
405 | | #else |
406 | | alloc_size = MEM_ALIGN(sizeof(size)) + MEM_ALIGN(size + SENTRY_COUNT); |
407 | | #endif |
408 | | |
409 | 590k | if (apool->block->left < alloc_size) { |
410 | | /* we need a new block */ |
411 | 847 | block_alloc(apool, alloc_size + SIZEOF_POOLBLOCK); |
412 | 847 | } |
413 | | |
414 | 590k | mem = POOL_BLOCK_DATA(apool->block) + |
415 | 590k | (apool->block->size - apool->block->left); |
416 | | |
417 | 590k | apool->block->left -= alloc_size; |
418 | 590k | apool->block->last_alloc_size = alloc_size; |
419 | | #ifdef DEBUG |
420 | | memcpy(mem, &size, sizeof(size)); |
421 | | mem = PTR_OFFSET(mem, MEM_ALIGN(sizeof(size))); |
422 | | /* write CLEAR_CHRs to sentry */ |
423 | | memset(PTR_OFFSET(mem, size), CLEAR_CHR, |
424 | | MEM_ALIGN(size + SENTRY_COUNT) - size); |
425 | | #endif |
426 | 590k | return mem; |
427 | 590k | } |
428 | | |
429 | | static void pool_alloconly_free(pool_t pool, void *mem) |
430 | 0 | { |
431 | 0 | struct alloconly_pool *apool = |
432 | 0 | container_of(pool, struct alloconly_pool, pool); |
433 | | |
434 | | /* we can free only the last allocation */ |
435 | 0 | if (POOL_BLOCK_DATA(apool->block) + |
436 | 0 | (apool->block->size - apool->block->left - |
437 | 0 | apool->block->last_alloc_size) == mem) { |
438 | 0 | memset(mem, 0, apool->block->last_alloc_size); |
439 | 0 | apool->block->left += apool->block->last_alloc_size; |
440 | 0 | apool->block->last_alloc_size = 0; |
441 | 0 | } |
442 | 0 | } |
443 | | |
444 | | static bool pool_alloconly_try_grow(struct alloconly_pool *apool, void *mem, size_t size) |
445 | 0 | { |
446 | | /* see if we want to grow the memory we allocated last */ |
447 | 0 | if (POOL_BLOCK_DATA(apool->block) + |
448 | 0 | (apool->block->size - apool->block->left - |
449 | 0 | apool->block->last_alloc_size) == mem) { |
450 | | /* yeah, see if we can grow */ |
451 | 0 | if (apool->block->left >= size-apool->block->last_alloc_size) { |
452 | | /* just shrink the available size */ |
453 | 0 | apool->block->left -= |
454 | 0 | size - apool->block->last_alloc_size; |
455 | 0 | apool->block->last_alloc_size = size; |
456 | 0 | return TRUE; |
457 | 0 | } |
458 | 0 | } |
459 | | |
460 | 0 | return FALSE; |
461 | 0 | } |
462 | | |
static void *pool_alloconly_realloc(pool_t pool, void *mem,
                                    size_t old_size, size_t new_size)
{
        struct alloconly_pool *apool =
                container_of(pool, struct alloconly_pool, pool);
        unsigned char *new_mem;

        i_assert(old_size < SIZE_MAX);

        if (new_size <= old_size)
                return mem;

        new_size = MEM_ALIGN(new_size);

        /* see if we can directly grow it */
        if (!pool_alloconly_try_grow(apool, mem, new_size)) {
                /* slow way - allocate + copy */
                new_mem = pool_alloconly_malloc(pool, new_size);
                if (old_size > 0)
                        memcpy(new_mem, mem, old_size);
                mem = new_mem;
        }

        return mem;
}

static void pool_alloconly_clear(pool_t pool)
{
        struct alloconly_pool *apool =
                container_of(pool, struct alloconly_pool, pool);
        size_t base_size, avail_size;

#ifdef DEBUG
        check_sentries(apool->block);
#endif

        pool_alloconly_free_blocks_until_last(apool);

        /* clear the first block */
#ifdef DEBUG
        base_size = apool->base_size;
#else
        base_size = DEFAULT_BASE_SIZE;
#endif
        avail_size = apool->block->size - base_size;
        memset(PTR_OFFSET(POOL_BLOCK_DATA(apool->block), base_size), 0,
               avail_size - apool->block->left);
        apool->block->left = avail_size;
        apool->block->last_alloc_size = 0;
}

static size_t pool_alloconly_get_max_easy_alloc_size(pool_t pool)
{
        struct alloconly_pool *apool =
                container_of(pool, struct alloconly_pool, pool);

        return apool->block->left;
}

size_t pool_alloconly_get_total_used_size(pool_t pool)
{
        struct alloconly_pool *apool =
                container_of(pool, struct alloconly_pool, pool);
        struct pool_block *block;
        size_t size = 0;

        i_assert(pool->v == &static_alloconly_pool_vfuncs);

        for (block = apool->block; block != NULL; block = block->prev)
                size += block->size - block->left;
        return size;
}

size_t pool_alloconly_get_total_alloc_size(pool_t pool)
{
        struct alloconly_pool *apool =
                container_of(pool, struct alloconly_pool, pool);
        struct pool_block *block;
        size_t size = 0;

        i_assert(pool->v == &static_alloconly_pool_vfuncs);

        for (block = apool->block; block != NULL; block = block->prev)
                size += block->size + SIZEOF_POOLBLOCK;
        return size;
}