/src/dovecot/src/lib/data-stack.c
Line | Count | Source |
1 | | /* Copyright (c) 2002-2018 Dovecot authors, see the included COPYING file */ |
2 | | |
3 | | /* @UNSAFE: whole file */ |
4 | | |
5 | | #include "lib.h" |
6 | | #include "backtrace-string.h" |
7 | | #include "str.h" |
8 | | #include "data-stack.h" |
9 | | |
10 | | |
11 | | /* Initial stack size - this should be kept large enough that normal |
12 | | use doesn't exceed it, to avoid extra malloc()ing. */ |
13 | | #ifdef DEBUG |
14 | | # define INITIAL_STACK_SIZE (1024*10) |
15 | | #else |
16 | 1 | # define INITIAL_STACK_SIZE (1024*32) |
17 | | #endif |
18 | | |
19 | | #ifdef DEBUG |
20 | | # define CLEAR_CHR 0xD5 /* D5 is mnemonic for "Data 5tack" */ |
21 | | # define SENTRY_COUNT (4*8) |
22 | | # define BLOCK_CANARY ((void *)0xBADBADD5BADBADD5) /* contains 'D5' */ |
23 | | # define ALLOC_SIZE(size) (MEM_ALIGN(sizeof(size_t)) + MEM_ALIGN(size + SENTRY_COUNT)) |
24 | | #else |
25 | | # define CLEAR_CHR 0 |
26 | 10 | # define BLOCK_CANARY NULL |
27 | 49.2k | # define block_canary_check(block) do { ; } while(0) |
28 | 37.1k | # define ALLOC_SIZE(size) MEM_ALIGN(size) |
29 | | #endif |
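In DEBUG builds every allocation is prefixed with its requested size and followed by SENTRY_COUNT poison bytes, and ALLOC_SIZE() accounts for both. A minimal sketch of that arithmetic, assuming a 16-byte alignment unit (the real unit comes from MEM_ALIGN in lib.h and may differ):

    /* Hypothetical stand-ins for MEM_ALIGN()/ALLOC_SIZE() in a DEBUG build. */
    #include <stdio.h>

    #define DEMO_ALIGN_UNIT    16
    #define DEMO_MEM_ALIGN(s)  (((s) + DEMO_ALIGN_UNIT - 1) & ~(size_t)(DEMO_ALIGN_UNIT - 1))
    #define DEMO_SENTRY_COUNT  (4*8)
    #define DEMO_ALLOC_SIZE(s) (DEMO_MEM_ALIGN(sizeof(size_t)) + DEMO_MEM_ALIGN((s) + DEMO_SENTRY_COUNT))

    int main(void)
    {
        /* A 100-byte request: 16 bytes for the stored size header, plus
           100+32 sentry bytes rounded up to 144, i.e. 160 bytes in total. */
        printf("ALLOC_SIZE(100) = %zu\n", DEMO_ALLOC_SIZE((size_t)100));
        return 0;
    }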
30 | | |
31 | | struct stack_block { |
32 | | struct stack_block *prev, *next; |
33 | | |
34 | | size_t size, left; |
35 | | #ifdef DEBUG |
36 | | /* The lowest value that "left" has been in this block since it was |
37 | | last popped. This is used to keep track of which parts of the block |
38 | | need to be cleared if DEBUG is used. */ |
39 | | size_t left_lowwater; |
40 | | #endif |
41 | | /* NULL or a poison value, just in case something accesses |
42 | | the memory in front of an allocated area */ |
43 | | void *canary; |
44 | | unsigned char data[FLEXIBLE_ARRAY_MEMBER]; |
45 | | }; |
46 | | |
47 | 38.4k | #define SIZEOF_MEMBLOCK MEM_ALIGN(sizeof(struct stack_block)) |
48 | | |
49 | | #define STACK_BLOCK_DATA(block) \ |
50 | 38.4k | (block->data + (SIZEOF_MEMBLOCK - sizeof(struct stack_block))) |
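SIZEOF_MEMBLOCK rounds the block header up to the alignment unit, and STACK_BLOCK_DATA() shifts data[] forward by the difference, so the usable area always begins SIZEOF_MEMBLOCK bytes into the block and every allocation stays MEM_ALIGN-aligned. A small illustrative sketch of that offset calculation (DEMO_MEM_ALIGN is a stand-in for MEM_ALIGN; on typical 64-bit ABIs offsetof(data) equals sizeof(struct stack_block) here):

    #include <stdio.h>
    #include <stddef.h>

    #define DEMO_ALIGN_UNIT   16
    #define DEMO_MEM_ALIGN(s) (((s) + DEMO_ALIGN_UNIT - 1) & ~(size_t)(DEMO_ALIGN_UNIT - 1))

    struct demo_block {
        struct demo_block *prev, *next;
        size_t size, left;
        void *canary;
        unsigned char data[];            /* FLEXIBLE_ARRAY_MEMBER */
    };

    int main(void)
    {
        size_t aligned_header = DEMO_MEM_ALIGN(sizeof(struct demo_block));
        size_t shift = aligned_header - sizeof(struct demo_block);

        /* data[] starts at offsetof(..., data); adding the shift moves the
           first usable byte up to the aligned header size. */
        printf("data offset=%zu shift=%zu usable data starts at=%zu\n",
               offsetof(struct demo_block, data), shift,
               offsetof(struct demo_block, data) + shift);
        return 0;
    }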
51 | | |
52 | | struct stack_frame { |
53 | | struct stack_frame *prev; |
54 | | |
55 | | struct stack_block *block; |
56 | | /* Each frame initializes this to current_block->left, i.e. how much |
57 | | free space is left in the block. So the frame's start position in |
58 | | the block is (block.size - block_space_left) */ |
59 | | size_t block_space_left; |
60 | | size_t last_alloc_size; |
61 | | const char *marker; |
62 | | #ifdef DEBUG |
63 | | /* Fairly arbitrary profiling data */ |
64 | | unsigned long long alloc_bytes; |
65 | | unsigned int alloc_count; |
66 | | #endif |
67 | | }; |
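For example, if t_push() runs while the current 32768-byte block still has 32000 bytes left, the new frame records block_space_left = 32000, so its data begins at offset 32768 - 32000 = 768; t_pop() later restores block->left to 32000, releasing everything the frame allocated past that offset.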
68 | | |
69 | | #ifdef STATIC_CHECKER |
70 | | struct data_stack_frame { |
71 | | unsigned int id; |
72 | | }; |
73 | | #endif |
74 | | |
75 | | unsigned int data_stack_frame_id = 0; |
76 | | |
77 | | static bool data_stack_initialized = FALSE; |
78 | | static data_stack_frame_t root_frame_id; |
79 | | |
80 | | static struct stack_frame *current_frame; |
81 | | |
82 | | /* The latest block currently used for allocation. current_block->next is |
83 | | always NULL. */ |
84 | | static struct stack_block *current_block; |
85 | | /* The largest block that data stack has allocated so far, which was already |
86 | | freed. This can prevent rapid malloc()+free()ing when data stack is grown |
87 | | and shrunk constantly. */ |
88 | | static struct stack_block *unused_block = NULL; |
89 | | |
90 | | static struct event *event_datastack = NULL; |
91 | | static bool event_datastack_deinitialized = FALSE; |
92 | | |
93 | | static struct stack_block *last_buffer_block; |
94 | | static size_t last_buffer_size; |
95 | | static bool outofmem = FALSE; |
96 | | |
97 | | static union { |
98 | | struct stack_block block; |
99 | | unsigned char data[512]; |
100 | | } outofmem_area; |
101 | | |
102 | | static struct stack_block *mem_block_alloc(size_t min_size); |
103 | | |
104 | | static inline |
105 | | unsigned char *data_stack_after_last_alloc(struct stack_block *block) |
106 | 37.1k | { |
107 | 37.1k | return STACK_BLOCK_DATA(block) + (block->size - block->left); |
108 | 37.1k | } |
109 | | |
110 | | static void data_stack_last_buffer_reset(bool preserve_data ATTR_UNUSED) |
111 | 48.5k | { |
112 | 48.5k | if (last_buffer_block != NULL) { |
113 | | #ifdef DEBUG |
114 | | unsigned char *last_alloc_end, *p, *pend; |
115 | | |
116 | | /* We assume that this function gets called before |
117 | | current_block changes. */ |
118 | | i_assert(last_buffer_block == current_block); |
119 | | |
120 | | last_alloc_end = data_stack_after_last_alloc(current_block); |
121 | | p = last_alloc_end + MEM_ALIGN(sizeof(size_t)) + last_buffer_size; |
122 | | pend = last_alloc_end + ALLOC_SIZE(last_buffer_size); |
123 | | #endif |
124 | | /* reset t_buffer_get() mark - not really needed but makes it |
125 | | easier to notice if t_malloc()/t_push()/t_pop() is called |
126 | | between t_buffer_get() and t_buffer_alloc(). |
127 | | do this before we get to i_panic() to avoid recursive |
128 | | panics. */ |
129 | 11.8k | last_buffer_block = NULL; |
130 | | |
131 | | #ifdef DEBUG |
132 | | /* NOTE: If the below panic triggers, it may also be due to an |
133 | | internal bug in data-stack (since this is rather complex). While |
134 | | debugging whether that is the case, it's a good idea to change the |
135 | | i_panic() to abort(). Otherwise the i_panic() changes the |
136 | | data-stack's internal state and complicates debugging. */ |
137 | | while (p < pend) |
138 | | if (*p++ != CLEAR_CHR) |
139 | | i_panic("t_buffer_get(): buffer overflow"); |
140 | | |
141 | | if (!preserve_data) { |
142 | | p = last_alloc_end; |
143 | | memset(p, CLEAR_CHR, SENTRY_COUNT); |
144 | | } |
145 | | #endif |
146 | 11.8k | } |
147 | 48.5k | } |
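The reset above exists to enforce the t_buffer_get()/t_buffer_alloc() contract: nothing else may allocate from the data stack between the two calls. A minimal sketch of the intended reserve-then-commit pattern (hypothetical caller, not from this file):

    #include "lib.h"
    #include <unistd.h>

    /* Hypothetical example: read up to max_size bytes into data-stack
       memory and commit only the bytes actually used. */
    static unsigned char *read_into_data_stack(int fd, size_t max_size,
                                               size_t *used_r)
    {
        unsigned char *buf = t_buffer_get(max_size);
        /* no t_malloc*()/t_push()/t_pop() may happen between
           t_buffer_get() and t_buffer_alloc() */
        ssize_t ret = read(fd, buf, max_size);
        *used_r = ret <= 0 ? 0 : (size_t)ret;
        if (*used_r > 0)
            t_buffer_alloc(*used_r);
        return buf;
    }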
148 | | |
149 | | data_stack_frame_t t_push(const char *marker) |
150 | 11.4k | { |
151 | 11.4k | struct stack_frame *frame; |
152 | | |
153 | 11.4k | i_assert(marker != NULL); |
154 | | |
155 | 11.4k | if (unlikely(!data_stack_initialized)) { |
156 | | /* kludgy, but allow this before initialization */ |
157 | 1 | data_stack_init(); |
158 | 1 | return t_push(marker); |
159 | 1 | } |
160 | | |
161 | | /* allocate new block */ |
162 | 11.4k | frame = t_buffer_get(sizeof(*frame)); |
163 | 11.4k | frame->prev = current_frame; |
164 | 11.4k | current_frame = frame; |
165 | | |
166 | | /* mark our current position */ |
167 | 11.4k | current_frame->block = current_block; |
168 | 11.4k | current_frame->block_space_left = current_block->left; |
169 | 11.4k | current_frame->last_alloc_size = 0; |
170 | 11.4k | current_frame->marker = marker; |
171 | | #ifdef DEBUG |
172 | | current_frame->alloc_bytes = 0; |
173 | | current_frame->alloc_count = 0; |
174 | | #endif |
175 | | |
176 | 11.4k | t_buffer_alloc(sizeof(*frame)); |
177 | | |
178 | 11.4k | #ifndef STATIC_CHECKER |
179 | 11.4k | return data_stack_frame_id++; |
180 | | #else |
181 | | struct data_stack_frame *ds_frame = i_new(struct data_stack_frame, 1); |
182 | | ds_frame->id = data_stack_frame_id++; |
183 | | return ds_frame; |
184 | | #endif |
185 | 11.4k | } |
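Typical callers pair t_push() with t_pop() directly, or use the T_BEGIN/T_END macros from lib.h, which expand to roughly this pattern. A minimal sketch (hypothetical caller, not from this file):

    #include "lib.h"

    /* Hypothetical example: everything allocated with t_malloc*()/t_strdup*()
       between the push and the pop lives only inside this frame. */
    static void example_temp_work(const char *input)
    {
        data_stack_frame_t frame_id = t_push("example_temp_work");

        char *copy = t_strdup_noconst(input);   /* released by the t_pop() below */
        str_lcase(copy);                        /* example in-place use */

        if (!t_pop(&frame_id))
            i_panic("example_temp_work: leaked t_push() frame");
    }

t_pop() returning FALSE means an inner frame was left unpopped; treating that as a panic mirrors what the T_END macro does.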
186 | | |
187 | | data_stack_frame_t t_push_named(const char *format, ...) |
188 | 0 | { |
189 | 0 | data_stack_frame_t ret = t_push(format); |
190 | | #ifdef DEBUG |
191 | | va_list args; |
192 | | va_start(args, format); |
193 | | current_frame->marker = p_strdup_vprintf(unsafe_data_stack_pool, format, args); |
194 | | va_end(args); |
195 | | #else |
196 | 0 | (void)format; /* unused in non-DEBUG builds */ |
197 | 0 | #endif |
198 | |
199 | 0 | return ret; |
200 | 0 | } |
201 | | |
202 | | #ifdef DEBUG |
203 | | static void block_canary_check(struct stack_block *block) |
204 | | { |
205 | | if (block->canary != BLOCK_CANARY) { |
206 | | /* Make sure i_panic() won't try to allocate from the |
207 | | same block by falling back onto our emergency block. */ |
208 | | current_block = &outofmem_area.block; |
209 | | i_panic("Corrupted data stack canary"); |
210 | | } |
211 | | } |
212 | | #endif |
213 | | |
214 | | static void free_blocks(struct stack_block *block) |
215 | 693 | { |
216 | 693 | struct stack_block *next; |
217 | | |
218 | | /* Free all the blocks. If any of them is bigger than unused_block, |
219 | | keep it as the new unused_block instead of freeing it. */ |
220 | 1.39k | while (block != NULL) { |
221 | 700 | block_canary_check(block); |
222 | 700 | next = block->next; |
223 | | |
224 | | #ifdef DEBUG |
225 | | memset(STACK_BLOCK_DATA(block), CLEAR_CHR, block->size); |
226 | | #endif |
227 | | |
228 | 700 | if (block == &outofmem_area.block) |
229 | 0 | ; |
230 | 700 | else if (unused_block == NULL || |
231 | 700 | block->size > unused_block->size) { |
232 | 700 | free(unused_block); |
233 | 700 | unused_block = block; |
234 | 700 | } else { |
235 | 0 | free(block); |
236 | 0 | } |
237 | | |
238 | 700 | block = next; |
239 | 700 | } |
240 | 693 | } |
241 | | |
242 | | #ifdef DEBUG |
243 | | static void t_pop_verify(void) |
244 | | { |
245 | | struct stack_block *block; |
246 | | unsigned char *p; |
247 | | size_t pos, max_pos, used_size; |
248 | | |
249 | | block = current_frame->block; |
250 | | pos = block->size - current_frame->block_space_left; |
251 | | while (block != NULL) { |
252 | | block_canary_check(block); |
253 | | used_size = block->size - block->left; |
254 | | p = STACK_BLOCK_DATA(block); |
255 | | while (pos < used_size) { |
256 | | size_t requested_size = *(size_t *)(p + pos); |
257 | | if (used_size - pos < requested_size) |
258 | | i_panic("data stack[%s]: saved alloc size broken", |
259 | | current_frame->marker); |
260 | | max_pos = pos + ALLOC_SIZE(requested_size); |
261 | | pos += MEM_ALIGN(sizeof(size_t)) + requested_size; |
262 | | |
263 | | for (; pos < max_pos; pos++) { |
264 | | if (p[pos] != CLEAR_CHR) |
265 | | i_panic("data stack[%s]: buffer overflow", |
266 | | current_frame->marker); |
267 | | } |
268 | | } |
269 | | |
270 | | /* if we had used t_buffer_get(), the rest of the buffer |
271 | | may not contain CLEAR_CHRs. but we've already checked all |
272 | | the allocations, so there's no need to check them anyway. */ |
273 | | block = block->next; |
274 | | pos = 0; |
275 | | } |
276 | | } |
277 | | #endif |
278 | | |
279 | | void t_pop_last_unsafe(void) |
280 | 11.4k | { |
281 | 11.4k | size_t block_space_left; |
282 | | |
283 | 11.4k | if (unlikely(current_frame == NULL)) |
284 | 0 | i_panic("t_pop() called with empty stack"); |
285 | | |
286 | 11.4k | data_stack_last_buffer_reset(FALSE); |
287 | | #ifdef DEBUG |
288 | | t_pop_verify(); |
289 | | #endif |
290 | | |
291 | | /* Usually the block doesn't change. If it doesn't, the next pointer |
292 | | must also be NULL. */ |
293 | 11.4k | if (current_block != current_frame->block) { |
294 | 693 | current_block = current_frame->block; |
295 | 693 | if (current_block->next != NULL) { |
296 | | /* free unused blocks */ |
297 | 693 | free_blocks(current_block->next); |
298 | 693 | current_block->next = NULL; |
299 | 693 | } |
300 | 693 | } |
301 | 11.4k | block_canary_check(current_block); |
302 | | |
303 | | /* current_frame points inside the stack frame that will be freed. |
304 | | make sure it's not accessed after it's already freed/cleaned. */ |
305 | 11.4k | block_space_left = current_frame->block_space_left; |
306 | 11.4k | current_frame = current_frame->prev; |
307 | | |
308 | | #ifdef DEBUG |
309 | | size_t start_pos, end_pos; |
310 | | |
311 | | start_pos = current_block->size - block_space_left; |
312 | | end_pos = current_block->size - current_block->left_lowwater; |
313 | | i_assert(end_pos >= start_pos); |
314 | | memset(STACK_BLOCK_DATA(current_block) + start_pos, CLEAR_CHR, |
315 | | end_pos - start_pos); |
316 | | current_block->left_lowwater = block_space_left; |
317 | | #endif |
318 | | |
319 | 11.4k | current_block->left = block_space_left; |
320 | | |
321 | 11.4k | data_stack_frame_id--; |
322 | 11.4k | } |
323 | | |
324 | | bool t_pop(data_stack_frame_t *id) |
325 | 11.4k | { |
326 | 11.4k | t_pop_last_unsafe(); |
327 | 11.4k | #ifndef STATIC_CHECKER |
328 | 11.4k | if (unlikely(data_stack_frame_id != *id)) |
329 | 0 | return FALSE; |
330 | 11.4k | *id = 0; |
331 | | #else |
332 | | unsigned int frame_id = (*id)->id; |
333 | | i_free_and_null(*id); |
334 | | |
335 | | if (unlikely(data_stack_frame_id != frame_id)) |
336 | | return FALSE; |
337 | | #endif |
338 | 11.4k | return TRUE; |
339 | 11.4k | } |
340 | | |
341 | | bool t_pop_pass_str(data_stack_frame_t *id, const char **str) |
342 | 5.23k | { |
343 | 5.23k | if (str == NULL || !data_stack_frame_contains(id, *str)) |
344 | 5.16k | return t_pop(id); |
345 | | |
346 | | /* FIXME: The string could be memmove()d to the beginning of the |
347 | | data stack frame and the previous frame's size extended past it. |
348 | | This would avoid the malloc. It's a bit complicated though. */ |
349 | 67 | char *tmp_str = i_strdup(*str); |
350 | 67 | bool ret = t_pop(id); |
351 | 67 | *str = t_strdup(tmp_str); |
352 | 67 | i_free(tmp_str); |
353 | 67 | return ret; |
354 | 5.23k | } |
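t_pop_pass_str() lets a helper build its result inside its own data stack frame and still return the string to the caller, re-allocated from the caller's frame. A minimal sketch of a hypothetical caller:

    #include "lib.h"

    /* Hypothetical example: the string is built in the inner frame and
       preserved into the caller's frame by t_pop_pass_str(). */
    static const char *example_build_address(const char *user, const char *domain)
    {
        data_stack_frame_t frame_id = t_push("example_build_address");
        const char *result = t_strdup_printf("%s@%s", user, domain);

        if (!t_pop_pass_str(&frame_id, &result))
            i_panic("example_build_address: leaked t_push() frame");
        return result;   /* now allocated from the caller's frame */
    }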
355 | | |
356 | | static void mem_block_reset(struct stack_block *block) |
357 | 701 | { |
358 | 701 | block->prev = NULL; |
359 | 701 | block->next = NULL; |
360 | 701 | block->left = block->size; |
361 | | #ifdef DEBUG |
362 | | block->left_lowwater = block->size; |
363 | | #endif |
364 | 701 | } |
365 | | |
366 | | static struct stack_block *mem_block_alloc(size_t min_size) |
367 | 9 | { |
368 | 9 | struct stack_block *block; |
369 | 9 | size_t prev_size, alloc_size; |
370 | | |
371 | 9 | prev_size = current_block == NULL ? 0 : current_block->size; |
372 | | /* Use INITIAL_STACK_SIZE without growing it to nearest power. */ |
373 | 9 | alloc_size = prev_size == 0 ? min_size : |
374 | 9 | nearest_power(MALLOC_ADD(prev_size, min_size)); |
375 | | |
376 | | /* nearest_power() returns 2^n values, so alloc_size can't be |
377 | | anywhere close to SIZE_MAX */ |
378 | 9 | block = malloc(SIZEOF_MEMBLOCK + alloc_size); |
379 | 9 | if (unlikely(block == NULL)) { |
380 | 0 | if (outofmem) { |
381 | 0 | if (min_size > outofmem_area.block.left) |
382 | 0 | abort(); |
383 | 0 | return &outofmem_area.block; |
384 | 0 | } |
385 | 0 | outofmem = TRUE; |
386 | 0 | i_panic("data stack: Out of memory when allocating %zu bytes", |
387 | 0 | alloc_size + SIZEOF_MEMBLOCK); |
388 | 0 | } |
389 | 9 | block->size = alloc_size; |
390 | 9 | block->canary = BLOCK_CANARY; |
391 | 9 | mem_block_reset(block); |
392 | | #ifdef DEBUG |
393 | | memset(STACK_BLOCK_DATA(block), CLEAR_CHR, alloc_size); |
394 | | #endif |
395 | 9 | return block; |
396 | 9 | } |
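Block sizes therefore grow roughly geometrically: after the initial block, each new block's data area is nearest_power(previous block size + requested size). For example, when a 5000-byte allocation no longer fits into a 32768-byte block, the next block gets nearest_power(32768 + 5000) = 65536 data bytes, plus SIZEOF_MEMBLOCK for the header.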
397 | | |
398 | | static void data_stack_send_grow_event(size_t last_alloc_size) |
399 | 8 | { |
400 | | /* The t_malloc_real() adds a data stack frame. We don't care about that |
401 | | one, but about the previous one. */ |
402 | 8 | struct stack_frame *frame = current_frame->prev; |
403 | | |
404 | 8 | if (event_datastack_deinitialized) { |
405 | | /* already in the deinitialization code - |
406 | | don't send more events */ |
407 | 0 | return; |
408 | 0 | } |
409 | 8 | if (event_datastack == NULL) |
410 | 1 | event_datastack = event_create(NULL); |
411 | 8 | event_set_name(event_datastack, "data_stack_grow"); |
412 | 8 | event_add_int(event_datastack, "alloc_size", data_stack_get_alloc_size()); |
413 | 8 | event_add_int(event_datastack, "used_size", data_stack_get_used_size()); |
414 | 8 | event_add_int(event_datastack, "last_alloc_size", last_alloc_size); |
415 | 8 | event_add_int(event_datastack, "last_block_size", current_block->size); |
416 | | #ifdef DEBUG |
417 | | event_add_int(event_datastack, "frame_alloc_bytes", |
418 | | frame->alloc_bytes); |
419 | | event_add_int(event_datastack, "frame_alloc_count", |
420 | | frame->alloc_count); |
421 | | #endif |
422 | 8 | event_add_str(event_datastack, "frame_marker", frame->marker); |
423 | | |
424 | | /* It's possible that the data stack gets grown and shrunk rapidly. |
425 | | Try to avoid doing expensive work if the event isn't even used for |
426 | | anything. Note that at this point all the event fields must be |
427 | | set already that might potentially be used by the filters. */ |
428 | 8 | if (!event_want_debug(event_datastack)) |
429 | 8 | return; |
430 | | |
431 | | /* Getting backtrace is potentially inefficient, so do it after |
432 | | checking if the event is wanted. Note that this prevents using the |
433 | | backtrace field in event field comparisons. */ |
434 | 0 | const char *backtrace, *error; |
435 | 0 | if (backtrace_get(&backtrace, &error) == 0) |
436 | 0 | event_add_str(event_datastack, "backtrace", backtrace); |
437 | 0 | else { |
438 | 0 | backtrace = t_strdup_printf("backtrace failed: %s", error); |
439 | 0 | event_add_str(event_datastack, "backtrace_error", error); |
440 | 0 | } |
441 | |
442 | 0 | string_t *str = t_str_new(128); |
443 | 0 | str_printfa(str, "total_used=%zu, total_alloc=%zu, last_alloc_size=%zu", |
444 | 0 | data_stack_get_used_size(), |
445 | 0 | data_stack_get_alloc_size(), |
446 | 0 | last_alloc_size); |
447 | | #ifdef DEBUG |
448 | | str_printfa(str, ", frame_bytes=%llu, frame_alloc_count=%u", |
449 | | frame->alloc_bytes, frame->alloc_count); |
450 | | #endif |
451 | 0 | e_debug(event_datastack, "Growing data stack by %zu for '%s' (%s): %s", |
452 | 0 | current_block->size, frame->marker, str_c(str), backtrace); |
453 | 0 | } |
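The event above is dropped unless some debug filter wants it. Assuming the standard log_debug event-filter syntax, a configuration along these lines should surface the growth messages (an illustrative sketch, not taken from this file):

    # dovecot.conf (assumed syntax): enable debug logging for this event
    log_debug = event=data_stack_grow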
454 | | |
455 | | static void *t_malloc_real(size_t size, bool permanent) |
456 | 37.1k | { |
457 | 37.1k | void *ret; |
458 | 37.1k | size_t alloc_size; |
459 | 37.1k | bool warn = FALSE; |
460 | | #ifdef DEBUG |
461 | | int old_errno = errno; |
462 | | #endif |
463 | | |
464 | 37.1k | if (unlikely(size == 0 || size > SSIZE_T_MAX)) |
465 | 0 | i_panic("Trying to allocate %zu bytes", size); |
466 | | |
467 | 37.1k | if (unlikely(!data_stack_initialized)) { |
468 | | /* kludgy, but allow this before initialization */ |
469 | 0 | data_stack_init(); |
470 | 0 | } |
471 | 37.1k | block_canary_check(current_block); |
472 | | |
473 | | /* allocate only aligned amount of memory so alignment comes |
474 | | always properly */ |
475 | 37.1k | alloc_size = ALLOC_SIZE(size); |
476 | | #ifdef DEBUG |
477 | | if(permanent) { |
478 | | current_frame->alloc_bytes += alloc_size; |
479 | | current_frame->alloc_count++; |
480 | | } |
481 | | #endif |
482 | 37.1k | data_stack_last_buffer_reset(TRUE); |
483 | | |
484 | 37.1k | if (permanent) { |
485 | | /* used for t_try_realloc() */ |
486 | 25.2k | current_frame->last_alloc_size = alloc_size; |
487 | 25.2k | } |
488 | | |
489 | 37.1k | if (current_block->left < alloc_size) { |
490 | 700 | struct stack_block *block; |
491 | | |
492 | | /* current block is full, see if we can use the unused_block */ |
493 | 700 | if (unused_block != NULL && unused_block->size >= alloc_size) { |
494 | 692 | block = unused_block; |
495 | 692 | unused_block = NULL; |
496 | 692 | mem_block_reset(block); |
497 | 692 | } else { |
498 | | /* current block is full, allocate a new one */ |
499 | 8 | block = mem_block_alloc(alloc_size); |
500 | 8 | warn = TRUE; |
501 | 8 | } |
502 | | |
503 | | /* The newly allocated block will replace the current_block, |
504 | | i.e. current_block always points to the last element in |
505 | | the linked list. */ |
506 | 700 | block->prev = current_block; |
507 | 700 | current_block->next = block; |
508 | 700 | current_block = block; |
509 | 700 | } |
510 | | |
511 | | /* enough space in current block, use it */ |
512 | 37.1k | ret = data_stack_after_last_alloc(current_block); |
513 | | |
514 | | #ifdef DEBUG |
515 | | if (current_block->left - alloc_size < current_block->left_lowwater) |
516 | | current_block->left_lowwater = current_block->left - alloc_size; |
517 | | #endif |
518 | 37.1k | if (permanent) |
519 | 25.2k | current_block->left -= alloc_size; |
520 | | |
521 | 37.1k | if (warn) T_BEGIN { |
522 | | /* sending event can cause errno changes. */ |
523 | | #ifdef DEBUG |
524 | | i_assert(errno == old_errno); |
525 | | #else |
526 | 16 | int old_errno = errno; |
527 | 16 | #endif |
528 | | /* warn after allocation, so if e_debug() wants to |
529 | | allocate more memory we don't go into an infinite loop */ |
530 | 16 | data_stack_send_grow_event(alloc_size); |
531 | | /* reset errno back to what it was */ |
532 | 16 | errno = old_errno; |
533 | 16 | } T_END; |
534 | | #ifdef DEBUG |
535 | | memcpy(ret, &size, sizeof(size)); |
536 | | ret = PTR_OFFSET(ret, MEM_ALIGN(sizeof(size))); |
537 | | /* make sure the sentry contains CLEAR_CHRs. it might not if we |
538 | | had used t_buffer_get(). */ |
539 | | memset(PTR_OFFSET(ret, size), CLEAR_CHR, |
540 | | MEM_ALIGN(size + SENTRY_COUNT) - size); |
541 | | |
542 | | /* we rely on errno not changing. it shouldn't. */ |
543 | | i_assert(errno == old_errno); |
544 | | #endif |
545 | 37.1k | return ret; |
546 | 37.1k | } |
547 | | |
548 | | void *t_malloc_no0(size_t size) |
549 | 6.10k | { |
550 | 6.10k | return t_malloc_real(size, TRUE); |
551 | 6.10k | } |
552 | | |
553 | | void *t_malloc0(size_t size) |
554 | 7.26k | { |
555 | 7.26k | void *mem; |
556 | | |
557 | 7.26k | mem = t_malloc_real(size, TRUE); |
558 | 7.26k | memset(mem, 0, size); |
559 | 7.26k | return mem; |
560 | 7.26k | } |
561 | | |
562 | | bool ATTR_NO_SANITIZE_INTEGER |
563 | | t_try_realloc(void *mem, size_t size) |
564 | 0 | { |
565 | 0 | size_t debug_adjust = 0, last_alloc_size; |
566 | 0 | unsigned char *after_last_alloc; |
567 | |
568 | 0 | if (unlikely(size == 0 || size > SSIZE_T_MAX)) |
569 | 0 | i_panic("Trying to allocate %zu bytes", size); |
570 | 0 | block_canary_check(current_block); |
571 | 0 | data_stack_last_buffer_reset(TRUE); |
572 | |
573 | 0 | last_alloc_size = current_frame->last_alloc_size; |
574 | | |
575 | | /* see if we're trying to grow the memory we allocated last */ |
576 | 0 | after_last_alloc = data_stack_after_last_alloc(current_block); |
577 | | #ifdef DEBUG |
578 | | debug_adjust = MEM_ALIGN(sizeof(size_t)); |
579 | | #endif |
580 | 0 | if (after_last_alloc - last_alloc_size + debug_adjust == mem) { |
581 | | /* yeah, see if we have space to grow */ |
582 | 0 | size_t new_alloc_size, alloc_growth; |
583 | |
584 | 0 | new_alloc_size = ALLOC_SIZE(size); |
585 | 0 | alloc_growth = (new_alloc_size - last_alloc_size); |
586 | | #ifdef DEBUG |
587 | | size_t old_raw_size; /* sorry, non-C99 users - add braces if you need them */ |
588 | | old_raw_size = *(size_t *)PTR_OFFSET(mem, -(ptrdiff_t)MEM_ALIGN(sizeof(size_t))); |
589 | | i_assert(ALLOC_SIZE(old_raw_size) == last_alloc_size); |
590 | | /* Only check one byte for over-run, that catches most |
591 | | offenders who are likely to use t_try_realloc() */ |
592 | | i_assert(((unsigned char*)mem)[old_raw_size] == CLEAR_CHR); |
593 | | #endif |
594 | |
595 | 0 | if (current_block->left >= alloc_growth) { |
596 | | /* just shrink the available size */ |
597 | 0 | current_block->left -= alloc_growth; |
598 | 0 | current_frame->last_alloc_size = new_alloc_size; |
599 | | #ifdef DEBUG |
600 | | if (current_block->left < current_block->left_lowwater) |
601 | | current_block->left_lowwater = current_block->left; |
602 | | /* All reallocs are permanent by definition |
603 | | However, they don't count as a new allocation */ |
604 | | current_frame->alloc_bytes += alloc_growth; |
605 | | *(size_t *)PTR_OFFSET(mem, -(ptrdiff_t)MEM_ALIGN(sizeof(size_t))) = size; |
606 | | memset(PTR_OFFSET(mem, size), CLEAR_CHR, |
607 | | new_alloc_size - size - MEM_ALIGN(sizeof(size_t))); |
608 | | #endif |
609 | 0 | return TRUE; |
610 | 0 | } |
611 | 0 | } |
612 | | |
613 | 0 | return FALSE; |
614 | 0 | } |
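t_try_realloc() succeeds only when mem was the latest allocation in the current frame and the current block still has room for the growth, so callers always need a copy fallback. A minimal sketch of a hypothetical grow helper:

    #include "lib.h"
    #include <string.h>

    /* Hypothetical example: grow a t_malloc_no0()'d buffer in place when
       possible, otherwise allocate a new one and copy the old contents.
       The old buffer simply stays allocated until the frame is popped. */
    static void *example_grow(void *mem, size_t old_size, size_t new_size)
    {
        i_assert(new_size >= old_size);

        if (t_try_realloc(mem, new_size))
            return mem;                     /* grown in place */

        void *new_mem = t_malloc_no0(new_size);
        memcpy(new_mem, mem, old_size);
        return new_mem;
    }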
615 | | |
616 | | size_t t_get_bytes_available(void) |
617 | 0 | { |
618 | 0 | block_canary_check(current_block); |
619 | 0 | #ifndef DEBUG |
620 | 0 | const unsigned int min_extra = 0; |
621 | | #else |
622 | | const unsigned int min_extra = SENTRY_COUNT + MEM_ALIGN(sizeof(size_t)); |
623 | | if (current_block->left < min_extra) |
624 | | return 0; |
625 | | #endif |
626 | 0 | size_t size = current_block->left - min_extra; |
627 | 0 | i_assert(ALLOC_SIZE(size) == current_block->left); |
628 | 0 | return size; |
629 | 0 | } |
630 | | |
631 | | void *t_buffer_get(size_t size) |
632 | 11.8k | { |
633 | 11.8k | void *ret; |
634 | | |
635 | 11.8k | ret = t_malloc_real(size, FALSE); |
636 | | |
637 | 11.8k | last_buffer_size = size; |
638 | 11.8k | last_buffer_block = current_block; |
639 | 11.8k | return ret; |
640 | 11.8k | } |
641 | | |
642 | | void *t_buffer_reget(void *buffer, size_t size) |
643 | 0 | { |
644 | 0 | size_t old_size; |
645 | 0 | void *new_buffer; |
646 | |
647 | 0 | old_size = last_buffer_size; |
648 | 0 | if (size <= old_size) |
649 | 0 | return buffer; |
650 | | |
651 | 0 | new_buffer = t_buffer_get(size); |
652 | 0 | if (new_buffer != buffer) |
653 | 0 | memcpy(new_buffer, buffer, old_size); |
654 | |
655 | 0 | return new_buffer; |
656 | 0 | } |
657 | | |
658 | | void t_buffer_alloc(size_t size) |
659 | 11.8k | { |
660 | 11.8k | i_assert(last_buffer_block != NULL); |
661 | 11.8k | i_assert(last_buffer_size >= size); |
662 | 11.8k | i_assert(current_block->left >= size); |
663 | | |
664 | | /* we've already reserved the space, now we just mark it used */ |
665 | 11.8k | (void)t_malloc_real(size, TRUE); |
666 | 11.8k | } |
667 | | |
668 | | void t_buffer_alloc_last_full(void) |
669 | 0 | { |
670 | 0 | if (last_buffer_block != NULL) |
671 | 0 | (void)t_malloc_real(last_buffer_size, TRUE); |
672 | 0 | } |
673 | | |
674 | | bool data_stack_frame_contains(data_stack_frame_t *id, const void *_ptr) |
675 | 1.30k | { |
676 | 1.30k | const unsigned char *block_data, *ptr = _ptr; |
677 | 1.30k | const struct stack_block *block; |
678 | 1.30k | unsigned int wanted_frame_id; |
679 | 1.30k | size_t block_start_pos, block_used; |
680 | | |
681 | | /* first handle the fast path - NULL can never be within the frame */ |
682 | 1.30k | if (ptr == NULL) |
683 | 0 | return FALSE; |
684 | | |
685 | 1.30k | #ifndef STATIC_CHECKER |
686 | 1.30k | wanted_frame_id = *id; |
687 | | #else |
688 | | wanted_frame_id = (*id)->id; |
689 | | #endif |
690 | | /* Too much effort to support more than the latest frame. |
691 | | It's the only thing that is currently needed anyway. */ |
692 | 1.30k | i_assert(wanted_frame_id+1 == data_stack_frame_id); |
693 | 1.30k | block = current_frame->block; |
694 | 1.30k | i_assert(block != NULL); |
695 | | |
696 | | /* See if it's in the frame's first block. Only the data after |
697 | | block_start_pos belong to this frame. */ |
698 | 1.30k | block_data = STACK_BLOCK_DATA(block); |
699 | 1.30k | block_start_pos = block->size - current_frame->block_space_left; |
700 | 1.30k | block_used = block->size - block->left; |
701 | 1.30k | if (ptr >= block_data + block_start_pos && |
702 | 67 | ptr <= block_data + block_used) |
703 | 66 | return TRUE; |
704 | | |
705 | | /* See if it's in the other blocks. All the data in them belong to |
706 | | this frame. */ |
707 | 1.24k | for (block = block->next; block != NULL; block = block->next) { |
708 | 1 | block_data = STACK_BLOCK_DATA(block); |
709 | 1 | block_used = block->size - block->left; |
710 | 1 | if (ptr >= block_data && ptr < block_data + block_used) |
711 | 1 | return TRUE; |
712 | 1 | } |
713 | 1.24k | return FALSE; |
714 | 1.24k | } |
715 | | |
716 | | size_t data_stack_get_alloc_size(void) |
717 | 8 | { |
718 | 8 | struct stack_block *block; |
719 | 8 | size_t size = 0; |
720 | | |
721 | 8 | i_assert(current_block->next == NULL); |
722 | | |
723 | 31 | for (block = current_block; block != NULL; block = block->prev) |
724 | 23 | size += block->size; |
725 | 8 | return size; |
726 | 8 | } |
727 | | |
728 | | size_t data_stack_get_used_size(void) |
729 | 8 | { |
730 | 8 | struct stack_block *block; |
731 | 8 | size_t size = 0; |
732 | | |
733 | 8 | i_assert(current_block->next == NULL); |
734 | | |
735 | 31 | for (block = current_block; block != NULL; block = block->prev) |
736 | 23 | size += block->size - block->left; |
737 | 8 | return size; |
738 | 8 | } |
739 | | |
740 | | void data_stack_free_unused(void) |
741 | 0 | { |
742 | 0 | free(unused_block); |
743 | 0 | unused_block = NULL; |
744 | 0 | } |
745 | | |
746 | | void data_stack_init(void) |
747 | 2 | { |
748 | 2 | if (data_stack_initialized) { |
749 | | /* already initialized (we did auto-initialization in |
750 | | t_malloc/t_push) */ |
751 | 1 | return; |
752 | 1 | } |
753 | 1 | data_stack_initialized = TRUE; |
754 | 1 | data_stack_frame_id = 1; |
755 | | |
756 | 1 | outofmem_area.block.size = outofmem_area.block.left = |
757 | 1 | sizeof(outofmem_area) - sizeof(outofmem_area.block); |
758 | 1 | outofmem_area.block.canary = BLOCK_CANARY; |
759 | | |
760 | 1 | current_block = mem_block_alloc(INITIAL_STACK_SIZE); |
761 | 1 | current_frame = NULL; |
762 | | |
763 | 1 | last_buffer_block = NULL; |
764 | 1 | last_buffer_size = 0; |
765 | | |
766 | 1 | root_frame_id = t_push("data_stack_init"); |
767 | 1 | } |
768 | | |
769 | | void data_stack_deinit_event(void) |
770 | 0 | { |
771 | 0 | event_unref(&event_datastack); |
772 | 0 | event_datastack_deinitialized = TRUE; |
773 | 0 | } |
774 | | |
775 | | void data_stack_deinit(void) |
776 | 0 | { |
777 | 0 | if (!t_pop(&root_frame_id) || |
778 | 0 | current_frame != NULL) |
779 | 0 | i_panic("Missing t_pop() call"); |
780 | | |
781 | 0 | free(current_block); |
782 | 0 | current_block = NULL; |
783 | 0 | data_stack_free_unused(); |
784 | 0 | } |