/src/dovecot/src/lib/data-stack.c
Line | Count | Source |
1 | | /* Copyright (c) 2002-2018 Dovecot authors, see the included COPYING file */ |
2 | | |
3 | | /* @UNSAFE: whole file */ |
4 | | |
5 | | #include "lib.h" |
6 | | #include "backtrace-string.h" |
7 | | #include "str.h" |
8 | | #include "data-stack.h" |
9 | | |
10 | | |
11 | | /* Initial stack size - this should be kept in a size that doesn't exceed |
12 | | in a normal use to avoid extra malloc()ing. */ |
13 | | #ifdef DEBUG |
14 | | # define INITIAL_STACK_SIZE (1024*10) |
15 | | #else |
16 | 43.0k | # define INITIAL_STACK_SIZE (1024*32) |
17 | | #endif |
18 | | |
19 | | #ifdef DEBUG |
20 | | # define CLEAR_CHR 0xD5 /* D5 is mnemonic for "Data 5tack" */ |
21 | | # define SENTRY_COUNT (4*8) |
22 | | # define BLOCK_CANARY ((void *)0xBADBADD5BADBADD5) /* contains 'D5' */ |
23 | | # define ALLOC_SIZE(size) (MEM_ALIGN(sizeof(size_t)) + MEM_ALIGN(size + SENTRY_COUNT)) |
24 | | #else |
25 | | # define CLEAR_CHR 0 |
26 | 92.9k | # define BLOCK_CANARY NULL |
27 | 16.8M | # define block_canary_check(block) do { ; } while(0) |
28 | 13.4M | # define ALLOC_SIZE(size) MEM_ALIGN(size) |
29 | | #endif |
30 | | |
31 | | struct stack_block { |
32 | | struct stack_block *prev, *next; |
33 | | |
34 | | size_t size, left; |
35 | | #ifdef DEBUG |
36 | | /* The lowest value that "left" has been in this block since it was |
37 | | last popped. This is used to keep track which parts of the block |
38 | | needs to be cleared if DEBUG is used. */ |
39 | | size_t left_lowwater; |
40 | | #endif |
41 | | /* NULL or a poison value, just in case something accesses |
42 | | the memory in front of an allocated area */ |
43 | | void *canary; |
44 | | unsigned char data[FLEXIBLE_ARRAY_MEMBER]; |
45 | | }; |
46 | | |
47 | 13.4M | #define SIZEOF_MEMBLOCK MEM_ALIGN(sizeof(struct stack_block)) |
48 | | |
49 | | #define STACK_BLOCK_DATA(block) \ |
50 | 13.4M | (block->data + (SIZEOF_MEMBLOCK - sizeof(struct stack_block))) |
51 | | |
52 | | struct stack_frame { |
53 | | struct stack_frame *prev; |
54 | | |
55 | | struct stack_block *block; |
56 | | /* Each frame initializes this to current_block->left, i.e. how much |
57 | | free space is left in the block. So the frame's start position in |
58 | | the block is (block.size - block_space_left) */ |
59 | | size_t block_space_left; |
60 | | size_t last_alloc_size; |
61 | | const char *marker; |
62 | | #ifdef DEBUG |
63 | | /* Fairly arbitrary profiling data */ |
64 | | unsigned long long alloc_bytes; |
65 | | unsigned int alloc_count; |
66 | | #endif |
67 | | }; |
68 | | |
69 | | #ifdef STATIC_CHECKER |
70 | | struct data_stack_frame { |
71 | | unsigned int id; |
72 | | }; |
73 | | #endif |
74 | | |
75 | | unsigned int data_stack_frame_id = 0; |
76 | | |
77 | | static bool data_stack_initialized = FALSE; |
78 | | static data_stack_frame_t root_frame_id; |
79 | | |
80 | | static struct stack_frame *current_frame; |
81 | | |
82 | | /* The latest block currently used for allocation. current_block->next is |
83 | | always NULL. */ |
84 | | static struct stack_block *current_block; |
85 | | /* The largest block that data stack has allocated so far, which was already |
86 | | freed. This can prevent rapid malloc()+free()ing when data stack is grown |
87 | | and shrunk constantly. */ |
88 | | static struct stack_block *unused_block = NULL; |
89 | | |
90 | | static struct event *event_datastack = NULL; |
91 | | static bool event_datastack_deinitialized = FALSE; |
92 | | |
93 | | static struct stack_block *last_buffer_block; |
94 | | static size_t last_buffer_size; |
95 | | static bool outofmem = FALSE; |
96 | | |
97 | | static union { |
98 | | struct stack_block block; |
99 | | unsigned char data[512]; |
100 | | } outofmem_area; |
101 | | |
102 | | static struct stack_block *mem_block_alloc(size_t min_size); |
103 | | |
104 | | static inline |
105 | | unsigned char *data_stack_after_last_alloc(struct stack_block *block) |
106 | 13.4M | { |
107 | 13.4M | return STACK_BLOCK_DATA(block) + (block->size - block->left); |
108 | 13.4M | } |
109 | | |
/* Forget the pending t_buffer_get() reservation, if any. In DEBUG builds
   also verify that nothing was written past the reserved size, and
   (unless preserve_data) re-poison the reserved area with CLEAR_CHR.
   Must be called before current_block can change. */
static void data_stack_last_buffer_reset(bool preserve_data ATTR_UNUSED)
{
	if (last_buffer_block != NULL) {
#ifdef DEBUG
		unsigned char *last_alloc_end, *p, *pend;

		/* We assume that this function gets called before
		   current_block changes. */
		i_assert(last_buffer_block == current_block);

		last_alloc_end = data_stack_after_last_alloc(current_block);
		/* [p, pend) covers the padding/sentry bytes after the
		   reserved buffer - they must still contain CLEAR_CHR */
		p = last_alloc_end + MEM_ALIGN(sizeof(size_t)) + last_buffer_size;
		pend = last_alloc_end + ALLOC_SIZE(last_buffer_size);
#endif
		/* reset t_buffer_get() mark - not really needed but makes it
		   easier to notice if t_malloc()/t_push()/t_pop() is called
		   between t_buffer_get() and t_buffer_alloc().
		   do this before we get to i_panic() to avoid recursive
		   panics. */
		last_buffer_block = NULL;

#ifdef DEBUG
		/* NOTE: If the below panic triggers, it may also be due to an
		   internal bug in data-stack (since this is rather complex). While
		   debugging whether that is the case, it's a good idea to change the
		   i_panic() to abort(). Otherwise the i_panic() changes the
		   data-stack's internal state and complicates debugging. */
		while (p < pend)
			if (*p++ != CLEAR_CHR)
				i_panic("t_buffer_get(): buffer overflow");

		if (!preserve_data) {
			p = last_alloc_end;
			memset(p, CLEAR_CHR, SENTRY_COUNT);
		}
#endif
	}
}
148 | | |
/* Push a new data stack frame and return its identifier. The frame
   struct itself lives on the data stack (reserved with t_buffer_get(),
   committed with t_buffer_alloc()), so popping the frame also releases
   the struct. */
data_stack_frame_t t_push(const char *marker)
{
	struct stack_frame *frame;

	i_assert(marker != NULL);

	if (unlikely(!data_stack_initialized)) {
		/* kludgy, but allow this before initialization */
		data_stack_init();
		return t_push(marker);
	}

	/* allocate new block */
	frame = t_buffer_get(sizeof(*frame));
	frame->prev = current_frame;
	current_frame = frame;

	/* mark our current position */
	current_frame->block = current_block;
	current_frame->block_space_left = current_block->left;
	current_frame->last_alloc_size = 0;
	current_frame->marker = marker;
#ifdef DEBUG
	current_frame->alloc_bytes = 0;
	current_frame->alloc_count = 0;
#endif

	t_buffer_alloc(sizeof(*frame));

#ifndef STATIC_CHECKER
	return data_stack_frame_id++;
#else
	/* static analysis builds wrap the frame ID in a heap object so
	   that leaks of unpopped frames are detectable */
	struct data_stack_frame *ds_frame = i_new(struct data_stack_frame, 1);
	ds_frame->id = data_stack_frame_id++;
	return ds_frame;
#endif
}
186 | | |
/* Like t_push(), but in DEBUG builds the frame marker is the fully
   printf-formatted string. In non-DEBUG builds the raw format string is
   used as the marker and the varargs are ignored. */
data_stack_frame_t t_push_named(const char *format, ...)
{
	data_stack_frame_t ret = t_push(format);
#ifdef DEBUG
	va_list args;
	va_start(args, format);
	current_frame->marker = p_strdup_vprintf(unsafe_data_stack_pool, format, args);
	va_end(args);
#else
	(void)format; /* unused in non-DEBUG builds */
#endif

	return ret;
}
201 | | |
#ifdef DEBUG
/* Panic if the block's canary was overwritten, i.e. something wrote into
   the memory just in front of the block's data area. */
static void block_canary_check(struct stack_block *block)
{
	if (block->canary != BLOCK_CANARY) {
		/* Make sure i_panic() won't try to allocate from the
		   same block by falling back onto our emergency block. */
		current_block = &outofmem_area.block;
		i_panic("Corrupted data stack canary");
	}
}
#endif
213 | | |
214 | | static void free_blocks(struct stack_block *block) |
215 | 6.92k | { |
216 | 6.92k | struct stack_block *next; |
217 | 6.92k | int old_errno = errno; |
218 | | |
219 | | /* free all the blocks, except if any of them is bigger than |
220 | | unused_block, replace it */ |
221 | 17.4k | while (block != NULL) { |
222 | 10.4k | block_canary_check(block); |
223 | 10.4k | next = block->next; |
224 | | |
225 | | #ifdef DEBUG |
226 | | memset(STACK_BLOCK_DATA(block), CLEAR_CHR, block->size); |
227 | | #endif |
228 | | |
229 | 10.4k | if (block == &outofmem_area.block) |
230 | 0 | ; |
231 | 10.4k | else if (unused_block == NULL || |
232 | 10.3k | block->size > unused_block->size) { |
233 | 10.3k | free(unused_block); |
234 | 10.3k | unused_block = block; |
235 | 10.3k | } else { |
236 | 132 | free(block); |
237 | 132 | } |
238 | | |
239 | 10.4k | block = next; |
240 | 10.4k | } |
241 | 6.92k | errno = old_errno; |
242 | 6.92k | } |
243 | | |
#ifdef DEBUG
/* Walk through every allocation made within the current frame and verify
   that each saved per-allocation size header is sane and that the
   padding/sentry bytes after each allocation still contain CLEAR_CHR.
   Panics on corruption. */
static void t_pop_verify(void)
{
	struct stack_block *block;
	unsigned char *p;
	size_t pos, max_pos, used_size;

	block = current_frame->block;
	/* the frame's start offset within its first block */
	pos = block->size - current_frame->block_space_left;
	while (block != NULL) {
		block_canary_check(block);
		used_size = block->size - block->left;
		p = STACK_BLOCK_DATA(block);
		while (pos < used_size) {
			/* each allocation is prefixed with its requested
			   (unaligned) size */
			size_t requested_size = *(size_t *)(p + pos);
			if (used_size - pos < requested_size)
				i_panic("data stack[%s]: saved alloc size broken",
					current_frame->marker);
			max_pos = pos + ALLOC_SIZE(requested_size);
			pos += MEM_ALIGN(sizeof(size_t)) + requested_size;

			for (; pos < max_pos; pos++) {
				if (p[pos] != CLEAR_CHR)
					i_panic("data stack[%s]: buffer overflow",
						current_frame->marker);
			}
		}

		/* if we had used t_buffer_get(), the rest of the buffer
		   may not contain CLEAR_CHRs. but we've already checked all
		   the allocations, so there's no need to check them anyway. */
		block = block->next;
		pos = 0;
	}
}
#endif
280 | | |
/* Pop the latest stack frame without verifying its frame ID. Frees any
   blocks allocated after the frame's starting block and rewinds the
   block's free space to where it was at t_push() time. */
void t_pop_last_unsafe(void)
{
	size_t block_space_left;

	if (unlikely(current_frame == NULL))
		i_panic("t_pop() called with empty stack");

	data_stack_last_buffer_reset(FALSE);
#ifdef DEBUG
	t_pop_verify();
#endif

	/* Usually the block doesn't change. If it doesn't, the next pointer
	   must also be NULL. */
	if (current_block != current_frame->block) {
		current_block = current_frame->block;
		if (current_block->next != NULL) {
			/* free unused blocks */
			free_blocks(current_block->next);
			current_block->next = NULL;
		}
	}
	block_canary_check(current_block);

	/* current_frame points inside the stack frame that will be freed.
	   make sure it's not accessed after it's already freed/cleaned. */
	block_space_left = current_frame->block_space_left;
	current_frame = current_frame->prev;

#ifdef DEBUG
	size_t start_pos, end_pos;

	/* re-poison everything the popped frame may have touched, up to
	   the block's low-water mark */
	start_pos = current_block->size - block_space_left;
	end_pos = current_block->size - current_block->left_lowwater;
	i_assert(end_pos >= start_pos);
	memset(STACK_BLOCK_DATA(current_block) + start_pos, CLEAR_CHR,
	       end_pos - start_pos);
	current_block->left_lowwater = block_space_left;
#endif

	current_block->left = block_space_left;

	data_stack_frame_id--;
}
325 | | |
/* Pop the latest stack frame. Returns FALSE if *id is not the ID of the
   frame that was popped, i.e. t_push()/t_pop() calls are unbalanced. */
bool t_pop(data_stack_frame_t *id)
{
	t_pop_last_unsafe();
#ifndef STATIC_CHECKER
	if (unlikely(data_stack_frame_id != *id))
		return FALSE;
	*id = 0; /* mark the caller's ID as consumed */
#else
	unsigned int frame_id = (*id)->id;
	i_free_and_null(*id);

	if (unlikely(data_stack_frame_id != frame_id))
		return FALSE;
#endif
	return TRUE;
}
342 | | |
343 | | bool t_pop_pass_str(data_stack_frame_t *id, const char **str) |
344 | 7.43k | { |
345 | 7.43k | if (str == NULL || !data_stack_frame_contains(id, *str)) |
346 | 5.30k | return t_pop(id); |
347 | | |
348 | | /* FIXME: The string could be memmove()d to the beginning of the |
349 | | data stack frame and the previous frame's size extended past it. |
350 | | This would avoid the malloc. It's a bit complicated though. */ |
351 | 2.12k | char *tmp_str = i_strdup(*str); |
352 | 2.12k | bool ret = t_pop(id); |
353 | 2.12k | *str = t_strdup(tmp_str); |
354 | 2.12k | i_free(tmp_str); |
355 | 2.12k | return ret; |
356 | 7.43k | } |
357 | | |
358 | | static void mem_block_reset(struct stack_block *block) |
359 | 53.5k | { |
360 | 53.5k | block->prev = NULL; |
361 | 53.5k | block->next = NULL; |
362 | 53.5k | block->left = block->size; |
363 | | #ifdef DEBUG |
364 | | block->left_lowwater = block->size; |
365 | | #endif |
366 | 53.5k | } |
367 | | |
/* Allocate a new stack block able to hold at least min_size bytes of
   data. After the first block, sizes grow exponentially via
   nearest_power(prev_size + min_size). On malloc() failure the first
   time panics (falling back to the static outofmem_area while the panic
   message is built); a second failure aborts if the emergency block is
   too small. Preserves errno. */
static struct stack_block *mem_block_alloc(size_t min_size)
{
	struct stack_block *block;
	size_t prev_size, alloc_size;
	int old_errno = errno;

	prev_size = current_block == NULL ? 0 : current_block->size;
	/* Use INITIAL_STACK_SIZE without growing it to nearest power. */
	alloc_size = prev_size == 0 ? min_size :
		nearest_power(MALLOC_ADD(prev_size, min_size));

	/* nearest_power() returns 2^n values, so alloc_size can't be
	   anywhere close to SIZE_MAX */
	block = malloc(MALLOC_ADD(SIZEOF_MEMBLOCK, alloc_size));
	if (unlikely(block == NULL)) {
		if (outofmem) {
			/* second failure - use the emergency block if the
			   request fits into it, otherwise give up */
			if (min_size > outofmem_area.block.left)
				abort();
			return &outofmem_area.block;
		}
		outofmem = TRUE;
		i_panic("data stack: Out of memory when allocating %zu bytes",
			alloc_size + SIZEOF_MEMBLOCK);
	}
	errno = old_errno;
	block->size = alloc_size;
	block->canary = BLOCK_CANARY;
	mem_block_reset(block);
#ifdef DEBUG
	memset(STACK_BLOCK_DATA(block), CLEAR_CHR, alloc_size);
#endif
	return block;
}
401 | | |
/* Emit a "data_stack_grow" event after the stack had to allocate a new
   block. The expensive parts (backtrace, summary string) are only built
   if some event filter actually wants the debug event. */
static void data_stack_send_grow_event(size_t last_alloc_size)
{
	/* The t_malloc_real() adds a data stack frame. We don't care about it,
	   but the previous one. */
	struct stack_frame *frame = current_frame->prev;

	if (event_datastack_deinitialized) {
		/* already in the deinitialization code -
		   don't send more events */
		return;
	}
	if (event_datastack == NULL)
		event_datastack = event_create(NULL);
	event_set_name(event_datastack, "data_stack_grow");
	event_add_int(event_datastack, "alloc_size", data_stack_get_alloc_size());
	event_add_int(event_datastack, "used_size", data_stack_get_used_size());
	event_add_int(event_datastack, "last_alloc_size", last_alloc_size);
	event_add_int(event_datastack, "last_block_size", current_block->size);
#ifdef DEBUG
	event_add_int(event_datastack, "frame_alloc_bytes",
		      frame->alloc_bytes);
	event_add_int(event_datastack, "frame_alloc_count",
		      frame->alloc_count);
#endif
	event_add_str(event_datastack, "frame_marker", frame->marker);

	/* It's possible that the data stack gets grown and shrunk rapidly.
	   Try to avoid doing expensive work if the event isn't even used for
	   anything. Note that at this point all the event fields must be
	   set already that might potentially be used by the filters. */
	if (!event_want_debug(event_datastack))
		return;

	/* Getting backtrace is potentially inefficient, so do it after
	   checking if the event is wanted. Note that this prevents using the
	   backtrace field in event field comparisons. */
	const char *backtrace, *error;
	if (backtrace_get(&backtrace, &error) == 0)
		event_add_str(event_datastack, "backtrace", backtrace);
	else {
		backtrace = t_strdup_printf("backtrace failed: %s", error);
		event_add_str(event_datastack, "backtrace_error", error);
	}

	string_t *str = t_str_new(128);
	str_printfa(str, "total_used=%zu, total_alloc=%zu, last_alloc_size=%zu",
		    data_stack_get_used_size(),
		    data_stack_get_alloc_size(),
		    last_alloc_size);
#ifdef DEBUG
	str_printfa(str, ", frame_bytes=%llu, frame_alloc_count=%u",
		    frame->alloc_bytes, frame->alloc_count);
#endif
	e_debug(event_datastack, "Growing data stack by %zu for '%s' (%s): %s",
		current_block->size, frame->marker, str_c(str), backtrace);
}
458 | | |
/* Core data stack allocator. Returns a pointer to an aligned area of
   "size" bytes in the current block, growing the stack with a new block
   when needed. If "permanent" is FALSE the space is only reserved
   (t_buffer_get() semantics) - current_block->left stays untouched so
   the next allocation reuses the same area. Preserves errno. */
static void *t_malloc_real(size_t size, bool permanent)
{
	void *ret;
	size_t alloc_size;
	bool warn = FALSE;
	int old_errno = errno;

	if (unlikely(size == 0 || size > SSIZE_T_MAX))
		i_panic("Trying to allocate %zu bytes", size);

	if (unlikely(!data_stack_initialized)) {
		/* kludgy, but allow this before initialization */
		data_stack_init();
	}
	block_canary_check(current_block);

	/* allocate only aligned amount of memory so alignment comes
	   always properly */
	alloc_size = ALLOC_SIZE(size);
#ifdef DEBUG
	if(permanent) {
		current_frame->alloc_bytes += alloc_size;
		current_frame->alloc_count++;
	}
#endif
	data_stack_last_buffer_reset(TRUE);

	if (permanent) {
		/* used for t_try_realloc() */
		current_frame->last_alloc_size = alloc_size;
	}

	if (current_block->left < alloc_size) {
		struct stack_block *block;

		/* current block is full, see if we can use the unused_block */
		if (unused_block != NULL && unused_block->size >= alloc_size) {
			block = unused_block;
			unused_block = NULL;
			mem_block_reset(block);
		} else {
			/* current block is full, allocate a new one */
			block = mem_block_alloc(alloc_size);
			warn = TRUE;
		}

		/* The newly allocated block will replace the current_block,
		   i.e. current_block always points to the last element in
		   the linked list. */
		block->prev = current_block;
		current_block->next = block;
		current_block = block;
	}

	/* enough space in current block, use it */
	ret = data_stack_after_last_alloc(current_block);

#ifdef DEBUG
	if (current_block->left - alloc_size < current_block->left_lowwater)
		current_block->left_lowwater = current_block->left - alloc_size;
#endif
	if (permanent)
		current_block->left -= alloc_size;

	if (warn) T_BEGIN {
		/* warn after allocation, so if e_debug() wants to
		   allocate more memory we don't go to infinite loop */
		data_stack_send_grow_event(alloc_size);
	} T_END;
#ifdef DEBUG
	/* prefix the allocation with its requested size, then return the
	   pointer just past that header */
	memcpy(ret, &size, sizeof(size));
	ret = PTR_OFFSET(ret, MEM_ALIGN(sizeof(size)));
	/* make sure the sentry contains CLEAR_CHRs. it might not if we
	   had used t_buffer_get(). */
	memset(PTR_OFFSET(ret, size), CLEAR_CHR,
	       MEM_ALIGN(size + SENTRY_COUNT) - size);
#endif
	errno = old_errno;
	return ret;
}
539 | | |
/* Allocate memory from the current data stack frame without
   zero-initializing it. */
void *t_malloc_no0(size_t size)
{
	return t_malloc_real(size, TRUE);
}
544 | | |
545 | | void *t_malloc0(size_t size) |
546 | 4.10M | { |
547 | 4.10M | void *mem; |
548 | | |
549 | 4.10M | mem = t_malloc_real(size, TRUE); |
550 | 4.10M | memset(mem, 0, size); |
551 | 4.10M | return mem; |
552 | 4.10M | } |
553 | | |
/* Try to grow, in place, the most recent permanent allocation of the
   current frame. Returns TRUE if "mem" was that allocation and the
   current block had enough free space; FALSE means the caller must
   allocate new memory and copy. */
bool ATTR_NO_SANITIZE_INTEGER
t_try_realloc(void *mem, size_t size)
{
	size_t debug_adjust = 0, last_alloc_size;
	unsigned char *after_last_alloc;

	if (unlikely(size == 0 || size > SSIZE_T_MAX))
		i_panic("Trying to allocate %zu bytes", size);
	block_canary_check(current_block);
	data_stack_last_buffer_reset(TRUE);

	last_alloc_size = current_frame->last_alloc_size;

	/* see if we're trying to grow the memory we allocated last */
	after_last_alloc = data_stack_after_last_alloc(current_block);
#ifdef DEBUG
	/* in DEBUG builds the returned pointer is offset past the saved
	   size_t header, so adjust the comparison accordingly */
	debug_adjust = MEM_ALIGN(sizeof(size_t));
#endif
	if (after_last_alloc - last_alloc_size + debug_adjust == mem) {
		/* yeah, see if we have space to grow */
		size_t new_alloc_size, alloc_growth;

		new_alloc_size = ALLOC_SIZE(size);
		alloc_growth = (new_alloc_size - last_alloc_size);
#ifdef DEBUG
		size_t old_raw_size; /* sorry, non-C99 users - add braces if you need them */
		old_raw_size = *(size_t *)PTR_OFFSET(mem, -(ptrdiff_t)MEM_ALIGN(sizeof(size_t)));
		i_assert(ALLOC_SIZE(old_raw_size) == last_alloc_size);
		/* Only check one byte for over-run, that catches most
		   offenders who are likely to use t_try_realloc() */
		i_assert(((unsigned char*)mem)[old_raw_size] == CLEAR_CHR);
#endif

		if (current_block->left >= alloc_growth) {
			/* just shrink the available size */
			current_block->left -= alloc_growth;
			current_frame->last_alloc_size = new_alloc_size;
#ifdef DEBUG
			if (current_block->left < current_block->left_lowwater)
				current_block->left_lowwater = current_block->left;
			/* All reallocs are permanent by definition
			   However, they don't count as a new allocation */
			current_frame->alloc_bytes += alloc_growth;
			*(size_t *)PTR_OFFSET(mem, -(ptrdiff_t)MEM_ALIGN(sizeof(size_t))) = size;
			memset(PTR_OFFSET(mem, size), CLEAR_CHR,
			       new_alloc_size - size - MEM_ALIGN(sizeof(size_t)));
#endif
			return TRUE;
		}
	}

	return FALSE;
}
607 | | |
/* Return how many bytes can still be allocated from the current block
   without growing the stack. In DEBUG builds the per-allocation size
   header and sentry overhead are subtracted first. */
size_t t_get_bytes_available(void)
{
	block_canary_check(current_block);
#ifndef DEBUG
	const unsigned int min_extra = 0;
#else
	const unsigned int min_extra = SENTRY_COUNT + MEM_ALIGN(sizeof(size_t));
	if (current_block->left < min_extra)
		return 0;
#endif
	size_t size = current_block->left - min_extra;
	i_assert(ALLOC_SIZE(size) == current_block->left);
	return size;
}
622 | | |
623 | | void *t_buffer_get(size_t size) |
624 | 4.32M | { |
625 | 4.32M | void *ret; |
626 | | |
627 | 4.32M | ret = t_malloc_real(size, FALSE); |
628 | | |
629 | 4.32M | last_buffer_size = size; |
630 | 4.32M | last_buffer_block = current_block; |
631 | 4.32M | return ret; |
632 | 4.32M | } |
633 | | |
634 | | void *t_buffer_reget(void *buffer, size_t size) |
635 | 140k | { |
636 | 140k | size_t old_size; |
637 | 140k | void *new_buffer; |
638 | | |
639 | 140k | old_size = last_buffer_size; |
640 | 140k | if (size <= old_size) |
641 | 0 | return buffer; |
642 | | |
643 | 140k | new_buffer = t_buffer_get(size); |
644 | 140k | if (new_buffer != buffer) |
645 | 62 | memcpy(new_buffer, buffer, old_size); |
646 | | |
647 | 140k | return new_buffer; |
648 | 140k | } |
649 | | |
/* Commit "size" bytes (at most the reserved amount) of the space
   previously reserved with t_buffer_get(). */
void t_buffer_alloc(size_t size)
{
	i_assert(last_buffer_block != NULL);
	i_assert(last_buffer_size >= size);
	i_assert(current_block->left >= size);

	/* we've already reserved the space, now we just mark it used */
	(void)t_malloc_real(size, TRUE);
}
659 | | |
/* Commit the entire space reserved by the last t_buffer_get(), if a
   reservation is still pending. */
void t_buffer_alloc_last_full(void)
{
	if (last_buffer_block != NULL)
		(void)t_malloc_real(last_buffer_size, TRUE);
}
665 | | |
/* Return TRUE if "_ptr" points into memory allocated from the given
   frame. Only the latest frame is supported (asserted). */
bool data_stack_frame_contains(data_stack_frame_t *id, const void *_ptr)
{
	const unsigned char *block_data, *ptr = _ptr;
	const struct stack_block *block;
	unsigned int wanted_frame_id;
	size_t block_start_pos, block_used;

	/* first handle the fast path - NULL can never be within the frame */
	if (ptr == NULL)
		return FALSE;

#ifndef STATIC_CHECKER
	wanted_frame_id = *id;
#else
	wanted_frame_id = (*id)->id;
#endif
	/* Too much effort to support more than the latest frame.
	   It's the only thing that is currently needed anyway. */
	i_assert(wanted_frame_id+1 == data_stack_frame_id);
	block = current_frame->block;
	i_assert(block != NULL);

	/* See if it's in the frame's first block. Only the data after
	   block_start_pos belong to this frame. */
	block_data = STACK_BLOCK_DATA(block);
	block_start_pos = block->size - current_frame->block_space_left;
	block_used = block->size - block->left;
	if (ptr >= block_data + block_start_pos &&
	    ptr <= block_data + block_used)
		return TRUE;

	/* See if it's in the other blocks. All the data in them belong to
	   this frame. */
	for (block = block->next; block != NULL; block = block->next) {
		block_data = STACK_BLOCK_DATA(block);
		block_used = block->size - block->left;
		if (ptr >= block_data && ptr < block_data + block_used)
			return TRUE;
	}
	return FALSE;
}
707 | | |
708 | | size_t data_stack_get_alloc_size(void) |
709 | 6.88k | { |
710 | 6.88k | struct stack_block *block; |
711 | 6.88k | size_t size = 0; |
712 | | |
713 | 6.88k | i_assert(current_block->next == NULL); |
714 | | |
715 | 28.0k | for (block = current_block; block != NULL; block = block->prev) |
716 | 21.1k | size += block->size; |
717 | 6.88k | return size; |
718 | 6.88k | } |
719 | | |
720 | | size_t data_stack_get_used_size(void) |
721 | 6.88k | { |
722 | 6.88k | struct stack_block *block; |
723 | 6.88k | size_t size = 0; |
724 | | |
725 | 6.88k | i_assert(current_block->next == NULL); |
726 | | |
727 | 28.0k | for (block = current_block; block != NULL; block = block->prev) |
728 | 21.1k | size += block->size - block->left; |
729 | 6.88k | return size; |
730 | 6.88k | } |
731 | | |
732 | | void data_stack_free_unused(void) |
733 | 43.0k | { |
734 | 43.0k | int old_errno = errno; |
735 | 43.0k | free(unused_block); |
736 | 43.0k | unused_block = NULL; |
737 | 43.0k | errno = old_errno; |
738 | 43.0k | } |
739 | | |
/* Initialize the data stack: set up the static emergency block,
   allocate the initial block and push the root frame. Also called
   automatically from t_push()/t_malloc_real() if needed; a second call
   is a no-op. */
void data_stack_init(void)
{
	if (data_stack_initialized) {
		/* already initialized (we did auto-initialization in
		   t_malloc/t_push) */
		return;
	}
	data_stack_initialized = TRUE;
	data_stack_frame_id = 1;

	/* the emergency block's data area is whatever is left of the
	   static union after the header */
	outofmem_area.block.size = outofmem_area.block.left =
		sizeof(outofmem_area) - sizeof(outofmem_area.block);
	outofmem_area.block.canary = BLOCK_CANARY;
	outofmem = FALSE;

	unused_block = NULL;
	current_block = mem_block_alloc(INITIAL_STACK_SIZE);
	current_frame = NULL;

	last_buffer_block = NULL;
	last_buffer_size = 0;

	root_frame_id = t_push("data_stack_init");

	event_datastack = NULL;
	event_datastack_deinitialized = FALSE;
}
767 | | |
/* Free the grow-event and stop sending further data_stack_grow events.
   Called during global deinitialization before data_stack_deinit(). */
void data_stack_deinit_event(void)
{
	event_unref(&event_datastack);
	event_datastack_deinitialized = TRUE;
}
773 | | |
/* Tear down the data stack. Panics if any frame other than the root
   frame pushed by data_stack_init() is still on the stack. */
void data_stack_deinit(void)
{
	if (!t_pop(&root_frame_id) ||
	    current_frame != NULL)
		i_panic("Missing t_pop() call");

	free(current_block);
	current_block = NULL;
	data_stack_free_unused();
	data_stack_initialized = FALSE;
}