/src/unbound/util/alloc.c
Line | Count | Source (jump to first uncovered line) |
1 | | /* |
2 | | * util/alloc.c - memory allocation service. |
3 | | * |
4 | | * Copyright (c) 2007, NLnet Labs. All rights reserved. |
5 | | * |
6 | | * This software is open source. |
7 | | * |
8 | | * Redistribution and use in source and binary forms, with or without |
9 | | * modification, are permitted provided that the following conditions |
10 | | * are met: |
11 | | * |
12 | | * Redistributions of source code must retain the above copyright notice, |
13 | | * this list of conditions and the following disclaimer. |
14 | | * |
15 | | * Redistributions in binary form must reproduce the above copyright notice, |
16 | | * this list of conditions and the following disclaimer in the documentation |
17 | | * and/or other materials provided with the distribution. |
18 | | * |
19 | | * Neither the name of the NLNET LABS nor the names of its contributors may |
20 | | * be used to endorse or promote products derived from this software without |
21 | | * specific prior written permission. |
22 | | * |
23 | | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS |
24 | | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT |
25 | | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR |
26 | | * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT |
27 | | * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, |
28 | | * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED |
29 | | * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR |
30 | | * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF |
31 | | * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING |
32 | | * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS |
33 | | * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
34 | | */ |
35 | | |
36 | | /** |
37 | | * \file |
38 | | * |
39 | | * This file contains memory allocation functions. |
40 | | */ |
41 | | |
42 | | #include "config.h" |
43 | | #include "util/alloc.h" |
44 | | #include "util/regional.h" |
45 | | #include "util/data/packed_rrset.h" |
46 | | #include "util/fptr_wlist.h" |
47 | | |
48 | | /** custom size of cached regional blocks */ |
49 | 0 | #define ALLOC_REG_SIZE 16384 |
50 | | /** number of bits for ID part of uint64, rest for number of threads. */ |
51 | 9.33k | #define THRNUM_SHIFT 48 /* for 65k threads, 2^48 rrsets per thr. */ |
52 | | |
53 | | /** setup new special type */ |
54 | | static void |
55 | | alloc_setup_special(alloc_special_type* t) |
56 | 19.3k | { |
57 | 19.3k | memset(t, 0, sizeof(*t)); |
58 | 19.3k | lock_rw_init(&t->entry.lock); |
59 | 19.3k | t->entry.key = t; |
60 | 19.3k | } |
61 | | |
62 | | /** prealloc some entries in the cache. To minimize contention. |
63 | | * Result is 1 lock per alloc_max newly created entries. |
64 | | * @param alloc: the structure to fill up. |
65 | | */ |
66 | | static void |
67 | | prealloc_setup(struct alloc_cache* alloc) |
68 | 1.75k | { |
69 | 1.75k | alloc_special_type* p; |
70 | 1.75k | int i; |
71 | 19.3k | for(i=0; i<ALLOC_SPECIAL_MAX; i++) { |
72 | 17.5k | if(!(p = (alloc_special_type*)malloc( |
73 | 17.5k | sizeof(alloc_special_type)))) { |
74 | 0 | log_err("prealloc: out of memory"); |
75 | 0 | return; |
76 | 0 | } |
77 | 17.5k | alloc_setup_special(p); |
78 | 17.5k | alloc_set_special_next(p, alloc->quar); |
79 | 17.5k | alloc->quar = p; |
80 | 17.5k | alloc->num_quar++; |
81 | 17.5k | } |
82 | 1.75k | } |
83 | | |
84 | | /** prealloc region blocks */ |
85 | | static void |
86 | | prealloc_blocks(struct alloc_cache* alloc, size_t num) |
87 | 0 | { |
88 | 0 | size_t i; |
89 | 0 | struct regional* r; |
90 | 0 | for(i=0; i<num; i++) { |
91 | 0 | r = regional_create_custom(ALLOC_REG_SIZE); |
92 | 0 | if(!r) { |
93 | 0 | log_err("prealloc blocks: out of memory"); |
94 | 0 | return; |
95 | 0 | } |
96 | 0 | r->next = (char*)alloc->reg_list; |
97 | 0 | alloc->reg_list = r; |
98 | 0 | alloc->num_reg_blocks ++; |
99 | 0 | } |
100 | 0 | } |
101 | | |
102 | | void |
103 | | alloc_init(struct alloc_cache* alloc, struct alloc_cache* super, |
104 | | int thread_num) |
105 | 4.66k | { |
106 | 4.66k | memset(alloc, 0, sizeof(*alloc)); |
107 | 4.66k | alloc->super = super; |
108 | 4.66k | alloc->thread_num = thread_num; |
109 | 4.66k | alloc->next_id = (uint64_t)thread_num; /* in steps, so that type */ |
110 | 4.66k | alloc->next_id <<= THRNUM_SHIFT; /* of *_id is used. */ |
111 | 4.66k | alloc->last_id = 1; /* so no 64bit constants, */ |
112 | 4.66k | alloc->last_id <<= THRNUM_SHIFT; /* or implicit 'int' ops. */ |
113 | 4.66k | alloc->last_id -= 1; /* for compiler portability. */ |
114 | 4.66k | alloc->last_id |= alloc->next_id; |
115 | 4.66k | alloc->next_id += 1; /* because id=0 is special. */ |
116 | 4.66k | alloc->max_reg_blocks = 100; |
117 | 4.66k | alloc->num_reg_blocks = 0; |
118 | 4.66k | alloc->reg_list = NULL; |
119 | 4.66k | alloc->cleanup = NULL; |
120 | 4.66k | alloc->cleanup_arg = NULL; |
121 | 4.66k | if(alloc->super) |
122 | 0 | prealloc_blocks(alloc, alloc->max_reg_blocks); |
123 | 4.66k | if(!alloc->super) { |
124 | 4.66k | lock_quick_init(&alloc->lock); |
125 | 4.66k | lock_protect(&alloc->lock, alloc, sizeof(*alloc)); |
126 | 4.66k | } |
127 | 4.66k | } |
128 | | |
129 | | /** free the special list */ |
130 | | static void |
131 | | alloc_clear_special_list(struct alloc_cache* alloc) |
132 | 4.66k | { |
133 | 4.66k | alloc_special_type* p, *np; |
134 | | /* free */ |
135 | 4.66k | p = alloc->quar; |
136 | 23.9k | while(p) { |
137 | 19.3k | np = alloc_special_next(p); |
138 | | /* deinit special type */ |
139 | 19.3k | lock_rw_destroy(&p->entry.lock); |
140 | 19.3k | free(p); |
141 | 19.3k | p = np; |
142 | 19.3k | } |
143 | 4.66k | } |
144 | | |
145 | | void |
146 | | alloc_clear_special(struct alloc_cache* alloc) |
147 | 0 | { |
148 | 0 | if(!alloc->super) { |
149 | 0 | lock_quick_lock(&alloc->lock); |
150 | 0 | } |
151 | 0 | alloc_clear_special_list(alloc); |
152 | 0 | alloc->quar = 0; |
153 | 0 | alloc->num_quar = 0; |
154 | 0 | if(!alloc->super) { |
155 | 0 | lock_quick_unlock(&alloc->lock); |
156 | 0 | } |
157 | 0 | } |
158 | | |
/** Tear down the cache: quarantined entries are pushed to the super
 * cache in one locked splice (per-thread caches) or freed outright
 * (the top-level cache), and cached regional blocks are released.
 * Destroys the lock on the top-level alloc, so for that one this must
 * be the final operation. */
void
alloc_clear(struct alloc_cache* alloc)
{
	alloc_special_type* p;
	struct regional* r, *nr;
	if(!alloc)
		return;
	if(!alloc->super) {
		/* top-level cache: nobody may use it after this point */
		lock_quick_destroy(&alloc->lock);
	}
	if(alloc->super && alloc->quar) {
		/* push entire list into super */
		p = alloc->quar;
		while(alloc_special_next(p)) /* find last */
			p = alloc_special_next(p);
		lock_quick_lock(&alloc->super->lock);
		alloc_set_special_next(p, alloc->super->quar);
		alloc->super->quar = alloc->quar;
		alloc->super->num_quar += alloc->num_quar;
		lock_quick_unlock(&alloc->super->lock);
	} else {
		alloc_clear_special_list(alloc);
	}
	alloc->quar = 0;
	alloc->num_quar = 0;
	r = alloc->reg_list;
	while(r) {
		/* blocks came from regional_create_custom; freed with free()
		 * — assumes each is a single malloc block (TODO confirm) */
		nr = (struct regional*)r->next;
		free(r);
		r = nr;
	}
	alloc->reg_list = NULL;
	alloc->num_reg_blocks = 0;
}
193 | | |
194 | | uint64_t |
195 | | alloc_get_id(struct alloc_cache* alloc) |
196 | 14.9k | { |
197 | 14.9k | uint64_t id = alloc->next_id++; |
198 | 14.9k | if(id == alloc->last_id) { |
199 | 0 | log_warn("rrset alloc: out of 64bit ids. Clearing cache."); |
200 | 0 | fptr_ok(fptr_whitelist_alloc_cleanup(alloc->cleanup)); |
201 | 0 | (*alloc->cleanup)(alloc->cleanup_arg); |
202 | | |
203 | | /* start back at first number */ /* like in alloc_init*/ |
204 | 0 | alloc->next_id = (uint64_t)alloc->thread_num; |
205 | 0 | alloc->next_id <<= THRNUM_SHIFT; /* in steps for comp. */ |
206 | 0 | alloc->next_id += 1; /* portability. */ |
207 | | /* and generate new and safe id */ |
208 | 0 | id = alloc->next_id++; |
209 | 0 | } |
210 | 14.9k | return id; |
211 | 14.9k | } |
212 | | |
213 | | alloc_special_type* |
214 | | alloc_special_obtain(struct alloc_cache* alloc) |
215 | 14.9k | { |
216 | 14.9k | alloc_special_type* p; |
217 | 14.9k | log_assert(alloc); |
218 | | /* see if in local cache */ |
219 | 14.9k | if(alloc->quar) { |
220 | 13.2k | p = alloc->quar; |
221 | 13.2k | alloc->quar = alloc_special_next(p); |
222 | 13.2k | alloc->num_quar--; |
223 | 13.2k | p->id = alloc_get_id(alloc); |
224 | 13.2k | return p; |
225 | 13.2k | } |
226 | | /* see if in global cache */ |
227 | 1.75k | if(alloc->super) { |
228 | | /* could maybe grab alloc_max/2 entries in one go, |
229 | | * but really, isn't that just as fast as this code? */ |
230 | 0 | lock_quick_lock(&alloc->super->lock); |
231 | 0 | if((p = alloc->super->quar)) { |
232 | 0 | alloc->super->quar = alloc_special_next(p); |
233 | 0 | alloc->super->num_quar--; |
234 | 0 | } |
235 | 0 | lock_quick_unlock(&alloc->super->lock); |
236 | 0 | if(p) { |
237 | 0 | p->id = alloc_get_id(alloc); |
238 | 0 | return p; |
239 | 0 | } |
240 | 0 | } |
241 | | /* allocate new */ |
242 | 1.75k | prealloc_setup(alloc); |
243 | 1.75k | if(!(p = (alloc_special_type*)malloc(sizeof(alloc_special_type)))) { |
244 | 0 | log_err("alloc_special_obtain: out of memory"); |
245 | 0 | return NULL; |
246 | 0 | } |
247 | 1.75k | alloc_setup_special(p); |
248 | 1.75k | p->id = alloc_get_id(alloc); |
249 | 1.75k | return p; |
250 | 1.75k | } |
251 | | |
/** push mem and some more items to the super.
 * Splices ALLOC_SPECIAL_MAX/2 entries (headed by mem) off the local
 * quarantine list into the super's list, so a single super-lock grab
 * pays for many releases. No local lock is held here: this is only
 * reached from a per-thread cache (alloc->super != NULL), which is
 * unlocked.
 * @param mem: entry being released; becomes the head of the pushed run.
 */
static void
pushintosuper(struct alloc_cache* alloc, alloc_special_type* mem)
{
	int i;
	alloc_special_type *p = alloc->quar;
	log_assert(p);
	log_assert(alloc && alloc->super &&
		alloc->num_quar >= ALLOC_SPECIAL_MAX);
	/* push ALLOC_SPECIAL_MAX/2 after mem */
	alloc_set_special_next(mem, alloc->quar);
	/* walk p to the last entry that goes with the pushed run */
	for(i=1; i<ALLOC_SPECIAL_MAX/2; i++) {
		p = alloc_special_next(p);
	}
	/* local list keeps everything after p */
	alloc->quar = alloc_special_next(p);
	alloc->num_quar -= ALLOC_SPECIAL_MAX/2;

	/* dump mem+list into the super quar list */
	lock_quick_lock(&alloc->super->lock);
	alloc_set_special_next(p, alloc->super->quar);
	alloc->super->quar = mem;
	alloc->super->num_quar += ALLOC_SPECIAL_MAX/2 + 1;
	lock_quick_unlock(&alloc->super->lock);
	/* so 1 lock per mem+alloc/2 deletes */
}
277 | | |
/** Return a special entry to the cache: clean it, then push it on the
 * local quarantine list. A per-thread cache that has grown to
 * ALLOC_SPECIAL_MAX moves half its list to the super instead.
 * Locking: only the top-level alloc (super == NULL) takes its lock; the
 * early return after pushintosuper is safe because that path implies
 * super != NULL and therefore no lock was taken. */
void
alloc_special_release(struct alloc_cache* alloc, alloc_special_type* mem)
{
	log_assert(alloc);
	if(!mem)
		return;
	if(!alloc->super) {
		lock_quick_lock(&alloc->lock); /* superalloc needs locking */
	}

	alloc_special_clean(mem);
	if(alloc->super && alloc->num_quar >= ALLOC_SPECIAL_MAX) {
		/* push it to the super structure */
		pushintosuper(alloc, mem);
		return; /* no lock held on this (super != NULL) path */
	}

	alloc_set_special_next(mem, alloc->quar);
	alloc->quar = mem;
	alloc->num_quar++;
	if(!alloc->super) {
		lock_quick_unlock(&alloc->lock);
	}
}
302 | | |
303 | | void |
304 | | alloc_stats(struct alloc_cache* alloc) |
305 | 0 | { |
306 | 0 | log_info("%salloc: %d in cache, %d blocks.", alloc->super?"":"sup", |
307 | 0 | (int)alloc->num_quar, (int)alloc->num_reg_blocks); |
308 | 0 | } |
309 | | |
310 | | size_t alloc_get_mem(struct alloc_cache* alloc) |
311 | 0 | { |
312 | 0 | alloc_special_type* p; |
313 | 0 | size_t s = sizeof(*alloc); |
314 | 0 | if(!alloc->super) { |
315 | 0 | lock_quick_lock(&alloc->lock); /* superalloc needs locking */ |
316 | 0 | } |
317 | 0 | s += sizeof(alloc_special_type) * alloc->num_quar; |
318 | 0 | for(p = alloc->quar; p; p = alloc_special_next(p)) { |
319 | 0 | s += lock_get_mem(&p->entry.lock); |
320 | 0 | } |
321 | 0 | s += alloc->num_reg_blocks * ALLOC_REG_SIZE; |
322 | 0 | if(!alloc->super) { |
323 | 0 | lock_quick_unlock(&alloc->lock); |
324 | 0 | } |
325 | 0 | return s; |
326 | 0 | } |
327 | | |
328 | | struct regional* |
329 | | alloc_reg_obtain(struct alloc_cache* alloc) |
330 | 0 | { |
331 | 0 | if(alloc->num_reg_blocks > 0) { |
332 | 0 | struct regional* r = alloc->reg_list; |
333 | 0 | alloc->reg_list = (struct regional*)r->next; |
334 | 0 | r->next = NULL; |
335 | 0 | alloc->num_reg_blocks--; |
336 | 0 | return r; |
337 | 0 | } |
338 | 0 | return regional_create_custom(ALLOC_REG_SIZE); |
339 | 0 | } |
340 | | |
341 | | void |
342 | | alloc_reg_release(struct alloc_cache* alloc, struct regional* r) |
343 | 0 | { |
344 | 0 | if(alloc->num_reg_blocks >= alloc->max_reg_blocks) { |
345 | 0 | regional_destroy(r); |
346 | 0 | return; |
347 | 0 | } |
348 | 0 | if(!r) return; |
349 | 0 | regional_free_all(r); |
350 | 0 | log_assert(r->next == NULL); |
351 | 0 | r->next = (char*)alloc->reg_list; |
352 | 0 | alloc->reg_list = r; |
353 | 0 | alloc->num_reg_blocks++; |
354 | 0 | } |
355 | | |
356 | | void |
357 | | alloc_set_id_cleanup(struct alloc_cache* alloc, void (*cleanup)(void*), |
358 | | void* arg) |
359 | 0 | { |
360 | 0 | alloc->cleanup = cleanup; |
361 | 0 | alloc->cleanup_arg = arg; |
362 | 0 | } |
363 | | |
364 | | /** global debug value to keep track of total memory mallocs */ |
365 | | size_t unbound_mem_alloc = 0; |
366 | | /** global debug value to keep track of total memory frees */ |
367 | | size_t unbound_mem_freed = 0; |
368 | | #ifdef UNBOUND_ALLOC_STATS |
369 | | /** special value to know if the memory is being tracked */ |
370 | | uint64_t mem_special = (uint64_t)0xfeed43327766abcdLL; |
371 | | #ifdef malloc |
372 | | #undef malloc |
373 | | #endif |
/** malloc with stats.
 * Tracked layout: [size_t size][8-byte mem_special marker][user data];
 * the returned pointer is 16 bytes past the real allocation.
 * NOTE: arithmetic on void* is a GCC extension, used throughout this
 * debug section. */
void *unbound_stat_malloc(size_t size)
{
	void* res;
	if(size == 0) size = 1; /* always return a distinct pointer */
	log_assert(size <= SIZE_MAX-16);
	res = malloc(size+16);
	if(!res) return NULL;
	unbound_mem_alloc += size;
	log_info("stat %p=malloc(%u)", res+16, (unsigned)size);
	memcpy(res, &size, sizeof(size));
	memcpy(res+8, &mem_special, sizeof(mem_special));
	return res+16;
}
388 | | #ifdef calloc |
389 | | #undef calloc |
390 | | #endif |
391 | | #ifndef INT_MAX |
392 | | #define INT_MAX (((int)-1)>>1) |
393 | | #endif |
/** calloc with stats; same 16-byte tracking header as unbound_stat_malloc.
 * NOTE(review): the overflow guard uses INT_MAX rather than SIZE_MAX,
 * rejecting some large-but-valid requests — presumably a deliberate
 * debug-build cap; confirm. */
void *unbound_stat_calloc(size_t nmemb, size_t size)
{
	size_t s;
	void* res;
	if(nmemb != 0 && INT_MAX/nmemb < size)
		return NULL; /* integer overflow check */
	s = (nmemb*size==0)?(size_t)1:nmemb*size; /* min 1 byte, like malloc */
	log_assert(s <= SIZE_MAX-16);
	res = calloc(1, s+16);
	if(!res) return NULL;
	log_info("stat %p=calloc(%u, %u)", res+16, (unsigned)nmemb, (unsigned)size);
	unbound_mem_alloc += s;
	memcpy(res, &s, sizeof(s));
	memcpy(res+8, &mem_special, sizeof(mem_special));
	return res+16;
}
411 | | #ifdef free |
412 | | #undef free |
413 | | #endif |
/** free with stats.
 * A pointer without the marker at ptr-8 was not allocated by the stat
 * wrappers and is handed straight to free(). Tracked pointers get the
 * marker wiped (so a double free will not be counted twice) and their
 * recorded size added to the freed total. */
void unbound_stat_free(void *ptr)
{
	size_t s;
	if(!ptr) return;
	if(memcmp(ptr-8, &mem_special, sizeof(mem_special)) != 0) {
		free(ptr);
		return;
	}
	ptr-=16; /* back to the real allocation start */
	memcpy(&s, ptr, sizeof(s));
	log_info("stat free(%p) size %u", ptr+16, (unsigned)s);
	memset(ptr+8, 0, 8); /* clear the marker */
	unbound_mem_freed += s;
	free(ptr);
}
430 | | #ifdef realloc |
431 | | #undef realloc |
432 | | #endif |
433 | | /** realloc with stats */ |
434 | | void *unbound_stat_realloc(void *ptr, size_t size) |
435 | | { |
436 | | size_t cursz; |
437 | | void* res; |
438 | | if(!ptr) return unbound_stat_malloc(size); |
439 | | if(memcmp(ptr-8, &mem_special, sizeof(mem_special)) != 0) { |
440 | | return realloc(ptr, size); |
441 | | } |
442 | | if(size==0) { |
443 | | unbound_stat_free(ptr); |
444 | | return NULL; |
445 | | } |
446 | | ptr -= 16; |
447 | | memcpy(&cursz, ptr, sizeof(cursz)); |
448 | | if(cursz == size) { |
449 | | /* nothing changes */ |
450 | | return ptr; |
451 | | } |
452 | | log_assert(size <= SIZE_MAX-16); |
453 | | res = malloc(size+16); |
454 | | if(!res) return NULL; |
455 | | unbound_mem_alloc += size; |
456 | | unbound_mem_freed += cursz; |
457 | | log_info("stat realloc(%p, %u) from %u", ptr+16, (unsigned)size, (unsigned)cursz); |
458 | | if(cursz > size) { |
459 | | memcpy(res+16, ptr+16, size); |
460 | | } else if(size > cursz) { |
461 | | memcpy(res+16, ptr+16, cursz); |
462 | | } |
463 | | memset(ptr+8, 0, 8); |
464 | | free(ptr); |
465 | | memcpy(res, &size, sizeof(size)); |
466 | | memcpy(res+8, &mem_special, sizeof(mem_special)); |
467 | | return res+16; |
468 | | } |
469 | | |
/** log to file where alloc was done.
 * Thin wrapper: logs the call site, then defers to unbound_stat_malloc. */
void *unbound_stat_malloc_log(size_t size, const char* file, int line,
	const char* func)
{
	log_info("%s:%d %s malloc(%u)", file, line, func, (unsigned)size);
	return unbound_stat_malloc(size);
}
477 | | |
/** log to file where alloc was done.
 * Thin wrapper: logs the call site, then defers to unbound_stat_calloc. */
void *unbound_stat_calloc_log(size_t nmemb, size_t size, const char* file,
	int line, const char* func)
{
	log_info("%s:%d %s calloc(%u, %u)", file, line, func,
		(unsigned) nmemb, (unsigned)size);
	return unbound_stat_calloc(nmemb, size);
}
486 | | |
/** log to file where free was done.
 * For tracked pointers (marker at ptr-8) the stored size is read only
 * for the log line; the actual free goes through unbound_stat_free. */
void unbound_stat_free_log(void *ptr, const char* file, int line,
	const char* func)
{
	if(ptr && memcmp(ptr-8, &mem_special, sizeof(mem_special)) == 0) {
		size_t s;
		memcpy(&s, ptr-16, sizeof(s));
		log_info("%s:%d %s free(%p) size %u",
			file, line, func, ptr, (unsigned)s);
	} else
		log_info("%s:%d %s unmatched free(%p)", file, line, func, ptr);
	unbound_stat_free(ptr);
}
500 | | |
/** log to file where alloc was done.
 * Thin wrapper: logs the call site, then defers to unbound_stat_realloc. */
void *unbound_stat_realloc_log(void *ptr, size_t size, const char* file,
	int line, const char* func)
{
	log_info("%s:%d %s realloc(%p, %u)", file, line, func,
		ptr, (unsigned)size);
	return unbound_stat_realloc(ptr, size);
}
509 | | |
510 | | #endif /* UNBOUND_ALLOC_STATS */ |
511 | | #ifdef UNBOUND_ALLOC_LITE |
512 | | #undef malloc |
513 | | #undef calloc |
514 | | #undef free |
515 | | #undef realloc |
516 | | /** length of prefix and suffix */ |
517 | | static size_t lite_pad = 16; |
518 | | /** prefix value to check */ |
519 | | static char* lite_pre = "checkfront123456"; |
520 | | /** suffix value to check */ |
521 | | static char* lite_post= "checkafter123456"; |
522 | | |
/** malloc that brackets each allocation with guard patterns:
 * [16-byte prefix][size_t len][user data filled with 0x1a][16-byte suffix].
 * The guards are verified on free/realloc. file/line/func are part of
 * the debug API signature; unused in this function. */
void *unbound_stat_malloc_lite(size_t size, const char* file, int line,
	const char* func)
{
	/* [prefix .. len .. actual data .. suffix] */
	void* res;
	log_assert(size <= SIZE_MAX-(lite_pad*2+sizeof(size_t)));
	res = malloc(size+lite_pad*2+sizeof(size_t));
	if(!res) return NULL;
	memmove(res, lite_pre, lite_pad);
	memmove(res+lite_pad, &size, sizeof(size_t));
	memset(res+lite_pad+sizeof(size_t), 0x1a, size); /* init the memory */
	memmove(res+lite_pad+size+sizeof(size_t), lite_post, lite_pad);
	return res+lite_pad+sizeof(size_t);
}
537 | | |
/** calloc variant with the same guard layout as unbound_stat_malloc_lite,
 * but the user area is zeroed. NOTE(review): overflow guard uses INT_MAX
 * (matches the stats calloc above) — presumably intentional; confirm. */
void *unbound_stat_calloc_lite(size_t nmemb, size_t size, const char* file,
	int line, const char* func)
{
	size_t req;
	void* res;
	if(nmemb != 0 && INT_MAX/nmemb < size)
		return NULL; /* integer overflow check */
	req = nmemb * size;
	log_assert(req <= SIZE_MAX-(lite_pad*2+sizeof(size_t)));
	res = malloc(req+lite_pad*2+sizeof(size_t));
	if(!res) return NULL;
	memmove(res, lite_pre, lite_pad);
	memmove(res+lite_pad, &req, sizeof(size_t));
	memset(res+lite_pad+sizeof(size_t), 0, req); /* calloc zeroes */
	memmove(res+lite_pad+req+sizeof(size_t), lite_post, lite_pad);
	return res+lite_pad+sizeof(size_t);
}
555 | | |
/** free that verifies both guard patterns around the allocation and
 * calls fatal_exit on corruption (buffer under/overrun detector).
 * The whole block is poisoned with 0xdd before the real free so
 * use-after-free reads stand out. */
void unbound_stat_free_lite(void *ptr, const char* file, int line,
	const char* func)
{
	void* real;
	size_t orig = 0;
	if(!ptr) return;
	real = ptr-lite_pad-sizeof(size_t); /* back to the guard prefix */
	if(memcmp(real, lite_pre, lite_pad) != 0) {
		log_err("free(): prefix failed %s:%d %s", file, line, func);
		log_hex("prefix here", real, lite_pad);
		log_hex(" should be", lite_pre, lite_pad);
		fatal_exit("alloc assertion failed");
	}
	memmove(&orig, real+lite_pad, sizeof(size_t)); /* stored user size */
	if(memcmp(real+lite_pad+orig+sizeof(size_t), lite_post, lite_pad)!=0){
		log_err("free(): suffix failed %s:%d %s", file, line, func);
		log_err("alloc size is %d", (int)orig);
		log_hex("suffix here", real+lite_pad+orig+sizeof(size_t),
			lite_pad);
		log_hex(" should be", lite_post, lite_pad);
		fatal_exit("alloc assertion failed");
	}
	memset(real, 0xdd, orig+lite_pad*2+sizeof(size_t)); /* mark it */
	free(real);
}
581 | | |
/** realloc for lite-tracked memory: verifies both guards, then always
 * allocates a fresh guarded block, copies min(old,new) bytes, poisons
 * and frees the old block (no in-place growing). */
void *unbound_stat_realloc_lite(void *ptr, size_t size, const char* file,
	int line, const char* func)
{
	/* always free and realloc (no growing) */
	void* real, *newa;
	size_t orig = 0;
	if(!ptr) {
		/* like malloc() */
		return unbound_stat_malloc_lite(size, file, line, func);
	}
	if(!size) {
		/* like free() */
		unbound_stat_free_lite(ptr, file, line, func);
		return NULL;
	}
	/* change allocation size and copy */
	real = ptr-lite_pad-sizeof(size_t); /* back to the guard prefix */
	if(memcmp(real, lite_pre, lite_pad) != 0) {
		log_err("realloc(): prefix failed %s:%d %s", file, line, func);
		log_hex("prefix here", real, lite_pad);
		log_hex(" should be", lite_pre, lite_pad);
		fatal_exit("alloc assertion failed");
	}
	memmove(&orig, real+lite_pad, sizeof(size_t)); /* stored user size */
	if(memcmp(real+lite_pad+orig+sizeof(size_t), lite_post, lite_pad)!=0){
		log_err("realloc(): suffix failed %s:%d %s", file, line, func);
		log_err("alloc size is %d", (int)orig);
		log_hex("suffix here", real+lite_pad+orig+sizeof(size_t),
			lite_pad);
		log_hex(" should be", lite_post, lite_pad);
		fatal_exit("alloc assertion failed");
	}
	/* new alloc and copy over */
	newa = unbound_stat_malloc_lite(size, file, line, func);
	if(!newa)
		return NULL;
	if(orig < size)
		memmove(newa, ptr, orig);
	else memmove(newa, ptr, size);
	memset(real, 0xdd, orig+lite_pad*2+sizeof(size_t)); /* mark it */
	free(real);
	return newa;
}
625 | | |
/** strdup that allocates through the lite-guarded malloc, so duplicated
 * strings get the same under/overrun detection.
 * NOTE(review): s must be non-NULL (strlen would crash) — callers here
 * appear to guarantee that; confirm. */
char* unbound_strdup_lite(const char* s, const char* file, int line,
	const char* func)
{
	/* this routine is made to make sure strdup() uses the malloc_lite */
	size_t l = strlen(s)+1;
	char* n = (char*)unbound_stat_malloc_lite(l, file, line, func);
	if(!n) return NULL;
	memmove(n, s, l);
	return n;
}
636 | | |
/** take ownership of a plain-malloc string: copy it into lite-guarded
 * memory and free the original. Returns NULL if the copy fails (the
 * original is freed regardless). */
char* unbound_lite_wrapstr(char* s)
{
	char* n = unbound_strdup_lite(s, __FILE__, __LINE__, __func__);
	free(s);
	return n;
}
643 | | |
644 | | #undef sldns_pkt2wire |
/** wrapper for sldns_pkt2wire that re-homes the wire buffer into
 * lite-guarded memory. On success *dest/*size hold the guarded copy;
 * when pkt2wire produced no data they are set to NULL/0. The ldns
 * buffer is always freed. */
sldns_status unbound_lite_pkt2wire(uint8_t **dest, const sldns_pkt *p,
	size_t *size)
{
	uint8_t* md = NULL;
	size_t ms = 0;
	sldns_status s = sldns_pkt2wire(&md, p, &ms);
	if(md) {
		*dest = unbound_stat_malloc_lite(ms, __FILE__, __LINE__,
			__func__);
		*size = ms;
		if(!*dest) { free(md); return LDNS_STATUS_MEM_ERR; }
		memcpy(*dest, md, ms);
		free(md);
	} else {
		*dest = NULL;
		*size = 0;
	}
	return s;
}
664 | | |
665 | | #undef i2d_DSA_SIG |
666 | | int unbound_lite_i2d_DSA_SIG(DSA_SIG* dsasig, unsigned char** sig) |
667 | | { |
668 | | unsigned char* n = NULL; |
669 | | int r= i2d_DSA_SIG(dsasig, &n); |
670 | | if(n) { |
671 | | *sig = unbound_stat_malloc_lite((size_t)r, __FILE__, __LINE__, |
672 | | __func__); |
673 | | if(!*sig) return -1; |
674 | | memcpy(*sig, n, (size_t)r); |
675 | | free(n); |
676 | | return r; |
677 | | } |
678 | | *sig = NULL; |
679 | | return r; |
680 | | } |
681 | | |
682 | | #endif /* UNBOUND_ALLOC_LITE */ |