/src/cpython/Objects/mimalloc/alloc.c
Line | Count | Source |
1 | | /* ---------------------------------------------------------------------------- |
2 | | Copyright (c) 2018-2022, Microsoft Research, Daan Leijen |
3 | | This is free software; you can redistribute it and/or modify it under the |
4 | | terms of the MIT license. A copy of the license can be found in the file |
5 | | "LICENSE" at the root of this distribution. |
6 | | -----------------------------------------------------------------------------*/ |
7 | | #ifndef _DEFAULT_SOURCE |
8 | | #define _DEFAULT_SOURCE // for realpath() on Linux |
9 | | #endif |
10 | | |
11 | | #include "mimalloc.h" |
12 | | #include "mimalloc/internal.h" |
13 | | #include "mimalloc/atomic.h" |
14 | | #include "mimalloc/prim.h" // _mi_prim_thread_id() |
15 | | |
16 | | #include <string.h> // memset, strlen (for mi_strdup) |
17 | | #include <stdlib.h> // malloc, abort |
18 | | |
19 | 0 | #define _ZSt15get_new_handlerv _Py__ZSt15get_new_handlerv |
20 | | |
21 | | #define MI_IN_ALLOC_C |
22 | | #include "alloc-override.c" |
23 | | #undef MI_IN_ALLOC_C |
24 | | |
25 | | // ------------------------------------------------------ |
26 | | // Allocation |
27 | | // ------------------------------------------------------ |
28 | | |
29 | | #if (MI_DEBUG>0) |
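| |   // note: unlike a plain memset of the whole block, this helper skips the first |
| |   // `page->debug_offset` bytes, so an embedder-reserved header at the start of |
| |   // each block survives the debug fill (presumably what the CPython fork needs); |
| |   // with a zero offset it fills the entire block. |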
30 | | static inline void mi_debug_fill(mi_page_t* page, mi_block_t* block, int c, size_t size) { |
31 | | size_t offset = (size_t)page->debug_offset; |
32 | | if (offset < size) { |
33 | | memset((char*)block + offset, c, size - offset); |
34 | | } |
35 | | } |
36 | | #endif |
37 | | |
38 | | // Fast allocation in a page: just pop from the free list. |
39 | | // Fall back to generic allocation only if the list is empty. |
40 | 0 | extern inline void* _mi_page_malloc(mi_heap_t* heap, mi_page_t* page, size_t size, bool zero) mi_attr_noexcept { |
41 | 0 | mi_assert_internal(page->xblock_size==0||mi_page_block_size(page) >= size); |
42 | 0 | mi_block_t* const block = page->free; |
43 | 0 | if mi_unlikely(block == NULL) { |
44 | 0 | return _mi_malloc_generic(heap, size, zero, 0); |
45 | 0 | } |
46 | 0 | mi_assert_internal(block != NULL && _mi_ptr_page(block) == page); |
47 | | // pop from the free list |
48 | 0 | page->used++; |
49 | 0 | page->free = mi_block_next(page, block); |
50 | 0 | mi_assert_internal(page->free == NULL || _mi_ptr_page(page->free) == page); |
51 | | #if MI_DEBUG>3 |
52 | | if (page->free_is_zero) { |
53 | | mi_assert_expensive(mi_mem_is_zero(block+1,size - sizeof(*block))); |
54 | | } |
55 | | #endif |
56 | | |
57 | | // allow use of the block internally |
58 | | // note: when tracking we need to avoid ever touching the MI_PADDING since |
59 | | // that is tracked by valgrind etc. as non-accessible (through the red-zone, see `mimalloc/track.h`) |
60 | 0 | mi_track_mem_undefined(block, mi_page_usable_block_size(page)); |
61 | | |
62 | | // zero the block? note: we need to zero the full block size (issue #63) |
63 | 0 | if mi_unlikely(zero) { |
64 | 0 | mi_assert_internal(page->xblock_size != 0); // do not call with zero'ing for huge blocks (see _mi_malloc_generic) |
65 | 0 | mi_assert_internal(page->xblock_size >= MI_PADDING_SIZE); |
66 | 0 | if (page->free_is_zero) { |
67 | 0 | block->next = 0; |
68 | 0 | mi_track_mem_defined(block, page->xblock_size - MI_PADDING_SIZE); |
69 | 0 | } |
70 | 0 | else { |
71 | 0 | _mi_memzero_aligned(block, page->xblock_size - MI_PADDING_SIZE); |
72 | 0 | } |
73 | 0 | } |
74 | |
75 | | #if (MI_DEBUG>0) && !MI_TRACK_ENABLED && !MI_TSAN |
76 | | if (!zero && !mi_page_is_huge(page)) { |
77 | | mi_debug_fill(page, block, MI_DEBUG_UNINIT, mi_page_usable_block_size(page)); |
78 | | } |
79 | | #elif (MI_SECURE!=0) |
80 | | if (!zero) { block->next = 0; } // don't leak internal data |
81 | | #endif |
82 | |
83 | | #if (MI_STAT>0) |
84 | | const size_t bsize = mi_page_usable_block_size(page); |
85 | | if (bsize <= MI_MEDIUM_OBJ_SIZE_MAX) { |
86 | | mi_heap_stat_increase(heap, normal, bsize); |
87 | | mi_heap_stat_counter_increase(heap, normal_count, 1); |
88 | | #if (MI_STAT>1) |
89 | | const size_t bin = _mi_bin(bsize); |
90 | | mi_heap_stat_increase(heap, normal_bins[bin], 1); |
91 | | #endif |
92 | | } |
93 | | #endif |
94 | |
95 | | #if MI_PADDING // && !MI_TRACK_ENABLED |
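| |   // A padding trailer (mi_padding_t) is written just past the usable block size: |
| |   // |
| |   //   [ user bytes | MI_DEBUG_PADDING fill (delta bytes) | canary | delta ] |
| |   // |
| |   // `canary` encodes the block address with the page keys so a stray overflow is |
| |   // unlikely to reproduce it, and `delta` is the usable block size minus the size |
| |   // the caller asked for, which lets mi_usable_size report the exact requested |
| |   // size and lets the padding check verify the fill bytes on free. |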
96 | | mi_padding_t* const padding = (mi_padding_t*)((uint8_t*)block + mi_page_usable_block_size(page)); |
97 | | ptrdiff_t delta = ((uint8_t*)padding - (uint8_t*)block - (size - MI_PADDING_SIZE)); |
98 | | #if (MI_DEBUG>=2) |
99 | | mi_assert_internal(delta >= 0 && mi_page_usable_block_size(page) >= (size - MI_PADDING_SIZE + delta)); |
100 | | #endif |
101 | | mi_track_mem_defined(padding,sizeof(mi_padding_t)); // note: re-enable since mi_page_usable_block_size may set noaccess |
102 | | padding->canary = (uint32_t)(mi_ptr_encode(page,block,page->keys)); |
103 | | padding->delta = (uint32_t)(delta); |
104 | | #if MI_PADDING_CHECK |
105 | | if (!mi_page_is_huge(page)) { |
106 | | uint8_t* fill = (uint8_t*)padding - delta; |
107 | | const size_t maxpad = (delta > MI_MAX_ALIGN_SIZE ? MI_MAX_ALIGN_SIZE : delta); // set at most N initial padding bytes |
108 | | for (size_t i = 0; i < maxpad; i++) { fill[i] = MI_DEBUG_PADDING; } |
109 | | } |
110 | | #endif |
111 | | #endif |
112 | |
113 | 0 | return block; |
114 | 0 | } |
115 | | |
116 | 0 | static inline mi_decl_restrict void* mi_heap_malloc_small_zero(mi_heap_t* heap, size_t size, bool zero) mi_attr_noexcept { |
117 | 0 | mi_assert(heap != NULL); |
118 | | #if MI_DEBUG |
119 | | const uintptr_t tid = _mi_thread_id(); |
120 | | mi_assert(heap->thread_id == 0 || heap->thread_id == tid); // heaps are thread local |
121 | | #endif |
122 | 0 | mi_assert(size <= MI_SMALL_SIZE_MAX); |
123 | | #if (MI_PADDING) |
124 | | if (size == 0) { size = sizeof(void*); } |
125 | | #endif |
126 | 0 | mi_page_t* page = _mi_heap_get_free_small_page(heap, size + MI_PADDING_SIZE); |
127 | 0 | void* const p = _mi_page_malloc(heap, page, size + MI_PADDING_SIZE, zero); |
128 | 0 | mi_track_malloc(p,size,zero); |
129 | | #if MI_STAT>1 |
130 | | if (p != NULL) { |
131 | | if (!mi_heap_is_initialized(heap)) { heap = mi_prim_get_default_heap(); } |
132 | | mi_heap_stat_increase(heap, malloc, mi_usable_size(p)); |
133 | | } |
134 | | #endif |
135 | | #if MI_DEBUG>3 |
136 | | if (p != NULL && zero) { |
137 | | mi_assert_expensive(mi_mem_is_zero(p, size)); |
138 | | } |
139 | | #endif |
140 | 0 | return p; |
141 | 0 | } |
142 | | |
143 | | // allocate a small block |
144 | 0 | mi_decl_nodiscard extern inline mi_decl_restrict void* mi_heap_malloc_small(mi_heap_t* heap, size_t size) mi_attr_noexcept { |
145 | 0 | return mi_heap_malloc_small_zero(heap, size, false); |
146 | 0 | } |
147 | | |
148 | 0 | mi_decl_nodiscard extern inline mi_decl_restrict void* mi_malloc_small(size_t size) mi_attr_noexcept { |
149 | 0 | return mi_heap_malloc_small(mi_prim_get_default_heap(), size); |
150 | 0 | } |
151 | | |
152 | | // The main allocation function |
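| |   // note: all paths below request `size + MI_PADDING_SIZE` so the padding trailer |
| |   // fits behind the user bytes; with MI_PADDING disabled, MI_PADDING_SIZE is 0 and |
| |   // the request is unchanged. |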
153 | 0 | extern inline void* _mi_heap_malloc_zero_ex(mi_heap_t* heap, size_t size, bool zero, size_t huge_alignment) mi_attr_noexcept { |
154 | 0 | if mi_likely(size <= MI_SMALL_SIZE_MAX) { |
155 | 0 | mi_assert_internal(huge_alignment == 0); |
156 | 0 | return mi_heap_malloc_small_zero(heap, size, zero); |
157 | 0 | } |
158 | 0 | else { |
159 | 0 | mi_assert(heap!=NULL); |
160 | 0 | mi_assert(heap->thread_id == 0 || heap->thread_id == _mi_thread_id()); // heaps are thread local |
161 | 0 | void* const p = _mi_malloc_generic(heap, size + MI_PADDING_SIZE, zero, huge_alignment); // note: size can overflow but it is detected in malloc_generic |
162 | 0 | mi_track_malloc(p,size,zero); |
163 | | #if MI_STAT>1 |
164 | | if (p != NULL) { |
165 | | if (!mi_heap_is_initialized(heap)) { heap = mi_prim_get_default_heap(); } |
166 | | mi_heap_stat_increase(heap, malloc, mi_usable_size(p)); |
167 | | } |
168 | | #endif |
169 | | #if MI_DEBUG>3 |
170 | | if (p != NULL && zero) { |
171 | | mi_assert_expensive(mi_mem_is_zero(p, size)); |
172 | | } |
173 | | #endif |
174 | 0 | return p; |
175 | 0 | } |
176 | 0 | } |
177 | | |
178 | 0 | extern inline void* _mi_heap_malloc_zero(mi_heap_t* heap, size_t size, bool zero) mi_attr_noexcept { |
179 | 0 | return _mi_heap_malloc_zero_ex(heap, size, zero, 0); |
180 | 0 | } |
181 | | |
182 | 0 | mi_decl_nodiscard extern inline mi_decl_restrict void* mi_heap_malloc(mi_heap_t* heap, size_t size) mi_attr_noexcept { |
183 | 0 | return _mi_heap_malloc_zero(heap, size, false); |
184 | 0 | } |
185 | | |
186 | 0 | mi_decl_nodiscard extern inline mi_decl_restrict void* mi_malloc(size_t size) mi_attr_noexcept { |
187 | 0 | return mi_heap_malloc(mi_prim_get_default_heap(), size); |
188 | 0 | } |
189 | | |
190 | | // zero initialized small block |
191 | 0 | mi_decl_nodiscard mi_decl_restrict void* mi_zalloc_small(size_t size) mi_attr_noexcept { |
192 | 0 | return mi_heap_malloc_small_zero(mi_prim_get_default_heap(), size, true); |
193 | 0 | } |
194 | | |
195 | 0 | mi_decl_nodiscard extern inline mi_decl_restrict void* mi_heap_zalloc(mi_heap_t* heap, size_t size) mi_attr_noexcept { |
196 | 0 | return _mi_heap_malloc_zero(heap, size, true); |
197 | 0 | } |
198 | | |
199 | 0 | mi_decl_nodiscard mi_decl_restrict void* mi_zalloc(size_t size) mi_attr_noexcept { |
200 | 0 | return mi_heap_zalloc(mi_prim_get_default_heap(),size); |
201 | 0 | } |
202 | | |
203 | | |
204 | | // ------------------------------------------------------ |
205 | | // Check for double free in secure and debug mode |
206 | | // This is somewhat expensive so it is only enabled for secure mode (MI_SECURE>=4) or debug builds |
207 | | // ------------------------------------------------------ |
208 | | |
209 | | #if (MI_ENCODE_FREELIST && (MI_SECURE>=4 || MI_DEBUG!=0)) |
210 | | // linear check if the free list contains a specific element |
211 | | static bool mi_list_contains(const mi_page_t* page, const mi_block_t* list, const mi_block_t* elem) { |
212 | | while (list != NULL) { |
213 | | if (elem==list) return true; |
214 | | list = mi_block_next(page, list); |
215 | | } |
216 | | return false; |
217 | | } |
218 | | |
219 | | static mi_decl_noinline bool mi_check_is_double_freex(const mi_page_t* page, const mi_block_t* block) { |
220 | | // The decoded value is in the same page (or NULL). |
221 | | // Walk the free lists to verify positively if it is already freed |
222 | | if (mi_list_contains(page, page->free, block) || |
223 | | mi_list_contains(page, page->local_free, block) || |
224 | | mi_list_contains(page, mi_page_thread_free(page), block)) |
225 | | { |
226 | | _mi_error_message(EAGAIN, "double free detected of block %p with size %zu\n", block, mi_page_block_size(page)); |
227 | | return true; |
228 | | } |
229 | | return false; |
230 | | } |
231 | | |
232 | | #define mi_track_page(page,access) { size_t psize; void* pstart = _mi_page_start(_mi_page_segment(page),page,&psize); mi_track_mem_##access( pstart, psize); } |
233 | | |
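| |   // Rationale: the first word of a live block holds user data, so decoding it with |
| |   // the page keys almost never yields NULL or a pointer into the same page. If it |
| |   // does, the block was probably freed before (its first word was then set by |
| |   // mi_block_set_next), and the slower scan in mi_check_is_double_freex confirms it. |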
234 | | static inline bool mi_check_is_double_free(const mi_page_t* page, const mi_block_t* block) { |
235 | | bool is_double_free = false; |
236 | | mi_block_t* n = mi_block_nextx(page, block, page->keys); // pretend it is freed, and get the decoded first field |
237 | | if (((uintptr_t)n & (MI_INTPTR_SIZE-1))==0 && // quick check: aligned pointer? |
238 | | (n==NULL || mi_is_in_same_page(block, n))) // quick check: in same page or NULL? |
239 | | { |
240 | | // Suspicious: the decoded value in the block points into the same page (or is NULL) -- maybe a double free? |
241 | | // (continue in separate function to improve code generation) |
242 | | is_double_free = mi_check_is_double_freex(page, block); |
243 | | } |
244 | | return is_double_free; |
245 | | } |
246 | | #else |
247 | 0 | static inline bool mi_check_is_double_free(const mi_page_t* page, const mi_block_t* block) { |
248 | 0 | MI_UNUSED(page); |
249 | 0 | MI_UNUSED(block); |
250 | 0 | return false; |
251 | 0 | } |
252 | | #endif |
253 | | |
254 | | // --------------------------------------------------------------------------- |
255 | | // Check for heap block overflow by setting up padding at the end of the block |
256 | | // --------------------------------------------------------------------------- |
257 | | |
258 | | #if MI_PADDING // && !MI_TRACK_ENABLED |
259 | | static bool mi_page_decode_padding(const mi_page_t* page, const mi_block_t* block, size_t* delta, size_t* bsize) { |
260 | | *bsize = mi_page_usable_block_size(page); |
261 | | const mi_padding_t* const padding = (mi_padding_t*)((uint8_t*)block + *bsize); |
262 | | mi_track_mem_defined(padding,sizeof(mi_padding_t)); |
263 | | *delta = padding->delta; |
264 | | uint32_t canary = padding->canary; |
265 | | uintptr_t keys[2]; |
266 | | keys[0] = page->keys[0]; |
267 | | keys[1] = page->keys[1]; |
268 | | bool ok = ((uint32_t)mi_ptr_encode(page,block,keys) == canary && *delta <= *bsize); |
269 | | mi_track_mem_noaccess(padding,sizeof(mi_padding_t)); |
270 | | return ok; |
271 | | } |
272 | | |
273 | | // Return the exact usable size of a block. |
274 | | static size_t mi_page_usable_size_of(const mi_page_t* page, const mi_block_t* block) { |
275 | | size_t bsize; |
276 | | size_t delta; |
277 | | bool ok = mi_page_decode_padding(page, block, &delta, &bsize); |
278 | | mi_assert_internal(ok); mi_assert_internal(delta <= bsize); |
279 | | return (ok ? bsize - delta : 0); |
280 | | } |
281 | | |
282 | | // When a non-thread-local block is freed, it becomes part of the thread delayed free |
283 | | // list that is freed later by the owning heap. If the exact usable size is too small to |
284 | | // contain the pointer for the delayed list, then shrink the padding (by decreasing delta) |
285 | | // so it will later not trigger an overflow error in `mi_free_block`. |
286 | | void _mi_padding_shrink(const mi_page_t* page, const mi_block_t* block, const size_t min_size) { |
287 | | size_t bsize; |
288 | | size_t delta; |
289 | | bool ok = mi_page_decode_padding(page, block, &delta, &bsize); |
290 | | mi_assert_internal(ok); |
291 | | if (!ok || (bsize - delta) >= min_size) return; // usually already enough space |
292 | | mi_assert_internal(bsize >= min_size); |
293 | | if (bsize < min_size) return; // should never happen |
294 | | size_t new_delta = (bsize - min_size); |
295 | | mi_assert_internal(new_delta < bsize); |
296 | | mi_padding_t* padding = (mi_padding_t*)((uint8_t*)block + bsize); |
297 | | mi_track_mem_defined(padding,sizeof(mi_padding_t)); |
298 | | padding->delta = (uint32_t)new_delta; |
299 | | mi_track_mem_noaccess(padding,sizeof(mi_padding_t)); |
300 | | } |
301 | | #else |
302 | 0 | static size_t mi_page_usable_size_of(const mi_page_t* page, const mi_block_t* block) { |
303 | 0 | MI_UNUSED(block); |
304 | 0 | return mi_page_usable_block_size(page); |
305 | 0 | } |
306 | | |
307 | 0 | void _mi_padding_shrink(const mi_page_t* page, const mi_block_t* block, const size_t min_size) { |
308 | 0 | MI_UNUSED(page); |
309 | 0 | MI_UNUSED(block); |
310 | 0 | MI_UNUSED(min_size); |
311 | 0 | } |
312 | | #endif |
313 | | |
314 | | #if MI_PADDING && MI_PADDING_CHECK |
315 | | |
316 | | static bool mi_verify_padding(const mi_page_t* page, const mi_block_t* block, size_t* size, size_t* wrong) { |
317 | | size_t bsize; |
318 | | size_t delta; |
319 | | bool ok = mi_page_decode_padding(page, block, &delta, &bsize); |
320 | | *size = *wrong = bsize; |
321 | | if (!ok) return false; |
322 | | mi_assert_internal(bsize >= delta); |
323 | | *size = bsize - delta; |
324 | | if (!mi_page_is_huge(page)) { |
325 | | uint8_t* fill = (uint8_t*)block + bsize - delta; |
326 | | const size_t maxpad = (delta > MI_MAX_ALIGN_SIZE ? MI_MAX_ALIGN_SIZE : delta); // check at most the first N padding bytes |
327 | | mi_track_mem_defined(fill, maxpad); |
328 | | for (size_t i = 0; i < maxpad; i++) { |
329 | | if (fill[i] != MI_DEBUG_PADDING) { |
330 | | *wrong = bsize - delta + i; |
331 | | ok = false; |
332 | | break; |
333 | | } |
334 | | } |
335 | | mi_track_mem_noaccess(fill, maxpad); |
336 | | } |
337 | | return ok; |
338 | | } |
339 | | |
340 | | static void mi_check_padding(const mi_page_t* page, const mi_block_t* block) { |
341 | | size_t size; |
342 | | size_t wrong; |
343 | | if (!mi_verify_padding(page,block,&size,&wrong)) { |
344 | | _mi_error_message(EFAULT, "buffer overflow in heap block %p of size %zu: write after %zu bytes\n", block, size, wrong ); |
345 | | } |
346 | | } |
347 | | |
348 | | #else |
349 | | |
350 | 0 | static void mi_check_padding(const mi_page_t* page, const mi_block_t* block) { |
351 | 0 | MI_UNUSED(page); |
352 | 0 | MI_UNUSED(block); |
353 | 0 | } |
354 | | |
355 | | #endif |
356 | | |
357 | | // only maintain stats for smaller objects if requested |
358 | | #if (MI_STAT>0) |
359 | | static void mi_stat_free(const mi_page_t* page, const mi_block_t* block) { |
360 | | #if (MI_STAT < 2) |
361 | | MI_UNUSED(block); |
362 | | #endif |
363 | | mi_heap_t* const heap = mi_heap_get_default(); |
364 | | const size_t bsize = mi_page_usable_block_size(page); |
365 | | #if (MI_STAT>1) |
366 | | const size_t usize = mi_page_usable_size_of(page, block); |
367 | | mi_heap_stat_decrease(heap, malloc, usize); |
368 | | #endif |
369 | | if (bsize <= MI_MEDIUM_OBJ_SIZE_MAX) { |
370 | | mi_heap_stat_decrease(heap, normal, bsize); |
371 | | #if (MI_STAT > 1) |
372 | | mi_heap_stat_decrease(heap, normal_bins[_mi_bin(bsize)], 1); |
373 | | #endif |
374 | | } |
375 | | else if (bsize <= MI_LARGE_OBJ_SIZE_MAX) { |
376 | | mi_heap_stat_decrease(heap, large, bsize); |
377 | | } |
378 | | else { |
379 | | mi_heap_stat_decrease(heap, huge, bsize); |
380 | | } |
381 | | } |
382 | | #else |
383 | 0 | static void mi_stat_free(const mi_page_t* page, const mi_block_t* block) { |
384 | 0 | MI_UNUSED(page); MI_UNUSED(block); |
385 | 0 | } |
386 | | #endif |
387 | | |
388 | | #if MI_HUGE_PAGE_ABANDON |
389 | | #if (MI_STAT>0) |
390 | | // maintain stats for huge objects |
391 | | static void mi_stat_huge_free(const mi_page_t* page) { |
392 | | mi_heap_t* const heap = mi_heap_get_default(); |
393 | | const size_t bsize = mi_page_block_size(page); // to match stats in `page.c:mi_page_huge_alloc` |
394 | | if (bsize <= MI_LARGE_OBJ_SIZE_MAX) { |
395 | | mi_heap_stat_decrease(heap, large, bsize); |
396 | | } |
397 | | else { |
398 | | mi_heap_stat_decrease(heap, huge, bsize); |
399 | | } |
400 | | } |
401 | | #else |
402 | | static void mi_stat_huge_free(const mi_page_t* page) { |
403 | | MI_UNUSED(page); |
404 | | } |
405 | | #endif |
406 | | #endif |
407 | | |
408 | | // ------------------------------------------------------ |
409 | | // Free |
410 | | // ------------------------------------------------------ |
411 | | |
412 | | // multi-threaded free (or free in huge block if compiled with MI_HUGE_PAGE_ABANDON) |
413 | | static mi_decl_noinline void _mi_free_block_mt(mi_page_t* page, mi_block_t* block) |
414 | 0 | { |
415 | | // The padding check may access the non-thread-owned page for the key values. |
416 | | // That is safe as these are constant and the page won't be freed (as the block is not freed yet). |
417 | 0 | mi_check_padding(page, block); |
418 | 0 | _mi_padding_shrink(page, block, sizeof(mi_block_t)); // for small size, ensure we can fit the delayed thread pointers without triggering overflow detection |
419 | | |
420 | | // huge page segments are always abandoned and can be freed immediately |
421 | 0 | mi_segment_t* segment = _mi_page_segment(page); |
422 | 0 | if (segment->kind == MI_SEGMENT_HUGE) { |
423 | | #if MI_HUGE_PAGE_ABANDON |
424 | | // huge page segments are always abandoned and can be freed immediately |
425 | | mi_stat_huge_free(page); |
426 | | _mi_segment_huge_page_free(segment, page, block); |
427 | | return; |
428 | | #else |
429 | | // huge pages are special as they occupy the entire segment |
430 | | // as these are large we reset the memory occupied by the page so it is available to other threads |
431 | | // (as the owning thread needs to actually free the memory later). |
432 | 0 | _mi_segment_huge_page_reset(segment, page, block); |
433 | 0 | #endif |
434 | 0 | } |
435 | |
436 | | #if (MI_DEBUG>0) && !MI_TRACK_ENABLED && !MI_TSAN // note: when tracking, cannot use mi_usable_size with multi-threading |
437 | | if (segment->kind != MI_SEGMENT_HUGE) { // not for huge segments as we just reset the content |
438 | | mi_debug_fill(page, block, MI_DEBUG_FREED, mi_usable_size(block)); |
439 | | } |
440 | | #endif |
441 | | |
442 | | // Try to put the block on either the page-local thread free list, or the heap delayed free list. |
443 | 0 | mi_thread_free_t tfreex; |
444 | 0 | bool use_delayed; |
445 | 0 | mi_thread_free_t tfree = mi_atomic_load_relaxed(&page->xthread_free); |
446 | 0 | do { |
447 | 0 | use_delayed = (mi_tf_delayed(tfree) == MI_USE_DELAYED_FREE); |
448 | 0 | if mi_unlikely(use_delayed) { |
449 | | // unlikely: this only happens on the first concurrent free in a page that is in the full list |
450 | 0 | tfreex = mi_tf_set_delayed(tfree,MI_DELAYED_FREEING); |
451 | 0 | } |
452 | 0 | else { |
453 | | // usual: directly add to page thread_free list |
454 | 0 | mi_block_set_next(page, block, mi_tf_block(tfree)); |
455 | 0 | tfreex = mi_tf_set_block(tfree,block); |
456 | 0 | } |
457 | 0 | } while (!mi_atomic_cas_weak_release(&page->xthread_free, &tfree, tfreex)); |
458 | |
459 | 0 | if mi_unlikely(use_delayed) { |
460 | | // racy read on `heap`, but ok because MI_DELAYED_FREEING is set (see `mi_heap_delete` and `mi_heap_collect_abandon`) |
461 | 0 | mi_heap_t* const heap = (mi_heap_t*)(mi_atomic_load_acquire(&page->xheap)); //mi_page_heap(page); |
462 | 0 | mi_assert_internal(heap != NULL); |
463 | 0 | if (heap != NULL) { |
464 | | // add to the delayed free list of this heap. (do this atomically as the lock only protects heap memory validity) |
465 | 0 | mi_block_t* dfree = mi_atomic_load_ptr_relaxed(mi_block_t, &heap->thread_delayed_free); |
466 | 0 | do { |
467 | 0 | mi_block_set_nextx(heap,block,dfree, heap->keys); |
468 | 0 | } while (!mi_atomic_cas_ptr_weak_release(mi_block_t,&heap->thread_delayed_free, &dfree, block)); |
469 | 0 | } |
470 | | |
471 | | // and reset the MI_DELAYED_FREEING flag |
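| |   // MI_DELAYED_FREEING marks that this thread is still using the heap pointer read |
| |   // above; the owner will not change the page's delayed-free state while it is set |
| |   // (see _mi_page_try_use_delayed_free), so it must be cleared once the block has |
| |   // been pushed onto the heap's thread_delayed_free list. |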
472 | 0 | tfree = mi_atomic_load_relaxed(&page->xthread_free); |
473 | 0 | do { |
474 | 0 | tfreex = tfree; |
475 | 0 | mi_assert_internal(mi_tf_delayed(tfree) == MI_DELAYED_FREEING); |
476 | 0 | tfreex = mi_tf_set_delayed(tfree,MI_NO_DELAYED_FREE); |
477 | 0 | } while (!mi_atomic_cas_weak_release(&page->xthread_free, &tfree, tfreex)); |
478 | 0 | } |
479 | 0 | } |
480 | | |
481 | | // regular free |
482 | | static inline void _mi_free_block(mi_page_t* page, bool local, mi_block_t* block) |
483 | 0 | { |
484 | | // and push it on the free list |
485 | | //const size_t bsize = mi_page_block_size(page); |
486 | 0 | if mi_likely(local) { |
487 | | // owning thread can free a block directly |
488 | 0 | if mi_unlikely(mi_check_is_double_free(page, block)) return; |
489 | 0 | mi_check_padding(page, block); |
490 | | #if (MI_DEBUG>0) && !MI_TRACK_ENABLED && !MI_TSAN |
491 | | if (!mi_page_is_huge(page)) { // huge page content may be already decommitted |
492 | | mi_debug_fill(page, block, MI_DEBUG_FREED, mi_page_block_size(page)); |
493 | | } |
494 | | #endif |
495 | 0 | mi_block_set_next(page, block, page->local_free); |
496 | 0 | page->local_free = block; |
497 | 0 | page->used--; |
498 | 0 | if mi_unlikely(mi_page_all_free(page)) { |
499 | 0 | _mi_page_retire(page); |
500 | 0 | } |
501 | 0 | else if mi_unlikely(mi_page_is_in_full(page)) { |
502 | 0 | _mi_page_unfull(page); |
503 | 0 | } |
504 | 0 | } |
505 | 0 | else { |
506 | 0 | _mi_free_block_mt(page,block); |
507 | 0 | } |
508 | 0 | } |
509 | | |
510 | | |
511 | | // Adjust a block that was allocated aligned, to the actual start of the block in the page. |
512 | 0 | mi_block_t* _mi_page_ptr_unalign(const mi_segment_t* segment, const mi_page_t* page, const void* p) { |
513 | 0 | mi_assert_internal(page!=NULL && p!=NULL); |
514 | 0 | const size_t diff = (uint8_t*)p - _mi_page_start(segment, page, NULL); |
515 | 0 | const size_t adjust = (diff % mi_page_block_size(page)); |
516 | 0 | return (mi_block_t*)((uintptr_t)p - adjust); |
517 | 0 | } |
518 | | |
519 | | |
520 | 0 | void mi_decl_noinline _mi_free_generic(const mi_segment_t* segment, mi_page_t* page, bool is_local, void* p) mi_attr_noexcept { |
521 | 0 | mi_block_t* const block = (mi_page_has_aligned(page) ? _mi_page_ptr_unalign(segment, page, p) : (mi_block_t*)p); |
522 | 0 | mi_stat_free(page, block); // stat_free may access the padding |
523 | 0 | mi_track_free_size(block, mi_page_usable_size_of(page,block)); |
524 | 0 | _mi_free_block(page, is_local, block); |
525 | 0 | } |
526 | | |
527 | | // Get the segment data belonging to a pointer |
528 | | // This is just a single `and` in assembly, but in debug mode (and secure mode) |
529 | | // it additionally checks whether this was a valid pointer. |
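| |   // Roughly, _mi_ptr_segment just rounds the pointer down to the segment alignment: |
| |   // |
| |   //   segment = (mi_segment_t*)((uintptr_t)p & ~MI_SEGMENT_MASK); |
| |   // |
| |   // the debug/secure checks below then validate the result via the segment cookie. |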
530 | | static inline mi_segment_t* mi_checked_ptr_segment(const void* p, const char* msg) |
531 | 0 | { |
532 | 0 | MI_UNUSED(msg); |
533 | 0 | mi_assert(p != NULL); |
534 | |
535 | | #if (MI_DEBUG>0) |
536 | | if mi_unlikely(((uintptr_t)p & (MI_INTPTR_SIZE - 1)) != 0) { |
537 | | _mi_error_message(EINVAL, "%s: invalid (unaligned) pointer: %p\n", msg, p); |
538 | | return NULL; |
539 | | } |
540 | | #endif |
541 | |
542 | 0 | mi_segment_t* const segment = _mi_ptr_segment(p); |
543 | 0 | mi_assert_internal(segment != NULL); |
544 | |
545 | | #if 0 && (MI_DEBUG>0) |
546 | | if mi_unlikely(!mi_is_in_heap_region(p)) { |
547 | | #if (MI_INTPTR_SIZE == 8 && defined(__linux__)) |
548 | | if (((uintptr_t)p >> 40) != 0x7F) { // linux tends to align large blocks above 0x7F000000000 (issue #640) |
549 | | #else |
550 | | { |
551 | | #endif |
552 | | _mi_warning_message("%s: pointer might not point to a valid heap region: %p\n" |
553 | | "(this may still be a valid very large allocation (over 64MiB))\n", msg, p); |
554 | | if mi_likely(_mi_ptr_cookie(segment) == segment->cookie) { |
555 | | _mi_warning_message("(yes, the previous pointer %p was valid after all)\n", p); |
556 | | } |
557 | | } |
558 | | } |
559 | | #endif |
560 | | #if (MI_DEBUG>0 || MI_SECURE>=4) |
561 | | if mi_unlikely(_mi_ptr_cookie(segment) != segment->cookie) { |
562 | | _mi_error_message(EINVAL, "%s: pointer does not point to a valid heap space: %p\n", msg, p); |
563 | | return NULL; |
564 | | } |
565 | | #endif |
566 | |
567 | 0 | return segment; |
568 | 0 | } |
569 | | |
570 | | // Free a block |
571 | | // fast path written carefully to prevent spilling on the stack |
572 | | void mi_free(void* p) mi_attr_noexcept |
573 | 0 | { |
574 | 0 | if mi_unlikely(p == NULL) return; |
575 | 0 | mi_segment_t* const segment = mi_checked_ptr_segment(p,"mi_free"); |
576 | 0 | const bool is_local= (_mi_prim_thread_id() == mi_atomic_load_relaxed(&segment->thread_id)); |
577 | 0 | mi_page_t* const page = _mi_segment_page_of(segment, p); |
578 | |
579 | 0 | if mi_likely(is_local) { // thread-local free? |
580 | 0 | if mi_likely(page->flags.full_aligned == 0) // and it is not a full page (full pages need to move from the full bin), nor does it have aligned blocks (aligned blocks need to be unaligned) |
581 | 0 | { |
582 | 0 | mi_block_t* const block = (mi_block_t*)p; |
583 | 0 | if mi_unlikely(mi_check_is_double_free(page, block)) return; |
584 | 0 | mi_check_padding(page, block); |
585 | 0 | mi_stat_free(page, block); |
586 | | #if (MI_DEBUG>0) && !MI_TRACK_ENABLED && !MI_TSAN |
587 | | mi_debug_fill(page, block, MI_DEBUG_FREED, mi_page_block_size(page)); |
588 | | #endif |
589 | 0 | mi_track_free_size(p, mi_page_usable_size_of(page,block)); // faster than mi_usable_size as we already know the page and that p is unaligned |
590 | 0 | mi_block_set_next(page, block, page->local_free); |
591 | 0 | page->local_free = block; |
592 | 0 | if mi_unlikely(--page->used == 0) { // using this expression generates better code than: page->used--; if (mi_page_all_free(page)) |
593 | 0 | _mi_page_retire(page); |
594 | 0 | } |
595 | 0 | } |
596 | 0 | else { |
597 | | // page is full or contains (inner) aligned blocks; use generic path |
598 | 0 | _mi_free_generic(segment, page, true, p); |
599 | 0 | } |
600 | 0 | } |
601 | 0 | else { |
602 | | // not thread-local; use generic path |
603 | 0 | _mi_free_generic(segment, page, false, p); |
604 | 0 | } |
605 | 0 | } |
606 | | |
607 | | // return true if successful |
608 | 0 | bool _mi_free_delayed_block(mi_block_t* block) { |
609 | | // get segment and page |
610 | 0 | const mi_segment_t* const segment = _mi_ptr_segment(block); |
611 | 0 | mi_assert_internal(_mi_ptr_cookie(segment) == segment->cookie); |
612 | 0 | #ifndef Py_GIL_DISABLED |
613 | | // The GC traverses heaps of other threads, which can trigger this assert. |
614 | 0 | mi_assert_internal(_mi_thread_id() == segment->thread_id); |
615 | 0 | #endif |
616 | 0 | mi_page_t* const page = _mi_segment_page_of(segment, block); |
617 | | |
618 | | // Clear the no-delayed flag so delayed freeing is used again for this page. |
619 | | // This must be done before collecting the free lists on this page -- otherwise |
620 | | // some blocks may end up in the page `thread_free` list with no blocks in the |
621 | | // heap `thread_delayed_free` list which may cause the page to be never freed! |
622 | | // (it would only be freed if we happen to scan it in `mi_page_queue_find_free_ex`) |
623 | 0 | if (!_mi_page_try_use_delayed_free(page, MI_USE_DELAYED_FREE, false /* dont overwrite never delayed */)) { |
624 | 0 | return false; |
625 | 0 | } |
626 | | |
627 | | // collect all other non-local frees to ensure up-to-date `used` count |
628 | 0 | _mi_page_free_collect(page, false); |
629 | | |
630 | | // and free the block (possibly freeing the page as well since used is updated) |
631 | 0 | _mi_free_block(page, true, block); |
632 | 0 | return true; |
633 | 0 | } |
634 | | |
635 | | // Bytes available in a block |
636 | 0 | mi_decl_noinline static size_t mi_page_usable_aligned_size_of(const mi_segment_t* segment, const mi_page_t* page, const void* p) mi_attr_noexcept { |
637 | 0 | const mi_block_t* block = _mi_page_ptr_unalign(segment, page, p); |
638 | 0 | const size_t size = mi_page_usable_size_of(page, block); |
639 | 0 | const ptrdiff_t adjust = (uint8_t*)p - (uint8_t*)block; |
640 | 0 | mi_assert_internal(adjust >= 0 && (size_t)adjust <= size); |
641 | 0 | return (size - adjust); |
642 | 0 | } |
643 | | |
644 | 0 | static inline size_t _mi_usable_size(const void* p, const char* msg) mi_attr_noexcept { |
645 | 0 | if (p == NULL) return 0; |
646 | 0 | const mi_segment_t* const segment = mi_checked_ptr_segment(p, msg); |
647 | 0 | const mi_page_t* const page = _mi_segment_page_of(segment, p); |
648 | 0 | if mi_likely(!mi_page_has_aligned(page)) { |
649 | 0 | const mi_block_t* block = (const mi_block_t*)p; |
650 | 0 | return mi_page_usable_size_of(page, block); |
651 | 0 | } |
652 | 0 | else { |
653 | | // split out to separate routine for improved code generation |
654 | 0 | return mi_page_usable_aligned_size_of(segment, page, p); |
655 | 0 | } |
656 | 0 | } |
657 | | |
658 | 0 | mi_decl_nodiscard size_t mi_usable_size(const void* p) mi_attr_noexcept { |
659 | 0 | return _mi_usable_size(p, "mi_usable_size"); |
660 | 0 | } |
661 | | |
662 | | |
663 | | // ------------------------------------------------------ |
664 | | // Allocation extensions |
665 | | // ------------------------------------------------------ |
666 | | |
667 | 0 | void mi_free_size(void* p, size_t size) mi_attr_noexcept { |
668 | 0 | MI_UNUSED_RELEASE(size); |
669 | 0 | mi_assert(p == NULL || size <= _mi_usable_size(p,"mi_free_size")); |
670 | 0 | mi_free(p); |
671 | 0 | } |
672 | | |
673 | 0 | void mi_free_size_aligned(void* p, size_t size, size_t alignment) mi_attr_noexcept { |
674 | 0 | MI_UNUSED_RELEASE(alignment); |
675 | 0 | mi_assert(((uintptr_t)p % alignment) == 0); |
676 | 0 | mi_free_size(p,size); |
677 | 0 | } |
678 | | |
679 | 0 | void mi_free_aligned(void* p, size_t alignment) mi_attr_noexcept { |
680 | 0 | MI_UNUSED_RELEASE(alignment); |
681 | 0 | mi_assert(((uintptr_t)p % alignment) == 0); |
682 | 0 | mi_free(p); |
683 | 0 | } |
684 | | |
685 | 0 | mi_decl_nodiscard extern inline mi_decl_restrict void* mi_heap_calloc(mi_heap_t* heap, size_t count, size_t size) mi_attr_noexcept { |
686 | 0 | size_t total; |
687 | 0 | if (mi_count_size_overflow(count,size,&total)) return NULL; |
688 | 0 | return mi_heap_zalloc(heap,total); |
689 | 0 | } |
690 | | |
691 | 0 | mi_decl_nodiscard mi_decl_restrict void* mi_calloc(size_t count, size_t size) mi_attr_noexcept { |
692 | 0 | return mi_heap_calloc(mi_prim_get_default_heap(),count,size); |
693 | 0 | } |
694 | | |
695 | | // Uninitialized `calloc` |
696 | 0 | mi_decl_nodiscard extern mi_decl_restrict void* mi_heap_mallocn(mi_heap_t* heap, size_t count, size_t size) mi_attr_noexcept { |
697 | 0 | size_t total; |
698 | 0 | if (mi_count_size_overflow(count, size, &total)) return NULL; |
699 | 0 | return mi_heap_malloc(heap, total); |
700 | 0 | } |
701 | | |
702 | 0 | mi_decl_nodiscard mi_decl_restrict void* mi_mallocn(size_t count, size_t size) mi_attr_noexcept { |
703 | 0 | return mi_heap_mallocn(mi_prim_get_default_heap(),count,size); |
704 | 0 | } |
705 | | |
706 | | // Expand (or shrink) in place (or fail) |
707 | 0 | void* mi_expand(void* p, size_t newsize) mi_attr_noexcept { |
708 | | #if MI_PADDING |
709 | | // we do not shrink/expand with padding enabled |
710 | | MI_UNUSED(p); MI_UNUSED(newsize); |
711 | | return NULL; |
712 | | #else |
713 | 0 | if (p == NULL) return NULL; |
714 | 0 | const size_t size = _mi_usable_size(p,"mi_expand"); |
715 | 0 | if (newsize > size) return NULL; |
716 | 0 | return p; // it fits |
717 | 0 | #endif |
718 | 0 | } |
719 | | |
720 | 0 | void* _mi_heap_realloc_zero(mi_heap_t* heap, void* p, size_t newsize, bool zero) mi_attr_noexcept { |
721 | | // if p == NULL then behave as malloc. |
722 | | // else if size == 0 then reallocate to a zero-sized block (and don't return NULL, just as mi_malloc(0)). |
723 | | // (this means that returning NULL always indicates an error, and `p` will not have been freed in that case.) |
724 | 0 | const size_t size = _mi_usable_size(p,"mi_realloc"); // also works if p == NULL (with size 0) |
725 | 0 | if mi_unlikely(newsize <= size && newsize >= (size / 2) && newsize > 0) { // note: newsize must be > 0 or otherwise we return NULL for realloc(NULL,0) |
726 | 0 | mi_assert_internal(p!=NULL); |
727 | | // todo: do not track as the usable size is still the same in the free; adjust potential padding? |
728 | | // mi_track_resize(p,size,newsize) |
729 | | // if (newsize < size) { mi_track_mem_noaccess((uint8_t*)p + newsize, size - newsize); } |
730 | 0 | return p; // reallocation still fits and not more than 50% waste |
731 | 0 | } |
732 | 0 | void* newp = mi_heap_malloc(heap,newsize); |
733 | 0 | if mi_likely(newp != NULL) { |
734 | 0 | if (zero && newsize > size) { |
735 | | // also set last word in the previous allocation to zero to ensure any padding is zero-initialized |
736 | 0 | const size_t start = (size >= sizeof(intptr_t) ? size - sizeof(intptr_t) : 0); |
737 | 0 | _mi_memzero((uint8_t*)newp + start, newsize - start); |
738 | 0 | } |
739 | 0 | else if (newsize == 0) { |
740 | 0 | ((uint8_t*)newp)[0] = 0; // workaround for applications that expect zero-reallocation to be zero initialized (issue #725) |
741 | 0 | } |
742 | 0 | if mi_likely(p != NULL) { |
743 | 0 | const size_t copysize = (newsize > size ? size : newsize); |
744 | 0 | mi_track_mem_defined(p,copysize); // _mi_usable_size may be too large for byte-precise memory tracking |
745 | 0 | _mi_memcpy(newp, p, copysize); |
746 | 0 | mi_free(p); // only free the original pointer if successful |
747 | 0 | } |
748 | 0 | } |
749 | 0 | return newp; |
750 | 0 | } |
751 | | |
752 | 0 | mi_decl_nodiscard void* mi_heap_realloc(mi_heap_t* heap, void* p, size_t newsize) mi_attr_noexcept { |
753 | 0 | return _mi_heap_realloc_zero(heap, p, newsize, false); |
754 | 0 | } |
755 | | |
756 | 0 | mi_decl_nodiscard void* mi_heap_reallocn(mi_heap_t* heap, void* p, size_t count, size_t size) mi_attr_noexcept { |
757 | 0 | size_t total; |
758 | 0 | if (mi_count_size_overflow(count, size, &total)) return NULL; |
759 | 0 | return mi_heap_realloc(heap, p, total); |
760 | 0 | } |
761 | | |
762 | | |
763 | | // Reallocate but free `p` on errors |
764 | 0 | mi_decl_nodiscard void* mi_heap_reallocf(mi_heap_t* heap, void* p, size_t newsize) mi_attr_noexcept { |
765 | 0 | void* newp = mi_heap_realloc(heap, p, newsize); |
766 | 0 | if (newp==NULL && p!=NULL) mi_free(p); |
767 | 0 | return newp; |
768 | 0 | } |
769 | | |
770 | 0 | mi_decl_nodiscard void* mi_heap_rezalloc(mi_heap_t* heap, void* p, size_t newsize) mi_attr_noexcept { |
771 | 0 | return _mi_heap_realloc_zero(heap, p, newsize, true); |
772 | 0 | } |
773 | | |
774 | 0 | mi_decl_nodiscard void* mi_heap_recalloc(mi_heap_t* heap, void* p, size_t count, size_t size) mi_attr_noexcept { |
775 | 0 | size_t total; |
776 | 0 | if (mi_count_size_overflow(count, size, &total)) return NULL; |
777 | 0 | return mi_heap_rezalloc(heap, p, total); |
778 | 0 | } |
779 | | |
780 | | |
781 | 0 | mi_decl_nodiscard void* mi_realloc(void* p, size_t newsize) mi_attr_noexcept { |
782 | 0 | return mi_heap_realloc(mi_prim_get_default_heap(),p,newsize); |
783 | 0 | } |
784 | | |
785 | 0 | mi_decl_nodiscard void* mi_reallocn(void* p, size_t count, size_t size) mi_attr_noexcept { |
786 | 0 | return mi_heap_reallocn(mi_prim_get_default_heap(),p,count,size); |
787 | 0 | } |
788 | | |
789 | | // Reallocate but free `p` on errors |
790 | 0 | mi_decl_nodiscard void* mi_reallocf(void* p, size_t newsize) mi_attr_noexcept { |
791 | 0 | return mi_heap_reallocf(mi_prim_get_default_heap(),p,newsize); |
792 | 0 | } |
793 | | |
794 | 0 | mi_decl_nodiscard void* mi_rezalloc(void* p, size_t newsize) mi_attr_noexcept { |
795 | 0 | return mi_heap_rezalloc(mi_prim_get_default_heap(), p, newsize); |
796 | 0 | } |
797 | | |
798 | 0 | mi_decl_nodiscard void* mi_recalloc(void* p, size_t count, size_t size) mi_attr_noexcept { |
799 | 0 | return mi_heap_recalloc(mi_prim_get_default_heap(), p, count, size); |
800 | 0 | } |
801 | | |
802 | | |
803 | | |
804 | | // ------------------------------------------------------ |
805 | | // strdup, strndup, and realpath |
806 | | // ------------------------------------------------------ |
807 | | |
808 | | // `strdup` using mi_malloc |
809 | 0 | mi_decl_nodiscard mi_decl_restrict char* mi_heap_strdup(mi_heap_t* heap, const char* s) mi_attr_noexcept { |
810 | 0 | if (s == NULL) return NULL; |
811 | 0 | size_t n = strlen(s); |
812 | 0 | char* t = (char*)mi_heap_malloc(heap,n+1); |
813 | 0 | if (t == NULL) return NULL; |
814 | 0 | _mi_memcpy(t, s, n); |
815 | 0 | t[n] = 0; |
816 | 0 | return t; |
817 | 0 | } |
818 | | |
819 | 0 | mi_decl_nodiscard mi_decl_restrict char* mi_strdup(const char* s) mi_attr_noexcept { |
820 | 0 | return mi_heap_strdup(mi_prim_get_default_heap(), s); |
821 | 0 | } |
822 | | |
823 | | // `strndup` using mi_malloc |
824 | 0 | mi_decl_nodiscard mi_decl_restrict char* mi_heap_strndup(mi_heap_t* heap, const char* s, size_t n) mi_attr_noexcept { |
825 | 0 | if (s == NULL) return NULL; |
826 | 0 | const char* end = (const char*)memchr(s, 0, n); // find end of string in the first `n` characters (returns NULL if not found) |
827 | 0 | const size_t m = (end != NULL ? (size_t)(end - s) : n); // `m` is the minimum of `n` or the end-of-string |
828 | 0 | mi_assert_internal(m <= n); |
829 | 0 | char* t = (char*)mi_heap_malloc(heap, m+1); |
830 | 0 | if (t == NULL) return NULL; |
831 | 0 | _mi_memcpy(t, s, m); |
832 | 0 | t[m] = 0; |
833 | 0 | return t; |
834 | 0 | } |
835 | | |
836 | 0 | mi_decl_nodiscard mi_decl_restrict char* mi_strndup(const char* s, size_t n) mi_attr_noexcept { |
837 | 0 | return mi_heap_strndup(mi_prim_get_default_heap(),s,n); |
838 | 0 | } |
839 | | |
840 | | #ifndef __wasi__ |
841 | | // `realpath` using mi_malloc |
842 | | #ifdef _WIN32 |
843 | | #ifndef PATH_MAX |
844 | | #define PATH_MAX MAX_PATH |
845 | | #endif |
846 | | #include <windows.h> |
847 | | mi_decl_nodiscard mi_decl_restrict char* mi_heap_realpath(mi_heap_t* heap, const char* fname, char* resolved_name) mi_attr_noexcept { |
848 | | // todo: use GetFullPathNameW to allow longer file names |
849 | | char buf[PATH_MAX]; |
850 | | DWORD res = GetFullPathNameA(fname, PATH_MAX, (resolved_name == NULL ? buf : resolved_name), NULL); |
851 | | if (res == 0) { |
852 | | errno = GetLastError(); return NULL; |
853 | | } |
854 | | else if (res > PATH_MAX) { |
855 | | errno = EINVAL; return NULL; |
856 | | } |
857 | | else if (resolved_name != NULL) { |
858 | | return resolved_name; |
859 | | } |
860 | | else { |
861 | | return mi_heap_strndup(heap, buf, PATH_MAX); |
862 | | } |
863 | | } |
864 | | #else |
865 | | /* |
866 | | #include <unistd.h> // pathconf |
867 | | static size_t mi_path_max(void) { |
868 | | static size_t path_max = 0; |
869 | | if (path_max <= 0) { |
870 | | long m = pathconf("/",_PC_PATH_MAX); |
871 | | if (m <= 0) path_max = 4096; // guess |
872 | | else if (m < 256) path_max = 256; // at least 256 |
873 | | else path_max = m; |
874 | | } |
875 | | return path_max; |
876 | | } |
877 | | */ |
878 | 0 | char* mi_heap_realpath(mi_heap_t* heap, const char* fname, char* resolved_name) mi_attr_noexcept { |
879 | 0 | if (resolved_name != NULL) { |
880 | 0 | return realpath(fname,resolved_name); |
881 | 0 | } |
882 | 0 | else { |
883 | 0 | char* rname = realpath(fname, NULL); |
884 | 0 | if (rname == NULL) return NULL; |
885 | 0 | char* result = mi_heap_strdup(heap, rname); |
886 | 0 | free(rname); // use regular free! (which may be redirected to our free but that's ok) |
887 | 0 | return result; |
888 | 0 | } |
889 | | /* |
890 | | const size_t n = mi_path_max(); |
891 | | char* buf = (char*)mi_malloc(n+1); |
892 | | if (buf == NULL) { |
893 | | errno = ENOMEM; |
894 | | return NULL; |
895 | | } |
896 | | char* rname = realpath(fname,buf); |
897 | | char* result = mi_heap_strndup(heap,rname,n); // ok if `rname==NULL` |
898 | | mi_free(buf); |
899 | | return result; |
900 | | } |
901 | | */ |
902 | 0 | } |
903 | | #endif |
904 | | |
905 | 0 | mi_decl_nodiscard mi_decl_restrict char* mi_realpath(const char* fname, char* resolved_name) mi_attr_noexcept { |
906 | 0 | return mi_heap_realpath(mi_prim_get_default_heap(),fname,resolved_name); |
907 | 0 | } |
908 | | #endif |
909 | | |
910 | | /*------------------------------------------------------- |
911 | | C++ new and new_aligned |
912 | | The standard requires calling into `get_new_handler` and |
913 | | throwing the bad_alloc exception on failure. If we compile |
914 | | with a C++ compiler we can implement this precisely. If we |
915 | | use a C compiler we cannot throw a `bad_alloc` exception |
916 | | but we call `abort` instead (i.e. not returning). |
917 | | -------------------------------------------------------*/ |
918 | | |
919 | | #ifdef __cplusplus |
920 | | #include <new> |
921 | | static bool mi_try_new_handler(bool nothrow) { |
922 | | #if defined(_MSC_VER) || (__cplusplus >= 201103L) |
923 | | std::new_handler h = std::get_new_handler(); |
924 | | #else |
925 | | std::new_handler h = std::set_new_handler(NULL); |
926 | | std::set_new_handler(h); |
927 | | #endif |
928 | | if (h==NULL) { |
929 | | _mi_error_message(ENOMEM, "out of memory in 'new'"); |
930 | | if (!nothrow) { |
931 | | throw std::bad_alloc(); |
932 | | } |
933 | | return false; |
934 | | } |
935 | | else { |
936 | | h(); |
937 | | return true; |
938 | | } |
939 | | } |
940 | | #else |
941 | | typedef void (*std_new_handler_t)(void); |
942 | | |
943 | | #if (defined(__GNUC__) || (defined(__clang__) && !defined(_MSC_VER))) // exclude clang-cl, see issue #631 |
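| |   // `_ZSt15get_new_handlerv` is the Itanium-mangled name of std::get_new_handler(). |
| |   // Declaring it weak means the real C++ runtime definition wins when libstdc++ (or |
| |   // libc++) is linked in; otherwise this stub returning NULL is used. The #define at |
| |   // the top of this file renames the weak symbol for the CPython build, presumably |
| |   // to avoid clashing with the real runtime symbol. |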
944 | 0 | std_new_handler_t __attribute__((weak)) _ZSt15get_new_handlerv(void) { |
945 | 0 | return NULL; |
946 | 0 | } |
947 | 0 | static std_new_handler_t mi_get_new_handler(void) { |
948 | 0 | return _ZSt15get_new_handlerv(); |
949 | 0 | } |
950 | | #else |
951 | | // note: on windows we could dynamically link to `?get_new_handler@std@@YAP6AXXZXZ`. |
952 | | static std_new_handler_t mi_get_new_handler() { |
953 | | return NULL; |
954 | | } |
955 | | #endif |
956 | | |
957 | 0 | static bool mi_try_new_handler(bool nothrow) { |
958 | 0 | std_new_handler_t h = mi_get_new_handler(); |
959 | 0 | if (h==NULL) { |
960 | 0 | _mi_error_message(ENOMEM, "out of memory in 'new'"); |
961 | 0 | if (!nothrow) { |
962 | 0 | abort(); // cannot throw in plain C, use abort |
963 | 0 | } |
964 | 0 | return false; |
965 | 0 | } |
966 | 0 | else { |
967 | 0 | h(); |
968 | 0 | return true; |
969 | 0 | } |
970 | 0 | } |
971 | | #endif |
972 | | |
973 | 0 | mi_decl_export mi_decl_noinline void* mi_heap_try_new(mi_heap_t* heap, size_t size, bool nothrow ) { |
974 | 0 | void* p = NULL; |
975 | 0 | while(p == NULL && mi_try_new_handler(nothrow)) { |
976 | 0 | p = mi_heap_malloc(heap,size); |
977 | 0 | } |
978 | 0 | return p; |
979 | 0 | } |
980 | | |
981 | 0 | static mi_decl_noinline void* mi_try_new(size_t size, bool nothrow) { |
982 | 0 | return mi_heap_try_new(mi_prim_get_default_heap(), size, nothrow); |
983 | 0 | } |
984 | | |
985 | | |
986 | 0 | mi_decl_nodiscard mi_decl_restrict void* mi_heap_alloc_new(mi_heap_t* heap, size_t size) { |
987 | 0 | void* p = mi_heap_malloc(heap,size); |
988 | 0 | if mi_unlikely(p == NULL) return mi_heap_try_new(heap, size, false); |
989 | 0 | return p; |
990 | 0 | } |
991 | | |
992 | 0 | mi_decl_nodiscard mi_decl_restrict void* mi_new(size_t size) { |
993 | 0 | return mi_heap_alloc_new(mi_prim_get_default_heap(), size); |
994 | 0 | } |
995 | | |
996 | | |
997 | 0 | mi_decl_nodiscard mi_decl_restrict void* mi_heap_alloc_new_n(mi_heap_t* heap, size_t count, size_t size) { |
998 | 0 | size_t total; |
999 | 0 | if mi_unlikely(mi_count_size_overflow(count, size, &total)) { |
1000 | 0 | mi_try_new_handler(false); // on overflow we invoke the try_new_handler once to potentially throw std::bad_alloc |
1001 | 0 | return NULL; |
1002 | 0 | } |
1003 | 0 | else { |
1004 | 0 | return mi_heap_alloc_new(heap,total); |
1005 | 0 | } |
1006 | 0 | } |
1007 | | |
1008 | 0 | mi_decl_nodiscard mi_decl_restrict void* mi_new_n(size_t count, size_t size) { |
1009 | 0 | return mi_heap_alloc_new_n(mi_prim_get_default_heap(), count, size); |
1010 | 0 | } |
1011 | | |
1012 | | |
1013 | 0 | mi_decl_nodiscard mi_decl_restrict void* mi_new_nothrow(size_t size) mi_attr_noexcept { |
1014 | 0 | void* p = mi_malloc(size); |
1015 | 0 | if mi_unlikely(p == NULL) return mi_try_new(size, true); |
1016 | 0 | return p; |
1017 | 0 | } |
1018 | | |
1019 | 0 | mi_decl_nodiscard mi_decl_restrict void* mi_new_aligned(size_t size, size_t alignment) { |
1020 | 0 | void* p; |
1021 | 0 | do { |
1022 | 0 | p = mi_malloc_aligned(size, alignment); |
1023 | 0 | } |
1024 | 0 | while(p == NULL && mi_try_new_handler(false)); |
1025 | 0 | return p; |
1026 | 0 | } |
1027 | | |
1028 | 0 | mi_decl_nodiscard mi_decl_restrict void* mi_new_aligned_nothrow(size_t size, size_t alignment) mi_attr_noexcept { |
1029 | 0 | void* p; |
1030 | 0 | do { |
1031 | 0 | p = mi_malloc_aligned(size, alignment); |
1032 | 0 | } |
1033 | 0 | while(p == NULL && mi_try_new_handler(true)); |
1034 | 0 | return p; |
1035 | 0 | } |
1036 | | |
1037 | 0 | mi_decl_nodiscard void* mi_new_realloc(void* p, size_t newsize) { |
1038 | 0 | void* q; |
1039 | 0 | do { |
1040 | 0 | q = mi_realloc(p, newsize); |
1041 | 0 | } while (q == NULL && mi_try_new_handler(false)); |
1042 | 0 | return q; |
1043 | 0 | } |
1044 | | |
1045 | 0 | mi_decl_nodiscard void* mi_new_reallocn(void* p, size_t newcount, size_t size) { |
1046 | 0 | size_t total; |
1047 | 0 | if mi_unlikely(mi_count_size_overflow(newcount, size, &total)) { |
1048 | 0 | mi_try_new_handler(false); // on overflow we invoke the try_new_handler once to potentially throw std::bad_alloc |
1049 | 0 | return NULL; |
1050 | 0 | } |
1051 | 0 | else { |
1052 | 0 | return mi_new_realloc(p, total); |
1053 | 0 | } |
1054 | 0 | } |
1055 | | |
1056 | | // ------------------------------------------------------ |
1057 | | // ensure explicit external inline definitions are emitted! |
1058 | | // ------------------------------------------------------ |
1059 | | |
1060 | | #ifdef __cplusplus |
1061 | | void* _mi_externs[] = { |
1062 | | (void*)&_mi_page_malloc, |
1063 | | (void*)&_mi_heap_malloc_zero, |
1064 | | (void*)&_mi_heap_malloc_zero_ex, |
1065 | | (void*)&mi_malloc, |
1066 | | (void*)&mi_malloc_small, |
1067 | | (void*)&mi_zalloc_small, |
1068 | | (void*)&mi_heap_malloc, |
1069 | | (void*)&mi_heap_zalloc, |
1070 | | (void*)&mi_heap_malloc_small, |
1071 | | // (void*)&mi_heap_alloc_new, |
1072 | | // (void*)&mi_heap_alloc_new_n |
1073 | | }; |
1074 | | #endif |