/src/cpython/Objects/mimalloc/heap.c
Line | Count | Source |
1 | | /*---------------------------------------------------------------------------- |
2 | | Copyright (c) 2018-2021, Microsoft Research, Daan Leijen |
3 | | This is free software; you can redistribute it and/or modify it under the |
4 | | terms of the MIT license. A copy of the license can be found in the file |
5 | | "LICENSE" at the root of this distribution. |
6 | | -----------------------------------------------------------------------------*/ |
7 | | |
8 | | #include "mimalloc.h" |
9 | | #include "mimalloc/internal.h" |
10 | | #include "mimalloc/atomic.h" |
11 | | #include "mimalloc/prim.h" // mi_prim_get_default_heap |
12 | | |
13 | | #include <string.h> // memset, memcpy |
14 | | |
15 | | #if defined(_MSC_VER) && (_MSC_VER < 1920) |
16 | | #pragma warning(disable:4204) // non-constant aggregate initializer |
17 | | #endif |
18 | | |
19 | | /* ----------------------------------------------------------- |
20 | | Helpers |
21 | | ----------------------------------------------------------- */ |
22 | | |
23 | | // return `true` if ok, `false` to break |
24 | | typedef bool (heap_page_visitor_fun)(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_t* page, void* arg1, void* arg2); |
25 | | |
26 | | // Visit all pages in a heap; returns `false` if break was called. |
27 | | static bool mi_heap_visit_pages(mi_heap_t* heap, heap_page_visitor_fun* fn, void* arg1, void* arg2) |
28 | 0 | { |
29 | 0 | if (heap==NULL || heap->page_count==0) return true; |
30 | | |
31 | | // visit all pages |
32 | | #if MI_DEBUG>1 |
33 | | size_t total = heap->page_count; |
34 | | size_t count = 0; |
35 | | #endif |
36 | | |
37 | 0 | for (size_t i = 0; i <= MI_BIN_FULL; i++) { |
38 | 0 | mi_page_queue_t* pq = &heap->pages[i]; |
39 | 0 | mi_page_t* page = pq->first; |
40 | 0 | while(page != NULL) { |
41 | 0 | mi_page_t* next = page->next; // save next in case the page gets removed from the queue |
42 | 0 | mi_assert_internal(mi_page_heap(page) == heap); |
43 | | #if MI_DEBUG>1 |
44 | | count++; |
45 | | #endif |
46 | 0 | if (!fn(heap, pq, page, arg1, arg2)) return false; |
47 | 0 | page = next; // and continue |
48 | 0 | } |
49 | 0 | } |
50 | 0 | mi_assert_internal(count == total); |
51 | 0 | return true; |
52 | 0 | } |
53 | | |
54 | | |
#if MI_DEBUG>=2
// Debug visitor: assert that `page` belongs to `heap`, lives in a segment
// owned by the same thread, and passes the expensive page validity checks.
static bool mi_heap_page_is_valid(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_t* page, void* arg1, void* arg2) {
  MI_UNUSED(arg1);
  MI_UNUSED(arg2);
  MI_UNUSED(pq);
  mi_assert_internal(mi_page_heap(page) == heap);
  mi_segment_t* segment = _mi_page_segment(page);
  mi_assert_internal(segment->thread_id == heap->thread_id);
  mi_assert_expensive(_mi_page_is_valid(page));
  return true;  // keep visiting
}
#endif
#if MI_DEBUG>=3
// Debug check: validate every page of `heap`. Always returns true so it can
// be used inside `mi_assert_expensive(...)` expressions.
static bool mi_heap_is_valid(mi_heap_t* heap) {
  mi_assert_internal(heap!=NULL);
  mi_heap_visit_pages(heap, &mi_heap_page_is_valid, NULL, NULL);
  return true;
}
#endif
74 | | |
75 | | |
76 | | |
77 | | |
78 | | /* ----------------------------------------------------------- |
79 | | "Collect" pages by migrating `local_free` and `thread_free` |
80 | | lists and freeing empty pages. This is done when a thread |
81 | | stops (and in that case abandons pages if there are still |
82 | | blocks alive) |
83 | | ----------------------------------------------------------- */ |
84 | | |
// Collection modes for `mi_heap_collect_ex` (ordered by aggressiveness;
// the code compares with `>= MI_FORCE`).
typedef enum mi_collect_e {
  MI_NORMAL,   // regular, cheap collection
  MI_FORCE,    // forced collection: also migrate thread-free lists and purge caches
  MI_ABANDON   // thread is terminating: abandon pages that still hold live blocks
} mi_collect_t;
90 | | |
91 | | |
// Visitor that collects one page: migrate its free lists, then free the page
// if it is empty, or abandon it when the thread is terminating.
// `arg_collect` points at the mi_collect_t mode; always returns true so the
// heap-wide visit continues.
static bool mi_heap_page_collect(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_t* page, void* arg_collect, void* arg2 ) {
  MI_UNUSED(arg2);
  MI_UNUSED(heap);
  mi_assert_internal(mi_heap_page_is_valid(heap, pq, page, NULL, NULL));
  mi_collect_t collect = *((mi_collect_t*)arg_collect);
  // on MI_FORCE (and MI_ABANDON) also collect the concurrent thread-free list
  _mi_page_free_collect(page, collect >= MI_FORCE);
  if (mi_page_all_free(page)) {
    // no more used blocks, free the page.
    // note: this will free retired pages as well.
    bool freed = _PyMem_mi_page_maybe_free(page, pq, collect >= MI_FORCE);
    if (!freed && collect == MI_ABANDON) {
      // _PyMem_mi_page_maybe_free may have moved the page to a different
      // page queue, so we need to re-fetch the correct queue.
      uint8_t bin = (mi_page_is_in_full(page) ? MI_BIN_FULL : _mi_bin(page->xblock_size));
      _mi_page_abandon(page, &heap->pages[bin]);
    }
  }
  else if (collect == MI_ABANDON) {
    // still used blocks but the thread is done; abandon the page
    _mi_page_abandon(page, pq);
  }
  return true; // don't break
}
115 | | |
116 | 0 | static bool mi_heap_page_never_delayed_free(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_t* page, void* arg1, void* arg2) { |
117 | 0 | MI_UNUSED(arg1); |
118 | 0 | MI_UNUSED(arg2); |
119 | 0 | MI_UNUSED(heap); |
120 | 0 | MI_UNUSED(pq); |
121 | 0 | _mi_page_use_delayed_free(page, MI_NEVER_DELAYED_FREE, false); |
122 | 0 | return true; // don't break |
123 | 0 | } |
124 | | |
// Collect `heap` according to `collect` mode:
//  MI_NORMAL  - cheap housekeeping,
//  MI_FORCE   - full collection (also purge caches/segments),
//  MI_ABANDON - the owning thread is terminating; abandon pages with live blocks.
static void mi_heap_collect_ex(mi_heap_t* heap, mi_collect_t collect)
{
  if (heap==NULL || !mi_heap_is_initialized(heap)) return;

  const bool force = collect >= MI_FORCE;
  _mi_deferred_free(heap, force);

  // gh-112532: we may be called from a thread that is not the owner of the heap
  bool is_main_thread = _mi_is_main_thread() && heap->thread_id == _mi_thread_id();

  // note: never reclaim on collect but leave it to threads that need storage to reclaim
  const bool force_main =
    #ifdef NDEBUG
      collect == MI_FORCE
    #else
      collect >= MI_FORCE
    #endif
      && is_main_thread && mi_heap_is_backing(heap) && !heap->no_reclaim;

  if (force_main) {
    // the main thread is abandoned (end-of-program), try to reclaim all abandoned segments.
    // if all memory is freed by now, all segments should be freed.
    _mi_abandoned_reclaim_all(heap, &heap->tld->segments);
  }

  // if abandoning, mark all pages to no longer add to delayed_free
  if (collect == MI_ABANDON) {
    mi_heap_visit_pages(heap, &mi_heap_page_never_delayed_free, NULL, NULL);
  }

  // free all current thread delayed blocks.
  // (if abandoning, after this there are no more thread-delayed references into the pages.)
  _mi_heap_delayed_free_all(heap);

  // collect retired pages
  _mi_heap_collect_retired(heap, force);

  // free pages that were delayed with QSBR (CPython free-threading extension)
  _PyMem_mi_heap_collect_qsbr(heap);

  // collect all pages owned by this thread
  mi_heap_visit_pages(heap, &mi_heap_page_collect, &collect, NULL);
  mi_assert_internal( collect != MI_ABANDON || mi_atomic_load_ptr_acquire(mi_block_t,&heap->thread_delayed_free) == NULL );

  // collect abandoned segments (in particular, purge expired parts of segments in the abandoned segment list)
  // note: forced purge can be quite expensive if many threads are created/destroyed so we do not force on abandonment
  _mi_abandoned_collect(heap, collect == MI_FORCE /* force? */, &heap->tld->segments);

  // collect segment local caches
  if (force) {
    _mi_segment_thread_collect(&heap->tld->segments);
  }

  // collect regions on program-exit (or shared library unload)
  if (force && is_main_thread && mi_heap_is_backing(heap)) {
    _mi_thread_data_collect();  // collect thread data cache
    _mi_arena_collect(true /* force purge */, &heap->tld->stats);
  }
}
184 | | |
185 | 0 | void _mi_heap_collect_abandon(mi_heap_t* heap) { |
186 | 0 | mi_heap_collect_ex(heap, MI_ABANDON); |
187 | 0 | } |
188 | | |
189 | 0 | void mi_heap_collect(mi_heap_t* heap, bool force) mi_attr_noexcept { |
190 | 0 | mi_heap_collect_ex(heap, (force ? MI_FORCE : MI_NORMAL)); |
191 | 0 | } |
192 | | |
193 | 0 | void mi_collect(bool force) mi_attr_noexcept { |
194 | 0 | mi_heap_collect(mi_prim_get_default_heap(), force); |
195 | 0 | } |
196 | | |
197 | | |
198 | | /* ----------------------------------------------------------- |
199 | | Heap new |
200 | | ----------------------------------------------------------- */ |
201 | | |
// Return the default heap of the calling thread, initializing the
// thread-local mimalloc state first if necessary.
mi_heap_t* mi_heap_get_default(void) {
  mi_thread_init();
  return mi_prim_get_default_heap();
}
206 | | |
207 | 0 | static bool mi_heap_is_default(const mi_heap_t* heap) { |
208 | 0 | return (heap == mi_prim_get_default_heap()); |
209 | 0 | } |
210 | | |
211 | | |
// Return the backing heap of the calling thread (the heap tied to the
// thread-local data); asserts it is owned by the calling thread.
mi_heap_t* mi_heap_get_backing(void) {
  mi_heap_t* heap = mi_heap_get_default();
  mi_assert_internal(heap!=NULL);
  mi_heap_t* bheap = heap->tld->heap_backing;
  mi_assert_internal(bheap!=NULL);
  mi_assert_internal(bheap->thread_id == _mi_thread_id());
  return bheap;
}
220 | | |
// Initialize a fresh heap: copy the empty template, seed its random state
// (split from the backing heap's state unless this *is* the backing heap),
// derive the cookie and free-list keys, and push it on the thread-local
// heaps list.
// `no_reclaim`: when true the heap never adopts abandoned pages (required
// for `mi_heap_destroy` to be safe). `tag`: caller-defined heap tag.
void _mi_heap_init_ex(mi_heap_t* heap, mi_tld_t* tld, mi_arena_id_t arena_id, bool no_reclaim, uint8_t tag)
{
  _mi_memcpy_aligned(heap, &_mi_heap_empty, sizeof(mi_heap_t));
  heap->tld = tld;
  heap->thread_id = _mi_thread_id();
  heap->arena_id = arena_id;
  if (heap == tld->heap_backing) {
    _mi_random_init(&heap->random);
  }
  else {
    _mi_random_split(&tld->heap_backing->random, &heap->random);
  }
  heap->cookie = _mi_heap_random_next(heap) | 1;  // `| 1` keeps the cookie non-zero (and odd)
  heap->keys[0] = _mi_heap_random_next(heap);
  heap->keys[1] = _mi_heap_random_next(heap);
  heap->no_reclaim = no_reclaim;
  heap->tag = tag;
  // push on the thread local heaps list
  heap->next = heap->tld->heaps;
  heap->tld->heaps = heap;
}
242 | | |
// Create a new heap that allocates only from the given arena
// (or from any arena when `arena_id` is `_mi_arena_id_none()`).
// Returns NULL on allocation failure.
mi_decl_nodiscard mi_heap_t* mi_heap_new_in_arena(mi_arena_id_t arena_id) {
  mi_heap_t* bheap = mi_heap_get_backing();
  mi_heap_t* heap = mi_heap_malloc_tp(bheap, mi_heap_t);  // todo: OS allocate in secure mode?
  if (heap == NULL) return NULL;
  // don't reclaim abandoned pages or otherwise destroy is unsafe
  _mi_heap_init_ex(heap, bheap->tld, arena_id, true, 0);
  return heap;
}
251 | | |
// Create a new heap that may allocate from any arena.
mi_decl_nodiscard mi_heap_t* mi_heap_new(void) {
  return mi_heap_new_in_arena(_mi_arena_id_none());
}
255 | | |
// True when memory identified by `memid` may be used by `heap`
// (i.e. it satisfies the heap's arena restriction).
bool _mi_heap_memid_is_suitable(mi_heap_t* heap, mi_memid_t memid) {
  return _mi_arena_memid_is_suitable(memid, heap->arena_id);
}
259 | | |
// Next pseudo-random value from the heap's private random state
// (used for the heap cookie and the encoded free-list keys).
uintptr_t _mi_heap_random_next(mi_heap_t* heap) {
  return _mi_random_next(&heap->random);
}
263 | | |
264 | | // zero out the page queues |
// Reset the heap's page bookkeeping to the empty state: clear the
// direct-free lookup table, restore the empty page queues, and drop the
// delayed-free list and page count. Does NOT free any pages itself.
static void mi_heap_reset_pages(mi_heap_t* heap) {
  mi_assert_internal(heap != NULL);
  mi_assert_internal(mi_heap_is_initialized(heap));
  // TODO: copy full empty heap instead?
  memset(&heap->pages_free_direct, 0, sizeof(heap->pages_free_direct));
  _mi_memcpy_aligned(&heap->pages, &_mi_heap_empty.pages, sizeof(heap->pages));
  heap->thread_delayed_free = NULL;
  heap->page_count = 0;
}
274 | | |
275 | | // called from `mi_heap_destroy` and `mi_heap_delete` to free the internal heap resources. |
276 | 0 | static void mi_heap_free(mi_heap_t* heap) { |
277 | 0 | mi_assert(heap != NULL); |
278 | 0 | mi_assert_internal(mi_heap_is_initialized(heap)); |
279 | 0 | if (heap==NULL || !mi_heap_is_initialized(heap)) return; |
280 | 0 | if (mi_heap_is_backing(heap)) return; // dont free the backing heap |
281 | | |
282 | | // reset default |
283 | 0 | if (mi_heap_is_default(heap)) { |
284 | 0 | _mi_heap_set_default_direct(heap->tld->heap_backing); |
285 | 0 | } |
286 | | |
287 | | // remove ourselves from the thread local heaps list |
288 | | // linear search but we expect the number of heaps to be relatively small |
289 | 0 | mi_heap_t* prev = NULL; |
290 | 0 | mi_heap_t* curr = heap->tld->heaps; |
291 | 0 | while (curr != heap && curr != NULL) { |
292 | 0 | prev = curr; |
293 | 0 | curr = curr->next; |
294 | 0 | } |
295 | 0 | mi_assert_internal(curr == heap); |
296 | 0 | if (curr == heap) { |
297 | 0 | if (prev != NULL) { prev->next = heap->next; } |
298 | 0 | else { heap->tld->heaps = heap->next; } |
299 | 0 | } |
300 | 0 | mi_assert_internal(heap->tld->heaps != NULL); |
301 | | |
302 | | // and free the used memory |
303 | 0 | mi_free(heap); |
304 | 0 | } |
305 | | |
306 | | |
307 | | /* ----------------------------------------------------------- |
308 | | Heap destroy |
309 | | ----------------------------------------------------------- */ |
310 | | |
// Visitor used by `_mi_heap_destroy_pages`: unconditionally free `page`,
// discarding any blocks still in use (their contents are lost). Updates the
// heap statistics accordingly. Always returns true so every page is visited.
static bool _mi_heap_page_destroy(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_t* page, void* arg1, void* arg2) {
  MI_UNUSED(arg1);
  MI_UNUSED(arg2);
  MI_UNUSED(heap);
  MI_UNUSED(pq);

  // ensure no more thread_delayed_free will be added
  _mi_page_use_delayed_free(page, MI_NEVER_DELAYED_FREE, false);

  // stats
  const size_t bsize = mi_page_block_size(page);
  if (bsize > MI_MEDIUM_OBJ_SIZE_MAX) {
    if (bsize <= MI_LARGE_OBJ_SIZE_MAX) {
      mi_heap_stat_decrease(heap, large, bsize);
    }
    else {
      mi_heap_stat_decrease(heap, huge, bsize);
    }
  }
#if (MI_STAT)
  _mi_page_free_collect(page, false);  // update used count
  const size_t inuse = page->used;
  if (bsize <= MI_LARGE_OBJ_SIZE_MAX) {
    mi_heap_stat_decrease(heap, normal, bsize * inuse);
#if (MI_STAT>1)
    mi_heap_stat_decrease(heap, normal_bins[_mi_bin(bsize)], inuse);
#endif
  }
  mi_heap_stat_decrease(heap, malloc, bsize * inuse);  // todo: off for aligned blocks...
#endif

  /// pretend it is all free now
  mi_assert_internal(mi_page_thread_free(page) == NULL);
  page->used = 0;

  // and free the page
  // mi_page_free(page,false);
  page->next = NULL;
  page->prev = NULL;
  _mi_segment_page_free(page,false /* no force? */, &heap->tld->segments);

  return true; // keep going
}
354 | | |
// Free every page of `heap` regardless of live blocks, then reset its
// page queues to the empty state.
void _mi_heap_destroy_pages(mi_heap_t* heap) {
  mi_heap_visit_pages(heap, &_mi_heap_page_destroy, NULL, NULL);
  mi_heap_reset_pages(heap);
}
359 | | |
#if MI_TRACK_HEAP_DESTROY
// Block visitor that reports each block freed by `mi_heap_destroy` to the
// memory tracker (so tools do not flag them as leaked/used afterwards).
static bool mi_cdecl mi_heap_track_block_free(const mi_heap_t* heap, const mi_heap_area_t* area, void* block, size_t block_size, void* arg) {
  MI_UNUSED(heap); MI_UNUSED(area); MI_UNUSED(arg); MI_UNUSED(block_size);
  mi_track_free_size(block,mi_usable_size(block));
  return true;
}
#endif
367 | | |
// Destroy `heap`: free all its pages without freeing individual blocks, so
// any outstanding pointers into the heap become invalid. Only safe for heaps
// created with `no_reclaim`; otherwise falls back to the safe `mi_heap_delete`.
void mi_heap_destroy(mi_heap_t* heap) {
  mi_assert(heap != NULL);
  mi_assert(mi_heap_is_initialized(heap));
  mi_assert(heap->no_reclaim);
  mi_assert_expensive(mi_heap_is_valid(heap));
  if (heap==NULL || !mi_heap_is_initialized(heap)) return;
  if (!heap->no_reclaim) {
    // don't free in case it may contain reclaimed pages
    mi_heap_delete(heap);
  }
  else {
    // track all blocks as freed
    #if MI_TRACK_HEAP_DESTROY
    mi_heap_visit_blocks(heap, true, mi_heap_track_block_free, NULL);
    #endif
    // free all pages
    _mi_heap_destroy_pages(heap);
    mi_heap_free(heap);
  }
}
388 | | |
389 | | // forcefully destroy all heaps in the current thread |
390 | 0 | void _mi_heap_unsafe_destroy_all(void) { |
391 | 0 | mi_heap_t* bheap = mi_heap_get_backing(); |
392 | 0 | mi_heap_t* curr = bheap->tld->heaps; |
393 | 0 | while (curr != NULL) { |
394 | 0 | mi_heap_t* next = curr->next; |
395 | 0 | if (curr->no_reclaim) { |
396 | 0 | mi_heap_destroy(curr); |
397 | 0 | } |
398 | 0 | else { |
399 | 0 | _mi_heap_destroy_pages(curr); |
400 | 0 | } |
401 | 0 | curr = next; |
402 | 0 | } |
403 | 0 | } |
404 | | |
405 | | /* ----------------------------------------------------------- |
406 | | Safe Heap delete |
407 | | ----------------------------------------------------------- */ |
408 | | |
// Transfer the pages from one heap to the other: move every page of `from`
// into `heap` and drain `from`'s delayed frees, leaving `from` empty (but
// still initialized).
static void mi_heap_absorb(mi_heap_t* heap, mi_heap_t* from) {
  mi_assert_internal(heap!=NULL);
  if (from==NULL || from->page_count == 0) return;

  // reduce the size of the delayed frees
  _mi_heap_delayed_free_partial(from);

  // transfer all pages by appending the queues; this will set a new heap field
  // so threads may do delayed frees in either heap for a while.
  // note: appending waits for each page to not be in the `MI_DELAYED_FREEING` state
  // so after this only the new heap will get delayed frees
  for (size_t i = 0; i <= MI_BIN_FULL; i++) {
    mi_page_queue_t* pq = &heap->pages[i];
    mi_page_queue_t* append = &from->pages[i];
    size_t pcount = _mi_page_queue_append(heap, pq, append);
    heap->page_count += pcount;
    from->page_count -= pcount;
  }
  mi_assert_internal(from->page_count == 0);

  // and do outstanding delayed frees in the `from` heap
  // note: be careful here as the `heap` field in all those pages no longer point to `from`,
  // turns out to be ok as `_mi_heap_delayed_free` only visits the list and calls a
  // the regular `_mi_free_delayed_block` which is safe.
  _mi_heap_delayed_free_all(from);
  #if !defined(_MSC_VER) || (_MSC_VER > 1900) // somehow the following line gives an error in VS2015, issue #353
  mi_assert_internal(mi_atomic_load_ptr_relaxed(mi_block_t,&from->thread_delayed_free) == NULL);
  #endif

  // and reset the `from` heap
  mi_heap_reset_pages(from);
}
442 | | |
// Safe delete a heap without freeing any still allocated blocks in that heap:
// live pages are transferred to the backing heap (or abandoned when `heap`
// *is* the backing heap), so outstanding blocks remain valid.
void mi_heap_delete(mi_heap_t* heap)
{
  mi_assert(heap != NULL);
  mi_assert(mi_heap_is_initialized(heap));
  mi_assert_expensive(mi_heap_is_valid(heap));
  if (heap==NULL || !mi_heap_is_initialized(heap)) return;

  if (!mi_heap_is_backing(heap)) {
    // transfer still used pages to the backing heap
    mi_heap_absorb(heap->tld->heap_backing, heap);
  }
  else {
    // the backing heap abandons its pages
    _mi_heap_collect_abandon(heap);
  }
  mi_assert_internal(heap->page_count==0);
  mi_heap_free(heap);
}
462 | | |
// Make `heap` the calling thread's default heap and return the previous
// default; returns NULL when `heap` is NULL or uninitialized.
mi_heap_t* mi_heap_set_default(mi_heap_t* heap) {
  mi_assert(heap != NULL);
  mi_assert(mi_heap_is_initialized(heap));
  if (heap==NULL || !mi_heap_is_initialized(heap)) return NULL;
  mi_assert_expensive(mi_heap_is_valid(heap));
  mi_heap_t* old = mi_prim_get_default_heap();
  _mi_heap_set_default_direct(heap);
  return old;
}
472 | | |
473 | | |
474 | | |
475 | | |
476 | | /* ----------------------------------------------------------- |
477 | | Analysis |
478 | | ----------------------------------------------------------- */ |
479 | | |
// static since it is not thread safe to access heaps from other threads.
// Return the heap owning block `p`, or NULL when `p` is NULL or does not
// look like a valid mimalloc block (segment cookie mismatch).
static mi_heap_t* mi_heap_of_block(const void* p) {
  if (p == NULL) return NULL;
  mi_segment_t* segment = _mi_ptr_segment(p);
  bool valid = (_mi_ptr_cookie(segment) == segment->cookie);  // guards against non-mimalloc pointers
  mi_assert_internal(valid);
  if mi_unlikely(!valid) return NULL;
  return mi_page_heap(_mi_segment_page_of(segment,p));
}
489 | | |
// True when `p` is a block owned by `heap` (only valid for heaps of the
// calling thread; see the note on `mi_heap_of_block`).
bool mi_heap_contains_block(mi_heap_t* heap, const void* p) {
  mi_assert(heap != NULL);
  if (heap==NULL || !mi_heap_is_initialized(heap)) return false;
  return (heap == mi_heap_of_block(p));
}
495 | | |
496 | | |
497 | 0 | static bool mi_heap_page_check_owned(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_t* page, void* p, void* vfound) { |
498 | 0 | MI_UNUSED(heap); |
499 | 0 | MI_UNUSED(pq); |
500 | 0 | bool* found = (bool*)vfound; |
501 | 0 | mi_segment_t* segment = _mi_page_segment(page); |
502 | 0 | void* start = _mi_page_start(segment, page, NULL); |
503 | 0 | void* end = (uint8_t*)start + (page->capacity * mi_page_block_size(page)); |
504 | 0 | *found = (p >= start && p < end); |
505 | 0 | return (!*found); // continue if not found |
506 | 0 | } |
507 | | |
// True when the word-aligned pointer `p` lies inside a block area of one of
// `heap`'s pages (unaligned pointers are rejected up front).
bool mi_heap_check_owned(mi_heap_t* heap, const void* p) {
  mi_assert(heap != NULL);
  if (heap==NULL || !mi_heap_is_initialized(heap)) return false;
  if (((uintptr_t)p & (MI_INTPTR_SIZE - 1)) != 0) return false;  // only aligned pointers
  bool found = false;
  mi_heap_visit_pages(heap, &mi_heap_page_check_owned, (void*)p, &found);
  return found;
}
516 | | |
// True when `p` is owned by the calling thread's default heap.
bool mi_check_owned(const void* p) {
  return mi_heap_check_owned(mi_prim_get_default_heap(), p);
}
520 | | |
521 | | /* ----------------------------------------------------------- |
522 | | Visit all heap blocks and areas |
523 | | Todo: enable visiting abandoned pages, and |
524 | | enable visiting all blocks of all heaps across threads |
525 | | ----------------------------------------------------------- */ |
526 | | |
// Separate struct to keep `mi_page_t` out of the public interface
typedef struct mi_heap_area_ex_s {
  mi_heap_area_t area;   // public view of the page
  mi_page_t* page;       // the underlying page
} mi_heap_area_ex_t;
532 | | |
// Compute a (magic, shift) pair for fast division of 32-bit numerators by
// `divisor` via multiply+shift ("magic number" division, see mi_fast_divide).
// NOTE(review): for divisor==1 this passes 0 to mi_clz — assumes mi_clz(0)
// is well-defined (returns the word width); confirm against the mi_clz
// implementation in mimalloc/internal.h. In practice `divisor` is a block
// size well above 1.
static void mi_fast_divisor(size_t divisor, size_t* magic, size_t* shift) {
  mi_assert_internal(divisor > 0 && divisor <= UINT32_MAX);
  *shift = MI_INTPTR_BITS - mi_clz(divisor - 1);  // shift = ceil(log2(divisor))
  *magic = (size_t)(((1ULL << 32) * ((1ULL << *shift) - divisor)) / divisor + 1);
}
538 | | |
539 | 0 | static size_t mi_fast_divide(size_t n, size_t magic, size_t shift) { |
540 | 0 | mi_assert_internal(n <= UINT32_MAX); |
541 | 0 | return ((((uint64_t) n * magic) >> 32) + n) >> shift; |
542 | 0 | } |
543 | | |
// Visit every allocated (in-use) block of `page`, calling `visitor` with the
// block start and its usable size; returns false when the visitor breaks off.
// Expects free lists were collected beforehand (`local_free == NULL`) so the
// `free` list is the only list of unused blocks.
bool _mi_heap_area_visit_blocks(const mi_heap_area_t* area, mi_page_t *page, mi_block_visit_fun* visitor, void* arg) {
  mi_assert(area != NULL);
  if (area==NULL) return true;
  mi_assert(page != NULL);
  if (page == NULL) return true;

  mi_assert_internal(page->local_free == NULL);
  if (page->used == 0) return true;

  const size_t bsize = mi_page_block_size(page);
  const size_t ubsize = mi_page_usable_block_size(page); // without padding
  size_t psize;
  uint8_t* pstart = _mi_page_start(_mi_page_segment(page), page, &psize);
  mi_heap_t* heap = mi_page_heap(page);

  if (page->capacity == 1) {
    // optimize page with one block
    mi_assert_internal(page->used == 1 && page->free == NULL);
    return visitor(heap, area, pstart, ubsize, arg);
  }

  if (page->used == page->capacity) {
    // optimize full pages: every block is in use, no free list to consult
    uint8_t* block = pstart;
    for (size_t i = 0; i < page->capacity; i++) {
      if (!visitor(heap, area, block, ubsize, arg)) return false;
      block += bsize;
    }
    return true;
  }

  // create a bitmap of free blocks.
  #define MI_MAX_BLOCKS   (MI_SMALL_PAGE_SIZE / sizeof(void*))
  uintptr_t free_map[MI_MAX_BLOCKS / MI_INTPTR_BITS];
  size_t bmapsize = (page->capacity + MI_INTPTR_BITS - 1) / MI_INTPTR_BITS;
  memset(free_map, 0, bmapsize * sizeof(uintptr_t));

  // mark the bits past `capacity` in the final word as "free" so the
  // all-in-use fast path below cannot visit out-of-range blocks
  if (page->capacity % MI_INTPTR_BITS != 0) {
    size_t shift = (page->capacity % MI_INTPTR_BITS);
    uintptr_t mask = (UINTPTR_MAX << shift);
    free_map[bmapsize-1] = mask;
  }

  // fast repeated division by the block size
  size_t magic, shift;
  mi_fast_divisor(bsize, &magic, &shift);

  #if MI_DEBUG>1
  size_t free_count = 0;
  #endif
  // set a bit for every block on the free list
  for (mi_block_t* block = page->free; block != NULL; block = mi_block_next(page,block)) {
    #if MI_DEBUG>1
    free_count++;
    #endif
    mi_assert_internal((uint8_t*)block >= pstart && (uint8_t*)block < (pstart + psize));
    size_t offset = (uint8_t*)block - pstart;
    mi_assert_internal(offset % bsize == 0);
    size_t blockidx = mi_fast_divide(offset, magic, shift);
    mi_assert_internal(blockidx == offset / bsize);
    mi_assert_internal(blockidx < MI_MAX_BLOCKS);
    size_t bitidx = (blockidx / MI_INTPTR_BITS);
    size_t bit = blockidx - (bitidx * MI_INTPTR_BITS);
    free_map[bitidx] |= ((uintptr_t)1 << bit);
  }
  mi_assert_internal(page->capacity == (free_count + page->used));

  // walk through all blocks skipping the free ones
  #if MI_DEBUG>1
  size_t used_count = 0;
  #endif
  uint8_t* block = pstart;
  for (size_t i = 0; i < bmapsize; i++) {
    if (free_map[i] == 0) {
      // every block is in use
      for (size_t j = 0; j < MI_INTPTR_BITS; j++) {
        #if MI_DEBUG>1
        used_count++;
        #endif
        if (!visitor(heap, area, block, ubsize, arg)) return false;
        block += bsize;
      }
    }
    else {
      // `m` has a bit set for each block in use in this word; visit each
      uintptr_t m = ~free_map[i];
      while (m) {
        #if MI_DEBUG>1
        used_count++;
        #endif
        size_t bitidx = mi_ctz(m);
        if (!visitor(heap, area, block + (bitidx * bsize), ubsize, arg)) return false;
        m &= m - 1;  // clear the lowest set bit
      }
      block += bsize * MI_INTPTR_BITS;
    }
  }
  mi_assert_internal(page->used == used_count);
  return true;
}
642 | | |
// Internal visitor type over extended heap areas.
typedef bool (mi_heap_area_visit_fun)(const mi_heap_t* heap, const mi_heap_area_ex_t* area, void* arg);

// Fill the public `mi_heap_area_t` view from `page`, collecting free lists
// first so the `used` count is accurate.
void _mi_heap_area_init(mi_heap_area_t* area, mi_page_t* page) {
  _mi_page_free_collect(page,true);
  const size_t bsize = mi_page_block_size(page);
  const size_t ubsize = mi_page_usable_block_size(page);
  area->reserved = page->reserved * bsize;
  area->committed = page->capacity * bsize;
  area->blocks = _mi_page_start(_mi_page_segment(page), page, NULL);
  area->used = page->used; // number of blocks in use (#553)
  area->block_size = ubsize;
  area->full_block_size = bsize;
}
656 | | |
// Page-visitor adapter: wrap `page` in an extended area and forward it to
// the area visitor smuggled through `vfun`.
static bool mi_heap_visit_areas_page(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_t* page, void* vfun, void* arg) {
  MI_UNUSED(heap);
  MI_UNUSED(pq);
  mi_heap_area_visit_fun* fun = (mi_heap_area_visit_fun*)vfun;
  mi_heap_area_ex_t xarea;
  xarea.page = page;
  _mi_heap_area_init(&xarea.area, page);
  return fun(heap, &xarea, arg);
}
666 | | |
// Visit all heap pages as areas; returns false when the visitor breaks off
// (or when no visitor is given).
static bool mi_heap_visit_areas(const mi_heap_t* heap, mi_heap_area_visit_fun* visitor, void* arg) {
  if (visitor == NULL) return false;
  return mi_heap_visit_pages((mi_heap_t*)heap, &mi_heap_visit_areas_page, (void*)(visitor), arg); // note: function pointer to void* :-{
}
672 | | |
// Just to pass arguments
typedef struct mi_visit_blocks_args_s {
  bool visit_blocks;            // also visit individual blocks, not just areas?
  mi_block_visit_fun* visitor;  // user callback
  void* arg;                    // user argument forwarded to `visitor`
} mi_visit_blocks_args_t;

// Area visitor: first report the area itself (with `block == NULL`), then
// optionally visit every allocated block inside it.
static bool mi_heap_area_visitor(const mi_heap_t* heap, const mi_heap_area_ex_t* xarea, void* arg) {
  mi_visit_blocks_args_t* args = (mi_visit_blocks_args_t*)arg;
  if (!args->visitor(heap, &xarea->area, NULL, xarea->area.block_size, args->arg)) return false;
  if (args->visit_blocks) {
    return _mi_heap_area_visit_blocks(&xarea->area, xarea->page, args->visitor, args->arg);
  }
  else {
    return true;
  }
}
690 | | |
// Visit all blocks in a heap: call `visitor` once per area and, when
// `visit_blocks` is set, once per allocated block. Partially drains the
// delayed-free list first so the reported counts are accurate.
bool mi_heap_visit_blocks(const mi_heap_t* heap, bool visit_blocks, mi_block_visit_fun* visitor, void* arg) {
  mi_visit_blocks_args_t args = { visit_blocks, visitor, arg };
  _mi_heap_delayed_free_partial((mi_heap_t *)heap);
  return mi_heap_visit_areas(heap, &mi_heap_area_visitor, &args);
}