Coverage Report

Created: 2025-07-04 06:49

/src/cpython/Objects/mimalloc/heap.c
Line
Count
Source
1
/*----------------------------------------------------------------------------
2
Copyright (c) 2018-2021, Microsoft Research, Daan Leijen
3
This is free software; you can redistribute it and/or modify it under the
4
terms of the MIT license. A copy of the license can be found in the file
5
"LICENSE" at the root of this distribution.
6
-----------------------------------------------------------------------------*/
7
8
#include "mimalloc.h"
9
#include "mimalloc/internal.h"
10
#include "mimalloc/atomic.h"
11
#include "mimalloc/prim.h"  // mi_prim_get_default_heap
12
13
#include <string.h>  // memset, memcpy
14
15
#if defined(_MSC_VER) && (_MSC_VER < 1920)
16
#pragma warning(disable:4204)  // non-constant aggregate initializer
17
#endif
18
19
/* -----------------------------------------------------------
20
  Helpers
21
----------------------------------------------------------- */
22
23
// return `true` if ok, `false` to break
24
typedef bool (heap_page_visitor_fun)(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_t* page, void* arg1, void* arg2);
25
26
// Visit all pages in a heap; returns `false` if the visitor broke out of the walk.
27
static bool mi_heap_visit_pages(mi_heap_t* heap, heap_page_visitor_fun* fn, void* arg1, void* arg2)
28
0
{
29
0
  if (heap==NULL || heap->page_count==0) return true;
30
31
  // visit all pages
32
  #if MI_DEBUG>1
33
  size_t total = heap->page_count;
34
  size_t count = 0;
35
  #endif
36
37
0
  for (size_t i = 0; i <= MI_BIN_FULL; i++) {
38
0
    mi_page_queue_t* pq = &heap->pages[i];
39
0
    mi_page_t* page = pq->first;
40
0
    while(page != NULL) {
41
0
      mi_page_t* next = page->next; // save next in case the page gets removed from the queue
42
0
      mi_assert_internal(mi_page_heap(page) == heap);
43
      #if MI_DEBUG>1
44
      count++;
45
      #endif
46
0
      if (!fn(heap, pq, page, arg1, arg2)) return false;
47
0
      page = next; // and continue
48
0
    }
49
0
  }
50
0
  mi_assert_internal(count == total);
51
0
  return true;
52
0
}
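
mi_heap_visit_pages drives most of the heap walking in this file: a heap_page_visitor_fun callback is applied to every page in every bin queue, and returning false stops the walk early. As an illustration only (the helper is static, so such code would have to live in this translation unit; the names below are hypothetical), a page-counting visitor could look like this:

static bool mi_count_pages_visitor(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_t* page, void* arg1, void* arg2) {
  MI_UNUSED(heap); MI_UNUSED(pq); MI_UNUSED(page); MI_UNUSED(arg2);
  size_t* count = (size_t*)arg1;   // arg1 carries the accumulator
  (*count)++;
  return true;                     // keep visiting
}

static size_t mi_count_pages(mi_heap_t* heap) {
  size_t count = 0;
  mi_heap_visit_pages(heap, &mi_count_pages_visitor, &count, NULL);
  return count;                    // should equal heap->page_count
}
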
53
54
55
#if MI_DEBUG>=2
56
static bool mi_heap_page_is_valid(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_t* page, void* arg1, void* arg2) {
57
  MI_UNUSED(arg1);
58
  MI_UNUSED(arg2);
59
  MI_UNUSED(pq);
60
  mi_assert_internal(mi_page_heap(page) == heap);
61
  mi_segment_t* segment = _mi_page_segment(page);
62
  mi_assert_internal(segment->thread_id == heap->thread_id);
63
  mi_assert_expensive(_mi_page_is_valid(page));
64
  return true;
65
}
66
#endif
67
#if MI_DEBUG>=3
68
static bool mi_heap_is_valid(mi_heap_t* heap) {
69
  mi_assert_internal(heap!=NULL);
70
  mi_heap_visit_pages(heap, &mi_heap_page_is_valid, NULL, NULL);
71
  return true;
72
}
73
#endif
74
75
76
77
78
/* -----------------------------------------------------------
79
  "Collect" pages by migrating `local_free` and `thread_free`
80
  lists and freeing empty pages. This is done when a thread
81
  stops (and in that case abandons pages if there are still
82
  blocks alive)
83
----------------------------------------------------------- */
84
85
typedef enum mi_collect_e {
86
  MI_NORMAL,
87
  MI_FORCE,
88
  MI_ABANDON
89
} mi_collect_t;
90
91
92
0
static bool mi_heap_page_collect(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_t* page, void* arg_collect, void* arg2 ) {
93
0
  MI_UNUSED(arg2);
94
0
  MI_UNUSED(heap);
95
0
  mi_assert_internal(mi_heap_page_is_valid(heap, pq, page, NULL, NULL));
96
0
  mi_collect_t collect = *((mi_collect_t*)arg_collect);
97
0
  _mi_page_free_collect(page, collect >= MI_FORCE);
98
0
  if (mi_page_all_free(page)) {
99
    // no more used blocks, free the page.
100
    // note: this will free retired pages as well.
101
0
    bool freed = _PyMem_mi_page_maybe_free(page, pq, collect >= MI_FORCE);
102
0
    if (!freed && collect == MI_ABANDON) {
103
0
      _mi_page_abandon(page, pq);
104
0
    }
105
0
  }
106
0
  else if (collect == MI_ABANDON) {
107
    // still used blocks but the thread is done; abandon the page
108
0
    _mi_page_abandon(page, pq);
109
0
  }
110
0
  return true; // don't break
111
0
}
112
113
0
static bool mi_heap_page_never_delayed_free(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_t* page, void* arg1, void* arg2) {
114
0
  MI_UNUSED(arg1);
115
0
  MI_UNUSED(arg2);
116
0
  MI_UNUSED(heap);
117
0
  MI_UNUSED(pq);
118
0
  _mi_page_use_delayed_free(page, MI_NEVER_DELAYED_FREE, false);
119
0
  return true; // don't break
120
0
}
121
122
static void mi_heap_collect_ex(mi_heap_t* heap, mi_collect_t collect)
123
0
{
124
0
  if (heap==NULL || !mi_heap_is_initialized(heap)) return;
125
126
0
  const bool force = collect >= MI_FORCE;
127
0
  _mi_deferred_free(heap, force);
128
129
  // gh-112532: we may be called from a thread that is not the owner of the heap
130
0
  bool is_main_thread = _mi_is_main_thread() && heap->thread_id == _mi_thread_id();
131
132
  // note: never reclaim on collect but leave it to threads that need storage to reclaim
133
0
  const bool force_main =
134
0
    #ifdef NDEBUG
135
0
      collect == MI_FORCE
136
    #else
137
      collect >= MI_FORCE
138
    #endif
139
0
      && is_main_thread && mi_heap_is_backing(heap) && !heap->no_reclaim;
140
141
0
  if (force_main) {
142
    // the main thread is abandoned (end-of-program), try to reclaim all abandoned segments.
143
    // if all memory is freed by now, all segments should be freed.
144
0
    _mi_abandoned_reclaim_all(heap, &heap->tld->segments);
145
0
  }
146
147
  // if abandoning, mark all pages to no longer add to delayed_free
148
0
  if (collect == MI_ABANDON) {
149
0
    mi_heap_visit_pages(heap, &mi_heap_page_never_delayed_free, NULL, NULL);
150
0
  }
151
152
  // free all current thread delayed blocks.
153
  // (if abandoning, after this there are no more thread-delayed references into the pages.)
154
0
  _mi_heap_delayed_free_all(heap);
155
156
  // collect retired pages
157
0
  _mi_heap_collect_retired(heap, force);
158
159
  // free pages that were delayed with QSBR
160
0
  _PyMem_mi_heap_collect_qsbr(heap);
161
162
  // collect all pages owned by this thread
163
0
  mi_heap_visit_pages(heap, &mi_heap_page_collect, &collect, NULL);
164
0
  mi_assert_internal( collect != MI_ABANDON || mi_atomic_load_ptr_acquire(mi_block_t,&heap->thread_delayed_free) == NULL );
165
166
  // collect abandoned segments (in particular, purge expired parts of segments in the abandoned segment list)
167
  // note: forced purge can be quite expensive if many threads are created/destroyed so we do not force on abandonment
168
0
  _mi_abandoned_collect(heap, collect == MI_FORCE /* force? */, &heap->tld->segments);
169
170
  // collect segment local caches
171
0
  if (force) {
172
0
    _mi_segment_thread_collect(&heap->tld->segments);
173
0
  }
174
175
  // collect regions on program-exit (or shared library unload)
176
0
  if (force && is_main_thread && mi_heap_is_backing(heap)) {
177
0
    _mi_thread_data_collect();  // collect thread data cache
178
0
    _mi_arena_collect(true /* force purge */, &heap->tld->stats);
179
0
  }
180
0
}
181
182
0
void _mi_heap_collect_abandon(mi_heap_t* heap) {
183
0
  mi_heap_collect_ex(heap, MI_ABANDON);
184
0
}
185
186
0
void mi_heap_collect(mi_heap_t* heap, bool force) mi_attr_noexcept {
187
0
  mi_heap_collect_ex(heap, (force ? MI_FORCE : MI_NORMAL));
188
0
}
189
190
0
void mi_collect(bool force) mi_attr_noexcept {
191
0
  mi_heap_collect(mi_prim_get_default_heap(), force);
192
0
}
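
For reference, a sketch of how these public entry points are typically called from application code, assuming only the standard mimalloc.h API. MI_NORMAL and MI_FORCE correspond to the force flag; the MI_ABANDON path is internal and runs when a thread terminates.

#include <mimalloc.h>

void collect_example(mi_heap_t* heap) {
  mi_heap_collect(heap, false);  // MI_NORMAL: migrate free lists, free empty pages
  mi_heap_collect(heap, true);   // MI_FORCE: additionally purge caches and segments (more expensive)
  mi_collect(true);              // same, but on the current thread's default heap
}
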
193
194
195
/* -----------------------------------------------------------
196
  Heap new
197
----------------------------------------------------------- */
198
199
16
mi_heap_t* mi_heap_get_default(void) {
200
16
  mi_thread_init();
201
16
  return mi_prim_get_default_heap();
202
16
}
203
204
0
static bool mi_heap_is_default(const mi_heap_t* heap) {
205
0
  return (heap == mi_prim_get_default_heap());
206
0
}
207
208
209
0
mi_heap_t* mi_heap_get_backing(void) {
210
0
  mi_heap_t* heap = mi_heap_get_default();
211
0
  mi_assert_internal(heap!=NULL);
212
0
  mi_heap_t* bheap = heap->tld->heap_backing;
213
0
  mi_assert_internal(bheap!=NULL);
214
0
  mi_assert_internal(bheap->thread_id == _mi_thread_id());
215
0
  return bheap;
216
0
}
217
218
void _mi_heap_init_ex(mi_heap_t* heap, mi_tld_t* tld, mi_arena_id_t arena_id, bool no_reclaim, uint8_t tag)
219
0
{
220
0
  _mi_memcpy_aligned(heap, &_mi_heap_empty, sizeof(mi_heap_t));
221
0
  heap->tld = tld;
222
0
  heap->thread_id = _mi_thread_id();
223
0
  heap->arena_id = arena_id;
224
0
  if (heap == tld->heap_backing) {
225
0
    _mi_random_init(&heap->random);
226
0
  }
227
0
  else {
228
0
    _mi_random_split(&tld->heap_backing->random, &heap->random);
229
0
  }
230
0
  heap->cookie = _mi_heap_random_next(heap) | 1;
231
0
  heap->keys[0] = _mi_heap_random_next(heap);
232
0
  heap->keys[1] = _mi_heap_random_next(heap);
233
0
  heap->no_reclaim = no_reclaim;
234
0
  heap->tag = tag;
235
  // push on the thread local heaps list
236
0
  heap->next = heap->tld->heaps;
237
0
  heap->tld->heaps = heap;
238
0
}
239
240
0
mi_decl_nodiscard mi_heap_t* mi_heap_new_in_arena(mi_arena_id_t arena_id) {
241
0
  mi_heap_t* bheap = mi_heap_get_backing();
242
0
  mi_heap_t* heap = mi_heap_malloc_tp(bheap, mi_heap_t);  // todo: OS allocate in secure mode?
243
0
  if (heap == NULL) return NULL;
244
  // don't reclaim abandoned pages, otherwise mi_heap_destroy is unsafe
245
0
  _mi_heap_init_ex(heap, bheap->tld, arena_id, true, 0);
246
0
  return heap;
247
0
}
248
249
0
mi_decl_nodiscard mi_heap_t* mi_heap_new(void) {
250
0
  return mi_heap_new_in_arena(_mi_arena_id_none());
251
0
}
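
A usage sketch for the heap-creation API above, using only calls from the public mimalloc.h header (the function name is illustrative):

#include <mimalloc.h>
#include <stdio.h>

void heap_new_example(void) {
  mi_heap_t* heap = mi_heap_new();   // first-class heap; no_reclaim is set, so mi_heap_destroy is allowed
  if (heap == NULL) return;
  int* data = (int*)mi_heap_malloc(heap, 100 * sizeof(int));
  if (data != NULL) {
    data[0] = 42;
    printf("usable size: %zu bytes\n", mi_usable_size(data));
    mi_free(data);                   // blocks are released with the regular mi_free
  }
  mi_heap_delete(heap);
}
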
252
253
0
bool _mi_heap_memid_is_suitable(mi_heap_t* heap, mi_memid_t memid) {
254
0
  return _mi_arena_memid_is_suitable(memid, heap->arena_id);
255
0
}
256
257
48
uintptr_t _mi_heap_random_next(mi_heap_t* heap) {
258
48
  return _mi_random_next(&heap->random);
259
48
}
260
261
// zero out the page queues
262
0
static void mi_heap_reset_pages(mi_heap_t* heap) {
263
0
  mi_assert_internal(heap != NULL);
264
0
  mi_assert_internal(mi_heap_is_initialized(heap));
265
  // TODO: copy full empty heap instead?
266
0
  memset(&heap->pages_free_direct, 0, sizeof(heap->pages_free_direct));
267
0
  _mi_memcpy_aligned(&heap->pages, &_mi_heap_empty.pages, sizeof(heap->pages));
268
0
  heap->thread_delayed_free = NULL;
269
0
  heap->page_count = 0;
270
0
}
271
272
// called from `mi_heap_destroy` and `mi_heap_delete` to free the internal heap resources.
273
0
static void mi_heap_free(mi_heap_t* heap) {
274
0
  mi_assert(heap != NULL);
275
0
  mi_assert_internal(mi_heap_is_initialized(heap));
276
0
  if (heap==NULL || !mi_heap_is_initialized(heap)) return;
277
0
  if (mi_heap_is_backing(heap)) return; // dont free the backing heap
278
279
  // reset default
280
0
  if (mi_heap_is_default(heap)) {
281
0
    _mi_heap_set_default_direct(heap->tld->heap_backing);
282
0
  }
283
284
  // remove ourselves from the thread local heaps list
285
  // linear search but we expect the number of heaps to be relatively small
286
0
  mi_heap_t* prev = NULL;
287
0
  mi_heap_t* curr = heap->tld->heaps;
288
0
  while (curr != heap && curr != NULL) {
289
0
    prev = curr;
290
0
    curr = curr->next;
291
0
  }
292
0
  mi_assert_internal(curr == heap);
293
0
  if (curr == heap) {
294
0
    if (prev != NULL) { prev->next = heap->next; }
295
0
                 else { heap->tld->heaps = heap->next; }
296
0
  }
297
0
  mi_assert_internal(heap->tld->heaps != NULL);
298
299
  // and free the used memory
300
0
  mi_free(heap);
301
0
}
302
303
304
/* -----------------------------------------------------------
305
  Heap destroy
306
----------------------------------------------------------- */
307
308
0
static bool _mi_heap_page_destroy(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_t* page, void* arg1, void* arg2) {
309
0
  MI_UNUSED(arg1);
310
0
  MI_UNUSED(arg2);
311
0
  MI_UNUSED(heap);
312
0
  MI_UNUSED(pq);
313
314
  // ensure no more thread_delayed_free will be added
315
0
  _mi_page_use_delayed_free(page, MI_NEVER_DELAYED_FREE, false);
316
317
  // stats
318
0
  const size_t bsize = mi_page_block_size(page);
319
0
  if (bsize > MI_MEDIUM_OBJ_SIZE_MAX) {
320
0
    if (bsize <= MI_LARGE_OBJ_SIZE_MAX) {
321
0
      mi_heap_stat_decrease(heap, large, bsize);
322
0
    }
323
0
    else {
324
0
      mi_heap_stat_decrease(heap, huge, bsize);
325
0
    }
326
0
  }
327
#if (MI_STAT)
328
  _mi_page_free_collect(page, false);  // update used count
329
  const size_t inuse = page->used;
330
  if (bsize <= MI_LARGE_OBJ_SIZE_MAX) {
331
    mi_heap_stat_decrease(heap, normal, bsize * inuse);
332
#if (MI_STAT>1)
333
    mi_heap_stat_decrease(heap, normal_bins[_mi_bin(bsize)], inuse);
334
#endif
335
  }
336
  mi_heap_stat_decrease(heap, malloc, bsize * inuse);  // todo: off for aligned blocks...
337
#endif
338
339
  // pretend it is all free now
340
0
  mi_assert_internal(mi_page_thread_free(page) == NULL);
341
0
  page->used = 0;
342
343
  // and free the page
344
  // mi_page_free(page,false);
345
0
  page->next = NULL;
346
0
  page->prev = NULL;
347
0
  _mi_segment_page_free(page,false /* no force? */, &heap->tld->segments);
348
349
0
  return true; // keep going
350
0
}
351
352
0
void _mi_heap_destroy_pages(mi_heap_t* heap) {
353
0
  mi_heap_visit_pages(heap, &_mi_heap_page_destroy, NULL, NULL);
354
0
  mi_heap_reset_pages(heap);
355
0
}
356
357
#if MI_TRACK_HEAP_DESTROY
358
static bool mi_cdecl mi_heap_track_block_free(const mi_heap_t* heap, const mi_heap_area_t* area, void* block, size_t block_size, void* arg) {
359
  MI_UNUSED(heap); MI_UNUSED(area);  MI_UNUSED(arg); MI_UNUSED(block_size);
360
  mi_track_free_size(block,mi_usable_size(block));
361
  return true;
362
}
363
#endif
364
365
0
void mi_heap_destroy(mi_heap_t* heap) {
366
0
  mi_assert(heap != NULL);
367
0
  mi_assert(mi_heap_is_initialized(heap));
368
0
  mi_assert(heap->no_reclaim);
369
0
  mi_assert_expensive(mi_heap_is_valid(heap));
370
0
  if (heap==NULL || !mi_heap_is_initialized(heap)) return;
371
0
  if (!heap->no_reclaim) {
372
    // don't free the blocks as the heap may contain reclaimed pages
373
0
    mi_heap_delete(heap);
374
0
  }
375
0
  else {
376
    // track all blocks as freed
377
    #if MI_TRACK_HEAP_DESTROY
378
    mi_heap_visit_blocks(heap, true, mi_heap_track_block_free, NULL);
379
    #endif
380
    // free all pages
381
0
    _mi_heap_destroy_pages(heap);
382
0
    mi_heap_free(heap);
383
0
  }
384
0
}
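
To make the destroy semantics concrete, a small sketch assuming only the public API: mi_heap_destroy releases every block still allocated in the heap in one go, which is only safe for heaps created with mi_heap_new since those never reclaim pages from other threads.

#include <mimalloc.h>

void heap_destroy_example(void) {
  mi_heap_t* heap = mi_heap_new();
  if (heap == NULL) return;
  for (int i = 0; i < 1000; i++) {
    (void)mi_heap_malloc(heap, 64);  // intentionally never freed individually
  }
  mi_heap_destroy(heap);             // frees all outstanding blocks and the heap itself;
                                     // any remaining pointers into it are now dangling
}
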
385
386
// forcefully destroy all heaps in the current thread
387
0
void _mi_heap_unsafe_destroy_all(void) {
388
0
  mi_heap_t* bheap = mi_heap_get_backing();
389
0
  mi_heap_t* curr = bheap->tld->heaps;
390
0
  while (curr != NULL) {
391
0
    mi_heap_t* next = curr->next;
392
0
    if (curr->no_reclaim) {
393
0
      mi_heap_destroy(curr);
394
0
    }
395
0
    else {
396
0
      _mi_heap_destroy_pages(curr);
397
0
    }
398
0
    curr = next;
399
0
  }
400
0
}
401
402
/* -----------------------------------------------------------
403
  Safe Heap delete
404
----------------------------------------------------------- */
405
406
// Transfer the pages from one heap to the other
407
0
static void mi_heap_absorb(mi_heap_t* heap, mi_heap_t* from) {
408
0
  mi_assert_internal(heap!=NULL);
409
0
  if (from==NULL || from->page_count == 0) return;
410
411
  // reduce the size of the delayed frees
412
0
  _mi_heap_delayed_free_partial(from);
413
414
  // transfer all pages by appending the queues; this will set a new heap field
415
  // so threads may do delayed frees in either heap for a while.
416
  // note: appending waits for each page to not be in the `MI_DELAYED_FREEING` state
417
  // so after this only the new heap will get delayed frees
418
0
  for (size_t i = 0; i <= MI_BIN_FULL; i++) {
419
0
    mi_page_queue_t* pq = &heap->pages[i];
420
0
    mi_page_queue_t* append = &from->pages[i];
421
0
    size_t pcount = _mi_page_queue_append(heap, pq, append);
422
0
    heap->page_count += pcount;
423
0
    from->page_count -= pcount;
424
0
  }
425
0
  mi_assert_internal(from->page_count == 0);
426
427
  // and do outstanding delayed frees in the `from` heap
428
  // note: be careful here as the `heap` field in all those pages no longer points to `from`,
429
  // turns out to be ok as `_mi_heap_delayed_free` only visits the list and calls
430
  // the regular `_mi_free_delayed_block` which is safe.
431
0
  _mi_heap_delayed_free_all(from);
432
0
  #if !defined(_MSC_VER) || (_MSC_VER > 1900) // somehow the following line gives an error in VS2015, issue #353
433
0
  mi_assert_internal(mi_atomic_load_ptr_relaxed(mi_block_t,&from->thread_delayed_free) == NULL);
434
0
  #endif
435
436
  // and reset the `from` heap
437
0
  mi_heap_reset_pages(from);
438
0
}
439
440
// Safely delete a heap without freeing any still-allocated blocks in that heap.
441
void mi_heap_delete(mi_heap_t* heap)
442
0
{
443
0
  mi_assert(heap != NULL);
444
0
  mi_assert(mi_heap_is_initialized(heap));
445
0
  mi_assert_expensive(mi_heap_is_valid(heap));
446
0
  if (heap==NULL || !mi_heap_is_initialized(heap)) return;
447
448
0
  if (!mi_heap_is_backing(heap)) {
449
    // transfer still used pages to the backing heap
450
0
    mi_heap_absorb(heap->tld->heap_backing, heap);
451
0
  }
452
0
  else {
453
    // the backing heap abandons its pages
454
0
    _mi_heap_collect_abandon(heap);
455
0
  }
456
0
  mi_assert_internal(heap->page_count==0);
457
0
  mi_heap_free(heap);
458
0
}
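
In contrast to mi_heap_destroy, mi_heap_delete keeps outstanding blocks alive: mi_heap_absorb transfers the heap's page queues to the backing heap, so the blocks can still be freed later with mi_free. A sketch under that assumption (hypothetical function name):

#include <mimalloc.h>
#include <string.h>

char* copy_string(const char* s) {
  mi_heap_t* heap = mi_heap_new();
  if (heap == NULL) return NULL;
  char* copy = (char*)mi_heap_malloc(heap, strlen(s) + 1);
  if (copy != NULL) { strcpy(copy, s); }
  mi_heap_delete(heap);   // pages migrate to the backing heap; `copy` stays valid
  return copy;            // the caller eventually releases it with mi_free(copy)
}
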
459
460
0
mi_heap_t* mi_heap_set_default(mi_heap_t* heap) {
461
0
  mi_assert(heap != NULL);
462
0
  mi_assert(mi_heap_is_initialized(heap));
463
0
  if (heap==NULL || !mi_heap_is_initialized(heap)) return NULL;
464
0
  mi_assert_expensive(mi_heap_is_valid(heap));
465
0
  mi_heap_t* old = mi_prim_get_default_heap();
466
0
  _mi_heap_set_default_direct(heap);
467
0
  return old;
468
0
}
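
A usage sketch for mi_heap_set_default (public API): route the current thread's default allocations to a custom heap for a scoped region and restore the previous default afterwards. The function name is illustrative.

#include <mimalloc.h>

void scoped_default_heap(void) {
  mi_heap_t* scratch = mi_heap_new();
  if (scratch == NULL) return;
  mi_heap_t* old = mi_heap_set_default(scratch);
  void* p = mi_malloc(256);     // plain mi_malloc now allocates from `scratch` on this thread
  mi_free(p);
  mi_heap_set_default(old);     // restore the previous default heap
  mi_heap_delete(scratch);
}
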
469
470
471
472
473
/* -----------------------------------------------------------
474
  Analysis
475
----------------------------------------------------------- */
476
477
// static since it is not thread safe to access heaps from other threads.
478
0
static mi_heap_t* mi_heap_of_block(const void* p) {
479
0
  if (p == NULL) return NULL;
480
0
  mi_segment_t* segment = _mi_ptr_segment(p);
481
0
  bool valid = (_mi_ptr_cookie(segment) == segment->cookie);
482
0
  mi_assert_internal(valid);
483
0
  if mi_unlikely(!valid) return NULL;
484
0
  return mi_page_heap(_mi_segment_page_of(segment,p));
485
0
}
486
487
0
bool mi_heap_contains_block(mi_heap_t* heap, const void* p) {
488
0
  mi_assert(heap != NULL);
489
0
  if (heap==NULL || !mi_heap_is_initialized(heap)) return false;
490
0
  return (heap == mi_heap_of_block(p));
491
0
}
492
493
494
0
static bool mi_heap_page_check_owned(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_t* page, void* p, void* vfound) {
495
0
  MI_UNUSED(heap);
496
0
  MI_UNUSED(pq);
497
0
  bool* found = (bool*)vfound;
498
0
  mi_segment_t* segment = _mi_page_segment(page);
499
0
  void* start = _mi_page_start(segment, page, NULL);
500
0
  void* end   = (uint8_t*)start + (page->capacity * mi_page_block_size(page));
501
0
  *found = (p >= start && p < end);
502
0
  return (!*found); // continue if not found
503
0
}
504
505
0
bool mi_heap_check_owned(mi_heap_t* heap, const void* p) {
506
0
  mi_assert(heap != NULL);
507
0
  if (heap==NULL || !mi_heap_is_initialized(heap)) return false;
508
0
  if (((uintptr_t)p & (MI_INTPTR_SIZE - 1)) != 0) return false;  // only aligned pointers
509
0
  bool found = false;
510
0
  mi_heap_visit_pages(heap, &mi_heap_page_check_owned, (void*)p, &found);
511
0
  return found;
512
0
}
513
514
0
bool mi_check_owned(const void* p) {
515
0
  return mi_heap_check_owned(mi_prim_get_default_heap(), p);
516
0
}
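
A sketch of the ownership checks above (public API; the walks are linear in the number of pages, so they are mainly intended for debugging). The assertions assume `heap` is not the thread's default heap.

#include <mimalloc.h>
#include <assert.h>

void ownership_example(void) {
  mi_heap_t* heap = mi_heap_new();
  if (heap == NULL) return;
  void* p = mi_heap_malloc(heap, 32);
  assert(mi_heap_check_owned(heap, p));     // p lies inside a page of `heap`
  assert(mi_heap_contains_block(heap, p));  // and `heap` is the heap of p's page
  assert(!mi_check_owned(p));               // the default heap does not own it
  mi_free(p);
  mi_heap_destroy(heap);
}
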
517
518
/* -----------------------------------------------------------
519
  Visit all heap blocks and areas
520
  Todo: enable visiting abandoned pages, and
521
        enable visiting all blocks of all heaps across threads
522
----------------------------------------------------------- */
523
524
// Separate struct to keep `mi_page_t` out of the public interface
525
typedef struct mi_heap_area_ex_s {
526
  mi_heap_area_t area;
527
  mi_page_t*     page;
528
} mi_heap_area_ex_t;
529
530
0
static void mi_fast_divisor(size_t divisor, size_t* magic, size_t* shift) {
531
0
  mi_assert_internal(divisor > 0 && divisor <= UINT32_MAX);
532
0
  *shift = MI_INTPTR_BITS - mi_clz(divisor - 1);
533
0
  *magic = (size_t)(((1ULL << 32) * ((1ULL << *shift) - divisor)) / divisor + 1);
534
0
}
535
536
0
static size_t mi_fast_divide(size_t n, size_t magic, size_t shift) {
537
0
  mi_assert_internal(n <= UINT32_MAX);
538
0
  return ((((uint64_t) n * magic) >> 32) + n) >> shift;
539
0
}
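
These two helpers implement round-up "magic number" division so that the block index below can be computed without a hardware divide per free block. A standalone sketch of the same construction in plain 64-bit arithmetic, checked against ordinary division (hypothetical names; __builtin_clzll is assumed for the leading-zero count):

#include <assert.h>
#include <stdint.h>
#include <stddef.h>

/* shift = ceil(log2(divisor)); magic = floor(2^32 * (2^shift - divisor) / divisor) + 1,
   exact for any dividend that fits in 32 bits. */
static void fast_divisor(uint32_t divisor, uint64_t* magic, unsigned* shift) {
  assert(divisor > 1);  // block sizes are always larger than one byte
  *shift = 64 - (unsigned)__builtin_clzll((uint64_t)divisor - 1);
  *magic = ((UINT64_C(1) << 32) * ((UINT64_C(1) << *shift) - divisor)) / divisor + 1;
}

static uint32_t fast_divide(uint32_t n, uint64_t magic, unsigned shift) {
  return (uint32_t)(((((uint64_t)n * magic) >> 32) + n) >> shift);
}

int check_fast_divide(void) {
  const uint32_t divisors[] = { 8, 24, 48, 320, 1024, 655360 };
  for (size_t i = 0; i < sizeof(divisors)/sizeof(divisors[0]); i++) {
    uint64_t magic; unsigned shift;
    fast_divisor(divisors[i], &magic, &shift);
    for (uint32_t n = 0; n < 1000000; n += 7) {
      if (fast_divide(n, magic, shift) != n / divisors[i]) return 0;  // mismatch
    }
  }
  return 1;  // all checks passed
}
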
540
541
0
bool _mi_heap_area_visit_blocks(const mi_heap_area_t* area, mi_page_t *page, mi_block_visit_fun* visitor, void* arg) {
542
0
  mi_assert(area != NULL);
543
0
  if (area==NULL) return true;
544
0
  mi_assert(page != NULL);
545
0
  if (page == NULL) return true;
546
547
0
  mi_assert_internal(page->local_free == NULL);
548
0
  if (page->used == 0) return true;
549
550
0
  const size_t bsize = mi_page_block_size(page);
551
0
  const size_t ubsize = mi_page_usable_block_size(page); // without padding
552
0
  size_t   psize;
553
0
  uint8_t* pstart = _mi_page_start(_mi_page_segment(page), page, &psize);
554
0
  mi_heap_t* heap = mi_page_heap(page);
555
556
0
  if (page->capacity == 1) {
557
    // optimize page with one block
558
0
    mi_assert_internal(page->used == 1 && page->free == NULL);
559
0
    return visitor(heap, area, pstart, ubsize, arg);
560
0
  }
561
562
0
  if (page->used == page->capacity) {
563
    // optimize full pages
564
0
    uint8_t* block = pstart;
565
0
    for (size_t i = 0; i < page->capacity; i++) {
566
0
        if (!visitor(heap, area, block, ubsize, arg)) return false;
567
0
        block += bsize;
568
0
    }
569
0
    return true;
570
0
  }
571
572
  // create a bitmap of free blocks.
573
0
  #define MI_MAX_BLOCKS   (MI_SMALL_PAGE_SIZE / sizeof(void*))
574
0
  uintptr_t free_map[MI_MAX_BLOCKS / MI_INTPTR_BITS];
575
0
  size_t bmapsize = (page->capacity + MI_INTPTR_BITS - 1) / MI_INTPTR_BITS;
576
0
  memset(free_map, 0, bmapsize * sizeof(uintptr_t));
577
578
0
  if (page->capacity % MI_INTPTR_BITS != 0) {
579
0
    size_t shift = (page->capacity % MI_INTPTR_BITS);
580
0
    uintptr_t mask = (UINTPTR_MAX << shift);
581
0
    free_map[bmapsize-1] = mask;
582
0
  }
583
584
  // fast repeated division by the block size
585
0
  size_t magic, shift;
586
0
  mi_fast_divisor(bsize, &magic, &shift);
587
588
  #if MI_DEBUG>1
589
  size_t free_count = 0;
590
  #endif
591
0
  for (mi_block_t* block = page->free; block != NULL; block = mi_block_next(page,block)) {
592
    #if MI_DEBUG>1
593
    free_count++;
594
    #endif
595
0
    mi_assert_internal((uint8_t*)block >= pstart && (uint8_t*)block < (pstart + psize));
596
0
    size_t offset = (uint8_t*)block - pstart;
597
0
    mi_assert_internal(offset % bsize == 0);
598
0
    size_t blockidx = mi_fast_divide(offset, magic, shift);
599
0
    mi_assert_internal(blockidx == offset / bsize);
600
0
    mi_assert_internal(blockidx < MI_MAX_BLOCKS);
601
0
    size_t bitidx = (blockidx / MI_INTPTR_BITS);
602
0
    size_t bit = blockidx - (bitidx * MI_INTPTR_BITS);
603
0
    free_map[bitidx] |= ((uintptr_t)1 << bit);
604
0
  }
605
0
  mi_assert_internal(page->capacity == (free_count + page->used));
606
607
  // walk through all blocks skipping the free ones
608
  #if MI_DEBUG>1
609
  size_t used_count = 0;
610
  #endif
611
0
  uint8_t* block = pstart;
612
0
  for (size_t i = 0; i < bmapsize; i++) {
613
0
    if (free_map[i] == 0) {
614
      // every block is in use
615
0
      for (size_t j = 0; j < MI_INTPTR_BITS; j++) {
616
        #if MI_DEBUG>1
617
        used_count++;
618
        #endif
619
0
        if (!visitor(heap, area, block, ubsize, arg)) return false;
620
0
        block += bsize;
621
0
      }
622
0
    }
623
0
    else {
624
0
      uintptr_t m = ~free_map[i];
625
0
      while (m) {
626
        #if MI_DEBUG>1
627
        used_count++;
628
        #endif
629
0
        size_t bitidx = mi_ctz(m);
630
0
        if (!visitor(heap, area, block + (bitidx * bsize), ubsize, arg)) return false;
631
0
        m &= m - 1;
632
0
      }
633
0
      block += bsize * MI_INTPTR_BITS;
634
0
    }
635
0
  }
636
0
  mi_assert_internal(page->used == used_count);
637
0
  return true;
638
0
}
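
The inner loop above visits used blocks by scanning the complement of the free bitmap one word at a time, repeatedly taking the lowest set bit with count-trailing-zeros and clearing it with m &= m - 1. A standalone sketch of that bit-scanning idiom (illustrative names; __builtin_ctzll stands in for mi_ctz):

#include <stdint.h>
#include <stdio.h>

/* Call visit(index) for every set bit in mask, lowest index first. */
static void for_each_set_bit(uint64_t mask, void (*visit)(unsigned index)) {
  while (mask != 0) {
    unsigned bit = (unsigned)__builtin_ctzll(mask);  // index of the lowest set bit
    visit(bit);
    mask &= mask - 1;                                // clear that bit
  }
}

static void print_index(unsigned index) { printf("used block %u\n", index); }

int main(void) {
  for_each_set_bit(0xA5ULL, &print_index);   // prints 0, 2, 5, 7
  return 0;
}
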
639
640
typedef bool (mi_heap_area_visit_fun)(const mi_heap_t* heap, const mi_heap_area_ex_t* area, void* arg);
641
642
0
void _mi_heap_area_init(mi_heap_area_t* area, mi_page_t* page) {
643
0
  _mi_page_free_collect(page,true);
644
0
  const size_t bsize = mi_page_block_size(page);
645
0
  const size_t ubsize = mi_page_usable_block_size(page);
646
0
  area->reserved = page->reserved * bsize;
647
0
  area->committed = page->capacity * bsize;
648
0
  area->blocks = _mi_page_start(_mi_page_segment(page), page, NULL);
649
0
  area->used = page->used;   // number of blocks in use (#553)
650
0
  area->block_size = ubsize;
651
0
  area->full_block_size = bsize;
652
0
}
653
654
0
static bool mi_heap_visit_areas_page(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_t* page, void* vfun, void* arg) {
655
0
  MI_UNUSED(heap);
656
0
  MI_UNUSED(pq);
657
0
  mi_heap_area_visit_fun* fun = (mi_heap_area_visit_fun*)vfun;
658
0
  mi_heap_area_ex_t xarea;
659
0
  xarea.page = page;
660
0
  _mi_heap_area_init(&xarea.area, page);
661
0
  return fun(heap, &xarea, arg);
662
0
}
663
664
// Visit all heap pages as areas
665
0
static bool mi_heap_visit_areas(const mi_heap_t* heap, mi_heap_area_visit_fun* visitor, void* arg) {
666
0
  if (visitor == NULL) return false;
667
0
  return mi_heap_visit_pages((mi_heap_t*)heap, &mi_heap_visit_areas_page, (void*)(visitor), arg); // note: function pointer to void* :-{
668
0
}
669
670
// Just to pass arguments
671
typedef struct mi_visit_blocks_args_s {
672
  bool  visit_blocks;
673
  mi_block_visit_fun* visitor;
674
  void* arg;
675
} mi_visit_blocks_args_t;
676
677
0
static bool mi_heap_area_visitor(const mi_heap_t* heap, const mi_heap_area_ex_t* xarea, void* arg) {
678
0
  mi_visit_blocks_args_t* args = (mi_visit_blocks_args_t*)arg;
679
0
  if (!args->visitor(heap, &xarea->area, NULL, xarea->area.block_size, args->arg)) return false;
680
0
  if (args->visit_blocks) {
681
0
    return _mi_heap_area_visit_blocks(&xarea->area, xarea->page, args->visitor, args->arg);
682
0
  }
683
0
  else {
684
0
    return true;
685
0
  }
686
0
}
687
688
// Visit all blocks in a heap
689
0
bool mi_heap_visit_blocks(const mi_heap_t* heap, bool visit_blocks, mi_block_visit_fun* visitor, void* arg) {
690
0
  mi_visit_blocks_args_t args = { visit_blocks, visitor, arg };
691
0
  _mi_heap_delayed_free_partial((mi_heap_t *)heap);
692
0
  return mi_heap_visit_areas(heap, &mi_heap_area_visitor, &args);
693
0
}
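
Finally, a usage sketch for the visiting API implemented above. mi_heap_visit_blocks and the mi_block_visit_fun callback type are part of the public mimalloc.h interface; the callback and the byte counter below are illustrative. The visitor is first called once per area with block == NULL, and then, when visit_blocks is true, once per live block.

#include <mimalloc.h>
#include <stdio.h>

static bool count_live_bytes(const mi_heap_t* heap, const mi_heap_area_t* area,
                             void* block, size_t block_size, void* arg) {
  (void)heap; (void)area;
  if (block != NULL) {              // skip the per-area call
    *(size_t*)arg += block_size;
  }
  return true;                      // keep visiting
}

void report_heap_usage(mi_heap_t* heap) {
  size_t total = 0;
  mi_heap_visit_blocks(heap, true /* visit individual blocks */, &count_live_bytes, &total);
  printf("live bytes in heap: %zu\n", total);
}
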