Coverage Report

Created: 2025-07-04 06:49

/src/cpython/Objects/mimalloc/arena.c
Line
Count
Source
1
/* ----------------------------------------------------------------------------
2
Copyright (c) 2019-2023, Microsoft Research, Daan Leijen
3
This is free software; you can redistribute it and/or modify it under the
4
terms of the MIT license. A copy of the license can be found in the file
5
"LICENSE" at the root of this distribution.
6
-----------------------------------------------------------------------------*/
7
8
/* ----------------------------------------------------------------------------
9
"Arenas" are fixed area's of OS memory from which we can allocate
10
  large blocks (>= MI_ARENA_MIN_OBJ_SIZE, 32MiB).
11
In contrast to the rest of mimalloc, the arenas are shared between
12
threads and need to be accessed using atomic operations.
13
14
  Arenas are used for huge OS page (1GiB) reservations or for reserving
15
  OS memory upfront, which can improve performance or is sometimes needed
16
on embedded devices. We can also employ this with WASI or `sbrk` systems
17
to reserve large arenas upfront and be able to reuse the memory more effectively.
18
19
  Arena allocation needs to be thread safe, so we use an atomic bitmap to allocate.
20
-----------------------------------------------------------------------------*/
21
#include "mimalloc.h"
22
#include "mimalloc/internal.h"
23
#include "mimalloc/atomic.h"
24
25
#include <string.h>  // memset
26
#include <errno.h>   // ENOMEM
27
28
#include "bitmap.h"  // atomic bitmap
29
30
/* -----------------------------------------------------------
31
  Arena allocation
32
----------------------------------------------------------- */
33
34
// Block info: bit 0 contains the `in_use` bit, the upper bits the
35
// size in count of arena blocks.
36
typedef uintptr_t mi_block_info_t;
37
0
#define MI_ARENA_BLOCK_SIZE   (MI_SEGMENT_SIZE)        // 64MiB  (must be at least MI_SEGMENT_ALIGN)
38
0
#define MI_ARENA_MIN_OBJ_SIZE (MI_ARENA_BLOCK_SIZE/2)  // 32MiB
39
0
#define MI_MAX_ARENAS         (112)                    // not more than 126 (since we use 7 bits in the memid and an arena index + 1)
40
41
// A memory arena descriptor
42
typedef struct mi_arena_s {
43
  mi_arena_id_t id;                       // arena id; 0 for non-specific
44
  mi_memid_t memid;                       // memid of the memory area
45
  _Atomic(uint8_t*) start;                // the start of the memory area
46
  size_t   block_count;                   // size of the area in arena blocks (of `MI_ARENA_BLOCK_SIZE`)
47
  size_t   field_count;                   // number of bitmap fields (where `field_count * MI_BITMAP_FIELD_BITS >= block_count`)
48
  size_t   meta_size;                     // size of the arena structure itself (including its bitmaps)
49
  mi_memid_t meta_memid;                  // memid of the arena structure itself (OS or static allocation)
50
  int      numa_node;                     // associated NUMA node
51
  bool     exclusive;                     // only allow allocations if specifically for this arena
52
  bool     is_large;                      // memory area consists of large- or huge OS pages (always committed)
53
  _Atomic(size_t) search_idx;             // optimization to start the search for free blocks
54
  _Atomic(mi_msecs_t) purge_expire;       // expiration time when blocks should be purged from `blocks_purge`.
55
  mi_bitmap_field_t* blocks_dirty;        // are the blocks potentially non-zero?
56
  mi_bitmap_field_t* blocks_committed;    // are the blocks committed? (can be NULL for memory that cannot be decommitted)
57
  mi_bitmap_field_t* blocks_purge;        // blocks that can be (reset) decommitted. (can be NULL for memory that cannot be (reset) decommitted)
58
  mi_bitmap_field_t  blocks_inuse[1];     // in-place bitmap of in-use blocks (of size `field_count`)
59
} mi_arena_t;
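// Note: `blocks_inuse[1]` is an in-place trailing bitmap; the arena struct is over-allocated
// (see `mi_manage_os_memory_ex2`) so the dirty, committed, and purge bitmaps directly follow
// it within the same allocation.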
60
61
62
// The available arenas
63
static mi_decl_cache_align _Atomic(mi_arena_t*) mi_arenas[MI_MAX_ARENAS];
64
static mi_decl_cache_align _Atomic(size_t)      mi_arena_count; // = 0
65
66
67
//static bool mi_manage_os_memory_ex2(void* start, size_t size, bool is_large, int numa_node, bool exclusive, mi_memid_t memid, mi_arena_id_t* arena_id) mi_attr_noexcept;
68
69
/* -----------------------------------------------------------
70
  Arena id's
71
  id = arena_index + 1
72
----------------------------------------------------------- */
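// For example: the arena stored in `mi_arenas[3]` gets id 4, and `mi_arena_id_index(4) == 3`;
// id 0 (`_mi_arena_id_none`) maps to MI_MAX_ARENAS and therefore never indexes a valid slot.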
73
74
0
static size_t mi_arena_id_index(mi_arena_id_t id) {
75
0
  return (size_t)(id <= 0 ? MI_MAX_ARENAS : id - 1);
76
0
}
77
78
0
static mi_arena_id_t mi_arena_id_create(size_t arena_index) {
79
0
  mi_assert_internal(arena_index < MI_MAX_ARENAS);
80
0
  return (int)arena_index + 1;
81
0
}
82
83
0
mi_arena_id_t _mi_arena_id_none(void) {
84
0
  return 0;
85
0
}
86
87
0
static bool mi_arena_id_is_suitable(mi_arena_id_t arena_id, bool arena_is_exclusive, mi_arena_id_t req_arena_id) {
88
0
  return ((!arena_is_exclusive && req_arena_id == _mi_arena_id_none()) ||
89
0
          (arena_id == req_arena_id));
90
0
}
91
92
0
bool _mi_arena_memid_is_suitable(mi_memid_t memid, mi_arena_id_t request_arena_id) {
93
0
  if (memid.memkind == MI_MEM_ARENA) {
94
0
    return mi_arena_id_is_suitable(memid.mem.arena.id, memid.mem.arena.is_exclusive, request_arena_id);
95
0
  }
96
0
  else {
97
0
    return mi_arena_id_is_suitable(0, false, request_arena_id);
98
0
  }
99
0
}
100
101
0
bool _mi_arena_memid_is_os_allocated(mi_memid_t memid) {
102
0
  return (memid.memkind == MI_MEM_OS);
103
0
}
104
105
/* -----------------------------------------------------------
106
  Arena allocations get a memory id (memid) that records the arena
107
  id, the block index within the arena bitmap, and the exclusive flag.
108
----------------------------------------------------------- */
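// `mi_memid_create_arena` below composes this memid when blocks are claimed, and
// `mi_arena_memid_indices` decomposes it again (in `_mi_arena_free`) to locate the
// owning arena and its bitmap position.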
109
110
0
static size_t mi_block_count_of_size(size_t size) {
111
0
  return _mi_divide_up(size, MI_ARENA_BLOCK_SIZE);
112
0
}
113
114
0
static size_t mi_arena_block_size(size_t bcount) {
115
0
  return (bcount * MI_ARENA_BLOCK_SIZE);
116
0
}
117
118
0
static size_t mi_arena_size(mi_arena_t* arena) {
119
0
  return mi_arena_block_size(arena->block_count);
120
0
}
121
122
0
static mi_memid_t mi_memid_create_arena(mi_arena_id_t id, bool is_exclusive, mi_bitmap_index_t bitmap_index) {
123
0
  mi_memid_t memid = _mi_memid_create(MI_MEM_ARENA);
124
0
  memid.mem.arena.id = id;
125
0
  memid.mem.arena.block_index = bitmap_index;
126
0
  memid.mem.arena.is_exclusive = is_exclusive;
127
0
  return memid;
128
0
}
129
130
0
static bool mi_arena_memid_indices(mi_memid_t memid, size_t* arena_index, mi_bitmap_index_t* bitmap_index) {
131
0
  mi_assert_internal(memid.memkind == MI_MEM_ARENA);
132
0
  *arena_index = mi_arena_id_index(memid.mem.arena.id);
133
0
  *bitmap_index = memid.mem.arena.block_index;
134
0
  return memid.mem.arena.is_exclusive;
135
0
}
136
137
138
139
/* -----------------------------------------------------------
140
  Special static area for mimalloc internal structures
141
  to avoid OS calls (for example, for the arena metadata)
142
----------------------------------------------------------- */
143
144
0
#define MI_ARENA_STATIC_MAX  (MI_INTPTR_SIZE*MI_KiB)  // 8 KiB on 64-bit
145
146
static uint8_t mi_arena_static[MI_ARENA_STATIC_MAX];
147
static _Atomic(size_t) mi_arena_static_top;
148
149
0
static void* mi_arena_static_zalloc(size_t size, size_t alignment, mi_memid_t* memid) {
150
0
  *memid = _mi_memid_none();
151
0
  if (size == 0 || size > MI_ARENA_STATIC_MAX) return NULL;
152
0
  if ((mi_atomic_load_relaxed(&mi_arena_static_top) + size) > MI_ARENA_STATIC_MAX) return NULL;
153
154
  // try to claim space
155
0
  if (alignment == 0) { alignment = 1; }
156
0
  const size_t oversize = size + alignment - 1;
157
0
  if (oversize > MI_ARENA_STATIC_MAX) return NULL;
158
0
  const size_t oldtop = mi_atomic_add_acq_rel(&mi_arena_static_top, oversize);
159
0
  size_t top = oldtop + oversize;
160
0
  if (top > MI_ARENA_STATIC_MAX) {
161
    // try to roll back, ok if this fails
162
0
    mi_atomic_cas_strong_acq_rel(&mi_arena_static_top, &top, oldtop);
163
0
    return NULL;
164
0
  }
165
166
  // success
167
0
  *memid = _mi_memid_create(MI_MEM_STATIC);
168
0
  const size_t start = _mi_align_up(oldtop, alignment);
169
0
  uint8_t* const p = &mi_arena_static[start];
170
0
  _mi_memzero(p, size);
171
0
  return p;
172
0
}
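// The static area above is a small lock-free bump allocator: the atomic add claims `oversize`
// bytes, an overflow is undone with a best-effort CAS, and callers such as `mi_arena_meta_zalloc`
// below fall back to the OS when it returns NULL.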
173
174
0
static void* mi_arena_meta_zalloc(size_t size, mi_memid_t* memid, mi_stats_t* stats) {
175
0
  *memid = _mi_memid_none();
176
177
  // try static
178
0
  void* p = mi_arena_static_zalloc(size, MI_ALIGNMENT_MAX, memid);
179
0
  if (p != NULL) return p;
180
181
  // or fall back to the OS
182
0
  return _mi_os_alloc(size, memid, stats);
183
0
}
184
185
0
static void mi_arena_meta_free(void* p, mi_memid_t memid, size_t size, mi_stats_t* stats) {
186
0
  if (mi_memkind_is_os(memid.memkind)) {
187
0
    _mi_os_free(p, size, memid, stats);
188
0
  }
189
0
  else {
190
0
    mi_assert(memid.memkind == MI_MEM_STATIC);
191
0
  }
192
0
}
193
194
0
static void* mi_arena_block_start(mi_arena_t* arena, mi_bitmap_index_t bindex) {
195
0
  return (arena->start + mi_arena_block_size(mi_bitmap_index_bit(bindex)));
196
0
}
197
198
199
/* -----------------------------------------------------------
200
  Thread safe allocation in an arena
201
----------------------------------------------------------- */
202
203
// claim the `blocks_inuse` bits
204
static bool mi_arena_try_claim(mi_arena_t* arena, size_t blocks, mi_bitmap_index_t* bitmap_idx)
205
0
{
206
0
  size_t idx = 0; // mi_atomic_load_relaxed(&arena->search_idx);  // start from last search; ok to be relaxed as the exact start does not matter
207
0
  if (_mi_bitmap_try_find_from_claim_across(arena->blocks_inuse, arena->field_count, idx, blocks, bitmap_idx)) {
208
0
    mi_atomic_store_relaxed(&arena->search_idx, mi_bitmap_index_field(*bitmap_idx));  // start search from found location next time around
209
0
    return true;
210
0
  };
211
0
  return false;
212
0
}
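// Note: as written above, `idx` is fixed at 0 (the relaxed load of `search_idx` is commented
// out), so `search_idx` is updated here but not currently used as a starting hint.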
213
214
215
/* -----------------------------------------------------------
216
  Arena Allocation
217
----------------------------------------------------------- */
218
219
static mi_decl_noinline void* mi_arena_try_alloc_at(mi_arena_t* arena, size_t arena_index, size_t needed_bcount,
220
                                                    bool commit, mi_memid_t* memid, mi_os_tld_t* tld)
221
0
{
222
0
  MI_UNUSED(arena_index);
223
0
  mi_assert_internal(mi_arena_id_index(arena->id) == arena_index);
224
225
0
  mi_bitmap_index_t bitmap_index;
226
0
  if (!mi_arena_try_claim(arena, needed_bcount, &bitmap_index)) return NULL;
227
228
  // claimed it!
229
0
  void* p = mi_arena_block_start(arena, bitmap_index);
230
0
  *memid = mi_memid_create_arena(arena->id, arena->exclusive, bitmap_index);
231
0
  memid->is_pinned = arena->memid.is_pinned;
232
233
  // none of the claimed blocks should be scheduled for a decommit
234
0
  if (arena->blocks_purge != NULL) {
235
    // this is thread safe as a potential purge only decommits parts that are not yet claimed as used (in `blocks_inuse`).
236
0
    _mi_bitmap_unclaim_across(arena->blocks_purge, arena->field_count, needed_bcount, bitmap_index);
237
0
  }
238
239
  // set the dirty bits (todo: no need for an atomic op here?)
240
0
  if (arena->memid.initially_zero && arena->blocks_dirty != NULL) {
241
0
    memid->initially_zero = _mi_bitmap_claim_across(arena->blocks_dirty, arena->field_count, needed_bcount, bitmap_index, NULL);
242
0
  }
243
244
  // set commit state
245
0
  if (arena->blocks_committed == NULL) {
246
    // always committed
247
0
    memid->initially_committed = true;
248
0
  }
249
0
  else if (commit) {
250
    // commit requested, but the range may not be committed as a whole: ensure it is committed now
251
0
    memid->initially_committed = true;
252
0
    bool any_uncommitted;
253
0
    _mi_bitmap_claim_across(arena->blocks_committed, arena->field_count, needed_bcount, bitmap_index, &any_uncommitted);
254
0
    if (any_uncommitted) {
255
0
      bool commit_zero = false;
256
0
      if (!_mi_os_commit(p, mi_arena_block_size(needed_bcount), &commit_zero, tld->stats)) {
257
0
        memid->initially_committed = false;
258
0
      }
259
0
      else {
260
0
        if (commit_zero) { memid->initially_zero = true; }
261
0
      }
262
0
    }
263
0
  }
264
0
  else {
265
    // no need to commit, but check if already fully committed
266
0
    memid->initially_committed = _mi_bitmap_is_claimed_across(arena->blocks_committed, arena->field_count, needed_bcount, bitmap_index);
267
0
  }
268
269
0
  return p;
270
0
}
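// The memid returned above carries `initially_committed` and `initially_zero`, so the caller
// can tell whether the claimed blocks still need to be committed or cleared before use.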
271
272
// allocate in a specific arena
273
static void* mi_arena_try_alloc_at_id(mi_arena_id_t arena_id, bool match_numa_node, int numa_node, size_t size, size_t alignment,
274
                                       bool commit, bool allow_large, mi_arena_id_t req_arena_id, mi_memid_t* memid, mi_os_tld_t* tld )
275
0
{
276
0
  MI_UNUSED_RELEASE(alignment);
277
0
  mi_assert_internal(alignment <= MI_SEGMENT_ALIGN);
278
0
  const size_t bcount = mi_block_count_of_size(size);
279
0
  const size_t arena_index = mi_arena_id_index(arena_id);
280
0
  mi_assert_internal(arena_index < mi_atomic_load_relaxed(&mi_arena_count));
281
0
  mi_assert_internal(size <= mi_arena_block_size(bcount));
282
283
  // Check arena suitability
284
0
  mi_arena_t* arena = mi_atomic_load_ptr_acquire(mi_arena_t, &mi_arenas[arena_index]);
285
0
  if (arena == NULL) return NULL;
286
0
  if (!allow_large && arena->is_large) return NULL;
287
0
  if (!mi_arena_id_is_suitable(arena->id, arena->exclusive, req_arena_id)) return NULL;
288
0
  if (req_arena_id == _mi_arena_id_none()) { // if not specific, check numa affinity
289
0
    const bool numa_suitable = (numa_node < 0 || arena->numa_node < 0 || arena->numa_node == numa_node);
290
0
    if (match_numa_node) { if (!numa_suitable) return NULL; }
291
0
                    else { if (numa_suitable) return NULL; }
292
0
  }
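  // note: when `match_numa_node` is false this is the second, non-affine pass from
  // `mi_arena_try_alloc`, so numa-suitable arenas are skipped here because they were
  // already tried in the first (affine) pass.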
293
294
  // try to allocate
295
0
  void* p = mi_arena_try_alloc_at(arena, arena_index, bcount, commit, memid, tld);
296
0
  mi_assert_internal(p == NULL || _mi_is_aligned(p, alignment));
297
0
  return p;
298
0
}
299
300
301
// allocate from an arena with fallback to the OS
302
static mi_decl_noinline void* mi_arena_try_alloc(int numa_node, size_t size, size_t alignment,
303
                                                  bool commit, bool allow_large,
304
                                                  mi_arena_id_t req_arena_id, mi_memid_t* memid, mi_os_tld_t* tld )
305
0
{
306
0
  MI_UNUSED(alignment);
307
0
  mi_assert_internal(alignment <= MI_SEGMENT_ALIGN);
308
0
  const size_t max_arena = mi_atomic_load_relaxed(&mi_arena_count);
309
0
  if mi_likely(max_arena == 0) return NULL;
310
311
0
  if (req_arena_id != _mi_arena_id_none()) {
312
    // try a specific arena if requested
313
0
    if (mi_arena_id_index(req_arena_id) < max_arena) {
314
0
      void* p = mi_arena_try_alloc_at_id(req_arena_id, true, numa_node, size, alignment, commit, allow_large, req_arena_id, memid, tld);
315
0
      if (p != NULL) return p;
316
0
    }
317
0
  }
318
0
  else {
319
    // try numa affine allocation
320
0
    for (size_t i = 0; i < max_arena; i++) {
321
0
      void* p = mi_arena_try_alloc_at_id(mi_arena_id_create(i), true, numa_node, size, alignment, commit, allow_large, req_arena_id, memid, tld);
322
0
      if (p != NULL) return p;
323
0
    }
324
325
    // try from another numa node instead..
326
0
    if (numa_node >= 0) {  // if numa_node was < 0 (no specific affinity requested), all arenas have been tried already
327
0
      for (size_t i = 0; i < max_arena; i++) {
328
0
        void* p = mi_arena_try_alloc_at_id(mi_arena_id_create(i), false /* only proceed if not numa local */, numa_node, size, alignment, commit, allow_large, req_arena_id, memid, tld);
329
0
        if (p != NULL) return p;
330
0
      }
331
0
    }
332
0
  }
333
0
  return NULL;
334
0
}
335
336
// try to reserve a fresh arena space
337
static bool mi_arena_reserve(size_t req_size, bool allow_large, mi_arena_id_t req_arena_id, mi_arena_id_t *arena_id)
338
0
{
339
0
  if (_mi_preloading()) return false;  // use OS only while preloading
340
0
  if (req_arena_id != _mi_arena_id_none()) return false;
341
342
0
  const size_t arena_count = mi_atomic_load_acquire(&mi_arena_count);
343
0
  if (arena_count > (MI_MAX_ARENAS - 4)) return false;
344
345
0
  size_t arena_reserve = mi_option_get_size(mi_option_arena_reserve);
346
0
  if (arena_reserve == 0) return false;
347
348
0
  if (!_mi_os_has_virtual_reserve()) {
349
0
    arena_reserve = arena_reserve/4;  // be conservative if virtual reserve is not supported (for some embedded systems for example)
350
0
  }
351
0
  arena_reserve = _mi_align_up(arena_reserve, MI_ARENA_BLOCK_SIZE);
352
0
  if (arena_count >= 8 && arena_count <= 128) {
353
0
    arena_reserve = ((size_t)1<<(arena_count/8)) * arena_reserve;  // scale up the arena sizes exponentially
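    // e.g. while reserving the 9th..16th arena the base reserve size is doubled,
    // for the 17th..24th it is quadrupled, and so on.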
354
0
  }
355
0
  if (arena_reserve < req_size) return false;  // should be able to at least handle the current allocation size
356
357
  // commit eagerly?
358
0
  bool arena_commit = false;
359
0
  if (mi_option_get(mi_option_arena_eager_commit) == 2)      { arena_commit = _mi_os_has_overcommit(); }
360
0
  else if (mi_option_get(mi_option_arena_eager_commit) == 1) { arena_commit = true; }
361
362
0
  return (mi_reserve_os_memory_ex(arena_reserve, arena_commit, allow_large, false /* exclusive */, arena_id) == 0);
363
0
}
364
365
366
void* _mi_arena_alloc_aligned(size_t size, size_t alignment, size_t align_offset, bool commit, bool allow_large,
367
                              mi_arena_id_t req_arena_id, mi_memid_t* memid, mi_os_tld_t* tld)
368
0
{
369
0
  mi_assert_internal(memid != NULL && tld != NULL);
370
0
  mi_assert_internal(size > 0);
371
0
  *memid = _mi_memid_none();
372
373
0
  const int numa_node = _mi_os_numa_node(tld); // current numa node
374
375
  // try to allocate in an arena if the alignment is small enough and the object is not too small (as for heap meta data)
376
0
  if (size >= MI_ARENA_MIN_OBJ_SIZE && alignment <= MI_SEGMENT_ALIGN && align_offset == 0) {
377
0
    void* p = mi_arena_try_alloc(numa_node, size, alignment, commit, allow_large, req_arena_id, memid, tld);
378
0
    if (p != NULL) return p;
379
380
    // otherwise, try to first eagerly reserve a new arena
381
0
    if (req_arena_id == _mi_arena_id_none()) {
382
0
      mi_arena_id_t arena_id = 0;
383
0
      if (mi_arena_reserve(size, allow_large, req_arena_id, &arena_id)) {
384
        // and try allocate in there
385
0
        mi_assert_internal(req_arena_id == _mi_arena_id_none());
386
0
        p = mi_arena_try_alloc_at_id(arena_id, true, numa_node, size, alignment, commit, allow_large, req_arena_id, memid, tld);
387
0
        if (p != NULL) return p;
388
0
      }
389
0
    }
390
0
  }
391
392
  // if we cannot use OS allocation, return NULL
393
0
  if (mi_option_is_enabled(mi_option_limit_os_alloc) || req_arena_id != _mi_arena_id_none()) {
394
0
    errno = ENOMEM;
395
0
    return NULL;
396
0
  }
397
398
  // finally, fall back to the OS
399
0
  if (align_offset > 0) {
400
0
    return _mi_os_alloc_aligned_at_offset(size, alignment, align_offset, commit, allow_large, memid, tld->stats);
401
0
  }
402
0
  else {
403
0
    return _mi_os_alloc_aligned(size, alignment, commit, allow_large, memid, tld->stats);
404
0
  }
405
0
}
406
407
void* _mi_arena_alloc(size_t size, bool commit, bool allow_large, mi_arena_id_t req_arena_id, mi_memid_t* memid, mi_os_tld_t* tld)
408
0
{
409
0
  return _mi_arena_alloc_aligned(size, MI_ARENA_BLOCK_SIZE, 0, commit, allow_large, req_arena_id, memid, tld);
410
0
}
411
412
413
0
void* mi_arena_area(mi_arena_id_t arena_id, size_t* size) {
414
0
  if (size != NULL) *size = 0;
415
0
  size_t arena_index = mi_arena_id_index(arena_id);
416
0
  if (arena_index >= MI_MAX_ARENAS) return NULL;
417
0
  mi_arena_t* arena = mi_atomic_load_ptr_acquire(mi_arena_t, &mi_arenas[arena_index]);
418
0
  if (arena == NULL) return NULL;
419
0
  if (size != NULL) { *size = mi_arena_block_size(arena->block_count); }
420
0
  return arena->start;
421
0
}
422
423
424
/* -----------------------------------------------------------
425
  Arena purge
426
----------------------------------------------------------- */
427
428
0
static long mi_arena_purge_delay(void) {
429
  // <0 = no purging allowed, 0=immediate purging, >0=milli-second delay
430
0
  return (mi_option_get(mi_option_purge_delay) * mi_option_get(mi_option_arena_purge_mult));
431
0
}
432
433
// reset or decommit in an arena and update the committed/decommit bitmaps
434
// assumes we own the area (i.e. blocks_in_use is claimed by us)
435
0
static void mi_arena_purge(mi_arena_t* arena, size_t bitmap_idx, size_t blocks, mi_stats_t* stats) {
436
0
  mi_assert_internal(arena->blocks_committed != NULL);
437
0
  mi_assert_internal(arena->blocks_purge != NULL);
438
0
  mi_assert_internal(!arena->memid.is_pinned);
439
0
  const size_t size = mi_arena_block_size(blocks);
440
0
  void* const p = mi_arena_block_start(arena, bitmap_idx);
441
0
  bool needs_recommit;
442
0
  if (_mi_bitmap_is_claimed_across(arena->blocks_committed, arena->field_count, blocks, bitmap_idx)) {
443
    // all blocks are committed, we can purge freely
444
0
    needs_recommit = _mi_os_purge(p, size, stats);
445
0
  }
446
0
  else {
447
    // some blocks are not committed -- this can happen when a partially committed block is freed
448
    // in `_mi_arena_free` and it is conservatively marked as uncommitted but still scheduled for a purge
449
    // we need to ensure we do not try to reset (as that may be invalid for uncommitted memory),
450
    // and also undo the decommit stats (as it was already adjusted)
451
0
    mi_assert_internal(mi_option_is_enabled(mi_option_purge_decommits));
452
0
    needs_recommit = _mi_os_purge_ex(p, size, false /* allow reset? */, stats);
453
0
    _mi_stat_increase(&stats->committed, size);
454
0
  }
455
456
  // clear the purged blocks
457
0
  _mi_bitmap_unclaim_across(arena->blocks_purge, arena->field_count, blocks, bitmap_idx);
458
  // update committed bitmap
459
0
  if (needs_recommit) {
460
0
    _mi_bitmap_unclaim_across(arena->blocks_committed, arena->field_count, blocks, bitmap_idx);
461
0
  }
462
0
}
463
464
// Schedule a purge. This is usually delayed to avoid repeated decommit/commit calls.
465
// Note: assumes we (still) own the area as we may purge immediately
466
0
static void mi_arena_schedule_purge(mi_arena_t* arena, size_t bitmap_idx, size_t blocks, mi_stats_t* stats) {
467
0
  mi_assert_internal(arena->blocks_purge != NULL);
468
0
  const long delay = mi_arena_purge_delay();
469
0
  if (delay < 0) return;  // is purging allowed at all?
470
471
0
  if (_mi_preloading() || delay == 0) {
472
    // decommit directly
473
0
    mi_arena_purge(arena, bitmap_idx, blocks, stats);
474
0
  }
475
0
  else {
476
    // schedule decommit
477
0
    mi_msecs_t expire = mi_atomic_loadi64_relaxed(&arena->purge_expire);
478
0
    if (expire != 0) {
479
0
      mi_atomic_addi64_acq_rel(&arena->purge_expire, delay/10);  // add smallish extra delay
480
0
    }
481
0
    else {
482
0
      mi_atomic_storei64_release(&arena->purge_expire, _mi_clock_now() + delay);
483
0
    }
484
0
    _mi_bitmap_claim_across(arena->blocks_purge, arena->field_count, blocks, bitmap_idx, NULL);
485
0
  }
486
0
}
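// The purge bits set here are consumed later by `mi_arena_try_purge`; they are also cleared
// eagerly in `mi_arena_try_alloc_at` when the corresponding blocks are claimed again.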
487
488
// purge a range of blocks
489
// return true if the full range was purged.
490
// assumes we own the area (i.e. blocks_in_use is claimed by us)
491
0
static bool mi_arena_purge_range(mi_arena_t* arena, size_t idx, size_t startidx, size_t bitlen, size_t purge, mi_stats_t* stats) {
492
0
  const size_t endidx = startidx + bitlen;
493
0
  size_t bitidx = startidx;
494
0
  bool all_purged = false;
495
0
  while (bitidx < endidx) {
496
    // count consecutive ones in the purge mask
497
0
    size_t count = 0;
498
0
    while (bitidx + count < endidx && (purge & ((size_t)1 << (bitidx + count))) != 0) {
499
0
      count++;
500
0
    }
501
0
    if (count > 0) {
502
      // found range to be purged
503
0
      const mi_bitmap_index_t range_idx = mi_bitmap_index_create(idx, bitidx);
504
0
      mi_arena_purge(arena, range_idx, count, stats);
505
0
      if (count == bitlen) {
506
0
        all_purged = true;
507
0
      }
508
0
    }
509
0
    bitidx += (count+1); // +1 to skip the zero bit (or end)
510
0
  }
511
0
  return all_purged;
512
0
}
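// Worked example (illustrative): with `purge` bits ...0110, `startidx` 0 and `bitlen` 4,
// the first iteration finds a run of 0 set bits at bit 0 and skips it, the second finds a
// run of 2 set bits at bit 1 and purges those 2 blocks, and the walk ends at bit 4 with
// `all_purged == false` since the run (2) was shorter than `bitlen` (4).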
513
514
// returns true if anything was purged
515
static bool mi_arena_try_purge(mi_arena_t* arena, mi_msecs_t now, bool force, mi_stats_t* stats)
516
0
{
517
0
  if (arena->memid.is_pinned || arena->blocks_purge == NULL) return false;
518
0
  mi_msecs_t expire = mi_atomic_loadi64_relaxed(&arena->purge_expire);
519
0
  if (expire == 0) return false;
520
0
  if (!force && expire > now) return false;
521
522
  // reset expire (if not already set concurrently)
523
0
  mi_atomic_casi64_strong_acq_rel(&arena->purge_expire, &expire, 0);
524
525
  // potential purges scheduled, walk through the bitmap
526
0
  bool any_purged = false;
527
0
  bool full_purge = true;
528
0
  for (size_t i = 0; i < arena->field_count; i++) {
529
0
    size_t purge = mi_atomic_load_relaxed(&arena->blocks_purge[i]);
530
0
    if (purge != 0) {
531
0
      size_t bitidx = 0;
532
0
      while (bitidx < MI_BITMAP_FIELD_BITS) {
533
        // find consecutive range of ones in the purge mask
534
0
        size_t bitlen = 0;
535
0
        while (bitidx + bitlen < MI_BITMAP_FIELD_BITS && (purge & ((size_t)1 << (bitidx + bitlen))) != 0) {
536
0
          bitlen++;
537
0
        }
538
        // try to claim the longest range of corresponding in_use bits
539
0
        const mi_bitmap_index_t bitmap_index = mi_bitmap_index_create(i, bitidx);
540
0
        while( bitlen > 0 ) {
541
0
          if (_mi_bitmap_try_claim(arena->blocks_inuse, arena->field_count, bitlen, bitmap_index)) {
542
0
            break;
543
0
          }
544
0
          bitlen--;
545
0
        }
546
        // `bitlen` now holds the number of bits actually claimed in `in_use`
547
0
        if (bitlen > 0) {
548
          // read purge again now that we have the in_use bits
549
0
          purge = mi_atomic_load_acquire(&arena->blocks_purge[i]);
550
0
          if (!mi_arena_purge_range(arena, i, bitidx, bitlen, purge, stats)) {
551
0
            full_purge = false;
552
0
          }
553
0
          any_purged = true;
554
          // release the claimed `in_use` bits again
555
0
          _mi_bitmap_unclaim(arena->blocks_inuse, arena->field_count, bitlen, bitmap_index);
556
0
        }
557
0
        bitidx += (bitlen+1);  // +1 to skip the zero (or end)
558
0
      } // while bitidx
559
0
    } // purge != 0
560
0
  }
561
  // if not fully purged, make sure to purge again in the future
562
0
  if (!full_purge) {
563
0
    const long delay = mi_arena_purge_delay();
564
0
    mi_msecs_t expected = 0;
565
0
    mi_atomic_casi64_strong_acq_rel(&arena->purge_expire,&expected,_mi_clock_now() + delay);
566
0
  }
567
0
  return any_purged;
568
0
}
569
570
0
static void mi_arenas_try_purge( bool force, bool visit_all, mi_stats_t* stats ) {
571
0
  if (_mi_preloading() || mi_arena_purge_delay() <= 0) return;  // nothing will be scheduled
572
573
0
  const size_t max_arena = mi_atomic_load_acquire(&mi_arena_count);
574
0
  if (max_arena == 0) return;
575
576
  // allow only one thread to purge at a time
577
0
  static mi_atomic_guard_t purge_guard;
578
0
  mi_atomic_guard(&purge_guard)
579
0
  {
580
0
    mi_msecs_t now = _mi_clock_now();
581
0
    size_t max_purge_count = (visit_all ? max_arena : 1);
582
0
    for (size_t i = 0; i < max_arena; i++) {
583
0
      mi_arena_t* arena = mi_atomic_load_ptr_acquire(mi_arena_t, &mi_arenas[i]);
584
0
      if (arena != NULL) {
585
0
        if (mi_arena_try_purge(arena, now, force, stats)) {
586
0
          if (max_purge_count <= 1) break;
587
0
          max_purge_count--;
588
0
        }
589
0
      }
590
0
    }
591
0
  }
592
0
}
593
594
595
/* -----------------------------------------------------------
596
  Arena free
597
----------------------------------------------------------- */
598
599
0
void _mi_arena_free(void* p, size_t size, size_t committed_size, mi_memid_t memid, mi_stats_t* stats) {
600
0
  mi_assert_internal(size > 0 && stats != NULL);
601
0
  mi_assert_internal(committed_size <= size);
602
0
  if (p==NULL) return;
603
0
  if (size==0) return;
604
0
  const bool all_committed = (committed_size == size);
605
606
0
  if (mi_memkind_is_os(memid.memkind)) {
607
    // was a direct OS allocation, pass through
608
0
    if (!all_committed && committed_size > 0) {
609
      // if partially committed, adjust the committed stats (as `_mi_os_free` will increase decommit by the full size)
610
0
      _mi_stat_decrease(&stats->committed, committed_size);
611
0
    }
612
0
    _mi_os_free(p, size, memid, stats);
613
0
  }
614
0
  else if (memid.memkind == MI_MEM_ARENA) {
615
    // allocated in an arena
616
0
    size_t arena_idx;
617
0
    size_t bitmap_idx;
618
0
    mi_arena_memid_indices(memid, &arena_idx, &bitmap_idx);
619
0
    mi_assert_internal(arena_idx < MI_MAX_ARENAS);
620
0
    mi_arena_t* arena = mi_atomic_load_ptr_acquire(mi_arena_t,&mi_arenas[arena_idx]);
621
0
    mi_assert_internal(arena != NULL);
622
0
    const size_t blocks = mi_block_count_of_size(size);
623
624
    // checks
625
0
    if (arena == NULL) {
626
0
      _mi_error_message(EINVAL, "trying to free from non-existent arena: %p, size %zu, memid: 0x%zx\n", p, size, memid);
627
0
      return;
628
0
    }
629
0
    mi_assert_internal(arena->field_count > mi_bitmap_index_field(bitmap_idx));
630
0
    if (arena->field_count <= mi_bitmap_index_field(bitmap_idx)) {
631
0
      _mi_error_message(EINVAL, "trying to free from non-existent arena block: %p, size %zu, memid: 0x%zx\n", p, size, memid);
632
0
      return;
633
0
    }
634
635
    // need to set all memory to undefined as some parts may still be marked as no_access (like padding etc.)
636
0
    mi_track_mem_undefined(p,size);
637
638
    // potentially decommit
639
0
    if (arena->memid.is_pinned || arena->blocks_committed == NULL) {
640
0
      mi_assert_internal(all_committed);
641
0
    }
642
0
    else {
643
0
      mi_assert_internal(arena->blocks_committed != NULL);
644
0
      mi_assert_internal(arena->blocks_purge != NULL);
645
646
0
      if (!all_committed) {
647
        // mark the entire range as no longer committed (so we recommit the full range when re-using)
648
0
        _mi_bitmap_unclaim_across(arena->blocks_committed, arena->field_count, blocks, bitmap_idx);
649
0
        mi_track_mem_noaccess(p,size);
650
0
        if (committed_size > 0) {
651
          // if partially committed, adjust the committed stats (as it will be recommitted when re-using)
652
          // in the delayed purge, we must then not count a decommit if the range is not marked as committed.
653
0
          _mi_stat_decrease(&stats->committed, committed_size);
654
0
        }
655
        // note: if not all committed, it may be that the purge will reset/decommit the entire range
656
        // that contains already decommitted parts. Since purge consistently uses reset or decommit that
657
        // works (as we should never reset decommitted parts).
658
0
      }
659
      // (delay) purge the entire range
660
0
      mi_arena_schedule_purge(arena, bitmap_idx, blocks, stats);
661
0
    }
662
663
    // and make it available to others again
664
0
    bool all_inuse = _mi_bitmap_unclaim_across(arena->blocks_inuse, arena->field_count, blocks, bitmap_idx);
665
0
    if (!all_inuse) {
666
0
      _mi_error_message(EAGAIN, "trying to free an already freed arena block: %p, size %zu\n", p, size);
667
0
      return;
668
0
    };
669
0
  }
670
0
  else {
671
    // arena was none, external, or static; nothing to do
672
0
    mi_assert_internal(memid.memkind < MI_MEM_OS);
673
0
  }
674
675
  // purge expired decommits
676
0
  mi_arenas_try_purge(false, false, stats);
677
0
}
678
679
// destroy owned arenas; this is unsafe and should only be done using `mi_option_destroy_on_exit`
680
// for dynamic libraries that are unloaded and need to release all their allocated memory.
681
0
static void mi_arenas_unsafe_destroy(void) {
682
0
  const size_t max_arena = mi_atomic_load_relaxed(&mi_arena_count);
683
0
  size_t new_max_arena = 0;
684
0
  for (size_t i = 0; i < max_arena; i++) {
685
0
    mi_arena_t* arena = mi_atomic_load_ptr_acquire(mi_arena_t, &mi_arenas[i]);
686
0
    if (arena != NULL) {
687
0
      if (arena->start != NULL && mi_memkind_is_os(arena->memid.memkind)) {
688
0
        mi_atomic_store_ptr_release(mi_arena_t, &mi_arenas[i], NULL);
689
0
        _mi_os_free(arena->start, mi_arena_size(arena), arena->memid, &_mi_stats_main);
690
0
      }
691
0
      else {
692
0
        new_max_arena = i;
693
0
      }
694
0
      mi_arena_meta_free(arena, arena->meta_memid, arena->meta_size, &_mi_stats_main);
695
0
    }
696
0
  }
697
698
  // try to lower the max arena.
699
0
  size_t expected = max_arena;
700
0
  mi_atomic_cas_strong_acq_rel(&mi_arena_count, &expected, new_max_arena);
701
0
}
702
703
// Purge the arenas; if `force_purge` is true, amenable parts are purged even if not yet expired
704
0
void _mi_arena_collect(bool force_purge, mi_stats_t* stats) {
705
0
  mi_arenas_try_purge(force_purge, true /* visit all */, stats);
706
0
}
707
708
// destroy owned arenas; this is unsafe and should only be done using `mi_option_destroy_on_exit`
709
// for dynamic libraries that are unloaded and need to release all their allocated memory.
710
0
void _mi_arena_unsafe_destroy_all(mi_stats_t* stats) {
711
0
  mi_arenas_unsafe_destroy();
712
0
  _mi_arena_collect(true /* force purge */, stats);  // purge non-owned arenas
713
0
}
714
715
// Is a pointer inside any of our arenas?
716
0
bool _mi_arena_contains(const void* p) {
717
0
  const size_t max_arena = mi_atomic_load_relaxed(&mi_arena_count);
718
0
  for (size_t i = 0; i < max_arena; i++) {
719
0
    mi_arena_t* arena = mi_atomic_load_ptr_acquire(mi_arena_t, &mi_arenas[i]);
720
0
    if (arena != NULL && arena->start <= (const uint8_t*)p && arena->start + mi_arena_block_size(arena->block_count) > (const uint8_t*)p) {
721
0
      return true;
722
0
    }
723
0
  }
724
0
  return false;
725
0
}
726
727
728
/* -----------------------------------------------------------
729
  Add an arena.
730
----------------------------------------------------------- */
731
732
0
static bool mi_arena_add(mi_arena_t* arena, mi_arena_id_t* arena_id) {
733
0
  mi_assert_internal(arena != NULL);
734
0
  mi_assert_internal((uintptr_t)mi_atomic_load_ptr_relaxed(uint8_t,&arena->start) % MI_SEGMENT_ALIGN == 0);
735
0
  mi_assert_internal(arena->block_count > 0);
736
0
  if (arena_id != NULL) { *arena_id = -1; }
737
738
0
  size_t i = mi_atomic_increment_acq_rel(&mi_arena_count);
739
0
  if (i >= MI_MAX_ARENAS) {
740
0
    mi_atomic_decrement_acq_rel(&mi_arena_count);
741
0
    return false;
742
0
  }
743
0
  arena->id = mi_arena_id_create(i);
744
0
  mi_atomic_store_ptr_release(mi_arena_t,&mi_arenas[i], arena);
745
0
  if (arena_id != NULL) { *arena_id = arena->id; }
746
0
  return true;
747
0
}
748
749
static bool mi_manage_os_memory_ex2(void* start, size_t size, bool is_large, int numa_node, bool exclusive, mi_memid_t memid, mi_arena_id_t* arena_id) mi_attr_noexcept
750
0
{
751
0
  if (arena_id != NULL) *arena_id = _mi_arena_id_none();
752
0
  if (size < MI_ARENA_BLOCK_SIZE) return false;
753
754
0
  if (is_large) {
755
0
    mi_assert_internal(memid.initially_committed && memid.is_pinned);
756
0
  }
757
758
0
  const size_t bcount = size / MI_ARENA_BLOCK_SIZE;
759
0
  const size_t fields = _mi_divide_up(bcount, MI_BITMAP_FIELD_BITS);
760
0
  const size_t bitmaps = (memid.is_pinned ? 2 : 4);
761
0
  const size_t asize  = sizeof(mi_arena_t) + (bitmaps*fields*sizeof(mi_bitmap_field_t));
762
0
  mi_memid_t meta_memid;
763
0
  mi_arena_t* arena   = (mi_arena_t*)mi_arena_meta_zalloc(asize, &meta_memid, &_mi_stats_main); // TODO: can we avoid allocating from the OS?
764
0
  if (arena == NULL) return false;
765
766
  // already zero'd due to os_alloc
767
  // _mi_memzero(arena, asize);
768
0
  arena->id = _mi_arena_id_none();
769
0
  arena->memid = memid;
770
0
  arena->exclusive = exclusive;
771
0
  arena->meta_size = asize;
772
0
  arena->meta_memid = meta_memid;
773
0
  arena->block_count = bcount;
774
0
  arena->field_count = fields;
775
0
  arena->start = (uint8_t*)start;
776
0
  arena->numa_node    = numa_node; // TODO: or get the current numa node if -1? (now it allows anyone to allocate on -1)
777
0
  arena->is_large     = is_large;
778
0
  arena->purge_expire = 0;
779
0
  arena->search_idx   = 0;
780
0
  arena->blocks_dirty = &arena->blocks_inuse[fields]; // just after inuse bitmap
781
0
  arena->blocks_committed = (arena->memid.is_pinned ? NULL : &arena->blocks_inuse[2*fields]); // just after dirty bitmap
782
0
  arena->blocks_purge  = (arena->memid.is_pinned ? NULL : &arena->blocks_inuse[3*fields]); // just after committed bitmap
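  // layout of the trailing bitmaps within the arena allocation:
  // [ blocks_inuse | blocks_dirty | blocks_committed | blocks_purge ], each `fields` words;
  // for pinned memory the committed and purge bitmaps stay NULL (only 2 bitmaps are allocated).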
783
  // initialize committed bitmap?
784
0
  if (arena->blocks_committed != NULL && arena->memid.initially_committed) {
785
0
    memset((void*)arena->blocks_committed, 0xFF, fields*sizeof(mi_bitmap_field_t)); // cast to void* to avoid atomic warning
786
0
  }
787
788
  // and claim leftover blocks if needed (so we never allocate there)
789
0
  ptrdiff_t post = (fields * MI_BITMAP_FIELD_BITS) - bcount;
790
0
  mi_assert_internal(post >= 0);
791
0
  if (post > 0) {
792
    // don't use leftover bits at the end
793
0
    mi_bitmap_index_t postidx = mi_bitmap_index_create(fields - 1, MI_BITMAP_FIELD_BITS - post);
794
0
    _mi_bitmap_claim(arena->blocks_inuse, fields, post, postidx, NULL);
795
0
  }
796
0
  return mi_arena_add(arena, arena_id);
797
798
0
}
799
800
0
bool mi_manage_os_memory_ex(void* start, size_t size, bool is_committed, bool is_large, bool is_zero, int numa_node, bool exclusive, mi_arena_id_t* arena_id) mi_attr_noexcept {
801
0
  mi_memid_t memid = _mi_memid_create(MI_MEM_EXTERNAL);
802
0
  memid.initially_committed = is_committed;
803
0
  memid.initially_zero = is_zero;
804
0
  memid.is_pinned = is_large;
805
0
  return mi_manage_os_memory_ex2(start,size,is_large,numa_node,exclusive,memid, arena_id);
806
0
}
807
808
// Reserve a range of regular OS memory
809
0
int mi_reserve_os_memory_ex(size_t size, bool commit, bool allow_large, bool exclusive, mi_arena_id_t* arena_id) mi_attr_noexcept {
810
0
  if (arena_id != NULL) *arena_id = _mi_arena_id_none();
811
0
  size = _mi_align_up(size, MI_ARENA_BLOCK_SIZE); // at least one block
812
0
  mi_memid_t memid;
813
0
  void* start = _mi_os_alloc_aligned(size, MI_SEGMENT_ALIGN, commit, allow_large, &memid, &_mi_stats_main);
814
0
  if (start == NULL) return ENOMEM;
815
0
  const bool is_large = memid.is_pinned; // todo: use separate is_large field?
816
0
  if (!mi_manage_os_memory_ex2(start, size, is_large, -1 /* numa node */, exclusive, memid, arena_id)) {
817
0
    _mi_os_free_ex(start, size, commit, memid, &_mi_stats_main);
818
0
    _mi_verbose_message("failed to reserve %zu k memory\n", _mi_divide_up(size, 1024));
819
0
    return ENOMEM;
820
0
  }
821
0
  _mi_verbose_message("reserved %zu KiB memory%s\n", _mi_divide_up(size, 1024), is_large ? " (in large os pages)" : "");
822
0
  return 0;
823
0
}
824
825
826
// Manage a range of regular OS memory
827
0
bool mi_manage_os_memory(void* start, size_t size, bool is_committed, bool is_large, bool is_zero, int numa_node) mi_attr_noexcept {
828
0
  return mi_manage_os_memory_ex(start, size, is_committed, is_large, is_zero, numa_node, false /* exclusive? */, NULL);
829
0
}
830
831
// Reserve a range of regular OS memory
832
0
int mi_reserve_os_memory(size_t size, bool commit, bool allow_large) mi_attr_noexcept {
833
0
  return mi_reserve_os_memory_ex(size, commit, allow_large, false, NULL);
834
0
}
835
836
837
/* -----------------------------------------------------------
838
  Debugging
839
----------------------------------------------------------- */
840
841
0
static size_t mi_debug_show_bitmap(const char* prefix, mi_bitmap_field_t* fields, size_t field_count ) {
842
0
  size_t inuse_count = 0;
843
0
  for (size_t i = 0; i < field_count; i++) {
844
0
    char buf[MI_BITMAP_FIELD_BITS + 1];
845
0
    uintptr_t field = mi_atomic_load_relaxed(&fields[i]);
846
0
    for (size_t bit = 0; bit < MI_BITMAP_FIELD_BITS; bit++) {
847
0
      bool inuse = ((((uintptr_t)1 << bit) & field) != 0);
848
0
      if (inuse) inuse_count++;
849
0
      buf[MI_BITMAP_FIELD_BITS - 1 - bit] = (inuse ? 'x' : '.');
850
0
    }
851
0
    buf[MI_BITMAP_FIELD_BITS] = 0;
852
0
    _mi_verbose_message("%s%s\n", prefix, buf);
853
0
  }
854
0
  return inuse_count;
855
0
}
856
857
0
void mi_debug_show_arenas(void) mi_attr_noexcept {
858
0
  size_t max_arenas = mi_atomic_load_relaxed(&mi_arena_count);
859
0
  for (size_t i = 0; i < max_arenas; i++) {
860
0
    mi_arena_t* arena = mi_atomic_load_ptr_relaxed(mi_arena_t, &mi_arenas[i]);
861
0
    if (arena == NULL) break;
862
0
    size_t inuse_count = 0;
863
0
    _mi_verbose_message("arena %zu: %zu blocks with %zu fields\n", i, arena->block_count, arena->field_count);
864
0
    inuse_count += mi_debug_show_bitmap("  ", arena->blocks_inuse, arena->field_count);
865
0
    _mi_verbose_message("  blocks in use ('x'): %zu\n", inuse_count);
866
0
  }
867
0
}
868
869
870
/* -----------------------------------------------------------
871
  Reserve a huge page arena.
872
----------------------------------------------------------- */
873
// reserve at a specific numa node
874
0
int mi_reserve_huge_os_pages_at_ex(size_t pages, int numa_node, size_t timeout_msecs, bool exclusive, mi_arena_id_t* arena_id) mi_attr_noexcept {
875
0
  if (arena_id != NULL) *arena_id = -1;
876
0
  if (pages==0) return 0;
877
0
  if (numa_node < -1) numa_node = -1;
878
0
  if (numa_node >= 0) numa_node = numa_node % _mi_os_numa_node_count();
879
0
  size_t hsize = 0;
880
0
  size_t pages_reserved = 0;
881
0
  mi_memid_t memid;
882
0
  void* p = _mi_os_alloc_huge_os_pages(pages, numa_node, timeout_msecs, &pages_reserved, &hsize, &memid);
883
0
  if (p==NULL || pages_reserved==0) {
884
0
    _mi_warning_message("failed to reserve %zu GiB huge pages\n", pages);
885
0
    return ENOMEM;
886
0
  }
887
0
  _mi_verbose_message("numa node %i: reserved %zu GiB huge pages (of the %zu GiB requested)\n", numa_node, pages_reserved, pages);
888
889
0
  if (!mi_manage_os_memory_ex2(p, hsize, true, numa_node, exclusive, memid, arena_id)) {
890
0
    _mi_os_free(p, hsize, memid, &_mi_stats_main);
891
0
    return ENOMEM;
892
0
  }
893
0
  return 0;
894
0
}
895
896
0
int mi_reserve_huge_os_pages_at(size_t pages, int numa_node, size_t timeout_msecs) mi_attr_noexcept {
897
0
  return mi_reserve_huge_os_pages_at_ex(pages, numa_node, timeout_msecs, false, NULL);
898
0
}
899
900
// reserve huge pages evenly among the given number of numa nodes (or use the available ones as detected)
901
0
int mi_reserve_huge_os_pages_interleave(size_t pages, size_t numa_nodes, size_t timeout_msecs) mi_attr_noexcept {
902
0
  if (pages == 0) return 0;
903
904
  // pages per numa node
905
0
  size_t numa_count = (numa_nodes > 0 ? numa_nodes : _mi_os_numa_node_count());
906
0
  if (numa_count <= 0) numa_count = 1;
907
0
  const size_t pages_per = pages / numa_count;
908
0
  const size_t pages_mod = pages % numa_count;
909
0
  const size_t timeout_per = (timeout_msecs==0 ? 0 : (timeout_msecs / numa_count) + 50);
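  // e.g. reserving 10 pages over 4 numa nodes gives pages_per = 2 and pages_mod = 2,
  // so nodes 0 and 1 receive 3 pages each and nodes 2 and 3 receive 2 pages each.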
910
911
  // reserve evenly among numa nodes
912
0
  for (size_t numa_node = 0; numa_node < numa_count && pages > 0; numa_node++) {
913
0
    size_t node_pages = pages_per;  // can be 0
914
0
    if (numa_node < pages_mod) node_pages++;
915
0
    int err = mi_reserve_huge_os_pages_at(node_pages, (int)numa_node, timeout_per);
916
0
    if (err) return err;
917
0
    if (pages < node_pages) {
918
0
      pages = 0;
919
0
    }
920
0
    else {
921
0
      pages -= node_pages;
922
0
    }
923
0
  }
924
925
0
  return 0;
926
0
}
927
928
0
int mi_reserve_huge_os_pages(size_t pages, double max_secs, size_t* pages_reserved) mi_attr_noexcept {
929
0
  MI_UNUSED(max_secs);
930
0
  _mi_warning_message("mi_reserve_huge_os_pages is deprecated: use mi_reserve_huge_os_pages_interleave/at instead\n");
931
0
  if (pages_reserved != NULL) *pages_reserved = 0;
932
0
  int err = mi_reserve_huge_os_pages_interleave(pages, 0, (size_t)(max_secs * 1000.0));
933
0
  if (err==0 && pages_reserved!=NULL) *pages_reserved = pages;
934
0
  return err;
935
0
}