Coverage Report

Created: 2025-07-04 06:49

/src/cpython/Objects/mimalloc/page-queue.c
All instrumented lines in this file have an execution count of 0; the file is entirely uncovered.
/*----------------------------------------------------------------------------
Copyright (c) 2018-2020, Microsoft Research, Daan Leijen
This is free software; you can redistribute it and/or modify it under the
terms of the MIT license. A copy of the license can be found in the file
"LICENSE" at the root of this distribution.
-----------------------------------------------------------------------------*/

/* -----------------------------------------------------------
  Definition of page queues for each block size
----------------------------------------------------------- */

#ifndef MI_IN_PAGE_C
#error "this file should be included from 'page.c'"
#endif

/* -----------------------------------------------------------
  Minimal alignment in machine words (i.e. `sizeof(void*)`)
----------------------------------------------------------- */

#if (MI_MAX_ALIGN_SIZE > 4*MI_INTPTR_SIZE)
  #error "define alignment for more than 4x word size for this platform"
#elif (MI_MAX_ALIGN_SIZE > 2*MI_INTPTR_SIZE)
  #define MI_ALIGN4W   // 4 machine words minimal alignment
#elif (MI_MAX_ALIGN_SIZE > MI_INTPTR_SIZE)
  #define MI_ALIGN2W   // 2 machine words minimal alignment
#else
  // ok, default alignment is 1 word
#endif
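
// Illustrative note (not from this file): on typical x86-64 targets
// MI_MAX_ALIGN_SIZE is 16 and MI_INTPTR_SIZE is 8, so MI_ALIGN2W is defined
// and small block sizes are rounded to 2-word (16-byte) multiples.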

/* -----------------------------------------------------------
  Queue query
----------------------------------------------------------- */

static inline bool mi_page_queue_is_huge(const mi_page_queue_t* pq) {
  return (pq->block_size == (MI_MEDIUM_OBJ_SIZE_MAX+sizeof(uintptr_t)));
}

static inline bool mi_page_queue_is_full(const mi_page_queue_t* pq) {
  return (pq->block_size == (MI_MEDIUM_OBJ_SIZE_MAX+(2*sizeof(uintptr_t))));
}

static inline bool mi_page_queue_is_special(const mi_page_queue_t* pq) {
  return (pq->block_size > MI_MEDIUM_OBJ_SIZE_MAX);
}
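
// Added note: these predicates work because the heap's `pages` array keeps
// two pseudo-queues after the last real bin, and (assuming the queue
// initialization elsewhere matches) their `block_size` fields are set to the
// otherwise impossible values tested above: MI_MEDIUM_OBJ_SIZE_MAX plus one
// word marks the huge queue, plus two words marks the full-page queue.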

/* -----------------------------------------------------------
  Bins
----------------------------------------------------------- */

// Return the bin for a given field size.
// Returns MI_BIN_HUGE if the size is too large.
// We use `wsize` for the size in "machine word sizes",
// i.e. byte size == `wsize*sizeof(void*)`.
static inline uint8_t mi_bin(size_t size) {
  size_t wsize = _mi_wsize_from_size(size);
  uint8_t bin;
  if (wsize <= 1) {
    bin = 1;
  }
  #if defined(MI_ALIGN4W)
  else if (wsize <= 4) {
    bin = (uint8_t)((wsize+1)&~1); // round to double word sizes
  }
  #elif defined(MI_ALIGN2W)
  else if (wsize <= 8) {
    bin = (uint8_t)((wsize+1)&~1); // round to double word sizes
  }
  #else
  else if (wsize <= 8) {
    bin = (uint8_t)wsize;
  }
  #endif
  else if (wsize > MI_MEDIUM_OBJ_WSIZE_MAX) {
    bin = MI_BIN_HUGE;
  }
  else {
    #if defined(MI_ALIGN4W)
    if (wsize <= 16) { wsize = (wsize+3)&~3; } // round to 4x word sizes
    #endif
    wsize--;
    // find the highest bit
    uint8_t b = (uint8_t)mi_bsr(wsize);  // note: wsize != 0
    // and use the top 3 bits to determine the bin (~12.5% worst internal fragmentation);
    // adjust with 3 because we do not round the first 8 sizes,
    // which each get an exact bin
    bin = ((b << 2) + (uint8_t)((wsize >> (b - 2)) & 0x03)) - 3;
    mi_assert_internal(bin < MI_BIN_HUGE);
  }
  mi_assert_internal(bin > 0 && bin <= MI_BIN_HUGE);
  return bin;
}
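
// Worked example of the binning above (illustrative, assuming 8-byte words):
// for size = 800 bytes, wsize = 100; after `wsize--`, 99 = 0b1100011, so
// b = mi_bsr(99) = 6 and (99 >> (6-2)) & 0x03 = 2, giving
// bin = (6 << 2) + 2 - 3 = 23. Every wsize in 97..112 lands in bin 23, i.e.
// sizes here are rounded up in steps of 2^(b-2) = 16 words, which is where
// the ~12.5% worst-case internal fragmentation comes from.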

/* -----------------------------------------------------------
  Queue of pages with free blocks
----------------------------------------------------------- */

uint8_t _mi_bin(size_t size) {
  return mi_bin(size);
}

size_t _mi_bin_size(uint8_t bin) {
  return _mi_heap_empty.pages[bin].block_size;
}

// Good size for allocation
size_t mi_good_size(size_t size) mi_attr_noexcept {
  if (size <= MI_MEDIUM_OBJ_SIZE_MAX) {
    return _mi_bin_size(mi_bin(size));
  }
  else {
    return _mi_align_up(size, _mi_os_page_size());
  }
}
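
// Usage sketch (added example; the exact value depends on the build's bin
// table): on a 64-bit build, mi_good_size(100) maps 100 bytes to wsize 13,
// which shares a bin with wsize 14, so it returns 14*8 = 112 bytes; asking
// for 112 bytes up front therefore wastes no space in the block. Sizes above
// MI_MEDIUM_OBJ_SIZE_MAX are just rounded up to a multiple of the OS page size.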

#if (MI_DEBUG>1)
static bool mi_page_queue_contains(mi_page_queue_t* queue, const mi_page_t* page) {
  mi_assert_internal(page != NULL);
  mi_page_t* list = queue->first;
  while (list != NULL) {
    mi_assert_internal(list->next == NULL || list->next->prev == list);
    mi_assert_internal(list->prev == NULL || list->prev->next == list);
    if (list == page) break;
    list = list->next;
  }
  return (list == page);
}
#endif

#if (MI_DEBUG>1)
static bool mi_heap_contains_queue(const mi_heap_t* heap, const mi_page_queue_t* pq) {
  return (pq >= &heap->pages[0] && pq <= &heap->pages[MI_BIN_FULL]);
}
#endif

static mi_page_queue_t* mi_page_queue_of(const mi_page_t* page) {
  uint8_t bin = (mi_page_is_in_full(page) ? MI_BIN_FULL : mi_bin(page->xblock_size));
  mi_heap_t* heap = mi_page_heap(page);
  mi_assert_internal(heap != NULL && bin <= MI_BIN_FULL);
  mi_page_queue_t* pq = &heap->pages[bin];
  mi_assert_internal(bin >= MI_BIN_HUGE || page->xblock_size == pq->block_size);
  mi_assert_expensive(mi_page_queue_contains(pq, page));
  return pq;
}

static mi_page_queue_t* mi_heap_page_queue_of(mi_heap_t* heap, const mi_page_t* page) {
  uint8_t bin = (mi_page_is_in_full(page) ? MI_BIN_FULL : mi_bin(page->xblock_size));
  mi_assert_internal(bin <= MI_BIN_FULL);
  mi_page_queue_t* pq = &heap->pages[bin];
  mi_assert_internal(mi_page_is_in_full(page) || page->xblock_size == pq->block_size);
  return pq;
}

// The current small page array is for efficiency: for each small size (up
// to 256) it points directly to the page for that size without having to
// compute the bin. This means that when the current free page queue is
// updated for a small bin, we need to update a range of entries in the
// heap's `pages_free_direct` array.
static inline void mi_heap_queue_first_update(mi_heap_t* heap, const mi_page_queue_t* pq) {
  mi_assert_internal(mi_heap_contains_queue(heap,pq));
  size_t size = pq->block_size;
  if (size > MI_SMALL_SIZE_MAX) return;

  mi_page_t* page = pq->first;
  if (pq->first == NULL) page = (mi_page_t*)&_mi_page_empty;

  // find index in the right direct page array
  size_t start;
  size_t idx = _mi_wsize_from_size(size);
  mi_page_t** pages_free = heap->pages_free_direct;

  if (pages_free[idx] == page) return;  // already set

  // find start slot
  if (idx <= 1) {
    start = 0;
  }
  else {
    // find previous size; due to minimal alignment up to 3 previous bins may need to be skipped
    uint8_t bin = mi_bin(size);
    const mi_page_queue_t* prev = pq - 1;
    while (bin == mi_bin(prev->block_size) && prev > &heap->pages[0]) {
      prev--;
    }
    start = 1 + _mi_wsize_from_size(prev->block_size);
    if (start > idx) start = idx;
  }

  // set size range to the right page
  mi_assert(start <= idx);
  for (size_t sz = start; sz <= idx; sz++) {
    pages_free[sz] = page;
  }
}
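
// Added note on the fast path this enables (a sketch; the real small-object
// path lives in the allocation code and may differ in detail): because every
// small word size maps straight to a page, a small malloc needs no bin
// computation, roughly:
//
//   mi_page_t* page = heap->pages_free_direct[_mi_wsize_from_size(size)];
//   // then pop `page->free` if non-NULL, or fall back to the slow path
//
// Keeping a whole range of word sizes pointing at the queue's first page is
// what keeps that lookup a single array access.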

/*
static bool mi_page_queue_is_empty(mi_page_queue_t* queue) {
  return (queue->first == NULL);
}
*/

static void mi_page_queue_remove(mi_page_queue_t* queue, mi_page_t* page) {
  mi_assert_internal(page != NULL);
  mi_assert_expensive(mi_page_queue_contains(queue, page));
  mi_assert_internal(page->xblock_size == queue->block_size ||
                     (page->xblock_size > MI_MEDIUM_OBJ_SIZE_MAX && mi_page_queue_is_huge(queue)) ||
                     (mi_page_is_in_full(page) && mi_page_queue_is_full(queue)));
  mi_heap_t* heap = mi_page_heap(page);

  if (page->prev != NULL) page->prev->next = page->next;
  if (page->next != NULL) page->next->prev = page->prev;
  if (page == queue->last)  queue->last = page->prev;
  if (page == queue->first) {
    queue->first = page->next;
    // update first
    mi_assert_internal(mi_heap_contains_queue(heap, queue));
    mi_heap_queue_first_update(heap,queue);
  }
  heap->page_count--;
  page->next = NULL;
  page->prev = NULL;
  // mi_atomic_store_ptr_release(mi_atomic_cast(void*, &page->heap), NULL);
  mi_page_set_in_full(page,false);
}

static void mi_page_queue_push(mi_heap_t* heap, mi_page_queue_t* queue, mi_page_t* page) {
  mi_assert_internal(mi_page_heap(page) == heap);
  mi_assert_internal(!mi_page_queue_contains(queue, page));
  #if MI_HUGE_PAGE_ABANDON
  mi_assert_internal(_mi_page_segment(page)->kind != MI_SEGMENT_HUGE);
  #endif
  mi_assert_internal(page->xblock_size == queue->block_size ||
                     (page->xblock_size > MI_MEDIUM_OBJ_SIZE_MAX) ||
                     (mi_page_is_in_full(page) && mi_page_queue_is_full(queue)));

  mi_page_set_in_full(page, mi_page_queue_is_full(queue));
  // mi_atomic_store_ptr_release(mi_atomic_cast(void*, &page->heap), heap);
  page->next = queue->first;
  page->prev = NULL;
  if (queue->first != NULL) {
    mi_assert_internal(queue->first->prev == NULL);
    queue->first->prev = page;
    queue->first = page;
  }
  else {
    queue->first = queue->last = page;
  }

  // update direct
  mi_heap_queue_first_update(heap, queue);
  heap->page_count++;
}

static void mi_page_queue_enqueue_from(mi_page_queue_t* to, mi_page_queue_t* from, mi_page_t* page) {
  mi_assert_internal(page != NULL);
  mi_assert_expensive(mi_page_queue_contains(from, page));
  mi_assert_expensive(!mi_page_queue_contains(to, page));

  mi_assert_internal((page->xblock_size == to->block_size && page->xblock_size == from->block_size) ||
                     (page->xblock_size == to->block_size && mi_page_queue_is_full(from)) ||
                     (page->xblock_size == from->block_size && mi_page_queue_is_full(to)) ||
                     (page->xblock_size > MI_LARGE_OBJ_SIZE_MAX && mi_page_queue_is_huge(to)) ||
                     (page->xblock_size > MI_LARGE_OBJ_SIZE_MAX && mi_page_queue_is_full(to)));

  mi_heap_t* heap = mi_page_heap(page);
  if (page->prev != NULL) page->prev->next = page->next;
  if (page->next != NULL) page->next->prev = page->prev;
  if (page == from->last)  from->last = page->prev;
  if (page == from->first) {
    from->first = page->next;
    // update first
    mi_assert_internal(mi_heap_contains_queue(heap, from));
    mi_heap_queue_first_update(heap, from);
  }

  page->prev = to->last;
  page->next = NULL;
  if (to->last != NULL) {
    mi_assert_internal(heap == mi_page_heap(to->last));
    to->last->next = page;
    to->last = page;
  }
  else {
    to->first = page;
    to->last = page;
    mi_heap_queue_first_update(heap, to);
  }

  mi_page_set_in_full(page, mi_page_queue_is_full(to));
}
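
// Added note on the asymmetry with mi_page_queue_push: pushes insert at the
// head of a queue, while enqueue_from appends at the tail, so a page moved
// between queues (e.g. when it leaves the full queue) lines up behind the
// pages that allocation is already drawing from.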

// Only called from `mi_heap_absorb`.
size_t _mi_page_queue_append(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_queue_t* append) {
  mi_assert_internal(mi_heap_contains_queue(heap,pq));
  mi_assert_internal(pq->block_size == append->block_size);

  if (append->first==NULL) return 0;

  // set append pages to new heap and count
  size_t count = 0;
  for (mi_page_t* page = append->first; page != NULL; page = page->next) {
    // inline `mi_page_set_heap` to avoid a wrong assertion during absorption;
    // in this case it is ok to be delayed freeing since both the "to" and "from" heap are still alive.
    mi_atomic_store_release(&page->xheap, (uintptr_t)heap);
    // set the flag to delayed free (not overriding NEVER_DELAYED_FREE) which has as a
    // side effect that it spins until any DELAYED_FREEING is finished. This ensures
    // that after appending only the new heap will be used for delayed free operations.
    _mi_page_use_delayed_free(page, MI_USE_DELAYED_FREE, false);
    count++;
  }

  if (pq->last==NULL) {
    // take over afresh
    mi_assert_internal(pq->first==NULL);
    pq->first = append->first;
    pq->last = append->last;
    mi_heap_queue_first_update(heap, pq);
  }
  else {
    // append to end
    mi_assert_internal(pq->last!=NULL);
    mi_assert_internal(append->first!=NULL);
    pq->last->next = append->first;
    append->first->prev = pq->last;
    pq->last = append->last;
  }
  return count;
}
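
// Context sketch (hypothetical shape of the caller, `mi_heap_absorb` in
// heap.c; shown for illustration only): the absorbing heap appends the
// queues of the dying heap bin by bin and adjusts both page counts:
//
//   for (size_t bin = 0; bin <= MI_BIN_FULL; bin++) {
//     size_t n = _mi_page_queue_append(heap, &heap->pages[bin], &from->pages[bin]);
//     heap->page_count += n;
//     from->page_count -= n;
//   }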