/src/cpython/Objects/mimalloc/segment-map.c
Line | Count | Source
1 | | /* ---------------------------------------------------------------------------- |
2 | | Copyright (c) 2019-2023, Microsoft Research, Daan Leijen |
3 | | This is free software; you can redistribute it and/or modify it under the |
4 | | terms of the MIT license. A copy of the license can be found in the file |
5 | | "LICENSE" at the root of this distribution. |
6 | | -----------------------------------------------------------------------------*/ |
7 | | |
8 | | /* ----------------------------------------------------------- |
9 | | The following functions are to reliably find the segment or |
10 | | block that encompasses any pointer p (or NULL if it is not |
11 | | in any of our segments). |
12 | | We maintain a bitmap of all memory with 1 bit per MI_SEGMENT_SIZE (64MiB) |
13 | | set to 1 if it contains the segment meta data. |
14 | | ----------------------------------------------------------- */ |
15 | | #include "mimalloc.h" |
16 | | #include "mimalloc/internal.h" |
17 | | #include "mimalloc/atomic.h" |
18 | | |
19 | | #if (MI_INTPTR_SIZE==8) |
20 | 0 | #define MI_MAX_ADDRESS ((size_t)40 << 40) // 40TB (to include huge page areas) |
21 | | #else |
22 | | #define MI_MAX_ADDRESS ((size_t)2 << 30) // 2Gb |
23 | | #endif |
24 | | |
25 | 0 | #define MI_SEGMENT_MAP_BITS (MI_MAX_ADDRESS / MI_SEGMENT_SIZE) |
26 | 0 | #define MI_SEGMENT_MAP_SIZE (MI_SEGMENT_MAP_BITS / 8) |
27 | 0 | #define MI_SEGMENT_MAP_WSIZE (MI_SEGMENT_MAP_SIZE / MI_INTPTR_SIZE) |
28 | | |
29 | | static _Atomic(uintptr_t) mi_segment_map[MI_SEGMENT_MAP_WSIZE + 1]; // 2KiB per TB with 64MiB segments |
30 | | |
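A quick sanity check of the sizing arithmetic above: on a 64-bit build with the constants the comments name (MI_INTPTR_SIZE == 8, MI_SEGMENT_SIZE == 64MiB, MI_MAX_ADDRESS == 40TiB), one bit per segment gives 655360 bits, an 80KiB map, 10240 words, and the "2KiB per TB" figure from the comment. This standalone sketch is not part of the source file; the constants are assumptions taken from the comments:

    #include <stdio.h>
    #include <stdint.h>

    int main(void) {
      const size_t seg_size  = (size_t)1 << 26;               // assumed MI_SEGMENT_SIZE (64MiB)
      const size_t max_addr  = (size_t)40 << 40;              // assumed MI_MAX_ADDRESS (40TiB)
      const size_t map_bits  = max_addr / seg_size;           // one bit per segment: 655360
      const size_t map_size  = map_bits / 8;                  // 81920 bytes (80KiB)
      const size_t map_wsize = map_size / sizeof(uintptr_t);  // 10240 64-bit words
      const size_t per_tb    = ((size_t)1 << 40) / seg_size / 8; // map bytes per TiB: 2048 (2KiB)
      printf("bits=%zu bytes=%zu words=%zu per-TiB=%zu\n",
             map_bits, map_size, map_wsize, per_tb);
      return 0;
    }
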
31 | 0 | static size_t mi_segment_map_index_of(const mi_segment_t* segment, size_t* bitidx) { |
32 | 0 | mi_assert_internal(_mi_ptr_segment(segment + 1) == segment); // is it aligned on MI_SEGMENT_SIZE? |
33 | 0 | if ((uintptr_t)segment >= MI_MAX_ADDRESS) { |
34 | 0 | *bitidx = 0; |
35 | 0 | return MI_SEGMENT_MAP_WSIZE; |
36 | 0 | } |
37 | 0 | else { |
38 | 0 | const uintptr_t segindex = ((uintptr_t)segment) / MI_SEGMENT_SIZE; |
39 | 0 | *bitidx = segindex % MI_INTPTR_BITS; |
40 | 0 | const size_t mapindex = segindex / MI_INTPTR_BITS; |
41 | 0 | mi_assert_internal(mapindex < MI_SEGMENT_MAP_WSIZE); |
42 | 0 | return mapindex; |
43 | 0 | } |
44 | 0 | } |
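To make the index calculation concrete: the segment address is divided by the segment size, and the resulting slot number splits into a bit position within a word and a word index into the map. A minimal sketch with a hypothetical segment address at 4GiB, again assuming 64-bit words and a 64MiB segment size:

    #include <stddef.h>
    #include <stdint.h>
    #include <assert.h>

    int main(void) {
      const uintptr_t seg_size      = (uintptr_t)1 << 26;  // assumed MI_SEGMENT_SIZE (64MiB)
      const uintptr_t bits_per_word = 64;                   // assumed MI_INTPTR_BITS
      uintptr_t segment = (uintptr_t)1 << 32;               // hypothetical segment at 4GiB

      uintptr_t segindex = segment / seg_size;                  // 64th segment-sized slot
      size_t bitidx   = (size_t)(segindex % bits_per_word);     // bit 0 ...
      size_t mapindex = (size_t)(segindex / bits_per_word);     // ... of word 1

      assert(bitidx == 0 && mapindex == 1);
      return 0;
    }
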
45 | | |
46 | 0 | void _mi_segment_map_allocated_at(const mi_segment_t* segment) { |
47 | 0 | size_t bitidx; |
48 | 0 | size_t index = mi_segment_map_index_of(segment, &bitidx); |
49 | 0 | mi_assert_internal(index <= MI_SEGMENT_MAP_WSIZE); |
50 | 0 | if (index==MI_SEGMENT_MAP_WSIZE) return; |
51 | 0 | uintptr_t mask = mi_atomic_load_relaxed(&mi_segment_map[index]); |
52 | 0 | uintptr_t newmask; |
53 | 0 | do { |
54 | 0 | newmask = (mask | ((uintptr_t)1 << bitidx)); |
55 | 0 | } while (!mi_atomic_cas_weak_release(&mi_segment_map[index], &mask, newmask)); |
56 | 0 | } |
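The set-bit loop above is the usual compare-and-swap retry pattern: OR in the bit, attempt the swap, and on failure reload the current value and retry. A minimal standalone equivalent using plain C11 atomics rather than mimalloc's wrappers might look like the following; `map_set_bit` and `map_word` are hypothetical names used only for illustration:

    #include <stddef.h>
    #include <stdint.h>
    #include <stdatomic.h>

    static _Atomic(uintptr_t) map_word;   // stand-in for one mi_segment_map[] word

    static void map_set_bit(size_t bitidx) {
      uintptr_t mask = atomic_load_explicit(&map_word, memory_order_relaxed);
      uintptr_t newmask;
      do {
        newmask = mask | ((uintptr_t)1 << bitidx);
        // on failure, `mask` is refreshed with the current word value and we retry
      } while (!atomic_compare_exchange_weak_explicit(&map_word, &mask, newmask,
                                                      memory_order_release,
                                                      memory_order_relaxed));
    }

The same bit could also be set with a single atomic_fetch_or_explicit; the CAS loop here simply mirrors the structure of the function above.
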
57 | | |
58 | 0 | void _mi_segment_map_freed_at(const mi_segment_t* segment) { |
59 | 0 | size_t bitidx; |
60 | 0 | size_t index = mi_segment_map_index_of(segment, &bitidx); |
61 | 0 | mi_assert_internal(index <= MI_SEGMENT_MAP_WSIZE); |
62 | 0 | if (index == MI_SEGMENT_MAP_WSIZE) return; |
63 | 0 | uintptr_t mask = mi_atomic_load_relaxed(&mi_segment_map[index]); |
64 | 0 | uintptr_t newmask; |
65 | 0 | do { |
66 | 0 | newmask = (mask & ~((uintptr_t)1 << bitidx)); |
67 | 0 | } while (!mi_atomic_cas_weak_release(&mi_segment_map[index], &mask, newmask)); |
68 | 0 | } |
69 | | |
70 | | // Determine the segment belonging to a pointer or NULL if it is not in a valid segment. |
71 | 0 | static mi_segment_t* _mi_segment_of(const void* p) { |
72 | 0 | if (p == NULL) return NULL; |
73 | 0 | mi_segment_t* segment = _mi_ptr_segment(p); |
74 | 0 | mi_assert_internal(segment != NULL); |
75 | 0 | size_t bitidx; |
76 | 0 | size_t index = mi_segment_map_index_of(segment, &bitidx); |
77 | | // fast path: for any pointer to valid small/medium/large object or first MI_SEGMENT_SIZE in huge |
78 | 0 | const uintptr_t mask = mi_atomic_load_relaxed(&mi_segment_map[index]); |
79 | 0 | if mi_likely((mask & ((uintptr_t)1 << bitidx)) != 0) { |
80 | 0 | return segment; // yes, allocated by us |
81 | 0 | } |
82 | 0 | if (index==MI_SEGMENT_MAP_WSIZE) return NULL; |
83 | | |
84 | | // TODO: maintain a max/min allocated range for more efficient rejection of invalid pointers? |
85 | | |
86 | | // search downwards for the first segment in case it is an interior pointer |
87 | | // could be slow but searches in MI_INTPTR_SIZE * MI_SEGMENT_SIZE (512MiB) steps through |
88 | | // valid huge objects |
89 | | // note: we could maintain a lowest index to speed up the path for invalid pointers? |
90 | 0 | size_t lobitidx; |
91 | 0 | size_t loindex; |
92 | 0 | uintptr_t lobits = mask & (((uintptr_t)1 << bitidx) - 1); |
93 | 0 | if (lobits != 0) { |
94 | 0 | loindex = index; |
95 | 0 | lobitidx = mi_bsr(lobits); // lobits != 0 |
96 | 0 | } |
97 | 0 | else if (index == 0) { |
98 | 0 | return NULL; |
99 | 0 | } |
100 | 0 | else { |
101 | 0 | mi_assert_internal(index > 0); |
102 | 0 | uintptr_t lomask = mask; |
103 | 0 | loindex = index; |
104 | 0 | do { |
105 | 0 | loindex--; |
106 | 0 | lomask = mi_atomic_load_relaxed(&mi_segment_map[loindex]); |
107 | 0 | } while (lomask != 0 && loindex > 0); |
108 | 0 | if (lomask == 0) return NULL; |
109 | 0 | lobitidx = mi_bsr(lomask); // lomask != 0 |
110 | 0 | } |
111 | 0 | mi_assert_internal(loindex < MI_SEGMENT_MAP_WSIZE); |
112 | | // take difference as the addresses could be larger than the MAX_ADDRESS space. |
113 | 0 | size_t diff = (((index - loindex) * (8*MI_INTPTR_SIZE)) + bitidx - lobitidx) * MI_SEGMENT_SIZE; |
114 | 0 | segment = (mi_segment_t*)((uint8_t*)segment - diff); |
115 | |
116 | 0 | if (segment == NULL) return NULL; |
117 | 0 | mi_assert_internal((void*)segment < p); |
118 | 0 | bool cookie_ok = (_mi_ptr_cookie(segment) == segment->cookie); |
119 | 0 | mi_assert_internal(cookie_ok); |
120 | 0 | if mi_unlikely(!cookie_ok) return NULL; |
121 | 0 | if (((uint8_t*)segment + mi_segment_size(segment)) <= (uint8_t*)p) return NULL; // outside the range |
122 | 0 | mi_assert_internal(p >= (void*)segment && (uint8_t*)p < (uint8_t*)segment + mi_segment_size(segment)); |
123 | 0 | return segment; |
124 | 0 | } |
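The interior-pointer recovery works purely in map coordinates: if the queried address maps to word `index`, bit `bitidx`, and the nearest set bit at or below it is at word `loindex`, bit `lobitidx`, then the candidate segment header lies `((index - loindex) * 64 + bitidx - lobitidx)` segments below the address's own segment-aligned base. A small sketch of that arithmetic with made-up map coordinates, assuming 64-bit words and 64MiB segments:

    #include <stddef.h>
    #include <stdint.h>
    #include <assert.h>

    int main(void) {
      const size_t seg_size  = (size_t)1 << 26;  // assumed MI_SEGMENT_SIZE (64MiB)
      const size_t word_bits = 64;               // bits per map word (8 * MI_INTPTR_SIZE)

      // hypothetical coordinates: the pointer maps to (index=2, bitidx=5),
      // the nearest set bit below it sits at (loindex=1, lobitidx=60)
      size_t index = 2, bitidx = 5;
      size_t loindex = 1, lobitidx = 60;

      size_t diff = (((index - loindex) * word_bits) + bitidx - lobitidx) * seg_size;
      // (1*64 + 5 - 60) = 9 segments, i.e. 9 * 64MiB = 576MiB below the aligned base
      assert(diff == 9 * seg_size);
      return 0;
    }
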
125 | | |
126 | | // Is this a valid pointer in our heap? |
127 | 0 | static bool mi_is_valid_pointer(const void* p) { |
128 | 0 | return ((_mi_segment_of(p) != NULL) || (_mi_arena_contains(p))); |
129 | 0 | } |
130 | | |
131 | 0 | mi_decl_nodiscard mi_decl_export bool mi_is_in_heap_region(const void* p) mi_attr_noexcept { |
132 | 0 | return mi_is_valid_pointer(p); |
133 | 0 | } |
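For callers, mi_is_in_heap_region is the exported entry point for this whole lookup. A minimal usage sketch, assuming the program includes mimalloc.h and links against mimalloc:

    #include <mimalloc.h>
    #include <stdio.h>

    int main(void) {
      void* p = mi_malloc(64);
      int on_stack = 0;
      // a mimalloc-allocated pointer should be recognized; a stack address should not
      printf("heap ptr:  %d\n", mi_is_in_heap_region(p));          // expected: 1
      printf("stack ptr: %d\n", mi_is_in_heap_region(&on_stack));  // expected: 0
      mi_free(p);
      return 0;
    }
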
134 | | |
135 | | /* |
136 | | // Return the full segment range belonging to a pointer |
137 | | static void* mi_segment_range_of(const void* p, size_t* size) { |
138 | | mi_segment_t* segment = _mi_segment_of(p); |
139 | | if (segment == NULL) { |
140 | | if (size != NULL) *size = 0; |
141 | | return NULL; |
142 | | } |
143 | | else { |
144 | | if (size != NULL) *size = segment->segment_size; |
145 | | return segment; |
146 | | } |
147 | | mi_assert_expensive(page == NULL || mi_segment_is_valid(_mi_page_segment(page),tld)); |
148 | | mi_assert_internal(page == NULL || (mi_segment_page_size(_mi_page_segment(page)) - (MI_SECURE == 0 ? 0 : _mi_os_page_size())) >= block_size); |
149 | | mi_reset_delayed(tld); |
150 | | mi_assert_internal(page == NULL || mi_page_not_in_queue(page, tld)); |
151 | | return page; |
152 | | } |
153 | | */ |