/src/abseil-cpp/absl/base/internal/low_level_alloc.cc
// Copyright 2017 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// A low-level allocator that can be used by other low-level
// modules without introducing dependency cycles.
// This allocator is slow and wasteful of memory;
// it should not be used when performance is key.
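
// Usage sketch (illustrative only, not part of the library): create a private
// arena, allocate from it, and release storage with Free(), which locates the
// owning arena through the block header, so no arena argument is needed.
//
//   using absl::base_internal::LowLevelAlloc;
//   LowLevelAlloc::Arena *arena = LowLevelAlloc::NewArena(0);
//   void *p = LowLevelAlloc::AllocWithArena(128, arena);
//   LowLevelAlloc::Free(p);
//   LowLevelAlloc::DeleteArena(arena);  // succeeds once all blocks are freed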

#include "absl/base/internal/low_level_alloc.h"

#include <optional>
#include <type_traits>

#include "absl/base/call_once.h"
#include "absl/base/config.h"
#include "absl/base/internal/direct_mmap.h"
#include "absl/base/internal/scheduling_mode.h"
#include "absl/base/macros.h"
#include "absl/base/thread_annotations.h"

// LowLevelAlloc requires that the platform support low-level
// allocation of virtual memory. Platforms lacking this cannot use
// LowLevelAlloc.
#ifndef ABSL_LOW_LEVEL_ALLOC_MISSING

#ifndef _WIN32
#include <pthread.h>
#include <signal.h>
#include <sys/mman.h>
#include <unistd.h>
#else
#include <windows.h>
#endif

#ifdef __linux__
#include <sys/prctl.h>
#endif

#include <string.h>

#include <algorithm>
#include <atomic>
#include <cerrno>
#include <cstddef>
#include <new>  // for placement-new

#include "absl/base/dynamic_annotations.h"
#include "absl/base/internal/raw_logging.h"
#include "absl/base/internal/spinlock.h"

#if defined(MAP_ANON) && !defined(MAP_ANONYMOUS)
#define MAP_ANONYMOUS MAP_ANON
#endif

namespace absl {
ABSL_NAMESPACE_BEGIN
namespace base_internal {

// A first-fit allocator with amortized logarithmic free() time.

// ---------------------------------------------------------------------------
static const int kMaxLevel = 30;

namespace {
// This struct describes one allocated block, or one free block.
struct AllocList {
  struct Header {
    // Size of entire region, including this field. Must be
    // first. Valid in both allocated and unallocated blocks.
    uintptr_t size;

    // kMagicAllocated or kMagicUnallocated xor this.
    uintptr_t magic;

    // Pointer to parent arena.
    LowLevelAlloc::Arena *arena;

    // Aligns regions to 0 mod 2*sizeof(void*).
    void *dummy_for_alignment;
  } header;

  // Next two fields: in unallocated blocks: freelist skiplist data
  //                  in allocated blocks: overlaps with client data

  // Levels in skiplist used.
  int levels;

  // Actually has levels elements. The AllocList node may not have room
  // for all kMaxLevel entries. See max_fit in LLA_SkiplistLevels().
  AllocList *next[kMaxLevel];
};
}  // namespace

// ---------------------------------------------------------------------------
// A trivial skiplist implementation. This is used to keep the freelist
// in address order while taking only logarithmic time per insert and delete.
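//
// A node with `levels` == k appears in next[0..k-1]. For example (a
// hypothetical picture, purely to illustrate the shape), a freelist holding
// blocks A < B < C might look like:
//
//   level 2: head --------------------------> C -> nullptr
//   level 1: head ------> A ----------------> C -> nullptr
//   level 0: head ------> A ------> B ------> C -> nullptr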

// An integer approximation of log2(size/base)
// Requires size >= base.
static int IntLog2(size_t size, size_t base) {
  int result = 0;
  for (size_t i = size; i > base; i >>= 1) {  // i == floor(size/2**result)
    result++;
  }
  // floor(size / 2**result) <= base < floor(size / 2**(result-1))
  // => log2(size/(base+1)) <= result < 1+log2(size/base)
  // => result ~= log2(size/base)
  return result;
}
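
// For example, IntLog2(1024, 16) == 6: 1024 is halved six times before the
// loop reaches 16, and log2(1024/16) == 6 exactly.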

// Return a random integer n:  p(n)=1/(2**n) if 1 <= n; p(n)=0 if n < 1.
static int Random(uint32_t *state) {
  uint32_t r = *state;
  int result = 1;
  while ((((r = r * 1103515245 + 12345) >> 30) & 1) == 0) {
    result++;
  }
  *state = r;
  return result;
}
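
// Under this geometric distribution the expected value of the result is
// sum(n / 2**n) == 2, so nodes average about two random levels on top of the
// log2(size / base) term added in LLA_SkiplistLevels() below.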

// Return a number of skiplist levels for a node of size bytes, where
// base is the minimum node size. Compute level=log2(size / base)+n
// where n is 1 if random is null and otherwise a random number generated with
// the standard distribution for a skiplist: See Random() above.
// Bigger nodes tend to have more skiplist levels due to the log2(size / base)
// term, so first-fit searches touch fewer nodes. "level" is clipped so
// level<kMaxLevel and next[level-1] will fit in the node.
// 0 < LLA_SkiplistLevels(x,y,false) <= LLA_SkiplistLevels(x,y,true) < kMaxLevel
static int LLA_SkiplistLevels(size_t size, size_t base, uint32_t *random) {
  // max_fit is the maximum number of levels that will fit in a node for the
  // given size. We can't return more than max_fit, no matter what the
  // random number generator says.
  size_t max_fit = (size - offsetof(AllocList, next)) / sizeof(AllocList *);
  int level = IntLog2(size, base) + (random != nullptr ? Random(random) : 1);
  if (static_cast<size_t>(level) > max_fit) level = static_cast<int>(max_fit);
  if (level > kMaxLevel - 1) level = kMaxLevel - 1;
  ABSL_RAW_CHECK(level >= 1, "block not big enough for even one level");
  return level;
}
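
// Worked example, assuming a typical 64-bit layout (a 32-byte Header and the
// 4-byte `levels` field padded so `next` starts at offset 40): a minimum-size
// 64-byte block has max_fit == (64 - 40) / 8 == 3, so it can hold at most
// three skiplist levels regardless of what Random() returns.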

// Return "atleast", the first element of AllocList *head s.t. *atleast >= *e.
// For 0 <= i < head->levels, set prev[i] to "no_greater", where no_greater
// points to the last element at level i in the AllocList less than *e, or is
// head if no such element exists.
static AllocList *LLA_SkiplistSearch(AllocList *head, AllocList *e,
                                     AllocList **prev) {
  AllocList *p = head;
  for (int level = head->levels - 1; level >= 0; level--) {
    for (AllocList *n; (n = p->next[level]) != nullptr && n < e; p = n) {
    }
    prev[level] = p;
  }
  return (head->levels == 0) ? nullptr : prev[0]->next[0];
}

// Insert element *e into AllocList *head. Set prev[] as LLA_SkiplistSearch.
// Requires that e->levels be previously set by the caller (using
// LLA_SkiplistLevels())
static void LLA_SkiplistInsert(AllocList *head, AllocList *e,
                               AllocList **prev) {
  LLA_SkiplistSearch(head, e, prev);
  for (; head->levels < e->levels; head->levels++) {  // extend prev pointers
    prev[head->levels] = head;                        // to all *e's levels
  }
  for (int i = 0; i != e->levels; i++) {  // add element to list
    e->next[i] = prev[i]->next[i];
    prev[i]->next[i] = e;
  }
}
183 | | // Remove element *e from AllocList *head. Set prev[] as LLA_SkiplistSearch(). |
184 | | // Requires that e->levels be previous set by the caller (using |
185 | | // LLA_SkiplistLevels()) |
186 | | static void LLA_SkiplistDelete(AllocList *head, AllocList *e, |
187 | 2.38k | AllocList **prev) { |
188 | 2.38k | AllocList *found = LLA_SkiplistSearch(head, e, prev); |
189 | 2.38k | ABSL_RAW_CHECK(e == found, "element not in freelist"); |
190 | 28.4k | for (int i = 0; i != e->levels && prev[i]->next[i] == e; i++) { |
191 | 26.0k | prev[i]->next[i] = e->next[i]; |
192 | 26.0k | } |
193 | 13.4k | while (head->levels > 0 && head->next[head->levels - 1] == nullptr) { |
194 | 11.0k | head->levels--; // reduce head->levels if level unused |
195 | 11.0k | } |
196 | 2.38k | } |

// ---------------------------------------------------------------------------
// Arena implementation

// Metadata for an LowLevelAlloc arena instance.
struct LowLevelAlloc::Arena {
  // Constructs an arena with the given LowLevelAlloc flags.
  explicit Arena(uint32_t flags_value);

  base_internal::SpinLock mu;
  // Head of free list, sorted by address
  AllocList freelist ABSL_GUARDED_BY(mu);
  // Count of allocated blocks
  int32_t allocation_count ABSL_GUARDED_BY(mu);
  // flags passed to NewArena
  const uint32_t flags;
  // Result of sysconf(_SC_PAGESIZE)
  const size_t pagesize;
  // Lowest power of two >= max(16, sizeof(AllocList::Header))
  const size_t round_up;
  // Smallest allocation block size
  const size_t min_size;
  // PRNG state
  uint32_t random ABSL_GUARDED_BY(mu);
};

namespace {
// Static storage space for the lazily-constructed, default global arena
// instances. We require this space because the whole point of LowLevelAlloc
// is to avoid relying on malloc/new.
alignas(LowLevelAlloc::Arena) unsigned char default_arena_storage[sizeof(
    LowLevelAlloc::Arena)];
alignas(LowLevelAlloc::Arena) unsigned char unhooked_arena_storage[sizeof(
    LowLevelAlloc::Arena)];
#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
alignas(
    LowLevelAlloc::Arena) unsigned char unhooked_async_sig_safe_arena_storage
    [sizeof(LowLevelAlloc::Arena)];
#endif

// We must use LowLevelCallOnce here to construct the global arenas, rather than
// using function-level statics, to avoid recursively invoking the scheduler.
absl::once_flag create_globals_once;

void CreateGlobalArenas() {
  new (&default_arena_storage)
      LowLevelAlloc::Arena(LowLevelAlloc::kCallMallocHook);
  new (&unhooked_arena_storage) LowLevelAlloc::Arena(0);
#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
  new (&unhooked_async_sig_safe_arena_storage)
      LowLevelAlloc::Arena(LowLevelAlloc::kAsyncSignalSafe);
#endif
}

// Returns a global arena that does not call into hooks. Used by NewArena()
// when kCallMallocHook is not set.
LowLevelAlloc::Arena *UnhookedArena() {
  base_internal::LowLevelCallOnce(&create_globals_once, CreateGlobalArenas);
  return reinterpret_cast<LowLevelAlloc::Arena *>(&unhooked_arena_storage);
}

#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
// Returns a global arena that is async-signal safe. Used by NewArena() when
// kAsyncSignalSafe is set.
LowLevelAlloc::Arena *UnhookedAsyncSigSafeArena() {
  base_internal::LowLevelCallOnce(&create_globals_once, CreateGlobalArenas);
  return reinterpret_cast<LowLevelAlloc::Arena *>(
      &unhooked_async_sig_safe_arena_storage);
}
#endif

}  // namespace

// Returns the default arena, as used by LowLevelAlloc::Alloc() and friends.
LowLevelAlloc::Arena *LowLevelAlloc::DefaultArena() {
  base_internal::LowLevelCallOnce(&create_globals_once, CreateGlobalArenas);
  return reinterpret_cast<LowLevelAlloc::Arena *>(&default_arena_storage);
}

// magic numbers to identify allocated and unallocated blocks
static const uintptr_t kMagicAllocated = 0x4c833e95U;
static const uintptr_t kMagicUnallocated = ~kMagicAllocated;

namespace {
class ABSL_SCOPED_LOCKABLE ArenaLock {
 public:
  explicit ArenaLock(LowLevelAlloc::Arena *arena)
      ABSL_EXCLUSIVE_LOCK_FUNCTION(arena->mu)
      : arena_(arena) {
#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
    if ((arena->flags & LowLevelAlloc::kAsyncSignalSafe) != 0) {
      sigset_t all;
      sigfillset(&all);
      mask_valid_ = pthread_sigmask(SIG_BLOCK, &all, &mask_) == 0;
    }
#endif
    arena_->mu.Lock();
  }
  ~ArenaLock() { ABSL_RAW_CHECK(left_, "haven't left Arena region"); }
  void Leave() ABSL_UNLOCK_FUNCTION() {
    arena_->mu.Unlock();
#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
    if (mask_valid_) {
      const int err = pthread_sigmask(SIG_SETMASK, &mask_, nullptr);
      if (err != 0) {
        ABSL_RAW_LOG(FATAL, "pthread_sigmask failed: %d", err);
      }
    }
#endif
    left_ = true;
  }

 private:
  bool left_ = false;  // whether left region
#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
  bool mask_valid_ = false;
  sigset_t mask_;  // old mask of blocked signals
#endif
  LowLevelAlloc::Arena *arena_;
  ArenaLock(const ArenaLock &) = delete;
  ArenaLock &operator=(const ArenaLock &) = delete;
};
}  // namespace

// create an appropriate magic number for an object at "ptr"
// "magic" should be kMagicAllocated or kMagicUnallocated
inline static uintptr_t Magic(uintptr_t magic, AllocList::Header *ptr) {
  return magic ^ reinterpret_cast<uintptr_t>(ptr);
}
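
// For example, an allocated block whose header sits at address h must satisfy
// h->magic == (kMagicAllocated ^ reinterpret_cast<uintptr_t>(h)). Mixing in
// the address means a stale or misdirected pointer fails the check even if
// its bytes were copied verbatim from a valid header elsewhere.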

namespace {
size_t GetPageSize() {
#ifdef _WIN32
  SYSTEM_INFO system_info;
  GetSystemInfo(&system_info);
  return std::max(system_info.dwPageSize, system_info.dwAllocationGranularity);
#elif defined(__wasm__) || defined(__asmjs__) || defined(__hexagon__)
  return static_cast<size_t>(getpagesize());
#else
  return static_cast<size_t>(sysconf(_SC_PAGESIZE));
#endif
}

size_t RoundedUpBlockSize() {
  // Round up block sizes to a power of two close to the header size.
  size_t round_up = 16;
  while (round_up < sizeof(AllocList::Header)) {
    round_up += round_up;
  }
  return round_up;
}
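
// On a typical 64-bit platform the Header is four 8-byte fields, so
// sizeof(AllocList::Header) == 32 and the loop above yields round_up == 32,
// making min_size == 2 * round_up == 64 (see the Arena constructor below).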

}  // namespace

LowLevelAlloc::Arena::Arena(uint32_t flags_value)
    : mu(base_internal::SCHEDULE_KERNEL_ONLY),
      allocation_count(0),
      flags(flags_value),
      pagesize(GetPageSize()),
      round_up(RoundedUpBlockSize()),
      min_size(2 * round_up),
      random(0) {
  freelist.header.size = 0;
  freelist.header.magic = Magic(kMagicUnallocated, &freelist.header);
  freelist.header.arena = this;
  freelist.levels = 0;
  memset(freelist.next, 0, sizeof(freelist.next));
}

// L < meta_data_arena->mu
LowLevelAlloc::Arena *LowLevelAlloc::NewArena(uint32_t flags) {
  Arena *meta_data_arena = DefaultArena();
#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
  if ((flags & LowLevelAlloc::kAsyncSignalSafe) != 0) {
    meta_data_arena = UnhookedAsyncSigSafeArena();
  } else  // NOLINT(readability/braces)
#endif
      if ((flags & LowLevelAlloc::kCallMallocHook) == 0) {
    meta_data_arena = UnhookedArena();
  }
  Arena *result =
      new (AllocWithArena(sizeof(*result), meta_data_arena)) Arena(flags);
  return result;
}

// L < arena->mu, L < arena->arena->mu
bool LowLevelAlloc::DeleteArena(Arena *arena) {
  ABSL_RAW_CHECK(
      arena != nullptr && arena != DefaultArena() && arena != UnhookedArena(),
      "may not delete default arena");
  ArenaLock section(arena);
  if (arena->allocation_count != 0) {
    section.Leave();
    return false;
  }
  while (arena->freelist.next[0] != nullptr) {
    AllocList *region = arena->freelist.next[0];
    size_t size = region->header.size;
    arena->freelist.next[0] = region->next[0];
    ABSL_RAW_CHECK(
        region->header.magic == Magic(kMagicUnallocated, &region->header),
        "bad magic number in DeleteArena()");
    ABSL_RAW_CHECK(region->header.arena == arena,
                   "bad arena pointer in DeleteArena()");
    ABSL_RAW_CHECK(size % arena->pagesize == 0,
                   "empty arena has non-page-aligned block size");
    ABSL_RAW_CHECK(reinterpret_cast<uintptr_t>(region) % arena->pagesize == 0,
                   "empty arena has non-page-aligned block");
    int munmap_result;
#ifdef _WIN32
    munmap_result = VirtualFree(region, 0, MEM_RELEASE);
    ABSL_RAW_CHECK(munmap_result != 0,
                   "LowLevelAlloc::DeleteArena: VirtualFree failed");
#else
#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
    if ((arena->flags & LowLevelAlloc::kAsyncSignalSafe) == 0) {
      munmap_result = munmap(region, size);
    } else {
      munmap_result = base_internal::DirectMunmap(region, size);
    }
#else
    munmap_result = munmap(region, size);
#endif  // ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
    if (munmap_result != 0) {
      ABSL_RAW_LOG(FATAL, "LowLevelAlloc::DeleteArena: munmap failed: %d",
                   errno);
    }
#endif  // _WIN32
  }
  section.Leave();
  arena->~Arena();
  Free(arena);
  return true;
}

// ---------------------------------------------------------------------------

// Addition, checking for overflow. The intent is to die if an external client
// manages to push through a request that would cause arithmetic to fail.
static inline uintptr_t CheckedAdd(uintptr_t a, uintptr_t b) {
  uintptr_t sum = a + b;
  ABSL_RAW_CHECK(sum >= a, "LowLevelAlloc arithmetic overflow");
  return sum;
}

// Return value rounded up to next multiple of align.
// align must be a power of two.
static inline uintptr_t RoundUp(uintptr_t addr, uintptr_t align) {
  return CheckedAdd(addr, align - 1) & ~(align - 1);
}
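
// For example, RoundUp(37, 16) == (37 + 15) & ~15 == 52 & ~15 == 48, and
// already-aligned values are unchanged: RoundUp(48, 16) == 48.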

// Equivalent to "return prev->next[i]" but with sanity checking
// that the freelist is in the correct order, that it
// consists of regions marked "unallocated", and that no two regions
// are adjacent in memory (they should have been coalesced).
static AllocList *Next(int i, AllocList *prev, LowLevelAlloc::Arena *arena)
    ABSL_EXCLUSIVE_LOCKS_REQUIRED(arena->mu) {
  ABSL_RAW_CHECK(i < prev->levels, "too few levels in Next()");
  AllocList *next = prev->next[i];
  if (next != nullptr) {
    ABSL_RAW_CHECK(
        next->header.magic == Magic(kMagicUnallocated, &next->header),
        "bad magic number in Next()");
    ABSL_RAW_CHECK(next->header.arena == arena, "bad arena pointer in Next()");
    if (prev != &arena->freelist) {
      ABSL_RAW_CHECK(prev < next, "unordered freelist");
      ABSL_RAW_CHECK(reinterpret_cast<char *>(prev) + prev->header.size <
                         reinterpret_cast<char *>(next),
                     "malformed freelist");
    }
  }
  return next;
}

// Coalesce list item "a" with its successor if they are adjacent.
static void Coalesce(AllocList *a) {
  AllocList *n = a->next[0];
  if (n != nullptr && reinterpret_cast<char *>(a) + a->header.size ==
                          reinterpret_cast<char *>(n)) {
    LowLevelAlloc::Arena *arena = a->header.arena;
    arena->mu.AssertHeld();
    a->header.size += n->header.size;
    n->header.magic = 0;
    n->header.arena = nullptr;
    AllocList *prev[kMaxLevel];
    LLA_SkiplistDelete(&arena->freelist, n, prev);
    LLA_SkiplistDelete(&arena->freelist, a, prev);
    a->levels =
        LLA_SkiplistLevels(a->header.size, arena->min_size, &arena->random);
    LLA_SkiplistInsert(&arena->freelist, a, prev);
  }
}
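
// For example, if a 64-byte free block at address p is immediately followed
// by a free block at p + 64, Coalesce() removes both from the skiplist and
// reinserts a single merged block at p, with fresh skiplist levels computed
// for the combined size.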

// Adds block at location "v" to the free list
static void AddToFreelist(void *v, LowLevelAlloc::Arena *arena)
    ABSL_EXCLUSIVE_LOCKS_REQUIRED(arena->mu) {
  AllocList *f = reinterpret_cast<AllocList *>(reinterpret_cast<char *>(v) -
                                               sizeof(f->header));
  ABSL_RAW_CHECK(f->header.magic == Magic(kMagicAllocated, &f->header),
                 "bad magic number in AddToFreelist()");
  ABSL_RAW_CHECK(f->header.arena == arena,
                 "bad arena pointer in AddToFreelist()");
  f->levels =
      LLA_SkiplistLevels(f->header.size, arena->min_size, &arena->random);
  AllocList *prev[kMaxLevel];
  LLA_SkiplistInsert(&arena->freelist, f, prev);
  f->header.magic = Magic(kMagicUnallocated, &f->header);
  Coalesce(f);        // maybe coalesce with successor
  Coalesce(prev[0]);  // maybe coalesce with predecessor
}

// Frees storage allocated by LowLevelAlloc::Alloc().
// L < arena->mu
void LowLevelAlloc::Free(void *v) {
  if (v != nullptr) {
    AllocList *f = reinterpret_cast<AllocList *>(reinterpret_cast<char *>(v) -
                                                 sizeof(f->header));
    LowLevelAlloc::Arena *arena = f->header.arena;
    ArenaLock section(arena);
    AddToFreelist(v, arena);
    ABSL_RAW_CHECK(arena->allocation_count > 0, "nothing in arena to free");
    arena->allocation_count--;
    section.Leave();
  }
}

// allocates and returns a block of size bytes, to be freed with Free()
// L < arena->mu
static void *DoAllocWithArena(size_t request, LowLevelAlloc::Arena *arena) {
  void *result = nullptr;
  if (request != 0) {
    AllocList *s;  // will point to region that satisfies request
    ArenaLock section(arena);
    // round up with header
    size_t req_rnd =
        RoundUp(CheckedAdd(request, sizeof(s->header)), arena->round_up);
    for (;;) {  // loop until we find a suitable region
      // find the minimum levels that a block of this size must have
      int i = LLA_SkiplistLevels(req_rnd, arena->min_size, nullptr) - 1;
      if (i < arena->freelist.levels) {  // potential blocks exist
        AllocList *before = &arena->freelist;  // predecessor of s
        while ((s = Next(i, before, arena)) != nullptr &&
               s->header.size < req_rnd) {
          before = s;
        }
        if (s != nullptr) {  // we found a region
          break;
        }
      }
      // we unlock before mmap() both because mmap() may call a callback hook,
      // and because it may be slow.
      arena->mu.Unlock();
      // mmap generous 64K chunks to decrease
      // the chances/impact of fragmentation:
      size_t new_pages_size = RoundUp(req_rnd, arena->pagesize * 16);
      void *new_pages;
#ifdef _WIN32
      new_pages = VirtualAlloc(nullptr, new_pages_size,
                               MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
      ABSL_RAW_CHECK(new_pages != nullptr, "VirtualAlloc failed");
#else
#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
      if ((arena->flags & LowLevelAlloc::kAsyncSignalSafe) != 0) {
        new_pages = base_internal::DirectMmap(nullptr, new_pages_size,
                                              PROT_WRITE | PROT_READ,
                                              MAP_ANONYMOUS | MAP_PRIVATE,
                                              -1, 0);
      } else {
        new_pages = mmap(nullptr, new_pages_size, PROT_WRITE | PROT_READ,
                         MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
      }
#else
      new_pages = mmap(nullptr, new_pages_size, PROT_WRITE | PROT_READ,
                       MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
#endif  // ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
      if (new_pages == MAP_FAILED) {
        ABSL_RAW_LOG(FATAL, "mmap error: %d", errno);
      }

#ifdef __linux__
#if defined(PR_SET_VMA) && defined(PR_SET_VMA_ANON_NAME)
      // Attempt to name the allocated address range in /proc/$PID/smaps on
      // Linux.
      //
      // This invocation of prctl() may fail if the Linux kernel was not
      // configured with the CONFIG_ANON_VMA_NAME option. This is OK since
      // the naming of arenas is primarily a debugging aid.
      prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, new_pages, new_pages_size,
            "absl");
#endif
#endif  // __linux__
#endif  // _WIN32
      arena->mu.Lock();
      s = reinterpret_cast<AllocList *>(new_pages);
      s->header.size = new_pages_size;
      // Pretend the block is allocated; call AddToFreelist() to free it.
      s->header.magic = Magic(kMagicAllocated, &s->header);
      s->header.arena = arena;
      AddToFreelist(&s->levels, arena);  // insert new region into free list
    }
    AllocList *prev[kMaxLevel];
    LLA_SkiplistDelete(&arena->freelist, s, prev);  // remove from free list
    // s points to the first free region that's big enough
    if (CheckedAdd(req_rnd, arena->min_size) <= s->header.size) {
      // big enough to split
      AllocList *n =
          reinterpret_cast<AllocList *>(req_rnd + reinterpret_cast<char *>(s));
      n->header.size = s->header.size - req_rnd;
      n->header.magic = Magic(kMagicAllocated, &n->header);
      n->header.arena = arena;
      s->header.size = req_rnd;
      AddToFreelist(&n->levels, arena);
    }
    s->header.magic = Magic(kMagicAllocated, &s->header);
    ABSL_RAW_CHECK(s->header.arena == arena, "");
    arena->allocation_count++;
    section.Leave();
    result = &s->levels;
  }
  ABSL_ANNOTATE_MEMORY_IS_UNINITIALIZED(result, request);
  return result;
}
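
// Worked example of the size arithmetic above, assuming the 64-bit layout
// described earlier (32-byte header, round_up == 32, min_size == 64): a
// 1-byte request becomes req_rnd == RoundUp(1 + 32, 32) == 64, and a free
// region is split only if it can supply req_rnd bytes and still leave at
// least min_size == 64 bytes for the remainder block.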

void *LowLevelAlloc::Alloc(size_t request) {
  void *result = DoAllocWithArena(request, DefaultArena());
  return result;
}

void *LowLevelAlloc::AllocWithArena(size_t request, Arena *arena) {
  ABSL_RAW_CHECK(arena != nullptr, "must pass a valid arena");
  void *result = DoAllocWithArena(request, arena);
  return result;
}

}  // namespace base_internal
ABSL_NAMESPACE_END
}  // namespace absl

#endif  // ABSL_LOW_LEVEL_ALLOC_MISSING