Coverage Report

Created: 2025-08-25 06:55

/src/abseil-cpp/absl/base/internal/low_level_alloc.cc
Line | Count | Source
   1 |       | // Copyright 2017 The Abseil Authors.
   2 |       | //
   3 |       | // Licensed under the Apache License, Version 2.0 (the "License");
   4 |       | // you may not use this file except in compliance with the License.
   5 |       | // You may obtain a copy of the License at
   6 |       | //
   7 |       | //      https://www.apache.org/licenses/LICENSE-2.0
   8 |       | //
   9 |       | // Unless required by applicable law or agreed to in writing, software
  10 |       | // distributed under the License is distributed on an "AS IS" BASIS,
  11 |       | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  12 |       | // See the License for the specific language governing permissions and
  13 |       | // limitations under the License.
  14 |       |
  15 |       | // A low-level allocator that can be used by other low-level
  16 |       | // modules without introducing dependency cycles.
  17 |       | // This allocator is slow and wasteful of memory;
  18 |       | // it should not be used when performance is key.
  19 |       |
  20 |       | #include "absl/base/internal/low_level_alloc.h"
  21 |       |
  22 |       | #include <stdint.h>
  23 |       |
  24 |       | #include <optional>
  25 |       | #include <type_traits>
  26 |       |
  27 |       | #include "absl/base/call_once.h"
  28 |       | #include "absl/base/config.h"
  29 |       | #include "absl/base/internal/direct_mmap.h"
  30 |       | #include "absl/base/internal/scheduling_mode.h"
  31 |       | #include "absl/base/macros.h"
  32 |       | #include "absl/base/thread_annotations.h"
  33 |       |
  34 |       | // LowLevelAlloc requires that the platform support low-level
  35 |       | // allocation of virtual memory. Platforms lacking this cannot use
  36 |       | // LowLevelAlloc.
  37 |       | #ifndef ABSL_LOW_LEVEL_ALLOC_MISSING
  38 |       |
  39 |       | #ifndef _WIN32
  40 |       | #include <pthread.h>
  41 |       | #include <signal.h>
  42 |       | #include <sys/mman.h>
  43 |       | #include <unistd.h>
  44 |       | #else
  45 |       | #include <windows.h>
  46 |       | #endif
  47 |       |
  48 |       | #ifdef __linux__
  49 |       | #include <sys/prctl.h>
  50 |       | #endif
  51 |       |
  52 |       | #include <string.h>
  53 |       |
  54 |       | #include <algorithm>
  55 |       | #include <atomic>
  56 |       | #include <cerrno>
  57 |       | #include <cstddef>
  58 |       | #include <new>  // for placement-new
  59 |       |
  60 |       | #include "absl/base/dynamic_annotations.h"
  61 |       | #include "absl/base/internal/raw_logging.h"
  62 |       | #include "absl/base/internal/spinlock.h"
  63 |       |
  64 |       | #if defined(MAP_ANON) && !defined(MAP_ANONYMOUS)
  65 |       | #define MAP_ANONYMOUS MAP_ANON
  66 |       | #endif
  67 |       |
  68 |       | namespace absl {
  69 |       | ABSL_NAMESPACE_BEGIN
  70 |       | namespace base_internal {
  71 |       |
  72 |       | // A first-fit allocator with amortized logarithmic free() time.
  73 |       |
  74 |       | // ---------------------------------------------------------------------------
  75 |       | static const int kMaxLevel = 30;
  76 |       |
  77 |       | namespace {
  78 |       | // This struct describes one allocated block, or one free block.
  79 |       | struct AllocList {
  80 |       |   struct Header {
  81 |       |     // Size of entire region, including this field. Must be
  82 |       |     // first. Valid in both allocated and unallocated blocks.
  83 |       |     uintptr_t size;
  84 |       |
  85 |       |     // kMagicAllocated or kMagicUnallocated xor this.
  86 |       |     uintptr_t magic;
  87 |       |
  88 |       |     // Pointer to parent arena.
  89 |       |     LowLevelAlloc::Arena *arena;
  90 |       |
  91 |       |     // Aligns regions to 0 mod 2*sizeof(void*).
  92 |       |     void *dummy_for_alignment;
  93 |       |   } header;
  94 |       |
  95 |       |   // Next two fields: in unallocated blocks: freelist skiplist data
  96 |       |   //                  in allocated blocks: overlaps with client data
  97 |       |
  98 |       |   // Levels in skiplist used.
  99 |       |   int levels;
 100 |       |
 101 |       |   // Actually has levels elements. The AllocList node may not have room
 102 |       |   // for all kMaxLevel entries. See max_fit in LLA_SkiplistLevels().
 103 |       |   AllocList *next[kMaxLevel];
 104 |       | };
 105 |       | }  // namespace
 106 |       |
 107 |       | // ---------------------------------------------------------------------------
 108 |       | // A trivial skiplist implementation.  This is used to keep the freelist
 109 |       | // in address order while taking only logarithmic time per insert and delete.
 110 |       |
 111 |       | // An integer approximation of log2(size/base)
 112 |       | // Requires size >= base.
 113 |    42 | static int IntLog2(size_t size, size_t base) {
 114 |    42 |   int result = 0;
 115 |   369 |   for (size_t i = size; i > base; i >>= 1) {  // i == floor(size/2**result)
 116 |   327 |     result++;
 117 |   327 |   }
 118 |       |   //    floor(size / 2**result) <= base < floor(size / 2**(result-1))
 119 |       |   // =>     log2(size/(base+1)) <= result < 1+log2(size/base)
 120 |       |   // => result ~= log2(size/base)
 121 |    42 |   return result;
 122 |    42 | }
 123 |       |
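For intuition, here is the same loop as a standalone sketch with a few checked values (the name IntLog2Sketch and the inputs are illustrative, not part of the source):

    // Standalone sketch of the IntLog2() loop above, illustration only.
    #include <cassert>
    #include <cstddef>

    static int IntLog2Sketch(size_t size, size_t base) {
      int result = 0;
      for (size_t i = size; i > base; i >>= 1) result++;
      return result;
    }

    int main() {
      assert(IntLog2Sketch(1024, 16) == 6);  // log2(1024/16) exactly
      assert(IntLog2Sketch(16, 16) == 0);    // size == base yields 0
      assert(IntLog2Sketch(17, 16) == 1);    // rounds up between powers of two
    }
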
 124 |       | // Return a random integer n:  p(n)=1/(2**n) if 1 <= n; p(n)=0 if n < 1.
 125 |    21 | static int Random(uint32_t *state) {
 126 |    21 |   uint32_t r = *state;
 127 |    21 |   int result = 1;
 128 |    44 |   while ((((r = r * 1103515245 + 12345) >> 30) & 1) == 0) {
 129 |    23 |     result++;
 130 |    23 |   }
 131 |    21 |   *state = r;
 132 |    21 |   return result;
 133 |    21 | }
 134 |       |
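Since p(n) = 1/2^n, the expected value of Random() is the sum of n/2^n, which is 2. A small simulation makes that concrete (the seed and trial count here are arbitrary choices, not from the source):

    // Empirical check that the level distribution has mean ~2.
    #include <cstdint>
    #include <cstdio>

    static int RandomSketch(uint32_t *state) {
      uint32_t r = *state;
      int result = 1;
      while ((((r = r * 1103515245 + 12345) >> 30) & 1) == 0) result++;
      *state = r;
      return result;
    }

    int main() {
      uint32_t state = 1;  // arbitrary seed
      long long total = 0;
      const int kTrials = 1000000;
      for (int i = 0; i < kTrials; i++) total += RandomSketch(&state);
      printf("mean = %.3f (expected ~2.0)\n", double(total) / kTrials);
    }
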
 135 |       | // Return a number of skiplist levels for a node of size bytes, where
 136 |       | // base is the minimum node size.  Compute level=log2(size / base)+n
 137 |       | // where n is 1 if random is null and otherwise a random number generated with
 138 |       | // the standard distribution for a skiplist:  See Random() above.
 139 |       | // Bigger nodes tend to have more skiplist levels due to the log2(size / base)
 140 |       | // term, so first-fit searches touch fewer nodes.  "level" is clipped so
 141 |       | // level<kMaxLevel and next[level-1] will fit in the node.
 142 |       | // 0 < LLA_SkiplistLevels(x,y,nullptr) <= LLA_SkiplistLevels(x,y,random) < kMaxLevel
 143 |    42 | static int LLA_SkiplistLevels(size_t size, size_t base, uint32_t *random) {
 144 |       |   // max_fit is the maximum number of levels that will fit in a node for the
 145 |       |   // given size.   We can't return more than max_fit, no matter what the
 146 |       |   // random number generator says.
 147 |    42 |   size_t max_fit = (size - offsetof(AllocList, next)) / sizeof(AllocList *);
 148 |    42 |   int level = IntLog2(size, base) + (random != nullptr ? Random(random) : 1);
 149 |    42 |   if (static_cast<size_t>(level) > max_fit) level = static_cast<int>(max_fit);
 150 |    42 |   if (level > kMaxLevel - 1) level = kMaxLevel - 1;
 151 |    42 |   ABSL_RAW_CHECK(level >= 1, "block not big enough for even one level");
 152 |    42 |   return level;
 153 |    42 | }
 154 |       |
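The max_fit clipping depends on the node layout. A sketch with a mock struct mirroring the field order shows the arithmetic; the concrete numbers assume a typical LP64 ABI (32-byte header, offsetof(next) of 40) and are not taken from the source:

    // Mock of the AllocList layout to show the max_fit arithmetic.
    #include <cstddef>
    #include <cstdint>
    #include <cstdio>

    struct MockAllocList {
      struct Header {
        uintptr_t size;
        uintptr_t magic;
        void *arena;
        void *dummy_for_alignment;
      } header;
      int levels;
      MockAllocList *next[30];  // kMaxLevel
    };

    int main() {
      // For a 64-byte block, only the bytes past offsetof(next) hold levels.
      size_t max_fit =
          (64 - offsetof(MockAllocList, next)) / sizeof(MockAllocList *);
      printf("offsetof(next)=%zu max_fit=%zu\n",
             offsetof(MockAllocList, next), max_fit);  // typically 40 and 3
    }
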
 155 |       | // Return "atleast", the first element of AllocList *head s.t. *atleast >= *e.
 156 |       | // For 0 <= i < head->levels, set prev[i] to "no_greater", where no_greater
 157 |       | // points to the last element at level i in the AllocList less than *e, or is
 158 |       | // head if no such element exists.
 159 |       | static AllocList *LLA_SkiplistSearch(AllocList *head, AllocList *e,
 160 |    36 |                                      AllocList **prev) {
 161 |    36 |   AllocList *p = head;
 162 |   226 |   for (int level = head->levels - 1; level >= 0; level--) {
 163 |   190 |     for (AllocList *n; (n = p->next[level]) != nullptr && n < e; p = n) {
 164 |     0 |     }
 165 |   190 |     prev[level] = p;
 166 |   190 |   }
 167 |    36 |   return (head->levels == 0) ? nullptr : prev[0]->next[0];
 168 |    36 | }
 169 |       |
 170 |       | // Insert element *e into AllocList *head.  Set prev[] as LLA_SkiplistSearch.
 171 |       | // Requires that e->levels be previously set by the caller (using
 172 |       | // LLA_SkiplistLevels())
 173 |       | static void LLA_SkiplistInsert(AllocList *head, AllocList *e,
 174 |    21 |                                AllocList **prev) {
 175 |    21 |   LLA_SkiplistSearch(head, e, prev);
 176 |   285 |   for (; head->levels < e->levels; head->levels++) {  // extend prev pointers
 177 |   264 |     prev[head->levels] = head;                        // to all *e's levels
 178 |   264 |   }
 179 |   285 |   for (int i = 0; i != e->levels; i++) {  // add element to list
 180 |   264 |     e->next[i] = prev[i]->next[i];
 181 |   264 |     prev[i]->next[i] = e;
 182 |   264 |   }
 183 |    21 | }
 184 |       |
 185 |       | // Remove element *e from AllocList *head.  Set prev[] as LLA_SkiplistSearch().
 186 |       | // Requires that e->levels be previously set by the caller (using
 187 |       | // LLA_SkiplistLevels())
 188 |       | static void LLA_SkiplistDelete(AllocList *head, AllocList *e,
 189 |    15 |                                AllocList **prev) {
 190 |    15 |   AllocList *found = LLA_SkiplistSearch(head, e, prev);
 191 |    15 |   ABSL_RAW_CHECK(e == found, "element not in freelist");
 192 |   205 |   for (int i = 0; i != e->levels && prev[i]->next[i] == e; i++) {
 193 |   190 |     prev[i]->next[i] = e->next[i];
 194 |   190 |   }
 195 |   205 |   while (head->levels > 0 && head->next[head->levels - 1] == nullptr) {
 196 |   190 |     head->levels--;  // reduce head->levels if level unused
 197 |   190 |   }
 198 |    15 | }
 199 |       |
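The three functions share one protocol: search fills prev[], and insert/delete then splice the list through it. A toy skiplist keyed on integers shows the shape (illustrative only; the real code orders nodes by address, and delete splices through prev[] the same way):

    // Toy skiplist mirroring the prev[]-based search/insert protocol.
    #include <cstdio>

    constexpr int kMaxLevel = 4;

    struct Node {
      int value;
      int levels;
      Node *next[kMaxLevel];
    };

    Node *Search(Node *head, int value, Node **prev) {
      Node *p = head;
      for (int level = head->levels - 1; level >= 0; level--) {
        for (Node *n; (n = p->next[level]) != nullptr && n->value < value;
             p = n) {
        }
        prev[level] = p;
      }
      return (head->levels == 0) ? nullptr : prev[0]->next[0];
    }

    void Insert(Node *head, Node *e, Node **prev) {
      Search(head, e->value, prev);
      for (; head->levels < e->levels; head->levels++) prev[head->levels] = head;
      for (int i = 0; i != e->levels; i++) {  // splice e in at every level
        e->next[i] = prev[i]->next[i];
        prev[i]->next[i] = e;
      }
    }

    int main() {
      Node head = {0, 0, {}};
      Node a = {10, 2, {}}, b = {20, 1, {}};
      Node *prev[kMaxLevel];
      Insert(&head, &a, prev);
      Insert(&head, &b, prev);
      for (Node *n = head.next[0]; n != nullptr; n = n->next[0])
        printf("%d ", n->value);  // prints "10 20"
      printf("\n");
    }
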
 200 |       | // ---------------------------------------------------------------------------
 201 |       | // Arena implementation
 202 |       |
 203 |       | // Metadata for a LowLevelAlloc arena instance.
 204 |       | struct LowLevelAlloc::Arena {
 205 |       |   // Constructs an arena with the given LowLevelAlloc flags.
 206 |       |   explicit Arena(uint32_t flags_value);
 207 |       |
 208 |       |   base_internal::SpinLock mu;
 209 |       |   // Head of free list, sorted by address
 210 |       |   AllocList freelist ABSL_GUARDED_BY(mu);
 211 |       |   // Count of allocated blocks
 212 |       |   int32_t allocation_count ABSL_GUARDED_BY(mu);
 213 |       |   // flags passed to NewArena
 214 |       |   const uint32_t flags;
 215 |       |   // Result of sysconf(_SC_PAGESIZE)
 216 |       |   const size_t pagesize;
 217 |       |   // Lowest power of two >= max(16, sizeof(AllocList::Header))
 218 |       |   const size_t round_up;
 219 |       |   // Smallest allocation block size
 220 |       |   const size_t min_size;
 221 |       |   // PRNG state
 222 |       |   uint32_t random ABSL_GUARDED_BY(mu);
 223 |       | };
 224 |       |
 225 |       | // ---------------------------------------------------------------
 226 |       | // An async-signal-safe arena for LowLevelAlloc
 227 |       | static std::atomic<base_internal::LowLevelAlloc::Arena *> g_sig_safe_arena;
 228 |       |
 229 |     0 | base_internal::LowLevelAlloc::Arena *SigSafeArena() {
 230 |     0 |   return g_sig_safe_arena.load(std::memory_order_acquire);
 231 |     0 | }
 232 |       |
 233 |     0 | void InitSigSafeArena() {
 234 |     0 |   if (SigSafeArena() == nullptr) {
 235 |     0 |     uint32_t flags = 0;
 236 |     0 | #ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
 237 |     0 |     flags |= base_internal::LowLevelAlloc::kAsyncSignalSafe;
 238 |     0 | #endif
 239 |     0 |     base_internal::LowLevelAlloc::Arena *new_arena =
 240 |     0 |         base_internal::LowLevelAlloc::NewArena(flags);
 241 |     0 |     base_internal::LowLevelAlloc::Arena *old_value = nullptr;
 242 |     0 |     if (!g_sig_safe_arena.compare_exchange_strong(old_value, new_arena,
 243 |     0 |                                                   std::memory_order_release,
 244 |     0 |                                                   std::memory_order_relaxed)) {
 245 |       |       // We lost a race to allocate an arena; deallocate.
 246 |     0 |       base_internal::LowLevelAlloc::DeleteArena(new_arena);
 247 |     0 |     }
 248 |     0 |   }
 249 |     0 | }
 250 |       |
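InitSigSafeArena() uses the standard optimistic-publish pattern: construct tentatively, publish with a compare-and-swap, and roll back on losing the race. The same shape in isolation, as a generic sketch with made-up types rather than the arena machinery:

    // Optimistic one-time publication via compare_exchange_strong.
    #include <atomic>

    std::atomic<int *> g_instance{nullptr};

    int *GetInstance() {
      int *cur = g_instance.load(std::memory_order_acquire);
      if (cur != nullptr) return cur;
      int *fresh = new int(42);  // tentative construction
      int *expected = nullptr;
      if (g_instance.compare_exchange_strong(expected, fresh,
                                             std::memory_order_acq_rel,
                                             std::memory_order_acquire)) {
        return fresh;    // we won: our object is now published
      }
      delete fresh;      // we lost: discard ours, use the winner's
      return expected;   // on failure the CAS loaded the winning pointer
    }
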
251
namespace {
252
// Static storage space for the lazily-constructed, default global arena
253
// instances.  We require this space because the whole point of LowLevelAlloc
254
// is to avoid relying on malloc/new.
255
alignas(LowLevelAlloc::Arena) unsigned char default_arena_storage[sizeof(
256
    LowLevelAlloc::Arena)];
257
alignas(LowLevelAlloc::Arena) unsigned char unhooked_arena_storage[sizeof(
258
    LowLevelAlloc::Arena)];
259
#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
260
alignas(
261
    LowLevelAlloc::Arena) unsigned char unhooked_async_sig_safe_arena_storage
262
    [sizeof(LowLevelAlloc::Arena)];
263
#endif
264
265
// We must use LowLevelCallOnce here to construct the global arenas, rather than
266
// using function-level statics, to avoid recursively invoking the scheduler.
267
absl::once_flag create_globals_once;
268
269
2
void CreateGlobalArenas() {
270
2
  new (&default_arena_storage)
271
2
      LowLevelAlloc::Arena(LowLevelAlloc::kCallMallocHook);
272
2
  new (&unhooked_arena_storage) LowLevelAlloc::Arena(0);
273
2
#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
274
2
  new (&unhooked_async_sig_safe_arena_storage)
275
2
      LowLevelAlloc::Arena(LowLevelAlloc::kAsyncSignalSafe);
276
2
#endif
277
2
}
278
279
// Returns a global arena that does not call into hooks.  Used by NewArena()
280
// when kCallMallocHook is not set.
281
2
LowLevelAlloc::Arena *UnhookedArena() {
282
2
  base_internal::LowLevelCallOnce(&create_globals_once, CreateGlobalArenas);
283
2
  return reinterpret_cast<LowLevelAlloc::Arena *>(&unhooked_arena_storage);
284
2
}
285
286
#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
287
// Returns a global arena that is async-signal safe.  Used by NewArena() when
288
// kAsyncSignalSafe is set.
289
0
LowLevelAlloc::Arena *UnhookedAsyncSigSafeArena() {
290
0
  base_internal::LowLevelCallOnce(&create_globals_once, CreateGlobalArenas);
291
0
  return reinterpret_cast<LowLevelAlloc::Arena *>(
292
0
      &unhooked_async_sig_safe_arena_storage);
293
0
}
294
#endif
295
296
}  // namespace
297
298
// Returns the default arena, as used by LowLevelAlloc::Alloc() and friends.
299
8
LowLevelAlloc::Arena *LowLevelAlloc::DefaultArena() {
300
8
  base_internal::LowLevelCallOnce(&create_globals_once, CreateGlobalArenas);
301
8
  return reinterpret_cast<LowLevelAlloc::Arena *>(&default_arena_storage);
302
8
}
303
304
// magic numbers to identify allocated and unallocated blocks
305
static const uintptr_t kMagicAllocated = 0x4c833e95U;
306
static const uintptr_t kMagicUnallocated = ~kMagicAllocated;
307
308
namespace {
309
class ABSL_SCOPED_LOCKABLE ArenaLock {
310
 public:
311
  explicit ArenaLock(LowLevelAlloc::Arena *arena)
312
      ABSL_EXCLUSIVE_LOCK_FUNCTION(arena->mu)
313
15
      : arena_(arena) {
314
15
#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
315
15
    if ((arena->flags & LowLevelAlloc::kAsyncSignalSafe) != 0) {
316
0
      sigset_t all;
317
0
      sigfillset(&all);
318
0
      mask_valid_ = pthread_sigmask(SIG_BLOCK, &all, &mask_) == 0;
319
0
    }
320
15
#endif
321
15
    arena_->mu.lock();
322
15
  }
323
15
  ~ArenaLock() { ABSL_RAW_CHECK(left_, "haven't left Arena region"); }
324
15
  void Leave() ABSL_UNLOCK_FUNCTION() {
325
15
    arena_->mu.unlock();
326
15
#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
327
15
    if (mask_valid_) {
328
0
      const int err = pthread_sigmask(SIG_SETMASK, &mask_, nullptr);
329
0
      if (err != 0) {
330
0
        ABSL_RAW_LOG(FATAL, "pthread_sigmask failed: %d", err);
331
0
      }
332
0
    }
333
15
#endif
334
15
    left_ = true;
335
15
  }
336
337
 private:
338
  bool left_ = false;  // whether left region
339
#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
340
  bool mask_valid_ = false;
341
  sigset_t mask_;  // old mask of blocked signals
342
#endif
343
  LowLevelAlloc::Arena *arena_;
344
  ArenaLock(const ArenaLock &) = delete;
345
  ArenaLock &operator=(const ArenaLock &) = delete;
346
};
347
}  // namespace
348
349
// create an appropriate magic number for an object at "ptr"
350
// "magic" should be kMagicAllocated or kMagicUnallocated
351
101
inline static uintptr_t Magic(uintptr_t magic, AllocList::Header *ptr) {
352
101
  return magic ^ reinterpret_cast<uintptr_t>(ptr);
353
101
}
354
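XORing the header's own address into the magic value means the stored tag only validates at the address where it was written, so a header copied or misread at another address fails the check. A sketch of that property (the addresses are made up):

    // The address-keyed magic check in isolation.
    #include <cstdint>
    #include <cstdio>

    static uintptr_t MagicSketch(uintptr_t magic, uintptr_t header_addr) {
      return magic ^ header_addr;
    }

    int main() {
      const uintptr_t kAllocated = 0x4c833e95U;
      uintptr_t at_a = 0x1000, at_b = 0x2000;  // hypothetical header addresses
      uintptr_t stored = MagicSketch(kAllocated, at_a);
      printf("%d\n", stored == MagicSketch(kAllocated, at_a));  // 1: validates
      printf("%d\n", stored == MagicSketch(kAllocated, at_b));  // 0: wrong address
    }
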
 355 |       | namespace {
 356 |     8 | size_t GetPageSize() {
 357 |       | #ifdef _WIN32
 358 |       |   SYSTEM_INFO system_info;
 359 |       |   GetSystemInfo(&system_info);
 360 |       |   return std::max(system_info.dwPageSize, system_info.dwAllocationGranularity);
 361 |       | #elif defined(__wasm__) || defined(__asmjs__) || defined(__hexagon__)
 362 |       |   return static_cast<size_t>(getpagesize());
 363 |       | #else
 364 |     8 |   return static_cast<size_t>(sysconf(_SC_PAGESIZE));
 365 |     8 | #endif
 366 |     8 | }
 367 |       |
 368 |     8 | size_t RoundedUpBlockSize() {
 369 |       |   // Round up block sizes to a power of two close to the header size.
 370 |     8 |   size_t round_up = 16;
 371 |    16 |   while (round_up < sizeof(AllocList::Header)) {
 372 |     8 |     round_up += round_up;
 373 |     8 |   }
 374 |     8 |   return round_up;
 375 |     8 | }
 376 |       |
 377 |       | }  // namespace
 378 |       |
 379 |       | LowLevelAlloc::Arena::Arena(uint32_t flags_value)
 380 |     8 |     : mu(base_internal::SCHEDULE_KERNEL_ONLY),
 381 |     8 |       allocation_count(0),
 382 |     8 |       flags(flags_value),
 383 |     8 |       pagesize(GetPageSize()),
 384 |     8 |       round_up(RoundedUpBlockSize()),
 385 |     8 |       min_size(2 * round_up),
 386 |     8 |       random(0) {
 387 |     8 |   freelist.header.size = 0;
 388 |     8 |   freelist.header.magic = Magic(kMagicUnallocated, &freelist.header);
 389 |     8 |   freelist.header.arena = this;
 390 |     8 |   freelist.levels = 0;
 391 |     8 |   memset(freelist.next, 0, sizeof(freelist.next));
 392 |     8 | }
 393 |       |
 394 |       | // L < meta_data_arena->mu
 395 |     2 | LowLevelAlloc::Arena *LowLevelAlloc::NewArena(uint32_t flags) {
 396 |     2 |   Arena *meta_data_arena = DefaultArena();
 397 |     2 | #ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
 398 |     2 |   if ((flags & LowLevelAlloc::kAsyncSignalSafe) != 0) {
 399 |     0 |     meta_data_arena = UnhookedAsyncSigSafeArena();
 400 |     0 |   } else  // NOLINT(readability/braces)
 401 |     2 | #endif
 402 |     2 |       if ((flags & LowLevelAlloc::kCallMallocHook) == 0) {
 403 |     2 |     meta_data_arena = UnhookedArena();
 404 |     2 |   }
 405 |     2 |   Arena *result =
 406 |     2 |       new (AllocWithArena(sizeof(*result), meta_data_arena)) Arena(flags);
 407 |     2 |   return result;
 408 |     2 | }
 409 |       |
 410 |       | // L < arena->mu, L < arena->arena->mu
 411 |     0 | bool LowLevelAlloc::DeleteArena(Arena *arena) {
 412 |     0 |   ABSL_RAW_CHECK(
 413 |     0 |       arena != nullptr && arena != DefaultArena() && arena != UnhookedArena(),
 414 |     0 |       "may not delete default arena");
 415 |     0 |   ArenaLock section(arena);
 416 |     0 |   if (arena->allocation_count != 0) {
 417 |     0 |     section.Leave();
 418 |     0 |     return false;
 419 |     0 |   }
 420 |     0 |   while (arena->freelist.next[0] != nullptr) {
 421 |     0 |     AllocList *region = arena->freelist.next[0];
 422 |     0 |     size_t size = region->header.size;
 423 |     0 |     arena->freelist.next[0] = region->next[0];
 424 |     0 |     ABSL_RAW_CHECK(
 425 |     0 |         region->header.magic == Magic(kMagicUnallocated, &region->header),
 426 |     0 |         "bad magic number in DeleteArena()");
 427 |     0 |     ABSL_RAW_CHECK(region->header.arena == arena,
 428 |     0 |                    "bad arena pointer in DeleteArena()");
 429 |     0 |     ABSL_RAW_CHECK(size % arena->pagesize == 0,
 430 |     0 |                    "empty arena has non-page-aligned block size");
 431 |     0 |     ABSL_RAW_CHECK(reinterpret_cast<uintptr_t>(region) % arena->pagesize == 0,
 432 |     0 |                    "empty arena has non-page-aligned block");
 433 |     0 |     int munmap_result;
 434 |       | #ifdef _WIN32
 435 |       |     munmap_result = VirtualFree(region, 0, MEM_RELEASE);
 436 |       |     ABSL_RAW_CHECK(munmap_result != 0,
 437 |       |                    "LowLevelAlloc::DeleteArena: VirtualFree failed");
 438 |       | #else
 439 |     0 | #ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
 440 |     0 |     if ((arena->flags & LowLevelAlloc::kAsyncSignalSafe) == 0) {
 441 |     0 |       munmap_result = munmap(region, size);
 442 |     0 |     } else {
 443 |     0 |       munmap_result = base_internal::DirectMunmap(region, size);
 444 |     0 |     }
 445 |       | #else
 446 |       |     munmap_result = munmap(region, size);
 447 |       | #endif  // ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
 448 |     0 |     if (munmap_result != 0) {
 449 |     0 |       ABSL_RAW_LOG(FATAL, "LowLevelAlloc::DeleteArena: munmap failed: %d",
 450 |     0 |                    errno);
 451 |     0 |     }
 452 |     0 | #endif  // _WIN32
 453 |     0 |   }
 454 |     0 |   section.Leave();
 455 |     0 |   arena->~Arena();
 456 |     0 |   Free(arena);
 457 |     0 |   return true;
 458 |     0 | }
 459 |       |
 460 |       | // ---------------------------------------------------------------------------
 461 |       |
 462 |       | // Addition, checking for overflow.  The intent is to die if an external client
 463 |       | // manages to push through a request that would cause arithmetic to fail.
 464 |    51 | static inline uintptr_t CheckedAdd(uintptr_t a, uintptr_t b) {
 465 |    51 |   uintptr_t sum = a + b;
 466 |    51 |   ABSL_RAW_CHECK(sum >= a, "LowLevelAlloc arithmetic overflow");
 467 |    51 |   return sum;
 468 |    51 | }
 469 |       |
 470 |       | // Return value rounded up to next multiple of align.
 471 |       | // align must be a power of two.
 472 |    21 | static inline uintptr_t RoundUp(uintptr_t addr, uintptr_t align) {
 473 |    21 |   return CheckedAdd(addr, align - 1) & ~(align - 1);
 474 |    21 | }
 475 |       |
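A worked example of the rounding identity (illustrative asserts; the overflow check is elided here):

    // Power-of-two round-up: (addr + align - 1) & ~(align - 1).
    #include <cassert>
    #include <cstdint>

    static uintptr_t RoundUpSketch(uintptr_t addr, uintptr_t align) {
      return (addr + align - 1) & ~(align - 1);
    }

    int main() {
      assert(RoundUpSketch(37, 16) == 48);     // 52 & ~15
      assert(RoundUpSketch(48, 16) == 48);     // already aligned: unchanged
      assert(RoundUpSketch(1, 4096) == 4096);  // one byte still costs a page
    }
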
 476 |       | // Equivalent to "return prev->next[i]" but with sanity checking
 477 |       | // that the freelist is in the correct order, that it
 478 |       | // consists of regions marked "unallocated", and that no two regions
 479 |       | // are adjacent in memory (they should have been coalesced).
 480 |       | static AllocList *Next(int i, AllocList *prev, LowLevelAlloc::Arena *arena)
 481 |    15 |     ABSL_EXCLUSIVE_LOCKS_REQUIRED(arena->mu) {
 482 |    15 |   ABSL_RAW_CHECK(i < prev->levels, "too few levels in Next()");
 483 |    15 |   AllocList *next = prev->next[i];
 484 |    15 |   if (next != nullptr) {
 485 |    15 |     ABSL_RAW_CHECK(
 486 |    15 |         next->header.magic == Magic(kMagicUnallocated, &next->header),
 487 |    15 |         "bad magic number in Next()");
 488 |    15 |     ABSL_RAW_CHECK(next->header.arena == arena, "bad arena pointer in Next()");
 489 |    15 |     if (prev != &arena->freelist) {
 490 |     0 |       ABSL_RAW_CHECK(prev < next, "unordered freelist");
 491 |     0 |       ABSL_RAW_CHECK(reinterpret_cast<char *>(prev) + prev->header.size <
 492 |     0 |                          reinterpret_cast<char *>(next),
 493 |     0 |                      "malformed freelist");
 494 |     0 |     }
 495 |    15 |   }
 496 |    15 |   return next;
 497 |    15 | }
 498 |       |
 499 |       | // Coalesce list item "a" with its successor if they are adjacent.
 500 |    42 | static void Coalesce(AllocList *a) {
 501 |    42 |   AllocList *n = a->next[0];
 502 |    42 |   if (n != nullptr && reinterpret_cast<char *>(a) + a->header.size ==
 503 |    21 |                           reinterpret_cast<char *>(n)) {
 504 |     0 |     LowLevelAlloc::Arena *arena = a->header.arena;
 505 |     0 |     arena->mu.AssertHeld();
 506 |     0 |     a->header.size += n->header.size;
 507 |     0 |     n->header.magic = 0;
 508 |     0 |     n->header.arena = nullptr;
 509 |     0 |     AllocList *prev[kMaxLevel];
 510 |     0 |     LLA_SkiplistDelete(&arena->freelist, n, prev);
 511 |     0 |     LLA_SkiplistDelete(&arena->freelist, a, prev);
 512 |     0 |     a->levels =
 513 |     0 |         LLA_SkiplistLevels(a->header.size, arena->min_size, &arena->random);
 514 |     0 |     LLA_SkiplistInsert(&arena->freelist, a, prev);
 515 |     0 |   }
 516 |    42 | }
 517 |       |
 518 |       | // Adds block at location "v" to the free list
 519 |       | static void AddToFreelist(void *v, LowLevelAlloc::Arena *arena)
 520 |    21 |     ABSL_EXCLUSIVE_LOCKS_REQUIRED(arena->mu) {
 521 |    21 |   AllocList *f = reinterpret_cast<AllocList *>(reinterpret_cast<char *>(v) -
 522 |    21 |                                                sizeof(f->header));
 523 |    21 |   ABSL_RAW_CHECK(f->header.magic == Magic(kMagicAllocated, &f->header),
 524 |    21 |                  "bad magic number in AddToFreelist()");
 525 |    21 |   ABSL_RAW_CHECK(f->header.arena == arena,
 526 |    21 |                  "bad arena pointer in AddToFreelist()");
 527 |    21 |   f->levels =
 528 |    21 |       LLA_SkiplistLevels(f->header.size, arena->min_size, &arena->random);
 529 |    21 |   AllocList *prev[kMaxLevel];
 530 |    21 |   LLA_SkiplistInsert(&arena->freelist, f, prev);
 531 |    21 |   f->header.magic = Magic(kMagicUnallocated, &f->header);
 532 |    21 |   Coalesce(f);        // maybe coalesce with successor
 533 |    21 |   Coalesce(prev[0]);  // maybe coalesce with predecessor
 534 |    21 | }
 535 |       |
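AddToFreelist() and Free() both recover the block header from the client pointer by stepping back sizeof(header) bytes; this works because the levels field immediately follows the header in AllocList. A minimal sketch of that arithmetic, using a mock struct rather than the real types:

    // Recovering the header from the pointer handed to the client.
    #include <cstdint>
    #include <cstdio>

    struct Block {
      struct Header {
        uintptr_t size, magic;
        void *arena, *dummy_for_alignment;
      } header;
      int levels;  // &levels is what the allocator returns to the client
    };

    int main() {
      Block b{};
      void *client = &b.levels;
      Block *recovered = reinterpret_cast<Block *>(
          reinterpret_cast<char *>(client) - sizeof(Block::Header));
      printf("%d\n", recovered == &b);  // 1: header found from client pointer
    }
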
 536 |       | // Frees storage allocated by LowLevelAlloc::Alloc().
 537 |       | // L < arena->mu
 538 |     0 | void LowLevelAlloc::Free(void *v) {
 539 |     0 |   if (v != nullptr) {
 540 |     0 |     AllocList *f = reinterpret_cast<AllocList *>(reinterpret_cast<char *>(v) -
 541 |     0 |                                                  sizeof(f->header));
 542 |     0 |     LowLevelAlloc::Arena *arena = f->header.arena;
 543 |     0 |     ArenaLock section(arena);
 544 |     0 |     AddToFreelist(v, arena);
 545 |     0 |     ABSL_RAW_CHECK(arena->allocation_count > 0, "nothing in arena to free");
 546 |     0 |     arena->allocation_count--;
 547 |     0 |     section.Leave();
 548 |     0 |   }
 549 |     0 | }
 550 |       |
 551 |       | // allocates and returns a block of size bytes, to be freed with Free()
 552 |       | // L < arena->mu
 553 |    15 | static void *DoAllocWithArena(size_t request, LowLevelAlloc::Arena *arena) {
 554 |    15 |   void *result = nullptr;
 555 |    15 |   if (request != 0) {
 556 |    15 |     AllocList *s;  // will point to region that satisfies request
 557 |    15 |     ArenaLock section(arena);
 558 |       |     // round up with header
 559 |    15 |     size_t req_rnd =
 560 |    15 |         RoundUp(CheckedAdd(request, sizeof(s->header)), arena->round_up);
 561 |    21 |     for (;;) {  // loop until we find a suitable region
 562 |       |       // find the minimum levels that a block of this size must have
 563 |    21 |       int i = LLA_SkiplistLevels(req_rnd, arena->min_size, nullptr) - 1;
 564 |    21 |       if (i < arena->freelist.levels) {        // potential blocks exist
 565 |    15 |         AllocList *before = &arena->freelist;  // predecessor of s
 566 |    15 |         while ((s = Next(i, before, arena)) != nullptr &&
 567 |    15 |                s->header.size < req_rnd) {
 568 |     0 |           before = s;
 569 |     0 |         }
 570 |    15 |         if (s != nullptr) {  // we found a region
 571 |    15 |           break;
 572 |    15 |         }
 573 |    15 |       }
 574 |       |       // we unlock before mmap() both because mmap() may call a callback hook,
 575 |       |       // and because it may be slow.
 576 |     6 |       arena->mu.unlock();
 577 |       |       // mmap generous 64K chunks to decrease
 578 |       |       // the chances/impact of fragmentation:
 579 |     6 |       size_t new_pages_size = RoundUp(req_rnd, arena->pagesize * 16);
 580 |     6 |       void *new_pages;
 581 |       | #ifdef _WIN32
 582 |       |       new_pages = VirtualAlloc(nullptr, new_pages_size,
 583 |       |                                MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
 584 |       |       ABSL_RAW_CHECK(new_pages != nullptr, "VirtualAlloc failed");
 585 |       | #else
 586 |     6 | #ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
 587 |     6 |       if ((arena->flags & LowLevelAlloc::kAsyncSignalSafe) != 0) {
 588 |     0 |         new_pages = base_internal::DirectMmap(nullptr, new_pages_size,
 589 |     0 |             PROT_WRITE | PROT_READ, MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
 590 |     6 |       } else {
 591 |     6 |         new_pages = mmap(nullptr, new_pages_size, PROT_WRITE | PROT_READ,
 592 |     6 |                          MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
 593 |     6 |       }
 594 |       | #else
 595 |       |       new_pages = mmap(nullptr, new_pages_size, PROT_WRITE | PROT_READ,
 596 |       |                        MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
 597 |       | #endif  // ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
 598 |     6 |       if (new_pages == MAP_FAILED) {
 599 |     0 |         ABSL_RAW_LOG(FATAL, "mmap error: %d", errno);
 600 |     0 |       }
 601 |       |
 602 |     6 | #ifdef __linux__
 603 |       | #if defined(PR_SET_VMA) && defined(PR_SET_VMA_ANON_NAME)
 604 |       |       // Attempt to name the allocated address range in /proc/$PID/smaps on
 605 |       |       // Linux.
 606 |       |       //
 607 |       |       // This invocation of prctl() may fail if the Linux kernel was not
 608 |       |       // configured with the CONFIG_ANON_VMA_NAME option.  This is OK since
 609 |       |       // the naming of arenas is primarily a debugging aid.
 610 |       |       prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, new_pages, new_pages_size,
 611 |       |             "absl");
 612 |       | #endif
 613 |     6 | #endif  // __linux__
 614 |     6 | #endif  // _WIN32
 615 |     6 |       arena->mu.lock();
 616 |     6 |       s = reinterpret_cast<AllocList *>(new_pages);
 617 |     6 |       s->header.size = new_pages_size;
 618 |       |       // Pretend the block is allocated; call AddToFreelist() to free it.
 619 |     6 |       s->header.magic = Magic(kMagicAllocated, &s->header);
 620 |     6 |       s->header.arena = arena;
 621 |     6 |       AddToFreelist(&s->levels, arena);  // insert new region into free list
 622 |     6 |     }
 623 |    15 |     AllocList *prev[kMaxLevel];
 624 |    15 |     LLA_SkiplistDelete(&arena->freelist, s, prev);  // remove from free list
 625 |       |     // s points to the first free region that's big enough
 626 |    15 |     if (CheckedAdd(req_rnd, arena->min_size) <= s->header.size) {
 627 |       |       // big enough to split
 628 |    15 |       AllocList *n =
 629 |    15 |           reinterpret_cast<AllocList *>(req_rnd + reinterpret_cast<char *>(s));
 630 |    15 |       n->header.size = s->header.size - req_rnd;
 631 |    15 |       n->header.magic = Magic(kMagicAllocated, &n->header);
 632 |    15 |       n->header.arena = arena;
 633 |    15 |       s->header.size = req_rnd;
 634 |    15 |       AddToFreelist(&n->levels, arena);
 635 |    15 |     }
 636 |    15 |     s->header.magic = Magic(kMagicAllocated, &s->header);
 637 |    15 |     ABSL_RAW_CHECK(s->header.arena == arena, "");
 638 |    15 |     arena->allocation_count++;
 639 |    15 |     section.Leave();
 640 |    15 |     result = &s->levels;
 641 |    15 |   }
 642 |    15 |   ABSL_ANNOTATE_MEMORY_IS_UNINITIALIZED(result, request);
 643 |    15 |   return result;
 644 |    15 | }
 645 |       |
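To make the sizing concrete: on a typical LP64 build the header is 32 bytes, so round_up is 32 and min_size is 64 (these concrete numbers are an assumption about the ABI, not taken from the source). A 10-byte request then reserves a 64-byte block, and a found region splits only if the remainder would itself be at least min_size:

    // Worked numbers for the req_rnd computation in DoAllocWithArena().
    #include <cstdint>
    #include <cstdio>

    int main() {
      const uintptr_t kHeaderSize = 32;  // assumed sizeof(AllocList::Header)
      const uintptr_t kRoundUp = 32;     // lowest power of two >= header size
      const uintptr_t kMinSize = 2 * kRoundUp;
      uintptr_t request = 10;
      uintptr_t req_rnd =
          (request + kHeaderSize + kRoundUp - 1) & ~(kRoundUp - 1);
      printf("req_rnd = %ju\n", static_cast<uintmax_t>(req_rnd));  // 64
      // Split threshold: the region must also fit a min_size remainder.
      printf("smallest splittable region = %ju\n",
             static_cast<uintmax_t>(req_rnd + kMinSize));  // 128
    }
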
 646 |     6 | void *LowLevelAlloc::Alloc(size_t request) {
 647 |     6 |   void *result = DoAllocWithArena(request, DefaultArena());
 648 |     6 |   return result;
 649 |     6 | }
 650 |       |
 651 |     9 | void *LowLevelAlloc::AllocWithArena(size_t request, Arena *arena) {
 652 |     9 |   ABSL_RAW_CHECK(arena != nullptr, "must pass a valid arena");
 653 |     9 |   void *result = DoAllocWithArena(request, arena);
 654 |     9 |   return result;
 655 |     9 | }
 656 |       |
 657 |       | }  // namespace base_internal
 658 |       | ABSL_NAMESPACE_END
 659 |       | }  // namespace absl
 660 |       |
 661 |       | #endif  // ABSL_LOW_LEVEL_ALLOC_MISSING