Coverage Report

Created: 2025-10-26 07:13

/src/rocksdb/memory/arena.cc
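
Counts are execution counts from the profiled run, abbreviated with k (thousands) and M (millions). A blank count marks a non-executable line (a comment, a blank line, or a declaration continued on the next line); a count of 0 marks executable code that never ran, i.e. an uncovered region.
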
 Line| Count|Source
    1|      |//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
    2|      |//  This source code is licensed under both the GPLv2 (found in the
    3|      |//  COPYING file in the root directory) and Apache 2.0 License
    4|      |//  (found in the LICENSE.Apache file in the root directory).
    5|      |//
    6|      |// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
    7|      |// Use of this source code is governed by a BSD-style license that can be
    8|      |// found in the LICENSE file. See the AUTHORS file for names of contributors.
    9|      |
   10|      |#include "memory/arena.h"
   11|      |
   12|      |#include <algorithm>
   13|      |
   14|      |#include "logging/logging.h"
   15|      |#include "port/malloc.h"
   16|      |#include "port/port.h"
   17|      |#include "rocksdb/env.h"
   18|      |#include "test_util/sync_point.h"
   19|      |#include "util/string_util.h"
   20|      |
   21|      |namespace ROCKSDB_NAMESPACE {
   22|      |
   23|  619k|size_t Arena::OptimizeBlockSize(size_t block_size) {
   24|      |  // Make sure block_size is in optimal range
   25|  619k|  block_size = std::max(Arena::kMinBlockSize, block_size);
   26|  619k|  block_size = std::min(Arena::kMaxBlockSize, block_size);
   27|      |
   28|      |  // make sure block_size is a multiple of kAlignUnit
   29|  619k|  if (block_size % kAlignUnit != 0) {
   30|     0|    block_size = (1 + block_size / kAlignUnit) * kAlignUnit;
   31|     0|  }
   32|      |
   33|  619k|  return block_size;
   34|  619k|}
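
The only uncovered code in OptimizeBlockSize is the round-up branch (lines 30-31): all 619k calls in this run passed a block size that was already a multiple of kAlignUnit. A minimal standalone sketch of the clamp-then-round-up logic, with illustrative stand-ins for the constants (the real kMinBlockSize, kMaxBlockSize, and kAlignUnit are declared in memory/arena.h):

    #include <algorithm>
    #include <cassert>
    #include <cstddef>

    // Illustrative stand-ins; the real values live in memory/arena.h.
    constexpr std::size_t kMinBlockSize = 4096;
    constexpr std::size_t kMaxBlockSize = std::size_t{2} << 30;
    constexpr std::size_t kAlignUnit = alignof(std::max_align_t);

    std::size_t OptimizeBlockSize(std::size_t block_size) {
      // Clamp into [kMinBlockSize, kMaxBlockSize].
      block_size = std::max(kMinBlockSize, block_size);
      block_size = std::min(kMaxBlockSize, block_size);
      // Round up to the next multiple of kAlignUnit (the branch that is
      // uncovered in the report above).
      if (block_size % kAlignUnit != 0) {
        block_size = (1 + block_size / kAlignUnit) * kAlignUnit;
      }
      return block_size;
    }

    int main() {
      assert(OptimizeBlockSize(1) == kMinBlockSize);  // clamped up to the minimum
      // With kAlignUnit = 16 (typical on x86-64): 4097 -> (1 + 4097/16) * 16 = 4112.
      assert(OptimizeBlockSize(4097) % kAlignUnit == 0);
    }
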
   35|      |
   36|      |Arena::Arena(size_t block_size, AllocTracker* tracker, size_t huge_page_size)
   37|  505k|    : kBlockSize(OptimizeBlockSize(block_size)), tracker_(tracker) {
   38|  505k|  assert(kBlockSize >= kMinBlockSize && kBlockSize <= kMaxBlockSize &&
   39|  505k|         kBlockSize % kAlignUnit == 0);
   40|  505k|  TEST_SYNC_POINT_CALLBACK("Arena::Arena:0", const_cast<size_t*>(&kBlockSize));
   41|  505k|  alloc_bytes_remaining_ = sizeof(inline_block_);
   42|  505k|  blocks_memory_ += alloc_bytes_remaining_;
   43|  505k|  aligned_alloc_ptr_ = inline_block_;
   44|  505k|  unaligned_alloc_ptr_ = inline_block_ + alloc_bytes_remaining_;
   45|  505k|  if (MemMapping::kHugePageSupported) {
   46|  505k|    hugetlb_size_ = huge_page_size;
   47|  505k|    if (hugetlb_size_ && kBlockSize > hugetlb_size_) {
   48|     0|      hugetlb_size_ = ((kBlockSize - 1U) / hugetlb_size_ + 1U) * hugetlb_size_;
   49|     0|    }
   50|  505k|  }
   51|  505k|  if (tracker_ != nullptr) {
   52|     0|    tracker_->Allocate(kInlineSize);
   53|     0|  }
   54|  505k|}
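
The constructor bootstraps from inline_block_, a buffer embedded in the Arena object itself, so a fresh arena can serve small requests before any heap block exists: aligned requests are carved from the front (aligned_alloc_ptr_) and unaligned requests from the back (unaligned_alloc_ptr_). The two count-0 regions are environmental rather than logic gaps: lines 48-49 run only when a nonzero huge_page_size is smaller than kBlockSize, and lines 52-53 only when an AllocTracker is attached; neither happened in this run. A hypothetical usage sketch, assuming the trailing parameters of the constructor and of AllocateAligned have defaults declared in memory/arena.h:

    #include "memory/arena.h"

    using ROCKSDB_NAMESPACE::Arena;

    void Example() {
      // No tracker and no huge pages: the same configuration this report shows.
      Arena arena(/*block_size=*/8192, /*tracker=*/nullptr, /*huge_page_size=*/0);
      char* p = arena.AllocateAligned(100);
      // p remains valid until `arena` is destroyed; an Arena never frees early.
      (void)p;
    }
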
   55|      |
   56|  505k|Arena::~Arena() {
   57|  505k|  if (tracker_ != nullptr) {
   58|     0|    assert(tracker_->is_freed());
   59|     0|    tracker_->FreeMem();
   60|     0|  }
   61|  505k|}
   62|      |
   63| 26.2k|char* Arena::AllocateFallback(size_t bytes, bool aligned) {
   64| 26.2k|  if (bytes > kBlockSize / 4) {
   65| 5.11k|    ++irregular_block_num;
   66|      |    // Object is more than a quarter of our block size.  Allocate it separately
   67|      |    // to avoid wasting too much space in leftover bytes.
   68| 5.11k|    return AllocateNewBlock(bytes);
   69| 5.11k|  }
   70|      |
   71|      |  // We waste the remaining space in the current block.
   72| 21.0k|  size_t size = 0;
   73| 21.0k|  char* block_head = nullptr;
   74| 21.0k|  if (MemMapping::kHugePageSupported && hugetlb_size_ > 0) {
   75|     0|    size = hugetlb_size_;
   76|     0|    block_head = AllocateFromHugePage(size);
   77|     0|  }
   78| 21.0k|  if (!block_head) {
   79| 21.0k|    size = kBlockSize;
   80| 21.0k|    block_head = AllocateNewBlock(size);
   81| 21.0k|  }
   82| 21.0k|  alloc_bytes_remaining_ = size - bytes;
   83|      |
   84| 21.0k|  if (aligned) {
   85| 21.0k|    aligned_alloc_ptr_ = block_head + bytes;
   86| 21.0k|    unaligned_alloc_ptr_ = block_head + size;
   87| 21.0k|    return block_head;
   88| 21.0k|  } else {
   89|     0|    aligned_alloc_ptr_ = block_head;
   90|     0|    unaligned_alloc_ptr_ = block_head + size - bytes;
   91|     0|    return unaligned_alloc_ptr_;
   92|     0|  }
   93| 21.0k|}
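
AllocateFallback shows the quarter-block heuristic at work: of the 26.2k calls, 5.11k requested more than kBlockSize / 4 and received a dedicated block, so one large object never strands most of a shared block, while the remaining 21.0k abandoned the old block's tail and opened a fresh one. The count-0 else branch (lines 89-92) confirms the fallback was only reached with aligned == true in this run, i.e. via AllocateAligned. A toy sketch of the two-ended carving scheme (not RocksDB code):

    #include <cassert>
    #include <cstddef>

    // Toy two-ended bump allocator over one fixed block, mirroring how Arena
    // keeps aligned_alloc_ptr_ at the front and unaligned_alloc_ptr_ at the back.
    struct TwoEndedBlock {
      char* front;  // aligned side, grows upward from the start
      char* back;   // unaligned side, grows downward from the end
      TwoEndedBlock(char* block, std::size_t size)
          : front(block), back(block + size) {}

      char* TakeFront(std::size_t bytes) {
        char* r = front;
        front += bytes;
        return r;
      }
      char* TakeBack(std::size_t bytes) {
        back -= bytes;
        return back;
      }
      std::size_t Remaining() const {
        return static_cast<std::size_t>(back - front);
      }
    };

    int main() {
      char buf[64];
      TwoEndedBlock b(buf, sizeof(buf));
      char* a = b.TakeFront(16);  // front moves to buf + 16
      char* u = b.TakeBack(8);    // back moves to buf + 56
      assert(a == buf && u == buf + 56 && b.Remaining() == 40);
    }
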
   94|      |
   95|     0|char* Arena::AllocateFromHugePage(size_t bytes) {
   96|     0|  MemMapping mm = MemMapping::AllocateHuge(bytes);
   97|     0|  auto addr = static_cast<char*>(mm.Get());
   98|     0|  if (addr) {
   99|     0|    huge_blocks_.push_back(std::move(mm));
  100|     0|    blocks_memory_ += bytes;
  101|     0|    if (tracker_ != nullptr) {
  102|     0|      tracker_->Allocate(bytes);
  103|     0|    }
  104|     0|  }
  105|     0|  return addr;
  106|     0|}
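
AllocateFromHugePage is entirely uncovered, which is consistent with hugetlb_size_ staying 0 at every check site (lines 74 and 110): this run never configured a huge page size. Exercising it needs a platform where MemMapping::kHugePageSupported is true plus a caller passing a nonzero huge page size; for memtable arenas that is typically wired through RocksDB's memtable_huge_page_size option, though which knob applies depends on the embedding code rather than on this file.
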
  107|      |
  108|      |char* Arena::AllocateAligned(size_t bytes, size_t huge_page_size,
  109| 3.03M|                             Logger* logger) {
  110| 3.03M|  if (MemMapping::kHugePageSupported && hugetlb_size_ > 0 &&
  111|     0|      huge_page_size > 0 && bytes > 0) {
  112|      |    // Allocate from a huge page TLB table.
  113|     0|    size_t reserved_size =
  114|     0|        ((bytes - 1U) / huge_page_size + 1U) * huge_page_size;
  115|     0|    assert(reserved_size >= bytes);
  116|      |
  117|     0|    char* addr = AllocateFromHugePage(reserved_size);
  118|     0|    if (addr == nullptr) {
  119|     0|      ROCKS_LOG_WARN(logger,
  120|     0|                     "AllocateAligned fail to allocate huge TLB pages: %s",
  121|     0|                     errnoStr(errno).c_str());
  122|      |      // fall back to malloc
  123|     0|    } else {
  124|     0|      return addr;
  125|     0|    }
  126|     0|  }
  127|      |
  128| 3.03M|  size_t current_mod =
  129| 3.03M|      reinterpret_cast<uintptr_t>(aligned_alloc_ptr_) & (kAlignUnit - 1);
  130| 3.03M|  size_t slop = (current_mod == 0 ? 0 : kAlignUnit - current_mod);
  131| 3.03M|  size_t needed = bytes + slop;
  132| 3.03M|  char* result;
  133| 3.03M|  if (needed <= alloc_bytes_remaining_) {
  134| 3.00M|    result = aligned_alloc_ptr_ + slop;
  135| 3.00M|    aligned_alloc_ptr_ += needed;
  136| 3.00M|    alloc_bytes_remaining_ -= needed;
  137| 3.00M|  } else {
  138|      |    // AllocateFallback always returns aligned memory
  139| 26.2k|    result = AllocateFallback(bytes, true /* aligned */);
  140| 26.2k|  }
  141| 3.03M|  assert((reinterpret_cast<uintptr_t>(result) & (kAlignUnit - 1)) == 0);
  142| 3.03M|  return result;
  143| 3.03M|}
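
The fast path dominates AllocateAligned: 3.00M of the 3.03M calls were satisfied from the current block, and only 26.2k (under 1%) fell through to AllocateFallback. The slop computation relies on kAlignUnit being a power of two, so masking with kAlignUnit - 1 extracts the pointer's misalignment without a division. A standalone sketch of that arithmetic with worked numbers (values illustrative):

    #include <cassert>
    #include <cstdint>

    int main() {
      const std::uintptr_t kAlignUnit = 16;  // must be a power of two for the mask trick
      std::uintptr_t ptr = 0x1003;           // pretend bump pointer, 3 bytes past aligned

      std::uintptr_t current_mod = ptr & (kAlignUnit - 1);                      // 3
      std::uintptr_t slop = (current_mod == 0 ? 0 : kAlignUnit - current_mod);  // 13
      std::uintptr_t result = ptr + slop;                                      // 0x1010

      assert((result & (kAlignUnit - 1)) == 0);  // result is 16-byte aligned
    }
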
  144|      |
  145| 26.2k|char* Arena::AllocateNewBlock(size_t block_bytes) {
  146|      |  // NOTE: std::make_unique zero-initializes the block, so it is not
  147|      |  // appropriate here
  148| 26.2k|  char* block = new char[block_bytes];
  149| 26.2k|  blocks_.push_back(std::unique_ptr<char[]>(block));
  150|      |
  151| 26.2k|  size_t allocated_size;
  152| 26.2k|#ifdef ROCKSDB_MALLOC_USABLE_SIZE
  153| 26.2k|  allocated_size = malloc_usable_size(block);
  154|      |#ifndef NDEBUG
  155|      |  // It's hard to predict what malloc_usable_size() returns.
  156|      |  // A callback can allow users to change the costed size.
  157|      |  std::pair<size_t*, size_t*> pair(&allocated_size, &block_bytes);
  158|      |  TEST_SYNC_POINT_CALLBACK("Arena::AllocateNewBlock:0", &pair);
  159|      |#endif  // NDEBUG
  160|      |#else
  161|      |  allocated_size = block_bytes;
  162|      |#endif  // ROCKSDB_MALLOC_USABLE_SIZE
  163| 26.2k|  blocks_memory_ += allocated_size;
  164| 26.2k|  if (tracker_ != nullptr) {
  165|     0|    tracker_->Allocate(allocated_size);
  166|     0|  }
  167| 26.2k|  return block;
  168| 26.2k|}
  169|      |
  170|      |}  // namespace ROCKSDB_NAMESPACE
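
Two details in AllocateNewBlock deserve a note. First, the NOTE at line 146: std::make_unique<char[]>(n) value-initializes, i.e. zeroes, the array, whereas new char[n] leaves the bytes indeterminate; since the arena overwrites everything it hands out, it takes the cheaper route and wraps the raw pointer in a unique_ptr afterwards. Second, when ROCKSDB_MALLOC_USABLE_SIZE is defined, blocks_memory_ is charged with malloc_usable_size(block) rather than the requested size, so the accounting reflects the allocator's actual footprint. A small demonstration of the initialization difference:

    #include <cassert>
    #include <memory>

    int main() {
      // make_unique<char[]> value-initializes: every byte is guaranteed zero.
      auto zeroed = std::make_unique<char[]>(64);
      for (int i = 0; i < 64; ++i) assert(zeroed[i] == 0);

      // new char[64] default-initializes: contents are indeterminate, which is
      // cheaper; this is the route Arena takes before wrapping the pointer.
      std::unique_ptr<char[]> raw(new char[64]);
      // (Reading raw[i] before writing to it would be undefined behavior.)
    }
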