Coverage Report

Created: 2024-09-08 07:17

/src/rocksdb/memory/arena.cc
 Line |  Count | Source
    1 |        | //  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
    2 |        | //  This source code is licensed under both the GPLv2 (found in the
    3 |        | //  COPYING file in the root directory) and Apache 2.0 License
    4 |        | //  (found in the LICENSE.Apache file in the root directory).
    5 |        | //
    6 |        | // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
    7 |        | // Use of this source code is governed by a BSD-style license that can be
    8 |        | // found in the LICENSE file. See the AUTHORS file for names of contributors.
    9 |        |
   10 |        | #include "memory/arena.h"
   11 |        |
   12 |        | #include <algorithm>
   13 |        |
   14 |        | #include "logging/logging.h"
   15 |        | #include "port/malloc.h"
   16 |        | #include "port/port.h"
   17 |        | #include "rocksdb/env.h"
   18 |        | #include "test_util/sync_point.h"
   19 |        | #include "util/string_util.h"
   20 |        |
   21 |        | namespace ROCKSDB_NAMESPACE {
   22 |        |
   23 |  95.1k | size_t Arena::OptimizeBlockSize(size_t block_size) {
   24 |        |   // Make sure block_size is in optimal range
   25 |  95.1k |   block_size = std::max(Arena::kMinBlockSize, block_size);
   26 |  95.1k |   block_size = std::min(Arena::kMaxBlockSize, block_size);
   27 |        |
   28 |        |   // Make sure block_size is a multiple of kAlignUnit
   29 |  95.1k |   if (block_size % kAlignUnit != 0) {
   30 |      0 |     block_size = (1 + block_size / kAlignUnit) * kAlignUnit;
   31 |      0 |   }
   32 |        |
   33 |  95.1k |   return block_size;
   34 |  95.1k | }
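
A standalone check of the rounding step on source lines 29-31. This is a sketch: the kAlignUnit value of 16 is an assumption (the real constant is defined by the Arena class, typically alignof(std::max_align_t)).

// Hypothetical check of the round-up-to-kAlignUnit arithmetic above,
// assuming kAlignUnit == 16.
#include <cassert>
#include <cstddef>

int main() {
  constexpr size_t kAlignUnit = 16;
  size_t block_size = 8193;  // not a multiple of kAlignUnit
  block_size = (1 + block_size / kAlignUnit) * kAlignUnit;
  assert(block_size == 8208);  // rounded up to the next multiple of 16
  return 0;
}
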
   35 |        |
   36 |        | Arena::Arena(size_t block_size, AllocTracker* tracker, size_t huge_page_size)
   37 |  77.7k |     : kBlockSize(OptimizeBlockSize(block_size)), tracker_(tracker) {
   38 |  77.7k |   assert(kBlockSize >= kMinBlockSize && kBlockSize <= kMaxBlockSize &&
   39 |  77.7k |          kBlockSize % kAlignUnit == 0);
   40 |  77.7k |   TEST_SYNC_POINT_CALLBACK("Arena::Arena:0", const_cast<size_t*>(&kBlockSize));
   41 |  77.7k |   alloc_bytes_remaining_ = sizeof(inline_block_);
   42 |  77.7k |   blocks_memory_ += alloc_bytes_remaining_;
   43 |  77.7k |   aligned_alloc_ptr_ = inline_block_;
   44 |  77.7k |   unaligned_alloc_ptr_ = inline_block_ + alloc_bytes_remaining_;
   45 |  77.7k |   if (MemMapping::kHugePageSupported) {
   46 |  77.7k |     hugetlb_size_ = huge_page_size;
   47 |  77.7k |     if (hugetlb_size_ && kBlockSize > hugetlb_size_) {
   48 |      0 |       hugetlb_size_ = ((kBlockSize - 1U) / hugetlb_size_ + 1U) * hugetlb_size_;
   49 |      0 |     }
   50 |  77.7k |   }
   51 |  77.7k |   if (tracker_ != nullptr) {
   52 |      0 |     tracker_->Allocate(kInlineSize);
   53 |      0 |   }
   54 |  77.7k | }
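
The huge-page rounding on source line 48 can be checked in isolation: the huge allocation size is rounded up to the smallest multiple of the huge page size that still covers one block. The sizes below are illustrative assumptions, not values taken from this report.

// Hypothetical worked example of the rounding on line 48 above.
#include <cassert>
#include <cstddef>

int main() {
  size_t kBlockSize = 5u << 20;    // 5 MiB block size (example value)
  size_t hugetlb_size = 2u << 20;  // 2 MiB huge pages (example value)
  if (hugetlb_size && kBlockSize > hugetlb_size) {
    hugetlb_size = ((kBlockSize - 1U) / hugetlb_size + 1U) * hugetlb_size;
  }
  assert(hugetlb_size == (6u << 20));  // rounded up to 6 MiB
  return 0;
}
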
   55 |        |
   56 |  77.7k | Arena::~Arena() {
   57 |  77.7k |   if (tracker_ != nullptr) {
   58 |      0 |     assert(tracker_->is_freed());
   59 |      0 |     tracker_->FreeMem();
   60 |      0 |   }
   61 |  77.7k | }
   62 |        |
   63 |  6.93k | char* Arena::AllocateFallback(size_t bytes, bool aligned) {
   64 |  6.93k |   if (bytes > kBlockSize / 4) {
   65 |  1.20k |     ++irregular_block_num;
   66 |        |     // Object is more than a quarter of our block size.  Allocate it separately
   67 |        |     // to avoid wasting too much space in leftover bytes.
   68 |  1.20k |     return AllocateNewBlock(bytes);
   69 |  1.20k |   }
   70 |        |
   71 |        |   // We waste the remaining space in the current block.
   72 |  5.72k |   size_t size = 0;
   73 |  5.72k |   char* block_head = nullptr;
   74 |  5.72k |   if (MemMapping::kHugePageSupported && hugetlb_size_ > 0) {
   75 |      0 |     size = hugetlb_size_;
   76 |      0 |     block_head = AllocateFromHugePage(size);
   77 |      0 |   }
   78 |  5.72k |   if (!block_head) {
   79 |  5.72k |     size = kBlockSize;
   80 |  5.72k |     block_head = AllocateNewBlock(size);
   81 |  5.72k |   }
   82 |  5.72k |   alloc_bytes_remaining_ = size - bytes;
   83 |        |
   84 |  5.72k |   if (aligned) {
   85 |  5.72k |     aligned_alloc_ptr_ = block_head + bytes;
   86 |  5.72k |     unaligned_alloc_ptr_ = block_head + size;
   87 |  5.72k |     return block_head;
   88 |  5.72k |   } else {
   89 |      0 |     aligned_alloc_ptr_ = block_head;
   90 |      0 |     unaligned_alloc_ptr_ = block_head + size - bytes;
   91 |      0 |     return unaligned_alloc_ptr_;
   92 |      0 |   }
   93 |  5.72k | }
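
The pointer bookkeeping on source lines 84-92 can be read as a two-ended block: aligned requests are carved from the front of the current block, unaligned requests from the back. A simplified, hypothetical sketch (no block list, no huge pages, no tracker; names are illustrative):

// Simplified sketch of the front/back bookkeeping used above.
#include <cstddef>

struct TwoEndedBlock {
  char* aligned_ptr;    // advances forward from the start of the block
  char* unaligned_ptr;  // retreats backward from the end of the block
  size_t remaining;

  TwoEndedBlock(char* head, size_t size)
      : aligned_ptr(head), unaligned_ptr(head + size), remaining(size) {}

  char* TakeAligned(size_t bytes) {
    char* r = aligned_ptr;
    aligned_ptr += bytes;
    remaining -= bytes;
    return r;
  }

  char* TakeUnaligned(size_t bytes) {
    unaligned_ptr -= bytes;
    remaining -= bytes;
    return unaligned_ptr;
  }
};
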
   94 |        |
   95 |      0 | char* Arena::AllocateFromHugePage(size_t bytes) {
   96 |      0 |   MemMapping mm = MemMapping::AllocateHuge(bytes);
   97 |      0 |   auto addr = static_cast<char*>(mm.Get());
   98 |      0 |   if (addr) {
   99 |      0 |     huge_blocks_.push_back(std::move(mm));
  100 |      0 |     blocks_memory_ += bytes;
  101 |      0 |     if (tracker_ != nullptr) {
  102 |      0 |       tracker_->Allocate(bytes);
  103 |      0 |     }
  104 |      0 |   }
  105 |      0 |   return addr;
  106 |      0 | }
  107 |        |
  108 |        | char* Arena::AllocateAligned(size_t bytes, size_t huge_page_size,
  109 |  2.33M |                              Logger* logger) {
  110 |  2.33M |   if (MemMapping::kHugePageSupported && hugetlb_size_ > 0 &&
  111 |  2.33M |       huge_page_size > 0 && bytes > 0) {
  112 |        |     // Allocate from a huge page TLB table.
  113 |      0 |     size_t reserved_size =
  114 |      0 |         ((bytes - 1U) / huge_page_size + 1U) * huge_page_size;
  115 |      0 |     assert(reserved_size >= bytes);
  116 |        |
  117 |      0 |     char* addr = AllocateFromHugePage(reserved_size);
  118 |      0 |     if (addr == nullptr) {
  119 |      0 |       ROCKS_LOG_WARN(logger,
  120 |      0 |                      "AllocateAligned fail to allocate huge TLB pages: %s",
  121 |      0 |                      errnoStr(errno).c_str());
  122 |        |       // fall back to malloc
  123 |      0 |     } else {
  124 |      0 |       return addr;
  125 |      0 |     }
  126 |      0 |   }
  127 |        |
  128 |  2.33M |   size_t current_mod =
  129 |  2.33M |       reinterpret_cast<uintptr_t>(aligned_alloc_ptr_) & (kAlignUnit - 1);
  130 |  2.33M |   size_t slop = (current_mod == 0 ? 0 : kAlignUnit - current_mod);
  131 |  2.33M |   size_t needed = bytes + slop;
  132 |  2.33M |   char* result;
  133 |  2.33M |   if (needed <= alloc_bytes_remaining_) {
  134 |  2.32M |     result = aligned_alloc_ptr_ + slop;
  135 |  2.32M |     aligned_alloc_ptr_ += needed;
  136 |  2.32M |     alloc_bytes_remaining_ -= needed;
  137 |  2.32M |   } else {
  138 |        |     // AllocateFallback always returns aligned memory
  139 |  6.93k |     result = AllocateFallback(bytes, true /* aligned */);
  140 |  6.93k |   }
  141 |  2.33M |   assert((reinterpret_cast<uintptr_t>(result) & (kAlignUnit - 1)) == 0);
  142 |  2.33M |   return result;
  143 |  2.33M | }
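
A worked example of the slop computation on source lines 128-131, assuming kAlignUnit == 16 and a hypothetical pointer value: an allocation pointer at an address congruent to 9 mod 16 needs 7 bytes of padding before the next aligned chunk.

// Hypothetical check of the alignment padding ("slop") arithmetic above.
#include <cassert>
#include <cstddef>
#include <cstdint>

int main() {
  constexpr size_t kAlignUnit = 16;
  uintptr_t fake_ptr = 0x1009;  // 0x1009 % 16 == 9
  size_t current_mod = fake_ptr & (kAlignUnit - 1);
  size_t slop = (current_mod == 0 ? 0 : kAlignUnit - current_mod);
  assert(slop == 7);
  size_t bytes = 100;
  assert(bytes + slop == 107);  // "needed"
  return 0;
}
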
  144 |        |
  145 |  6.93k | char* Arena::AllocateNewBlock(size_t block_bytes) {
  146 |        |   // NOTE: std::make_unique zero-initializes the block so is not appropriate
  147 |        |   // here
  148 |  6.93k |   char* block = new char[block_bytes];
  149 |  6.93k |   blocks_.push_back(std::unique_ptr<char[]>(block));
  150 |        |
  151 |  6.93k |   size_t allocated_size;
  152 |  6.93k | #ifdef ROCKSDB_MALLOC_USABLE_SIZE
  153 |  6.93k |   allocated_size = malloc_usable_size(block);
  154 |        | #ifndef NDEBUG
  155 |        |   // It's hard to predict what malloc_usable_size() returns.
  156 |        |   // A callback can allow users to change the costed size.
  157 |        |   std::pair<size_t*, size_t*> pair(&allocated_size, &block_bytes);
  158 |        |   TEST_SYNC_POINT_CALLBACK("Arena::AllocateNewBlock:0", &pair);
  159 |        | #endif  // NDEBUG
  160 |        | #else
  161 |        |   allocated_size = block_bytes;
  162 |        | #endif  // ROCKSDB_MALLOC_USABLE_SIZE
  163 |  6.93k |   blocks_memory_ += allocated_size;
  164 |  6.93k |   if (tracker_ != nullptr) {
  165 |      0 |     tracker_->Allocate(allocated_size);
  166 |      0 |   }
  167 |  6.93k |   return block;
  168 |  6.93k | }
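
A standalone illustration of why allocated_size on source line 153 can differ from block_bytes: malloc_usable_size() reports the allocator's actual usable capacity, which may exceed the requested size. This sketch assumes glibc (where malloc_usable_size lives in <malloc.h>); in RocksDB the call is guarded by ROCKSDB_MALLOC_USABLE_SIZE.

// Hypothetical glibc-only demonstration of malloc_usable_size().
#include <cstdio>
#include <cstdlib>
#include <malloc.h>

int main() {
  void* block = std::malloc(1000);
  // Typically prints a value >= 1000; the exact number depends on the allocator.
  std::printf("usable size: %zu\n", malloc_usable_size(block));
  std::free(block);
  return 0;
}
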
  169 |        |
  170 |        | }  // namespace ROCKSDB_NAMESPACE
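
For reference, a minimal usage sketch of the class covered above. Arena is an internal RocksDB API; the include path and constructor/method signatures are taken from this listing, and all arguments are passed explicitly because default values are not visible in this file.

// Minimal usage sketch (illustrative only).
#include "memory/arena.h"

void ArenaUsageSketch() {
  ROCKSDB_NAMESPACE::Arena arena(64 * 1024 /* block_size */,
                                 nullptr /* tracker */,
                                 0 /* huge_page_size */);
  // Small aligned requests come out of the inline block first, then out of
  // kBlockSize-sized heap blocks; everything is freed when `arena` is destroyed.
  char* buf = arena.AllocateAligned(128, 0 /* huge_page_size */,
                                    nullptr /* logger */);
  (void)buf;
}
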