/src/rocksdb/memory/arena.h
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
//
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.

// Arena is an implementation of the Allocator class. For a small request, it
// allocates memory out of a block of a pre-defined block size. For a large
// request, it uses malloc to directly obtain a block of the requested size.
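//
// A minimal usage sketch (illustrative, not part of the original header):
// a small request is carved out of the arena's current block, while a large
// request gets a dedicated block of exactly that size.
//
//   Arena arena;                            // blocks of kMinBlockSize (4096)
//   char* small = arena.Allocate(64);       // served from the current block
//   char* large = arena.Allocate(1 << 20);  // served by its own block
//   // Everything is released together when `arena` is destroyed.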

#pragma once

#include <cassert>
#include <cstddef>
#include <deque>
#include <memory>

#include "memory/allocator.h"
#include "port/mmap.h"
#include "rocksdb/env.h"

namespace ROCKSDB_NAMESPACE {

class Arena : public Allocator {
 public:
  // No copying allowed
  Arena(const Arena&) = delete;
  void operator=(const Arena&) = delete;

  static constexpr size_t kInlineSize = 2048;
  static constexpr size_t kMinBlockSize = 4096;
  static constexpr size_t kMaxBlockSize = 2u << 30;

  static constexpr unsigned kAlignUnit = alignof(std::max_align_t);
  static_assert((kAlignUnit & (kAlignUnit - 1)) == 0,
                "Alignment unit should be a power of 2");

  // huge_page_size: if 0, don't use huge page TLB. If > 0 (it should be set
  // to a huge page size supported by the system), block allocation will try
  // the huge page TLB first and fall back to the normal case if that fails.
  explicit Arena(size_t block_size = kMinBlockSize,
                 AllocTracker* tracker = nullptr, size_t huge_page_size = 0);
  ~Arena();

  char* Allocate(size_t bytes) override;

  // huge_page_size: if > 0, will try to allocate from the huge page TLB.
  // The argument is the page size to use for the huge page TLB. Bytes are
  // rounded up to a multiple of that page size and allocated through mmap
  // with the anonymous and huge page options; the extra space allocated is
  // wasted. If the huge page allocation fails, it falls back to the normal
  // case. To enable it, huge pages need to be reserved beforehand, e.g.:
  //   sysctl -w vm.nr_hugepages=20
  // See the Linux doc Documentation/vm/hugetlbpage.txt for details.
  // Since huge page allocation can fail (and then falls back to the normal
  // case), failure messages are logged to the logger. So when calling with
  // huge_page_size > 0, we highly recommend passing in a logger; otherwise,
  // the error message is printed directly to stderr.
  char* AllocateAligned(size_t bytes, size_t huge_page_size = 0,
                        Logger* logger = nullptr) override;
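
  // A sketch of a call that opts into huge pages. The 2 MiB value and the
  // `info_log` logger are illustrative assumptions; the size must match a
  // huge page size actually reserved on the system:
  //
  //   Arena arena;
  //   std::shared_ptr<Logger> info_log = ...;  // assumed to exist
  //   char* buf =
  //       arena.AllocateAligned(1 << 20, 2 * 1024 * 1024, info_log.get());
  //
  // If no huge pages are available, this falls back to a normal aligned
  // allocation and reports the failure through info_log.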

  // Returns an estimate of the total memory usage of data allocated
  // by the arena (excluding the space allocated but not yet used for
  // future allocations).
  size_t ApproximateMemoryUsage() const {
    return blocks_memory_ + blocks_.size() * sizeof(char*) -
           alloc_bytes_remaining_;
  }
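
  // A rough worked example of the accounting above (a sketch that ignores
  // the inline block): with one retained 4096-byte block from which 100
  // bytes have been handed out, this returns
  //   4096 + 1 * sizeof(char*) - 3996
  // i.e. about 100 bytes of data plus one pointer of bookkeeping overhead.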

  size_t MemoryAllocatedBytes() const { return blocks_memory_; }

  size_t AllocatedAndUnused() const { return alloc_bytes_remaining_; }

  // If an allocation is too big, we'll allocate an irregular block of the
  // same size as that allocation.
  size_t IrregularBlockNum() const { return irregular_block_num; }

  size_t BlockSize() const override { return kBlockSize; }

  bool IsInInlineBlock() const {
    return blocks_.empty() && huge_blocks_.empty();
  }

  // Check and adjust block_size so that the return value is
  // 1. in the range [kMinBlockSize, kMaxBlockSize], and
  // 2. a multiple of the align unit.
  static size_t OptimizeBlockSize(size_t block_size);
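
  // Illustrative values (a sketch, assuming sizes are rounded up to the
  // next multiple of kAlignUnit, which is 16 on typical 64-bit platforms):
  //   OptimizeBlockSize(1000)     -> 4096   (clamped up to kMinBlockSize)
  //   OptimizeBlockSize(65537)    -> 65552  (rounded up to a multiple of 16)
  //   OptimizeBlockSize(SIZE_MAX) -> kMaxBlockSize (clamped down)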

 private:
  alignas(std::max_align_t) char inline_block_[kInlineSize];
  // Number of bytes allocated in one block
  const size_t kBlockSize;
  // Allocated memory blocks
  std::deque<std::unique_ptr<char[]>> blocks_;
  // Huge page allocations
  std::deque<MemMapping> huge_blocks_;
  size_t irregular_block_num = 0;

  // Stats for the currently active block.
  // For each block, we allocate aligned memory chunks from one end and
  // unaligned memory chunks from the other end. Otherwise the memory
  // wasted on alignment would be higher if both kinds of chunks were
  // allocated from the same direction.
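  //
  // Illustrative layout of the active block (aligned chunks grow up from
  // the front, unaligned chunks grow down from the back, as Allocate()
  // below shows; alloc_bytes_remaining_ is the gap between the pointers):
  //
  //   | aligned chunks ... | --> free space <-- | ... unaligned chunks |
  //                        ^                    ^
  //               aligned_alloc_ptr_    unaligned_alloc_ptr_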
  char* unaligned_alloc_ptr_ = nullptr;
  char* aligned_alloc_ptr_ = nullptr;
  // How many bytes are left in the currently active block?
  size_t alloc_bytes_remaining_ = 0;

  size_t hugetlb_size_ = 0;

  char* AllocateFromHugePage(size_t bytes);
  char* AllocateFallback(size_t bytes, bool aligned);
  char* AllocateNewBlock(size_t block_bytes);

  // Bytes of memory in blocks allocated so far
  size_t blocks_memory_ = 0;
  // Non-owned
  AllocTracker* tracker_;
};

inline char* Arena::Allocate(size_t bytes) {
  // The semantics of what to return are a bit messy if we allow
  // 0-byte allocations, so we disallow them here (we don't need
  // them for our internal use).
  assert(bytes > 0);
  if (bytes <= alloc_bytes_remaining_) {
    unaligned_alloc_ptr_ -= bytes;
    alloc_bytes_remaining_ -= bytes;
    return unaligned_alloc_ptr_;
  }
  return AllocateFallback(bytes, false /* unaligned */);
}

// Like std::destroy_at but a callable type
template <typename T>
struct Destroyer {
  void operator()(T* ptr) { ptr->~T(); }
};

// Like std::unique_ptr but only placement-deletes the object (for
// objects allocated on an arena).
template <typename T>
using ScopedArenaPtr = std::unique_ptr<T, Destroyer<T>>;
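
// Usage sketch (`Foo` is a placeholder type): construct the object in
// arena-owned memory with placement new and let ScopedArenaPtr run the
// destructor; the underlying bytes stay owned by the arena and are only
// reclaimed when the arena itself is destroyed.
//
//   Arena arena;
//   char* mem = arena.AllocateAligned(sizeof(Foo));
//   ScopedArenaPtr<Foo> foo(new (mem) Foo());
//   // ~Foo() runs when `foo` goes out of scope; no delete/free happens.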

}  // namespace ROCKSDB_NAMESPACE