/src/hermes/lib/VM/StorageProvider.cpp
Line | Count | Source (jump to first uncovered line) |
1 | | /* |
2 | | * Copyright (c) Meta Platforms, Inc. and affiliates. |
3 | | * |
4 | | * This source code is licensed under the MIT license found in the |
5 | | * LICENSE file in the root directory of this source tree. |
6 | | */ |
7 | | |
8 | | #include "hermes/VM/StorageProvider.h" |
9 | | |
10 | | #include "hermes/Support/CheckedMalloc.h" |
11 | | #include "hermes/Support/Compiler.h" |
12 | | #include "hermes/Support/OSCompat.h" |
13 | | #include "hermes/VM/AlignedStorage.h" |
14 | | |
15 | | #include "llvh/ADT/DenseMap.h" |
16 | | #include "llvh/Support/ErrorHandling.h" |
17 | | #include "llvh/Support/MathExtras.h" |
18 | | |
19 | | #include <cassert> |
20 | | #include <limits> |
21 | | #include <random> |
22 | | #include <stack> |
23 | | #pragma GCC diagnostic push |
24 | | |
25 | | #ifdef HERMES_COMPILER_SUPPORTS_WSHORTEN_64_TO_32 |
26 | | #pragma GCC diagnostic ignored "-Wshorten-64-to-32" |
27 | | #endif |
28 | | |
29 | | #if (defined(__linux__) || defined(__ANDROID__)) && defined(__aarch64__) |
30 | | /* On Linux on ARM64 we most likely have at least 39 bits of virtual address |
31 | | * space https://github.com/torvalds/linux/blob/v6.7/arch/arm64/Kconfig#L1262 If |
32 | | * our mmap hint is above 2**39 it will likely fail. */ |
33 | | #define MAX_ADDR_HINT 0x37FFFFFFFF |
34 | | #elif defined(__APPLE__) && defined(__aarch64__) |
35 | | /* On ios/arm64 assume we have at least 39 bits of virtual address space ( |
36 | | * similar to linux on arm64). This should be true for all iOS versions >=14 |
37 | | * (https://github.com/golang/go/issues/46860), older versions <14 are |
38 | | * unsupported. Note that the effective addressable space might vary, depending |
39 | | * on app's entitlements as well as various other factors, hence we go for a |
40 | | * conservative 39 bit address space limit, which is sufficient for most |
41 | | * applications and should be good enough for this purpose. |
42 | | */ |
43 | | #define MAX_ADDR_HINT 0x37FFFFFFFF |
44 | | #elif (defined(__linux__) || defined(__ANDROID__)) && defined(__amd64__) |
45 | 47 | #define MAX_ADDR_HINT 0x3FFFFFFFFFFF |
46 | | #elif defined(_WIN64) |
47 | | /* On Windows use a 37 bit address space limit as this is the lowest |
48 | | * configuration for Windows Home |
49 | | * https://learn.microsoft.com/en-us/windows/win32/memory/memory-limits-for-windows-releases |
50 | | */ |
51 | | #define MAX_ADDR_HINT 0x1FFFFFFFFF |
52 | | #else |
53 | | /* For other non-explicitly listed configuration, be extra conservative and use |
54 | | * a 32 bit address space limit. */ |
55 | | #define MAX_ADDR_HINT 0xFFFFFFFF |
56 | | #endif |
57 | | |
58 | | namespace hermes { |
59 | | namespace vm { |
60 | | |
61 | | namespace { |
62 | | |
63 | 0 | bool isAligned(void *p) { |
64 | 0 | return (reinterpret_cast<uintptr_t>(p) & (AlignedStorage::size() - 1)) == 0; |
65 | 0 | } |
66 | | |
67 | 47 | char *alignAlloc(void *p) { |
68 | 47 | return reinterpret_cast<char *>( |
69 | 47 | llvh::alignTo(reinterpret_cast<uintptr_t>(p), AlignedStorage::size())); |
70 | 47 | } |
71 | | |
72 | 47 | void *getMmapHint() { |
73 | 47 | uintptr_t addr = std::random_device()(); |
74 | 47 | if constexpr (sizeof(uintptr_t) >= 8) { |
75 | | // std::random_device() yields an unsigned int, so combine two. |
76 | 47 | addr = (addr << 32) | std::random_device()(); |
77 | | // Don't use the entire address space, to ensure this is a valid address. |
78 | 47 | addr &= MAX_ADDR_HINT; |
79 | 47 | } |
80 | 47 | return alignAlloc(reinterpret_cast<void *>(addr)); |
81 | 47 | } |
82 | | |
83 | | class VMAllocateStorageProvider final : public StorageProvider { |
84 | | public: |
85 | | llvh::ErrorOr<void *> newStorageImpl(const char *name) override; |
86 | | void deleteStorageImpl(void *storage) override; |
87 | | }; |
88 | | |
89 | | class ContiguousVAStorageProvider final : public StorageProvider { |
90 | | public: |
91 | | ContiguousVAStorageProvider(size_t size) |
92 | 47 | : size_(llvh::alignTo<AlignedStorage::size()>(size)) { |
93 | 47 | auto result = oscompat::vm_reserve_aligned( |
94 | 47 | size_, AlignedStorage::size(), getMmapHint()); |
95 | 47 | if (!result) |
96 | 0 | hermes_fatal("Contiguous storage allocation failed.", result.getError()); |
97 | 47 | level_ = start_ = static_cast<char *>(*result); |
98 | 47 | oscompat::vm_name(start_, size_, kFreeRegionName); |
99 | 47 | } |
100 | 47 | ~ContiguousVAStorageProvider() override { |
101 | 47 | oscompat::vm_release_aligned(start_, size_); |
102 | 47 | } |
103 | | |
104 | 141 | llvh::ErrorOr<void *> newStorageImpl(const char *name) override { |
105 | 141 | void *storage; |
106 | 141 | if (!freelist_.empty()) { |
107 | 0 | storage = freelist_.back(); |
108 | 0 | freelist_.pop_back(); |
109 | 141 | } else if (level_ < start_ + size_) { |
110 | 141 | storage = std::exchange(level_, level_ + AlignedStorage::size()); |
111 | 141 | } else { |
112 | 0 | return make_error_code(OOMError::MaxStorageReached); |
113 | 0 | } |
114 | 141 | auto res = oscompat::vm_commit(storage, AlignedStorage::size()); |
115 | 141 | if (res) { |
116 | 141 | oscompat::vm_name(storage, AlignedStorage::size(), name); |
117 | 141 | } |
118 | 141 | return res; |
119 | 141 | } |
120 | | |
121 | 141 | void deleteStorageImpl(void *storage) override { |
122 | 141 | assert( |
123 | 141 | !llvh::alignmentAdjustment(storage, AlignedStorage::size()) && |
124 | 141 | "Storage not aligned"); |
125 | 141 | assert(storage >= start_ && storage < level_ && "Storage not in region"); |
126 | 141 | oscompat::vm_name(storage, AlignedStorage::size(), kFreeRegionName); |
127 | 141 | oscompat::vm_uncommit(storage, AlignedStorage::size()); |
128 | 141 | freelist_.push_back(storage); |
129 | 141 | } |
130 | | |
131 | | private: |
132 | | static constexpr const char *kFreeRegionName = "hermes-free-heap"; |
133 | | size_t size_; |
134 | | char *start_; |
135 | | char *level_; |
136 | | llvh::SmallVector<void *, 0> freelist_; |
137 | | }; |
138 | | |
139 | | class MallocStorageProvider final : public StorageProvider { |
140 | | public: |
141 | | llvh::ErrorOr<void *> newStorageImpl(const char *name) override; |
142 | | void deleteStorageImpl(void *storage) override; |
143 | | |
144 | | private: |
145 | | /// Map aligned starts to actual starts for freeing. |
146 | | /// NOTE: Since this is only used for debugging purposes, and it is rare to |
147 | | /// create and delete storage, it's ok to use a map. |
148 | | llvh::DenseMap<void *, void *> lowLimToAllocHandle_; |
149 | | }; |
150 | | |
151 | | llvh::ErrorOr<void *> VMAllocateStorageProvider::newStorageImpl( |
152 | 0 | const char *name) { |
153 | 0 | assert(AlignedStorage::size() % oscompat::page_size() == 0); |
154 | | // Allocate the space, hoping it will be the correct alignment. |
155 | 0 | auto result = oscompat::vm_allocate_aligned( |
156 | 0 | AlignedStorage::size(), AlignedStorage::size(), getMmapHint()); |
157 | 0 | if (!result) { |
158 | 0 | return result; |
159 | 0 | } |
160 | 0 | void *mem = *result; |
161 | 0 | assert(isAligned(mem)); |
162 | 0 | (void)&isAligned; |
163 | | #ifdef HERMESVM_ALLOW_HUGE_PAGES |
164 | | oscompat::vm_hugepage(mem, AlignedStorage::size()); |
165 | | #endif |
166 | | |
167 | | // Name the memory region on platforms that support naming. |
168 | 0 | oscompat::vm_name(mem, AlignedStorage::size(), name); |
169 | 0 | return mem; |
170 | 0 | } |
171 | | |
172 | 0 | void VMAllocateStorageProvider::deleteStorageImpl(void *storage) { |
173 | 0 | if (!storage) { |
174 | 0 | return; |
175 | 0 | } |
176 | 0 | oscompat::vm_free_aligned(storage, AlignedStorage::size()); |
177 | 0 | } |
178 | | |
179 | 0 | llvh::ErrorOr<void *> MallocStorageProvider::newStorageImpl(const char *name) { |
180 | | // name is unused, can't name malloc memory. |
181 | 0 | (void)name; |
182 | 0 | void *mem = checkedMalloc2(AlignedStorage::size(), 2u); |
183 | 0 | void *lowLim = alignAlloc(mem); |
184 | 0 | assert(isAligned(lowLim) && "New storage should be aligned"); |
185 | 0 | lowLimToAllocHandle_[lowLim] = mem; |
186 | 0 | return lowLim; |
187 | 0 | } |
188 | | |
189 | 0 | void MallocStorageProvider::deleteStorageImpl(void *storage) { |
190 | 0 | if (!storage) { |
191 | 0 | return; |
192 | 0 | } |
193 | 0 | free(lowLimToAllocHandle_[storage]); |
194 | 0 | lowLimToAllocHandle_.erase(storage); |
195 | 0 | } |
196 | | |
197 | | } // namespace |
198 | | |
199 | 47 | StorageProvider::~StorageProvider() { |
200 | 47 | assert(numLiveAllocs() == 0); |
201 | 47 | } |
202 | | |
203 | | /* static */ |
204 | 0 | std::unique_ptr<StorageProvider> StorageProvider::mmapProvider() { |
205 | 0 | return std::unique_ptr<StorageProvider>(new VMAllocateStorageProvider); |
206 | 0 | } |
207 | | |
208 | | /* static */ |
209 | | std::unique_ptr<StorageProvider> StorageProvider::contiguousVAProvider( |
210 | 47 | size_t size) { |
211 | 47 | return std::make_unique<ContiguousVAStorageProvider>(size); |
212 | 47 | } |
213 | | |
214 | | /* static */ |
215 | 0 | std::unique_ptr<StorageProvider> StorageProvider::mallocProvider() { |
216 | 0 | return std::unique_ptr<StorageProvider>(new MallocStorageProvider); |
217 | 0 | } |
218 | | |
219 | 141 | llvh::ErrorOr<void *> StorageProvider::newStorage(const char *name) { |
220 | 141 | auto res = newStorageImpl(name); |
221 | | |
222 | 141 | if (res) { |
223 | 141 | numSucceededAllocs_++; |
224 | 141 | } else { |
225 | 0 | numFailedAllocs_++; |
226 | 0 | } |
227 | | |
228 | 141 | return res; |
229 | 141 | } |
230 | | |
231 | 141 | void StorageProvider::deleteStorage(void *storage) { |
232 | 141 | if (!storage) { |
233 | 0 | return; |
234 | 0 | } |
235 | | |
236 | 141 | numDeletedAllocs_++; |
237 | 141 | deleteStorageImpl(storage); |
238 | 141 | } |
239 | | |
240 | | llvh::ErrorOr<std::pair<void *, size_t>> |
241 | 0 | vmAllocateAllowLess(size_t sz, size_t minSz, size_t alignment) { |
242 | 0 | assert(sz >= minSz && "Shouldn't supply a lower size than the minimum"); |
243 | 0 | assert(minSz != 0 && "Minimum size must not be zero"); |
244 | 0 | assert(sz == llvh::alignTo(sz, alignment)); |
245 | 0 | assert(minSz == llvh::alignTo(minSz, alignment)); |
246 | | // Try fractions of the requested size, down to the minimum. |
247 | | // We'll do it by eighths. |
248 | 0 | assert(sz >= 8); // Since sz is page-aligned, safe assumption. |
249 | 0 | const size_t increment = sz / 8; |
250 | | // Store the result for the case where all attempts fail. |
251 | 0 | llvh::ErrorOr<void *> result{std::error_code{}}; |
252 | 0 | while (sz >= minSz) { |
253 | 0 | result = oscompat::vm_allocate_aligned(sz, alignment, getMmapHint()); |
254 | 0 | if (result) { |
255 | 0 | assert( |
256 | 0 | sz == llvh::alignTo(sz, alignment) && |
257 | 0 | "Should not return an un-aligned size"); |
258 | 0 | return std::make_pair(result.get(), sz); |
259 | 0 | } |
260 | 0 | if (sz < increment || sz == minSz) { |
261 | | // Would either underflow or can't reduce any lower. |
262 | 0 | break; |
263 | 0 | } |
264 | 0 | sz = std::max( |
265 | 0 | static_cast<size_t>(llvh::alignDown(sz - increment, alignment)), minSz); |
266 | 0 | } |
267 | 0 | assert(!result && "Must be an error if none of the allocations succeeded"); |
268 | 0 | return result.getError(); |
269 | 0 | } |
270 | | |
271 | 0 | size_t StorageProvider::numSucceededAllocs() const { |
272 | 0 | return numSucceededAllocs_; |
273 | 0 | } |
274 | | |
275 | 0 | size_t StorageProvider::numFailedAllocs() const { |
276 | 0 | return numFailedAllocs_; |
277 | 0 | } |
278 | | |
279 | 0 | size_t StorageProvider::numDeletedAllocs() const { |
280 | 0 | return numDeletedAllocs_; |
281 | 0 | } |
282 | | |
283 | 47 | size_t StorageProvider::numLiveAllocs() const { |
284 | 47 | return numSucceededAllocs_ - numDeletedAllocs_; |
285 | 47 | } |
286 | | |
287 | | } // namespace vm |
288 | | } // namespace hermes |