// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include <limits>

#include "src/counters.h"
#include "src/heap/heap-inl.h"
#include "src/objects-inl.h"
#include "src/objects/js-array-buffer-inl.h"
#include "src/wasm/wasm-engine.h"
#include "src/wasm/wasm-limits.h"
#include "src/wasm/wasm-memory.h"
#include "src/wasm/wasm-module.h"

namespace v8 {
namespace internal {
namespace wasm {

namespace {

constexpr size_t kNegativeGuardSize = 1u << 31;  // 2GiB

void AddAllocationStatusSample(Isolate* isolate,
                               WasmMemoryTracker::AllocationStatus status) {
  isolate->counters()->wasm_memory_allocation_result()->AddSample(
      static_cast<int>(status));
}

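// A rough sketch of the region TryAllocateBackingStore() produces when full
// guard regions are in use (exact extents follow from kNegativeGuardSize
// above and kWasmMaxHeapOffset in wasm-limits.h):
//
//   allocation_base         returned `memory` pointer
//   |                       |
//   v                       v
//   +-----------------------+------------------+-------------------------+
//   | negative guard        | wasm memory      | positive guard, up to   |
//   | (2 GiB, kNoAccess)    | (size, RW)       | kWasmMaxHeapOffset      |
//   +-----------------------+------------------+-------------------------+
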
void* TryAllocateBackingStore(WasmMemoryTracker* memory_tracker, Heap* heap,
                              size_t size, void** allocation_base,
                              size_t* allocation_length) {
  using AllocationStatus = WasmMemoryTracker::AllocationStatus;
#if V8_TARGET_ARCH_64_BIT
  bool require_full_guard_regions = true;
#else
  bool require_full_guard_regions = false;
#endif
  // Let the WasmMemoryTracker know we are going to reserve a bunch of
  // address space.
  // Try up to three times; getting rid of dead JSArrayBuffer allocations
  // might require two GCs because the first GC may be incremental and may
  // have floating garbage.
  static constexpr int kAllocationRetries = 2;
  bool did_retry = false;
  for (int trial = 0;; ++trial) {
    // For guard regions, we always allocate the largest possible offset into
    // the heap, so the addressable memory after the guard page can be made
    // inaccessible.
    //
    // To protect against 32-bit integer overflow issues, we also protect the
    // 2GiB before the valid part of the memory buffer.
    // TODO(7881): do not use static_cast<uint32_t>() here
    *allocation_length =
        require_full_guard_regions
            ? RoundUp(kWasmMaxHeapOffset + kNegativeGuardSize, CommitPageSize())
            : RoundUp(base::bits::RoundUpToPowerOfTwo32(
                          static_cast<uint32_t>(size)),
                      kWasmPageSize);
    DCHECK_GE(*allocation_length, size);
    DCHECK_GE(*allocation_length, kWasmPageSize);
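    // A rough worked example (assuming kWasmMaxHeapOffset is on the order of
    // 8 GiB, i.e. a maximum 32-bit index plus a maximum 32-bit offset): with
    // full guard regions, every allocation reserves ~10 GiB of address space
    // no matter how small `size` is; without them, the reservation is just
    // `size` rounded up to a power of two and to the 64 KiB wasm page size.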

    auto limit = require_full_guard_regions ? WasmMemoryTracker::kSoftLimit
                                            : WasmMemoryTracker::kHardLimit;
    if (memory_tracker->ReserveAddressSpace(*allocation_length, limit)) break;

    did_retry = true;
    // After first and second GC: retry.
    if (trial == kAllocationRetries) {
      // If we fail to allocate guard regions and the fallback is enabled,
      // then retry without full guard regions.
      if (require_full_guard_regions && FLAG_wasm_trap_handler_fallback) {
        require_full_guard_regions = false;
        --trial;  // one more try.
        continue;
      }

      // We are over the address space limit. Fail.
      //
      // When running under the correctness fuzzer (i.e.
      // --abort-on-stack-or-string-length-overflow is preset), we crash
      // instead so it is not incorrectly reported as a correctness
      // violation. See https://crbug.com/828293#c4
      if (FLAG_abort_on_stack_or_string_length_overflow) {
        FATAL("could not allocate wasm memory");
      }
      AddAllocationStatusSample(
          heap->isolate(), AllocationStatus::kAddressSpaceLimitReachedFailure);
      return nullptr;
    }
    // Collect garbage and retry.
    heap->MemoryPressureNotification(MemoryPressureLevel::kCritical, true);
  }

  // AllocatePages reserves the whole region with kNoAccess, so it is
  // inaccessible by default.
  DCHECK_NULL(*allocation_base);
  for (int trial = 0;; ++trial) {
    *allocation_base =
        AllocatePages(GetPlatformPageAllocator(), nullptr, *allocation_length,
                      kWasmPageSize, PageAllocator::kNoAccess);
    if (*allocation_base != nullptr) break;
    if (trial == kAllocationRetries) {
      memory_tracker->ReleaseReservation(*allocation_length);
      AddAllocationStatusSample(heap->isolate(),
                                AllocationStatus::kOtherFailure);
      return nullptr;
    }
    heap->MemoryPressureNotification(MemoryPressureLevel::kCritical, true);
  }
  byte* memory = reinterpret_cast<byte*>(*allocation_base);
  if (require_full_guard_regions) {
    memory += kNegativeGuardSize;
  }

  // Make the part we care about accessible.
  if (size > 0) {
    bool result =
        SetPermissions(GetPlatformPageAllocator(), memory,
                       RoundUp(size, kWasmPageSize), PageAllocator::kReadWrite);
    // SetPermissions commits the extra memory, which may put us over the
    // process memory limit. If so, report this as an OOM.
    if (!result) {
      V8::FatalProcessOutOfMemory(nullptr, "TryAllocateBackingStore");
    }
  }
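  // Everything outside [memory, memory + size) stays kNoAccess. With full
  // guard regions, an out-of-bounds access by generated wasm code faults in
  // a guard page and is reported as a wasm trap by the trap handler, which
  // is what allows eliding explicit bounds checks on 64-bit targets.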

  memory_tracker->RegisterAllocation(heap->isolate(), *allocation_base,
                                     *allocation_length, memory, size);
  AddAllocationStatusSample(heap->isolate(),
                            did_retry ? AllocationStatus::kSuccessAfterRetry
                                      : AllocationStatus::kSuccess);
  return memory;
}

#if V8_TARGET_ARCH_MIPS64
// MIPS64 has a user space of 2^40 bytes on most processors, so the address
// space limits need to be smaller.
constexpr size_t kAddressSpaceSoftLimit = 0x2100000000L;  // 132 GiB
constexpr size_t kAddressSpaceHardLimit = 0x4000000000L;  // 256 GiB
#elif V8_TARGET_ARCH_64_BIT
constexpr size_t kAddressSpaceSoftLimit = 0x6000000000L;   // 384 GiB
constexpr size_t kAddressSpaceHardLimit = 0x10100000000L;  // 1 TiB + 4 GiB
#else
constexpr size_t kAddressSpaceSoftLimit = 0x90000000;  // 2 GiB + 256 MiB
constexpr size_t kAddressSpaceHardLimit = 0xC0000000;  // 3 GiB
#endif
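
// Reservations for full guard regions are checked against the soft limit,
// while plain reservations may go up to the hard limit (see
// TryAllocateBackingStore above). The gap between the two leaves headroom so
// that once guard-region allocations start failing, the fallback path
// without full guard regions can still succeed.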

}  // namespace

WasmMemoryTracker::~WasmMemoryTracker() {
  // All reserved address space should be released before the allocation
  // tracker is destroyed.
  DCHECK_EQ(reserved_address_space_, 0u);
  DCHECK_EQ(allocated_address_space_, 0u);
}

void* WasmMemoryTracker::TryAllocateBackingStoreForTesting(
    Heap* heap, size_t size, void** allocation_base,
    size_t* allocation_length) {
  return TryAllocateBackingStore(this, heap, size, allocation_base,
                                 allocation_length);
}

void WasmMemoryTracker::FreeBackingStoreForTesting(base::AddressRegion memory,
                                                   void* buffer_start) {
  ReleaseAllocation(nullptr, buffer_start);
  CHECK(FreePages(GetPlatformPageAllocator(),
                  reinterpret_cast<void*>(memory.begin()), memory.size()));
}

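// Reserves address space budget via a lock-free compare-exchange loop.
// compare_exchange_weak may fail spuriously or because another thread won
// the race; either way old_count is re-read and the limit check repeated,
// so concurrent reservations can never push the counter past the limit.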
bool WasmMemoryTracker::ReserveAddressSpace(size_t num_bytes,
                                            ReservationLimit limit) {
  size_t reservation_limit =
      limit == kSoftLimit ? kAddressSpaceSoftLimit : kAddressSpaceHardLimit;
  while (true) {
    size_t old_count = reserved_address_space_.load();
    if (old_count > reservation_limit) return false;
    if (reservation_limit - old_count < num_bytes) return false;
    if (reserved_address_space_.compare_exchange_weak(old_count,
                                                      old_count + num_bytes)) {
      return true;
    }
  }
}

void WasmMemoryTracker::ReleaseReservation(size_t num_bytes) {
  size_t const old_reserved = reserved_address_space_.fetch_sub(num_bytes);
  USE(old_reserved);
  DCHECK_LE(num_bytes, old_reserved);
}

void WasmMemoryTracker::RegisterAllocation(Isolate* isolate,
                                           void* allocation_base,
                                           size_t allocation_length,
                                           void* buffer_start,
                                           size_t buffer_length) {
  base::MutexGuard scope_lock(&mutex_);

  allocated_address_space_ += allocation_length;
  AddAddressSpaceSample(isolate);

  allocations_.emplace(buffer_start,
                       AllocationData{allocation_base, allocation_length,
                                      buffer_start, buffer_length});
}

WasmMemoryTracker::AllocationData WasmMemoryTracker::ReleaseAllocation(
    Isolate* isolate, const void* buffer_start) {
  base::MutexGuard scope_lock(&mutex_);

  auto find_result = allocations_.find(buffer_start);
  CHECK_NE(find_result, allocations_.end());

  if (find_result != allocations_.end()) {
    size_t num_bytes = find_result->second.allocation_length;
    DCHECK_LE(num_bytes, reserved_address_space_);
    DCHECK_LE(num_bytes, allocated_address_space_);
    reserved_address_space_ -= num_bytes;
    allocated_address_space_ -= num_bytes;
    // ReleaseAllocation might be called with a nullptr as isolate if the
    // embedder is releasing the allocation and not a specific isolate. This
    // happens if the allocation was shared between multiple isolates
    // (threads).
    if (isolate) AddAddressSpaceSample(isolate);

    AllocationData allocation_data = find_result->second;
    allocations_.erase(find_result);
    return allocation_data;
  }
  UNREACHABLE();
}
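// Note: ReleaseAllocation drops both counters by the full allocation_length
// (guard regions included), mirroring ReserveAddressSpace() and
// RegisterAllocation(), not just by the accessible buffer_length.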

const WasmMemoryTracker::AllocationData* WasmMemoryTracker::FindAllocationData(
    const void* buffer_start) {
  base::MutexGuard scope_lock(&mutex_);
  const auto& result = allocations_.find(buffer_start);
  if (result != allocations_.end()) {
    return &result->second;
  }
  return nullptr;
}

bool WasmMemoryTracker::IsWasmMemory(const void* buffer_start) {
  base::MutexGuard scope_lock(&mutex_);
  return allocations_.find(buffer_start) != allocations_.end();
}

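// A buffer has full guard regions iff every address that generated code can
// form, i.e. anything below buffer_start + kWasmMaxHeapOffset, still falls
// inside the reservation; that is exactly the comparison below.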
bool WasmMemoryTracker::HasFullGuardRegions(const void* buffer_start) {
  base::MutexGuard scope_lock(&mutex_);
  const auto allocation = allocations_.find(buffer_start);

  if (allocation == allocations_.end()) {
    return false;
  }

  Address start = reinterpret_cast<Address>(buffer_start);
  Address limit =
      reinterpret_cast<Address>(allocation->second.allocation_base) +
      allocation->second.allocation_length;
  return start + kWasmMaxHeapOffset < limit;
}

bool WasmMemoryTracker::FreeMemoryIfIsWasmMemory(Isolate* isolate,
                                                 const void* buffer_start) {
  if (IsWasmMemory(buffer_start)) {
    const AllocationData allocation = ReleaseAllocation(isolate, buffer_start);
    CHECK(FreePages(GetPlatformPageAllocator(), allocation.allocation_base,
                    allocation.allocation_length));
    return true;
  }
  return false;
}

void WasmMemoryTracker::AddAddressSpaceSample(Isolate* isolate) {
  // Report address space usage in MiB so the full range fits in an int on
  // all platforms.
  isolate->counters()->wasm_address_space_usage_mb()->AddSample(
      static_cast<int>(allocated_address_space_ >> 20));
}

Handle<JSArrayBuffer> SetupArrayBuffer(Isolate* isolate, void* backing_store,
                                       size_t size, bool is_external,
                                       SharedFlag shared) {
  Handle<JSArrayBuffer> buffer =
      isolate->factory()->NewJSArrayBuffer(shared, TENURED);
  constexpr bool is_wasm_memory = true;
  JSArrayBuffer::Setup(buffer, isolate, is_external, backing_store, size,
                       shared, is_wasm_memory);
  buffer->set_is_detachable(false);
  buffer->set_is_growable(true);
  return buffer;
}
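// Note: the buffer is created non-detachable, since user code must not be
// able to detach a wasm memory out from under its instance, but growable,
// so that growing the memory can swap in a larger backing store.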

MaybeHandle<JSArrayBuffer> NewArrayBuffer(Isolate* isolate, size_t size,
                                          SharedFlag shared) {
  // Enforce flag-limited maximum allocation size.
  if (size > max_mem_bytes()) return {};

  WasmMemoryTracker* memory_tracker = isolate->wasm_engine()->memory_tracker();

  // Set by TryAllocateBackingStore or GetEmptyBackingStore
  void* allocation_base = nullptr;
  size_t allocation_length = 0;

  void* memory = TryAllocateBackingStore(memory_tracker, isolate->heap(), size,
                                         &allocation_base, &allocation_length);
  if (memory == nullptr) return {};

#if DEBUG
  // Double check the API allocator actually zero-initialized the memory.
  const byte* bytes = reinterpret_cast<const byte*>(memory);
  for (size_t i = 0; i < size; ++i) {
    DCHECK_EQ(0, bytes[i]);
  }
#endif

  reinterpret_cast<v8::Isolate*>(isolate)
      ->AdjustAmountOfExternalAllocatedMemory(size);

  constexpr bool is_external = false;
  return SetupArrayBuffer(isolate, memory, size, is_external, shared);
}
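// A minimal usage sketch (hypothetical embedder-side caller, error handling
// elided; SharedFlag::kNotShared is the non-shared-memory case):
//
//   Handle<JSArrayBuffer> buffer;
//   if (!NewArrayBuffer(isolate, 16 * kWasmPageSize, SharedFlag::kNotShared)
//            .ToHandle(&buffer)) {
//     // Allocation failed (size too large or address space exhausted).
//   }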

void DetachMemoryBuffer(Isolate* isolate, Handle<JSArrayBuffer> buffer,
                        bool free_memory) {
  if (buffer->is_shared()) return;  // Detaching shared buffers is impossible.
  DCHECK(!buffer->is_detachable());

  const bool is_external = buffer->is_external();
  if (!is_external) {
    buffer->set_is_external(true);
    isolate->heap()->UnregisterArrayBuffer(*buffer);
    if (free_memory) {
      // We need to free the memory before detaching the buffer because
      // FreeBackingStore reads buffer->allocation_base(), which is nulled out
      // by Detach. This means there is a dangling pointer until we detach the
      // buffer. Since there is no way for the user to directly call
      // FreeBackingStore, we can ensure this is safe.
      buffer->FreeBackingStoreFromMainThread();
    }
  }

  DCHECK(buffer->is_external());
  buffer->set_is_wasm_memory(false);
  buffer->set_is_detachable(true);
  buffer->Detach();
}
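// A sketch of how these helpers combine on a grow path (illustrative only,
// not the literal grow implementation, which lives with the memory objects):
//
//   1. NewArrayBuffer(isolate, new_size, shared) allocates a fresh store.
//   2. The old contents are copied into the new buffer.
//   3. DetachMemoryBuffer(isolate, old_buffer, /*free_memory=*/true)
//      releases the old store and detaches the old JSArrayBuffer.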

}  // namespace wasm
}  // namespace internal
}  // namespace v8