Line data Source code
1 : // Copyright 2017 the V8 project authors. All rights reserved.
2 : // Use of this source code is governed by a BSD-style license that can be
3 : // found in the LICENSE file.
4 :
5 : #include <limits>
6 :
7 : #include "src/counters.h"
8 : #include "src/heap/heap-inl.h"
9 : #include "src/objects-inl.h"
10 : #include "src/objects/js-array-buffer-inl.h"
11 : #include "src/wasm/wasm-engine.h"
12 : #include "src/wasm/wasm-limits.h"
13 : #include "src/wasm/wasm-memory.h"
14 : #include "src/wasm/wasm-module.h"
15 :
16 : namespace v8 {
17 : namespace internal {
18 : namespace wasm {
19 :
20 : namespace {
21 :
22 : constexpr size_t kNegativeGuardSize = 1u << 31; // 2GiB
23 :
24 : void AddAllocationStatusSample(Isolate* isolate,
25 : WasmMemoryTracker::AllocationStatus status) {
26 : isolate->counters()->wasm_memory_allocation_result()->AddSample(
27 176347 : static_cast<int>(status));
28 : }
29 :
30 529025 : bool RunWithGCAndRetry(const std::function<bool()>& fn, Heap* heap,
31 : bool* did_retry) {
32 : // Try up to three times; getting rid of dead JSArrayBuffer allocations might
33 : // require two GCs because the first GC may be incremental and may have
34 : // floating garbage.
35 : static constexpr int kAllocationRetries = 2;
36 :
37 1530 : for (int trial = 0;; ++trial) {
38 530555 : if (fn()) return true;
39 : // {fn} failed. If {kAllocationRetries} is reached, fail.
40 1538 : *did_retry = true;
41 1538 : if (trial == kAllocationRetries) return false;
42 : // Otherwise, collect garbage and retry.
43 1530 : heap->MemoryPressureNotification(MemoryPressureLevel::kCritical, true);
44 : }
45 : }
46 :
47 176347 : void* TryAllocateBackingStore(WasmMemoryTracker* memory_tracker, Heap* heap,
48 : size_t size, size_t max_size,
49 : void** allocation_base,
50 : size_t* allocation_length) {
51 : using AllocationStatus = WasmMemoryTracker::AllocationStatus;
52 : #if V8_TARGET_ARCH_64_BIT
53 : constexpr bool kRequireFullGuardRegions = true;
54 : #else
55 : constexpr bool kRequireFullGuardRegions = false;
56 : #endif
57 : // Let the WasmMemoryTracker know we are going to reserve a bunch of
58 : // address space.
59 176347 : size_t reservation_size = std::max(max_size, size);
60 176347 : bool did_retry = false;
61 :
62 177877 : auto reserve_memory_space = [&] {
63 : // For guard regions, we always allocate the largest possible offset
64 : // into the heap, so the addressable memory after the guard page can
65 : // be made inaccessible.
66 : //
67 : // To protect against 32-bit integer overflow issues, we also
68 : // protect the 2GiB before the valid part of the memory buffer.
69 355754 : *allocation_length =
70 : kRequireFullGuardRegions
71 177877 : ? RoundUp(kWasmMaxHeapOffset + kNegativeGuardSize, CommitPageSize())
72 : : RoundUp(base::bits::RoundUpToPowerOfTwo(reservation_size),
73 533631 : kWasmPageSize);
74 : DCHECK_GE(*allocation_length, size);
75 : DCHECK_GE(*allocation_length, kWasmPageSize);
76 :
77 177877 : return memory_tracker->ReserveAddressSpace(*allocation_length);
78 : };
79 352694 : if (!RunWithGCAndRetry(reserve_memory_space, heap, &did_retry)) {
80 : // Reset reservation_size to initial size so that at least the initial size
81 : // can be allocated if maximum size reservation is not possible.
82 8 : reservation_size = size;
83 :
84 : // We are over the address space limit. Fail.
85 : //
86 : // When running under the correctness fuzzer (i.e.
87 : // --abort-on-stack-or-string-length-overflow is present), we crash
88 : // instead so it is not incorrectly reported as a correctness
89 : // violation. See https://crbug.com/828293#c4
90 8 : if (FLAG_abort_on_stack_or_string_length_overflow) {
91 0 : FATAL("could not allocate wasm memory");
92 : }
93 : AddAllocationStatusSample(
94 : heap->isolate(), AllocationStatus::kAddressSpaceLimitReachedFailure);
95 8 : return nullptr;
96 : }
97 :
98 : // The reservation makes the whole region inaccessible by default.
99 : DCHECK_NULL(*allocation_base);
100 176339 : auto allocate_pages = [&] {
101 352678 : *allocation_base =
102 176339 : AllocatePages(GetPlatformPageAllocator(), nullptr, *allocation_length,
103 352678 : kWasmPageSize, PageAllocator::kNoAccess);
104 176339 : return *allocation_base != nullptr;
105 : };
106 352678 : if (!RunWithGCAndRetry(allocate_pages, heap, &did_retry)) {
107 0 : memory_tracker->ReleaseReservation(*allocation_length);
108 : AddAllocationStatusSample(heap->isolate(), AllocationStatus::kOtherFailure);
109 0 : return nullptr;
110 : }
111 :
112 176339 : byte* memory = reinterpret_cast<byte*>(*allocation_base);
113 : if (kRequireFullGuardRegions) {
114 176339 : memory += kNegativeGuardSize;
115 : }
116 :
117 : // Make the part we care about accessible.
118 176339 : auto commit_memory = [&] {
119 224922 : return size == 0 || SetPermissions(GetPlatformPageAllocator(), memory,
120 : RoundUp(size, kWasmPageSize),
121 : PageAllocator::kReadWrite);
122 176339 : };
123 : // SetPermissions commits the extra memory, which may put us over the
124 : // process memory limit. If so, report this as an OOM.
125 352678 : if (!RunWithGCAndRetry(commit_memory, heap, &did_retry)) {
126 0 : V8::FatalProcessOutOfMemory(nullptr, "TryAllocateBackingStore");
127 : }
128 :
129 352678 : memory_tracker->RegisterAllocation(heap->isolate(), *allocation_base,
130 352678 : *allocation_length, memory, size);
131 176339 : AddAllocationStatusSample(heap->isolate(),
132 : did_retry ? AllocationStatus::kSuccessAfterRetry
133 : : AllocationStatus::kSuccess);
134 176339 : return memory;
135 : }
136 :
137 : #if V8_TARGET_ARCH_MIPS64
138 : // MIPS64 has a user space of 2^40 bytes on most processors, so the
139 : // address space limit needs to be smaller.
140 : constexpr size_t kAddressSpaceLimit = 0x4000000000L; // 256 GiB
141 : #elif V8_TARGET_ARCH_64_BIT
142 : constexpr size_t kAddressSpaceLimit = 0x10100000000L; // 1 TiB + 4 GiB
143 : #else
144 : constexpr size_t kAddressSpaceLimit = 0xC0000000; // 3 GiB
145 : #endif
146 :
147 : } // namespace
148 :
149 119778 : WasmMemoryTracker::~WasmMemoryTracker() {
150 : // All reserved address space should be released before the allocation tracker
151 : // is destroyed.
152 : DCHECK_EQ(reserved_address_space_, 0u);
153 : DCHECK_EQ(allocated_address_space_, 0u);
154 : DCHECK(allocations_.empty());
155 59889 : }
156 :
157 12 : void* WasmMemoryTracker::TryAllocateBackingStoreForTesting(
158 : Heap* heap, size_t size, void** allocation_base,
159 : size_t* allocation_length) {
160 : return TryAllocateBackingStore(this, heap, size, size, allocation_base,
161 12 : allocation_length);
162 : }
163 :
164 12 : void WasmMemoryTracker::FreeBackingStoreForTesting(base::AddressRegion memory,
165 : void* buffer_start) {
166 12 : base::MutexGuard scope_lock(&mutex_);
167 24 : ReleaseAllocation_Locked(nullptr, buffer_start);
168 12 : CHECK(FreePages(GetPlatformPageAllocator(),
169 : reinterpret_cast<void*>(memory.begin()), memory.size()));
170 12 : }
171 :
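// Reserves {num_bytes} of virtual address space against the engine-wide
// limit using a compare-and-swap loop; returns false if the limit would be
// exceeded.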
172 1421173 : bool WasmMemoryTracker::ReserveAddressSpace(size_t num_bytes) {
173 : size_t reservation_limit = kAddressSpaceLimit;
174 10 : while (true) {
175 1421183 : size_t old_count = reserved_address_space_.load();
176 2842356 : if (old_count > reservation_limit) return false;
177 1421191 : if (reservation_limit - old_count < num_bytes) return false;
178 2837510 : if (reserved_address_space_.compare_exchange_weak(old_count,
179 : old_count + num_bytes)) {
180 : return true;
181 : }
182 : }
183 : }
184 :
185 1242407 : void WasmMemoryTracker::ReleaseReservation(size_t num_bytes) {
186 : size_t const old_reserved = reserved_address_space_.fetch_sub(num_bytes);
187 : USE(old_reserved);
188 : DCHECK_LE(num_bytes, old_reserved);
189 1242407 : }
190 :
191 176339 : void WasmMemoryTracker::RegisterAllocation(Isolate* isolate,
192 : void* allocation_base,
193 : size_t allocation_length,
194 : void* buffer_start,
195 : size_t buffer_length) {
196 176339 : base::MutexGuard scope_lock(&mutex_);
197 :
198 176339 : allocated_address_space_ += allocation_length;
199 : // Report address space usage in MiB so the full range fits in an int on all
200 : // platforms.
201 176339 : isolate->counters()->wasm_address_space_usage_mb()->AddSample(
202 352678 : static_cast<int>(allocated_address_space_ / MB));
203 :
204 : allocations_.emplace(buffer_start,
205 : AllocationData{allocation_base, allocation_length,
206 352678 : buffer_start, buffer_length});
207 176339 : }
208 :
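// Removes the entry for {buffer_start} from the allocation map and returns a
// copy of its metadata. Both the reserved and allocated address space
// counters are reduced by the allocation length.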
209 176339 : WasmMemoryTracker::AllocationData WasmMemoryTracker::ReleaseAllocation_Locked(
210 : Isolate* isolate, const void* buffer_start) {
211 : auto find_result = allocations_.find(buffer_start);
212 176339 : CHECK_NE(find_result, allocations_.end());
213 :
214 176339 : size_t num_bytes = find_result->second.allocation_length;
215 : DCHECK_LE(num_bytes, reserved_address_space_);
216 : DCHECK_LE(num_bytes, allocated_address_space_);
217 : reserved_address_space_ -= num_bytes;
218 176339 : allocated_address_space_ -= num_bytes;
219 :
220 : AllocationData allocation_data = find_result->second;
221 : allocations_.erase(find_result);
222 176339 : return allocation_data;
223 : }
224 :
225 4620 : const WasmMemoryTracker::AllocationData* WasmMemoryTracker::FindAllocationData(
226 : const void* buffer_start) {
227 4620 : base::MutexGuard scope_lock(&mutex_);
228 : const auto& result = allocations_.find(buffer_start);
229 4620 : if (result != allocations_.end()) {
230 4620 : return &result->second;
231 : }
232 : return nullptr;
233 : }
234 :
235 805 : bool WasmMemoryTracker::IsWasmMemory(const void* buffer_start) {
236 805 : base::MutexGuard scope_lock(&mutex_);
237 805 : return allocations_.find(buffer_start) != allocations_.end();
238 : }
239 :
240 108 : bool WasmMemoryTracker::IsWasmSharedMemory(const void* buffer_start) {
241 108 : base::MutexGuard scope_lock(&mutex_);
242 : const auto& result = allocations_.find(buffer_start);
243 : // Should be a wasm allocation, and registered as a shared allocation.
244 216 : return (result != allocations_.end() && result->second.is_shared);
245 : }
246 :
247 1324 : void WasmMemoryTracker::MarkWasmMemoryNotGrowable(
248 : Handle<JSArrayBuffer> buffer) {
249 1324 : base::MutexGuard scope_lock(&mutex_);
250 2648 : const auto& allocation = allocations_.find(buffer->backing_store());
251 1324 : if (allocation == allocations_.end()) return;
252 8 : allocation->second.is_growable = false;
253 : }
254 :
255 2424 : bool WasmMemoryTracker::IsWasmMemoryGrowable(Handle<JSArrayBuffer> buffer) {
256 2424 : base::MutexGuard scope_lock(&mutex_);
257 2424 : if (buffer->backing_store() == nullptr) return true;
258 4848 : const auto& allocation = allocations_.find(buffer->backing_store());
259 2424 : if (allocation == allocations_.end()) return false;
260 2424 : return allocation->second.is_growable;
261 : }
262 :
263 176321 : bool WasmMemoryTracker::FreeMemoryIfIsWasmMemory(Isolate* isolate,
264 : const void* buffer_start) {
265 176321 : base::MutexGuard scope_lock(&mutex_);
266 : const auto& result = allocations_.find(buffer_start);
267 176335 : if (result == allocations_.end()) return false;
268 176327 : if (result->second.is_shared) {
269 : // This is a shared WebAssembly.Memory allocation
270 89 : FreeMemoryIfNotShared_Locked(isolate, buffer_start);
271 89 : return true;
272 : }
273 : // This is a WebAssembly.Memory allocation
274 : const AllocationData allocation =
275 176238 : ReleaseAllocation_Locked(isolate, buffer_start);
276 176238 : CHECK(FreePages(GetPlatformPageAllocator(), allocation.allocation_base,
277 : allocation.allocation_length));
278 : return true;
279 : }
280 :
281 306 : void WasmMemoryTracker::RegisterWasmMemoryAsShared(
282 : Handle<WasmMemoryObject> object, Isolate* isolate) {
283 306 : const void* backing_store = object->array_buffer()->backing_store();
284 : // TODO(V8:8810): This should be a DCHECK; currently some tests do not
285 : // use a full WebAssembly.Memory and fail to register, so return early.
286 308 : if (!IsWasmMemory(backing_store)) return;
287 : {
288 304 : base::MutexGuard scope_lock(&mutex_);
289 : // Register the allocation as shared when it is postMessage'd. This happens
290 : // only the first time a buffer is shared over postMessage. Also track all
291 : // the memory objects that are associated with this backing store.
292 304 : RegisterSharedWasmMemory_Locked(object, isolate);
293 : // Add isolate to backing store mapping.
294 : isolates_per_buffer_[backing_store].emplace(isolate);
295 : }
296 : }
297 :
298 100 : void WasmMemoryTracker::SetPendingUpdateOnGrow(Handle<JSArrayBuffer> old_buffer,
299 : size_t new_size) {
300 100 : base::MutexGuard scope_lock(&mutex_);
301 : // Keep track of the new size of the buffer associated with each backing
302 : // store.
303 100 : AddBufferToGrowMap_Locked(old_buffer, new_size);
304 : // Request a GROW_SHARED_MEMORY interrupt on the other isolates.
305 100 : TriggerSharedGrowInterruptOnAllIsolates_Locked(old_buffer);
306 100 : }
307 :
308 136 : void WasmMemoryTracker::UpdateSharedMemoryInstances(Isolate* isolate) {
309 136 : base::MutexGuard scope_lock(&mutex_);
310 : // For every buffer in the grow_update_map_, update the size for all the
311 : // memory objects associated with this isolate.
312 360 : for (auto it = grow_update_map_.begin(); it != grow_update_map_.end();) {
313 224 : UpdateSharedMemoryStateOnInterrupt_Locked(isolate, it->first, it->second);
314 : // If all the isolates that share this buffer have hit a stack check, their
315 : // memory objects are updated, and this grow entry can be erased.
316 224 : if (AreAllIsolatesUpdated_Locked(it->first)) {
317 : it = grow_update_map_.erase(it);
318 : } else {
319 : it++;
320 : }
321 : }
322 136 : }
323 :
324 304 : void WasmMemoryTracker::RegisterSharedWasmMemory_Locked(
325 : Handle<WasmMemoryObject> object, Isolate* isolate) {
326 : DCHECK(object->array_buffer()->is_shared());
327 :
328 : void* backing_store = object->array_buffer()->backing_store();
329 : // The allocation of a WasmMemoryObject should always be registered with the
330 : // WasmMemoryTracker.
331 608 : const auto& result = allocations_.find(backing_store);
332 304 : if (result == allocations_.end()) return;
333 :
334 : // Register the allocation as shared, if not already marked as shared.
335 304 : if (!result->second.is_shared) result->second.is_shared = true;
336 :
337 : // Create a persistent global handle for the shared memory object.
338 : GlobalHandles* global_handles = isolate->global_handles();
339 : object = global_handles->Create(*object);
340 :
341 : // Add to memory_object_vector to track the memory objects and instance
342 : // objects that will need to be updated on a grow call.
343 608 : result->second.memory_object_vector.push_back(
344 : SharedMemoryObjectState(object, isolate));
345 : }
346 :
347 100 : void WasmMemoryTracker::AddBufferToGrowMap_Locked(
348 : Handle<JSArrayBuffer> old_buffer, size_t new_size) {
349 100 : void* backing_store = old_buffer->backing_store();
350 200 : auto entry = grow_update_map_.find(old_buffer->backing_store());
351 100 : if (entry == grow_update_map_.end()) {
352 : // No pending grow for this backing store, add to map.
353 : grow_update_map_.emplace(backing_store, new_size);
354 56 : return;
355 : }
356 : // If a grow on the same buffer is requested before the update is complete,
357 : // new_size should always be greater than or equal to the old size. They are
358 : // equal when grow(0) is called, but new buffer handles are still mandated
359 : // by the spec.
360 44 : CHECK_LE(entry->second, new_size);
361 44 : entry->second = new_size;
362 : // Flush isolates_updated_on_grow_ every time a new grow size is recorded.
363 44 : ClearUpdatedInstancesOnPendingGrow_Locked(backing_store);
364 : }
365 :
366 100 : void WasmMemoryTracker::TriggerSharedGrowInterruptOnAllIsolates_Locked(
367 : Handle<JSArrayBuffer> old_buffer) {
368 : // Request a GrowSharedMemory interrupt on all the isolates that share
369 : // the backing store.
370 200 : const auto& isolates = isolates_per_buffer_.find(old_buffer->backing_store());
371 300 : for (const auto& isolate : isolates->second) {
372 200 : isolate->stack_guard()->RequestGrowSharedMemory();
373 : }
374 100 : }
375 :
376 224 : void WasmMemoryTracker::UpdateSharedMemoryStateOnInterrupt_Locked(
377 : Isolate* isolate, void* backing_store, size_t new_size) {
378 : // Update objects only if there are memory objects that share this backing
379 : // store, and this isolate is marked as one of the isolates that shares this
380 : // buffer.
381 224 : if (MemoryObjectsNeedUpdate_Locked(isolate, backing_store)) {
382 196 : UpdateMemoryObjectsForIsolate_Locked(isolate, backing_store, new_size);
383 : // As the memory objects are updated, add this isolate to a set of isolates
384 : // that are updated on grow. This state is maintained to track if all the
385 : // isolates that share the backing store have hit a StackCheck.
386 392 : isolates_updated_on_grow_[backing_store].emplace(isolate);
387 : }
388 224 : }
389 :
390 224 : bool WasmMemoryTracker::AreAllIsolatesUpdated_Locked(
391 : const void* backing_store) {
392 : const auto& buffer_isolates = isolates_per_buffer_.find(backing_store);
393 : // No isolates share this buffer.
394 224 : if (buffer_isolates == isolates_per_buffer_.end()) return true;
395 : const auto& updated_isolates = isolates_updated_on_grow_.find(backing_store);
396 : // Some isolates share the buffer, but no isolates have been updated yet.
397 224 : if (updated_isolates == isolates_updated_on_grow_.end()) return false;
398 224 : if (buffer_isolates->second == updated_isolates->second) {
399 : // If all the isolates that share this backing_store have hit a stack check,
400 : // and the memory objects have been updated, remove the entry from the
401 : // update map, and return true.
402 : isolates_updated_on_grow_.erase(backing_store);
403 44 : return true;
404 : }
405 : return false;
406 : }
407 :
408 44 : void WasmMemoryTracker::ClearUpdatedInstancesOnPendingGrow_Locked(
409 : const void* backing_store) {
410 : // On multiple grows to the same buffer, the entries for that buffer should be
411 : // flushed. This is done so that any consecutive grows to the same buffer will
412 : // update all instances that share this buffer.
413 : const auto& value = isolates_updated_on_grow_.find(backing_store);
414 44 : if (value != isolates_updated_on_grow_.end()) {
415 : value->second.clear();
416 : }
417 44 : }
418 :
419 196 : void WasmMemoryTracker::UpdateMemoryObjectsForIsolate_Locked(
420 : Isolate* isolate, void* backing_store, size_t new_size) {
421 392 : const auto& result = allocations_.find(backing_store);
422 196 : if (result == allocations_.end() || !result->second.is_shared) return;
423 596 : for (const auto& memory_obj_state : result->second.memory_object_vector) {
424 : DCHECK_NE(memory_obj_state.isolate, nullptr);
425 400 : if (isolate == memory_obj_state.isolate) {
426 : HandleScope scope(isolate);
427 200 : Handle<WasmMemoryObject> memory_object = memory_obj_state.memory_object;
428 : DCHECK(memory_object->IsWasmMemoryObject());
429 : DCHECK(memory_object->array_buffer()->is_shared());
430 : // The permissions have already been adjusted on grow; create a new buffer
431 : // with the new size and the old attributes. The memory has already been
432 : // allocated, so just create a new buffer over the same backing store.
433 : bool is_external = memory_object->array_buffer()->is_external();
434 : Handle<JSArrayBuffer> new_buffer = SetupArrayBuffer(
435 200 : isolate, backing_store, new_size, is_external, SharedFlag::kShared);
436 200 : memory_obj_state.memory_object->update_instances(isolate, new_buffer);
437 : }
438 : }
439 : }
440 :
441 224 : bool WasmMemoryTracker::MemoryObjectsNeedUpdate_Locked(
442 : Isolate* isolate, const void* backing_store) {
443 : // Return true if this buffer has memory_objects it needs to update.
444 : const auto& result = allocations_.find(backing_store);
445 224 : if (result == allocations_.end() || !result->second.is_shared) return false;
446 : // Only update if the buffer has memory objects that need to be updated.
447 224 : if (result->second.memory_object_vector.empty()) return false;
448 : const auto& isolate_entry = isolates_per_buffer_.find(backing_store);
449 448 : return (isolate_entry != isolates_per_buffer_.end() &&
450 : isolate_entry->second.count(isolate) != 0);
451 : }
452 :
453 89 : void WasmMemoryTracker::FreeMemoryIfNotShared_Locked(
454 : Isolate* isolate, const void* backing_store) {
455 89 : RemoveSharedBufferState_Locked(isolate, backing_store);
456 89 : if (CanFreeSharedMemory_Locked(backing_store)) {
457 : const AllocationData allocation =
458 89 : ReleaseAllocation_Locked(isolate, backing_store);
459 89 : CHECK(FreePages(GetPlatformPageAllocator(), allocation.allocation_base,
460 : allocation.allocation_length));
461 : }
462 89 : }
463 :
464 0 : bool WasmMemoryTracker::CanFreeSharedMemory_Locked(const void* backing_store) {
465 : const auto& value = isolates_per_buffer_.find(backing_store);
466 : // If no isolates share this buffer, the backing store can be freed.
467 : // Erase the buffer entry.
468 89 : if (value == isolates_per_buffer_.end()) return true;
469 0 : if (value->second.empty()) {
470 : // If no isolates share this buffer, the global handles to memory objects
471 : // associated with this buffer should have been destroyed.
472 : // DCHECK(shared_memory_map_.find(backing_store) ==
473 : // shared_memory_map_.end());
474 : return true;
475 : }
476 0 : return false;
477 : }
478 :
479 89 : void WasmMemoryTracker::RemoveSharedBufferState_Locked(
480 : Isolate* isolate, const void* backing_store) {
481 89 : if (isolate != nullptr) {
482 0 : DestroyMemoryObjectsAndRemoveIsolateEntry_Locked(isolate, backing_store);
483 0 : RemoveIsolateFromBackingStore_Locked(isolate, backing_store);
484 : } else {
485 : // This happens for externalized contents; clean up the shared memory state
486 : // associated with this buffer across all isolates.
487 89 : DestroyMemoryObjectsAndRemoveIsolateEntry_Locked(backing_store);
488 : }
489 89 : }
490 :
491 89 : void WasmMemoryTracker::DestroyMemoryObjectsAndRemoveIsolateEntry_Locked(
492 : const void* backing_store) {
493 : const auto& result = allocations_.find(backing_store);
494 89 : CHECK(result != allocations_.end() && result->second.is_shared);
495 : auto& object_vector = result->second.memory_object_vector;
496 89 : if (object_vector.empty()) return;
497 241 : for (const auto& mem_obj_state : object_vector) {
498 152 : GlobalHandles::Destroy(mem_obj_state.memory_object.location());
499 : }
500 : object_vector.clear();
501 : // Remove the buffer entry from the isolates-per-buffer map.
502 : isolates_per_buffer_.erase(backing_store);
503 : }
504 :
505 152 : void WasmMemoryTracker::DestroyMemoryObjectsAndRemoveIsolateEntry_Locked(
506 : Isolate* isolate, const void* backing_store) {
507 : // This gets called when an internal handle to the ArrayBuffer should be
508 : // freed on heap teardown for that isolate; remove the memory objects
509 : // that are associated with this buffer and isolate.
510 : const auto& result = allocations_.find(backing_store);
511 152 : CHECK(result != allocations_.end() && result->second.is_shared);
512 : auto& object_vector = result->second.memory_object_vector;
513 152 : if (object_vector.empty()) return;
514 784 : for (auto it = object_vector.begin(); it != object_vector.end();) {
515 632 : if (isolate == it->isolate) {
516 152 : GlobalHandles::Destroy(it->memory_object.location());
517 : it = object_vector.erase(it);
518 : } else {
519 : ++it;
520 : }
521 : }
522 : }
523 :
524 0 : void WasmMemoryTracker::RemoveIsolateFromBackingStore_Locked(
525 : Isolate* isolate, const void* backing_store) {
526 : const auto& isolates = isolates_per_buffer_.find(backing_store);
527 0 : if (isolates == isolates_per_buffer_.end() || isolates->second.empty())
528 : return;
529 : isolates->second.erase(isolate);
530 : }
531 :
532 62427 : void WasmMemoryTracker::DeleteSharedMemoryObjectsOnIsolate(Isolate* isolate) {
533 62427 : base::MutexGuard scope_lock(&mutex_);
534 : // This is possible for buffers that are externalized and whose handles have
535 : // been freed; the backing store wasn't released because externalized contents
536 : // were still using it.
537 62427 : if (isolates_per_buffer_.empty()) return;
538 580 : for (auto& entry : isolates_per_buffer_) {
539 429 : if (entry.second.find(isolate) == entry.second.end()) continue;
540 152 : const void* backing_store = entry.first;
541 : entry.second.erase(isolate);
542 152 : DestroyMemoryObjectsAndRemoveIsolateEntry_Locked(isolate, backing_store);
543 : }
544 199 : for (auto& buffer_isolates : isolates_updated_on_grow_) {
545 : auto& isolates = buffer_isolates.second;
546 : isolates.erase(isolate);
547 : }
548 : }
549 :
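// Wraps an already-allocated wasm memory backing store in a JSArrayBuffer
// without copying; the buffer is marked as wasm memory and as
// non-detachable.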
550 178631 : Handle<JSArrayBuffer> SetupArrayBuffer(Isolate* isolate, void* backing_store,
551 : size_t size, bool is_external,
552 : SharedFlag shared) {
553 : Handle<JSArrayBuffer> buffer =
554 178631 : isolate->factory()->NewJSArrayBuffer(shared, AllocationType::kOld);
555 : constexpr bool is_wasm_memory = true;
556 178631 : JSArrayBuffer::Setup(buffer, isolate, is_external, backing_store, size,
557 178631 : shared, is_wasm_memory);
558 : buffer->set_is_detachable(false);
559 178631 : return buffer;
560 : }
561 :
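// Allocates a backing store of {size} bytes (reserving up to {maximum_size})
// and wraps it in a JSArrayBuffer; returns an empty MaybeHandle if the size
// exceeds the flag-limited maximum or the allocation fails.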
562 176335 : MaybeHandle<JSArrayBuffer> AllocateAndSetupArrayBuffer(Isolate* isolate,
563 : size_t size,
564 : size_t maximum_size,
565 : SharedFlag shared) {
566 : // Enforce flag-limited maximum allocation size.
567 176335 : if (size > max_mem_bytes()) return {};
568 :
569 : WasmMemoryTracker* memory_tracker = isolate->wasm_engine()->memory_tracker();
570 :
571 : // Set by TryAllocateBackingStore or GetEmptyBackingStore
572 176335 : void* allocation_base = nullptr;
573 176335 : size_t allocation_length = 0;
574 :
575 : void* memory = TryAllocateBackingStore(memory_tracker, isolate->heap(), size,
576 : maximum_size, &allocation_base,
577 176335 : &allocation_length);
578 176335 : if (memory == nullptr) return {};
579 :
580 : #if DEBUG
581 : // Double check the API allocator actually zero-initialized the memory.
582 : const byte* bytes = reinterpret_cast<const byte*>(memory);
583 : for (size_t i = 0; i < size; ++i) {
584 : DCHECK_EQ(0, bytes[i]);
585 : }
586 : #endif
587 :
588 : reinterpret_cast<v8::Isolate*>(isolate)
589 176327 : ->AdjustAmountOfExternalAllocatedMemory(size);
590 :
591 : constexpr bool is_external = false;
592 176327 : return SetupArrayBuffer(isolate, memory, size, is_external, shared);
593 : }
594 :
595 175822 : MaybeHandle<JSArrayBuffer> NewArrayBuffer(Isolate* isolate, size_t size) {
596 : return AllocateAndSetupArrayBuffer(isolate, size, size,
597 175822 : SharedFlag::kNotShared);
598 : }
599 :
600 513 : MaybeHandle<JSArrayBuffer> NewSharedArrayBuffer(Isolate* isolate,
601 : size_t initial_size,
602 : size_t max_size) {
603 : return AllocateAndSetupArrayBuffer(isolate, initial_size, max_size,
604 513 : SharedFlag::kShared);
605 : }
606 :
607 2096 : void DetachMemoryBuffer(Isolate* isolate, Handle<JSArrayBuffer> buffer,
608 : bool free_memory) {
609 2096 : if (buffer->is_shared()) return; // Detaching shared buffers is impossible.
610 : DCHECK(!buffer->is_detachable());
611 :
612 : const bool is_external = buffer->is_external();
613 : DCHECK(!buffer->is_detachable());
614 2096 : if (!is_external) {
615 : buffer->set_is_external(true);
616 2084 : isolate->heap()->UnregisterArrayBuffer(*buffer);
617 2084 : if (free_memory) {
618 : // We need to free the memory before detaching the buffer because
619 : // FreeBackingStore reads buffer->allocation_base(), which is nulled out
620 : // by Detach. This means there is a dangling pointer until we detach the
621 : // buffer. Since there is no way for the user to directly call
622 : // FreeBackingStore, we can ensure this is safe.
623 0 : buffer->FreeBackingStoreFromMainThread();
624 : }
625 : }
626 :
627 : DCHECK(buffer->is_external());
628 : buffer->set_is_wasm_memory(false);
629 : buffer->set_is_detachable(true);
630 2096 : buffer->Detach();
631 : }
632 :
633 : } // namespace wasm
634 : } // namespace internal
635 122036 : } // namespace v8