Line data Source code
1 : // Copyright 2017 the V8 project authors. All rights reserved.
2 : // Use of this source code is governed by a BSD-style license that can be
3 : // found in the LICENSE file.
4 :
5 : #include <limits>
6 :
7 : #include "src/counters.h"
8 : #include "src/heap/heap-inl.h"
9 : #include "src/objects-inl.h"
10 : #include "src/objects/js-array-buffer-inl.h"
11 : #include "src/wasm/wasm-engine.h"
12 : #include "src/wasm/wasm-limits.h"
13 : #include "src/wasm/wasm-memory.h"
14 : #include "src/wasm/wasm-module.h"
15 :
16 : namespace v8 {
17 : namespace internal {
18 : namespace wasm {
19 :
20 : namespace {
21 :
22 : constexpr size_t kNegativeGuardSize = 1u << 31; // 2GiB
23 :
24 : void AddAllocationStatusSample(Isolate* isolate,
25 : WasmMemoryTracker::AllocationStatus status) {
26 : isolate->counters()->wasm_memory_allocation_result()->AddSample(
27 177212 : static_cast<int>(status));
28 : }
29 :
30 184585 : size_t GetAllocationLength(uint32_t size, bool require_full_guard_regions) {
31 184585 : if (require_full_guard_regions) {
32 367512 : return RoundUp(kWasmMaxHeapOffset + kNegativeGuardSize, CommitPageSize());
33 : } else {
34 829 : return RoundUp(
35 : base::bits::RoundUpToPowerOfTwo32(static_cast<uint32_t>(size)),
36 829 : kWasmPageSize);
37 : }
38 : }
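A worked example of the two branches above (a sketch, assuming the 64 KiB wasm page size and taking kWasmMaxHeapOffset from wasm-limits.h as roughly 8 GiB, i.e. the largest 32-bit base plus the largest 32-bit offset):
// require_full_guard_regions == true:
//   allocation length ~ RoundUp(8 GiB + 2 GiB, CommitPageSize()) ~ 10 GiB
//   of reserved (not yet committed) address space.
// require_full_guard_regions == false, size == 3 wasm pages == 196608 bytes:
//   RoundUp(RoundUpToPowerOfTwo32(196608), 65536) == 262144 bytes (256 KiB).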
39 :
40 177211 : void* TryAllocateBackingStore(WasmMemoryTracker* memory_tracker, Heap* heap,
41 : size_t size, size_t max_size,
42 : void** allocation_base,
43 : size_t* allocation_length) {
44 : using AllocationStatus = WasmMemoryTracker::AllocationStatus;
45 : #if V8_TARGET_ARCH_64_BIT
46 : bool require_full_guard_regions = true;
47 : #else
48 : bool require_full_guard_regions = false;
49 : #endif
50 : // Let the WasmMemoryTracker know we are going to reserve a bunch of
51 : // address space.
52 : // Try up to three times; getting rid of dead JSArrayBuffer allocations might
53 : // require two GCs because the first GC may be incremental and may have
54 : // floating garbage.
55 : static constexpr int kAllocationRetries = 2;
56 : // TODO(7881): do not use static_cast<uint32_t>() here
57 : uint32_t reservation_size =
58 177211 : static_cast<uint32_t>((max_size > size) ? max_size : size);
59 : // TODO(8898): Cleanup the allocation retry flow
60 : bool did_retry = false;
61 7373 : for (int trial = 0;; ++trial) {
62 : // For guard regions, we always allocate the largest possible offset into
63 : // the heap, so the addressable memory after the guard page can be made
64 : // inaccessible.
65 : //
66 : // To protect against 32-bit integer overflow issues, we also protect the
67 : // 2GiB before the valid part of the memory buffer.
68 : *allocation_length =
69 184584 : GetAllocationLength(reservation_size, require_full_guard_regions);
70 : DCHECK_GE(*allocation_length, size);
71 : DCHECK_GE(*allocation_length, kWasmPageSize);
72 :
73 : auto limit = require_full_guard_regions ? WasmMemoryTracker::kSoftLimit
74 184585 : : WasmMemoryTracker::kHardLimit;
75 184585 : if (memory_tracker->ReserveAddressSpace(*allocation_length, limit)) break;
76 :
77 : did_retry = true;
78 : // After first and second GC: retry.
79 7381 : if (trial == kAllocationRetries) {
80 : // Always reset reservation_size to initial size so that at least the
81 : // initial size can be allocated if maximum size reservation is not
82 : // possible.
83 837 : reservation_size = static_cast<uint32_t>(size);
84 :
85 : // If we fail to allocate guard regions and the fallback is enabled, then
86 : // retry without full guard regions.
87 837 : if (require_full_guard_regions && FLAG_wasm_trap_handler_fallback) {
88 : require_full_guard_regions = false;
89 829 : --trial; // one more try.
90 829 : continue;
91 : }
92 :
93 : // We are over the address space limit. Fail.
94 : //
95 : // When running under the correctness fuzzer (i.e.
96 : // --abort-on-stack-or-string-length-overflow is present), we crash
97 : // instead so it is not incorrectly reported as a correctness
98 : // violation. See https://crbug.com/828293#c4
99 8 : if (FLAG_abort_on_stack_or_string_length_overflow) {
100 0 : FATAL("could not allocate wasm memory");
101 : }
102 : AddAllocationStatusSample(
103 : heap->isolate(), AllocationStatus::kAddressSpaceLimitReachedFailure);
104 8 : return nullptr;
105 : }
106 : // Collect garbage and retry.
107 6544 : heap->MemoryPressureNotification(MemoryPressureLevel::kCritical, true);
108 : }
109 :
110 : // The page reservation below makes the whole region inaccessible by default.
111 : DCHECK_NULL(*allocation_base);
112 0 : for (int trial = 0;; ++trial) {
113 : *allocation_base =
114 177204 : AllocatePages(GetPlatformPageAllocator(), nullptr, *allocation_length,
115 177204 : kWasmPageSize, PageAllocator::kNoAccess);
116 177204 : if (*allocation_base != nullptr) break;
117 0 : if (trial == kAllocationRetries) {
118 0 : memory_tracker->ReleaseReservation(*allocation_length);
119 : AddAllocationStatusSample(heap->isolate(),
120 : AllocationStatus::kOtherFailure);
121 0 : return nullptr;
122 : }
123 0 : heap->MemoryPressureNotification(MemoryPressureLevel::kCritical, true);
124 : }
125 : byte* memory = reinterpret_cast<byte*>(*allocation_base);
126 177204 : if (require_full_guard_regions) {
127 176375 : memory += kNegativeGuardSize;
128 : }
129 :
130 : // Make the part we care about accessible.
131 177204 : if (size > 0) {
132 : bool result =
133 50276 : SetPermissions(GetPlatformPageAllocator(), memory,
134 50276 : RoundUp(size, kWasmPageSize), PageAllocator::kReadWrite);
135 : // SetPermissions commits the extra memory, which may put us over the
136 : // process memory limit. If so, report this as an OOM.
137 50276 : if (!result) {
138 0 : V8::FatalProcessOutOfMemory(nullptr, "TryAllocateBackingStore");
139 : }
140 : }
141 :
142 177204 : memory_tracker->RegisterAllocation(heap->isolate(), *allocation_base,
143 177204 : *allocation_length, memory, size);
144 177204 : AddAllocationStatusSample(heap->isolate(),
145 : did_retry ? AllocationStatus::kSuccessAfterRetry
146 : : AllocationStatus::kSuccess);
147 177204 : return memory;
148 : }
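On a 64-bit target with full guard regions the function above produces the following layout (a sketch; only the size bytes returned as memory are ever made read-write, so any out-of-bounds access, including one whose 32-bit offset arithmetic wraps below the base, lands on an inaccessible page and is caught by the trap handler):
// allocation_base                  memory (return value)
// |<-- 2 GiB negative guard -->|<-- size bytes (RW) -->|<-- guard pages -->|
// |<-------------- allocation_length (reserved, kNoAccess) -------------->|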
149 :
150 : #if V8_TARGET_ARCH_MIPS64
151 : // MIPS64 has a user space of 2^40 bytes on most processors,
152 : // so the address space limits need to be smaller.
153 : constexpr size_t kAddressSpaceSoftLimit = 0x2100000000L; // 132 GiB
154 : constexpr size_t kAddressSpaceHardLimit = 0x4000000000L; // 256 GiB
155 : #elif V8_TARGET_ARCH_64_BIT
156 : constexpr size_t kAddressSpaceSoftLimit = 0x6000000000L; // 384 GiB
157 : constexpr size_t kAddressSpaceHardLimit = 0x10100000000L; // 1 TiB + 4 GiB
158 : #else
159 : constexpr size_t kAddressSpaceSoftLimit = 0x90000000; // 2 GiB + 256 MiB
160 : constexpr size_t kAddressSpaceHardLimit = 0xC0000000; // 3 GiB
161 : #endif
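For scale (rough arithmetic, assuming the ~10 GiB full-guard reservation worked out above): the 64-bit soft limit admits only a few dozen guard-region reservations; once it is hit, TryAllocateBackingStore retries without guard regions (when --wasm-trap-handler-fallback is enabled), and those much smaller reservations are checked against the hard limit instead.
// 0x6000000000 (384 GiB) / ~10 GiB -> roughly 38 full-guard memories (soft limit)
// 0x10100000000 (1028 GiB) caps all reservations, including the fallback ones.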
162 :
163 : } // namespace
164 :
165 117966 : WasmMemoryTracker::~WasmMemoryTracker() {
166 : // All reserved address space should be released before the allocation tracker
167 : // is destroyed.
168 : DCHECK_EQ(reserved_address_space_, 0u);
169 : DCHECK_EQ(allocated_address_space_, 0u);
170 : DCHECK(allocations_.empty());
171 58983 : }
172 :
173 12 : void* WasmMemoryTracker::TryAllocateBackingStoreForTesting(
174 : Heap* heap, size_t size, void** allocation_base,
175 : size_t* allocation_length) {
176 : return TryAllocateBackingStore(this, heap, size, size, allocation_base,
177 12 : allocation_length);
178 : }
179 :
180 12 : void WasmMemoryTracker::FreeBackingStoreForTesting(base::AddressRegion memory,
181 : void* buffer_start) {
182 12 : base::MutexGuard scope_lock(&mutex_);
183 24 : ReleaseAllocation_Locked(nullptr, buffer_start);
184 12 : CHECK(FreePages(GetPlatformPageAllocator(),
185 : reinterpret_cast<void*>(memory.begin()), memory.size()));
186 12 : }
187 :
188 1427373 : bool WasmMemoryTracker::ReserveAddressSpace(size_t num_bytes,
189 : ReservationLimit limit) {
190 : size_t reservation_limit =
191 1427373 : limit == kSoftLimit ? kAddressSpaceSoftLimit : kAddressSpaceHardLimit;
192 15 : while (true) {
193 1427388 : size_t old_count = reserved_address_space_.load();
194 2854761 : if (old_count > reservation_limit) return false;
195 1427065 : if (reservation_limit - old_count < num_bytes) return false;
196 2838240 : if (reserved_address_space_.compare_exchange_weak(old_count,
197 : old_count + num_bytes)) {
198 : return true;
199 : }
200 : }
201 : }
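ReserveAddressSpace deliberately avoids the tracker mutex: it books address space against the limit with a compare-and-swap loop and retries when another thread wins the race. A minimal standalone sketch of the same pattern (names are illustrative, not part of V8):

#include <atomic>
#include <cstddef>

// Illustrative sketch: book `bytes` of a shared address-space budget without
// taking a lock, mirroring the compare-and-swap loop above.
bool TryReserve(std::atomic<size_t>* used, size_t budget, size_t bytes) {
  size_t old_used = used->load();
  while (true) {
    if (old_used > budget || budget - old_used < bytes) return false;
    // On failure, compare_exchange_weak reloads the current value into
    // old_used and the loop retries; spurious failures simply loop again.
    if (used->compare_exchange_weak(old_used, old_used + bytes)) return true;
  }
}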
202 :
203 1241902 : void WasmMemoryTracker::ReleaseReservation(size_t num_bytes) {
204 : size_t const old_reserved = reserved_address_space_.fetch_sub(num_bytes);
205 : USE(old_reserved);
206 : DCHECK_LE(num_bytes, old_reserved);
207 1241902 : }
208 :
209 177204 : void WasmMemoryTracker::RegisterAllocation(Isolate* isolate,
210 : void* allocation_base,
211 : size_t allocation_length,
212 : void* buffer_start,
213 : size_t buffer_length) {
214 177204 : base::MutexGuard scope_lock(&mutex_);
215 :
216 177204 : allocated_address_space_ += allocation_length;
217 : // Report address space usage in MiB so the full range fits in an int on all
218 : // platforms.
219 177204 : isolate->counters()->wasm_address_space_usage_mb()->AddSample(
220 354408 : static_cast<int>(allocated_address_space_ / MB));
221 :
222 : allocations_.emplace(buffer_start,
223 : AllocationData{allocation_base, allocation_length,
224 354408 : buffer_start, buffer_length});
225 177204 : }
226 :
227 177204 : WasmMemoryTracker::AllocationData WasmMemoryTracker::ReleaseAllocation_Locked(
228 : Isolate* isolate, const void* buffer_start) {
229 : auto find_result = allocations_.find(buffer_start);
230 177204 : CHECK_NE(find_result, allocations_.end());
231 :
232 177204 : size_t num_bytes = find_result->second.allocation_length;
233 : DCHECK_LE(num_bytes, reserved_address_space_);
234 : DCHECK_LE(num_bytes, allocated_address_space_);
235 : reserved_address_space_ -= num_bytes;
236 177204 : allocated_address_space_ -= num_bytes;
237 :
238 : AllocationData allocation_data = find_result->second;
239 : allocations_.erase(find_result);
240 177204 : return allocation_data;
241 : }
242 :
243 5954 : const WasmMemoryTracker::AllocationData* WasmMemoryTracker::FindAllocationData(
244 : const void* buffer_start) {
245 5954 : base::MutexGuard scope_lock(&mutex_);
246 : const auto& result = allocations_.find(buffer_start);
247 5954 : if (result != allocations_.end()) {
248 5954 : return &result->second;
249 : }
250 : return nullptr;
251 : }
252 :
253 805 : bool WasmMemoryTracker::IsWasmMemory(const void* buffer_start) {
254 805 : base::MutexGuard scope_lock(&mutex_);
255 805 : return allocations_.find(buffer_start) != allocations_.end();
256 : }
257 :
258 108 : bool WasmMemoryTracker::IsWasmSharedMemory(const void* buffer_start) {
259 108 : base::MutexGuard scope_lock(&mutex_);
260 : const auto& result = allocations_.find(buffer_start);
261 : // Should be a wasm allocation, and registered as a shared allocation.
262 216 : return (result != allocations_.end() && result->second.is_shared);
263 : }
264 :
265 131197 : bool WasmMemoryTracker::HasFullGuardRegions(const void* buffer_start) {
266 131197 : base::MutexGuard scope_lock(&mutex_);
267 : const auto allocation = allocations_.find(buffer_start);
268 :
269 131199 : if (allocation == allocations_.end()) {
270 : return false;
271 : }
272 :
273 131199 : Address start = reinterpret_cast<Address>(buffer_start);
274 : Address limit =
275 131199 : reinterpret_cast<Address>(allocation->second.allocation_base) +
276 131199 : allocation->second.allocation_length;
277 131199 : return start + kWasmMaxHeapOffset < limit;
278 : }
279 :
280 177186 : bool WasmMemoryTracker::FreeMemoryIfIsWasmMemory(Isolate* isolate,
281 : const void* buffer_start) {
282 177186 : base::MutexGuard scope_lock(&mutex_);
283 : const auto& result = allocations_.find(buffer_start);
284 177200 : if (result == allocations_.end()) return false;
285 177192 : if (result->second.is_shared) {
286 : // This is a shared WebAssembly.Memory allocation
287 89 : FreeMemoryIfNotShared_Locked(isolate, buffer_start);
288 89 : return true;
289 : }
290 : // This is a WebAssembly.Memory allocation
291 : const AllocationData allocation =
292 177103 : ReleaseAllocation_Locked(isolate, buffer_start);
293 177103 : CHECK(FreePages(GetPlatformPageAllocator(), allocation.allocation_base,
294 : allocation.allocation_length));
295 : return true;
296 : }
297 :
298 306 : void WasmMemoryTracker::RegisterWasmMemoryAsShared(
299 : Handle<WasmMemoryObject> object, Isolate* isolate) {
300 306 : const void* backing_store = object->array_buffer()->backing_store();
301 : // TODO(V8:8810): This should be a DCHECK; currently some tests do not
302 : // use a full WebAssembly.Memory and would fail on registering, so return early.
303 308 : if (!IsWasmMemory(backing_store)) return;
304 : {
305 304 : base::MutexGuard scope_lock(&mutex_);
306 : // Register as a shared allocation when it is postMessaged. This happens only
307 : // the first time a buffer is shared over postMessage, and tracks all the
308 : // memory objects that are associated with this backing store.
309 304 : RegisterSharedWasmMemory_Locked(object, isolate);
310 : // Add isolate to backing store mapping.
311 : isolates_per_buffer_[backing_store].emplace(isolate);
312 : }
313 : }
314 :
315 100 : void WasmMemoryTracker::SetPendingUpdateOnGrow(Handle<JSArrayBuffer> old_buffer,
316 : size_t new_size) {
317 100 : base::MutexGuard scope_lock(&mutex_);
318 : // Keep track of the new size of the buffer associated with each backing
319 : // store.
320 100 : AddBufferToGrowMap_Locked(old_buffer, new_size);
321 : // Request a GROW_SHARED_MEMORY interrupt on all other isolates.
322 100 : TriggerSharedGrowInterruptOnAllIsolates_Locked(old_buffer);
323 100 : }
324 :
325 136 : void WasmMemoryTracker::UpdateSharedMemoryInstances(Isolate* isolate) {
326 136 : base::MutexGuard scope_lock(&mutex_);
327 : // For every buffer in grow_update_map_, update the size for all the
328 : // memory objects associated with this isolate.
329 361 : for (auto it = grow_update_map_.begin(); it != grow_update_map_.end();) {
330 225 : UpdateSharedMemoryStateOnInterrupt_Locked(isolate, it->first, it->second);
331 : // If all the isolates that share this buffer have hit a stack check, their
332 : // memory objects are updated, and this grow entry can be erased.
333 225 : if (AreAllIsolatesUpdated_Locked(it->first)) {
334 : it = grow_update_map_.erase(it);
335 : } else {
336 : it++;
337 : }
338 : }
339 136 : }
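Together, SetPendingUpdateOnGrow and UpdateSharedMemoryInstances implement the cross-isolate grow protocol for shared memories. A condensed sketch of the sequence, using only the methods defined in this file:
// Growing isolate:
//   SetPendingUpdateOnGrow(old_buffer, new_size)
//     -> AddBufferToGrowMap_Locked(old_buffer, new_size)
//     -> TriggerSharedGrowInterruptOnAllIsolates_Locked(old_buffer)
//        (stack_guard()->RequestGrowSharedMemory() on each sharing isolate)
// Every sharing isolate, at its next stack check:
//   UpdateSharedMemoryInstances(isolate)
//     -> UpdateSharedMemoryStateOnInterrupt_Locked(...)  // new JSArrayBuffer
//     -> AreAllIsolatesUpdated_Locked(...)   // erase the grow entry once all
//                                            // sharing isolates have caught up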
340 :
341 304 : void WasmMemoryTracker::RegisterSharedWasmMemory_Locked(
342 : Handle<WasmMemoryObject> object, Isolate* isolate) {
343 : DCHECK(object->array_buffer()->is_shared());
344 :
345 : void* backing_store = object->array_buffer()->backing_store();
346 : // The allocation of a WasmMemoryObject should always be registered with the
347 : // WasmMemoryTracker.
348 608 : const auto& result = allocations_.find(backing_store);
349 304 : if (result == allocations_.end()) return;
350 :
351 : // Register the allocation as shared, if not already marked as shared.
352 304 : if (!result->second.is_shared) result->second.is_shared = true;
353 :
354 : // Create persistent global handles for the memory objects that are shared
355 : GlobalHandles* global_handles = isolate->global_handles();
356 : object = global_handles->Create(*object);
357 :
358 : // Add to memory_object_vector to track the memory objects and instance
359 : // objects that will need to be updated on a Grow call.
360 608 : result->second.memory_object_vector.push_back(
361 : SharedMemoryObjectState(object, isolate));
362 : }
363 :
364 100 : void WasmMemoryTracker::AddBufferToGrowMap_Locked(
365 : Handle<JSArrayBuffer> old_buffer, size_t new_size) {
366 100 : void* backing_store = old_buffer->backing_store();
367 200 : auto entry = grow_update_map_.find(old_buffer->backing_store());
368 100 : if (entry == grow_update_map_.end()) {
369 : // No pending grow for this backing store, add to map.
370 : grow_update_map_.emplace(backing_store, new_size);
371 56 : return;
372 : }
373 : // If grow on the same buffer is requested before the update is complete,
374 : // the new_size should always be greater than or equal to the old_size. Equal
375 : // in the case that grow(0) is called, but new buffer handles are mandated
376 : // by the Spec.
377 44 : CHECK_LE(entry->second, new_size);
378 44 : entry->second = new_size;
379 : // Flush isolates_updated_on_grow_ every time a new grow size is recorded.
380 44 : ClearUpdatedInstancesOnPendingGrow_Locked(backing_store);
381 : }
382 :
383 100 : void WasmMemoryTracker::TriggerSharedGrowInterruptOnAllIsolates_Locked(
384 : Handle<JSArrayBuffer> old_buffer) {
385 : // Request a GrowSharedMemory interrupt on all the isolates that share
386 : // the backing store.
387 200 : const auto& isolates = isolates_per_buffer_.find(old_buffer->backing_store());
388 301 : for (const auto& isolate : isolates->second) {
389 201 : isolate->stack_guard()->RequestGrowSharedMemory();
390 : }
391 100 : }
392 :
393 225 : void WasmMemoryTracker::UpdateSharedMemoryStateOnInterrupt_Locked(
394 : Isolate* isolate, void* backing_store, size_t new_size) {
395 : // Update objects only if there are memory objects that share this backing
396 : // store, and this isolate is marked as one of the isolates that shares this
397 : // buffer.
398 225 : if (MemoryObjectsNeedUpdate_Locked(isolate, backing_store)) {
399 196 : UpdateMemoryObjectsForIsolate_Locked(isolate, backing_store, new_size);
400 : // As the memory objects are updated, add this isolate to a set of isolates
401 : // that are updated on grow. This state is maintained to track if all the
402 : // isolates that share the backing store have hit a StackCheck.
403 392 : isolates_updated_on_grow_[backing_store].emplace(isolate);
404 : }
405 225 : }
406 :
407 225 : bool WasmMemoryTracker::AreAllIsolatesUpdated_Locked(
408 : const void* backing_store) {
409 : const auto& buffer_isolates = isolates_per_buffer_.find(backing_store);
410 : // No isolates share this buffer.
411 225 : if (buffer_isolates == isolates_per_buffer_.end()) return true;
412 : const auto& updated_isolates = isolates_updated_on_grow_.find(backing_store);
413 : // Some isolates share the buffer, but no isolates have been updated yet.
414 225 : if (updated_isolates == isolates_updated_on_grow_.end()) return false;
415 225 : if (buffer_isolates->second == updated_isolates->second) {
416 : // If all the isolates that share this backing_store have hit a stack check,
417 : // and the memory objects have been updated, remove the entry from the
418 : // updatemap, and return true.
419 : isolates_updated_on_grow_.erase(backing_store);
420 44 : return true;
421 : }
422 : return false;
423 : }
424 :
425 44 : void WasmMemoryTracker::ClearUpdatedInstancesOnPendingGrow_Locked(
426 : const void* backing_store) {
427 : // On multiple grows to the same buffer, the entries for that buffer should be
428 : // flushed. This is done so that any consecutive grows to the same buffer will
429 : // update all instances that share this buffer.
430 : const auto& value = isolates_updated_on_grow_.find(backing_store);
431 44 : if (value != isolates_updated_on_grow_.end()) {
432 : value->second.clear();
433 : }
434 44 : }
435 :
436 196 : void WasmMemoryTracker::UpdateMemoryObjectsForIsolate_Locked(
437 : Isolate* isolate, void* backing_store, size_t new_size) {
438 392 : const auto& result = allocations_.find(backing_store);
439 196 : if (result == allocations_.end() || !result->second.is_shared) return;
440 598 : for (const auto& memory_obj_state : result->second.memory_object_vector) {
441 : DCHECK_NE(memory_obj_state.isolate, nullptr);
442 402 : if (isolate == memory_obj_state.isolate) {
443 : HandleScope scope(isolate);
444 200 : Handle<WasmMemoryObject> memory_object = memory_obj_state.memory_object;
445 : DCHECK(memory_object->IsWasmMemoryObject());
446 : DCHECK(memory_object->array_buffer()->is_shared());
447 : // Permissions were already adjusted on grow; create a new buffer with the
448 : // new size and the old attributes. The memory has already been allocated,
449 : // so just create a new JSArrayBuffer over the same backing store.
450 : bool is_external = memory_object->array_buffer()->is_external();
451 : Handle<JSArrayBuffer> new_buffer = SetupArrayBuffer(
452 200 : isolate, backing_store, new_size, is_external, SharedFlag::kShared);
453 200 : memory_obj_state.memory_object->update_instances(isolate, new_buffer);
454 : }
455 : }
456 : }
457 :
458 225 : bool WasmMemoryTracker::MemoryObjectsNeedUpdate_Locked(
459 : Isolate* isolate, const void* backing_store) {
460 : // Return true if this buffer has memory_objects it needs to update.
461 : const auto& result = allocations_.find(backing_store);
462 225 : if (result == allocations_.end() || !result->second.is_shared) return false;
463 : // Only update if the buffer has memory objects that need to be updated.
464 225 : if (result->second.memory_object_vector.empty()) return false;
465 : const auto& isolate_entry = isolates_per_buffer_.find(backing_store);
466 450 : return (isolate_entry != isolates_per_buffer_.end() &&
467 : isolate_entry->second.count(isolate) != 0);
468 : }
469 :
470 89 : void WasmMemoryTracker::FreeMemoryIfNotShared_Locked(
471 : Isolate* isolate, const void* backing_store) {
472 89 : RemoveSharedBufferState_Locked(isolate, backing_store);
473 89 : if (CanFreeSharedMemory_Locked(backing_store)) {
474 : const AllocationData allocation =
475 89 : ReleaseAllocation_Locked(isolate, backing_store);
476 89 : CHECK(FreePages(GetPlatformPageAllocator(), allocation.allocation_base,
477 : allocation.allocation_length));
478 : }
479 89 : }
480 :
481 0 : bool WasmMemoryTracker::CanFreeSharedMemory_Locked(const void* backing_store) {
482 : const auto& value = isolates_per_buffer_.find(backing_store);
483 : // If no isolates share this buffer, backing store can be freed.
484 : // Erase the buffer entry.
485 89 : if (value == isolates_per_buffer_.end()) return true;
486 0 : if (value->second.empty()) {
487 : // If no isolates share this buffer, the global handles to memory objects
488 : // associated with this buffer should have been destroyed.
489 : // DCHECK(shared_memory_map_.find(backing_store) ==
490 : // shared_memory_map_.end());
491 : return true;
492 : }
493 0 : return false;
494 : }
495 :
496 89 : void WasmMemoryTracker::RemoveSharedBufferState_Locked(
497 : Isolate* isolate, const void* backing_store) {
498 89 : if (isolate != nullptr) {
499 0 : DestroyMemoryObjectsAndRemoveIsolateEntry_Locked(isolate, backing_store);
500 0 : RemoveIsolateFromBackingStore_Locked(isolate, backing_store);
501 : } else {
502 : // This happens for externalized contents; clean up the shared memory state
503 : // associated with this buffer across isolates.
504 89 : DestroyMemoryObjectsAndRemoveIsolateEntry_Locked(backing_store);
505 : }
506 89 : }
507 :
508 89 : void WasmMemoryTracker::DestroyMemoryObjectsAndRemoveIsolateEntry_Locked(
509 : const void* backing_store) {
510 : const auto& result = allocations_.find(backing_store);
511 89 : CHECK(result != allocations_.end() && result->second.is_shared);
512 : auto& object_vector = result->second.memory_object_vector;
513 89 : if (object_vector.empty()) return;
514 241 : for (const auto& mem_obj_state : object_vector) {
515 152 : GlobalHandles::Destroy(mem_obj_state.memory_object.location());
516 : }
517 : object_vector.clear();
518 : // Remove isolate from backing store map.
519 : isolates_per_buffer_.erase(backing_store);
520 : }
521 :
522 152 : void WasmMemoryTracker::DestroyMemoryObjectsAndRemoveIsolateEntry_Locked(
523 : Isolate* isolate, const void* backing_store) {
524 : // This gets called when an internal handle to the ArrayBuffer should be
525 : // freed on heap teardown for that isolate; remove the memory objects
526 : // that are associated with this buffer and isolate.
527 : const auto& result = allocations_.find(backing_store);
528 152 : CHECK(result != allocations_.end() && result->second.is_shared);
529 : auto& object_vector = result->second.memory_object_vector;
530 152 : if (object_vector.empty()) return;
531 786 : for (auto it = object_vector.begin(); it != object_vector.end();) {
532 634 : if (isolate == it->isolate) {
533 152 : GlobalHandles::Destroy(it->memory_object.location());
534 : it = object_vector.erase(it);
535 : } else {
536 : ++it;
537 : }
538 : }
539 : }
540 :
541 0 : void WasmMemoryTracker::RemoveIsolateFromBackingStore_Locked(
542 : Isolate* isolate, const void* backing_store) {
543 : const auto& isolates = isolates_per_buffer_.find(backing_store);
544 0 : if (isolates == isolates_per_buffer_.end() || isolates->second.empty())
545 : return;
546 : isolates->second.erase(isolate);
547 : }
548 :
549 61519 : void WasmMemoryTracker::DeleteSharedMemoryObjectsOnIsolate(Isolate* isolate) {
550 61519 : base::MutexGuard scope_lock(&mutex_);
551 : // This is possible for buffers that are externalized and whose handles have
552 : // been freed; the backing store wasn't released because externalized contents
553 : // were still using it.
554 61519 : if (isolates_per_buffer_.empty()) return;
555 588 : for (auto& entry : isolates_per_buffer_) {
556 436 : if (entry.second.find(isolate) == entry.second.end()) continue;
557 152 : const void* backing_store = entry.first;
558 : entry.second.erase(isolate);
559 152 : DestroyMemoryObjectsAndRemoveIsolateEntry_Locked(isolate, backing_store);
560 : }
561 202 : for (auto& buffer_isolates : isolates_updated_on_grow_) {
562 : auto& isolates = buffer_isolates.second;
563 : isolates.erase(isolate);
564 : }
565 : }
566 :
567 179560 : Handle<JSArrayBuffer> SetupArrayBuffer(Isolate* isolate, void* backing_store,
568 : size_t size, bool is_external,
569 : SharedFlag shared) {
570 : Handle<JSArrayBuffer> buffer =
571 179560 : isolate->factory()->NewJSArrayBuffer(shared, AllocationType::kOld);
572 : constexpr bool is_wasm_memory = true;
573 179560 : JSArrayBuffer::Setup(buffer, isolate, is_external, backing_store, size,
574 179560 : shared, is_wasm_memory);
575 : buffer->set_is_detachable(false);
576 : buffer->set_is_growable(true);
577 179559 : return buffer;
578 : }
579 :
580 177199 : MaybeHandle<JSArrayBuffer> AllocateAndSetupArrayBuffer(Isolate* isolate,
581 : size_t size,
582 : size_t maximum_size,
583 : SharedFlag shared) {
584 : // Enforce flag-limited maximum allocation size.
585 177199 : if (size > max_mem_bytes()) return {};
586 :
587 : WasmMemoryTracker* memory_tracker = isolate->wasm_engine()->memory_tracker();
588 :
589 : // Set by TryAllocateBackingStore below.
590 177199 : void* allocation_base = nullptr;
591 177199 : size_t allocation_length = 0;
592 :
593 : void* memory = TryAllocateBackingStore(memory_tracker, isolate->heap(), size,
594 : maximum_size, &allocation_base,
595 177199 : &allocation_length);
596 177200 : if (memory == nullptr) return {};
597 :
598 : #if DEBUG
599 : // Double check the API allocator actually zero-initialized the memory.
600 : const byte* bytes = reinterpret_cast<const byte*>(memory);
601 : for (size_t i = 0; i < size; ++i) {
602 : DCHECK_EQ(0, bytes[i]);
603 : }
604 : #endif
605 :
606 : reinterpret_cast<v8::Isolate*>(isolate)
607 177192 : ->AdjustAmountOfExternalAllocatedMemory(size);
608 :
609 : constexpr bool is_external = false;
610 177192 : return SetupArrayBuffer(isolate, memory, size, is_external, shared);
611 : }
612 :
613 176688 : MaybeHandle<JSArrayBuffer> NewArrayBuffer(Isolate* isolate, size_t size) {
614 : return AllocateAndSetupArrayBuffer(isolate, size, size,
615 176688 : SharedFlag::kNotShared);
616 : }
617 :
618 511 : MaybeHandle<JSArrayBuffer> NewSharedArrayBuffer(Isolate* isolate,
619 : size_t initial_size,
620 : size_t max_size) {
621 : return AllocateAndSetupArrayBuffer(isolate, initial_size, max_size,
622 511 : SharedFlag::kShared);
623 : }
624 :
625 2160 : void DetachMemoryBuffer(Isolate* isolate, Handle<JSArrayBuffer> buffer,
626 : bool free_memory) {
627 2160 : if (buffer->is_shared()) return; // Detaching shared buffers is impossible.
628 : DCHECK(!buffer->is_detachable());
629 :
630 : const bool is_external = buffer->is_external();
631 : DCHECK(!buffer->is_detachable());
632 2160 : if (!is_external) {
633 : buffer->set_is_external(true);
634 2148 : isolate->heap()->UnregisterArrayBuffer(*buffer);
635 2148 : if (free_memory) {
636 : // We need to free the memory before detaching the buffer because
637 : // FreeBackingStore reads buffer->allocation_base(), which is nulled out
638 : // by Detach. This means there is a dangling pointer until we detach the
639 : // buffer. Since there is no way for the user to directly call
640 : // FreeBackingStore, we can ensure this is safe.
641 0 : buffer->FreeBackingStoreFromMainThread();
642 : }
643 : }
644 :
645 : DCHECK(buffer->is_external());
646 : buffer->set_is_wasm_memory(false);
647 : buffer->set_is_detachable(true);
648 2160 : buffer->Detach();
649 : }
650 :
651 : } // namespace wasm
652 : } // namespace internal
653 120216 : } // namespace v8