LCOV - code coverage report
Current view: top level - src/wasm - wasm-memory.cc (source / functions)
Test: app.info
Date: 2019-04-17
                 Hit   Total   Coverage
Lines:           225   237     94.9 %
Functions:        42    44     95.5 %

          Line data    Source code
       1             : // Copyright 2017 the V8 project authors. All rights reserved.
       2             : // Use of this source code is governed by a BSD-style license that can be
       3             : // found in the LICENSE file.
       4             : 
       5             : #include <limits>
       6             : 
       7             : #include "src/counters.h"
       8             : #include "src/heap/heap-inl.h"
       9             : #include "src/objects-inl.h"
      10             : #include "src/objects/js-array-buffer-inl.h"
      11             : #include "src/wasm/wasm-engine.h"
      12             : #include "src/wasm/wasm-limits.h"
      13             : #include "src/wasm/wasm-memory.h"
      14             : #include "src/wasm/wasm-module.h"
      15             : 
      16             : namespace v8 {
      17             : namespace internal {
      18             : namespace wasm {
      19             : 
      20             : namespace {
      21             : 
      22             : constexpr size_t kNegativeGuardSize = 1u << 31;  // 2GiB
      23             : 
      24             : void AddAllocationStatusSample(Isolate* isolate,
      25             :                                WasmMemoryTracker::AllocationStatus status) {
      26             :   isolate->counters()->wasm_memory_allocation_result()->AddSample(
      27      178114 :       static_cast<int>(status));
      28             : }
      29             : 
      30      185546 : size_t GetAllocationLength(uint32_t size, bool require_full_guard_regions) {
      31      185546 :   if (require_full_guard_regions) {
      32      369434 :     return RoundUp(kWasmMaxHeapOffset + kNegativeGuardSize, CommitPageSize());
      33             :   } else {
      34         829 :     return RoundUp(
      35             :         base::bits::RoundUpToPowerOfTwo32(static_cast<uint32_t>(size)),
      36         829 :         kWasmPageSize);
      37             :   }
      38             : }
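
When full guard regions are not required, the allocation length is just the requested size rounded up to the next power of two and then to whole wasm pages. A self-contained sketch of that arithmetic, assuming a 64 KiB page (the value of kWasmPageSize); the two helpers stand in for RoundUp and base::bits::RoundUpToPowerOfTwo32:

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>

    constexpr size_t kPage = 64 * 1024;  // one wasm page, 64 KiB

    uint32_t RoundUpToPowerOfTwo32(uint32_t v) {
      if (v <= 1) return 1;
      --v;
      v |= v >> 1; v |= v >> 2; v |= v >> 4; v |= v >> 8; v |= v >> 16;
      return v + 1;
    }

    size_t RoundUpTo(size_t value, size_t multiple) {
      return (value + multiple - 1) / multiple * multiple;
    }

    int main() {
      uint32_t requested = 3 * kPage;  // 192 KiB requested
      // 192 KiB -> 256 KiB (next power of two), already a multiple of 64 KiB.
      size_t length = RoundUpTo(RoundUpToPowerOfTwo32(requested), kPage);
      std::printf("%zu\n", length);  // prints 262144
      return 0;
    }
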
      39             : 
      40      535155 : bool RunWithGCAndRetry(const std::function<bool()>& fn, Heap* heap,
      41             :                        bool* did_retry) {
      42             :   // Try up to three times; getting rid of dead JSArrayBuffer allocations might
      43             :   // require two GCs because the first GC may be incremental and may leave
      44             :   // floating garbage behind.
      45             :   static constexpr int kAllocationRetries = 2;
      46             : 
      47        6603 :   for (int trial = 0;; ++trial) {
      48      541758 :     if (fn()) return true;
      49             :     // {fn} failed. If {kAllocationRetries} is reached, fail.
      50        7440 :     *did_retry = true;
      51        7440 :     if (trial == kAllocationRetries) return false;
      52             :     // Otherwise, collect garbage and retry.
      53        6603 :     heap->MemoryPressureNotification(MemoryPressureLevel::kCritical, true);
      54             :   }
      55             : }
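
RunWithGCAndRetry is the generic shape shared by all three allocation steps below: try, and on failure trigger a critical memory-pressure GC before trying again. A minimal sketch of that pattern, with an illustrative release_garbage callback standing in for Heap::MemoryPressureNotification:

    #include <functional>

    // Runs {try_alloc} up to (1 + max_retries) times; between attempts it
    // invokes {release_garbage} so dead buffers can be reclaimed first.
    bool RetryWithCleanup(const std::function<bool()>& try_alloc,
                          const std::function<void()>& release_garbage,
                          bool* did_retry, int max_retries = 2) {
      for (int trial = 0;; ++trial) {
        if (try_alloc()) return true;
        *did_retry = true;
        if (trial == max_retries) return false;
        release_garbage();
      }
    }
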
      56             : 
      57      178114 : void* TryAllocateBackingStore(WasmMemoryTracker* memory_tracker, Heap* heap,
      58             :                               size_t size, size_t max_size,
      59             :                               void** allocation_base,
      60             :                               size_t* allocation_length) {
      61             :   using AllocationStatus = WasmMemoryTracker::AllocationStatus;
      62             : #if V8_TARGET_ARCH_64_BIT
      63      178114 :   bool require_full_guard_regions = true;
      64             : #else
      65             :   bool require_full_guard_regions = false;
      66             : #endif
      67             :   // Let the WasmMemoryTracker know we are going to reserve a bunch of
      68             :   // address space.
      69             :   // TODO(7881): do not use static_cast<uint32_t>() here
      70             :   uint32_t reservation_size =
      71      178114 :       static_cast<uint32_t>((max_size > size) ? max_size : size);
      72      178114 :   bool did_retry = false;
      73             : 
      74      185546 :   auto reserve_memory_space = [&] {
      75             :     // For guard regions, we always allocate the largest possible offset
      76             :     // into the heap, so the addressable memory after the guard page can
      77             :     // be made inaccessible.
      78             :     //
      79             :     // To protect against 32-bit integer overflow issues, we also
      80             :     // protect the 2GiB before the valid part of the memory buffer.
      81      371092 :     *allocation_length =
      82      185546 :         GetAllocationLength(reservation_size, require_full_guard_regions);
      83             :     DCHECK_GE(*allocation_length, size);
      84             :     DCHECK_GE(*allocation_length, kWasmPageSize);
      85             : 
      86             :     auto limit = require_full_guard_regions ? WasmMemoryTracker::kSoftLimit
      87      185546 :                                             : WasmMemoryTracker::kHardLimit;
      88      185546 :     return memory_tracker->ReserveAddressSpace(*allocation_length, limit);
      89             :   };
      90      356228 :   if (!RunWithGCAndRetry(reserve_memory_space, heap, &did_retry)) {
      91             :     // Reset reservation_size to initial size so that at least the initial size
      92             :     // can be allocated if maximum size reservation is not possible.
      93         837 :     reservation_size = static_cast<uint32_t>(size);
      94             : 
      95             :     // If we fail to allocate guard regions and the fallback is enabled, then
      96             :     // retry without full guard regions.
      97             :     bool fail = true;
      98         837 :     if (require_full_guard_regions && FLAG_wasm_trap_handler_fallback) {
      99         829 :       require_full_guard_regions = false;
     100        1658 :       fail = !RunWithGCAndRetry(reserve_memory_space, heap, &did_retry);
     101             :     }
     102         837 :     if (fail) {
     103             :       // We are over the address space limit. Fail.
     104             :       //
     105             :       // When running under the correctness fuzzer (i.e.
      106             :     // --abort-on-stack-or-string-length-overflow is set), we crash
     107             :       // instead so it is not incorrectly reported as a correctness
     108             :       // violation. See https://crbug.com/828293#c4
     109           8 :       if (FLAG_abort_on_stack_or_string_length_overflow) {
     110           0 :         FATAL("could not allocate wasm memory");
     111             :       }
     112             :       AddAllocationStatusSample(
     113             :           heap->isolate(), AllocationStatus::kAddressSpaceLimitReachedFailure);
     114           8 :       return nullptr;
     115             :     }
     116             :   }
     117             : 
      118             :   // The reservation makes the whole region inaccessible by default.
     119             :   DCHECK_NULL(*allocation_base);
     120      178106 :   auto allocate_pages = [&] {
     121      356212 :     *allocation_base =
     122      178106 :         AllocatePages(GetPlatformPageAllocator(), nullptr, *allocation_length,
     123      356212 :                       kWasmPageSize, PageAllocator::kNoAccess);
     124      178106 :     return *allocation_base != nullptr;
     125             :   };
     126      356212 :   if (!RunWithGCAndRetry(allocate_pages, heap, &did_retry)) {
     127           0 :     memory_tracker->ReleaseReservation(*allocation_length);
     128             :     AddAllocationStatusSample(heap->isolate(), AllocationStatus::kOtherFailure);
     129           0 :     return nullptr;
     130             :   }
     131             : 
     132      178106 :   byte* memory = reinterpret_cast<byte*>(*allocation_base);
     133      178106 :   if (require_full_guard_regions) {
     134      177277 :     memory += kNegativeGuardSize;
     135             :   }
     136             : 
     137             :   // Make the part we care about accessible.
     138      178106 :   auto commit_memory = [&] {
     139      228456 :     return size == 0 || SetPermissions(GetPlatformPageAllocator(), memory,
     140             :                                        RoundUp(size, kWasmPageSize),
     141             :                                        PageAllocator::kReadWrite);
     142      178106 :   };
     143             :   // SetPermissions commits the extra memory, which may put us over the
     144             :   // process memory limit. If so, report this as an OOM.
     145      356212 :   if (!RunWithGCAndRetry(commit_memory, heap, &did_retry)) {
     146           0 :     V8::FatalProcessOutOfMemory(nullptr, "TryAllocateBackingStore");
     147             :   }
     148             : 
     149      356212 :   memory_tracker->RegisterAllocation(heap->isolate(), *allocation_base,
     150      356212 :                                      *allocation_length, memory, size);
     151      178106 :   AddAllocationStatusSample(heap->isolate(),
     152             :                             did_retry ? AllocationStatus::kSuccessAfterRetry
     153             :                                       : AllocationStatus::kSuccess);
     154      178106 :   return memory;
     155             : }
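
On 64-bit targets the reservation produced above is laid out roughly as sketched below; only the 2 GiB negative guard is fixed by kNegativeGuardSize, while the trailing guard extends to the rounded-up kWasmMaxHeapOffset:

    allocation_base                            allocation_base + allocation_length
    v                                                                            v
    +----------------------+--------------------------+-------------------------+
    | 2 GiB negative guard | committed wasm memory    | inaccessible trailing   |
    | (inaccessible)       | (size bytes, read-write) | guard pages             |
    +----------------------+--------------------------+-------------------------+
                           ^
                           memory (the pointer handed back to the caller)
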
     156             : 
     157             : #if V8_TARGET_ARCH_MIPS64
     158             : // MIPS64 has a user space of 2^40 bytes on most processors,
      159             : // so the address space limits need to be smaller.
     160             : constexpr size_t kAddressSpaceSoftLimit = 0x2100000000L;  // 132 GiB
     161             : constexpr size_t kAddressSpaceHardLimit = 0x4000000000L;  // 256 GiB
     162             : #elif V8_TARGET_ARCH_64_BIT
     163             : constexpr size_t kAddressSpaceSoftLimit = 0x6000000000L;   // 384 GiB
     164             : constexpr size_t kAddressSpaceHardLimit = 0x10100000000L;  // 1 TiB + 4 GiB
     165             : #else
     166             : constexpr size_t kAddressSpaceSoftLimit = 0x90000000;  // 2 GiB + 256 MiB
     167             : constexpr size_t kAddressSpaceHardLimit = 0xC0000000;  // 3 GiB
     168             : #endif
     169             : 
     170             : }  // namespace
     171             : 
     172      119746 : WasmMemoryTracker::~WasmMemoryTracker() {
     173             :   // All reserved address space should be released before the allocation tracker
     174             :   // is destroyed.
     175             :   DCHECK_EQ(reserved_address_space_, 0u);
     176             :   DCHECK_EQ(allocated_address_space_, 0u);
     177             :   DCHECK(allocations_.empty());
     178       59873 : }
     179             : 
     180          12 : void* WasmMemoryTracker::TryAllocateBackingStoreForTesting(
     181             :     Heap* heap, size_t size, void** allocation_base,
     182             :     size_t* allocation_length) {
     183             :   return TryAllocateBackingStore(this, heap, size, size, allocation_base,
     184          12 :                                  allocation_length);
     185             : }
     186             : 
     187          12 : void WasmMemoryTracker::FreeBackingStoreForTesting(base::AddressRegion memory,
     188             :                                                    void* buffer_start) {
     189          12 :   base::MutexGuard scope_lock(&mutex_);
     190          24 :   ReleaseAllocation_Locked(nullptr, buffer_start);
     191          12 :   CHECK(FreePages(GetPlatformPageAllocator(),
     192             :                   reinterpret_cast<void*>(memory.begin()), memory.size()));
     193          12 : }
     194             : 
     195     1429406 : bool WasmMemoryTracker::ReserveAddressSpace(size_t num_bytes,
     196             :                                             ReservationLimit limit) {
     197             :   size_t reservation_limit =
     198     1429406 :       limit == kSoftLimit ? kAddressSpaceSoftLimit : kAddressSpaceHardLimit;
     199           8 :   while (true) {
     200     1429414 :     size_t old_count = reserved_address_space_.load();
     201     2858820 :     if (old_count > reservation_limit) return false;
     202     1429089 :     if (reservation_limit - old_count < num_bytes) return false;
     203     2842188 :     if (reserved_address_space_.compare_exchange_weak(old_count,
     204             :                                                       old_count + num_bytes)) {
     205             :       return true;
     206             :     }
     207             :   }
     208             : }
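
ReserveAddressSpace is a standard lock-free reserve-up-to-a-limit loop: compare_exchange_weak reloads the observed counter into old_count on failure, so the loop only has to re-check the limit and retry. A stand-alone sketch of the same pattern (names are illustrative, not V8 API):

    #include <atomic>
    #include <cstddef>

    bool TryReserve(std::atomic<size_t>* reserved, size_t num_bytes,
                    size_t limit) {
      size_t old_count = reserved->load();
      while (true) {
        // Fail if the limit is already exceeded or the request does not fit.
        if (old_count > limit || limit - old_count < num_bytes) return false;
        // On failure compare_exchange_weak stores the current counter value
        // into old_count, so the next iteration re-checks with fresh data.
        if (reserved->compare_exchange_weak(old_count, old_count + num_bytes)) {
          return true;
        }
      }
    }
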
     209             : 
     210     1242981 : void WasmMemoryTracker::ReleaseReservation(size_t num_bytes) {
     211             :   size_t const old_reserved = reserved_address_space_.fetch_sub(num_bytes);
     212             :   USE(old_reserved);
     213             :   DCHECK_LE(num_bytes, old_reserved);
     214     1242981 : }
     215             : 
     216      178106 : void WasmMemoryTracker::RegisterAllocation(Isolate* isolate,
     217             :                                            void* allocation_base,
     218             :                                            size_t allocation_length,
     219             :                                            void* buffer_start,
     220             :                                            size_t buffer_length) {
     221      178106 :   base::MutexGuard scope_lock(&mutex_);
     222             : 
     223      178106 :   allocated_address_space_ += allocation_length;
     224             :   // Report address space usage in MiB so the full range fits in an int on all
     225             :   // platforms.
     226      178106 :   isolate->counters()->wasm_address_space_usage_mb()->AddSample(
     227      356212 :       static_cast<int>(allocated_address_space_ / MB));
     228             : 
     229             :   allocations_.emplace(buffer_start,
     230             :                        AllocationData{allocation_base, allocation_length,
     231      356212 :                                       buffer_start, buffer_length});
     232      178106 : }
     233             : 
     234      178106 : WasmMemoryTracker::AllocationData WasmMemoryTracker::ReleaseAllocation_Locked(
     235             :     Isolate* isolate, const void* buffer_start) {
     236             :   auto find_result = allocations_.find(buffer_start);
     237      178106 :   CHECK_NE(find_result, allocations_.end());
     238             : 
     239      178106 :   size_t num_bytes = find_result->second.allocation_length;
     240             :   DCHECK_LE(num_bytes, reserved_address_space_);
     241             :   DCHECK_LE(num_bytes, allocated_address_space_);
     242             :   reserved_address_space_ -= num_bytes;
     243      178106 :   allocated_address_space_ -= num_bytes;
     244             : 
     245             :   AllocationData allocation_data = find_result->second;
     246             :   allocations_.erase(find_result);
     247      178106 :   return allocation_data;
     248             : }
     249             : 
     250        5842 : const WasmMemoryTracker::AllocationData* WasmMemoryTracker::FindAllocationData(
     251             :     const void* buffer_start) {
     252        5842 :   base::MutexGuard scope_lock(&mutex_);
     253             :   const auto& result = allocations_.find(buffer_start);
     254        5842 :   if (result != allocations_.end()) {
     255        5842 :     return &result->second;
     256             :   }
     257             :   return nullptr;
     258             : }
     259             : 
     260         805 : bool WasmMemoryTracker::IsWasmMemory(const void* buffer_start) {
     261         805 :   base::MutexGuard scope_lock(&mutex_);
     262         805 :   return allocations_.find(buffer_start) != allocations_.end();
     263             : }
     264             : 
     265         108 : bool WasmMemoryTracker::IsWasmSharedMemory(const void* buffer_start) {
     266         108 :   base::MutexGuard scope_lock(&mutex_);
     267             :   const auto& result = allocations_.find(buffer_start);
     268             :   // Should be a wasm allocation, and registered as a shared allocation.
     269         216 :   return (result != allocations_.end() && result->second.is_shared);
     270             : }
     271             : 
     272      131956 : bool WasmMemoryTracker::HasFullGuardRegions(const void* buffer_start) {
     273      131956 :   base::MutexGuard scope_lock(&mutex_);
     274             :   const auto allocation = allocations_.find(buffer_start);
     275             : 
     276      131957 :   if (allocation == allocations_.end()) {
     277             :     return false;
     278             :   }
     279             : 
     280      131957 :   Address start = reinterpret_cast<Address>(buffer_start);
     281             :   Address limit =
     282      131957 :       reinterpret_cast<Address>(allocation->second.allocation_base) +
     283      131957 :       allocation->second.allocation_length;
     284      131957 :   return start + kWasmMaxHeapOffset < limit;
     285             : }
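
In other words, full guard regions are reported only when buffer_start + kWasmMaxHeapOffset still lies inside the reservation, so every address the compiled code can form as base + offset (with offset bounded by kWasmMaxHeapOffset) falls on an inaccessible page and traps instead of touching unrelated memory.
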
     286             : 
     287        1324 : void WasmMemoryTracker::MarkWasmMemoryNotGrowable(
     288             :     Handle<JSArrayBuffer> buffer) {
     289        1324 :   base::MutexGuard scope_lock(&mutex_);
     290        2648 :   const auto& allocation = allocations_.find(buffer->backing_store());
     291        1324 :   if (allocation == allocations_.end()) return;
     292           8 :   allocation->second.is_growable = false;
     293             : }
     294             : 
     295        2432 : bool WasmMemoryTracker::IsWasmMemoryGrowable(Handle<JSArrayBuffer> buffer) {
     296        2432 :   base::MutexGuard scope_lock(&mutex_);
     297        2432 :   if (buffer->backing_store() == nullptr) return true;
     298        4864 :   const auto& allocation = allocations_.find(buffer->backing_store());
     299        2432 :   if (allocation == allocations_.end()) return false;
     300        2432 :   return allocation->second.is_growable;
     301             : }
     302             : 
     303      178097 : bool WasmMemoryTracker::FreeMemoryIfIsWasmMemory(Isolate* isolate,
     304             :                                                  const void* buffer_start) {
     305      178097 :   base::MutexGuard scope_lock(&mutex_);
     306             :   const auto& result = allocations_.find(buffer_start);
     307      178102 :   if (result == allocations_.end()) return false;
     308      178094 :   if (result->second.is_shared) {
     309             :     // This is a shared WebAssembly.Memory allocation
     310          89 :     FreeMemoryIfNotShared_Locked(isolate, buffer_start);
     311          89 :     return true;
     312             :   }
     313             :   // This is a WebAssembly.Memory allocation
     314             :   const AllocationData allocation =
     315      178005 :       ReleaseAllocation_Locked(isolate, buffer_start);
     316      178005 :   CHECK(FreePages(GetPlatformPageAllocator(), allocation.allocation_base,
     317             :                   allocation.allocation_length));
     318             :   return true;
     319             : }
     320             : 
     321         306 : void WasmMemoryTracker::RegisterWasmMemoryAsShared(
     322             :     Handle<WasmMemoryObject> object, Isolate* isolate) {
     323         306 :   const void* backing_store = object->array_buffer()->backing_store();
     324             :   // TODO(V8:8810): This should be a DCHECK, currently some tests do not
      325             :   // use a full WebAssembly.Memory and would fail to register, so return early.
     326         308 :   if (!IsWasmMemory(backing_store)) return;
     327             :   {
     328         304 :     base::MutexGuard scope_lock(&mutex_);
      329             :     // Register as a shared allocation when it is postMessaged. This happens
      330             :     // only the first time a buffer is shared over postMessage; also track all
      331             :     // the memory objects that are associated with this backing store.
     332         304 :     RegisterSharedWasmMemory_Locked(object, isolate);
     333             :     // Add isolate to backing store mapping.
     334             :     isolates_per_buffer_[backing_store].emplace(isolate);
     335             :   }
     336             : }
     337             : 
     338         100 : void WasmMemoryTracker::SetPendingUpdateOnGrow(Handle<JSArrayBuffer> old_buffer,
     339             :                                                size_t new_size) {
     340         100 :   base::MutexGuard scope_lock(&mutex_);
     341             :   // Keep track of the new size of the buffer associated with each backing
     342             :   // store.
     343         100 :   AddBufferToGrowMap_Locked(old_buffer, new_size);
      344             :   // Request a GROW_SHARED_MEMORY interrupt on the other isolates.
     345         100 :   TriggerSharedGrowInterruptOnAllIsolates_Locked(old_buffer);
     346         100 : }
     347             : 
     348         136 : void WasmMemoryTracker::UpdateSharedMemoryInstances(Isolate* isolate) {
     349         136 :   base::MutexGuard scope_lock(&mutex_);
      350             :   // For every buffer in the grow_update_map_, update the size for all the
     351             :   // memory objects associated with this isolate.
     352         361 :   for (auto it = grow_update_map_.begin(); it != grow_update_map_.end();) {
     353         225 :     UpdateSharedMemoryStateOnInterrupt_Locked(isolate, it->first, it->second);
     354             :     // If all the isolates that share this buffer have hit a stack check, their
     355             :     // memory objects are updated, and this grow entry can be erased.
     356         225 :     if (AreAllIsolatesUpdated_Locked(it->first)) {
     357             :       it = grow_update_map_.erase(it);
     358             :     } else {
     359             :       it++;
     360             :     }
     361             :   }
     362         136 : }
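
Taken together, SetPendingUpdateOnGrow and UpdateSharedMemoryInstances implement the shared-memory grow protocol: the growing isolate records the new size in grow_update_map_ and requests a GrowSharedMemory interrupt on every isolate sharing the backing store; each of those isolates reaches a stack check, updates its own memory objects here, and the pending entry is erased once all sharing isolates have done so.
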
     363             : 
     364         304 : void WasmMemoryTracker::RegisterSharedWasmMemory_Locked(
     365             :     Handle<WasmMemoryObject> object, Isolate* isolate) {
     366             :   DCHECK(object->array_buffer()->is_shared());
     367             : 
     368             :   void* backing_store = object->array_buffer()->backing_store();
     369             :   // The allocation of a WasmMemoryObject should always be registered with the
     370             :   // WasmMemoryTracker.
     371         608 :   const auto& result = allocations_.find(backing_store);
     372         304 :   if (result == allocations_.end()) return;
     373             : 
      374             :   // Register the allocation as shared, if not already marked as shared.
     375         304 :   if (!result->second.is_shared) result->second.is_shared = true;
     376             : 
     377             :   // Create persistent global handles for the memory objects that are shared
     378             :   GlobalHandles* global_handles = isolate->global_handles();
     379             :   object = global_handles->Create(*object);
     380             : 
      381             :   // Add to memory_object_vector to track the memory objects and instance
      382             :   // objects that will need to be updated on a grow call.
     383         608 :   result->second.memory_object_vector.push_back(
     384             :       SharedMemoryObjectState(object, isolate));
     385             : }
     386             : 
     387         100 : void WasmMemoryTracker::AddBufferToGrowMap_Locked(
     388             :     Handle<JSArrayBuffer> old_buffer, size_t new_size) {
     389         100 :   void* backing_store = old_buffer->backing_store();
     390         200 :   auto entry = grow_update_map_.find(old_buffer->backing_store());
     391         100 :   if (entry == grow_update_map_.end()) {
     392             :     // No pending grow for this backing store, add to map.
     393             :     grow_update_map_.emplace(backing_store, new_size);
     394          56 :     return;
     395             :   }
     396             :   // If grow on the same buffer is requested before the update is complete,
      397             :   // the new_size should always be greater than or equal to the old_size. They
      398             :   // are equal when grow(0) is called, but new buffer handles are mandated
      399             :   // by the spec.
     400          44 :   CHECK_LE(entry->second, new_size);
     401          44 :   entry->second = new_size;
      402             :   // Flush instances_updated every time a new grow size is recorded.
     403          44 :   ClearUpdatedInstancesOnPendingGrow_Locked(backing_store);
     404             : }
     405             : 
     406         100 : void WasmMemoryTracker::TriggerSharedGrowInterruptOnAllIsolates_Locked(
     407             :     Handle<JSArrayBuffer> old_buffer) {
      408             :   // Request a GrowSharedMemory interrupt on all the isolates that share
     409             :   // the backing store.
     410         200 :   const auto& isolates = isolates_per_buffer_.find(old_buffer->backing_store());
     411         301 :   for (const auto& isolate : isolates->second) {
     412         201 :     isolate->stack_guard()->RequestGrowSharedMemory();
     413             :   }
     414         100 : }
     415             : 
     416         225 : void WasmMemoryTracker::UpdateSharedMemoryStateOnInterrupt_Locked(
     417             :     Isolate* isolate, void* backing_store, size_t new_size) {
     418             :   // Update objects only if there are memory objects that share this backing
     419             :   // store, and this isolate is marked as one of the isolates that shares this
     420             :   // buffer.
     421         225 :   if (MemoryObjectsNeedUpdate_Locked(isolate, backing_store)) {
     422         196 :     UpdateMemoryObjectsForIsolate_Locked(isolate, backing_store, new_size);
     423             :     // As the memory objects are updated, add this isolate to a set of isolates
     424             :     // that are updated on grow. This state is maintained to track if all the
     425             :     // isolates that share the backing store have hit a StackCheck.
     426         392 :     isolates_updated_on_grow_[backing_store].emplace(isolate);
     427             :   }
     428         225 : }
     429             : 
     430         225 : bool WasmMemoryTracker::AreAllIsolatesUpdated_Locked(
     431             :     const void* backing_store) {
     432             :   const auto& buffer_isolates = isolates_per_buffer_.find(backing_store);
     433             :   // No isolates share this buffer.
     434         225 :   if (buffer_isolates == isolates_per_buffer_.end()) return true;
     435             :   const auto& updated_isolates = isolates_updated_on_grow_.find(backing_store);
     436             :   // Some isolates share the buffer, but no isolates have been updated yet.
     437         225 :   if (updated_isolates == isolates_updated_on_grow_.end()) return false;
     438         225 :   if (buffer_isolates->second == updated_isolates->second) {
     439             :     // If all the isolates that share this backing_store have hit a stack check,
     440             :     // and the memory objects have been updated, remove the entry from the
      441             :     // update map, and return true.
     442             :     isolates_updated_on_grow_.erase(backing_store);
     443          44 :     return true;
     444             :   }
     445             :   return false;
     446             : }
     447             : 
     448          44 : void WasmMemoryTracker::ClearUpdatedInstancesOnPendingGrow_Locked(
     449             :     const void* backing_store) {
     450             :   // On multiple grows to the same buffer, the entries for that buffer should be
     451             :   // flushed. This is done so that any consecutive grows to the same buffer will
     452             :   // update all instances that share this buffer.
     453             :   const auto& value = isolates_updated_on_grow_.find(backing_store);
     454          44 :   if (value != isolates_updated_on_grow_.end()) {
     455             :     value->second.clear();
     456             :   }
     457          44 : }
     458             : 
     459         196 : void WasmMemoryTracker::UpdateMemoryObjectsForIsolate_Locked(
     460             :     Isolate* isolate, void* backing_store, size_t new_size) {
     461         392 :   const auto& result = allocations_.find(backing_store);
     462         196 :   if (result == allocations_.end() || !result->second.is_shared) return;
     463         598 :   for (const auto& memory_obj_state : result->second.memory_object_vector) {
     464             :     DCHECK_NE(memory_obj_state.isolate, nullptr);
     465         402 :     if (isolate == memory_obj_state.isolate) {
     466             :       HandleScope scope(isolate);
     467         200 :       Handle<WasmMemoryObject> memory_object = memory_obj_state.memory_object;
     468             :       DCHECK(memory_object->IsWasmMemoryObject());
     469             :       DCHECK(memory_object->array_buffer()->is_shared());
      470             :       // The permissions have already been adjusted and the buffer has
      471             :       // already been allocated; just create a new buffer with the new size,
      472             :       // the old attributes, and the same backing store.
     473             :       bool is_external = memory_object->array_buffer()->is_external();
     474             :       Handle<JSArrayBuffer> new_buffer = SetupArrayBuffer(
     475         200 :           isolate, backing_store, new_size, is_external, SharedFlag::kShared);
     476         200 :       memory_obj_state.memory_object->update_instances(isolate, new_buffer);
     477             :     }
     478             :   }
     479             : }
     480             : 
     481         225 : bool WasmMemoryTracker::MemoryObjectsNeedUpdate_Locked(
     482             :     Isolate* isolate, const void* backing_store) {
     483             :   // Return true if this buffer has memory_objects it needs to update.
     484             :   const auto& result = allocations_.find(backing_store);
     485         225 :   if (result == allocations_.end() || !result->second.is_shared) return false;
     486             :   // Only update if the buffer has memory objects that need to be updated.
     487         225 :   if (result->second.memory_object_vector.empty()) return false;
     488             :   const auto& isolate_entry = isolates_per_buffer_.find(backing_store);
     489         450 :   return (isolate_entry != isolates_per_buffer_.end() &&
     490             :           isolate_entry->second.count(isolate) != 0);
     491             : }
     492             : 
     493          89 : void WasmMemoryTracker::FreeMemoryIfNotShared_Locked(
     494             :     Isolate* isolate, const void* backing_store) {
     495          89 :   RemoveSharedBufferState_Locked(isolate, backing_store);
     496          89 :   if (CanFreeSharedMemory_Locked(backing_store)) {
     497             :     const AllocationData allocation =
     498          89 :         ReleaseAllocation_Locked(isolate, backing_store);
     499          89 :     CHECK(FreePages(GetPlatformPageAllocator(), allocation.allocation_base,
     500             :                     allocation.allocation_length));
     501             :   }
     502          89 : }
     503             : 
     504           0 : bool WasmMemoryTracker::CanFreeSharedMemory_Locked(const void* backing_store) {
     505             :   const auto& value = isolates_per_buffer_.find(backing_store);
     506             :   // If no isolates share this buffer, backing store can be freed.
     507             :   // Erase the buffer entry.
     508          89 :   if (value == isolates_per_buffer_.end()) return true;
     509           0 :   if (value->second.empty()) {
     510             :     // If no isolates share this buffer, the global handles to memory objects
     511             :     // associated with this buffer should have been destroyed.
     512             :     // DCHECK(shared_memory_map_.find(backing_store) ==
     513             :     // shared_memory_map_.end());
     514             :     return true;
     515             :   }
     516           0 :   return false;
     517             : }
     518             : 
     519          89 : void WasmMemoryTracker::RemoveSharedBufferState_Locked(
     520             :     Isolate* isolate, const void* backing_store) {
     521          89 :   if (isolate != nullptr) {
     522           0 :     DestroyMemoryObjectsAndRemoveIsolateEntry_Locked(isolate, backing_store);
     523           0 :     RemoveIsolateFromBackingStore_Locked(isolate, backing_store);
     524             :   } else {
      525             :     // This happens for externalized contents; clean up the shared memory
      526             :     // state associated with this buffer across isolates.
     527          89 :     DestroyMemoryObjectsAndRemoveIsolateEntry_Locked(backing_store);
     528             :   }
     529          89 : }
     530             : 
     531          89 : void WasmMemoryTracker::DestroyMemoryObjectsAndRemoveIsolateEntry_Locked(
     532             :     const void* backing_store) {
     533             :   const auto& result = allocations_.find(backing_store);
     534          89 :   CHECK(result != allocations_.end() && result->second.is_shared);
     535             :   auto& object_vector = result->second.memory_object_vector;
     536          89 :   if (object_vector.empty()) return;
     537         241 :   for (const auto& mem_obj_state : object_vector) {
     538         152 :     GlobalHandles::Destroy(mem_obj_state.memory_object.location());
     539             :   }
     540             :   object_vector.clear();
     541             :   // Remove isolate from backing store map.
     542             :   isolates_per_buffer_.erase(backing_store);
     543             : }
     544             : 
     545         152 : void WasmMemoryTracker::DestroyMemoryObjectsAndRemoveIsolateEntry_Locked(
     546             :     Isolate* isolate, const void* backing_store) {
     547             :   // This gets called when an internal handle to the ArrayBuffer should be
      548             :   // freed, e.g. on heap tear down for that isolate; remove the memory objects
     549             :   // that are associated with this buffer and isolate.
     550             :   const auto& result = allocations_.find(backing_store);
     551         152 :   CHECK(result != allocations_.end() && result->second.is_shared);
     552             :   auto& object_vector = result->second.memory_object_vector;
     553         152 :   if (object_vector.empty()) return;
     554         788 :   for (auto it = object_vector.begin(); it != object_vector.end();) {
     555         636 :     if (isolate == it->isolate) {
     556         152 :       GlobalHandles::Destroy(it->memory_object.location());
     557             :       it = object_vector.erase(it);
     558             :     } else {
     559             :       ++it;
     560             :     }
     561             :   }
     562             : }
     563             : 
     564           0 : void WasmMemoryTracker::RemoveIsolateFromBackingStore_Locked(
     565             :     Isolate* isolate, const void* backing_store) {
     566             :   const auto& isolates = isolates_per_buffer_.find(backing_store);
     567           0 :   if (isolates == isolates_per_buffer_.end() || isolates->second.empty())
     568             :     return;
     569             :   isolates->second.erase(isolate);
     570             : }
     571             : 
     572       62410 : void WasmMemoryTracker::DeleteSharedMemoryObjectsOnIsolate(Isolate* isolate) {
     573       62410 :   base::MutexGuard scope_lock(&mutex_);
      574             :   // This is possible for buffers that are externalized and whose handles have
      575             :   // been freed; the backing store wasn't released because externalized
      576             :   // contents were still using it.
     577       62411 :   if (isolates_per_buffer_.empty()) return;
     578         592 :   for (auto& entry : isolates_per_buffer_) {
     579         441 :     if (entry.second.find(isolate) == entry.second.end()) continue;
     580         152 :     const void* backing_store = entry.first;
     581             :     entry.second.erase(isolate);
     582         152 :     DestroyMemoryObjectsAndRemoveIsolateEntry_Locked(isolate, backing_store);
     583             :   }
     584         201 :   for (auto& buffer_isolates : isolates_updated_on_grow_) {
     585             :     auto& isolates = buffer_isolates.second;
     586             :     isolates.erase(isolate);
     587             :   }
     588             : }
     589             : 
     590      180406 : Handle<JSArrayBuffer> SetupArrayBuffer(Isolate* isolate, void* backing_store,
     591             :                                        size_t size, bool is_external,
     592             :                                        SharedFlag shared) {
     593             :   Handle<JSArrayBuffer> buffer =
     594      180406 :       isolate->factory()->NewJSArrayBuffer(shared, AllocationType::kOld);
     595             :   constexpr bool is_wasm_memory = true;
     596      180406 :   JSArrayBuffer::Setup(buffer, isolate, is_external, backing_store, size,
     597      180406 :                        shared, is_wasm_memory);
     598             :   buffer->set_is_detachable(false);
     599      180406 :   return buffer;
     600             : }
     601             : 
     602      178102 : MaybeHandle<JSArrayBuffer> AllocateAndSetupArrayBuffer(Isolate* isolate,
     603             :                                                        size_t size,
     604             :                                                        size_t maximum_size,
     605             :                                                        SharedFlag shared) {
     606             :   // Enforce flag-limited maximum allocation size.
     607      178102 :   if (size > max_mem_bytes()) return {};
     608             : 
     609             :   WasmMemoryTracker* memory_tracker = isolate->wasm_engine()->memory_tracker();
     610             : 
     611             :   // Set by TryAllocateBackingStore or GetEmptyBackingStore
     612      178102 :   void* allocation_base = nullptr;
     613      178102 :   size_t allocation_length = 0;
     614             : 
     615             :   void* memory = TryAllocateBackingStore(memory_tracker, isolate->heap(), size,
     616             :                                          maximum_size, &allocation_base,
     617      178102 :                                          &allocation_length);
     618      178102 :   if (memory == nullptr) return {};
     619             : 
     620             : #if DEBUG
     621             :   // Double check the API allocator actually zero-initialized the memory.
     622             :   const byte* bytes = reinterpret_cast<const byte*>(memory);
     623             :   for (size_t i = 0; i < size; ++i) {
     624             :     DCHECK_EQ(0, bytes[i]);
     625             :   }
     626             : #endif
     627             : 
     628             :   reinterpret_cast<v8::Isolate*>(isolate)
     629      178094 :       ->AdjustAmountOfExternalAllocatedMemory(size);
     630             : 
     631             :   constexpr bool is_external = false;
     632      178094 :   return SetupArrayBuffer(isolate, memory, size, is_external, shared);
     633             : }
     634             : 
     635      177593 : MaybeHandle<JSArrayBuffer> NewArrayBuffer(Isolate* isolate, size_t size) {
     636             :   return AllocateAndSetupArrayBuffer(isolate, size, size,
     637      177593 :                                      SharedFlag::kNotShared);
     638             : }
     639             : 
     640         509 : MaybeHandle<JSArrayBuffer> NewSharedArrayBuffer(Isolate* isolate,
     641             :                                                 size_t initial_size,
     642             :                                                 size_t max_size) {
     643             :   return AllocateAndSetupArrayBuffer(isolate, initial_size, max_size,
     644         509 :                                      SharedFlag::kShared);
     645             : }
     646             : 
     647        2104 : void DetachMemoryBuffer(Isolate* isolate, Handle<JSArrayBuffer> buffer,
     648             :                         bool free_memory) {
     649        2104 :   if (buffer->is_shared()) return;  // Detaching shared buffers is impossible.
     650             :   DCHECK(!buffer->is_detachable());
     651             : 
     652             :   const bool is_external = buffer->is_external();
     653             :   DCHECK(!buffer->is_detachable());
     654        2104 :   if (!is_external) {
     655             :     buffer->set_is_external(true);
     656        2092 :     isolate->heap()->UnregisterArrayBuffer(*buffer);
     657        2092 :     if (free_memory) {
     658             :       // We need to free the memory before detaching the buffer because
     659             :       // FreeBackingStore reads buffer->allocation_base(), which is nulled out
     660             :       // by Detach. This means there is a dangling pointer until we detach the
     661             :       // buffer. Since there is no way for the user to directly call
     662             :       // FreeBackingStore, we can ensure this is safe.
     663           0 :       buffer->FreeBackingStoreFromMainThread();
     664             :     }
     665             :   }
     666             : 
     667             :   DCHECK(buffer->is_external());
     668             :   buffer->set_is_wasm_memory(false);
     669             :   buffer->set_is_detachable(true);
     670        2104 :   buffer->Detach();
     671             : }
     672             : 
     673             : }  // namespace wasm
     674             : }  // namespace internal
     675      122004 : }  // namespace v8

Generated by: LCOV version 1.10