LCOV - code coverage report
Current view: top level - src/wasm - wasm-memory.h (source / functions)
Test:         app.info
Date:         2019-04-17
                     Hit    Total    Coverage
Lines:                 4        4     100.0 %
Functions:             1        1     100.0 %

          Line data    Source code
       1             : // Copyright 2017 the V8 project authors. All rights reserved.
       2             : // Use of this source code is governed by a BSD-style license that can be
       3             : // found in the LICENSE file.
       4             : 
       5             : #ifndef V8_WASM_WASM_MEMORY_H_
       6             : #define V8_WASM_WASM_MEMORY_H_
       7             : 
       8             : #include <atomic>
       9             : #include <unordered_map>
      10             : #include <unordered_set>
      11             : 
      12             : #include "src/base/platform/mutex.h"
      13             : #include "src/flags.h"
      14             : #include "src/handles.h"
      15             : #include "src/objects/js-array-buffer.h"
      16             : 
      17             : namespace v8 {
      18             : namespace internal {
      19             : namespace wasm {
      20             : 
      21             : // The {WasmMemoryTracker} tracks reservations and allocations for wasm memory
      22             : // and wasm code. There is an upper limit on the total reserved memory which is
      23             : // checked by this class. Allocations are stored so we can look them up when an
      24             : // array buffer dies and figure out the reservation and allocation bounds for
      25             : // that buffer.
      26             : class WasmMemoryTracker {
      27             :  public:
      28      183084 :   WasmMemoryTracker() = default;
      29             :   V8_EXPORT_PRIVATE ~WasmMemoryTracker();
      30             : 
      31             :   // ReserveAddressSpace attempts to increase the reserved address space counter
      32             :   // by {num_bytes}. Returns true if successful (meaning it is okay to go ahead
      33             :   // and reserve {num_bytes} bytes), false otherwise.
      34             :   // Use {kSoftLimit} if you can implement a fallback which needs less reserved
      35             :   // memory.
      36             :   enum ReservationLimit { kSoftLimit, kHardLimit };
      37             :   bool ReserveAddressSpace(size_t num_bytes, ReservationLimit limit);
      38             : 
      39             :   void RegisterAllocation(Isolate* isolate, void* allocation_base,
      40             :                           size_t allocation_length, void* buffer_start,
      41             :                           size_t buffer_length);
      42             : 
      43             :   struct SharedMemoryObjectState {
      44             :     Handle<WasmMemoryObject> memory_object;
      45             :     Isolate* isolate;
      46             : 
      47             :     SharedMemoryObjectState() = default;
      48             :     SharedMemoryObjectState(Handle<WasmMemoryObject> memory_object,
      49             :                             Isolate* isolate)
      50         304 :         : memory_object(memory_object), isolate(isolate) {}
      51             :   };
      52             : 
      53      890530 :   struct AllocationData {
      54             :     void* allocation_base = nullptr;
      55             :     size_t allocation_length = 0;
      56             :     void* buffer_start = nullptr;
      57             :     size_t buffer_length = 0;
      58             :     bool is_shared = false;
       59             :     // Wasm memories are growable by default; this will be false only when
       60             :     // shared with an asm.js module.
      61             :     bool is_growable = true;
      62             : 
       63             :     // Track Wasm Memory instances across isolates; this is populated on
       64             :     // PostMessage, using persistent handles for the memory objects.
      65             :     std::vector<WasmMemoryTracker::SharedMemoryObjectState>
      66             :         memory_object_vector;
      67             : 
      68             :    private:
      69             :     AllocationData() = default;
      70             :     AllocationData(void* allocation_base, size_t allocation_length,
      71             :                    void* buffer_start, size_t buffer_length)
      72             :         : allocation_base(allocation_base),
      73             :           allocation_length(allocation_length),
      74             :           buffer_start(buffer_start),
      75      178106 :           buffer_length(buffer_length) {
      76             :       DCHECK_LE(reinterpret_cast<uintptr_t>(allocation_base),
      77             :                 reinterpret_cast<uintptr_t>(buffer_start));
      78             :       DCHECK_GE(
      79             :           reinterpret_cast<uintptr_t>(allocation_base) + allocation_length,
      80             :           reinterpret_cast<uintptr_t>(buffer_start));
      81             :       DCHECK_GE(
      82             :           reinterpret_cast<uintptr_t>(allocation_base) + allocation_length,
      83             :           reinterpret_cast<uintptr_t>(buffer_start) + buffer_length);
      84             :     }
      85             : 
      86             :     friend WasmMemoryTracker;
      87             :   };
      88             : 
       89             :   // Allow tests to allocate a backing store the same way we do for
       90             :   // WebAssembly memory. This is used in unit tests for the trap handler to
       91             :   // generate the same signals/exceptions for invalid memory accesses as
       92             :   // we would get with WebAssembly memory.
      93             :   V8_EXPORT_PRIVATE void* TryAllocateBackingStoreForTesting(
      94             :       Heap* heap, size_t size, void** allocation_base,
      95             :       size_t* allocation_length);
      96             : 
      97             :   // Free memory allocated with TryAllocateBackingStoreForTesting.
      98             :   V8_EXPORT_PRIVATE void FreeBackingStoreForTesting(base::AddressRegion memory,
      99             :                                                     void* buffer_start);
     100             : 
     101             :   // Decreases the amount of reserved address space.
     102             :   void ReleaseReservation(size_t num_bytes);
     103             : 
     104             :   V8_EXPORT_PRIVATE bool IsWasmMemory(const void* buffer_start);
     105             : 
     106             :   bool IsWasmSharedMemory(const void* buffer_start);
     107             : 
     108             :   // Returns whether the given buffer is a Wasm memory with guard regions large
     109             :   // enough to safely use trap handlers.
     110             :   bool HasFullGuardRegions(const void* buffer_start);
     111             : 
     112             :   // Returns a pointer to a Wasm buffer's allocation data, or nullptr if the
     113             :   // buffer is not tracked.
     114             :   V8_EXPORT_PRIVATE const AllocationData* FindAllocationData(
     115             :       const void* buffer_start);
     116             : 
     117             :   // Checks if a buffer points to a Wasm memory and if so does any necessary
     118             :   // work to reclaim the buffer. If this function returns false, the caller must
     119             :   // free the buffer manually.
     120             :   bool FreeMemoryIfIsWasmMemory(Isolate* isolate, const void* buffer_start);
     121             : 
     122             :   void MarkWasmMemoryNotGrowable(Handle<JSArrayBuffer> buffer);
     123             : 
     124             :   bool IsWasmMemoryGrowable(Handle<JSArrayBuffer> buffer);
     125             : 
     126             :   // When WebAssembly.Memory is transferred over PostMessage, register the
     127             :   // allocation as shared and track the memory objects that will need
     128             :   // updating if memory is resized.
     129             :   void RegisterWasmMemoryAsShared(Handle<WasmMemoryObject> object,
     130             :                                   Isolate* isolate);
     131             : 
     132             :   // This method is called when the underlying backing store is grown, but
     133             :   // instances that share the backing_store have not yet been updated.
     134             :   void SetPendingUpdateOnGrow(Handle<JSArrayBuffer> old_buffer,
     135             :                               size_t new_size);
     136             : 
      137             :   // Handler for the GROW_SHARED_MEMORY interrupt. Updates the memory objects
      138             :   // and the instances that share those memory objects after a Grow call.
     139             :   void UpdateSharedMemoryInstances(Isolate* isolate);
     140             : 
      141             :   // Due to the timing of when buffers are garbage collected vs. when isolate
      142             :   // object handles are destroyed, it is possible to leak global handles. To
      143             :   // avoid this, clean up any remaining global handles on isolate destruction.
     144             :   void DeleteSharedMemoryObjectsOnIsolate(Isolate* isolate);
     145             : 
     146             :   // Allocation results are reported to UMA
     147             :   //
     148             :   // See wasm_memory_allocation_result in counters.h
     149             :   enum class AllocationStatus {
     150             :     kSuccess,  // Succeeded on the first try
     151             : 
     152             :     kSuccessAfterRetry,  // Succeeded after garbage collection
     153             : 
     154             :     kAddressSpaceLimitReachedFailure,  // Failed because Wasm is at its address
     155             :                                        // space limit
     156             : 
     157             :     kOtherFailure  // Failed for an unknown reason
     158             :   };
     159             : 
     160             :  private:
      161             :   // Helper methods to free memory only if it is not shared by other isolates
      162             :   // or memory objects.
     163             :   void FreeMemoryIfNotShared_Locked(Isolate* isolate,
     164             :                                     const void* backing_store);
     165             :   bool CanFreeSharedMemory_Locked(const void* backing_store);
     166             :   void RemoveSharedBufferState_Locked(Isolate* isolate,
     167             :                                       const void* backing_store);
     168             : 
      169             :   // Registers the allocation as shared, and tracks all the memory objects
      170             :   // associated with this allocation across isolates.
     171             :   void RegisterSharedWasmMemory_Locked(Handle<WasmMemoryObject> object,
     172             :                                        Isolate* isolate);
     173             : 
      174             :   // Maps the new size after a grow to the buffer's backing store, so that
      175             :   // instances and memory objects that share the WebAssembly.Memory across
      176             :   // isolates can be updated.
     177             :   void AddBufferToGrowMap_Locked(Handle<JSArrayBuffer> old_buffer,
     178             :                                  size_t new_size);
     179             : 
     180             :   // Trigger a GROW_SHARED_MEMORY interrupt on all the isolates that have memory
     181             :   // objects that share this buffer.
     182             :   void TriggerSharedGrowInterruptOnAllIsolates_Locked(
     183             :       Handle<JSArrayBuffer> old_buffer);
     184             : 
      185             :   // When an isolate hits a stack check, update the memory objects associated
      186             :   // with that isolate.
     187             :   void UpdateSharedMemoryStateOnInterrupt_Locked(Isolate* isolate,
     188             :                                                  void* backing_store,
     189             :                                                  size_t new_size);
     190             : 
      191             :   // Checks whether all the isolates that share a backing_store have hit a
      192             :   // stack check. When an isolate hits a stack check while the backing store
      193             :   // has a pending grow, that isolate will have updated its memory objects.
     194             :   bool AreAllIsolatesUpdated_Locked(const void* backing_store);
     195             : 
      196             :   // If a grow call is made on a buffer with a pending grow, and not all the
      197             :   // isolates that share this buffer have hit a StackCheck yet, clear the set
      198             :   // of already-updated instances so that they are updated with the new size
      199             :   // from the most recent grow call.
     200             :   void ClearUpdatedInstancesOnPendingGrow_Locked(const void* backing_store);
     201             : 
     202             :   // Helper functions to update memory objects on grow, and maintain state for
     203             :   // which isolates hit a stack check.
     204             :   void UpdateMemoryObjectsForIsolate_Locked(Isolate* isolate,
     205             :                                             void* backing_store,
     206             :                                             size_t new_size);
     207             :   bool MemoryObjectsNeedUpdate_Locked(Isolate* isolate,
     208             :                                       const void* backing_store);
     209             : 
      210             :   // Destroy global handles to memory objects, and remove the backing store
      211             :   // from {isolates_per_buffer_} on Free.
     212             :   void DestroyMemoryObjectsAndRemoveIsolateEntry_Locked(
     213             :       Isolate* isolate, const void* backing_store);
     214             :   void DestroyMemoryObjectsAndRemoveIsolateEntry_Locked(
     215             :       const void* backing_store);
     216             : 
     217             :   void RemoveIsolateFromBackingStore_Locked(Isolate* isolate,
     218             :                                             const void* backing_store);
     219             : 
     220             :   // Removes an allocation from the tracker.
     221             :   AllocationData ReleaseAllocation_Locked(Isolate* isolate,
     222             :                                           const void* buffer_start);
     223             : 
     224             :   // Clients use a two-part process. First they "reserve" the address space,
     225             :   // which signifies an intent to actually allocate it. This determines whether
     226             :   // doing the allocation would put us over our limit. Once there is a
     227             :   // reservation, clients can do the allocation and register the result.
     228             :   //
     229             :   // We should always have:
     230             :   // allocated_address_space_ <= reserved_address_space_ <= kAddressSpaceLimit
     231             :   std::atomic<size_t> reserved_address_space_{0};
     232             : 
     233             :   // Used to protect access to the allocated address space counter and
     234             :   // allocation map. This is needed because Wasm memories can be freed on
     235             :   // another thread by the ArrayBufferTracker.
     236             :   base::Mutex mutex_;
     237             : 
     238             :   size_t allocated_address_space_ = 0;
     239             : 
     240             :   //////////////////////////////////////////////////////////////////////////////
     241             :   // Protected by {mutex_}:
     242             : 
     243             :   // Track Wasm memory allocation information. This is keyed by the start of the
     244             :   // buffer, rather than by the start of the allocation.
     245             :   std::unordered_map<const void*, AllocationData> allocations_;
     246             : 
     247             :   // Maps each buffer to the isolates that share the backing store.
     248             :   std::unordered_map<const void*, std::unordered_set<Isolate*>>
     249             :       isolates_per_buffer_;
     250             : 
      251             :   // Maps each buffer to the isolates that have already handled a grow
      252             :   // interrupt for it. This is maintained to ensure that the instances are
      253             :   // updated with the right size on Grow.
     254             :   std::unordered_map<const void*, std::unordered_set<Isolate*>>
     255             :       isolates_updated_on_grow_;
     256             : 
      257             :   // Maps backing stores (void*) to the size (size_t) of the underlying
      258             :   // memory. An entry is added to this map on a grow call to the corresponding
      259             :   // backing store. On consecutive grow calls to the same backing store,
      260             :   // the size entry is updated. The entry is made right after the mprotect
      261             :   // call that changes the protections on a backing_store, so the memory
      262             :   // objects have not been updated yet. The backing store's entry is erased
      263             :   // when all the memory objects and instances that share this backing store
      264             :   // have had their bounds updated.
     265             :   std::unordered_map<void*, size_t> grow_update_map_;
     266             : 
     267             :   // End of fields protected by {mutex_}.
     268             :   //////////////////////////////////////////////////////////////////////////////
     269             : 
     270             :   DISALLOW_COPY_AND_ASSIGN(WasmMemoryTracker);
     271             : };
     272             : 
     273             : // Attempts to allocate an array buffer with guard regions suitable for trap
     274             : // handling. If address space is not available, it will return a buffer with
     275             : // mini-guards that will require bounds checks.
     276             : V8_EXPORT_PRIVATE MaybeHandle<JSArrayBuffer> NewArrayBuffer(Isolate*,
     277             :                                                             size_t size);
     278             : 
     279             : // Attempts to allocate a SharedArrayBuffer with guard regions suitable for
     280             : // trap handling. If address space is not available, it will try to reserve
     281             : // up to the maximum for that memory. If all else fails, it will return a
     282             : // buffer with mini-guards of initial size.
     283             : V8_EXPORT_PRIVATE MaybeHandle<JSArrayBuffer> NewSharedArrayBuffer(
     284             :     Isolate*, size_t initial_size, size_t max_size);
     285             : 
     286             : Handle<JSArrayBuffer> SetupArrayBuffer(
     287             :     Isolate*, void* backing_store, size_t size, bool is_external,
     288             :     SharedFlag shared = SharedFlag::kNotShared);
     289             : 
     290             : V8_EXPORT_PRIVATE void DetachMemoryBuffer(Isolate* isolate,
     291             :                                           Handle<JSArrayBuffer> buffer,
     292             :                                           bool free_memory);
     293             : 
     294             : }  // namespace wasm
     295             : }  // namespace internal
     296             : }  // namespace v8
     297             : 
     298             : #endif  // V8_WASM_WASM_MEMORY_H_

Generated by: LCOV version 1.10
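
The {WasmMemoryTracker} comments above describe a two-part protocol: a caller first reserves address space against a global counter (ReserveAddressSpace), and only after the reservation succeeds does it perform the actual allocation and register it (RegisterAllocation), so that allocated_address_space_ <= reserved_address_space_ <= kAddressSpaceLimit always holds. The sketch below is a minimal, self-contained illustration of that pattern only; the ToyMemoryTracker class, the kSoftCap/kHardCap values, and the main() driver are invented for this example and are not part of V8.

// Illustrative sketch of the reserve-then-register pattern described in the
// WasmMemoryTracker comments. Not V8 code; names and limits are invented.
#include <atomic>
#include <cstddef>
#include <cstdio>

class ToyMemoryTracker {
 public:
  enum ReservationLimit { kSoftLimit, kHardLimit };

  // Step 1: try to bump the reserved-address-space counter by num_bytes.
  // Returns false if the reservation would exceed the chosen limit.
  bool ReserveAddressSpace(size_t num_bytes, ReservationLimit limit) {
    const size_t cap = (limit == kSoftLimit) ? kSoftCap : kHardCap;
    size_t old_count = reserved_.load(std::memory_order_relaxed);
    do {
      if (old_count + num_bytes > cap) return false;
    } while (
        !reserved_.compare_exchange_weak(old_count, old_count + num_bytes));
    return true;
  }

  // Step 2: after the actual allocation succeeds, record it. The real tracker
  // also stores the allocation bounds keyed by buffer start and guards this
  // state with a mutex; both are omitted here for brevity.
  void RegisterAllocation(size_t allocation_length) {
    allocated_ += allocation_length;
  }

  // Undo a reservation, e.g. when the allocation itself failed.
  void ReleaseReservation(size_t num_bytes) { reserved_ -= num_bytes; }

 private:
  static constexpr size_t kSoftCap = size_t{1} << 30;  // invented: 1 GiB
  static constexpr size_t kHardCap = size_t{2} << 30;  // invented: 2 GiB
  std::atomic<size_t> reserved_{0};
  size_t allocated_ = 0;  // invariant: allocated_ <= reserved_ <= cap
};

int main() {
  ToyMemoryTracker tracker;
  const size_t kBytes = 64 * 1024;  // one wasm page
  if (tracker.ReserveAddressSpace(kBytes, ToyMemoryTracker::kSoftLimit)) {
    // ... perform the real allocation here; on failure, call
    // ReleaseReservation(kBytes) instead of registering.
    tracker.RegisterAllocation(kBytes);
    std::puts("reservation and allocation registered");
  } else {
    std::puts("address space limit reached; caller should fall back");
  }
  return 0;
}

In the real class, a failed allocation after a successful reservation is undone with ReleaseReservation, and registered allocations are keyed by buffer start so that FindAllocationData and FreeMemoryIfIsWasmMemory can locate them when the array buffer dies.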