// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_WASM_WASM_CODE_MANAGER_H_
#define V8_WASM_WASM_CODE_MANAGER_H_

#include <functional>
#include <list>
#include <map>
#include <unordered_map>
#include <unordered_set>

#include "src/base/macros.h"
#include "src/builtins/builtins-definitions.h"
#include "src/handles.h"
#include "src/trap-handler/trap-handler.h"
#include "src/vector.h"
#include "src/wasm/compilation-environment.h"
#include "src/wasm/wasm-features.h"
#include "src/wasm/wasm-limits.h"

namespace v8 {
namespace internal {

class Code;
class CodeDesc;
class Isolate;

namespace wasm {

class NativeModule;
class WasmCodeManager;
class WasmEngine;
class WasmMemoryTracker;
class WasmImportWrapperCache;
struct WasmModule;

// Sorted, disjoint memory regions. Each region has the form [start, end).
// There is never a pair [start, end), [end, other_end); such a pair is
// merged into the single region [start, other_end).
class V8_EXPORT_PRIVATE DisjointAllocationPool final {
 public:
  DisjointAllocationPool() = default;

  explicit DisjointAllocationPool(base::AddressRegion region)
      : regions_({region}) {}

  DisjointAllocationPool(DisjointAllocationPool&& other) V8_NOEXCEPT = default;
  DisjointAllocationPool& operator=(DisjointAllocationPool&& other)
      V8_NOEXCEPT = default;

  // Merge the given region into this object while preserving the ordering of
  // the regions. The assumption is that the region does not intersect this
  // object; for example, it was obtained from a previous Allocate.
  void Merge(base::AddressRegion);

  // Allocate a contiguous region of size {size}. Return an empty region on
  // failure.
  base::AddressRegion Allocate(size_t size);

  bool IsEmpty() const { return regions_.empty(); }
  const std::list<base::AddressRegion>& regions() const { return regions_; }

 private:
  std::list<base::AddressRegion> regions_;

  DISALLOW_COPY_AND_ASSIGN(DisjointAllocationPool);
};
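
// A minimal usage sketch of the pool (illustrative only; the address and the
// sizes are invented for the example):
//
//   DisjointAllocationPool pool(base::AddressRegion{0x10000, 0x1000});
//   base::AddressRegion r = pool.Allocate(0x400);  // takes [0x10000, 0x10400)
//   ...                                            // use the region for code
//   pool.Merge(r);  // hand it back; the pool is a single region again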

class V8_EXPORT_PRIVATE WasmCode final {
 public:
  enum Kind {
    kFunction,
    kWasmToJsWrapper,
    kLazyStub,
    kRuntimeStub,
    kInterpreterEntry,
    kJumpTable
  };

  // Each runtime stub is identified by an id. This id is used to reference the
  // stub via {RelocInfo::WASM_STUB_CALL} and gets resolved during relocation.
  enum RuntimeStubId {
#define DEF_ENUM(Name) k##Name,
#define DEF_ENUM_TRAP(Name) kThrowWasm##Name,
    WASM_RUNTIME_STUB_LIST(DEF_ENUM, DEF_ENUM_TRAP)
#undef DEF_ENUM_TRAP
#undef DEF_ENUM
        kRuntimeStubCount
  };
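  // (For illustration: a hypothetical list entry {Foo} expands via DEF_ENUM
  // to the enumerator kFoo, and a hypothetical trap entry {TrapBar} expands
  // via DEF_ENUM_TRAP to kThrowWasmTrapBar; the real names come from
  // WASM_RUNTIME_STUB_LIST.)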

  // kOther is used if we have WasmCode that is neither liftoff- nor
  // turbofan-compiled, i.e. if its Kind is not kFunction.
  enum Tier : int8_t { kLiftoff, kTurbofan, kOther };

  Vector<byte> instructions() const { return instructions_; }
  Address instruction_start() const {
    return reinterpret_cast<Address>(instructions_.start());
  }
  Vector<const byte> reloc_info() const { return reloc_info_.as_vector(); }
  Vector<const byte> source_positions() const {
    return source_position_table_.as_vector();
  }

  uint32_t index() const {
    DCHECK(!IsAnonymous());
    return index_;
  }
  // Anonymous functions are functions that don't carry an index.
  bool IsAnonymous() const { return index_ == kAnonymousFuncIndex; }
  Kind kind() const { return kind_; }
  NativeModule* native_module() const { return native_module_; }
  Tier tier() const { return tier_; }
  Address constant_pool() const;
  Address code_comments() const;
  size_t constant_pool_offset() const { return constant_pool_offset_; }
  size_t safepoint_table_offset() const { return safepoint_table_offset_; }
  size_t handler_table_offset() const { return handler_table_offset_; }
  size_t code_comments_offset() const { return code_comments_offset_; }
  size_t unpadded_binary_size() const { return unpadded_binary_size_; }
  uint32_t stack_slots() const { return stack_slots_; }
  uint32_t tagged_parameter_slots() const { return tagged_parameter_slots_; }
  bool is_liftoff() const { return tier_ == kLiftoff; }
  bool contains(Address pc) const {
    return reinterpret_cast<Address>(instructions_.start()) <= pc &&
           pc < reinterpret_cast<Address>(instructions_.end());
  }

  Vector<trap_handler::ProtectedInstructionData> protected_instructions()
      const {
    return protected_instructions_.as_vector();
  }

  void Validate() const;
  void Print(const char* name = nullptr) const;
  void MaybePrint(const char* name = nullptr) const;
  void Disassemble(const char* name, std::ostream& os,
                   Address current_pc = kNullAddress) const;

  static bool ShouldBeLogged(Isolate* isolate);
  void LogCode(Isolate* isolate) const;

  ~WasmCode();

  enum FlushICache : bool { kFlushICache = true, kNoFlushICache = false };

  static constexpr uint32_t kAnonymousFuncIndex = 0xffffffff;
  STATIC_ASSERT(kAnonymousFuncIndex > kV8MaxWasmFunctions);

 private:
  friend class NativeModule;

  WasmCode(NativeModule* native_module, uint32_t index,
           Vector<byte> instructions, uint32_t stack_slots,
           uint32_t tagged_parameter_slots, size_t safepoint_table_offset,
           size_t handler_table_offset, size_t constant_pool_offset,
           size_t code_comments_offset, size_t unpadded_binary_size,
           OwnedVector<trap_handler::ProtectedInstructionData>
               protected_instructions,
           OwnedVector<const byte> reloc_info,
           OwnedVector<const byte> source_position_table, Kind kind, Tier tier)
      : instructions_(instructions),
        reloc_info_(std::move(reloc_info)),
        source_position_table_(std::move(source_position_table)),
        native_module_(native_module),
        index_(index),
        kind_(kind),
        constant_pool_offset_(constant_pool_offset),
        stack_slots_(stack_slots),
        tagged_parameter_slots_(tagged_parameter_slots),
        safepoint_table_offset_(safepoint_table_offset),
        handler_table_offset_(handler_table_offset),
        code_comments_offset_(code_comments_offset),
        unpadded_binary_size_(unpadded_binary_size),
        protected_instructions_(std::move(protected_instructions)),
        tier_(tier) {
    DCHECK_LE(safepoint_table_offset, unpadded_binary_size);
    DCHECK_LE(handler_table_offset, unpadded_binary_size);
    DCHECK_LE(code_comments_offset, unpadded_binary_size);
    DCHECK_LE(constant_pool_offset, unpadded_binary_size);
  }

  // Code objects that have been registered with the global trap handler
  // within this process will have a {trap_handler_index} associated with
  // them.
  size_t trap_handler_index() const;
  void set_trap_handler_index(size_t);
  bool HasTrapHandlerIndex() const;

  // Register protected instruction information with the trap handler. Sets
  // trap_handler_index.
  void RegisterTrapHandlerData();

  Vector<byte> instructions_;
  OwnedVector<const byte> reloc_info_;
  OwnedVector<const byte> source_position_table_;
  NativeModule* native_module_ = nullptr;
  uint32_t index_;
  Kind kind_;
  size_t constant_pool_offset_ = 0;
  uint32_t stack_slots_ = 0;
  // Number of tagged parameters passed to this function via the stack. This
  // value is used by the stack walker (e.g. GC) to find references.
  uint32_t tagged_parameter_slots_ = 0;
  // We care about safepoint data for wasm-to-js functions, since there may be
  // stack/register tagged values for large number conversions.
  size_t safepoint_table_offset_ = 0;
  size_t handler_table_offset_ = 0;
  size_t code_comments_offset_ = 0;
  size_t unpadded_binary_size_ = 0;
  intptr_t trap_handler_index_ = -1;
  OwnedVector<trap_handler::ProtectedInstructionData> protected_instructions_;
  Tier tier_;

  DISALLOW_COPY_AND_ASSIGN(WasmCode);
};

// Return a textual description of the kind.
const char* GetWasmCodeKindAsString(WasmCode::Kind);

class V8_EXPORT_PRIVATE NativeModule final {
 public:
#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_S390X || V8_TARGET_ARCH_ARM64
  static constexpr bool kCanAllocateMoreMemory = false;
#else
  static constexpr bool kCanAllocateMoreMemory = true;
#endif

  // {AddCode} is thread-safe w.r.t. other calls to {AddCode} or the other
  // code-adding methods below, i.e. it can be called concurrently from
  // background threads.
  WasmCode* AddCode(uint32_t index, const CodeDesc& desc, uint32_t stack_slots,
                    uint32_t tagged_parameter_slots,
                    OwnedVector<trap_handler::ProtectedInstructionData>
                        protected_instructions,
                    OwnedVector<const byte> source_position_table,
                    WasmCode::Kind kind, WasmCode::Tier tier);

  WasmCode* AddDeserializedCode(
      uint32_t index, Vector<const byte> instructions, uint32_t stack_slots,
      uint32_t tagged_parameter_slots, size_t safepoint_table_offset,
      size_t handler_table_offset, size_t constant_pool_offset,
      size_t code_comments_offset, size_t unpadded_binary_size,
      OwnedVector<trap_handler::ProtectedInstructionData>
          protected_instructions,
      OwnedVector<const byte> reloc_info,
      OwnedVector<const byte> source_position_table, WasmCode::Tier tier);

  // Adds anonymous code for testing purposes.
  WasmCode* AddCodeForTesting(Handle<Code> code);

  // When starting lazy compilation, provide the WasmLazyCompile builtin by
  // calling SetLazyBuiltin. It will be copied into this NativeModule and the
  // jump table will be populated with that copy.
  void SetLazyBuiltin(Handle<Code> code);

  // Initializes all runtime stubs by copying them over from the JS-allocated
  // heap into this native module. It must be called exactly once per native
  // module before adding other WasmCode so that runtime stub ids can be
  // resolved during relocation.
  void SetRuntimeStubs(Isolate* isolate);

  // Makes the code available to the system (by entering it into the code table
  // and patching the jump table). Callers have to take care not to race with
  // threads executing the old code.
  void PublishCode(WasmCode* code);

  // Switch a function to an interpreter entry wrapper. When adding interpreter
  // wrappers, we do not insert them in the code_table; instead, we let them
  // self-identify as the {index} function.
  void PublishInterpreterEntry(WasmCode* code, uint32_t index);

  // Creates a snapshot of the current state of the code table. This is useful
  // to get a consistent view of the table (e.g. used by the serializer).
  std::vector<WasmCode*> SnapshotCodeTable() const;

  WasmCode* code(uint32_t index) const {
    DCHECK_LT(index, num_functions());
    DCHECK_LE(module_->num_imported_functions, index);
    return code_table_[index - module_->num_imported_functions];
  }
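  // (Indexing example with invented numbers: with 3 imported functions,
  // {code(5)} returns {code_table_[2]}, i.e. the third declared function.)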

  bool has_code(uint32_t index) const { return code(index) != nullptr; }

  Address runtime_stub_entry(WasmCode::RuntimeStubId index) const {
    DCHECK_LT(index, WasmCode::kRuntimeStubCount);
    Address entry_address = runtime_stub_entries_[index];
    DCHECK_NE(kNullAddress, entry_address);
    return entry_address;
  }

  Address jump_table_start() const {
    return jump_table_ ? jump_table_->instruction_start() : kNullAddress;
  }

  ptrdiff_t jump_table_offset(uint32_t func_index) const {
    DCHECK_GE(func_index, num_imported_functions());
    return GetCallTargetForFunction(func_index) - jump_table_start();
  }

  bool is_jump_table_slot(Address address) const {
    return jump_table_->contains(address);
  }

  // Transition this module from code relying on trap handlers (i.e. without
  // explicit memory bounds checks) to code that does not require trap handlers
  // (i.e. code with explicit bounds checks).
  // This method must only be called if {use_trap_handler()} is true (it will
  // be false afterwards). All code in this {NativeModule} needs to be re-added
  // after calling this method.
  void DisableTrapHandler();

  // Returns the target to call for the given function (returns a jump table
  // slot within {jump_table_}).
  Address GetCallTargetForFunction(uint32_t func_index) const;

  // Reverse lookup from a given call target (i.e. a jump table slot, as
  // returned by {GetCallTargetForFunction} above) to a function index.
  uint32_t GetFunctionIndexFromJumpTableSlot(Address slot_address) const;
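
  // Sketch of the round trip (hypothetical usage; {f} is any declared
  // function index >= num_imported_functions()):
  //
  //   Address slot = GetCallTargetForFunction(f);
  //   DCHECK(is_jump_table_slot(slot));
  //   DCHECK_EQ(f, GetFunctionIndexFromJumpTableSlot(slot));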

  bool SetExecutable(bool executable);

  // For cctests, where we build both WasmModule and the runtime objects
  // on the fly, and bypass the instance builder pipeline.
  void ReserveCodeTableForTesting(uint32_t max_functions);

  void LogWasmCodes(Isolate* isolate);

  CompilationState* compilation_state() { return compilation_state_.get(); }

  // Create a {CompilationEnv} object for compilation. The caller has to ensure
  // that the {WasmModule} pointer stays valid while the {CompilationEnv} is
  // being used.
  CompilationEnv CreateCompilationEnv() const;

  uint32_t num_functions() const {
    return module_->num_declared_functions + module_->num_imported_functions;
  }
  uint32_t num_imported_functions() const {
    return module_->num_imported_functions;
  }
  UseTrapHandler use_trap_handler() const { return use_trap_handler_; }
  void set_lazy_compile_frozen(bool frozen) { lazy_compile_frozen_ = frozen; }
  bool lazy_compile_frozen() const { return lazy_compile_frozen_; }
  Vector<const uint8_t> wire_bytes() const { return wire_bytes_->as_vector(); }
  const WasmModule* module() const { return module_.get(); }
  std::shared_ptr<const WasmModule> shared_module() const { return module_; }
  size_t committed_code_space() const { return committed_code_space_.load(); }
  WasmEngine* engine() const { return engine_; }

  void SetWireBytes(OwnedVector<const uint8_t> wire_bytes);

  WasmCode* Lookup(Address) const;

  WasmImportWrapperCache* import_wrapper_cache() const {
    return import_wrapper_cache_.get();
  }

  ~NativeModule();

  const WasmFeatures& enabled_features() const { return enabled_features_; }

  const char* GetRuntimeStubName(Address runtime_stub_entry) const;

 private:
  friend class WasmCode;
  friend class WasmCodeManager;
  friend class NativeModuleModificationScope;

  NativeModule(WasmEngine* engine, const WasmFeatures& enabled_features,
               bool can_request_more, VirtualMemory code_space,
               std::shared_ptr<const WasmModule> module,
               std::shared_ptr<Counters> async_counters);

  WasmCode* AddAnonymousCode(Handle<Code>, WasmCode::Kind kind,
                             const char* name = nullptr);
  // Allocate code space. Returns a valid buffer or fails with OOM (crash).
  Vector<byte> AllocateForCode(size_t size);

  // Primitive for adding code to the native module. All code added to a
  // native module is owned by that module. Various callers decide how the
  // code is obtained (e.g. from a CodeDesc or, at a given point in time, from
  // a Code object), its kind, and whether it carries an index or is anonymous.
  WasmCode* AddOwnedCode(uint32_t index, Vector<const byte> instructions,
                         uint32_t stack_slots, uint32_t tagged_parameter_slots,
                         size_t safepoint_table_offset,
                         size_t handler_table_offset,
                         size_t constant_pool_offset,
                         size_t code_comments_offset,
                         size_t unpadded_binary_size,
                         OwnedVector<trap_handler::ProtectedInstructionData>,
                         OwnedVector<const byte> reloc_info,
                         OwnedVector<const byte> source_position_table,
                         WasmCode::Kind, WasmCode::Tier);

  WasmCode* CreateEmptyJumpTable(uint32_t jump_table_size);

  // Hold the {allocation_mutex_} when calling this method.
  void InstallCode(WasmCode* code);

  Vector<WasmCode*> code_table() const {
    return {code_table_.get(), module_->num_declared_functions};
  }

  // Hold the {allocation_mutex_} when calling this method.
  bool has_interpreter_redirection(uint32_t func_index) {
    DCHECK_LT(func_index, num_functions());
    DCHECK_LE(module_->num_imported_functions, func_index);
    if (!interpreter_redirections_) return false;
    uint32_t bitset_idx = func_index - module_->num_imported_functions;
    uint8_t byte = interpreter_redirections_[bitset_idx / kBitsPerByte];
    return byte & (1 << (bitset_idx % kBitsPerByte));
  }

  // Hold the {allocation_mutex_} when calling this method.
  void SetInterpreterRedirection(uint32_t func_index) {
    DCHECK_LT(func_index, num_functions());
    DCHECK_LE(module_->num_imported_functions, func_index);
    if (!interpreter_redirections_) {
      interpreter_redirections_.reset(
          new uint8_t[RoundUp<kBitsPerByte>(module_->num_declared_functions) /
                      kBitsPerByte]);
    }
    uint32_t bitset_idx = func_index - module_->num_imported_functions;
    uint8_t& byte = interpreter_redirections_[bitset_idx / kBitsPerByte];
    byte |= 1 << (bitset_idx % kBitsPerByte);
  }
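
  // (Worked example with invented numbers: with 3 imported functions and
  // func_index 12, bitset_idx is 9, so the redirection bit lives in byte
  // 9 / 8 = 1 at bit position 9 % 8 = 1, i.e. it is tested/set with mask
  // 1 << 1.)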

  // Features enabled for this module. We keep a copy of the features that
  // were enabled at the time of the creation of this native module,
  // to be consistent across asynchronous compilations later.
  const WasmFeatures enabled_features_;

  // The decoded module, stored in a shared_ptr such that background compile
  // tasks can keep this alive.
  std::shared_ptr<const WasmModule> module_;

  // Wire bytes, held in a shared_ptr so they can be kept alive by the
  // {WireBytesStorage}, held by background compile tasks.
  std::shared_ptr<OwnedVector<const uint8_t>> wire_bytes_;

  // Contains entry points for runtime stub calls via {WASM_STUB_CALL}.
  Address runtime_stub_entries_[WasmCode::kRuntimeStubCount] = {kNullAddress};

  // Jump table used for runtime stubs (i.e. trampolines to embedded builtins).
  WasmCode* runtime_stub_table_ = nullptr;

  // Jump table used to easily redirect wasm function calls.
  WasmCode* jump_table_ = nullptr;

  // The compilation state keeps track of compilation tasks for this module.
  // Note that its destructor blocks until all tasks are finished/aborted and
  // hence needs to be destructed first when this native module dies.
  std::unique_ptr<CompilationState> compilation_state_;

  // A cache of the import wrappers, keyed on the kind and signature.
  std::unique_ptr<WasmImportWrapperCache> import_wrapper_cache_;

  // This mutex protects concurrent calls to {AddCode} and friends.
  mutable base::Mutex allocation_mutex_;

  //////////////////////////////////////////////////////////////////////////////
  // Protected by {allocation_mutex_}:

  // Holds all allocated code objects. Kept in ascending order of the code's
  // instruction start address to allow lookups.
  std::vector<std::unique_ptr<WasmCode>> owned_code_;

  std::unique_ptr<WasmCode* []> code_table_;

  // Null if no redirections exist, otherwise a bitset over all functions in
  // this module marking those functions that have been redirected.
  std::unique_ptr<uint8_t[]> interpreter_redirections_;

  DisjointAllocationPool free_code_space_;
  DisjointAllocationPool allocated_code_space_;
  std::list<VirtualMemory> owned_code_space_;

  // End of fields protected by {allocation_mutex_}.
  //////////////////////////////////////////////////////////////////////////////

  WasmEngine* const engine_;
  std::atomic<size_t> committed_code_space_{0};
  int modification_scope_depth_ = 0;
  bool can_request_more_memory_;
  UseTrapHandler use_trap_handler_ = kNoTrapHandler;
  bool is_executable_ = false;
  bool lazy_compile_frozen_ = false;

  DISALLOW_COPY_AND_ASSIGN(NativeModule);
};

class V8_EXPORT_PRIVATE WasmCodeManager final {
 public:
  explicit WasmCodeManager(WasmMemoryTracker* memory_tracker,
                           size_t max_committed);

  NativeModule* LookupNativeModule(Address pc) const;
  WasmCode* LookupCode(Address pc) const;
  size_t remaining_uncommitted_code_space() const;

  void SetMaxCommittedMemoryForTesting(size_t limit);

  static size_t EstimateNativeModuleCodeSize(const WasmModule* module);
  static size_t EstimateNativeModuleNonCodeSize(const WasmModule* module);

 private:
  friend class NativeModule;
  friend class WasmEngine;

  std::unique_ptr<NativeModule> NewNativeModule(
      WasmEngine* engine, Isolate* isolate,
      const WasmFeatures& enabled_features, size_t code_size_estimate,
      bool can_request_more, std::shared_ptr<const WasmModule> module);

  V8_WARN_UNUSED_RESULT VirtualMemory TryAllocate(size_t size,
                                                  void* hint = nullptr);
  bool Commit(Address, size_t);
  // Currently, we uncommit a whole module, so all we need to do is account
  // for the freed memory size. We do that in {FreeNativeModule}.
  // There's no separate Uncommit.

  void FreeNativeModule(NativeModule*);

  void AssignRanges(Address start, Address end, NativeModule*);

  WasmMemoryTracker* const memory_tracker_;
  std::atomic<size_t> remaining_uncommitted_code_space_;
  // If the remaining uncommitted code space falls below
  // {critical_uncommitted_code_space_}, then we trigger a GC before creating
  // the next module. This value is initialized to 50% of the available code
  // space on creation and after each GC.
  std::atomic<size_t> critical_uncommitted_code_space_;
  mutable base::Mutex native_modules_mutex_;

  //////////////////////////////////////////////////////////////////////////////
  // Protected by {native_modules_mutex_}:

  std::map<Address, std::pair<Address, NativeModule*>> lookup_map_;

  // End of fields protected by {native_modules_mutex_}.
  //////////////////////////////////////////////////////////////////////////////

  DISALLOW_COPY_AND_ASSIGN(WasmCodeManager);
};

// Within the scope, the native_module is writable and not executable.
// At the scope's destruction, the native_module is executable and not
// writable. These states hold regardless of the native_module's state when
// entering the scope. We currently mark the entire module's memory W^X:
// - for AOT, that's as efficient as it can be.
// - for Lazy, we don't have a heuristic for functions that may need patching,
//   and even if we did, the resulting set of pages may be fragmented.
//   Currently, we try to keep the number of syscalls low.
// - a similar argument holds for debug time.
558 : // - similar argument for debug time.
559 : class NativeModuleModificationScope final {
560 : public:
561 : explicit NativeModuleModificationScope(NativeModule* native_module);
562 : ~NativeModuleModificationScope();
563 :
564 : private:
565 : NativeModule* native_module_;
566 : };
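
// A minimal usage sketch (the surrounding code is hypothetical; only the
// scope type is from this header):
//
//   {
//     NativeModuleModificationScope scope(native_module);
//     // native_module's code space is writable here; patch code safely.
//   }
//   // After destruction, the code space is executable and not writable.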

}  // namespace wasm
}  // namespace internal
}  // namespace v8

#endif  // V8_WASM_WASM_CODE_MANAGER_H_