// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_WASM_WASM_CODE_MANAGER_H_
#define V8_WASM_WASM_CODE_MANAGER_H_

#include <functional>
#include <list>
#include <map>
#include <unordered_map>
#include <unordered_set>

#include "src/base/macros.h"
#include "src/builtins/builtins-definitions.h"
#include "src/handles.h"
#include "src/trap-handler/trap-handler.h"
#include "src/vector.h"
#include "src/wasm/compilation-environment.h"
#include "src/wasm/wasm-features.h"
#include "src/wasm/wasm-limits.h"

namespace v8 {
namespace internal {

struct CodeDesc;
class Code;

namespace wasm {

class NativeModule;
class WasmCodeManager;
class WasmEngine;
class WasmMemoryTracker;
class WasmImportWrapperCache;
struct WasmModule;

// Sorted, disjoint memory regions, each of the form [start, end). Adjacent
// regions are coalesced: the pool never contains [start, end) next to
// [end, other_end), because that pair would have been merged into
// [start, other_end).
class V8_EXPORT_PRIVATE DisjointAllocationPool final {
 public:
  DisjointAllocationPool() = default;

  explicit DisjointAllocationPool(base::AddressRegion region)
      : regions_({region}) {}

  DisjointAllocationPool(DisjointAllocationPool&& other) V8_NOEXCEPT = default;
  DisjointAllocationPool& operator=(DisjointAllocationPool&& other)
      V8_NOEXCEPT = default;
  // Merge the given region into this pool while preserving the ordering of
  // the regions. The region must not intersect any region already in the
  // pool; typically it was obtained from a previous {Allocate}.
  void Merge(base::AddressRegion);

  // Allocate a contiguous region of size {size}. Returns an empty region on
  // failure.
  base::AddressRegion Allocate(size_t size);

  bool IsEmpty() const { return regions_.empty(); }
  const std::list<base::AddressRegion>& regions() const { return regions_; }

 private:
  std::list<base::AddressRegion> regions_;

  DISALLOW_COPY_AND_ASSIGN(DisjointAllocationPool);
};
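
// A minimal usage sketch of the pool (illustrative only; assumes a single
// backing region starting at {start} with {size} bytes):
//
//   DisjointAllocationPool pool({start, size});
//   base::AddressRegion region = pool.Allocate(4096);
//   if (region.is_empty()) { /* allocation failed */ }
//   // ... use [region.begin(), region.end()) ...
//   pool.Merge(region);  // Return the region; adjacent regions coalesce.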

class V8_EXPORT_PRIVATE WasmCode final {
 public:
  enum Kind {
    kFunction,
    kWasmToJsWrapper,
    kLazyStub,
    kRuntimeStub,
    kInterpreterEntry,
    kJumpTable
  };

  // Each runtime stub is identified by an id. This id is used to reference the
  // stub via {RelocInfo::WASM_STUB_CALL} and gets resolved during relocation.
  enum RuntimeStubId {
#define DEF_ENUM(Name) k##Name,
#define DEF_ENUM_TRAP(Name) kThrowWasm##Name,
    WASM_RUNTIME_STUB_LIST(DEF_ENUM, DEF_ENUM_TRAP)
#undef DEF_ENUM_TRAP
#undef DEF_ENUM
        kRuntimeStubCount
  };
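  // For illustration (with hypothetical entry names): a plain list entry
  // {Foo} above expands to the enumerator {kFoo}, and a trap entry {TrapBar}
  // expands to {kThrowWasmTrapBar}, so regular stubs and trap stubs share one
  // contiguous id space ending at {kRuntimeStubCount}.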

  // {kOther} is used for WasmCode objects that are neither Liftoff- nor
  // TurboFan-compiled, i.e. whose {Kind} is not {kFunction}.
  enum Tier : int8_t { kLiftoff, kTurbofan, kOther };

  Vector<byte> instructions() const { return instructions_; }
  Address instruction_start() const {
    return reinterpret_cast<Address>(instructions_.start());
  }
  Vector<const byte> reloc_info() const { return reloc_info_.as_vector(); }
  Vector<const byte> source_positions() const {
    return source_position_table_.as_vector();
  }

  uint32_t index() const {
    DCHECK(!IsAnonymous());
    return index_;
  }
  // Anonymous functions are functions that don't carry an index.
  bool IsAnonymous() const { return index_ == kAnonymousFuncIndex; }
  Kind kind() const { return kind_; }
  NativeModule* native_module() const { return native_module_; }
  Tier tier() const { return tier_; }
  Address constant_pool() const;
  Address code_comments() const;
  size_t constant_pool_offset() const { return constant_pool_offset_; }
  size_t safepoint_table_offset() const { return safepoint_table_offset_; }
  size_t handler_table_offset() const { return handler_table_offset_; }
  size_t code_comments_offset() const { return code_comments_offset_; }
  size_t unpadded_binary_size() const { return unpadded_binary_size_; }
  uint32_t stack_slots() const { return stack_slots_; }
  bool is_liftoff() const { return tier_ == kLiftoff; }
  bool contains(Address pc) const {
    return reinterpret_cast<Address>(instructions_.start()) <= pc &&
           pc < reinterpret_cast<Address>(instructions_.end());
  }

  Vector<trap_handler::ProtectedInstructionData> protected_instructions()
      const {
    return protected_instructions_.as_vector();
  }

  const char* GetRuntimeStubName() const;

  void Validate() const;
  void Print(const char* name = nullptr) const;
  void MaybePrint(const char* name = nullptr) const;
  void Disassemble(const char* name, std::ostream& os,
                   Address current_pc = kNullAddress) const;

  static bool ShouldBeLogged(Isolate* isolate);
  void LogCode(Isolate* isolate) const;

  ~WasmCode();

  enum FlushICache : bool { kFlushICache = true, kNoFlushICache = false };

  static constexpr uint32_t kAnonymousFuncIndex = 0xffffffff;
  STATIC_ASSERT(kAnonymousFuncIndex > kV8MaxWasmFunctions);

 private:
  friend class NativeModule;

  WasmCode(NativeModule* native_module, uint32_t index,
           Vector<byte> instructions, uint32_t stack_slots,
           size_t safepoint_table_offset, size_t handler_table_offset,
           size_t constant_pool_offset, size_t code_comments_offset,
           size_t unpadded_binary_size,
           OwnedVector<trap_handler::ProtectedInstructionData>
               protected_instructions,
           OwnedVector<const byte> reloc_info,
           OwnedVector<const byte> source_position_table, Kind kind, Tier tier)
      : instructions_(instructions),
        reloc_info_(std::move(reloc_info)),
        source_position_table_(std::move(source_position_table)),
        native_module_(native_module),
        index_(index),
        kind_(kind),
        constant_pool_offset_(constant_pool_offset),
        stack_slots_(stack_slots),
        safepoint_table_offset_(safepoint_table_offset),
        handler_table_offset_(handler_table_offset),
        code_comments_offset_(code_comments_offset),
        unpadded_binary_size_(unpadded_binary_size),
        protected_instructions_(std::move(protected_instructions)),
        tier_(tier) {
    DCHECK_LE(safepoint_table_offset, unpadded_binary_size);
    DCHECK_LE(handler_table_offset, unpadded_binary_size);
    DCHECK_LE(code_comments_offset, unpadded_binary_size);
    DCHECK_LE(constant_pool_offset, unpadded_binary_size);
  }

  // Code objects that have been registered with the global trap handler
  // within this process will have a {trap_handler_index} associated with
  // them.
  size_t trap_handler_index() const;
  void set_trap_handler_index(size_t);
  bool HasTrapHandlerIndex() const;

  // Register protected instruction information with the trap handler. Sets
  // trap_handler_index.
  void RegisterTrapHandlerData();

  Vector<byte> instructions_;
  OwnedVector<const byte> reloc_info_;
  OwnedVector<const byte> source_position_table_;
  NativeModule* native_module_ = nullptr;
  uint32_t index_;
  Kind kind_;
  size_t constant_pool_offset_ = 0;
  uint32_t stack_slots_ = 0;
  // We care about safepoint data for wasm-to-js functions, since there may be
  // stack/register tagged values for large number conversions.
  size_t safepoint_table_offset_ = 0;
  size_t handler_table_offset_ = 0;
  size_t code_comments_offset_ = 0;
  size_t unpadded_binary_size_ = 0;
  intptr_t trap_handler_index_ = -1;
  OwnedVector<trap_handler::ProtectedInstructionData> protected_instructions_;
  Tier tier_;

  DISALLOW_COPY_AND_ASSIGN(WasmCode);
};

// Return a textual description of the kind.
const char* GetWasmCodeKindAsString(WasmCode::Kind);

class V8_EXPORT_PRIVATE NativeModule final {
 public:
#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_S390X || V8_TARGET_ARCH_ARM64
  static constexpr bool kCanAllocateMoreMemory = false;
#else
  static constexpr bool kCanAllocateMoreMemory = true;
#endif

  // {AddCode} is thread safe w.r.t. other calls to {AddCode} or methods adding
  // code below, i.e. it can be called concurrently from background threads.
  WasmCode* AddCode(uint32_t index, const CodeDesc& desc, uint32_t stack_slots,
                    size_t safepoint_table_offset, size_t handler_table_offset,
                    OwnedVector<trap_handler::ProtectedInstructionData>
                        protected_instructions,
                    OwnedVector<const byte> source_position_table,
                    WasmCode::Kind kind, WasmCode::Tier tier);

  WasmCode* AddDeserializedCode(
      uint32_t index, Vector<const byte> instructions, uint32_t stack_slots,
      size_t safepoint_table_offset, size_t handler_table_offset,
      size_t constant_pool_offset, size_t code_comments_offset,
      size_t unpadded_binary_size,
      OwnedVector<trap_handler::ProtectedInstructionData>
          protected_instructions,
      OwnedVector<const byte> reloc_info,
      OwnedVector<const byte> source_position_table, WasmCode::Tier tier);

  // Adds anonymous code for testing purposes.
  WasmCode* AddCodeForTesting(Handle<Code> code);

  // When starting lazy compilation, provide the WasmLazyCompile builtin by
  // calling SetLazyBuiltin. It will be copied into this NativeModule and the
  // jump table will be populated with that copy.
  void SetLazyBuiltin(Handle<Code> code);

  // Initializes all runtime stubs by copying them over from the JS-allocated
  // heap into this native module. It must be called exactly once per native
  // module before adding other WasmCode so that runtime stub ids can be
  // resolved during relocation.
  void SetRuntimeStubs(Isolate* isolate);

  // Makes the code available to the system (by entering it into the code table
  // and patching the jump table). Callers have to take care not to race with
  // threads executing the old code.
  void PublishCode(WasmCode* code);

  // Switch a function to an interpreter entry wrapper. Interpreter wrappers
  // are not inserted into the {code_table_}; instead, they self-identify as
  // the function at {index}.
  void PublishInterpreterEntry(WasmCode* code, uint32_t index);

  // Creates a snapshot of the current state of the code table. This is useful
  // to get a consistent view of the table (e.g. used by the serializer).
  std::vector<WasmCode*> SnapshotCodeTable() const;

  WasmCode* code(uint32_t index) const {
    DCHECK_LT(index, num_functions());
    DCHECK_LE(module_->num_imported_functions, index);
    return code_table_[index - module_->num_imported_functions];
  }

  bool has_code(uint32_t index) const { return code(index) != nullptr; }

  WasmCode* runtime_stub(WasmCode::RuntimeStubId index) const {
    DCHECK_LT(index, WasmCode::kRuntimeStubCount);
    WasmCode* code = runtime_stub_table_[index];
    DCHECK_NOT_NULL(code);
    return code;
  }

  Address jump_table_start() const {
    return jump_table_ ? jump_table_->instruction_start() : kNullAddress;
  }

  ptrdiff_t jump_table_offset(uint32_t func_index) const {
    DCHECK_GE(func_index, num_imported_functions());
    return GetCallTargetForFunction(func_index) - jump_table_start();
  }

  bool is_jump_table_slot(Address address) const {
    return jump_table_->contains(address);
  }

  // Transition this module from code relying on trap handlers (i.e. without
  // explicit memory bounds checks) to code that does not require trap handlers
  // (i.e. code with explicit bounds checks).
  // This method must only be called if {use_trap_handler()} is true (it will
  // be false afterwards). All code in this {NativeModule} needs to be re-added
  // after calling this method.
  void DisableTrapHandler();

  // Returns the target to call for the given function (returns a jump table
  // slot within {jump_table_}).
  Address GetCallTargetForFunction(uint32_t func_index) const;

  // Reverse lookup from a given call target (i.e. a jump table slot, as
  // returned by {GetCallTargetForFunction} above) to a function index.
  uint32_t GetFunctionIndexFromJumpTableSlot(Address slot_address) const;
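
  // A round-trip sketch (illustrative only): for a declared function index
  // {f}, the call target is a jump table slot, and the reverse lookup
  // recovers the index:
  //
  //   Address slot = native_module->GetCallTargetForFunction(f);
  //   DCHECK(native_module->is_jump_table_slot(slot));
  //   DCHECK_EQ(f, native_module->GetFunctionIndexFromJumpTableSlot(slot));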

  bool SetExecutable(bool executable);
320 :
321 : // For cctests, where we build both WasmModule and the runtime objects
322 : // on the fly, and bypass the instance builder pipeline.
323 : void ReserveCodeTableForTesting(uint32_t max_functions);

  void LogWasmCodes(Isolate* isolate);

  CompilationState* compilation_state() { return compilation_state_.get(); }

  // Create a {CompilationEnv} object for compilation. Only valid as long as
  // this {NativeModule} is alive.
  CompilationEnv CreateCompilationEnv() const;

  uint32_t num_functions() const {
    return module_->num_declared_functions + module_->num_imported_functions;
  }
  uint32_t num_imported_functions() const {
    return module_->num_imported_functions;
  }
  UseTrapHandler use_trap_handler() const { return use_trap_handler_; }
  void set_lazy_compile_frozen(bool frozen) { lazy_compile_frozen_ = frozen; }
  bool lazy_compile_frozen() const { return lazy_compile_frozen_; }
  Vector<const byte> wire_bytes() const { return wire_bytes_.as_vector(); }
  const WasmModule* module() const { return module_.get(); }
  size_t committed_code_space() const { return committed_code_space_.load(); }

  void SetWireBytes(OwnedVector<const byte> wire_bytes);

  WasmCode* Lookup(Address) const;

  WasmImportWrapperCache* import_wrapper_cache() const {
    return import_wrapper_cache_.get();
  }

  ~NativeModule();

  const WasmFeatures& enabled_features() const { return enabled_features_; }

 private:
  friend class WasmCode;
  friend class WasmCodeManager;
  friend class NativeModuleModificationScope;

  NativeModule(Isolate* isolate, const WasmFeatures& enabled_features,
               bool can_request_more, VirtualMemory code_space,
               WasmCodeManager* code_manager,
               std::shared_ptr<const WasmModule> module);

  WasmCode* AddAnonymousCode(Handle<Code>, WasmCode::Kind kind,
                             const char* name = nullptr);
  // Allocate code space. Returns a valid buffer or fails with OOM (crash).
  Vector<byte> AllocateForCode(size_t size);

  // Primitive for adding code to the native module. All code added to a
  // native module is owned by that module. Various callers decide how the
  // code is obtained (a {CodeDesc} vs. an existing {Code} object), its kind,
  // and whether it carries an index or is anonymous.
  WasmCode* AddOwnedCode(uint32_t index, Vector<const byte> instructions,
                         uint32_t stack_slots, size_t safepoint_table_offset,
                         size_t handler_table_offset,
                         size_t constant_pool_offset,
                         size_t code_comments_offset,
                         size_t unpadded_binary_size,
                         OwnedVector<trap_handler::ProtectedInstructionData>,
                         OwnedVector<const byte> reloc_info,
                         OwnedVector<const byte> source_position_table,
                         WasmCode::Kind, WasmCode::Tier);

  WasmCode* CreateEmptyJumpTable(uint32_t num_wasm_functions);

  // Hold the {allocation_mutex_} when calling this method.
  void InstallCode(WasmCode* code);

  Vector<WasmCode*> code_table() const {
    return {code_table_.get(), module_->num_declared_functions};
  }

  // Hold the {allocation_mutex_} when calling this method.
  bool has_interpreter_redirection(uint32_t func_index) {
    DCHECK_LT(func_index, num_functions());
    DCHECK_LE(module_->num_imported_functions, func_index);
    if (!interpreter_redirections_) return false;
    uint32_t bitset_idx = func_index - module_->num_imported_functions;
    uint8_t byte = interpreter_redirections_[bitset_idx / kBitsPerByte];
    return byte & (1 << (bitset_idx % kBitsPerByte));
  }

  // Hold the {allocation_mutex_} when calling this method.
  void SetInterpreterRedirection(uint32_t func_index) {
    DCHECK_LT(func_index, num_functions());
    DCHECK_LE(module_->num_imported_functions, func_index);
    if (!interpreter_redirections_) {
      interpreter_redirections_.reset(
          new uint8_t[RoundUp<kBitsPerByte>(module_->num_declared_functions) /
                      kBitsPerByte]);
    }
    uint32_t bitset_idx = func_index - module_->num_imported_functions;
    uint8_t& byte = interpreter_redirections_[bitset_idx / kBitsPerByte];
    byte |= 1 << (bitset_idx % kBitsPerByte);
  }

  // Features enabled for this module. We keep a copy of the features that
  // were enabled at the time of the creation of this native module,
  // to be consistent across asynchronous compilations later.
  const WasmFeatures enabled_features_;

  // TODO(clemensh): Make this a unique_ptr (requires refactoring
  // AsyncCompileJob).
  std::shared_ptr<const WasmModule> module_;

  OwnedVector<const byte> wire_bytes_;

  WasmCode* runtime_stub_table_[WasmCode::kRuntimeStubCount] = {nullptr};

  // Jump table used to easily redirect wasm function calls.
  WasmCode* jump_table_ = nullptr;

  // The compilation state keeps track of compilation tasks for this module.
  // Note that its destructor blocks until all tasks are finished/aborted and
  // hence needs to be destructed first when this native module dies.
  std::unique_ptr<CompilationState> compilation_state_;

  // A cache of the import wrappers, keyed on the kind and signature.
  std::unique_ptr<WasmImportWrapperCache> import_wrapper_cache_;

  // This mutex protects concurrent calls to {AddCode} and friends.
  mutable base::Mutex allocation_mutex_;

  //////////////////////////////////////////////////////////////////////////////
  // Protected by {allocation_mutex_}:

  // Holds all allocated code objects. Kept in ascending order of the code's
  // instruction start address to allow lookups.
  std::vector<std::unique_ptr<WasmCode>> owned_code_;

  std::unique_ptr<WasmCode*[]> code_table_;

  // Null if no redirections exist, otherwise a bitset over all functions in
  // this module marking those functions that have been redirected.
  std::unique_ptr<uint8_t[]> interpreter_redirections_;

  DisjointAllocationPool free_code_space_;
  DisjointAllocationPool allocated_code_space_;
  std::list<VirtualMemory> owned_code_space_;

  // End of fields protected by {allocation_mutex_}.
  //////////////////////////////////////////////////////////////////////////////

  WasmCodeManager* const code_manager_;
  std::atomic<size_t> committed_code_space_{0};
  int modification_scope_depth_ = 0;
  bool can_request_more_memory_;
  UseTrapHandler use_trap_handler_ = kNoTrapHandler;
  bool is_executable_ = false;
  bool lazy_compile_frozen_ = false;

  DISALLOW_COPY_AND_ASSIGN(NativeModule);
};

class V8_EXPORT_PRIVATE WasmCodeManager final {
 public:
  explicit WasmCodeManager(WasmMemoryTracker* memory_tracker,
                           size_t max_committed);
  // Create a new NativeModule. The caller is responsible for its
  // lifetime. The native module will be given some memory for code,
  // which will be page size aligned. The size of the initial memory
  // is determined with a heuristic based on the total size of wasm
  // code. The native module may later request more memory.
  // TODO(titzer): isolate is only required here for CompilationState.
  std::unique_ptr<NativeModule> NewNativeModule(
      Isolate* isolate, const WasmFeatures& enabled_features,
      size_t code_size_estimate, bool can_request_more,
      std::shared_ptr<const WasmModule> module);

  NativeModule* LookupNativeModule(Address pc) const;
  WasmCode* LookupCode(Address pc) const;
  size_t remaining_uncommitted_code_space() const;

  // Add a sample of all module sizes.
  void SampleModuleSizes(Isolate* isolate) const;

  void SetMaxCommittedMemoryForTesting(size_t limit);

  // TODO(v8:7424): For now we sample module sizes in a GC callback. This will
  // bias samples towards apps with high memory pressure. We should switch to
  // using sampling based on regular intervals independent of the GC.
  static void InstallSamplingGCCallback(Isolate* isolate);

  static size_t EstimateNativeModuleCodeSize(const WasmModule* module);
  static size_t EstimateNativeModuleNonCodeSize(const WasmModule* module);

 private:
  friend class NativeModule;

  V8_WARN_UNUSED_RESULT VirtualMemory TryAllocate(size_t size,
                                                  void* hint = nullptr);
  bool Commit(Address, size_t);
  // Currently we uncommit a whole module at once, so all we need to do is
  // account for the freed memory size. We do that in {FreeNativeModule};
  // there is no separate {Uncommit}.

  void FreeNativeModule(NativeModule*);
  void AssignRanges(Address start, Address end, NativeModule*);
  void AssignRangesAndAddModule(Address start, Address end, NativeModule*);

  WasmMemoryTracker* const memory_tracker_;
  std::atomic<size_t> remaining_uncommitted_code_space_;
  // If the remaining uncommitted code space falls below
  // {critical_uncommitted_code_space_}, then we trigger a GC before creating
  // the next module. This value is initialized to 50% of the available code
  // space on creation and after each GC.
  std::atomic<size_t> critical_uncommitted_code_space_;
  mutable base::Mutex native_modules_mutex_;

  //////////////////////////////////////////////////////////////////////////////
  // Protected by {native_modules_mutex_}:

  std::map<Address, std::pair<Address, NativeModule*>> lookup_map_;
  std::unordered_set<NativeModule*> native_modules_;

  // End of fields protected by {native_modules_mutex_}.
  //////////////////////////////////////////////////////////////////////////////

  DISALLOW_COPY_AND_ASSIGN(WasmCodeManager);
};
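
// A creation sketch (illustrative only; {memory_tracker}, {isolate}, and the
// shared {module} are assumed to exist, and {kMaxWasmCodeMemory} /
// {kAllWasmFeatures} are assumed from wasm-limits.h / wasm-features.h):
//
//   WasmCodeManager manager(&memory_tracker, kMaxWasmCodeMemory);
//   size_t estimate =
//       WasmCodeManager::EstimateNativeModuleCodeSize(module.get());
//   std::unique_ptr<NativeModule> native_module = manager.NewNativeModule(
//       isolate, kAllWasmFeatures, estimate,
//       /*can_request_more=*/true, std::move(module));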

// Within the scope, the native_module is writable and not executable.
// At the scope's destruction, the native_module is executable and not
// writable. The states inside the scope and at the scope's termination do not
// depend on the native_module's state when entering the scope.
// We currently mark the entire module's memory W^X:
//  - for AOT, that's as efficient as it can be.
//  - for Lazy, we don't have a heuristic for functions that may need patching,
//    and even if we did, the resulting set of pages may be fragmented.
//    Currently, we try to keep the number of syscalls low.
//  - a similar argument applies to debug time.
class NativeModuleModificationScope final {
 public:
  explicit NativeModuleModificationScope(NativeModule* native_module);
  ~NativeModuleModificationScope();

 private:
  NativeModule* native_module_;
};
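
// A typical usage sketch (illustrative only):
//
//   {
//     NativeModuleModificationScope scope(native_module);
//     // Code memory is writable here; add or patch code.
//   }  // On destruction, the module is flipped back to executable.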

}  // namespace wasm
}  // namespace internal
}  // namespace v8

#endif  // V8_WASM_WASM_CODE_MANAGER_H_