Line data Source code
1 : // Copyright 2017 the V8 project authors. All rights reserved.
2 : // Use of this source code is governed by a BSD-style license that can be
3 : // found in the LICENSE file.
4 :
5 : #include "src/wasm/wasm-code-manager.h"
6 :
7 : #include <iomanip>
8 :
9 : #include "src/assembler-inl.h"
10 : #include "src/base/adapters.h"
11 : #include "src/base/macros.h"
12 : #include "src/base/platform/platform.h"
13 : #include "src/counters.h"
14 : #include "src/disassembler.h"
15 : #include "src/globals.h"
16 : #include "src/log.h"
17 : #include "src/macro-assembler-inl.h"
18 : #include "src/macro-assembler.h"
19 : #include "src/objects-inl.h"
20 : #include "src/ostreams.h"
21 : #include "src/snapshot/embedded-data.h"
22 : #include "src/vector.h"
23 : #include "src/wasm/compilation-environment.h"
24 : #include "src/wasm/function-compiler.h"
25 : #include "src/wasm/jump-table-assembler.h"
26 : #include "src/wasm/wasm-import-wrapper-cache.h"
27 : #include "src/wasm/wasm-module.h"
28 : #include "src/wasm/wasm-objects-inl.h"
29 : #include "src/wasm/wasm-objects.h"
30 :
31 : #if defined(V8_OS_WIN_X64)
32 : #include "src/unwinding-info-win64.h"
33 : #endif
34 :
35 : #define TRACE_HEAP(...) \
36 : do { \
37 : if (FLAG_trace_wasm_native_heap) PrintF(__VA_ARGS__); \
38 : } while (false)
39 :
40 : namespace v8 {
41 : namespace internal {
42 : namespace wasm {
43 :
44 : using trap_handler::ProtectedInstructionData;
45 :
// Returns {region} to the pool. {regions_} is kept as an ordered list of
// disjoint, non-adjacent regions; {region} is either inserted at the right
// position or coalesced with the neighbor(s) it touches.
void DisjointAllocationPool::Merge(base::AddressRegion region) {
  auto dest_it = regions_.begin();
  auto dest_end = regions_.end();

  // Skip over dest regions strictly before {region}. Note the strict '<':
  // a region whose end equals {region.begin()} is a merge candidate and must
  // not be skipped.
  while (dest_it != dest_end && dest_it->end() < region.begin()) ++dest_it;

  // After last dest region: insert and done.
  if (dest_it == dest_end) {
    regions_.push_back(region);
    return;
  }

  // Adjacent (from below) to dest: merge and done. No need to look at the
  // previous region: the loop above guarantees it ends strictly before
  // {region.begin()}.
  if (dest_it->begin() == region.end()) {
    base::AddressRegion merged_region{region.begin(),
                                      region.size() + dest_it->size()};
    DCHECK_EQ(merged_region.end(), dest_it->end());
    *dest_it = merged_region;
    return;
  }

  // Before dest (with a gap): insert and done.
  if (dest_it->begin() > region.end()) {
    regions_.insert(dest_it, region);
    return;
  }

  // Src is adjacent from above. Merge and check whether the merged region is
  // now adjacent to the next region (i.e. {region} filled a gap exactly).
  DCHECK_EQ(dest_it->end(), region.begin());
  dest_it->set_size(dest_it->size() + region.size());
  DCHECK_EQ(dest_it->end(), region.end());
  auto next_dest = dest_it;
  ++next_dest;
  if (next_dest != dest_end && dest_it->end() == next_dest->begin()) {
    dest_it->set_size(dest_it->size() + next_dest->size());
    DCHECK_EQ(dest_it->end(), next_dest->end());
    regions_.erase(next_dest);
  }
}
87 :
88 3863461 : base::AddressRegion DisjointAllocationPool::Allocate(size_t size) {
89 3863465 : for (auto it = regions_.begin(), end = regions_.end(); it != end; ++it) {
90 3863463 : if (size > it->size()) continue;
91 : base::AddressRegion ret{it->begin(), size};
92 3863459 : if (size == it->size()) {
93 : regions_.erase(it);
94 : } else {
95 3863457 : *it = base::AddressRegion{it->begin() + size, it->size() - size};
96 : }
97 3863455 : return ret;
98 : }
99 2 : return {};
100 : }
101 :
102 1157 : Address WasmCode::constant_pool() const {
103 : if (FLAG_enable_embedded_constant_pool) {
104 : if (constant_pool_offset_ < code_comments_offset_) {
105 : return instruction_start() + constant_pool_offset_;
106 : }
107 : }
108 : return kNullAddress;
109 : }
110 :
111 0 : Address WasmCode::code_comments() const {
112 0 : return instruction_start() + code_comments_offset_;
113 : }
114 :
115 0 : uint32_t WasmCode::code_comments_size() const {
116 : DCHECK_GE(unpadded_binary_size_, code_comments_offset_);
117 0 : return static_cast<uint32_t>(unpadded_binary_size_ - code_comments_offset_);
118 : }
119 :
// Returns the trap handler index registered for this code object. CHECK-fails
// if no index has been set (see {HasTrapHandlerIndex}).
size_t WasmCode::trap_handler_index() const {
  CHECK(HasTrapHandlerIndex());
  return static_cast<size_t>(trap_handler_index_);
}
124 :
// Stores the index returned by the trap handler registration; read back via
// {trap_handler_index()}.
void WasmCode::set_trap_handler_index(size_t value) {
  trap_handler_index_ = value;
}
128 :
129 1436793 : void WasmCode::RegisterTrapHandlerData() {
130 : DCHECK(!HasTrapHandlerIndex());
131 1436793 : if (kind() != WasmCode::kFunction) return;
132 1061001 : if (protected_instructions_.empty()) return;
133 :
134 : Address base = instruction_start();
135 :
136 : size_t size = instructions().size();
137 : const int index =
138 : RegisterHandlerData(base, size, protected_instructions().size(),
139 64029 : protected_instructions().start());
140 :
141 : // TODO(eholk): if index is negative, fail.
142 64160 : CHECK_LE(0, index);
143 : set_trap_handler_index(static_cast<size_t>(index));
144 : }
145 :
146 3918297 : bool WasmCode::HasTrapHandlerIndex() const { return trap_handler_index_ >= 0; }
147 :
148 802316 : bool WasmCode::ShouldBeLogged(Isolate* isolate) {
149 : // The return value is cached in {WasmEngine::IsolateData::log_codes}. Ensure
150 : // to call {WasmEngine::EnableCodeLogging} if this return value would change
151 : // for any isolate. Otherwise we might lose code events.
152 940331 : return isolate->logger()->is_listening_to_code_events() ||
153 802316 : isolate->is_profiling();
154 : }
155 :
// Emits a CodeCreateEvent (and, if present, source position info) for this
// code object to the isolate's profiler/logger. Uses the function's name from
// the module's wire bytes, or a generated "wasm-function[i]" fallback.
void WasmCode::LogCode(Isolate* isolate) const {
  DCHECK(ShouldBeLogged(isolate));
  if (IsAnonymous()) return;

  ModuleWireBytes wire_bytes(native_module()->wire_bytes());
  // TODO(herhut): Allow to log code without on-heap round-trip of the name.
  WireBytesRef name_ref =
      native_module()->module()->LookupFunctionName(wire_bytes, index());
  WasmName name_vec = wire_bytes.GetNameOrNull(name_ref);
  if (!name_vec.empty()) {
    HandleScope scope(isolate);
    // The name bytes must be round-tripped through a heap string to produce a
    // flat, null-free C string for the profiler.
    MaybeHandle<String> maybe_name = isolate->factory()->NewStringFromUtf8(
        Vector<const char>::cast(name_vec));
    Handle<String> name;
    if (!maybe_name.ToHandle(&name)) {
      name = isolate->factory()->NewStringFromAsciiChecked("<name too long>");
    }
    int name_length;
    auto cname =
        name->ToCString(AllowNullsFlag::DISALLOW_NULLS,
                        RobustnessFlag::ROBUST_STRING_TRAVERSAL, &name_length);
    PROFILE(isolate,
            CodeCreateEvent(CodeEventListener::FUNCTION_TAG, this,
                            {cname.get(), static_cast<size_t>(name_length)}));
  } else {
    // Unnamed function: synthesize a name from the function index.
    EmbeddedVector<char, 32> generated_name;
    int length = SNPrintF(generated_name, "wasm-function[%d]", index());
    generated_name.Truncate(length);
    PROFILE(isolate, CodeCreateEvent(CodeEventListener::FUNCTION_TAG, this,
                                     generated_name));
  }

  if (!source_positions().empty()) {
    LOG_CODE_EVENT(isolate, CodeLinePosInfoRecordEvent(instruction_start(),
                                                       source_positions()));
  }
}
193 :
// Debug-only consistency check: walks all relocation entries of this code
// object and verifies that each one has an allowed mode and a plausible
// target (wasm calls go through the jump table, stub calls hit the runtime
// stub table, internal references stay inside this code object).
void WasmCode::Validate() const {
#ifdef DEBUG
  // We expect certain relocation info modes to never appear in {WasmCode}
  // objects or to be restricted to a small set of valid values. Hence the
  // iteration below does not use a mask, but visits all relocation data.
  for (RelocIterator it(instructions(), reloc_info(), constant_pool());
       !it.done(); it.next()) {
    RelocInfo::Mode mode = it.rinfo()->rmode();
    switch (mode) {
      case RelocInfo::WASM_CALL: {
        // Direct wasm calls must target a slot in this module's jump table.
        Address target = it.rinfo()->wasm_call_address();
        WasmCode* code = native_module_->Lookup(target);
        CHECK_NOT_NULL(code);
        CHECK_EQ(WasmCode::kJumpTable, code->kind());
        CHECK_EQ(native_module()->jump_table_, code);
        CHECK(code->contains(target));
        break;
      }
      case RelocInfo::WASM_STUB_CALL: {
        Address target = it.rinfo()->wasm_stub_call_address();
        WasmCode* code = native_module_->Lookup(target);
        CHECK_NOT_NULL(code);
#ifdef V8_EMBEDDED_BUILTINS
        // With embedded builtins, stub calls go through the runtime stub
        // jump table (see SetRuntimeStubs).
        CHECK_EQ(WasmCode::kJumpTable, code->kind());
        CHECK_EQ(native_module()->runtime_stub_table_, code);
        CHECK(code->contains(target));
#else
        // Otherwise they target a copied runtime stub directly.
        CHECK_EQ(WasmCode::kRuntimeStub, code->kind());
        CHECK_EQ(target, code->instruction_start());
#endif
        break;
      }
      case RelocInfo::INTERNAL_REFERENCE:
      case RelocInfo::INTERNAL_REFERENCE_ENCODED: {
        // Internal references must point into this code object.
        Address target = it.rinfo()->target_internal_reference();
        CHECK(contains(target));
        break;
      }
      case RelocInfo::EXTERNAL_REFERENCE:
      case RelocInfo::CONST_POOL:
      case RelocInfo::VENEER_POOL:
        // These are OK to appear.
        break;
      default:
        FATAL("Unexpected mode: %d", mode);
    }
  }
#endif
}
243 :
244 1438007 : void WasmCode::MaybePrint(const char* name) const {
245 : // Determines whether flags want this code to be printed.
246 1438068 : if ((FLAG_print_wasm_code && kind() == kFunction) ||
247 2875643 : (FLAG_print_wasm_stub_code && kind() != kFunction) || FLAG_print_code) {
248 61 : Print(name);
249 : }
250 1438007 : }
251 :
// Prints the full disassembly of this code object to stdout, framed by
// header/footer marker lines.
void WasmCode::Print(const char* name) const {
  StdoutStream os;
  os << "--- WebAssembly code ---\n";
  Disassemble(name, os);
  os << "--- End code ---\n";
}
258 :
// Writes a human-readable dump of this code object to {os}: metadata,
// disassembled instructions, exception handler table, protected instructions,
// source positions, safepoints, relocation info, and code comments. The
// instruction/table sections require ENABLE_DISASSEMBLER builds.
void WasmCode::Disassemble(const char* name, std::ostream& os,
                           Address current_pc) const {
  if (name) os << "name: " << name << "\n";
  if (!IsAnonymous()) os << "index: " << index() << "\n";
  os << "kind: " << GetWasmCodeKindAsString(kind_) << "\n";
  os << "compiler: " << (is_liftoff() ? "Liftoff" : "TurboFan") << "\n";
  size_t padding = instructions().size() - unpadded_binary_size_;
  os << "Body (size = " << instructions().size() << " = "
     << unpadded_binary_size_ << " + " << padding << " padding)\n";

#ifdef ENABLE_DISASSEMBLER
  // The instruction stream proper ends at the first metadata table; take the
  // smallest non-zero offset among constant pool, safepoint table, and
  // handler table.
  size_t instruction_size = unpadded_binary_size_;
  if (constant_pool_offset_ < instruction_size) {
    instruction_size = constant_pool_offset_;
  }
  if (safepoint_table_offset_ && safepoint_table_offset_ < instruction_size) {
    instruction_size = safepoint_table_offset_;
  }
  if (handler_table_offset_ && handler_table_offset_ < instruction_size) {
    instruction_size = handler_table_offset_;
  }
  DCHECK_LT(0, instruction_size);
  os << "Instructions (size = " << instruction_size << ")\n";
  Disassembler::Decode(nullptr, &os, instructions().start(),
                       instructions().start() + instruction_size,
                       CodeReference(this), current_pc);
  os << "\n";

  if (handler_table_offset_ > 0) {
    HandlerTable table(instruction_start(), handler_table_offset_);
    os << "Exception Handler Table (size = " << table.NumberOfReturnEntries()
       << "):\n";
    table.HandlerTableReturnPrint(os);
    os << "\n";
  }

  if (!protected_instructions_.empty()) {
    os << "Protected instructions:\n pc offset  land pad\n";
    for (auto& data : protected_instructions()) {
      os << std::setw(10) << std::hex << data.instr_offset << std::setw(10)
         << std::hex << data.landing_offset << "\n";
    }
    os << "\n";
  }

  if (!source_positions().empty()) {
    os << "Source positions:\n pc offset  position\n";
    for (SourcePositionTableIterator it(source_positions()); !it.done();
         it.Advance()) {
      os << std::setw(10) << std::hex << it.code_offset() << std::dec
         << std::setw(10) << it.source_position().ScriptOffset()
         << (it.is_statement() ? "  statement" : "") << "\n";
    }
    os << "\n";
  }

  if (safepoint_table_offset_ > 0) {
    SafepointTable table(instruction_start(), safepoint_table_offset_,
                         stack_slots_);
    os << "Safepoints (size = " << table.size() << ")\n";
    for (uint32_t i = 0; i < table.length(); i++) {
      uintptr_t pc_offset = table.GetPcOffset(i);
      os << reinterpret_cast<const void*>(instruction_start() + pc_offset);
      os << std::setw(6) << std::hex << pc_offset << "  " << std::dec;
      table.PrintEntry(i, os);
      os << " (sp -> fp)";
      SafepointEntry entry = table.GetEntry(i);
      if (entry.trampoline_pc() != -1) {
        os << " trampoline: " << std::hex << entry.trampoline_pc() << std::dec;
      }
      if (entry.has_deoptimization_index()) {
        os << " deopt: " << std::setw(6) << entry.deoptimization_index();
      }
      os << "\n";
    }
    os << "\n";
  }

  os << "RelocInfo (size = " << reloc_info_.size() << ")\n";
  for (RelocIterator it(instructions(), reloc_info(), constant_pool());
       !it.done(); it.next()) {
    it.rinfo()->Print(nullptr, os);
  }
  os << "\n";

  if (code_comments_size() > 0) {
    PrintCodeCommentsSection(os, code_comments(), code_comments_size());
  }
#endif  // ENABLE_DISASSEMBLER
}
349 :
350 0 : const char* GetWasmCodeKindAsString(WasmCode::Kind kind) {
351 61 : switch (kind) {
352 : case WasmCode::kFunction:
353 : return "wasm function";
354 : case WasmCode::kWasmToJsWrapper:
355 0 : return "wasm-to-js";
356 : case WasmCode::kRuntimeStub:
357 0 : return "runtime-stub";
358 : case WasmCode::kInterpreterEntry:
359 0 : return "interpreter entry";
360 : case WasmCode::kJumpTable:
361 0 : return "jump table";
362 : }
363 0 : return "unknown kind";
364 : }
365 :
366 7836594 : WasmCode::~WasmCode() {
367 3918297 : if (HasTrapHandlerIndex()) {
368 64161 : CHECK_LT(trap_handler_index(),
369 : static_cast<size_t>(std::numeric_limits<int>::max()));
370 64161 : trap_handler::ReleaseHandlerData(static_cast<int>(trap_handler_index()));
371 : }
372 3918297 : }
373 :
374 76812 : V8_WARN_UNUSED_RESULT bool WasmCode::DecRefOnPotentiallyDeadCode() {
375 76812 : if (native_module_->engine()->AddPotentiallyDeadCode(this)) {
376 : // The code just became potentially dead. The ref count we wanted to
377 : // decrement is now transferred to the set of potentially dead code, and
378 : // will be decremented when the next GC is run.
379 : return false;
380 : }
381 : // If we reach here, the code was already potentially dead. Decrement the ref
382 : // count, and return true if it drops to zero.
383 0 : int old_count = ref_count_.load(std::memory_order_relaxed);
384 : while (true) {
385 : DCHECK_LE(1, old_count);
386 0 : if (ref_count_.compare_exchange_weak(old_count, old_count - 1,
387 : std::memory_order_relaxed)) {
388 0 : return old_count == 1;
389 : }
390 : }
391 : }
392 :
393 : // static
394 62592260 : void WasmCode::DecrementRefCount(Vector<WasmCode*> code_vec) {
395 : // Decrement the ref counter of all given code objects. Keep the ones whose
396 : // ref count drops to zero.
397 : std::unordered_map<NativeModule*, std::vector<WasmCode*>> dead_code;
398 82444854 : for (WasmCode* code : code_vec) {
399 9926221 : if (code->DecRef()) dead_code[code->native_module()].push_back(code);
400 : }
401 :
402 : // For each native module, free all its code objects at once.
403 62592336 : for (auto& dead_code_entry : dead_code) {
404 : NativeModule* native_module = dead_code_entry.first;
405 : Vector<WasmCode*> code_vec = VectorOf(dead_code_entry.second);
406 : native_module->FreeCode(code_vec);
407 : }
408 62592328 : }
409 :
410 1242981 : NativeModule::NativeModule(WasmEngine* engine, const WasmFeatures& enabled,
411 : bool can_request_more, VirtualMemory code_space,
412 : std::shared_ptr<const WasmModule> module,
413 : std::shared_ptr<Counters> async_counters,
414 : std::shared_ptr<NativeModule>* shared_this)
415 : : enabled_features_(enabled),
416 : module_(std::move(module)),
417 : import_wrapper_cache_(std::unique_ptr<WasmImportWrapperCache>(
418 : new WasmImportWrapperCache(this))),
419 : free_code_space_(code_space.region()),
420 : engine_(engine),
421 : can_request_more_memory_(can_request_more),
422 : use_trap_handler_(trap_handler::IsTrapHandlerEnabled() ? kUseTrapHandler
423 9943848 : : kNoTrapHandler) {
424 : // We receive a pointer to an empty {std::shared_ptr}, and install ourselve
425 : // there.
426 : DCHECK_NOT_NULL(shared_this);
427 : DCHECK_NULL(*shared_this);
428 1242981 : shared_this->reset(this);
429 1242981 : compilation_state_ =
430 3728943 : CompilationState::New(*shared_this, std::move(async_counters));
431 : DCHECK_NOT_NULL(module_);
432 1242981 : owned_code_space_.emplace_back(std::move(code_space));
433 1242981 : owned_code_.reserve(num_functions());
434 :
435 : #if defined(V8_OS_WIN_X64)
436 : // On some platforms, specifically Win64, we need to reserve some pages at
437 : // the beginning of an executable space.
438 : // See src/heap/spaces.cc, MemoryAllocator::InitializeCodePageAllocator() and
439 : // https://cs.chromium.org/chromium/src/components/crash/content/app/crashpad_win.cc?rcl=fd680447881449fba2edcf0589320e7253719212&l=204
440 : // for details.
441 : if (win64_unwindinfo::CanRegisterUnwindInfoForNonABICompliantCodeRange() &&
442 : FLAG_win64_unwinding_info) {
443 : AllocateForCode(Heap::GetCodeRangeReservedAreaSize());
444 : }
445 : #endif
446 :
447 1242981 : uint32_t num_wasm_functions = module_->num_declared_functions;
448 1242981 : if (num_wasm_functions > 0) {
449 142393 : code_table_.reset(new WasmCode* [num_wasm_functions] {});
450 :
451 142389 : WasmCodeRefScope code_ref_scope;
452 142393 : jump_table_ = CreateEmptyJumpTable(
453 142389 : JumpTableAssembler::SizeForNumberOfSlots(num_wasm_functions));
454 : }
455 1242981 : }
456 :
457 1094224 : void NativeModule::ReserveCodeTableForTesting(uint32_t max_functions) {
458 1094224 : WasmCodeRefScope code_ref_scope;
459 : DCHECK_LE(num_functions(), max_functions);
460 1094224 : WasmCode** new_table = new WasmCode* [max_functions] {};
461 1094224 : if (module_->num_declared_functions > 0) {
462 0 : memcpy(new_table, code_table_.get(),
463 : module_->num_declared_functions * sizeof(*new_table));
464 : }
465 : code_table_.reset(new_table);
466 :
467 : // Re-allocate jump table.
468 1094224 : jump_table_ = CreateEmptyJumpTable(
469 1094224 : JumpTableAssembler::SizeForNumberOfSlots(max_functions));
470 1094224 : }
471 :
472 138015 : void NativeModule::LogWasmCodes(Isolate* isolate) {
473 276026 : if (!WasmCode::ShouldBeLogged(isolate)) return;
474 :
475 : // TODO(titzer): we skip the logging of the import wrappers
476 : // here, but they should be included somehow.
477 4 : int start = module()->num_imported_functions;
478 4 : int end = start + module()->num_declared_functions;
479 4 : WasmCodeRefScope code_ref_scope;
480 12 : for (int func_index = start; func_index < end; ++func_index) {
481 4 : if (WasmCode* code = GetCode(func_index)) code->LogCode(isolate);
482 : }
483 : }
484 :
// Bundles this module's compilation parameters into a CompilationEnv value.
CompilationEnv NativeModule::CreateCompilationEnv() const {
  return {module(), use_trap_handler_, kRuntimeExceptionSupport,
          enabled_features_};
}
489 :
// Test-only: copies an on-heap Code object into this module as an anonymous
// wasm function and publishes it.
WasmCode* NativeModule::AddCodeForTesting(Handle<Code> code) {
  return AddAndPublishAnonymousCode(code, WasmCode::kFunction);
}
493 :
494 2633 : void NativeModule::UseLazyStubs() {
495 2633 : uint32_t start = module_->num_imported_functions;
496 2633 : uint32_t end = start + module_->num_declared_functions;
497 32115 : for (uint32_t func_index = start; func_index < end; func_index++) {
498 14741 : UseLazyStub(func_index);
499 : }
500 2633 : }
501 :
// Patches the jump table slot of {func_index} to jump to the lazy compile
// stub, so the first call to the function triggers compilation.
void NativeModule::UseLazyStub(uint32_t func_index) {
  DCHECK_LE(module_->num_imported_functions, func_index);
  DCHECK_LT(func_index,
            module_->num_imported_functions + module_->num_declared_functions);

  // Add jump table entry for jump to the lazy compile stub.
  uint32_t slot_index = func_index - module_->num_imported_functions;
  DCHECK_NE(runtime_stub_entry(WasmCode::kWasmCompileLazy), kNullAddress);
  JumpTableAssembler::EmitLazyCompileJumpSlot(
      jump_table_->instruction_start(), slot_index, func_index,
      runtime_stub_entry(WasmCode::kWasmCompileLazy), WasmCode::kFlushICache);
}
514 :
515 : // TODO(mstarzinger): Remove {Isolate} parameter once {V8_EMBEDDED_BUILTINS}
516 : // was removed and embedded builtins are no longer optional.
517 1242212 : void NativeModule::SetRuntimeStubs(Isolate* isolate) {
518 : DCHECK_EQ(kNullAddress, runtime_stub_entries_[0]); // Only called once.
519 : #ifdef V8_EMBEDDED_BUILTINS
520 1242211 : WasmCodeRefScope code_ref_scope;
521 : WasmCode* jump_table =
522 : CreateEmptyJumpTable(JumpTableAssembler::SizeForNumberOfStubSlots(
523 1242212 : WasmCode::kRuntimeStubCount));
524 : Address base = jump_table->instruction_start();
525 1242209 : EmbeddedData embedded_data = EmbeddedData::FromBlob();
526 : #define RUNTIME_STUB(Name) {Builtins::k##Name, WasmCode::k##Name},
527 : #define RUNTIME_STUB_TRAP(Name) RUNTIME_STUB(ThrowWasm##Name)
528 : std::pair<Builtins::Name, WasmCode::RuntimeStubId> wasm_runtime_stubs[] = {
529 1242209 : WASM_RUNTIME_STUB_LIST(RUNTIME_STUB, RUNTIME_STUB_TRAP)};
530 : #undef RUNTIME_STUB
531 : #undef RUNTIME_STUB_TRAP
532 73290499 : for (auto pair : wasm_runtime_stubs) {
533 36024146 : CHECK(embedded_data.ContainsBuiltin(pair.first));
534 36024146 : Address builtin = embedded_data.InstructionStartOfBuiltin(pair.first);
535 : JumpTableAssembler::EmitRuntimeStubSlot(base, pair.second, builtin,
536 36024144 : WasmCode::kNoFlushICache);
537 : uint32_t slot_offset =
538 : JumpTableAssembler::StubSlotIndexToOffset(pair.second);
539 36024145 : runtime_stub_entries_[pair.second] = base + slot_offset;
540 : }
541 : FlushInstructionCache(jump_table->instructions().start(),
542 1242212 : jump_table->instructions().size());
543 : DCHECK_NULL(runtime_stub_table_);
544 1242211 : runtime_stub_table_ = jump_table;
545 : #else // V8_EMBEDDED_BUILTINS
546 : HandleScope scope(isolate);
547 : WasmCodeRefScope code_ref_scope;
548 : USE(runtime_stub_table_); // Actually unused, but avoids ifdef's in header.
549 : #define COPY_BUILTIN(Name) \
550 : runtime_stub_entries_[WasmCode::k##Name] = \
551 : AddAndPublishAnonymousCode( \
552 : isolate->builtins()->builtin_handle(Builtins::k##Name), \
553 : WasmCode::kRuntimeStub, #Name) \
554 : ->instruction_start();
555 : #define COPY_BUILTIN_TRAP(Name) COPY_BUILTIN(ThrowWasm##Name)
556 : WASM_RUNTIME_STUB_LIST(COPY_BUILTIN, COPY_BUILTIN_TRAP)
557 : #undef COPY_BUILTIN_TRAP
558 : #undef COPY_BUILTIN
559 : #endif // V8_EMBEDDED_BUILTINS
560 : DCHECK_NE(kNullAddress, runtime_stub_entries_[0]);
561 1242212 : }
562 :
563 753 : WasmCode* NativeModule::AddAndPublishAnonymousCode(Handle<Code> code,
564 : WasmCode::Kind kind,
565 : const char* name) {
566 : // For off-heap builtins, we create a copy of the off-heap instruction stream
567 : // instead of the on-heap code object containing the trampoline. Ensure that
568 : // we do not apply the on-heap reloc info to the off-heap instructions.
569 : const size_t relocation_size =
570 1506 : code->is_off_heap_trampoline() ? 0 : code->relocation_size();
571 : OwnedVector<byte> reloc_info;
572 753 : if (relocation_size > 0) {
573 : reloc_info = OwnedVector<byte>::New(relocation_size);
574 : memcpy(reloc_info.start(), code->relocation_start(), relocation_size);
575 : }
576 : Handle<ByteArray> source_pos_table(code->SourcePositionTable(),
577 1506 : code->GetIsolate());
578 : OwnedVector<byte> source_pos =
579 753 : OwnedVector<byte>::New(source_pos_table->length());
580 753 : if (source_pos_table->length() > 0) {
581 : source_pos_table->copy_out(0, source_pos.start(),
582 : source_pos_table->length());
583 : }
584 : Vector<const byte> instructions(
585 1506 : reinterpret_cast<byte*>(code->InstructionStart()),
586 3012 : static_cast<size_t>(code->InstructionSize()));
587 : const uint32_t stack_slots = static_cast<uint32_t>(
588 753 : code->has_safepoint_info() ? code->stack_slots() : 0);
589 :
590 : // TODO(jgruber,v8:8758): Remove this translation. It exists only because
591 : // Code objects contains real offsets but WasmCode expects an offset of 0 to
592 : // mean 'empty'.
593 : const size_t safepoint_table_offset = static_cast<size_t>(
594 1506 : code->has_safepoint_table() ? code->safepoint_table_offset() : 0);
595 : const size_t handler_table_offset = static_cast<size_t>(
596 753 : code->has_handler_table() ? code->handler_table_offset() : 0);
597 : const size_t constant_pool_offset =
598 753 : static_cast<size_t>(code->constant_pool_offset());
599 : const size_t code_comments_offset =
600 : static_cast<size_t>(code->code_comments_offset());
601 :
602 753 : Vector<uint8_t> dst_code_bytes = AllocateForCode(instructions.size());
603 : memcpy(dst_code_bytes.begin(), instructions.start(), instructions.size());
604 :
605 : // Apply the relocation delta by iterating over the RelocInfo.
606 1506 : intptr_t delta = reinterpret_cast<Address>(dst_code_bytes.begin()) -
607 1506 : code->InstructionStart();
608 753 : int mode_mask = RelocInfo::kApplyMask |
609 753 : RelocInfo::ModeMask(RelocInfo::WASM_STUB_CALL);
610 : Address constant_pool_start =
611 753 : reinterpret_cast<Address>(dst_code_bytes.begin()) + constant_pool_offset;
612 753 : RelocIterator orig_it(*code, mode_mask);
613 753 : for (RelocIterator it(dst_code_bytes, reloc_info.as_vector(),
614 753 : constant_pool_start, mode_mask);
615 0 : !it.done(); it.next(), orig_it.next()) {
616 : RelocInfo::Mode mode = it.rinfo()->rmode();
617 0 : if (RelocInfo::IsWasmStubCall(mode)) {
618 0 : uint32_t stub_call_tag = orig_it.rinfo()->wasm_call_tag();
619 : DCHECK_LT(stub_call_tag, WasmCode::kRuntimeStubCount);
620 : Address entry = runtime_stub_entry(
621 : static_cast<WasmCode::RuntimeStubId>(stub_call_tag));
622 0 : it.rinfo()->set_wasm_stub_call_address(entry, SKIP_ICACHE_FLUSH);
623 : } else {
624 : it.rinfo()->apply(delta);
625 : }
626 : }
627 :
628 : // Flush the i-cache after relocation.
629 753 : FlushInstructionCache(dst_code_bytes.start(), dst_code_bytes.size());
630 :
631 : DCHECK_NE(kind, WasmCode::Kind::kInterpreterEntry);
632 : std::unique_ptr<WasmCode> new_code{new WasmCode{
633 : this, // native_module
634 : WasmCode::kAnonymousFuncIndex, // index
635 : dst_code_bytes, // instructions
636 : stack_slots, // stack_slots
637 : 0, // tagged_parameter_slots
638 : safepoint_table_offset, // safepoint_table_offset
639 : handler_table_offset, // handler_table_offset
640 : constant_pool_offset, // constant_pool_offset
641 : code_comments_offset, // code_comments_offset
642 : instructions.size(), // unpadded_binary_size
643 : OwnedVector<ProtectedInstructionData>{}, // protected_instructions
644 : std::move(reloc_info), // reloc_info
645 : std::move(source_pos), // source positions
646 : kind, // kind
647 1506 : ExecutionTier::kNone}}; // tier
648 753 : new_code->MaybePrint(name);
649 : new_code->Validate();
650 :
651 1506 : return PublishCode(std::move(new_code));
652 : }
653 :
654 374899 : std::unique_ptr<WasmCode> NativeModule::AddCode(
655 : uint32_t index, const CodeDesc& desc, uint32_t stack_slots,
656 : uint32_t tagged_parameter_slots,
657 : OwnedVector<trap_handler::ProtectedInstructionData> protected_instructions,
658 : OwnedVector<const byte> source_position_table, WasmCode::Kind kind,
659 : ExecutionTier tier) {
660 : return AddCodeWithCodeSpace(index, desc, stack_slots, tagged_parameter_slots,
661 : std::move(protected_instructions),
662 : std::move(source_position_table), kind, tier,
663 1499596 : AllocateForCode(desc.instr_size));
664 : }
665 :
666 1437700 : std::unique_ptr<WasmCode> NativeModule::AddCodeWithCodeSpace(
667 : uint32_t index, const CodeDesc& desc, uint32_t stack_slots,
668 : uint32_t tagged_parameter_slots,
669 : OwnedVector<ProtectedInstructionData> protected_instructions,
670 : OwnedVector<const byte> source_position_table, WasmCode::Kind kind,
671 : ExecutionTier tier, Vector<uint8_t> dst_code_bytes) {
672 : OwnedVector<byte> reloc_info;
673 1437700 : if (desc.reloc_size > 0) {
674 359511 : reloc_info = OwnedVector<byte>::New(desc.reloc_size);
675 359649 : memcpy(reloc_info.start(), desc.buffer + desc.buffer_size - desc.reloc_size,
676 359649 : desc.reloc_size);
677 : }
678 :
679 : // TODO(jgruber,v8:8758): Remove this translation. It exists only because
680 : // CodeDesc contains real offsets but WasmCode expects an offset of 0 to mean
681 : // 'empty'.
682 : const size_t safepoint_table_offset = static_cast<size_t>(
683 1437838 : desc.safepoint_table_size == 0 ? 0 : desc.safepoint_table_offset);
684 : const size_t handler_table_offset = static_cast<size_t>(
685 1437838 : desc.handler_table_size == 0 ? 0 : desc.handler_table_offset);
686 : const size_t constant_pool_offset =
687 1437838 : static_cast<size_t>(desc.constant_pool_offset);
688 : const size_t code_comments_offset =
689 1437838 : static_cast<size_t>(desc.code_comments_offset);
690 1437838 : const size_t instr_size = static_cast<size_t>(desc.instr_size);
691 :
692 1437838 : memcpy(dst_code_bytes.begin(), desc.buffer,
693 : static_cast<size_t>(desc.instr_size));
694 :
695 : // Apply the relocation delta by iterating over the RelocInfo.
696 1437838 : intptr_t delta = dst_code_bytes.begin() - desc.buffer;
697 1437838 : int mode_mask = RelocInfo::kApplyMask |
698 : RelocInfo::ModeMask(RelocInfo::WASM_CALL) |
699 1437838 : RelocInfo::ModeMask(RelocInfo::WASM_STUB_CALL);
700 : Address constant_pool_start =
701 1437838 : reinterpret_cast<Address>(dst_code_bytes.begin()) + constant_pool_offset;
702 2799223 : for (RelocIterator it(dst_code_bytes, reloc_info.as_vector(),
703 1437838 : constant_pool_start, mode_mask);
704 681196 : !it.done(); it.next()) {
705 : RelocInfo::Mode mode = it.rinfo()->rmode();
706 681191 : if (RelocInfo::IsWasmCall(mode)) {
707 25960 : uint32_t call_tag = it.rinfo()->wasm_call_tag();
708 : Address target = GetCallTargetForFunction(call_tag);
709 25940 : it.rinfo()->set_wasm_call_address(target, SKIP_ICACHE_FLUSH);
710 655231 : } else if (RelocInfo::IsWasmStubCall(mode)) {
711 531844 : uint32_t stub_call_tag = it.rinfo()->wasm_call_tag();
712 : DCHECK_LT(stub_call_tag, WasmCode::kRuntimeStubCount);
713 : Address entry = runtime_stub_entry(
714 : static_cast<WasmCode::RuntimeStubId>(stub_call_tag));
715 531898 : it.rinfo()->set_wasm_stub_call_address(entry, SKIP_ICACHE_FLUSH);
716 : } else {
717 : it.rinfo()->apply(delta);
718 : }
719 : }
720 :
721 : // Flush the i-cache after relocation.
722 1437012 : FlushInstructionCache(dst_code_bytes.start(), dst_code_bytes.size());
723 :
724 : std::unique_ptr<WasmCode> code{new WasmCode{
725 : this, index, dst_code_bytes, stack_slots, tagged_parameter_slots,
726 : safepoint_table_offset, handler_table_offset, constant_pool_offset,
727 : code_comments_offset, instr_size, std::move(protected_instructions),
728 1436668 : std::move(reloc_info), std::move(source_position_table), kind, tier}};
729 1438344 : code->MaybePrint();
730 : code->Validate();
731 :
732 1436837 : code->RegisterTrapHandlerData();
733 :
734 1437053 : return code;
735 : }
736 :
737 2854717 : WasmCode* NativeModule::PublishCode(std::unique_ptr<WasmCode> code) {
738 2854717 : base::MutexGuard lock(&allocation_mutex_);
739 5709433 : return PublishCodeLocked(std::move(code));
740 : }
741 :
742 : namespace {
743 1062887 : WasmCode::Kind GetCodeKindForExecutionTier(ExecutionTier tier) {
744 1062887 : switch (tier) {
745 : case ExecutionTier::kInterpreter:
746 : return WasmCode::Kind::kInterpreterEntry;
747 : case ExecutionTier::kLiftoff:
748 : case ExecutionTier::kTurbofan:
749 1061550 : return WasmCode::Kind::kFunction;
750 : case ExecutionTier::kNone:
751 0 : UNREACHABLE();
752 : }
753 0 : }
754 : } // namespace
755 :
// Installs {code} in this module: updates the code table and jump table (for
// non-anonymous code), registers it with the current {WasmCodeRefScope}, and
// transfers ownership into {owned_code_}. Returns the raw pointer.
WasmCode* NativeModule::PublishCodeLocked(std::unique_ptr<WasmCode> code) {
  // The caller must hold the {allocation_mutex_}, thus we fail to lock it here.
  DCHECK(!allocation_mutex_.TryLock());

  if (!code->IsAnonymous()) {
    DCHECK_LT(code->index(), num_functions());
    DCHECK_LE(module_->num_imported_functions, code->index());

    // Assume an order of execution tiers that represents the quality of their
    // generated code.
    static_assert(ExecutionTier::kNone < ExecutionTier::kInterpreter &&
                      ExecutionTier::kInterpreter < ExecutionTier::kLiftoff &&
                      ExecutionTier::kLiftoff < ExecutionTier::kTurbofan,
                  "Assume an order on execution tiers");

    // Update code table but avoid to fall back to less optimized code. We use
    // the new code if it was compiled with a higher tier.
    uint32_t slot_idx = code->index() - module_->num_imported_functions;
    WasmCode* prior_code = code_table_[slot_idx];
    bool update_code_table = !prior_code || prior_code->tier() < code->tier();
    if (update_code_table) {
      code_table_[slot_idx] = code.get();
      if (prior_code) {
        WasmCodeRefScope::AddRef(prior_code);
        // The code is added to the current {WasmCodeRefScope}, hence the ref
        // count cannot drop to zero here.
        CHECK(!prior_code->DecRef());
      }
    }

    // Populate optimized code to the jump table unless there is an active
    // redirection to the interpreter that should be preserved.
    bool update_jump_table =
        update_code_table && !has_interpreter_redirection(code->index());

    // Ensure that interpreter entries always populate to the jump table.
    if (code->kind_ == WasmCode::Kind::kInterpreterEntry) {
      SetInterpreterRedirection(code->index());
      update_jump_table = true;
    }

    if (update_jump_table) {
      // Redirect the jump table slot for this function to the new code.
      JumpTableAssembler::PatchJumpTableSlot(
          jump_table_->instruction_start(), slot_idx, code->instruction_start(),
          WasmCode::kFlushICache);
    }
  }
  // Keep the new code alive for the caller via the innermost ref scope, then
  // hand ownership of the object to {owned_code_}.
  WasmCodeRefScope::AddRef(code.get());
  WasmCode* result = code.get();
  owned_code_.emplace_back(std::move(code));
  return result;
}
808 :
// Registers code that was deserialized from a cached module. The instruction
// bytes are copied into this module's code space and the code is published.
WasmCode* NativeModule::AddDeserializedCode(
    uint32_t index, Vector<const byte> instructions, uint32_t stack_slots,
    uint32_t tagged_parameter_slots, size_t safepoint_table_offset,
    size_t handler_table_offset, size_t constant_pool_offset,
    size_t code_comments_offset, size_t unpadded_binary_size,
    OwnedVector<ProtectedInstructionData> protected_instructions,
    OwnedVector<const byte> reloc_info,
    OwnedVector<const byte> source_position_table, WasmCode::Kind kind,
    ExecutionTier tier) {
  // Reserve space in this module's code region and copy the raw instructions.
  Vector<uint8_t> dst_code_bytes = AllocateForCode(instructions.size());
  memcpy(dst_code_bytes.begin(), instructions.start(), instructions.size());

  std::unique_ptr<WasmCode> code{new WasmCode{
      this, index, dst_code_bytes, stack_slots, tagged_parameter_slots,
      safepoint_table_offset, handler_table_offset, constant_pool_offset,
      code_comments_offset, unpadded_binary_size,
      std::move(protected_instructions), std::move(reloc_info),
      std::move(source_position_table), kind, tier}};

  code->RegisterTrapHandlerData();

  // Note: we do not flush the i-cache here, since the code needs to be
  // relocated anyway. The caller is responsible for flushing the i-cache later.

  return PublishCode(std::move(code));
}
835 :
836 209 : std::vector<WasmCode*> NativeModule::SnapshotCodeTable() const {
837 209 : base::MutexGuard lock(&allocation_mutex_);
838 : WasmCode** start = code_table_.get();
839 209 : WasmCode** end = start + module_->num_declared_functions;
840 209 : return std::vector<WasmCode*>{start, end};
841 : }
842 :
// Returns the code currently installed for the given function index and adds
// it to the innermost {WasmCodeRefScope} to keep it alive for the caller.
WasmCode* NativeModule::GetCode(uint32_t index) const {
  base::MutexGuard guard(&allocation_mutex_);
  DCHECK_LT(index, num_functions());
  DCHECK_LE(module_->num_imported_functions, index);
  // The code table only holds declared (non-imported) functions.
  WasmCode* code = code_table_[index - module_->num_imported_functions];
  WasmCodeRefScope::AddRef(code);
  return code;
}
851 :
852 0 : bool NativeModule::HasCode(uint32_t index) const {
853 0 : base::MutexGuard guard(&allocation_mutex_);
854 : DCHECK_LT(index, num_functions());
855 : DCHECK_LE(module_->num_imported_functions, index);
856 0 : return code_table_[index - module_->num_imported_functions] != nullptr;
857 : }
858 :
// Allocates {jump_table_size} bytes of (zapped) code space and wraps it in an
// anonymous {WasmCode} object of kind kJumpTable, then publishes it.
WasmCode* NativeModule::CreateEmptyJumpTable(uint32_t jump_table_size) {
  // Only call this if we really need a jump table.
  DCHECK_LT(0, jump_table_size);
  Vector<uint8_t> code_space = AllocateForCode(jump_table_size);
  // Fill with a recognizable garbage pattern until the slots get patched.
  ZapCode(reinterpret_cast<Address>(code_space.begin()), code_space.size());
  std::unique_ptr<WasmCode> code{new WasmCode{
      this,                                     // native_module
      WasmCode::kAnonymousFuncIndex,            // index
      code_space,                               // instructions
      0,                                        // stack_slots
      0,                                        // tagged_parameter_slots
      0,                                        // safepoint_table_offset
      0,                                        // handler_table_offset
      jump_table_size,                          // constant_pool_offset
      jump_table_size,                          // code_comments_offset
      jump_table_size,                          // unpadded_binary_size
      OwnedVector<ProtectedInstructionData>{},  // protected_instructions
      OwnedVector<const uint8_t>{},             // reloc_info
      OwnedVector<const uint8_t>{},             // source_pos
      WasmCode::kJumpTable,                     // kind
      ExecutionTier::kNone}};                   // tier
  return PublishCode(std::move(code));
}
882 :
// Carves {size} bytes (rounded up to kCodeAlignment) out of this module's
// reserved code space, reserving more virtual memory if allowed, and commits
// any not-yet-committed pages covering the allocation. Fatals on OOM.
Vector<byte> NativeModule::AllocateForCode(size_t size) {
  base::MutexGuard lock(&allocation_mutex_);
  DCHECK_LT(0, size);
  v8::PageAllocator* page_allocator = GetPlatformPageAllocator();
  // This happens under a lock assumed by the caller.
  size = RoundUp<kCodeAlignment>(size);
  base::AddressRegion code_space = free_code_space_.Allocate(size);
  if (code_space.is_empty()) {
    // The current reservation is exhausted; try to reserve more, preferably
    // directly after the last existing reservation.
    if (!can_request_more_memory_) {
      V8::FatalProcessOutOfMemory(nullptr,
                                  "NativeModule::AllocateForCode reservation");
      UNREACHABLE();
    }

    Address hint = owned_code_space_.empty() ? kNullAddress
                                             : owned_code_space_.back().end();

    VirtualMemory new_mem = engine_->code_manager()->TryAllocate(
        size, reinterpret_cast<void*>(hint));
    if (!new_mem.IsReserved()) {
      V8::FatalProcessOutOfMemory(nullptr,
                                  "NativeModule::AllocateForCode reservation");
      UNREACHABLE();
    }
    // Make the new range discoverable via the code manager's lookup map.
    engine_->code_manager()->AssignRanges(new_mem.address(), new_mem.end(),
                                          this);

    free_code_space_.Merge(new_mem.region());
    owned_code_space_.emplace_back(std::move(new_mem));
    code_space = free_code_space_.Allocate(size);
    DCHECK(!code_space.is_empty());
  }
  const Address page_size = page_allocator->AllocatePageSize();
  Address commit_start = RoundUp(code_space.begin(), page_size);
  Address commit_end = RoundUp(code_space.end(), page_size);
  // {commit_start} will be either code_space.start or the start of the next
  // page. {commit_end} will be the start of the page after the one in which
  // the allocation ends.
  // We start from an aligned start, and we know we allocated vmem in
  // page multiples.
  // We just need to commit what's not committed. The page in which we
  // start is already committed (or we start at the beginning of a page).
  // The end needs to be committed all through the end of the page.
  if (commit_start < commit_end) {
    committed_code_space_.fetch_add(commit_end - commit_start);
    // Committed code cannot grow bigger than maximum code space size.
    DCHECK_LE(committed_code_space_.load(), kMaxWasmCodeMemory);
#if V8_OS_WIN
    // On Windows, we cannot commit a region that straddles different
    // reservations of virtual memory. Because we bump-allocate, and because, if
    // we need more memory, we append that memory at the end of the
    // owned_code_space_ list, we traverse that list in reverse order to find
    // the reservation(s) that guide how to chunk the region to commit.
    for (auto& vmem : base::Reversed(owned_code_space_)) {
      if (commit_end <= vmem.address() || vmem.end() <= commit_start) continue;
      Address start = std::max(commit_start, vmem.address());
      Address end = std::min(commit_end, vmem.end());
      size_t commit_size = static_cast<size_t>(end - start);
      if (!engine_->code_manager()->Commit(start, commit_size)) {
        V8::FatalProcessOutOfMemory(nullptr,
                                    "NativeModule::AllocateForCode commit");
        UNREACHABLE();
      }
      // Opportunistically reduce the commit range. This might terminate the
      // loop early.
      if (commit_start == start) commit_start = end;
      if (commit_end == end) commit_end = start;
      if (commit_start >= commit_end) break;
    }
#else
    if (!engine_->code_manager()->Commit(commit_start,
                                         commit_end - commit_start)) {
      V8::FatalProcessOutOfMemory(nullptr,
                                  "NativeModule::AllocateForCode commit");
      UNREACHABLE();
    }
#endif
  }
  DCHECK(IsAligned(code_space.begin(), kCodeAlignment));
  allocated_code_space_.Merge(code_space);
  generated_code_size_.fetch_add(code_space.size(), std::memory_order_relaxed);

  TRACE_HEAP("Code alloc for %p: %" PRIxPTR ",+%zu\n", this, code_space.begin(),
             size);
  return {reinterpret_cast<byte*>(code_space.begin()), code_space.size()};
}
969 :
namespace {
// Adapter exposing a module's shared wire bytes through the
// {WireBytesStorage} interface used by the compilation pipeline.
class NativeModuleWireBytesStorage final : public WireBytesStorage {
 public:
  // Takes shared ownership of the wire bytes so they outlive compilation.
  explicit NativeModuleWireBytesStorage(
      std::shared_ptr<OwnedVector<const uint8_t>> wire_bytes)
      : wire_bytes_(std::move(wire_bytes)) {}

  // Returns the byte range of a single function body within the wire bytes.
  Vector<const uint8_t> GetCode(WireBytesRef ref) const final {
    return wire_bytes_->as_vector().SubVector(ref.offset(), ref.end_offset());
  }

 private:
  const std::shared_ptr<OwnedVector<const uint8_t>> wire_bytes_;
};
}  // namespace
985 :
986 3435036 : void NativeModule::SetWireBytes(OwnedVector<const uint8_t> wire_bytes) {
987 : auto shared_wire_bytes =
988 : std::make_shared<OwnedVector<const uint8_t>>(std::move(wire_bytes));
989 : wire_bytes_ = shared_wire_bytes;
990 3435036 : if (!shared_wire_bytes->empty()) {
991 2340528 : compilation_state_->SetWireBytesStorage(
992 2340528 : std::make_shared<NativeModuleWireBytesStorage>(
993 2340528 : std::move(shared_wire_bytes)));
994 : }
995 3435036 : }
996 :
// Finds the {WasmCode} object whose instructions contain {pc}, or nullptr.
// {owned_code_} is kept partially sorted; a counter tracks how long its
// sorted prefix is, so most lookups succeed with one binary search.
WasmCode* NativeModule::Lookup(Address pc) const {
  base::MutexGuard lock(&allocation_mutex_);
  if (owned_code_.empty()) return nullptr;
  // First update the sorted portion counter.
  if (owned_code_sorted_portion_ == 0) ++owned_code_sorted_portion_;
  // Cheaply extend the sorted prefix over entries that happen to be in order.
  while (owned_code_sorted_portion_ < owned_code_.size() &&
         owned_code_[owned_code_sorted_portion_ - 1]->instruction_start() <=
             owned_code_[owned_code_sorted_portion_]->instruction_start()) {
    ++owned_code_sorted_portion_;
  }
  // Execute at most two rounds: First check whether the {pc} is within the
  // sorted portion of {owned_code_}. If it's not, then sort the whole vector
  // and retry.
  while (true) {
    auto iter =
        std::upper_bound(owned_code_.begin(), owned_code_.end(), pc,
                         [](Address pc, const std::unique_ptr<WasmCode>& code) {
                           DCHECK_NE(kNullAddress, pc);
                           DCHECK_NOT_NULL(code);
                           return pc < code->instruction_start();
                         });
    if (iter != owned_code_.begin()) {
      --iter;
      WasmCode* candidate = iter->get();
      DCHECK_NOT_NULL(candidate);
      if (candidate->contains(pc)) {
        // Keep the result alive for the caller.
        WasmCodeRefScope::AddRef(candidate);
        return candidate;
      }
    }
    // If everything is already sorted, the pc is genuinely not here.
    if (owned_code_sorted_portion_ == owned_code_.size()) return nullptr;
    std::sort(owned_code_.begin(), owned_code_.end(),
              [](const std::unique_ptr<WasmCode>& code1,
                 const std::unique_ptr<WasmCode>& code2) {
                return code1->instruction_start() < code2->instruction_start();
              });
    owned_code_sorted_portion_ = owned_code_.size();
  }
}
1036 :
// Returns the call target for {func_index}: the address of its jump table
// slot (indirection keeps redirection, e.g. to the interpreter, possible).
Address NativeModule::GetCallTargetForFunction(uint32_t func_index) const {
  // TODO(clemensh): Measure performance win of returning instruction start
  // directly if we have turbofan code. Downside: Redirecting functions (e.g.
  // for debugging) gets much harder.

  // Return the jump table slot for that function index.
  DCHECK_NOT_NULL(jump_table_);
  uint32_t slot_idx = func_index - module_->num_imported_functions;
  uint32_t slot_offset = JumpTableAssembler::SlotIndexToOffset(slot_idx);
  DCHECK_LT(slot_offset, jump_table_->instructions().size());
  return jump_table_->instruction_start() + slot_offset;
}
1049 :
1050 40656 : uint32_t NativeModule::GetFunctionIndexFromJumpTableSlot(
1051 : Address slot_address) const {
1052 : DCHECK(is_jump_table_slot(slot_address));
1053 : uint32_t slot_offset =
1054 81312 : static_cast<uint32_t>(slot_address - jump_table_->instruction_start());
1055 : uint32_t slot_idx = JumpTableAssembler::SlotOffsetToIndex(slot_offset);
1056 : DCHECK_LT(slot_idx, module_->num_declared_functions);
1057 40656 : return module_->num_imported_functions + slot_idx;
1058 : }
1059 :
// Turns off trap-handler-based bounds checks for this module. All existing
// code was compiled assuming the trap handler, so the code table is cleared;
// callers must recompile/re-add all functions afterwards.
void NativeModule::DisableTrapHandler() {
  // Switch {use_trap_handler_} from true to false.
  DCHECK(use_trap_handler_);
  use_trap_handler_ = kNoTrapHandler;

  // Clear the code table (just to increase the chances to hit an error if we
  // forget to re-add all code).
  uint32_t num_wasm_functions = module_->num_declared_functions;
  memset(code_table_.get(), 0, num_wasm_functions * sizeof(WasmCode*));

  // TODO(clemensh): Actually free the owned code, such that the memory can be
  // recycled.
}
1073 :
// Reverse-maps a runtime stub entry address to the stub's name (for logging
// and disassembly); returns "<unknown>" if no stub entry matches.
const char* NativeModule::GetRuntimeStubName(Address runtime_stub_entry) const {
#define RETURN_NAME(Name)                                               \
  if (runtime_stub_entries_[WasmCode::k##Name] == runtime_stub_entry) { \
    return #Name;                                                       \
  }
#define RETURN_NAME_TRAP(Name) RETURN_NAME(ThrowWasm##Name)
  WASM_RUNTIME_STUB_LIST(RETURN_NAME, RETURN_NAME_TRAP)
#undef RETURN_NAME_TRAP
#undef RETURN_NAME
  return "<unknown>";
}
1085 :
NativeModule::~NativeModule() {
  TRACE_HEAP("Deleting native module: %p\n", reinterpret_cast<void*>(this));
  // Cancel all background compilation before resetting any field of the
  // NativeModule or freeing anything.
  compilation_state_->AbortCompilation();
  // Unregister from the engine; this also releases the reserved code space.
  engine_->FreeNativeModule(this);
  // Free the import wrapper cache before releasing the {WasmCode} objects in
  // {owned_code_}. The destructor of {WasmImportWrapperCache} still needs to
  // decrease reference counts on the {WasmCode} objects.
  import_wrapper_cache_.reset();
}
1097 :
// {max_committed} bounds the total committed code space across all modules;
// the "critical" threshold (half of it) later triggers memory pressure
// notifications in {NewNativeModule}.
WasmCodeManager::WasmCodeManager(WasmMemoryTracker* memory_tracker,
                                 size_t max_committed)
    : memory_tracker_(memory_tracker),
      max_committed_code_space_(max_committed),
      total_committed_code_space_(0),
      critical_committed_code_space_(max_committed / 2) {
  DCHECK_LE(max_committed, kMaxWasmCodeMemory);
}
1106 :
// Accounts for and commits {size} bytes of code memory at {start}. Returns
// false if the global committed-code budget would be exceeded or the
// permission change fails.
bool WasmCodeManager::Commit(Address start, size_t size) {
  // TODO(v8:8462) Remove eager commit once perf supports remapping.
  if (FLAG_perf_prof) return true;
  DCHECK(IsAligned(start, AllocatePageSize()));
  DCHECK(IsAligned(size, AllocatePageSize()));
  // Reserve the size. Use CAS loop to avoid overflow on
  // {total_committed_code_space_}.
  size_t old_value = total_committed_code_space_.load();
  while (true) {
    DCHECK_GE(max_committed_code_space_, old_value);
    if (size > max_committed_code_space_ - old_value) return false;
    if (total_committed_code_space_.compare_exchange_weak(old_value,
                                                          old_value + size)) {
      break;
    }
  }
  // With write-protection enabled, code pages start writable and are flipped
  // to executable later; otherwise they are RWX from the start.
  PageAllocator::Permission permission = FLAG_wasm_write_protect_code_memory
                                             ? PageAllocator::kReadWrite
                                             : PageAllocator::kReadWriteExecute;

  bool ret =
      SetPermissions(GetPlatformPageAllocator(), start, size, permission);
  TRACE_HEAP("Setting rw permissions for %p:%p\n",
             reinterpret_cast<void*>(start),
             reinterpret_cast<void*>(start + size));

  if (!ret) {
    // Highly unlikely. Roll back the accounting done above.
    total_committed_code_space_.fetch_sub(size);
    return false;
  }
  return true;
}
1140 :
1141 0 : void WasmCodeManager::AssignRanges(Address start, Address end,
1142 : NativeModule* native_module) {
1143 0 : base::MutexGuard lock(&native_modules_mutex_);
1144 0 : lookup_map_.insert(std::make_pair(start, std::make_pair(end, native_module)));
1145 0 : }
1146 :
// Tries to reserve {size} bytes (page-rounded) of virtual memory for code,
// near {hint} if given. Returns an unreserved {VirtualMemory} on failure.
VirtualMemory WasmCodeManager::TryAllocate(size_t size, void* hint) {
  v8::PageAllocator* page_allocator = GetPlatformPageAllocator();
  DCHECK_GT(size, 0);
  size = RoundUp(size, page_allocator->AllocatePageSize());
  // Charge the reservation against the process-wide wasm address space limit.
  if (!memory_tracker_->ReserveAddressSpace(size,
                                            WasmMemoryTracker::kHardLimit)) {
    return {};
  }
  if (hint == nullptr) hint = page_allocator->GetRandomMmapAddr();

  VirtualMemory mem(page_allocator, size, hint,
                    page_allocator->AllocatePageSize());
  if (!mem.IsReserved()) {
    // Undo the address-space accounting done above.
    memory_tracker_->ReleaseReservation(size);
    return {};
  }
  TRACE_HEAP("VMem alloc: %p:%p (%zu)\n",
             reinterpret_cast<void*>(mem.address()),
             reinterpret_cast<void*>(mem.end()), mem.size());

  // TODO(v8:8462) Remove eager commit once perf supports remapping.
  if (FLAG_perf_prof) {
    SetPermissions(GetPlatformPageAllocator(), mem.address(), mem.size(),
                   PageAllocator::kReadWriteExecute);
  }
  return mem;
}
1174 :
1175 14 : void WasmCodeManager::SetMaxCommittedMemoryForTesting(size_t limit) {
1176 : // This has to be set before committing any memory.
1177 : DCHECK_EQ(0, total_committed_code_space_.load());
1178 14 : max_committed_code_space_ = limit;
1179 14 : critical_committed_code_space_.store(limit / 2);
1180 14 : }
1181 :
1182 : // static
1183 1387302 : size_t WasmCodeManager::EstimateNativeModuleCodeSize(const WasmModule* module) {
1184 : constexpr size_t kCodeSizeMultiplier = 4;
1185 : constexpr size_t kCodeOverhead = 32; // for prologue, stack check, ...
1186 : constexpr size_t kStaticCodeSize = 512; // runtime stubs, ...
1187 : constexpr size_t kImportSize = 64 * kSystemPointerSize;
1188 :
1189 : size_t estimate = kStaticCodeSize;
1190 2140512 : for (auto& function : module->functions) {
1191 753210 : estimate += kCodeOverhead + kCodeSizeMultiplier * function.code.length();
1192 : }
1193 : estimate +=
1194 2774604 : JumpTableAssembler::SizeForNumberOfSlots(module->num_declared_functions);
1195 1387302 : estimate += kImportSize * module->num_imported_functions;
1196 :
1197 1387302 : return estimate;
1198 : }
1199 :
1200 : // static
1201 1239676 : size_t WasmCodeManager::EstimateNativeModuleNonCodeSize(
1202 : const WasmModule* module) {
1203 1239676 : size_t wasm_module_estimate = EstimateStoredSize(module);
1204 :
1205 1239676 : uint32_t num_wasm_functions = module->num_declared_functions;
1206 :
1207 : // TODO(wasm): Include wire bytes size.
1208 : size_t native_module_estimate =
1209 : sizeof(NativeModule) + /* NativeModule struct */
1210 1239676 : (sizeof(WasmCode*) * num_wasm_functions) + /* code table size */
1211 1239676 : (sizeof(WasmCode) * num_wasm_functions); /* code object size */
1212 :
1213 1239676 : return wasm_module_estimate + native_module_estimate;
1214 : }
1215 :
// Creates a new {NativeModule}: raises memory pressure if the committed-code
// budget is nearly exhausted, reserves code space (retrying after a GC),
// constructs the module, and registers its range in the lookup map.
std::shared_ptr<NativeModule> WasmCodeManager::NewNativeModule(
    WasmEngine* engine, Isolate* isolate, const WasmFeatures& enabled,
    size_t code_size_estimate, bool can_request_more,
    std::shared_ptr<const WasmModule> module) {
  DCHECK_EQ(this, isolate->wasm_engine()->code_manager());
  if (total_committed_code_space_.load() >
      critical_committed_code_space_.load()) {
    // Ask the embedder to free memory, and move the critical threshold
    // halfway between the current commitment and the hard limit.
    (reinterpret_cast<v8::Isolate*>(isolate))
        ->MemoryPressureNotification(MemoryPressureLevel::kCritical);
    size_t committed = total_committed_code_space_.load();
    DCHECK_GE(max_committed_code_space_, committed);
    critical_committed_code_space_.store(
        committed + (max_committed_code_space_ - committed) / 2);
  }

  // If the code must be contiguous, reserve enough address space up front.
  size_t code_vmem_size =
      kRequiresCodeRange ? kMaxWasmCodeMemory : code_size_estimate;
  // Try up to two times; getting rid of dead JSArrayBuffer allocations might
  // require two GCs because the first GC maybe incremental and may have
  // floating garbage.
  static constexpr int kAllocationRetries = 2;
  VirtualMemory code_space;
  for (int retries = 0;; ++retries) {
    code_space = TryAllocate(code_vmem_size);
    if (code_space.IsReserved()) break;
    if (retries == kAllocationRetries) {
      V8::FatalProcessOutOfMemory(isolate, "WasmCodeManager::NewNativeModule");
      UNREACHABLE();
    }
    // Run one GC, then try the allocation again.
    isolate->heap()->MemoryPressureNotification(MemoryPressureLevel::kCritical,
                                                true);
  }

  Address start = code_space.address();
  size_t size = code_space.size();
  Address end = code_space.end();
  std::shared_ptr<NativeModule> ret;
  // The NativeModule constructor publishes itself through {ret}.
  new NativeModule(engine, enabled, can_request_more, std::move(code_space),
                   std::move(module), isolate->async_counters(), &ret);
  // The constructor initialized the shared_ptr.
  DCHECK_NOT_NULL(ret);
  TRACE_HEAP("New NativeModule %p: Mem: %" PRIuPTR ",+%zu\n", ret.get(), start,
             size);

#if defined(V8_OS_WIN_X64)
  if (win64_unwindinfo::CanRegisterUnwindInfoForNonABICompliantCodeRange() &&
      FLAG_win64_unwinding_info) {
    win64_unwindinfo::RegisterNonABICompliantCodeRange(
        reinterpret_cast<void*>(start), size);
  }
#endif

  // Make the new module's code range discoverable via pc lookup.
  base::MutexGuard lock(&native_modules_mutex_);
  lookup_map_.insert(std::make_pair(start, std::make_pair(end, ret.get())));
  return ret;
}
1274 :
// Flips this module's code memory between writable and executable when code
// write-protection is enabled (no-op otherwise, apart from tracking the
// flag). Returns false if changing page permissions failed.
bool NativeModule::SetExecutable(bool executable) {
  if (is_executable_ == executable) return true;
  TRACE_HEAP("Setting module %p as executable: %d.\n", this, executable);

  v8::PageAllocator* page_allocator = GetPlatformPageAllocator();

  if (FLAG_wasm_write_protect_code_memory) {
    PageAllocator::Permission permission =
        executable ? PageAllocator::kReadExecute : PageAllocator::kReadWrite;
#if V8_OS_WIN
    // On windows, we need to switch permissions per separate virtual memory
    // reservation. This is really just a problem when the NativeModule is
    // growable (meaning can_request_more_memory_). That's 32-bit in production,
    // or unittests.
    // For now, in that case, we commit at reserved memory granularity.
    // Technically, that may be a waste, because we may reserve more than we
    // use. On 32-bit though, the scarce resource is the address space -
    // committed or not.
    if (can_request_more_memory_) {
      for (auto& vmem : owned_code_space_) {
        if (!SetPermissions(page_allocator, vmem.address(), vmem.size(),
                            permission)) {
          return false;
        }
        TRACE_HEAP("Set %p:%p to executable:%d\n", vmem.address(), vmem.end(),
                   executable);
      }
      is_executable_ = executable;
      return true;
    }
#endif
    for (auto& region : allocated_code_space_.regions()) {
      // allocated_code_space_ is fine-grained, so we need to
      // page-align it.
      size_t region_size =
          RoundUp(region.size(), page_allocator->AllocatePageSize());
      if (!SetPermissions(page_allocator, region.begin(), region_size,
                          permission)) {
        return false;
      }
      TRACE_HEAP("Set %p:%p to executable:%d\n",
                 reinterpret_cast<void*>(region.begin()),
                 reinterpret_cast<void*>(region.end()), executable);
    }
  }
  is_executable_ = executable;
  return true;
}
1323 :
1324 697175 : void NativeModule::SampleCodeSize(
1325 : Counters* counters, NativeModule::CodeSamplingTime sampling_time) const {
1326 : size_t code_size = sampling_time == kSampling
1327 : ? committed_code_space()
1328 697175 : : generated_code_size_.load(std::memory_order_relaxed);
1329 697175 : int code_size_mb = static_cast<int>(code_size / MB);
1330 : Histogram* histogram = nullptr;
1331 697175 : switch (sampling_time) {
1332 : case kAfterBaseline:
1333 : histogram = counters->wasm_module_code_size_mb_after_baseline();
1334 2197 : break;
1335 : case kAfterTopTier:
1336 : histogram = counters->wasm_module_code_size_mb_after_top_tier();
1337 154 : break;
1338 : case kSampling:
1339 : histogram = counters->wasm_module_code_size_mb();
1340 694824 : break;
1341 : }
1342 697175 : histogram->AddSample(code_size_mb);
1343 697175 : }
1344 :
1345 739906 : WasmCode* NativeModule::AddCompiledCode(WasmCompilationResult result) {
1346 1479812 : return AddCompiledCode({&result, 1})[0];
1347 : }
1348 :
// Adds a batch of compilation results: allocates one contiguous code-space
// chunk for all of them, copies/relocates each result into its slice, then
// publishes everything under a single lock acquisition.
std::vector<WasmCode*> NativeModule::AddCompiledCode(
    Vector<WasmCompilationResult> results) {
  DCHECK(!results.empty());
  // First, allocate code space for all the results.
  size_t total_code_space = 0;
  for (auto& result : results) {
    DCHECK(result.succeeded());
    total_code_space += RoundUp<kCodeAlignment>(result.code_desc.instr_size);
  }
  Vector<byte> code_space = AllocateForCode(total_code_space);

  std::vector<std::unique_ptr<WasmCode>> generated_code;
  generated_code.reserve(results.size());

  // Now copy the generated code into the code space and relocate it.
  for (auto& result : results) {
    DCHECK_EQ(result.code_desc.buffer, result.instr_buffer.get());
    size_t code_size = RoundUp<kCodeAlignment>(result.code_desc.instr_size);
    // Carve this result's aligned slice off the front of the chunk.
    Vector<byte> this_code_space = code_space.SubVector(0, code_size);
    code_space += code_size;
    generated_code.emplace_back(AddCodeWithCodeSpace(
        result.func_index, result.code_desc, result.frame_slot_count,
        result.tagged_parameter_slots, std::move(result.protected_instructions),
        std::move(result.source_positions),
        GetCodeKindForExecutionTier(result.result_tier), result.result_tier,
        this_code_space));
  }
  DCHECK_EQ(0, code_space.size());

  // Under the {allocation_mutex_}, publish the code. The published code is put
  // into the top-most surrounding {WasmCodeRefScope} by {PublishCodeLocked}.
  std::vector<WasmCode*> code_vector;
  code_vector.reserve(results.size());
  {
    base::MutexGuard lock(&allocation_mutex_);
    for (auto& result : generated_code)
      code_vector.push_back(PublishCodeLocked(std::move(result)));
  }

  return code_vector;
}
1390 :
// Intentionally a no-op for now: code objects are only reclaimed together
// with the whole {NativeModule}.
void NativeModule::FreeCode(Vector<WasmCode* const> codes) {
  // TODO(clemensh): Implement.
}
1394 :
// Releases all virtual memory owned by {native_module}: unregisters lookup
// ranges (and Win64 unwind info), returns address-space reservations to the
// memory tracker, and subtracts the module's committed size from the global
// budget.
void WasmCodeManager::FreeNativeModule(NativeModule* native_module) {
  base::MutexGuard lock(&native_modules_mutex_);
  TRACE_HEAP("Freeing NativeModule %p\n", native_module);
  for (auto& code_space : native_module->owned_code_space_) {
    DCHECK(code_space.IsReserved());
    TRACE_HEAP("VMem Release: %" PRIxPTR ":%" PRIxPTR " (%zu)\n",
               code_space.address(), code_space.end(), code_space.size());

#if defined(V8_OS_WIN_X64)
    if (win64_unwindinfo::CanRegisterUnwindInfoForNonABICompliantCodeRange() &&
        FLAG_win64_unwinding_info) {
      win64_unwindinfo::UnregisterNonABICompliantCodeRange(
          reinterpret_cast<void*>(code_space.address()));
    }
#endif

    lookup_map_.erase(code_space.address());
    memory_tracker_->ReleaseReservation(code_space.size());
    code_space.Free();
    DCHECK(!code_space.IsReserved());
  }
  native_module->owned_code_space_.clear();

  // Give the committed bytes back to the global budget.
  size_t code_size = native_module->committed_code_space_.load();
  DCHECK(IsAligned(code_size, AllocatePageSize()));
  size_t old_committed = total_committed_code_space_.fetch_sub(code_size);
  DCHECK_LE(code_size, old_committed);
  USE(old_committed);
}
1424 :
1425 56267025 : NativeModule* WasmCodeManager::LookupNativeModule(Address pc) const {
1426 56267025 : base::MutexGuard lock(&native_modules_mutex_);
1427 56267368 : if (lookup_map_.empty()) return nullptr;
1428 :
1429 : auto iter = lookup_map_.upper_bound(pc);
1430 13876149 : if (iter == lookup_map_.begin()) return nullptr;
1431 : --iter;
1432 13829360 : Address region_start = iter->first;
1433 13829360 : Address region_end = iter->second.first;
1434 13829360 : NativeModule* candidate = iter->second.second;
1435 :
1436 : DCHECK_NOT_NULL(candidate);
1437 13829360 : return region_start <= pc && pc < region_end ? candidate : nullptr;
1438 : }
1439 :
1440 56218287 : WasmCode* WasmCodeManager::LookupCode(Address pc) const {
1441 56218287 : NativeModule* candidate = LookupNativeModule(pc);
1442 56218595 : return candidate ? candidate->Lookup(pc) : nullptr;
1443 : }
1444 :
1445 : // TODO(v8:7424): Code protection scopes are not yet supported with shared code
1446 : // enabled and need to be revisited to work with --wasm-shared-code as well.
// RAII scope that makes the module's code writable while it is alive (only
// when --wasm-write-protect-code-memory is enabled). Nestable: only the
// outermost scope flips the permissions.
NativeModuleModificationScope::NativeModuleModificationScope(
    NativeModule* native_module)
    : native_module_(native_module) {
  if (FLAG_wasm_write_protect_code_memory && native_module_ &&
      (native_module_->modification_scope_depth_++) == 0) {
    bool success = native_module_->SetExecutable(false);
    CHECK(success);
  }
}
1456 :
// Counterpart of the constructor: the outermost scope flips the code back to
// executable when it ends.
NativeModuleModificationScope::~NativeModuleModificationScope() {
  if (FLAG_wasm_write_protect_code_memory && native_module_ &&
      (native_module_->modification_scope_depth_--) == 1) {
    bool success = native_module_->SetExecutable(true);
    CHECK(success);
  }
}
1464 :
namespace {
// Innermost active {WasmCodeRefScope} of the current thread, or nullptr if
// none is open.
thread_local WasmCodeRefScope* current_code_refs_scope = nullptr;
}  // namespace
1468 :
// Pushes this scope onto the current thread's scope stack, remembering the
// previously innermost scope for restoration in the destructor.
WasmCodeRefScope::WasmCodeRefScope()
    : previous_scope_(current_code_refs_scope) {
  current_code_refs_scope = this;
}
1473 :
1474 122698678 : WasmCodeRefScope::~WasmCodeRefScope() {
1475 : DCHECK_EQ(this, current_code_refs_scope);
1476 61349320 : current_code_refs_scope = previous_scope_;
1477 : std::vector<WasmCode*> code_ptrs;
1478 61349320 : code_ptrs.reserve(code_ptrs_.size());
1479 : code_ptrs.assign(code_ptrs_.begin(), code_ptrs_.end());
1480 61349292 : WasmCode::DecrementRefCount(VectorOf(code_ptrs));
1481 61349358 : }
1482 :
1483 : // static
1484 19586728 : void WasmCodeRefScope::AddRef(WasmCode* code) {
1485 19586728 : WasmCodeRefScope* current_scope = current_code_refs_scope;
1486 : DCHECK_NOT_NULL(current_scope);
1487 : auto entry = current_scope->code_ptrs_.insert(code);
1488 : // If we added a new entry, increment the ref counter.
1489 19586703 : if (entry.second) code->IncRef();
1490 19586703 : }
1491 :
1492 : } // namespace wasm
1493 : } // namespace internal
1494 122004 : } // namespace v8
1495 : #undef TRACE_HEAP
|