// Copyright 2018 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/wasm/jump-table-assembler.h"

#include "src/assembler-inl.h"
#include "src/macro-assembler-inl.h"

namespace v8 {
namespace internal {
namespace wasm {

// The implementation is compact enough to implement it inline here. If it gets
// much bigger, we might want to split it into a separate file per architecture.
#if V8_TARGET_ARCH_X64
void JumpTableAssembler::EmitLazyCompileJumpSlot(uint32_t func_index,
                                                 Address lazy_compile_target) {
  // TODO(clemensh): Try more efficient sequences.
  // Alternative 1:
  // [header]:  mov r10, [lazy_compile_target]
  //            jmp r10
  // [slot 0]:  push [0]
  //            jmp [header]  // pc-relative --> slot size: 10 bytes
  //
  // Alternative 2:
  // [header]:  lea r10, [rip - [header]]
  //            shr r10, 3  // compute index from offset
  //            push r10
  //            mov r10, [lazy_compile_target]
  //            jmp r10
  // [slot 0]:  call [header]
  //            ret  // -> slot size: 5 bytes

  // Use a push, because mov to an extended register takes 6 bytes.
  pushq(Immediate(func_index));                           // max 5 bytes
  movq(kScratchRegister, uint64_t{lazy_compile_target});  // max 10 bytes
  jmp(kScratchRegister);                                  // 3 bytes

  PatchConstPool();  // force patching entries for partial const pool
}
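
// Taken together, the three instructions above occupy at most 5 + 10 + 3 = 18
// bytes per lazy-compile slot; this is just the sum of the per-instruction
// maxima noted in the comments above.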

void JumpTableAssembler::EmitRuntimeStubSlot(Address builtin_target) {
  JumpToInstructionStream(builtin_target);
}

void JumpTableAssembler::EmitJumpSlot(Address target) {
  // On x64, all code is allocated within a single code section, so we can use
  // relative jumps.
  static_assert(kMaxWasmCodeMemory <= size_t{2} * GB, "can use relative jump");
  intptr_t displacement = static_cast<intptr_t>(
      reinterpret_cast<byte*>(target) - pc_ - kNearJmpInstrSize);
  near_jmp(displacement, RelocInfo::NONE);
}
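
// Worked example (illustrative addresses, not taken from the source): with the
// jump slot emitted at pc 0x1000 and {target} at 0x2000, the near jmp encodes
// the 32-bit displacement 0x2000 - 0x1000 - 5 = 0xFFB, i.e. the distance from
// the end of the 5-byte jmp instruction (opcode E9 plus rel32) to the target.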

void JumpTableAssembler::NopBytes(int bytes) {
  DCHECK_LE(0, bytes);
  Nop(bytes);
}

#elif V8_TARGET_ARCH_IA32
void JumpTableAssembler::EmitLazyCompileJumpSlot(uint32_t func_index,
                                                 Address lazy_compile_target) {
  mov(kWasmCompileLazyFuncIndexRegister, func_index);  // 5 bytes
  jmp(lazy_compile_target, RelocInfo::NONE);           // 5 bytes
}

void JumpTableAssembler::EmitRuntimeStubSlot(Address builtin_target) {
  JumpToInstructionStream(builtin_target);
}

void JumpTableAssembler::EmitJumpSlot(Address target) {
  jmp(target, RelocInfo::NONE);
}

void JumpTableAssembler::NopBytes(int bytes) {
  DCHECK_LE(0, bytes);
  Nop(bytes);
}

#elif V8_TARGET_ARCH_ARM
void JumpTableAssembler::EmitLazyCompileJumpSlot(uint32_t func_index,
                                                 Address lazy_compile_target) {
  // Load the function index into a register.
  // This generates [movw, movt] on ARMv7 and later, and [ldr, constant pool
  // marker, constant] on ARMv6.
  Move32BitImmediate(kWasmCompileLazyFuncIndexRegister, Operand(func_index));
  // EmitJumpSlot emits either [b], [movw, movt, mov] (ARMv7+), or [ldr,
  // constant].
  // In total, this is <= 5 instructions on all architectures.
  // TODO(arm): Optimize this for code size; lazy compile is not performance
  // critical, as it's only executed once per function.
  EmitJumpSlot(lazy_compile_target);
}

void JumpTableAssembler::EmitRuntimeStubSlot(Address builtin_target) {
  JumpToInstructionStream(builtin_target);
  CheckConstPool(true, false);  // force emit of const pool
}

void JumpTableAssembler::EmitJumpSlot(Address target) {
  // Note that {Move32BitImmediate} emits [ldr, constant] for the relocation
  // mode used below; we need this to allow concurrent patching of this slot.
  Move32BitImmediate(pc, Operand(target, RelocInfo::WASM_CALL));
  CheckConstPool(true, false);  // force emit of const pool
}
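
// Why the ldr-literal form matters here: the jump target then lives in a
// single aligned 32-bit constant-pool word, so patching the slot is a single
// word store. A concurrently executing thread observes either the old or the
// new target, never a torn value, which is what makes concurrent patching of
// this slot safe.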

void JumpTableAssembler::NopBytes(int bytes) {
  DCHECK_LE(0, bytes);
  DCHECK_EQ(0, bytes % kInstrSize);
  for (; bytes > 0; bytes -= kInstrSize) {
    nop();
  }
}

#elif V8_TARGET_ARCH_ARM64
void JumpTableAssembler::EmitLazyCompileJumpSlot(uint32_t func_index,
                                                 Address lazy_compile_target) {
  Mov(kWasmCompileLazyFuncIndexRegister.W(), func_index);  // max. 2 instr
  Jump(lazy_compile_target, RelocInfo::NONE);              // 1 instr
}

void JumpTableAssembler::EmitRuntimeStubSlot(Address builtin_target) {
  JumpToInstructionStream(builtin_target);
  CheckConstPool(true, false);  // force emit of const pool
}

void JumpTableAssembler::EmitJumpSlot(Address target) {
  // TODO(wasm): Currently this is guaranteed to be a {near_call} and hence is
  // patchable concurrently. Once {kMaxWasmCodeMemory} is raised on ARM64, make
  // sure concurrent patching is still supported.
  DCHECK(TurboAssembler::IsNearCallOffset(
      (reinterpret_cast<byte*>(target) - pc_) / kInstrSize));

  Jump(target, RelocInfo::NONE);
}
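
// Background on the DCHECK above: an ARM64 unconditional branch encodes a
// signed 26-bit instruction offset, giving a reach of +-128 MB. The check thus
// asserts that {kMaxWasmCodeMemory} stays small enough for the slot to be a
// single near branch, which is what keeps it patchable as one instruction.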

void JumpTableAssembler::NopBytes(int bytes) {
  DCHECK_LE(0, bytes);
  DCHECK_EQ(0, bytes % kInstrSize);
  for (; bytes > 0; bytes -= kInstrSize) {
    nop();
  }
}

#elif V8_TARGET_ARCH_S390X
void JumpTableAssembler::EmitLazyCompileJumpSlot(uint32_t func_index,
                                                 Address lazy_compile_target) {
  // Load the function index into r7 (6 bytes).
  lgfi(kWasmCompileLazyFuncIndexRegister, Operand(func_index));
  // Jump to {lazy_compile_target} (6 or 12 bytes).
  mov(r1, Operand(lazy_compile_target));
  b(r1);  // 2 bytes
}
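
// The 6-vs-12-byte variance noted above presumably comes from {mov} emitting
// one 6-byte extended-immediate instruction when the upper 32 bits of the
// target are zero, and a pair of them (high and low halves) otherwise.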

void JumpTableAssembler::EmitRuntimeStubSlot(Address builtin_target) {
  JumpToInstructionStream(builtin_target);
}

void JumpTableAssembler::EmitJumpSlot(Address target) {
  mov(r1, Operand(target));
  b(r1);
}

void JumpTableAssembler::NopBytes(int bytes) {
  DCHECK_LE(0, bytes);
  DCHECK_EQ(0, bytes % 2);
  for (; bytes > 0; bytes -= 2) {
    nop(0);
  }
}

#elif V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
void JumpTableAssembler::EmitLazyCompileJumpSlot(uint32_t func_index,
                                                 Address lazy_compile_target) {
  li(kWasmCompileLazyFuncIndexRegister, func_index);  // max. 2 instr
  // Jump produces max. 4 instructions on 32-bit platforms and max. 6
  // instructions on 64-bit platforms.
  Jump(lazy_compile_target, RelocInfo::NONE);
}

void JumpTableAssembler::EmitRuntimeStubSlot(Address builtin_target) {
  JumpToInstructionStream(builtin_target);
}

void JumpTableAssembler::EmitJumpSlot(Address target) {
  Jump(target, RelocInfo::NONE);
}

void JumpTableAssembler::NopBytes(int bytes) {
  DCHECK_LE(0, bytes);
  DCHECK_EQ(0, bytes % kInstrSize);
  for (; bytes > 0; bytes -= kInstrSize) {
    nop();
  }
}

#elif V8_TARGET_ARCH_PPC64
void JumpTableAssembler::EmitLazyCompileJumpSlot(uint32_t func_index,
                                                 Address lazy_compile_target) {
  // Load the function index into a register (max. 5 instructions).
  mov(kWasmCompileLazyFuncIndexRegister, Operand(func_index));
  // Jump to {lazy_compile_target} (max. 5 instructions).
  mov(r0, Operand(lazy_compile_target));
  mtctr(r0);
  bctr();
}

void JumpTableAssembler::EmitRuntimeStubSlot(Address builtin_target) {
  JumpToInstructionStream(builtin_target);
}

void JumpTableAssembler::EmitJumpSlot(Address target) {
  mov(r0, Operand(target));
  mtctr(r0);
  bctr();
}
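
// PPC has no branch-to-GPR instruction, so an indirect jump goes through a
// special-purpose register: {mtctr} moves the target into the count register,
// and {bctr} branches to it.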

void JumpTableAssembler::NopBytes(int bytes) {
  DCHECK_LE(0, bytes);
  DCHECK_EQ(0, bytes % 4);
  for (; bytes > 0; bytes -= 4) {
    nop(0);
  }
}

#else
void JumpTableAssembler::EmitLazyCompileJumpSlot(uint32_t func_index,
                                                 Address lazy_compile_target) {
  UNIMPLEMENTED();
}

void JumpTableAssembler::EmitRuntimeStubSlot(Address builtin_target) {
  UNIMPLEMENTED();
}

void JumpTableAssembler::EmitJumpSlot(Address target) { UNIMPLEMENTED(); }

void JumpTableAssembler::NopBytes(int bytes) {
  DCHECK_LE(0, bytes);
  UNIMPLEMENTED();
}
#endif

}  // namespace wasm
}  // namespace internal
}  // namespace v8