Line data Source code
1 : // Copyright 2018 the V8 project authors. All rights reserved.
2 : // Use of this source code is governed by a BSD-style license that can be
3 : // found in the LICENSE file.
4 :
5 : #include "src/wasm/jump-table-assembler.h"
6 :
7 : #include "src/assembler-inl.h"
8 : #include "src/macro-assembler-inl.h"
9 :
10 : namespace v8 {
11 : namespace internal {
12 : namespace wasm {
13 :
14 : // The implementation is compact enough to implement it inline here. If it gets
15 : // much bigger, we might want to split it in a separate file per architecture.
16 : #if V8_TARGET_ARCH_X64
// Emit one lazy-compile slot for function {func_index}: push the function
// index onto the stack, then jump indirectly to {lazy_compile_target}
// through the scratch register.
void JumpTableAssembler::EmitLazyCompileJumpSlot(uint32_t func_index,
                                                 Address lazy_compile_target) {
  // TODO(clemensh): Try more efficient sequences.
  // Alternative 1:
  // [header]:  mov r10, [lazy_compile_target]
  //            jmp r10
  // [slot 0]:  push [0]
  //            jmp [header]  // pc-relative --> slot size: 10 bytes
  //
  // Alternative 2:
  // [header]:  lea r10, [rip - [header]]
  //            shr r10, 3  // compute index from offset
  //            push r10
  //            mov r10, [lazy_compile_target]
  //            jmp r10
  // [slot 0]:  call [header]
  //            ret   // -> slot size: 5 bytes

  // Use a push, because mov to an extended register takes 6 bytes.
  pushq(Immediate(func_index));                           // max 5 bytes
  movq(kScratchRegister, uint64_t{lazy_compile_target});  // max 10 bytes
  jmp(kScratchRegister);                                   // 3 bytes

  PatchConstPool();  // force patching entries for partial const pool
}
42 :
// Emit a slot that forwards to the runtime stub at {builtin_target} via the
// shared far-jump helper.
void JumpTableAssembler::EmitRuntimeStubSlot(Address builtin_target) {
  JumpToInstructionStream(builtin_target);
}
46 :
47 1468784 : void JumpTableAssembler::EmitJumpSlot(Address target) {
48 1468784 : movq(kScratchRegister, static_cast<uint64_t>(target));
49 1468786 : jmp(kScratchRegister);
50 1468786 : }
51 :
// Pad the current slot with exactly {bytes} bytes of nop filler.
void JumpTableAssembler::NopBytes(int bytes) {
  DCHECK_LE(0, bytes);
  Nop(bytes);
}
56 :
57 : #elif V8_TARGET_ARCH_IA32
// Emit one lazy-compile slot: load the function index into the dedicated
// register, then jump directly to {lazy_compile_target}.
void JumpTableAssembler::EmitLazyCompileJumpSlot(uint32_t func_index,
                                                 Address lazy_compile_target) {
  mov(kWasmCompileLazyFuncIndexRegister, func_index);  // 5 bytes
  jmp(lazy_compile_target, RelocInfo::NONE);           // 5 bytes
}
63 :
// Emit a slot that forwards to the runtime stub at {builtin_target} via the
// shared far-jump helper.
void JumpTableAssembler::EmitRuntimeStubSlot(Address builtin_target) {
  JumpToInstructionStream(builtin_target);
}
67 :
// Emit an unconditional direct jump to {target}.
void JumpTableAssembler::EmitJumpSlot(Address target) {
  jmp(target, RelocInfo::NONE);
}
71 :
// Pad the current slot with exactly {bytes} bytes of nop filler.
void JumpTableAssembler::NopBytes(int bytes) {
  DCHECK_LE(0, bytes);
  Nop(bytes);
}
76 :
77 : #elif V8_TARGET_ARCH_ARM
// Emit one lazy-compile slot: load the function index into the dedicated
// register, then reuse {EmitJumpSlot} to reach {lazy_compile_target}.
void JumpTableAssembler::EmitLazyCompileJumpSlot(uint32_t func_index,
                                                 Address lazy_compile_target) {
  // Load function index to a register.
  // This generates [movw, movt] on ARMv7 and later, [ldr, constant pool
  // marker, constant] on ARMv6.
  Move32BitImmediate(kWasmCompileLazyFuncIndexRegister, Operand(func_index));
  // EmitJumpSlot emits either [b], [movw, movt, mov] (ARMv7+), or [ldr,
  // constant].
  // In total, this is <=5 instructions on all architectures.
  // TODO(arm): Optimize this for code size; lazy compile is not performance
  // critical, as it's only executed once per function.
  EmitJumpSlot(lazy_compile_target);
}
91 :
// Emit a slot that forwards to the runtime stub at {builtin_target}, then
// force-emit the constant pool so the slot carries its own constants.
void JumpTableAssembler::EmitRuntimeStubSlot(Address builtin_target) {
  JumpToInstructionStream(builtin_target);
  CheckConstPool(true, false);  // force emit of const pool
}
96 :
// Emit an unconditional jump to {target} by loading it straight into pc.
void JumpTableAssembler::EmitJumpSlot(Address target) {
  // Note that {Move32BitImmediate} emits [ldr, constant] for the relocation
  // mode used below, we need this to allow concurrent patching of this slot.
  Move32BitImmediate(pc, Operand(target, RelocInfo::WASM_CALL));
  CheckConstPool(true, false);  // force emit of const pool
}
103 :
104 : void JumpTableAssembler::NopBytes(int bytes) {
105 : DCHECK_LE(0, bytes);
106 : DCHECK_EQ(0, bytes % kInstrSize);
107 : for (; bytes > 0; bytes -= kInstrSize) {
108 : nop();
109 : }
110 : }
111 :
112 : #elif V8_TARGET_ARCH_ARM64
// Emit one lazy-compile slot: load the function index into the dedicated
// register (32-bit view), then jump directly to {lazy_compile_target}.
void JumpTableAssembler::EmitLazyCompileJumpSlot(uint32_t func_index,
                                                 Address lazy_compile_target) {
  Mov(kWasmCompileLazyFuncIndexRegister.W(), func_index);  // max. 2 instr
  Jump(lazy_compile_target, RelocInfo::NONE);              // 1 instr
}
118 :
// Emit a slot that forwards to the runtime stub at {builtin_target}, then
// force-emit the constant pool so the slot carries its own constants.
void JumpTableAssembler::EmitRuntimeStubSlot(Address builtin_target) {
  JumpToInstructionStream(builtin_target);
  CheckConstPool(true, false);  // force emit of const pool
}
123 :
// Emit an unconditional jump to {target}.
void JumpTableAssembler::EmitJumpSlot(Address target) {
  // TODO(wasm): Currently this is guaranteed to be a {near_call} and hence is
  // patchable concurrently. Once {kMaxWasmCodeMemory} is raised on ARM64, make
  // sure concurrent patching is still supported.
  Jump(target, RelocInfo::NONE);
}
130 :
131 : void JumpTableAssembler::NopBytes(int bytes) {
132 : DCHECK_LE(0, bytes);
133 : DCHECK_EQ(0, bytes % kInstrSize);
134 : for (; bytes > 0; bytes -= kInstrSize) {
135 : nop();
136 : }
137 : }
138 :
139 : #elif V8_TARGET_ARCH_S390X
// Emit one lazy-compile slot: load the function index into the dedicated
// register, then jump indirectly to {lazy_compile_target} through r1.
void JumpTableAssembler::EmitLazyCompileJumpSlot(uint32_t func_index,
                                                 Address lazy_compile_target) {
  // Load function index to r7. 6 bytes
  lgfi(kWasmCompileLazyFuncIndexRegister, Operand(func_index));
  // Jump to {lazy_compile_target}. 6 bytes or 12 bytes
  mov(r1, Operand(lazy_compile_target));
  b(r1);  // 2 bytes
}
148 :
// Emit a slot that forwards to the runtime stub at {builtin_target} via the
// shared far-jump helper.
void JumpTableAssembler::EmitRuntimeStubSlot(Address builtin_target) {
  JumpToInstructionStream(builtin_target);
}
152 :
// Emit an unconditional jump to {target} by loading the address into r1 and
// branching through it.
void JumpTableAssembler::EmitJumpSlot(Address target) {
  mov(r1, Operand(target));
  b(r1);
}
157 :
158 : void JumpTableAssembler::NopBytes(int bytes) {
159 : DCHECK_LE(0, bytes);
160 : DCHECK_EQ(0, bytes % 2);
161 : for (; bytes > 0; bytes -= 2) {
162 : nop(0);
163 : }
164 : }
165 :
166 : #elif V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
// Emit one lazy-compile slot: load the function index into the dedicated
// register, then jump to {lazy_compile_target}.
void JumpTableAssembler::EmitLazyCompileJumpSlot(uint32_t func_index,
                                                 Address lazy_compile_target) {
  li(kWasmCompileLazyFuncIndexRegister, func_index);  // max. 2 instr
  // Jump produces max. 4 instructions for 32-bit platform
  // and max. 6 instructions for 64-bit platform.
  Jump(lazy_compile_target, RelocInfo::NONE);
}
174 :
// Emit a slot that forwards to the runtime stub at {builtin_target} via the
// shared far-jump helper.
void JumpTableAssembler::EmitRuntimeStubSlot(Address builtin_target) {
  JumpToInstructionStream(builtin_target);
}
178 :
// Emit an unconditional jump to {target}.
void JumpTableAssembler::EmitJumpSlot(Address target) {
  Jump(target, RelocInfo::NONE);
}
182 :
183 : void JumpTableAssembler::NopBytes(int bytes) {
184 : DCHECK_LE(0, bytes);
185 : DCHECK_EQ(0, bytes % kInstrSize);
186 : for (; bytes > 0; bytes -= kInstrSize) {
187 : nop();
188 : }
189 : }
190 :
191 : #elif V8_TARGET_ARCH_PPC64
// Emit one lazy-compile slot: load the function index into the dedicated
// register, then jump indirectly to {lazy_compile_target} through CTR.
void JumpTableAssembler::EmitLazyCompileJumpSlot(uint32_t func_index,
                                                 Address lazy_compile_target) {
  // Load function index to register. max 5 instrs
  mov(kWasmCompileLazyFuncIndexRegister, Operand(func_index));
  // Jump to {lazy_compile_target}. max 5 instrs
  mov(r0, Operand(lazy_compile_target));
  mtctr(r0);
  bctr();
}
201 :
// Emit a slot that forwards to the runtime stub at {builtin_target} via the
// shared far-jump helper.
void JumpTableAssembler::EmitRuntimeStubSlot(Address builtin_target) {
  JumpToInstructionStream(builtin_target);
}
205 :
// Emit an unconditional jump to {target} by moving the address into the
// count register and branching through it.
void JumpTableAssembler::EmitJumpSlot(Address target) {
  mov(r0, Operand(target));
  mtctr(r0);
  bctr();
}
211 :
212 : void JumpTableAssembler::NopBytes(int bytes) {
213 : DCHECK_LE(0, bytes);
214 : DCHECK_EQ(0, bytes % 4);
215 : for (; bytes > 0; bytes -= 4) {
216 : nop(0);
217 : }
218 : }
219 :
220 : #else
// Fallback for architectures without jump-table support.
void JumpTableAssembler::EmitLazyCompileJumpSlot(uint32_t func_index,
                                                 Address lazy_compile_target) {
  UNIMPLEMENTED();
}
225 :
// Fallback for architectures without jump-table support.
void JumpTableAssembler::EmitRuntimeStubSlot(Address builtin_target) {
  UNIMPLEMENTED();
}
229 :
230 : void JumpTableAssembler::EmitJumpSlot(Address target) { UNIMPLEMENTED(); }
231 :
// Fallback for architectures without jump-table support.
void JumpTableAssembler::NopBytes(int bytes) {
  DCHECK_LE(0, bytes);
  UNIMPLEMENTED();
}
236 : #endif
237 :
238 : } // namespace wasm
239 : } // namespace internal
240 120216 : } // namespace v8
|