Line data Source code
1 : // Copyright 2018 the V8 project authors. All rights reserved.
2 : // Use of this source code is governed by a BSD-style license that can be
3 : // found in the LICENSE file.
4 :
5 : #include "src/wasm/jump-table-assembler.h"
6 :
7 : #include "src/assembler-inl.h"
8 : #include "src/macro-assembler-inl.h"
9 :
10 : namespace v8 {
11 : namespace internal {
12 : namespace wasm {
13 :
14 : // The implementation is compact enough to implement it inline here. If it gets
15 : // much bigger, we might want to split it in a separate file per architecture.
16 : #if V8_TARGET_ARCH_X64
// Emit one lazy-compile slot: pass {func_index} on the stack, then jump to
// {lazy_compile_target}. Total size must fit the fixed lazy-compile slot
// budget (push: max 5 bytes, near jmp: 5 bytes).
void JumpTableAssembler::EmitLazyCompileJumpSlot(uint32_t func_index,
                                                 Address lazy_compile_target) {
  // Use a push, because mov to an extended register takes 6 bytes.
  pushq(Immediate(func_index));       // max 5 bytes
  EmitJumpSlot(lazy_compile_target);  // always 5 bytes
}

// Emit a slot that tail-jumps into the instruction stream of a runtime stub
// (embedded builtin).
void JumpTableAssembler::EmitRuntimeStubSlot(Address builtin_target) {
  JumpToInstructionStream(builtin_target);
}

// Emit an unconditional jump to {target} as a single rel32 near jump.
void JumpTableAssembler::EmitJumpSlot(Address target) {
  // On x64, all code is allocated within a single code section, so we can use
  // relative jumps.
  static_assert(kMaxWasmCodeMemory <= size_t{2} * GB, "can use relative jump");
  // The rel32 displacement is relative to the end of the jmp instruction,
  // hence the extra {kNearJmpInstrSize} subtraction.
  intptr_t displacement = static_cast<intptr_t>(
      reinterpret_cast<byte*>(target) - pc_ - kNearJmpInstrSize);
  near_jmp(displacement, RelocInfo::NONE);
}

// Pad the instruction stream with exactly {bytes} bytes of nops (used to fill
// slots up to their fixed size).
void JumpTableAssembler::NopBytes(int bytes) {
  DCHECK_LE(0, bytes);
  Nop(bytes);
}
41 :
42 : #elif V8_TARGET_ARCH_IA32
// Emit one lazy-compile slot: load {func_index} into the designated register,
// then jump to {lazy_compile_target}. Both instructions are fixed-size
// (5 bytes each), keeping the slot size constant.
void JumpTableAssembler::EmitLazyCompileJumpSlot(uint32_t func_index,
                                                 Address lazy_compile_target) {
  mov(kWasmCompileLazyFuncIndexRegister, func_index);  // 5 bytes
  jmp(lazy_compile_target, RelocInfo::NONE);           // 5 bytes
}

// Emit a slot that tail-jumps into the instruction stream of a runtime stub
// (embedded builtin).
void JumpTableAssembler::EmitRuntimeStubSlot(Address builtin_target) {
  JumpToInstructionStream(builtin_target);
}

// Emit an unconditional jump to {target}.
void JumpTableAssembler::EmitJumpSlot(Address target) {
  jmp(target, RelocInfo::NONE);
}

// Pad the instruction stream with exactly {bytes} bytes of nops (used to fill
// slots up to their fixed size).
void JumpTableAssembler::NopBytes(int bytes) {
  DCHECK_LE(0, bytes);
  Nop(bytes);
}
61 :
62 : #elif V8_TARGET_ARCH_ARM
// Emit one lazy-compile slot: load {func_index} into the designated register,
// then jump to {lazy_compile_target}.
void JumpTableAssembler::EmitLazyCompileJumpSlot(uint32_t func_index,
                                                 Address lazy_compile_target) {
  // Load function index to a register.
  // This generates [movw, movt] on ARMv7 and later, [ldr, constant pool marker,
  // constant] on ARMv6.
  Move32BitImmediate(kWasmCompileLazyFuncIndexRegister, Operand(func_index));
  // EmitJumpSlot emits either [b], [movw, movt, mov] (ARMv7+), or [ldr,
  // constant].
  // In total, this is <=5 instructions on all architectures.
  // TODO(arm): Optimize this for code size; lazy compile is not performance
  // critical, as it's only executed once per function.
  EmitJumpSlot(lazy_compile_target);
}

// Emit a slot that tail-jumps into the instruction stream of a runtime stub
// (embedded builtin). The constant pool is flushed so the slot stays
// self-contained.
void JumpTableAssembler::EmitRuntimeStubSlot(Address builtin_target) {
  JumpToInstructionStream(builtin_target);
  CheckConstPool(true, false);  // force emit of const pool
}

// Emit an unconditional jump to {target} by loading it straight into pc.
void JumpTableAssembler::EmitJumpSlot(Address target) {
  // Note that {Move32BitImmediate} emits [ldr, constant] for the relocation
  // mode used below, we need this to allow concurrent patching of this slot.
  Move32BitImmediate(pc, Operand(target, RelocInfo::WASM_CALL));
  CheckConstPool(true, false);  // force emit of const pool
}

// Pad the instruction stream with {bytes} bytes of nops; {bytes} must be a
// multiple of the fixed ARM instruction size.
void JumpTableAssembler::NopBytes(int bytes) {
  DCHECK_LE(0, bytes);
  DCHECK_EQ(0, bytes % kInstrSize);
  for (; bytes > 0; bytes -= kInstrSize) {
    nop();
  }
}
96 :
97 : #elif V8_TARGET_ARCH_ARM64
// Emit one lazy-compile slot: load {func_index} into the 32-bit view of the
// designated register, then jump to {lazy_compile_target}.
void JumpTableAssembler::EmitLazyCompileJumpSlot(uint32_t func_index,
                                                 Address lazy_compile_target) {
  Mov(kWasmCompileLazyFuncIndexRegister.W(), func_index);  // max. 2 instr
  Jump(lazy_compile_target, RelocInfo::NONE);              // 1 instr
}

// Emit a slot that tail-jumps into the instruction stream of a runtime stub
// (embedded builtin). The constant pool is flushed so the slot stays
// self-contained.
void JumpTableAssembler::EmitRuntimeStubSlot(Address builtin_target) {
  JumpToInstructionStream(builtin_target);
  CheckConstPool(true, false);  // force emit of const pool
}

// Emit an unconditional jump to {target}; {target} must be within near-branch
// range so the jump is a single patchable instruction.
void JumpTableAssembler::EmitJumpSlot(Address target) {
  // TODO(wasm): Currently this is guaranteed to be a {near_call} and hence is
  // patchable concurrently. Once {kMaxWasmCodeMemory} is raised on ARM64, make
  // sure concurrent patching is still supported.
  DCHECK(TurboAssembler::IsNearCallOffset(
      (reinterpret_cast<byte*>(target) - pc_) / kInstrSize));

  Jump(target, RelocInfo::NONE);
}

// Pad the instruction stream with {bytes} bytes of nops; {bytes} must be a
// multiple of the fixed ARM64 instruction size.
void JumpTableAssembler::NopBytes(int bytes) {
  DCHECK_LE(0, bytes);
  DCHECK_EQ(0, bytes % kInstrSize);
  for (; bytes > 0; bytes -= kInstrSize) {
    nop();
  }
}
126 :
127 : #elif V8_TARGET_ARCH_S390X
// Emit one lazy-compile slot: load {func_index} into the designated register,
// then jump to {lazy_compile_target} via r1.
void JumpTableAssembler::EmitLazyCompileJumpSlot(uint32_t func_index,
                                                 Address lazy_compile_target) {
  // Load function index to {kWasmCompileLazyFuncIndexRegister}. 6 bytes
  lgfi(kWasmCompileLazyFuncIndexRegister, Operand(func_index));
  // Jump to {lazy_compile_target}. 6 bytes or 12 bytes
  mov(r1, Operand(lazy_compile_target));
  b(r1);  // 2 bytes
}

// Emit a slot that tail-jumps into the instruction stream of a runtime stub
// (embedded builtin).
void JumpTableAssembler::EmitRuntimeStubSlot(Address builtin_target) {
  JumpToInstructionStream(builtin_target);
}

// Emit an unconditional jump to {target}: materialize the address in r1 and
// branch through it.
void JumpTableAssembler::EmitJumpSlot(Address target) {
  mov(r1, Operand(target));
  b(r1);
}

// Pad the instruction stream with {bytes} bytes of 2-byte nops; {bytes} must
// be even.
void JumpTableAssembler::NopBytes(int bytes) {
  DCHECK_LE(0, bytes);
  DCHECK_EQ(0, bytes % 2);
  for (; bytes > 0; bytes -= 2) {
    nop(0);
  }
}
153 :
154 : #elif V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
// Emit one lazy-compile slot: load {func_index} into the designated register,
// then jump to {lazy_compile_target}.
void JumpTableAssembler::EmitLazyCompileJumpSlot(uint32_t func_index,
                                                 Address lazy_compile_target) {
  li(kWasmCompileLazyFuncIndexRegister, func_index);  // max. 2 instr
  // Jump produces max. 4 instructions for 32-bit platform
  // and max. 6 instructions for 64-bit platform.
  Jump(lazy_compile_target, RelocInfo::NONE);
}

// Emit a slot that tail-jumps into the instruction stream of a runtime stub
// (embedded builtin).
void JumpTableAssembler::EmitRuntimeStubSlot(Address builtin_target) {
  JumpToInstructionStream(builtin_target);
}

// Emit an unconditional jump to {target}.
void JumpTableAssembler::EmitJumpSlot(Address target) {
  Jump(target, RelocInfo::NONE);
}

// Pad the instruction stream with {bytes} bytes of nops; {bytes} must be a
// multiple of the fixed MIPS instruction size.
void JumpTableAssembler::NopBytes(int bytes) {
  DCHECK_LE(0, bytes);
  DCHECK_EQ(0, bytes % kInstrSize);
  for (; bytes > 0; bytes -= kInstrSize) {
    nop();
  }
}
178 :
179 : #elif V8_TARGET_ARCH_PPC64
// Emit one lazy-compile slot: load {func_index} into the designated register,
// then jump to {lazy_compile_target} through the count register.
void JumpTableAssembler::EmitLazyCompileJumpSlot(uint32_t func_index,
                                                 Address lazy_compile_target) {
  // Load function index to register. max 5 instrs
  mov(kWasmCompileLazyFuncIndexRegister, Operand(func_index));
  // Jump to {lazy_compile_target}. max 5 instrs
  mov(r0, Operand(lazy_compile_target));
  mtctr(r0);
  bctr();
}

// Emit a slot that tail-jumps into the instruction stream of a runtime stub
// (embedded builtin).
void JumpTableAssembler::EmitRuntimeStubSlot(Address builtin_target) {
  JumpToInstructionStream(builtin_target);
}

// Emit an unconditional jump to {target}: materialize the address in r0, move
// it to the count register, and branch through it.
void JumpTableAssembler::EmitJumpSlot(Address target) {
  mov(r0, Operand(target));
  mtctr(r0);
  bctr();
}

// Pad the instruction stream with {bytes} bytes of 4-byte nops; {bytes} must
// be a multiple of 4.
void JumpTableAssembler::NopBytes(int bytes) {
  DCHECK_LE(0, bytes);
  DCHECK_EQ(0, bytes % 4);
  for (; bytes > 0; bytes -= 4) {
    nop(0);
  }
}
207 :
208 : #else
// Fallback for target architectures without a jump-table implementation:
// every emitter aborts at runtime.
void JumpTableAssembler::EmitLazyCompileJumpSlot(uint32_t func_index,
                                                 Address lazy_compile_target) {
  UNIMPLEMENTED();
}

void JumpTableAssembler::EmitRuntimeStubSlot(Address builtin_target) {
  UNIMPLEMENTED();
}

void JumpTableAssembler::EmitJumpSlot(Address target) { UNIMPLEMENTED(); }

void JumpTableAssembler::NopBytes(int bytes) {
  DCHECK_LE(0, bytes);
  UNIMPLEMENTED();
}
224 : #endif
225 :
226 : } // namespace wasm
227 : } // namespace internal
228 122036 : } // namespace v8
|