// Copyright 2018 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_WASM_JUMP_TABLE_ASSEMBLER_H_
#define V8_WASM_JUMP_TABLE_ASSEMBLER_H_

#include "src/macro-assembler.h"
#include "src/wasm/wasm-code-manager.h"

namespace v8 {
namespace internal {
namespace wasm {

// The jump table is the central dispatch point for all (direct and indirect)
// invocations in WebAssembly. It holds one slot per function in a module, with
// each slot containing a dispatch to the currently published {WasmCode} that
// corresponds to the function.
//
// Note that the table is split into lines of fixed size, with lines laid out
// consecutively within the executable memory of the {NativeModule}. The slots
// in turn are consecutive within a line, but do not cross line boundaries.
//
//   +- L1 -------------------+ +- L2 -------------------+ +- L3 ...
//   | S1 | S2 | ... | Sn | x | | S1 | S2 | ... | Sn | x | | S1 ...
//   +------------------------+ +------------------------+ +---- ...
//
// The above illustrates jump table lines {Li} containing slots {Si} with each
// line containing {n} slots and some padding {x} for alignment purposes.
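// For example, with the x64 constants defined below (64-byte lines, 18-byte
// slots), each line holds n == 3 slots followed by x == 10 bytes of padding.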
class JumpTableAssembler : public MacroAssembler {
 public:
  // Translate an offset into the continuous jump table to a jump table index.
  static uint32_t SlotOffsetToIndex(uint32_t slot_offset) {
    uint32_t line_index = slot_offset / kJumpTableLineSize;
    uint32_t line_offset = slot_offset % kJumpTableLineSize;
    DCHECK_EQ(0, line_offset % kJumpTableSlotSize);
    return line_index * kJumpTableSlotsPerLine +
           line_offset / kJumpTableSlotSize;
  }
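
  // Worked example with the x64 constants (64-byte lines, 18-byte slots,
  // 3 slots per line): slot_offset 82 lies in line 1 (82 / 64) at line
  // offset 18 (82 % 64), so the index is 1 * 3 + 18 / 18 == 4.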

  // Translate a jump table index to an offset into the continuous jump table.
  static uint32_t SlotIndexToOffset(uint32_t slot_index) {
    uint32_t line_index = slot_index / kJumpTableSlotsPerLine;
    uint32_t line_offset =
        (slot_index % kJumpTableSlotsPerLine) * kJumpTableSlotSize;
    return line_index * kJumpTableLineSize + line_offset;
  }
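
  // This is the inverse of {SlotOffsetToIndex}: continuing the example above,
  // slot_index 4 on x64 yields line_index 1 (4 / 3) and line_offset 18
  // ((4 % 3) * 18), mapping back to offset 1 * 64 + 18 == 82.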

  // Determine the size of a jump table containing the given number of slots.
  static constexpr uint32_t SizeForNumberOfSlots(uint32_t slot_count) {
    // TODO(wasm): Once the {RoundUp} utility handles non-powers of two values,
    // use: {RoundUp<kJumpTableSlotsPerLine>(slot_count) * kJumpTableLineSize}
    return ((slot_count + kJumpTableSlotsPerLine - 1) /
            kJumpTableSlotsPerLine) *
           kJumpTableLineSize;
  }
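
  // The table always occupies whole lines, so the slot count is rounded up to
  // the next full line; e.g. on x64 (3 slots per line, 64-byte lines),
  // SizeForNumberOfSlots(4) == 2 * 64 == 128 bytes.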

  // Translate a stub slot index to an offset into the continuous jump table.
  static uint32_t StubSlotIndexToOffset(uint32_t slot_index) {
    return slot_index * kJumpTableStubSlotSize;
  }

  // Determine the size of a jump table containing only runtime stub slots.
  static constexpr uint32_t SizeForNumberOfStubSlots(uint32_t slot_count) {
    return slot_count * kJumpTableStubSlotSize;
  }
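
  // Note that stub slots are densely packed with no line padding: nothing in
  // this class patches a stub slot after it is emitted, so the cache-line
  // constraint described with the per-architecture constants below does not
  // apply. E.g. on x64 (18-byte stub slots), SizeForNumberOfStubSlots(4) ==
  // 72 bytes.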

  static void EmitLazyCompileJumpSlot(Address base, uint32_t slot_index,
                                      uint32_t func_index,
                                      Address lazy_compile_target,
                                      WasmCode::FlushICache flush_i_cache) {
    Address slot = base + SlotIndexToOffset(slot_index);
    JumpTableAssembler jtasm(slot);
    jtasm.EmitLazyCompileJumpSlot(func_index, lazy_compile_target);
    jtasm.NopBytes(kJumpTableSlotSize - jtasm.pc_offset());
    if (flush_i_cache) {
      FlushInstructionCache(slot, kJumpTableSlotSize);
    }
  }

  static void EmitRuntimeStubSlot(Address base, uint32_t slot_index,
                                  Address builtin_target,
                                  WasmCode::FlushICache flush_i_cache) {
    Address slot = base + StubSlotIndexToOffset(slot_index);
    JumpTableAssembler jtasm(slot);
    jtasm.EmitRuntimeStubSlot(builtin_target);
    jtasm.NopBytes(kJumpTableStubSlotSize - jtasm.pc_offset());
    if (flush_i_cache) {
      FlushInstructionCache(slot, kJumpTableStubSlotSize);
    }
  }

  static void PatchJumpTableSlot(Address base, uint32_t slot_index,
                                 Address new_target,
                                 WasmCode::FlushICache flush_i_cache) {
    Address slot = base + SlotIndexToOffset(slot_index);
    JumpTableAssembler jtasm(slot);
    jtasm.EmitJumpSlot(new_target);
    jtasm.NopBytes(kJumpTableSlotSize - jtasm.pc_offset());
    if (flush_i_cache) {
      FlushInstructionCache(slot, kJumpTableSlotSize);
    }
  }
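
  // A minimal sketch of the typical slot lifecycle ({base}, {slot_index},
  // {func_index}, and {code} are hypothetical values supplied by the caller,
  // e.g. the {NativeModule}):
  //
  //   // At module creation, dispatch to the lazy compile stub.
  //   JumpTableAssembler::EmitLazyCompileJumpSlot(
  //       base, slot_index, func_index, lazy_compile_target,
  //       WasmCode::kFlushICache);
  //   // Once the {WasmCode} for the function is published, redirect the slot.
  //   JumpTableAssembler::PatchJumpTableSlot(
  //       base, slot_index, code->instruction_start(),
  //       WasmCode::kFlushICache);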

 private:
  // Instantiate a {JumpTableAssembler} for patching.
  explicit JumpTableAssembler(Address slot_addr, int size = 256)
      : MacroAssembler(nullptr, JumpTableAssemblerOptions(),
                       CodeObjectRequired::kNo,
                       ExternalAssemblerBuffer(
                           reinterpret_cast<uint8_t*>(slot_addr), size)) {}

  // To allow concurrent patching of the jump table entries, we need to ensure
  // that the instruction containing the call target does not cross cache-line
  // boundaries. The jump table line size has been chosen to satisfy this.
#if V8_TARGET_ARCH_X64
  static constexpr int kJumpTableLineSize = 64;
  static constexpr int kJumpTableSlotSize = 18;
  static constexpr int kJumpTableStubSlotSize = 18;
#elif V8_TARGET_ARCH_IA32
  static constexpr int kJumpTableLineSize = 64;
  static constexpr int kJumpTableSlotSize = 10;
  static constexpr int kJumpTableStubSlotSize = 10;
#elif V8_TARGET_ARCH_ARM
  static constexpr int kJumpTableLineSize = 5 * kInstrSize;
  static constexpr int kJumpTableSlotSize = 5 * kInstrSize;
  static constexpr int kJumpTableStubSlotSize = 5 * kInstrSize;
#elif V8_TARGET_ARCH_ARM64
  static constexpr int kJumpTableLineSize = 3 * kInstrSize;
  static constexpr int kJumpTableSlotSize = 3 * kInstrSize;
  static constexpr int kJumpTableStubSlotSize = 6 * kInstrSize;
#elif V8_TARGET_ARCH_S390X
  static constexpr int kJumpTableLineSize = 128;
  static constexpr int kJumpTableSlotSize = 20;
  static constexpr int kJumpTableStubSlotSize = 14;
#elif V8_TARGET_ARCH_PPC64
  static constexpr int kJumpTableLineSize = 64;
  static constexpr int kJumpTableSlotSize = 48;
  static constexpr int kJumpTableStubSlotSize = 7 * kInstrSize;
#elif V8_TARGET_ARCH_MIPS
  static constexpr int kJumpTableLineSize = 6 * kInstrSize;
  static constexpr int kJumpTableSlotSize = 6 * kInstrSize;
  // {kJumpTableStubSlotSize} is used unconditionally above, so it must be
  // defined on every architecture; the MIPS values here are assumed.
  static constexpr int kJumpTableStubSlotSize = 4 * kInstrSize;
#elif V8_TARGET_ARCH_MIPS64
  static constexpr int kJumpTableLineSize = 8 * kInstrSize;
  static constexpr int kJumpTableSlotSize = 8 * kInstrSize;
  static constexpr int kJumpTableStubSlotSize = 6 * kInstrSize;
#else
  static constexpr int kJumpTableLineSize = 1;
  static constexpr int kJumpTableSlotSize = 1;
  static constexpr int kJumpTableStubSlotSize = 1;
#endif

  static constexpr int kJumpTableSlotsPerLine =
      kJumpTableLineSize / kJumpTableSlotSize;
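
  // Minimal compile-time sanity check for the constants above: on every
  // architecture, a line must hold at least one whole slot.
  static_assert(kJumpTableSlotsPerLine >= 1,
                "a jump table line must hold at least one slot");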

  // {JumpTableAssembler} is never used during snapshot generation, and its
  // code must be independent of the code range of any isolate anyway. Just
  // ensure that no relocation information is recorded; there is no buffer to
  // store it, since the assembler is instantiated in patching mode directly
  // on existing code.
  static AssemblerOptions JumpTableAssemblerOptions() {
    AssemblerOptions options;
    options.disable_reloc_info_for_patching = true;
    return options;
  }

  void EmitLazyCompileJumpSlot(uint32_t func_index,
                               Address lazy_compile_target);

  void EmitRuntimeStubSlot(Address builtin_target);

  void EmitJumpSlot(Address target);

  void NopBytes(int bytes);
};

}  // namespace wasm
}  // namespace internal
}  // namespace v8

#endif  // V8_WASM_JUMP_TABLE_ASSEMBLER_H_