// Copyright 2018 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_WASM_JUMP_TABLE_ASSEMBLER_H_
#define V8_WASM_JUMP_TABLE_ASSEMBLER_H_

#include "src/macro-assembler.h"
#include "src/wasm/wasm-code-manager.h"

namespace v8 {
namespace internal {
namespace wasm {

// The jump table is the central dispatch point for all (direct and indirect)
// invocations in WebAssembly. It holds one slot per function in a module, with
// each slot containing a dispatch to the currently published {WasmCode} that
// corresponds to the function.
//
// Note that the table is split into lines of fixed size, with lines laid out
// consecutively within the executable memory of the {NativeModule}. The slots
// in turn are consecutive within a line, but do not cross line boundaries.
//
//   +- L1 -------------------+  +- L2 -------------------+  +- L3 ...
//   | S1 | S2 | ... | Sn | x |  | S1 | S2 | ... | Sn | x |  | S1  ...
//   +------------------------+  +------------------------+  +---- ...
//
// The above illustrates jump table lines {Li} containing slots {Si} with each
// line containing {n} slots and some padding {x} for alignment purposes.
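//
// As a concrete illustration (assuming the x64 constants defined inside the
// class below): a line is 64 bytes and a slot is 18 bytes, so each line holds
// n = 3 slots followed by x = 64 - 3 * 18 = 10 bytes of padding.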
class JumpTableAssembler : public TurboAssembler {
 public:
  // Translate an offset into the continuous jump table to a jump table index.
  static uint32_t SlotOffsetToIndex(uint32_t slot_offset) {
    uint32_t line_index = slot_offset / kJumpTableLineSize;
    uint32_t line_offset = slot_offset % kJumpTableLineSize;
    DCHECK_EQ(0, line_offset % kJumpTableSlotSize);
    return line_index * kJumpTableSlotsPerLine +
           line_offset / kJumpTableSlotSize;
  }
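  // Worked example (a sketch assuming the x64 constants below: 64-byte lines,
  // 18-byte slots, hence 3 slots per line): slot_offset 82 lies in line 1 at
  // line offset 18, i.e. slot 1 within that line, so the returned index is
  // 1 * 3 + 1 = 4.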

  // Translate a jump table index to an offset into the continuous jump table.
  static uint32_t SlotIndexToOffset(uint32_t slot_index) {
    uint32_t line_index = slot_index / kJumpTableSlotsPerLine;
    uint32_t line_offset =
        (slot_index % kJumpTableSlotsPerLine) * kJumpTableSlotSize;
    return line_index * kJumpTableLineSize + line_offset;
  }
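  // Worked example (same x64 assumptions as above): slot_index 4 maps to line
  // 1, slot 1 within that line, i.e. offset 1 * 64 + 1 * 18 = 82, which is the
  // inverse of the {SlotOffsetToIndex} example.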

  // Determine the size of a jump table containing the given number of slots.
  static constexpr uint32_t SizeForNumberOfSlots(uint32_t slot_count) {
    // TODO(wasm): Once the {RoundUp} utility handles non-power-of-two values,
    // use: {RoundUp<kJumpTableSlotsPerLine>(slot_count) * kJumpTableLineSize}.
    return ((slot_count + kJumpTableSlotsPerLine - 1) /
            kJumpTableSlotsPerLine) *
           kJumpTableLineSize;
  }
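  // Worked example (again assuming the x64 constants): 7 slots occupy
  // ceil(7 / 3) = 3 lines, so the table size is 3 * 64 = 192 bytes.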
57 :
58 17599 : static void EmitLazyCompileJumpSlot(Address base, uint32_t slot_index,
59 : uint32_t func_index,
60 : Address lazy_compile_target,
61 : WasmCode::FlushICache flush_i_cache) {
62 17599 : Address slot = base + SlotIndexToOffset(slot_index);
63 17599 : JumpTableAssembler jtasm(slot);
64 17599 : jtasm.EmitLazyCompileJumpSlot(func_index, lazy_compile_target);
65 35198 : jtasm.NopBytes(kJumpTableSlotSize - jtasm.pc_offset());
66 17599 : if (flush_i_cache) {
67 : Assembler::FlushICache(slot, kJumpTableSlotSize);
68 : }
69 17599 : }
70 :
71 1790819 : static void PatchJumpTableSlot(Address base, uint32_t slot_index,
72 : Address new_target,
73 : WasmCode::FlushICache flush_i_cache) {
74 1790819 : Address slot = base + SlotIndexToOffset(slot_index);
75 1790819 : JumpTableAssembler jtasm(slot);
76 1790831 : jtasm.EmitJumpSlot(new_target);
77 3581642 : jtasm.NopBytes(kJumpTableSlotSize - jtasm.pc_offset());
78 1790828 : if (flush_i_cache) {
79 : Assembler::FlushICache(slot, kJumpTableSlotSize);
80 : }
81 1790831 : }
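  // Illustrative use (a sketch, not a call site taken from this codebase):
  // once new code for function index {i} has been published, the owning
  // {NativeModule} can redirect all existing call sites by patching the
  // corresponding slot, e.g.
  //   JumpTableAssembler::PatchJumpTableSlot(jump_table_start, i,
  //                                          new_code_start, flush_i_cache);
  // where {jump_table_start}, {new_code_start} and {flush_i_cache} are values
  // supplied by the caller.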

 private:
  // Instantiate a {JumpTableAssembler} for patching.
  explicit JumpTableAssembler(Address slot_addr, int size = 256)
      : TurboAssembler(nullptr, JumpTableAssemblerOptions(),
                       CodeObjectRequired::kNo,
                       ExternalAssemblerBuffer(
                           reinterpret_cast<uint8_t*>(slot_addr), size)) {}

  // To allow concurrent patching of the jump table entries, we need to ensure
  // that the instruction containing the call target does not cross cache-line
  // boundaries. The jump table line size has been chosen to satisfy this.
#if V8_TARGET_ARCH_X64
  static constexpr int kJumpTableLineSize = 64;
  static constexpr int kJumpTableSlotSize = 18;
#elif V8_TARGET_ARCH_IA32
  static constexpr int kJumpTableLineSize = 64;
  static constexpr int kJumpTableSlotSize = 10;
#elif V8_TARGET_ARCH_ARM
  static constexpr int kJumpTableLineSize = 5 * kInstrSize;
  static constexpr int kJumpTableSlotSize = 5 * kInstrSize;
#elif V8_TARGET_ARCH_ARM64
  static constexpr int kJumpTableLineSize = 3 * kInstrSize;
  static constexpr int kJumpTableSlotSize = 3 * kInstrSize;
#elif V8_TARGET_ARCH_S390X
  static constexpr int kJumpTableLineSize = 20;
  static constexpr int kJumpTableSlotSize = 20;
#elif V8_TARGET_ARCH_S390
  static constexpr int kJumpTableLineSize = 14;
  static constexpr int kJumpTableSlotSize = 14;
#elif V8_TARGET_ARCH_PPC64
  static constexpr int kJumpTableLineSize = 48;
  static constexpr int kJumpTableSlotSize = 48;
#elif V8_TARGET_ARCH_PPC
  static constexpr int kJumpTableLineSize = 24;
  static constexpr int kJumpTableSlotSize = 24;
#elif V8_TARGET_ARCH_MIPS
  static constexpr int kJumpTableLineSize = 6 * kInstrSize;
  static constexpr int kJumpTableSlotSize = 6 * kInstrSize;
#elif V8_TARGET_ARCH_MIPS64
  static constexpr int kJumpTableLineSize = 8 * kInstrSize;
  static constexpr int kJumpTableSlotSize = 8 * kInstrSize;
#else
  static constexpr int kJumpTableLineSize = 1;
  static constexpr int kJumpTableSlotSize = 1;
#endif

  static constexpr int kJumpTableSlotsPerLine =
      kJumpTableLineSize / kJumpTableSlotSize;
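  // Note that on several of the architectures above (e.g. arm, arm64, s390,
  // ppc, mips) the line size equals the slot size, so each line holds exactly
  // one slot and carries no padding.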

  // {JumpTableAssembler} is never used during snapshot generation, and its
  // code must be independent of the code range of any isolate anyway. Just
  // ensure that no relocation information is recorded; there is no buffer to
  // store it, since the assembler is instantiated in patching mode directly on
  // existing code.
  static AssemblerOptions JumpTableAssemblerOptions() {
    AssemblerOptions options;
    options.disable_reloc_info_for_patching = true;
    return options;
  }

  void EmitLazyCompileJumpSlot(uint32_t func_index,
                               Address lazy_compile_target);

  void EmitJumpSlot(Address target);

  void NopBytes(int bytes);
};

}  // namespace wasm
}  // namespace internal
}  // namespace v8

#endif  // V8_WASM_JUMP_TABLE_ASSEMBLER_H_