// Copyright 2018 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include <bitset>

#include "src/assembler-inl.h"
#include "src/macro-assembler-inl.h"
#include "src/simulator.h"
#include "src/utils.h"
#include "src/wasm/jump-table-assembler.h"
#include "test/cctest/cctest.h"
#include "test/common/assembler-tester.h"

namespace v8 {
namespace internal {
namespace wasm {

#if 0
#define TRACE(...) PrintF(__VA_ARGS__)
#else
#define TRACE(...)
#endif

#define __ masm.

// TODO(v8:7424,v8:8018): Extend this test to all architectures.
#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_ARM || \
    V8_TARGET_ARCH_ARM64

namespace {

static volatile int global_stop_bit = 0;

constexpr int kJumpTableSlotCount = 128;
constexpr uint32_t kJumpTableSize =
    JumpTableAssembler::SizeForNumberOfSlots(kJumpTableSlotCount);

#if V8_TARGET_ARCH_ARM64
constexpr uint32_t kAvailableBufferSlots =
    (kMaxWasmCodeMemory - kJumpTableSize) / AssemblerBase::kMinimalBufferSize;
constexpr uint32_t kBufferSlotStartOffset =
    RoundUp<AssemblerBase::kMinimalBufferSize>(kJumpTableSize);
#else
constexpr uint32_t kAvailableBufferSlots = 0;
#endif

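// Generates the thunk code used by the runner threads: it checks
// {global_stop_bit} and either returns (stop bit set) or jumps back to
// {jump_target}, the jump-table slot under test. The slot and the thunks
// thus form a loop that only terminates once the stop bit is set.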
Address GenerateJumpTableThunk(
    Address jump_target, byte* thunk_slot_buffer,
    std::bitset<kAvailableBufferSlots>* used_slots,
    std::vector<std::unique_ptr<TestingAssemblerBuffer>>* thunk_buffers) {
#if V8_TARGET_ARCH_ARM64
  // To guarantee that the branch range lies within the near-call range,
  // generate the thunk in the same (kMaxWasmCodeMemory-sized) buffer as the
  // jump_target itself.
  //
  // Allocate a slot that we haven't already used. This is necessary because
  // each test iteration expects to generate two unique addresses and we leave
  // each slot executable (and not writable).
  base::RandomNumberGenerator* rng =
      CcTest::i_isolate()->random_number_generator();
  // Ensure a chance of completion without too much thrashing.
  DCHECK(used_slots->count() < (used_slots->size() / 2));
  int buffer_index;
  do {
    buffer_index = rng->NextInt(kAvailableBufferSlots);
  } while (used_slots->test(buffer_index));
  used_slots->set(buffer_index);
  byte* buffer =
      thunk_slot_buffer + buffer_index * AssemblerBase::kMinimalBufferSize;

  DCHECK(TurboAssembler::IsNearCallOffset(
      (reinterpret_cast<byte*>(jump_target) - buffer) / kInstrSize));
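
  // The jump-table slot reaches this thunk via a near branch, whose signed
  // 26-bit instruction offset on arm64 covers roughly +-128 MB; the DCHECK
  // above asserts that the randomly chosen thunk slot lies within that range
  // of {jump_target}.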

#else
  USE(thunk_slot_buffer);
  USE(used_slots);
  thunk_buffers->emplace_back(AllocateAssemblerBuffer(
      AssemblerBase::kMinimalBufferSize, GetRandomMmapAddr()));
  byte* buffer = thunk_buffers->back()->start();
#endif

  MacroAssembler masm(
      nullptr, AssemblerOptions{}, CodeObjectRequired::kNo,
      ExternalAssemblerBuffer(buffer, AssemblerBase::kMinimalBufferSize));

  Label exit;
  Register scratch = kReturnRegister0;
  Address stop_bit_address = reinterpret_cast<Address>(&global_stop_bit);
#if V8_TARGET_ARCH_X64
  __ Move(scratch, stop_bit_address, RelocInfo::NONE);
  __ testl(MemOperand(scratch, 0), Immediate(1));
  __ j(not_zero, &exit);
  __ Jump(jump_target, RelocInfo::NONE);
#elif V8_TARGET_ARCH_IA32
  __ Move(scratch, Immediate(stop_bit_address, RelocInfo::NONE));
  __ test(MemOperand(scratch, 0), Immediate(1));
  __ j(not_zero, &exit);
  __ jmp(jump_target, RelocInfo::NONE);
#elif V8_TARGET_ARCH_ARM
  __ mov(scratch, Operand(stop_bit_address, RelocInfo::NONE));
  __ ldr(scratch, MemOperand(scratch, 0));
  __ tst(scratch, Operand(1));
  __ b(ne, &exit);
  __ Jump(jump_target, RelocInfo::NONE);
#elif V8_TARGET_ARCH_ARM64
  __ Mov(scratch, Operand(stop_bit_address, RelocInfo::NONE));
  __ Ldr(scratch, MemOperand(scratch, 0));
  __ Tbnz(scratch, 0, &exit);
  __ Mov(scratch, Immediate(jump_target, RelocInfo::NONE));
  __ Br(scratch);
#else
#error Unsupported architecture
#endif
  __ bind(&exit);
  __ Ret();

  CodeDesc desc;
  masm.GetCode(nullptr, &desc);
  return reinterpret_cast<Address>(buffer);
}

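// A runner thread enters the jump-table slot once and then keeps cycling
// through slot and thunk code until {global_stop_bit} is set.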
class JumpTableRunner : public v8::base::Thread {
 public:
  JumpTableRunner(Address slot_address, int runner_id)
      : Thread(Options("JumpTableRunner")),
        slot_address_(slot_address),
        runner_id_(runner_id) {}

  void Run() override {
    TRACE("Runner #%d is starting ...\n", runner_id_);
    GeneratedCode<void>::FromAddress(CcTest::i_isolate(), slot_address_).Call();
    TRACE("Runner #%d is stopping ...\n", runner_id_);
    USE(runner_id_);
  }

 private:
  Address slot_address_;
  int runner_id_;
};

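// The patcher thread repeatedly redirects the jump-table slot back and forth
// between the two thunk addresses while the runners are executing it.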
class JumpTablePatcher : public v8::base::Thread {
 public:
  JumpTablePatcher(Address slot_start, uint32_t slot_index, Address thunk1,
                   Address thunk2)
      : Thread(Options("JumpTablePatcher")),
        slot_start_(slot_start),
        slot_index_(slot_index),
        thunks_{thunk1, thunk2} {}

  void Run() override {
    TRACE("Patcher is starting ...\n");
    constexpr int kNumberOfPatchIterations = 64;
    for (int i = 0; i < kNumberOfPatchIterations; ++i) {
      TRACE("  patch slot " V8PRIxPTR_FMT " to thunk #%d\n",
            slot_start_ + JumpTableAssembler::SlotIndexToOffset(slot_index_),
            i % 2);
      JumpTableAssembler::PatchJumpTableSlot(
          slot_start_, slot_index_, thunks_[i % 2], WasmCode::kFlushICache);
    }
    TRACE("Patcher is stopping ...\n");
  }

 private:
  Address slot_start_;
  uint32_t slot_index_;
  Address thunks_[2];
};

}  // namespace

// This test is intended to stress concurrent patching of jump-table slots. It
// uses the following setup:
// 1) Picks a particular slot of the jump-table. Slots are iterated over to
//    ensure multiple entries (at different offset alignments) are tested.
// 2) Starts multiple runners that spin through the above slot. The runners
//    use thunk code that will jump to the same jump-table slot repeatedly
//    until the {global_stop_bit} indicates a test-end condition.
// 3) Starts a patcher that repeatedly patches the jump-table slot back and
//    forth between two thunks. If there is a race then chances are high that
//    one of the runners is currently executing the jump-table slot.
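// 4) Sets the {global_stop_bit} once the patcher is done, so that all runners
//    take the thunk's exit path and terminate.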
TEST(JumpTablePatchingStress) {
  constexpr int kNumberOfRunnerThreads = 5;

#if V8_TARGET_ARCH_ARM64
  // We need the branches (from GenerateJumpTableThunk) to be within near-call
  // range of the jump table slots. The address hint to AllocateAssemblerBuffer
  // is not reliable enough to guarantee that we can always achieve this with
  // separate allocations, so for Arm64 we generate all code in a single
  // kMaxWasmCodeMemory-sized chunk.
  //
  // TODO(wasm): Currently {kMaxWasmCodeMemory} limits code sufficiently, so
  // that the jump table only supports {near_call} distances.
  STATIC_ASSERT(kMaxWasmCodeMemory >= kJumpTableSize);
  auto buffer = AllocateAssemblerBuffer(kMaxWasmCodeMemory);
  byte* thunk_slot_buffer = buffer->start() + kBufferSlotStartOffset;
#else
  auto buffer = AllocateAssemblerBuffer(kJumpTableSize);
  byte* thunk_slot_buffer = nullptr;
#endif
  std::bitset<kAvailableBufferSlots> used_thunk_slots;
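  // Keep the jump-table region writable and executable for the whole test:
  // the patcher rewrites slots in place while the runners execute them.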
  buffer->MakeWritableAndExecutable();

  // Iterate through jump-table slots to hammer at different alignments within
  // the jump-table, thereby increasing stress for variable-length ISAs.
  Address slot_start = reinterpret_cast<Address>(buffer->start());
  for (int slot = 0; slot < kJumpTableSlotCount; ++slot) {
    TRACE("Hammering on jump table slot #%d ...\n", slot);
    uint32_t slot_offset = JumpTableAssembler::SlotIndexToOffset(slot);
    std::vector<std::unique_ptr<TestingAssemblerBuffer>> thunk_buffers;
    Address thunk1 =
        GenerateJumpTableThunk(slot_start + slot_offset, thunk_slot_buffer,
                               &used_thunk_slots, &thunk_buffers);
    Address thunk2 =
        GenerateJumpTableThunk(slot_start + slot_offset, thunk_slot_buffer,
                               &used_thunk_slots, &thunk_buffers);
    TRACE("  generated thunk1: " V8PRIxPTR_FMT "\n", thunk1);
    TRACE("  generated thunk2: " V8PRIxPTR_FMT "\n", thunk2);
    JumpTableAssembler::PatchJumpTableSlot(slot_start, slot, thunk1,
                                           WasmCode::kFlushICache);

    for (auto& buf : thunk_buffers) buf->MakeExecutable();
    // Start multiple runner threads and a patcher thread that hammer on the
    // same jump-table slot concurrently.
    std::list<JumpTableRunner> runners;
    for (int runner = 0; runner < kNumberOfRunnerThreads; ++runner) {
      runners.emplace_back(slot_start + slot_offset, runner);
    }
    JumpTablePatcher patcher(slot_start, slot, thunk1, thunk2);
    global_stop_bit = 0;  // Signal runners to keep going.
    for (auto& runner : runners) runner.Start();
    patcher.Start();
    patcher.Join();
    global_stop_bit = -1;  // Signal runners to stop.
    for (auto& runner : runners) runner.Join();
  }
}

#endif  // V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_ARM ||
        // V8_TARGET_ARCH_ARM64

#undef __
#undef TRACE

}  // namespace wasm
}  // namespace internal
}  // namespace v8