LCOV - code coverage report
Current view: top level - test/cctest/wasm - test-jump-table-assembler.cc (source / functions)
Test: app.info
Date: 2019-04-17
                 Hit    Total   Coverage
Lines:            51       51    100.0 %
Functions:         6       10     60.0 %

          Line data    Source code
       1             : // Copyright 2018 the V8 project authors. All rights reserved.
       2             : // Use of this source code is governed by a BSD-style license that can be
       3             : // found in the LICENSE file.
       4             : 
       5             : #include <bitset>
       6             : 
       7             : #include "src/assembler-inl.h"
       8             : #include "src/macro-assembler-inl.h"
       9             : #include "src/simulator.h"
      10             : #include "src/utils.h"
      11             : #include "src/wasm/jump-table-assembler.h"
      12             : #include "test/cctest/cctest.h"
      13             : #include "test/common/assembler-tester.h"
      14             : 
      15             : namespace v8 {
      16             : namespace internal {
      17             : namespace wasm {
      18             : 
      19             : #if 0
      20             : #define TRACE(...) PrintF(__VA_ARGS__)
      21             : #else
      22             : #define TRACE(...)
      23             : #endif
      24             : 
      25             : #define __ masm.
      26             : 
      27             : namespace {
      28             : 
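                     : // Cross-thread stop flag polled by the generated thunks: while it is zero
                     : // the runner threads keep bouncing through the jump table; the main thread
                     : // sets it to a non-zero value to make every thunk take its exit path.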
      29             : static volatile int global_stop_bit = 0;
      30             : 
      31             : constexpr int kJumpTableSlotCount = 128;
      32             : constexpr uint32_t kJumpTableSize =
      33             :     JumpTableAssembler::SizeForNumberOfSlots(kJumpTableSlotCount);
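                     : // For orientation: SizeForNumberOfSlots is conceptually "slot count times
                     : // per-slot stride". A minimal sketch, assuming a hypothetical constant
                     : // kJumpTableSlotSize (the real implementation in
                     : // src/wasm/jump-table-assembler.h may group slots into lines or pad):
                     : //
                     : //   static constexpr uint32_t SizeForNumberOfSlots(uint32_t slot_count) {
                     : //     return slot_count * kJumpTableSlotSize;
                     : //   }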
      34             : 
      35             : #if V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_X64
      36             : constexpr uint32_t kAvailableBufferSlots =
      37             :     (kMaxWasmCodeMemory - kJumpTableSize) / AssemblerBase::kMinimalBufferSize;
      38             : constexpr uint32_t kBufferSlotStartOffset =
      39             :     RoundUp<AssemblerBase::kMinimalBufferSize>(kJumpTableSize);
      40             : #else
      41             : constexpr uint32_t kAvailableBufferSlots = 0;
      42             : #endif
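                     : // Worked example, with illustrative numbers only: if kMaxWasmCodeMemory
                     : // were 1 GiB, kMinimalBufferSize 4 KiB, and kJumpTableSize 1 KiB, then
                     : // kBufferSlotStartOffset = RoundUp<4 KiB>(1 KiB) = 4 KiB and
                     : // kAvailableBufferSlots = (2^30 - 2^10) / 2^12 = 262143, i.e. the thunks
                     : // get ~256K minimal-sized slots in the same buffer as the jump table.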
      43             : 
      44        1024 : Address GenerateJumpTableThunk(
      45             :     Address jump_target, byte* thunk_slot_buffer,
      46             :     std::bitset<kAvailableBufferSlots>* used_slots,
      47             :     std::vector<std::unique_ptr<TestingAssemblerBuffer>>* thunk_buffers) {
      48             : #if V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_X64
      49             :   // To guarantee that the branch range lies within the near-call range,
      50             :   // generate the thunk in the same (kMaxWasmCodeMemory-sized) buffer as the
      51             :   // jump_target itself.
      52             :   //
      53             :   // Allocate a slot that we haven't already used. This is necessary because
      54             :   // each test iteration expects to generate two unique addresses and we leave
      55             :   // each slot executable (and not writable).
      56             :   base::RandomNumberGenerator* rng =
      57        1024 :       CcTest::i_isolate()->random_number_generator();
      58             :   // Ensure a chance of completion without too much thrashing: while fewer than half of the slots are used, each random draw below hits a free slot with probability > 1/2, so the retry loop terminates quickly in expectation.
      59             :   DCHECK(used_slots->count() < (used_slots->size() / 2));
      60             :   int buffer_index;
      61        1024 :   do {
      62        1024 :     buffer_index = rng->NextInt(kAvailableBufferSlots);
      63        1024 :   } while (used_slots->test(buffer_index));
      64        1024 :   used_slots->set(buffer_index);
      65             :   byte* buffer =
      66        1024 :       thunk_slot_buffer + buffer_index * AssemblerBase::kMinimalBufferSize;
      67             : 
      68             : #else
      69             :   USE(thunk_slot_buffer);
      70             :   USE(used_slots);
      71             :   thunk_buffers->emplace_back(AllocateAssemblerBuffer(
      72             :       AssemblerBase::kMinimalBufferSize, GetRandomMmapAddr()));
      73             :   byte* buffer = thunk_buffers->back()->start();
      74             : #endif
      75             : 
      76             :   MacroAssembler masm(
      77             :       nullptr, AssemblerOptions{}, CodeObjectRequired::kNo,
      78        3072 :       ExternalAssemblerBuffer(buffer, AssemblerBase::kMinimalBufferSize));
      79             : 
      80        1024 :   Label exit;
      81             :   Register scratch = kReturnRegister0;
      82             :   Address stop_bit_address = reinterpret_cast<Address>(&global_stop_bit);
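                     :   // Every architecture-specific variant below emits the same thunk logic:
                     :   // check {global_stop_bit}; if it signals a stop, branch to {exit} and
                     :   // return; otherwise tail-jump to {jump_target} (the jump-table slot under
                     :   // test), which jumps straight back into a thunk, so a runner keeps
                     :   // spinning through slot -> thunk -> slot until the stop bit is set.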
      83             : #if V8_TARGET_ARCH_X64
      84             :   __ Move(scratch, stop_bit_address, RelocInfo::NONE);
      85        2048 :   __ testl(MemOperand(scratch, 0), Immediate(1));
      86        1024 :   __ j(not_zero, &exit);
      87        1024 :   __ Jump(jump_target, RelocInfo::NONE);
      88             : #elif V8_TARGET_ARCH_IA32
      89             :   __ Move(scratch, Immediate(stop_bit_address, RelocInfo::NONE));
      90             :   __ test(MemOperand(scratch, 0), Immediate(1));
      91             :   __ j(not_zero, &exit);
      92             :   __ jmp(jump_target, RelocInfo::NONE);
      93             : #elif V8_TARGET_ARCH_ARM
      94             :   __ mov(scratch, Operand(stop_bit_address, RelocInfo::NONE));
      95             :   __ ldr(scratch, MemOperand(scratch, 0));
      96             :   __ tst(scratch, Operand(1));
      97             :   __ b(ne, &exit);
      98             :   __ Jump(jump_target, RelocInfo::NONE);
      99             : #elif V8_TARGET_ARCH_ARM64
     100             :   __ Mov(scratch, Operand(stop_bit_address, RelocInfo::NONE));
     101             :   __ Ldr(scratch, MemOperand(scratch, 0));
     102             :   __ Tbnz(scratch, 0, &exit);
     103             :   __ Mov(scratch, Immediate(jump_target, RelocInfo::NONE));
     104             :   __ Br(scratch);
     105             : #elif V8_TARGET_ARCH_PPC64
     106             :   __ mov(scratch, Operand(stop_bit_address, RelocInfo::NONE));
     107             :   __ LoadP(scratch, MemOperand(scratch));
     108             :   __ cmpi(scratch, Operand::Zero());
     109             :   __ bne(&exit);
     110             :   __ mov(scratch, Operand(jump_target, RelocInfo::NONE));
     111             :   __ Jump(scratch);
     112             : #elif V8_TARGET_ARCH_S390X
     113             :   __ mov(scratch, Operand(stop_bit_address, RelocInfo::NONE));
     114             :   __ LoadP(scratch, MemOperand(scratch));
     115             :   __ CmpP(scratch, Operand(0));
     116             :   __ bne(&exit);
     117             :   __ mov(scratch, Operand(jump_target, RelocInfo::NONE));
     118             :   __ Jump(scratch);
     119             : #elif V8_TARGET_ARCH_MIPS64
     120             :   __ li(scratch, Operand(stop_bit_address, RelocInfo::NONE));
     121             :   __ Lw(scratch, MemOperand(scratch, 0));
     122             :   __ Branch(&exit, ne, scratch, Operand(zero_reg));
     123             :   __ Jump(jump_target, RelocInfo::NONE);
     124             : #elif V8_TARGET_ARCH_MIPS
     125             :   __ li(scratch, Operand(stop_bit_address, RelocInfo::NONE));
     126             :   __ lw(scratch, MemOperand(scratch, 0));
     127             :   __ Branch(&exit, ne, scratch, Operand(zero_reg));
     128             :   __ Jump(jump_target, RelocInfo::NONE);
     129             : #else
     130             : #error Unsupported architecture
     131             : #endif
     132        1024 :   __ bind(&exit);
     133        1024 :   __ Ret();
     134             : 
     135        1024 :   CodeDesc desc;
     136             :   masm.GetCode(nullptr, &desc);
     137        2048 :   return reinterpret_cast<Address>(buffer);
     138             : }
     139             : 
     140        2560 : class JumpTableRunner : public v8::base::Thread {
     141             :  public:
     142             :   JumpTableRunner(Address slot_address, int runner_id)
     143             :       : Thread(Options("JumpTableRunner")),
     144             :         slot_address_(slot_address),
     145        2560 :         runner_id_(runner_id) {}
     146             : 
     147        2556 :   void Run() override {
     148             :     TRACE("Runner #%d is starting ...\n", runner_id_);
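                     :     // Enter the jump-table slot as if it were a void() function. The call
                     :     // returns only after {global_stop_bit} becomes non-zero and the thunk
                     :     // currently installed in the slot takes its Ret() path.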
     149        2556 :     GeneratedCode<void>::FromAddress(CcTest::i_isolate(), slot_address_).Call();
     150             :     TRACE("Runner #%d is stopping ...\n", runner_id_);
     151             :     USE(runner_id_);
     152        2439 :   }
     153             : 
     154             :  private:
     155             :   Address slot_address_;
     156             :   int runner_id_;
     157             : };
     158             : 
     159         512 : class JumpTablePatcher : public v8::base::Thread {
     160             :  public:
     161             :   JumpTablePatcher(Address slot_start, uint32_t slot_index, Address thunk1,
     162             :                    Address thunk2)
     163             :       : Thread(Options("JumpTablePatcher")),
     164             :         slot_start_(slot_start),
     165             :         slot_index_(slot_index),
     166         512 :         thunks_{thunk1, thunk2} {}
     167             : 
     168         512 :   void Run() override {
     169             :     TRACE("Patcher is starting ...\n");
     170             :     constexpr int kNumberOfPatchIterations = 64;
     171       66048 :     for (int i = 0; i < kNumberOfPatchIterations; ++i) {
     172             :       TRACE("  patch slot " V8PRIxPTR_FMT " to thunk #%d\n",
     173             :             slot_start_ + JumpTableAssembler::SlotIndexToOffset(slot_index_),
     174             :             i % 2);
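                     :       // {WasmCode::kFlushICache} requests an instruction-cache flush after
                     :       // the slot is rewritten, so runners cannot keep executing stale bytes
                     :       // on architectures without coherent instruction caches.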
     175       32768 :       JumpTableAssembler::PatchJumpTableSlot(
     176       65536 :           slot_start_, slot_index_, thunks_[i % 2], WasmCode::kFlushICache);
     177             :     }
     178             :     TRACE("Patcher is stopping ...\n");
     179         512 :   }
     180             : 
     181             :  private:
     182             :   Address slot_start_;
     183             :   uint32_t slot_index_;
     184             :   Address thunks_[2];
     185             : };
     186             : 
     187             : }  // namespace
     188             : 
     189             : // This test is intended to stress concurrent patching of jump-table slots. It
     190             : // uses the following setup:
     191             : //   1) Picks a particular slot of the jump-table. Slots are iterated over to
     192             : //      ensure multiple entries (at different offset alignments) are tested.
     193             : //   2) Starts multiple runners that spin through the above slot. The runners
     194             : //      use thunk code that will jump to the same jump-table slot repeatedly
     195             : //      until the {global_stop_bit} indicates a test-end condition.
      196             : //   3) Starts a patcher that repeatedly patches the jump-table slot back and
      197             : //      forth between two thunks. If there is a race, chances are high that
     198             : //      one of the runners is currently executing the jump-table slot.
     199       26643 : TEST(JumpTablePatchingStress) {
     200             :   constexpr int kNumberOfRunnerThreads = 5;
     201             : 
     202             : #if V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_X64
     203             :   // We need the branches (from GenerateJumpTableThunk) to be within near-call
     204             :   // range of the jump table slots. The address hint to AllocateAssemblerBuffer
     205             :   // is not reliable enough to guarantee that we can always achieve this with
      206             :   // separate allocations, so on Arm64 and x64 we generate all code in a
      207             :   // single kMaxWasmCodeMemory-sized chunk.
     208             :   //
      209             :   // TODO(wasm): Currently {kMaxWasmCodeMemory} bounds the code space tightly
      210             :   // enough that the jump table only needs to support {near_call} distances.
     211             :   STATIC_ASSERT(kMaxWasmCodeMemory >= kJumpTableSize);
     212             :   auto buffer = AllocateAssemblerBuffer(kMaxWasmCodeMemory);
     213           4 :   byte* thunk_slot_buffer = buffer->start() + kBufferSlotStartOffset;
     214             : #else
     215             :   auto buffer = AllocateAssemblerBuffer(kJumpTableSize);
     216             :   byte* thunk_slot_buffer = nullptr;
     217             : #endif
     218             : 
     219           4 :   std::bitset<kAvailableBufferSlots> used_thunk_slots;
     220           4 :   buffer->MakeWritableAndExecutable();
     221             : 
     222             :   // Iterate through jump-table slots to hammer at different alignments within
     223             :   // the jump-table, thereby increasing stress for variable-length ISAs.
     224           4 :   Address slot_start = reinterpret_cast<Address>(buffer->start());
     225        1028 :   for (int slot = 0; slot < kJumpTableSlotCount; ++slot) {
     226             :     TRACE("Hammering on jump table slot #%d ...\n", slot);
     227         512 :     uint32_t slot_offset = JumpTableAssembler::SlotIndexToOffset(slot);
     228         512 :     std::vector<std::unique_ptr<TestingAssemblerBuffer>> thunk_buffers;
     229             :     Address thunk1 =
     230         512 :         GenerateJumpTableThunk(slot_start + slot_offset, thunk_slot_buffer,
     231         512 :                                &used_thunk_slots, &thunk_buffers);
     232             :     Address thunk2 =
     233             :         GenerateJumpTableThunk(slot_start + slot_offset, thunk_slot_buffer,
     234         512 :                                &used_thunk_slots, &thunk_buffers);
     235             :     TRACE("  generated thunk1: " V8PRIxPTR_FMT "\n", thunk1);
     236             :     TRACE("  generated thunk2: " V8PRIxPTR_FMT "\n", thunk2);
     237             :     JumpTableAssembler::PatchJumpTableSlot(slot_start, slot, thunk1,
     238         512 :                                            WasmCode::kFlushICache);
     239             : 
     240         512 :     for (auto& buf : thunk_buffers) buf->MakeExecutable();
     241             :     // Start multiple runner threads and a patcher thread that hammer on the
     242             :     // same jump-table slot concurrently.
     243             :     std::list<JumpTableRunner> runners;
     244        3072 :     for (int runner = 0; runner < kNumberOfRunnerThreads; ++runner) {
     245        2560 :       runners.emplace_back(slot_start + slot_offset, runner);
     246             :     }
     247             :     JumpTablePatcher patcher(slot_start, slot, thunk1, thunk2);
     248         512 :     global_stop_bit = 0;  // Signal runners to keep going.
     249        3072 :     for (auto& runner : runners) runner.Start();
     250         512 :     patcher.Start();
     251         512 :     patcher.Join();
     252         512 :     global_stop_bit = -1;  // Signal runners to stop.
     253        3072 :     for (auto& runner : runners) runner.Join();
     254             :   }
     255           4 : }
     256             : 
     257             : #undef __
     258             : #undef TRACE
     259             : 
     260             : }  // namespace wasm
     261             : }  // namespace internal
     262       79917 : }  // namespace v8

Generated by: LCOV version 1.10