// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/compiler/gap-resolver.h"

#include <algorithm>
#include <functional>
#include <set>

namespace v8 {
namespace internal {
namespace compiler {

namespace {

#define REP_BIT(rep) (1 << static_cast<int>(rep))

const int kFloat32Bit = REP_BIT(MachineRepresentation::kFloat32);
const int kFloat64Bit = REP_BIT(MachineRepresentation::kFloat64);

inline bool Blocks(MoveOperands* move, InstructionOperand destination) {
  return !move->IsEliminated() && move->source().InterferesWith(destination);
}

// Splits an FP move between two location operands into the equivalent series
// of moves between smaller sub-operands, e.g. a double move to two single
// moves. This helps reduce the number of cycles that would normally occur
// under FP aliasing, and makes swaps much easier to implement.
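// Illustration (assumed register codes, with kPointerSize == kFloatSize):
// under ARM-style aliasing, splitting the kFloat64 move d2 -> d0 with
// smaller_rep == kFloat32 produces the kFloat32 moves s4 -> s0 and s5 -> s1;
// the first fragment reuses 'move' and the second is appended to 'moves'.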
MoveOperands* Split(MoveOperands* move, MachineRepresentation smaller_rep,
                    ParallelMove* moves) {
  DCHECK(!kSimpleFPAliasing);
  // Splitting is only possible when the slot size is the same as float size.
  DCHECK_EQ(kPointerSize, kFloatSize);
  const LocationOperand& src_loc = LocationOperand::cast(move->source());
  const LocationOperand& dst_loc = LocationOperand::cast(move->destination());
  MachineRepresentation dst_rep = dst_loc.representation();
  DCHECK_NE(smaller_rep, dst_rep);
  auto src_kind = src_loc.location_kind();
  auto dst_kind = dst_loc.location_kind();

  int aliases =
      1 << (ElementSizeLog2Of(dst_rep) - ElementSizeLog2Of(smaller_rep));
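  // For example, dst_rep == kFloat64 (element size log2 == 3) and
  // smaller_rep == kFloat32 (log2 == 2) give aliases == 2: the double splits
  // into two float fragments.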
  int base = -1;
  USE(base);
  DCHECK_EQ(aliases, RegisterConfiguration::Turbofan()->GetAliases(
                         dst_rep, 0, smaller_rep, &base));

  int src_index = -1;
  int slot_size = (1 << ElementSizeLog2Of(smaller_rep)) / kPointerSize;
  int src_step = 1;
  if (src_kind == LocationOperand::REGISTER) {
    src_index = src_loc.register_code() * aliases;
  } else {
    src_index = src_loc.index();
    // For operands that occupy multiple slots, the index refers to the last
    // slot. On little-endian architectures, we start at the high slot and use
    // a negative step so that register-to-slot moves are in the correct order.
    src_step = -slot_size;
  }
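  // Illustration (assumed index): a kFloat64 stack operand at index 7 splits
  // into kFloat32 fragments at indices 7 and then 6 (slot_size == 1), walking
  // down from the high slot as described above.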
  int dst_index = -1;
  int dst_step = 1;
  if (dst_kind == LocationOperand::REGISTER) {
    dst_index = dst_loc.register_code() * aliases;
  } else {
    dst_index = dst_loc.index();
    dst_step = -slot_size;
  }

  // Reuse 'move' for the first fragment. It is not pending.
  move->set_source(AllocatedOperand(src_kind, smaller_rep, src_index));
  move->set_destination(AllocatedOperand(dst_kind, smaller_rep, dst_index));
  // Add the remaining fragment moves.
  for (int i = 1; i < aliases; ++i) {
    src_index += src_step;
    dst_index += dst_step;
    moves->AddMove(AllocatedOperand(src_kind, smaller_rep, src_index),
                   AllocatedOperand(dst_kind, smaller_rep, dst_index));
  }
  // Return the first fragment.
  return move;
}

}  // namespace

void GapResolver::Resolve(ParallelMove* moves) {
  // Clear redundant moves, and collect FP move representations if aliasing
  // is non-simple.
  int reps = 0;
  for (size_t i = 0; i < moves->size();) {
    MoveOperands* move = (*moves)[i];
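    // Remove a redundant move via swap-and-pop: overwrite it with the last
    // move and shrink the vector. 'i' is deliberately not advanced, so the
    // element just moved into slot 'i' is examined on the next iteration.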
    if (move->IsRedundant()) {
      (*moves)[i] = moves->back();
      moves->pop_back();
      continue;
    }
    i++;
    if (!kSimpleFPAliasing && move->destination().IsFPRegister()) {
      reps |=
          REP_BIT(LocationOperand::cast(move->destination()).representation());
    }
  }

  if (!kSimpleFPAliasing) {
    if (reps && !base::bits::IsPowerOfTwo32(reps)) {
      // Start with the smallest FP moves, so we never encounter smaller moves
      // in the middle of a cycle of larger moves.
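      // (Performing all kFloat32 moves before any kFloat64 ones means that by
      // the time a cycle of larger moves is broken up by Split, no unresolved
      // smaller move can be left stranded inside that cycle.)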
      if ((reps & kFloat32Bit) != 0) {
        split_rep_ = MachineRepresentation::kFloat32;
        for (size_t i = 0; i < moves->size(); ++i) {
          auto move = (*moves)[i];
          if (!move->IsEliminated() && move->destination().IsFloatRegister())
            PerformMove(moves, move);
        }
      }
      if ((reps & kFloat64Bit) != 0) {
        split_rep_ = MachineRepresentation::kFloat64;
        for (size_t i = 0; i < moves->size(); ++i) {
          auto move = (*moves)[i];
          if (!move->IsEliminated() && move->destination().IsDoubleRegister())
            PerformMove(moves, move);
        }
      }
    }
    split_rep_ = MachineRepresentation::kSimd128;
  }

  for (size_t i = 0; i < moves->size(); ++i) {
    auto move = (*moves)[i];
    if (!move->IsEliminated()) PerformMove(moves, move);
  }
}

void GapResolver::PerformMove(ParallelMove* moves, MoveOperands* move) {
  // Each call to this function performs a move and deletes it from the move
  // graph.  We first recursively perform any move blocking this one.  We mark a
  // move as "pending" on entry to PerformMove in order to detect cycles in the
  // move graph.  We use operand swaps to resolve cycles, which means that a
  // call to PerformMove could change any source operand in the move graph.
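  // Illustration (assumed operands): for the cycle {A -> B, B -> A},
  // PerformMove on A -> B marks it pending and recurses into B -> A; there the
  // pending move is found as the blocker, a single AssembleSwap resolves the
  // cycle, and the remaining move becomes redundant and is eliminated.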
  DCHECK(!move->IsPending());
  DCHECK(!move->IsRedundant());

  // Clear this move's destination to indicate a pending move.  The actual
  // destination is saved on the side.
  InstructionOperand source = move->source();
  DCHECK(!source.IsInvalid());  // Or else it will look eliminated.
  InstructionOperand destination = move->destination();
  move->SetPending();

  // We may need to split moves between FP locations differently.
  const bool is_fp_loc_move =
      !kSimpleFPAliasing && destination.IsFPLocationOperand();

  // Perform a depth-first traversal of the move graph to resolve dependencies.
  // Any unperformed, unpending move with a source the same as this one's
  // destination blocks this one, so we recursively perform all such moves.
  for (size_t i = 0; i < moves->size(); ++i) {
    auto other = (*moves)[i];
    if (other->IsEliminated()) continue;
    if (other->IsPending()) continue;
    if (other->source().InterferesWith(destination)) {
      if (is_fp_loc_move &&
          LocationOperand::cast(other->source()).representation() >
              split_rep_) {
        // 'other' must also be an FP location move. Break it into fragments
        // of the same size as 'move'. 'other' is set to one of the fragments,
        // and the rest are appended to 'moves'.
        other = Split(other, split_rep_, moves);
        // 'other' may not block destination now.
        if (!other->source().InterferesWith(destination)) continue;
      }
      // Though PerformMove can change any source operand in the move graph,
      // this call cannot create a blocking move via a swap (this loop does not
      // miss any).  Assume there is a non-blocking move with source A and this
      // move is blocked on source B and there is a swap of A and B.  Then A and
      // B must be involved in the same cycle (or they would not be swapped).
      // Since this move's destination is B and there is only a single incoming
      // edge to an operand, this move must also be involved in the same cycle.
      // In that case, the blocking move will be created but will be "pending"
      // when we return from PerformMove.
      PerformMove(moves, other);
    }
  }

  // This move's source may have changed due to swaps to resolve cycles and so
  // it may now be the last move in the cycle.  If so remove it.
  source = move->source();
  if (source.EqualsCanonicalized(destination)) {
    move->Eliminate();
    return;
  }

  // We are about to resolve this move and don't need it marked as pending, so
  // restore its destination.
  move->set_destination(destination);

  // The move may be blocked on at most one pending move, in which case we
  // have a cycle.  Search for such a blocking move and perform a swap to
  // resolve it.
  auto blocker =
      std::find_if(moves->begin(), moves->end(), [&](MoveOperands* curr) {
        return Blocks(curr, destination);
      });
  if (blocker == moves->end()) {
    // The easy case: This move is not blocked.
    assembler_->AssembleMove(&source, &destination);
    move->Eliminate();
    return;
  }

  // Ensure source is a register or both are stack slots, to limit swap cases.
  if (source.IsStackSlot() || source.IsFPStackSlot()) {
    std::swap(source, destination);
  }
  assembler_->AssembleSwap(&source, &destination);
  move->Eliminate();

  // Update outstanding moves whose source may now have been moved.
  if (is_fp_loc_move) {
    // We may have to split larger moves.
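    // E.g. (assumed operands): after swapping A and B, a move that read from
    // A must now read from B, so its source is redirected below; interfering
    // sources wider than split_rep_ are first broken into fragments.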
    for (size_t i = 0; i < moves->size(); ++i) {
      auto other = (*moves)[i];
      if (other->IsEliminated()) continue;
      if (source.InterferesWith(other->source())) {
        if (LocationOperand::cast(other->source()).representation() >
            split_rep_) {
          other = Split(other, split_rep_, moves);
          if (!source.InterferesWith(other->source())) continue;
        }
        other->set_source(destination);
      } else if (destination.InterferesWith(other->source())) {
        if (LocationOperand::cast(other->source()).representation() >
            split_rep_) {
          other = Split(other, split_rep_, moves);
          if (!destination.InterferesWith(other->source())) continue;
        }
        other->set_source(source);
      }
    }
  } else {
    for (auto other : *moves) {
      if (other->IsEliminated()) continue;
      if (source.EqualsCanonicalized(other->source())) {
        other->set_source(destination);
      } else if (destination.EqualsCanonicalized(other->source())) {
        other->set_source(source);
      }
    }
  }
}

}  // namespace compiler
}  // namespace internal
}  // namespace v8
