// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/compiler/backend/gap-resolver.h"

#include <algorithm>
#include <set>

#include "src/base/enum-set.h"
#include "src/register-configuration.h"

namespace v8 {
namespace internal {
namespace compiler {

namespace {

// Splits an FP move between two location operands into the equivalent series
// of moves between smaller sub-operands, e.g. a double move into two single
// moves. This helps reduce the number of cycles that would normally occur
// under FP aliasing, and makes swaps much easier to implement.
MoveOperands* Split(MoveOperands* move, MachineRepresentation smaller_rep,
                    ParallelMove* moves) {
  DCHECK(!kSimpleFPAliasing);
  // Splitting is only possible when the slot size is the same as float size.
  DCHECK_EQ(kSystemPointerSize, kFloatSize);
  const LocationOperand& src_loc = LocationOperand::cast(move->source());
  const LocationOperand& dst_loc = LocationOperand::cast(move->destination());
  MachineRepresentation dst_rep = dst_loc.representation();
  DCHECK_NE(smaller_rep, dst_rep);
  auto src_kind = src_loc.location_kind();
  auto dst_kind = dst_loc.location_kind();

  int aliases =
      1 << (ElementSizeLog2Of(dst_rep) - ElementSizeLog2Of(smaller_rep));
  int base = -1;
  USE(base);
  DCHECK_EQ(aliases, RegisterConfiguration::Default()->GetAliases(
                         dst_rep, 0, smaller_rep, &base));

  int src_index = -1;
  int slot_size = (1 << ElementSizeLog2Of(smaller_rep)) / kSystemPointerSize;
  int src_step = 1;
  if (src_kind == LocationOperand::REGISTER) {
    src_index = src_loc.register_code() * aliases;
  } else {
    src_index = src_loc.index();
    // For operands that occupy multiple slots, the index refers to the last
    // slot. On little-endian architectures, we start at the high slot and use
    // a negative step so that register-to-slot moves are in the correct order.
    src_step = -slot_size;
  }
  int dst_index = -1;
  int dst_step = 1;
  if (dst_kind == LocationOperand::REGISTER) {
    dst_index = dst_loc.register_code() * aliases;
  } else {
    dst_index = dst_loc.index();
    dst_step = -slot_size;
  }

  // Reuse 'move' for the first fragment. It is not pending.
  move->set_source(AllocatedOperand(src_kind, smaller_rep, src_index));
  move->set_destination(AllocatedOperand(dst_kind, smaller_rep, dst_index));
  // Add the remaining fragment moves.
  for (int i = 1; i < aliases; ++i) {
    src_index += src_step;
    dst_index += dst_step;
    moves->AddMove(AllocatedOperand(src_kind, smaller_rep, src_index),
                   AllocatedOperand(dst_kind, smaller_rep, dst_index));
  }
  // Return the first fragment.
  return move;
}
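
// Illustration (hypothetical aliasing, in the style of 32-bit ARM where one
// kFloat64 register overlaps two kFloat32 registers): splitting the move
//   d1 -> d2   (kFloat64)
// with smaller_rep == kFloat32 produces the two fragments
//   s2 -> s4   (kFloat32, reuses 'move')
//   s3 -> s5   (kFloat32, appended to 'moves')
// so a cycle among double moves can later be resolved with float-sized swaps.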

enum MoveOperandKind : uint8_t { kConstant, kGpReg, kFpReg, kStack };

MoveOperandKind GetKind(const InstructionOperand& move) {
  if (move.IsConstant()) return kConstant;
  LocationOperand loc_op = LocationOperand::cast(move);
  if (loc_op.location_kind() != LocationOperand::REGISTER) return kStack;
  return IsFloatingPoint(loc_op.representation()) ? kFpReg : kGpReg;
}

}  // namespace

void GapResolver::Resolve(ParallelMove* moves) {
  base::EnumSet<MoveOperandKind, uint8_t> source_kinds;
  base::EnumSet<MoveOperandKind, uint8_t> destination_kinds;

  // Remove redundant moves, collect source kinds and destination kinds to
  // detect simple non-overlapping moves, and collect FP move representations
  // if aliasing is non-simple.
  int fp_reps = 0;
  size_t nmoves = moves->size();
  for (size_t i = 0; i < nmoves;) {
    MoveOperands* move = (*moves)[i];
    if (move->IsRedundant()) {
      nmoves--;
      if (i < nmoves) (*moves)[i] = (*moves)[nmoves];
      continue;
    }
    i++;
    source_kinds.Add(GetKind(move->source()));
    destination_kinds.Add(GetKind(move->destination()));
    if (!kSimpleFPAliasing && move->destination().IsFPRegister()) {
      fp_reps |= RepresentationBit(
          LocationOperand::cast(move->destination()).representation());
    }
  }
  if (nmoves != moves->size()) moves->resize(nmoves);

  if ((source_kinds & destination_kinds).empty() || moves->size() < 2) {
    // Fast path for non-conflicting parallel moves.
    for (MoveOperands* move : *moves) {
      assembler_->AssembleMove(&move->source(), &move->destination());
    }
    return;
  }
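
  // Illustrative example of the fast-path test: if every source is a constant
  // or a gp register and every destination is a stack slot, source_kinds is
  // {kConstant, kGpReg} and destination_kinds is {kStack}. The intersection is
  // empty, so no move can overwrite another move's source, and the moves were
  // assembled above in arbitrary order.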

  if (!kSimpleFPAliasing) {
    if (fp_reps && !base::bits::IsPowerOfTwo(fp_reps)) {
      // Start with the smallest FP moves, so we never encounter smaller moves
      // in the middle of a cycle of larger moves.
      if ((fp_reps & RepresentationBit(MachineRepresentation::kFloat32)) != 0) {
        split_rep_ = MachineRepresentation::kFloat32;
        for (size_t i = 0; i < moves->size(); ++i) {
          auto move = (*moves)[i];
          if (!move->IsEliminated() && move->destination().IsFloatRegister())
            PerformMove(moves, move);
        }
      }
      if ((fp_reps & RepresentationBit(MachineRepresentation::kFloat64)) != 0) {
        split_rep_ = MachineRepresentation::kFloat64;
        for (size_t i = 0; i < moves->size(); ++i) {
          auto move = (*moves)[i];
          if (!move->IsEliminated() && move->destination().IsDoubleRegister())
            PerformMove(moves, move);
        }
      }
    }
    split_rep_ = MachineRepresentation::kSimd128;
  }
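
  // Note: fp_reps holds one bit per FP representation seen among the
  // destinations, so a non-power-of-two value means mixed FP sizes. Handling
  // kFloat32 moves before kFloat64 ones (and leaving kSimd128 for the general
  // pass below) ensures that, per the comment above, a cycle of larger moves
  // never traps smaller moves in its middle.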

  for (size_t i = 0; i < moves->size(); ++i) {
    auto move = (*moves)[i];
    if (!move->IsEliminated()) PerformMove(moves, move);
  }
}
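
// Usage sketch (illustrative, simplified from how the code generator drives
// this class): for each gap position of each instruction, the non-empty
// ParallelMove is handed to Resolve(), which lowers the parallel assignment
// into the sequential AssembleMove/AssembleSwap calls implemented by the
// architecture-specific CodeGenerator.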

void GapResolver::PerformMove(ParallelMove* moves, MoveOperands* move) {
  // Each call to this function performs a move and deletes it from the move
  // graph. We first recursively perform any move blocking this one. We mark
  // a move as "pending" on entry to PerformMove in order to detect cycles in
  // the move graph. We use operand swaps to resolve cycles, which means that
  // a call to PerformMove could change any source operand in the move graph.
  DCHECK(!move->IsPending());
  DCHECK(!move->IsRedundant());

  // Clear this move's destination to indicate a pending move. The actual
  // destination is saved on the side.
  InstructionOperand source = move->source();
  DCHECK(!source.IsInvalid());  // Or else it will look eliminated.
  InstructionOperand destination = move->destination();
  move->SetPending();

  // We may need to split moves between FP locations differently.
  const bool is_fp_loc_move =
      !kSimpleFPAliasing && destination.IsFPLocationOperand();

  // Perform a depth-first traversal of the move graph to resolve
  // dependencies. Any unperformed, unpending move with a source the same as
  // this one's destination blocks this one, so we recursively perform all
  // such moves.
  for (size_t i = 0; i < moves->size(); ++i) {
    auto other = (*moves)[i];
    if (other->IsEliminated()) continue;
    if (other->IsPending()) continue;
    if (other->source().InterferesWith(destination)) {
      if (is_fp_loc_move &&
          LocationOperand::cast(other->source()).representation() >
              split_rep_) {
        // 'other' must also be an FP location move. Break it into fragments
        // of the same size as 'move'. 'other' is set to one of the fragments,
        // and the rest are appended to 'moves'.
        other = Split(other, split_rep_, moves);
        // 'other' may no longer block destination now.
        if (!other->source().InterferesWith(destination)) continue;
      }
      // Though PerformMove can change any source operand in the move graph,
      // this call cannot create a blocking move via a swap (this loop does
      // not miss any). Assume there is a non-blocking move with source A,
      // this move is blocked on source B, and there is a swap of A and B.
      // Then A and B must be involved in the same cycle (or they would not
      // be swapped). Since this move's destination is B and there is only a
      // single incoming edge to an operand, this move must also be involved
      // in the same cycle. In that case, the blocking move will be created
      // but will be "pending" when we return from PerformMove.
      PerformMove(moves, other);
    }
  }
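
  // Cycle illustration: for the parallel move {r1 -> r2, r2 -> r1},
  // PerformMove on the first move marks it pending and recurses into the
  // second. The recursive call finds the pending first move still blocking
  // its destination, resolves the cycle with a single AssembleSwap below,
  // and the first move then degenerates to a self-move and is eliminated.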

  // This move's source may have changed due to swaps to resolve cycles and so
  // it may now be the last move in the cycle. If so, remove it.
  source = move->source();
  if (source.EqualsCanonicalized(destination)) {
    move->Eliminate();
    return;
  }

  // We are about to resolve this move and don't need it marked as pending, so
  // restore its destination.
  move->set_destination(destination);

  // The move may be blocked on at most one pending move, in which case we
  // have a cycle. Search for such a blocking move and perform a swap to
  // resolve it.
  auto blocker =
      std::find_if(moves->begin(), moves->end(), [&](MoveOperands* move) {
        return !move->IsEliminated() &&
               move->source().InterferesWith(destination);
      });
  if (blocker == moves->end()) {
    // The easy case: this move is not blocked.
    assembler_->AssembleMove(&source, &destination);
    move->Eliminate();
    return;
  }

  // Ensure source is a register or both are stack slots, to limit swap cases.
  if (source.IsStackSlot() || source.IsFPStackSlot()) {
    std::swap(source, destination);
  }
  assembler_->AssembleSwap(&source, &destination);
  move->Eliminate();
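
  // Illustration: if the blocked move is [stack:0] -> r1, source and
  // destination are exchanged first, so AssembleSwap always receives either a
  // register as its first operand or two stack slots. This keeps the set of
  // swap patterns each architecture backend must implement small.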

  // Update outstanding moves whose source may now have been moved.
  if (is_fp_loc_move) {
    // We may have to split larger moves.
    for (size_t i = 0; i < moves->size(); ++i) {
      auto other = (*moves)[i];
      if (other->IsEliminated()) continue;
      if (source.InterferesWith(other->source())) {
        if (LocationOperand::cast(other->source()).representation() >
            split_rep_) {
          other = Split(other, split_rep_, moves);
          if (!source.InterferesWith(other->source())) continue;
        }
        other->set_source(destination);
      } else if (destination.InterferesWith(other->source())) {
        if (LocationOperand::cast(other->source()).representation() >
            split_rep_) {
          other = Split(other, split_rep_, moves);
          if (!destination.InterferesWith(other->source())) continue;
        }
        other->set_source(source);
      }
    }
  } else {
    for (auto other : *moves) {
      if (other->IsEliminated()) continue;
      if (source.EqualsCanonicalized(other->source())) {
        other->set_source(destination);
      } else if (destination.EqualsCanonicalized(other->source())) {
        other->set_source(source);
      }
    }
  }
}
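
// Illustration of the fix-up loops above: after swapping r1 and r2, an
// outstanding move r1 -> r3 would read a clobbered value, so its source is
// rewritten to r2 (and symmetrically for moves reading r2). Under non-simple
// FP aliasing, an outstanding wider move that merely overlaps the swapped
// locations is first split into fragments of split_rep_ size before the same
// rewrite is applied.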
}  // namespace compiler
}  // namespace internal
}  // namespace v8
