LCOV - code coverage report
Current view: src/compiler/backend/gap-resolver.cc (source / functions)
Test: app.info | Date: 2019-03-21 | Line coverage: 38/38 (100.0%) | Function coverage: 3/3 (100.0%)

// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/compiler/backend/gap-resolver.h"

#include <algorithm>
#include <set>

#include "src/base/enum-set.h"
#include "src/register-configuration.h"

namespace v8 {
namespace internal {
namespace compiler {

namespace {

// Splits a FP move between two location operands into the equivalent series of
// moves between smaller sub-operands, e.g. a double move to two single moves.
// This helps reduce the number of cycles that would normally occur under FP
// aliasing, and makes swaps much easier to implement.
MoveOperands* Split(MoveOperands* move, MachineRepresentation smaller_rep,
                    ParallelMove* moves) {
  DCHECK(!kSimpleFPAliasing);
  // Splitting is only possible when the slot size is the same as float size.
  DCHECK_EQ(kSystemPointerSize, kFloatSize);
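  // (The equality can only hold on a 32-bit target, where each stack slot
  // holds exactly one float; among V8 ports, non-simple FP aliasing means
  // 32-bit ARM in practice.)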
  const LocationOperand& src_loc = LocationOperand::cast(move->source());
  const LocationOperand& dst_loc = LocationOperand::cast(move->destination());
  MachineRepresentation dst_rep = dst_loc.representation();
  DCHECK_NE(smaller_rep, dst_rep);
  auto src_kind = src_loc.location_kind();
  auto dst_kind = dst_loc.location_kind();

  int aliases =
      1 << (ElementSizeLog2Of(dst_rep) - ElementSizeLog2Of(smaller_rep));
  int base = -1;
  USE(base);
  DCHECK_EQ(aliases, RegisterConfiguration::Default()->GetAliases(
                         dst_rep, 0, smaller_rep, &base));
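  // E.g. splitting kFloat64 into kFloat32 fragments gives
  // 1 << (3 - 2) == 2 aliases; splitting kSimd128 into kFloat32 gives
  // 1 << (4 - 2) == 4.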

  int src_index = -1;
  int slot_size = (1 << ElementSizeLog2Of(smaller_rep)) / kSystemPointerSize;
  int src_step = 1;
  if (src_kind == LocationOperand::REGISTER) {
    src_index = src_loc.register_code() * aliases;
  } else {
    src_index = src_loc.index();
    // For operands that occupy multiple slots, the index refers to the last
    // slot. On little-endian architectures, we start at the high slot and use a
    // negative step so that register-to-slot moves are in the correct order.
    src_step = -slot_size;
  }
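  // E.g. a kFloat64 stack operand at index 5, split into kFloat32 fragments,
  // is visited as slot 5 and then slot 4 (slot_size is 1 here because
  // kFloatSize == kSystemPointerSize).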
  int dst_index = -1;
  int dst_step = 1;
  if (dst_kind == LocationOperand::REGISTER) {
    dst_index = dst_loc.register_code() * aliases;
  } else {
    dst_index = dst_loc.index();
    dst_step = -slot_size;
  }

  // Reuse 'move' for the first fragment. It is not pending.
  move->set_source(AllocatedOperand(src_kind, smaller_rep, src_index));
  move->set_destination(AllocatedOperand(dst_kind, smaller_rep, dst_index));
  // Add the remaining fragment moves.
  for (int i = 1; i < aliases; ++i) {
    src_index += src_step;
    dst_index += dst_step;
    moves->AddMove(AllocatedOperand(src_kind, smaller_rep, src_index),
                   AllocatedOperand(dst_kind, smaller_rep, dst_index));
  }
  // Return the first fragment.
  return move;
}
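
// Illustrative trace, assuming ARM-style aliasing where double register dN
// overlaps single registers s(2N) and s(2N+1): splitting the register move
// d2 <- d7 with smaller_rep == kFloat32 rewrites it in place as s4 <- s14
// and appends the second fragment s5 <- s15 to 'moves'.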

enum MoveOperandKind : uint8_t { kConstant, kGpReg, kFpReg, kStack };

MoveOperandKind GetKind(const InstructionOperand& move) {
  if (move.IsConstant()) return kConstant;
  LocationOperand loc_op = LocationOperand::cast(move);
  if (loc_op.location_kind() != LocationOperand::REGISTER) return kStack;
  return IsFloatingPoint(loc_op.representation()) ? kFpReg : kGpReg;
}

}  // namespace

void GapResolver::Resolve(ParallelMove* moves) {
  base::EnumSet<MoveOperandKind, uint8_t> source_kinds;
  base::EnumSet<MoveOperandKind, uint8_t> destination_kinds;

  // Remove redundant moves, collect source kinds and destination kinds to
  // detect simple non-overlapping moves, and collect FP move representations if
  // aliasing is non-simple.
  int fp_reps = 0;
  for (auto it = moves->begin(); it != moves->end();) {
    MoveOperands* move = *it;
    if (move->IsRedundant()) {
      it = moves->erase(it);
      continue;
    }
    source_kinds.Add(GetKind(move->source()));
    destination_kinds.Add(GetKind(move->destination()));
    if (!kSimpleFPAliasing && move->destination().IsFPRegister()) {
      fp_reps |= RepresentationBit(
          LocationOperand::cast(move->destination()).representation());
    }
    ++it;
  }

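  // If the two kind sets are disjoint, no move's destination can overlap any
  // other move's source. E.g. when every source is a constant or a gp
  // register and every destination is a stack slot, the moves can be emitted
  // in any order.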
  if ((source_kinds & destination_kinds).empty() || moves->size() < 2) {
    // Fast path for non-conflicting parallel moves.
    for (MoveOperands* move : *moves) {
      assembler_->AssembleMove(&move->source(), &move->destination());
    }
    return;
  }

  if (!kSimpleFPAliasing) {
    if (fp_reps && !base::bits::IsPowerOfTwo(fp_reps)) {
      // Start with the smallest FP moves, so we never encounter smaller moves
      // in the middle of a cycle of larger moves.
      if ((fp_reps & RepresentationBit(MachineRepresentation::kFloat32)) != 0) {
        split_rep_ = MachineRepresentation::kFloat32;
        for (size_t i = 0; i < moves->size(); ++i) {
          auto move = (*moves)[i];
          if (!move->IsEliminated() && move->destination().IsFloatRegister())
            PerformMove(moves, move);
        }
      }
      if ((fp_reps & RepresentationBit(MachineRepresentation::kFloat64)) != 0) {
        split_rep_ = MachineRepresentation::kFloat64;
        for (size_t i = 0; i < moves->size(); ++i) {
          auto move = (*moves)[i];
          if (!move->IsEliminated() && move->destination().IsDoubleRegister())
            PerformMove(moves, move);
        }
      }
    }
    split_rep_ = MachineRepresentation::kSimd128;
  }
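  // Note the IsPowerOfTwo check above: when all FP register destinations
  // share a single representation, no mixed-size FP cycles are possible and
  // the size-ordered passes are skipped. Whatever remains (kSimd128 FP moves
  // and all non-FP moves) is resolved by the loop below.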

  for (size_t i = 0; i < moves->size(); ++i) {
    auto move = (*moves)[i];
    if (!move->IsEliminated()) PerformMove(moves, move);
  }
}

void GapResolver::PerformMove(ParallelMove* moves, MoveOperands* move) {
  // Each call to this function performs a move and deletes it from the move
  // graph.  We first recursively perform any move blocking this one.  We mark a
  // move as "pending" on entry to PerformMove in order to detect cycles in the
  // move graph.  We use operand swaps to resolve cycles, which means that a
  // call to PerformMove could change any source operand in the move graph.
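  // Illustrative trace for the two-move cycle {r0 <- r1, r1 <- r0} (register
  // names hypothetical): PerformMove(r1 <- r0) marks itself pending and
  // recurses into r0 <- r1, which finds only the pending move blocking it,
  // emits AssembleSwap(r1, r0), and rewrites the pending move's source to r1.
  // The outer call then sees source == destination and eliminates itself.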
  DCHECK(!move->IsPending());
  DCHECK(!move->IsRedundant());

  // Clear this move's destination to indicate a pending move.  The actual
  // destination is saved on the side.
  InstructionOperand source = move->source();
  DCHECK(!source.IsInvalid());  // Or else it will look eliminated.
  InstructionOperand destination = move->destination();
  move->SetPending();

  // We may need to split moves between FP locations differently.
  const bool is_fp_loc_move =
      !kSimpleFPAliasing && destination.IsFPLocationOperand();

  // Perform a depth-first traversal of the move graph to resolve dependencies.
  // Any unperformed, non-pending move whose source interferes with this
  // move's destination blocks this one, so recursively perform all such
  // moves first.
  for (size_t i = 0; i < moves->size(); ++i) {
    auto other = (*moves)[i];
    if (other->IsEliminated()) continue;
    if (other->IsPending()) continue;
    if (other->source().InterferesWith(destination)) {
      if (is_fp_loc_move &&
          LocationOperand::cast(other->source()).representation() >
              split_rep_) {
        // 'other' must also be an FP location move. Break it into fragments
        // of the same size as 'move'. 'other' is set to one of the fragments,
        // and the rest are appended to 'moves'.
        other = Split(other, split_rep_, moves);
        // After splitting, the chosen fragment may no longer block
        // 'destination'; if so, skip it.
        if (!other->source().InterferesWith(destination)) continue;
      }
      // Though PerformMove can change any source operand in the move graph,
      // this call cannot create a blocking move via a swap (this loop does not
      // miss any).  Assume there is a non-blocking move with source A and this
      // move is blocked on source B and there is a swap of A and B.  Then A and
      // B must be involved in the same cycle (or they would not be swapped).
      // Since this move's destination is B and there is only a single incoming
      // edge to an operand, this move must also be involved in the same cycle.
      // In that case, the blocking move will be created but will be "pending"
      // when we return from PerformMove.
      PerformMove(moves, other);
    }
  }

  // This move's source may have changed due to swaps to resolve cycles and so
  // it may now be the last move in the cycle.  If so remove it.
  source = move->source();
  if (source.EqualsCanonicalized(destination)) {
    move->Eliminate();
    return;
  }

  // We are about to resolve this move and don't need it marked as pending, so
  // restore its destination.
  move->set_destination(destination);

  // The move may be blocked on at most one pending move, in which case we
  // have a cycle.  Search for such a blocking move and perform a swap to
  // resolve it.
  auto blocker =
      std::find_if(moves->begin(), moves->end(), [&](MoveOperands* move) {
        return !move->IsEliminated() &&
               move->source().InterferesWith(destination);
      });
  if (blocker == moves->end()) {
    // The easy case: This move is not blocked.
    assembler_->AssembleMove(&source, &destination);
    move->Eliminate();
    return;
  }

  // Ensure source is a register or both are stack slots, to limit swap cases.
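  // After this normalization AssembleSwap only ever sees (reg, reg),
  // (reg, stack), or (stack, stack) operand pairs, never (stack, reg).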
  if (source.IsStackSlot() || source.IsFPStackSlot()) {
    std::swap(source, destination);
  }
  assembler_->AssembleSwap(&source, &destination);
  move->Eliminate();

  // Update outstanding moves whose source may now have been moved.
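  // E.g. after swapping r0 and r1, a remaining move r2 <- r0 must now read
  // from r1 instead (register names hypothetical).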
  if (is_fp_loc_move) {
    // We may have to split larger moves.
    for (size_t i = 0; i < moves->size(); ++i) {
      auto other = (*moves)[i];
      if (other->IsEliminated()) continue;
      if (source.InterferesWith(other->source())) {
        if (LocationOperand::cast(other->source()).representation() >
            split_rep_) {
          other = Split(other, split_rep_, moves);
          if (!source.InterferesWith(other->source())) continue;
        }
        other->set_source(destination);
      } else if (destination.InterferesWith(other->source())) {
        if (LocationOperand::cast(other->source()).representation() >
            split_rep_) {
          other = Split(other, split_rep_, moves);
          if (!destination.InterferesWith(other->source())) continue;
        }
        other->set_source(source);
      }
    }
  } else {
    for (auto other : *moves) {
      if (other->IsEliminated()) continue;
      if (source.EqualsCanonicalized(other->source())) {
        other->set_source(destination);
      } else if (destination.EqualsCanonicalized(other->source())) {
        other->set_source(source);
      }
    }
  }
}
}  // namespace compiler
}  // namespace internal
}  // namespace v8
