LCOV - code coverage report
Current view: top level - src/compiler - scheduler.cc (source / functions)
Test: app.info
Date: 2017-04-26

                 Hit    Total    Coverage
Lines:           664      686      96.8 %
Functions:        61       61     100.0 %

          Line data    Source code
       1             : // Copyright 2013 the V8 project authors. All rights reserved.
       2             : // Use of this source code is governed by a BSD-style license that can be
       3             : // found in the LICENSE file.
       4             : 
       5             : #include "src/compiler/scheduler.h"
       6             : 
       7             : #include <iomanip>
       8             : 
       9             : #include "src/base/adapters.h"
      10             : #include "src/bit-vector.h"
      11             : #include "src/compiler/common-operator.h"
      12             : #include "src/compiler/control-equivalence.h"
      13             : #include "src/compiler/graph.h"
      14             : #include "src/compiler/node-marker.h"
      15             : #include "src/compiler/node-properties.h"
      16             : #include "src/compiler/node.h"
      17             : #include "src/zone/zone-containers.h"
      18             : 
      19             : namespace v8 {
      20             : namespace internal {
      21             : namespace compiler {
      22             : 
      23             : #define TRACE(...)                                       \
      24             :   do {                                                   \
      25             :     if (FLAG_trace_turbo_scheduler) PrintF(__VA_ARGS__); \
      26             :   } while (false)
      27             : 
      28     1924252 : Scheduler::Scheduler(Zone* zone, Graph* graph, Schedule* schedule, Flags flags,
      29      962136 :                      size_t node_count_hint)
      30             :     : zone_(zone),
      31             :       graph_(graph),
      32             :       schedule_(schedule),
      33             :       flags_(flags),
      34             :       scheduled_nodes_(zone),
      35             :       schedule_root_nodes_(zone),
      36             :       schedule_queue_(zone),
      37     1924232 :       node_data_(zone) {
      38      962143 :   node_data_.reserve(node_count_hint);
      39     1924272 :   node_data_.resize(graph->NodeCount(), DefaultSchedulerData());
      40      962134 : }
      41             : 
      42     2491823 : Schedule* Scheduler::ComputeSchedule(Zone* zone, Graph* graph, Flags flags) {
      43             :   Zone* schedule_zone =
      44      962137 :       (flags & Scheduler::kTempSchedule) ? zone : graph->zone();
      45             : 
      46             :   // Reserve 10% more space for nodes if node splitting is enabled to try to
      47             :   // avoid resizing the vector since that would triple its zone memory usage.
      48      962137 :   float node_hint_multiplier = (flags & Scheduler::kSplitNodes) ? 1.1 : 1;
      49      962137 :   size_t node_count_hint = node_hint_multiplier * graph->NodeCount();
      50             : 
      51             :   Schedule* schedule =
      52      962156 :       new (schedule_zone) Schedule(schedule_zone, node_count_hint);
      53      962134 :   Scheduler scheduler(zone, graph, schedule, flags, node_count_hint);
      54             : 
      55      962136 :   scheduler.BuildCFG();
      56      962062 :   scheduler.ComputeSpecialRPONumbering();
      57      962087 :   scheduler.GenerateImmediateDominatorTree();
      58             : 
      59      962136 :   scheduler.PrepareUses();
      60      962148 :   scheduler.ScheduleEarly();
      61      962152 :   scheduler.ScheduleLate();
      62             : 
      63      962117 :   scheduler.SealFinalSchedule();
      64             : 
      65      962152 :   return schedule;
      66             : }
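
A note on the "triple its zone memory usage" comment above: zone (arena) allocators never free individual blocks, so growing a vector's backing store to a 2x-sized buffer leaves the old 1x buffer live alongside it, for 3x the original footprint. A minimal standalone sketch of that accounting (a hypothetical bump counter, not V8's actual Zone class):

    #include <cstddef>
    #include <cstdio>
    #include <new>

    // Hypothetical arena: memory is only ever allocated, never freed, until
    // the whole arena is torn down. Mirrors zone allocator behaviour.
    struct Arena {
      std::size_t allocated = 0;
      void* Allocate(std::size_t bytes) {
        allocated += bytes;            // old blocks stay live with the arena
        return ::operator new(bytes);  // intentionally never freed in sketch
      }
    };

    int main() {
      Arena zone;
      std::size_t capacity = 686 * sizeof(void*);  // initial backing store
      zone.Allocate(capacity);
      // Growing the vector allocates a 2x buffer; the 1x original is not
      // reclaimed, so the arena now holds 3x the original footprint.
      zone.Allocate(2 * capacity);
      std::printf("zone usage after one resize: %zux original\n",
                  zone.allocated / capacity);  // prints 3
    }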
      67             : 
      68             : 
      69             : Scheduler::SchedulerData Scheduler::DefaultSchedulerData() {
      70     3686555 :   SchedulerData def = {schedule_->start(), 0, kUnknown};
      71             :   return def;
      72             : }
      73             : 
      74             : 
      75  2110790673 : Scheduler::SchedulerData* Scheduler::GetData(Node* node) {
      76  2110790673 :   return &node_data_[node->id()];
      77             : }
      78             : 
      79             : 
      80  1405943697 : Scheduler::Placement Scheduler::GetPlacement(Node* node) {
      81             :   SchedulerData* data = GetData(node);
      82  1340859797 :   if (data->placement_ == kUnknown) {  // Compute placement, once, on demand.
      83    65083900 :     switch (node->opcode()) {
      84             :       case IrOpcode::kParameter:
      85             :       case IrOpcode::kOsrValue:
      86             :         // Parameters and OSR values are always fixed to the start block.
      87     2380795 :         data->placement_ = kFixed;
      88     2380795 :         break;
      89             :       case IrOpcode::kPhi:
      90             :       case IrOpcode::kEffectPhi: {
      91             :         // Phis and effect phis are fixed if their control inputs are, whereas
      92             :         // otherwise they are coupled to a floating control node.
      93     3914959 :         Placement p = GetPlacement(NodeProperties::GetControlInput(node));
      94     3914956 :         data->placement_ = (p == kFixed ? kFixed : kCoupled);
      95     3914956 :         break;
      96             :       }
      97             : #define DEFINE_CONTROL_CASE(V) case IrOpcode::k##V:
      98             :       CONTROL_OP_LIST(DEFINE_CONTROL_CASE)
      99             : #undef DEFINE_CONTROL_CASE
     100             :       {
     101             :         // Control nodes that were not control-reachable from end may float.
     102     1540188 :         data->placement_ = kSchedulable;
     103     1540188 :         break;
     104             :       }
     105             :       default:
     106    57247958 :         data->placement_ = kSchedulable;
     107    57247958 :         break;
     108             :     }
     109             :   }
     110  1340859794 :   return data->placement_;
     111             : }
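
GetPlacement resolves a node's placement lazily on first query and caches it in SchedulerData. The states behave like a small lattice: kUnknown is initialized once, kCoupled phis are later promoted to kFixed alongside their control node, and kSchedulable nodes end up kScheduled. A hedged, simplified mirror of those transitions (the enumerator names are copied from the scheduler; the predicate itself is this sketch's own):

    #include <cassert>

    // Simplified mirror of Scheduler::Placement. kUnknown resolves on first
    // query; kCoupled nodes (phis on floating control) are promoted to kFixed
    // when their control node is placed; kSchedulable nodes become kScheduled.
    enum Placement { kUnknown, kFixed, kCoupled, kSchedulable, kScheduled };

    // Sketch-only predicate: which placement changes UpdatePlacement tolerates.
    bool IsLegalTransition(Placement from, Placement to) {
      switch (from) {
        case kUnknown:     return to != kUnknown;    // lazy initialization
        case kCoupled:     return to == kFixed;      // placed with its control
        case kSchedulable: return to == kScheduled;  // picked by the scheduler
        default:           return false;             // kFixed/kScheduled final
      }
    }

    int main() {
      assert(IsLegalTransition(kUnknown, kCoupled));
      assert(IsLegalTransition(kCoupled, kFixed));
      assert(!IsLegalTransition(kFixed, kScheduled));
    }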
     112             : 
     113             : 
     114   140942299 : void Scheduler::UpdatePlacement(Node* node, Placement placement) {
     115             :   SchedulerData* data = GetData(node);
     116    79023147 :   if (data->placement_ != kUnknown) {  // Trap on mutation, not initialization.
     117    61919152 :     switch (node->opcode()) {
     118             :       case IrOpcode::kParameter:
     119             :         // Parameters are fixed once and for all.
     120           0 :         UNREACHABLE();
     121             :         break;
     122             :       case IrOpcode::kPhi:
     123             :       case IrOpcode::kEffectPhi: {
     124             :         // Phis and effect phis are coupled to their respective blocks.
     125             :         DCHECK_EQ(Scheduler::kCoupled, data->placement_);
     126             :         DCHECK_EQ(Scheduler::kFixed, placement);
     127      406389 :         Node* control = NodeProperties::GetControlInput(node);
     128      406389 :         BasicBlock* block = schedule_->block(control);
     129      406389 :         schedule_->AddNode(block, node);
     130      406389 :         break;
     131             :       }
     132             : #define DEFINE_CONTROL_CASE(V) case IrOpcode::k##V:
     133             :       CONTROL_OP_LIST(DEFINE_CONTROL_CASE)
     134             : #undef DEFINE_CONTROL_CASE
     135             :       {
     136             :         // Control nodes force coupled uses to be placed.
     137     4717974 :         for (auto use : node->uses()) {
     138     3177789 :           if (GetPlacement(use) == Scheduler::kCoupled) {
     139             :             DCHECK_EQ(node, NodeProperties::GetControlInput(use));
     140      406389 :             UpdatePlacement(use, placement);
     141             :           }
     142             :         }
     143             :         break;
     144             :       }
     145             :       default:
     146             :         DCHECK_EQ(Scheduler::kSchedulable, data->placement_);
     147             :         DCHECK_EQ(Scheduler::kScheduled, placement);
     148             :         break;
     149             :     }
     150             :     // Reduce the use count of the node's inputs to potentially make them
     151             :     // schedulable. If all the uses of a node have been scheduled, then the node
     152             :     // itself can be scheduled.
     153   214318939 :     for (Edge const edge : node->input_edges()) {
     154   152400760 :       DecrementUnscheduledUseCount(edge.to(), edge.index(), edge.from());
     155             :     }
     156             :   }
     157    79022174 :   data->placement_ = placement;
     158    79022174 : }
     159             : 
     160             : 
     161   305786615 : bool Scheduler::IsCoupledControlEdge(Node* node, int index) {
     162   308372177 :   return GetPlacement(node) == kCoupled &&
     163   305786399 :          NodeProperties::FirstControlIndex(node) == index;
     164             : }
     165             : 
     166             : 
     167   152399304 : void Scheduler::IncrementUnscheduledUseCount(Node* node, int index,
     168           0 :                                              Node* from) {
     169             :   // Make sure that control edges from coupled nodes are not counted.
     170   152909199 :   if (IsCoupledControlEdge(from, index)) return;
     171             : 
     172             :   // Tracking use counts for fixed nodes is useless.
     173   152503723 :   if (GetPlacement(node) == kFixed) return;
     174             : 
     175             :   // Use count for coupled nodes is summed up on their control.
     176   115753695 :   if (GetPlacement(node) == kCoupled) {
     177      509895 :     Node* control = NodeProperties::GetControlInput(node);
     178      509895 :     return IncrementUnscheduledUseCount(control, index, from);
     179             :   }
     180             : 
     181   115243380 :   ++(GetData(node)->unscheduled_count_);
     182   115243380 :   if (FLAG_trace_turbo_scheduler) {
     183           0 :     TRACE("  Use count of #%d:%s (used by #%d:%s)++ = %d\n", node->id(),
     184             :           node->op()->mnemonic(), from->id(), from->op()->mnemonic(),
     185             :           GetData(node)->unscheduled_count_);
     186             :   }
     187             : }
     188             : 
     189             : 
     190   152910576 : void Scheduler::DecrementUnscheduledUseCount(Node* node, int index,
     191           0 :                                              Node* from) {
     192             :   // Make sure that control edges from coupled nodes are not counted.
     193   152910576 :   if (IsCoupledControlEdge(from, index)) return;
     194             : 
     195             :   // Tracking use counts for fixed nodes is useless.
     196   152504705 :   if (GetPlacement(node) == kFixed) return;
     197             : 
     198             :   // Use count for coupled nodes is summed up on their control.
     199   115437394 :   if (GetPlacement(node) == kCoupled) {
     200      509895 :     Node* control = NodeProperties::GetControlInput(node);
     201      509895 :     return DecrementUnscheduledUseCount(control, index, from);
     202             :   }
     203             : 
     204             :   DCHECK(GetData(node)->unscheduled_count_ > 0);
     205   229854804 :   --(GetData(node)->unscheduled_count_);
     206   114927402 :   if (FLAG_trace_turbo_scheduler) {
     207           0 :     TRACE("  Use count of #%d:%s (used by #%d:%s)-- = %d\n", node->id(),
     208             :           node->op()->mnemonic(), from->id(), from->op()->mnemonic(),
     209             :           GetData(node)->unscheduled_count_);
     210             :   }
     211   229862554 :   if (GetData(node)->unscheduled_count_ == 0) {
     212    50577019 :     TRACE("    newly eligible #%d:%s\n", node->id(), node->op()->mnemonic());
     213             :     schedule_queue_.push(node);
     214             :   }
     215             : }
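
Increment/DecrementUnscheduledUseCount together form a reference-counted worklist: a node becomes eligible for scheduling exactly when its last unscheduled use goes away, at which point it is pushed onto schedule_queue_. A generic sketch of the same pattern, independent of V8's Node type (all names here are the sketch's own):

    #include <queue>
    #include <vector>

    // Each node tracks how many of its uses are still unscheduled; when the
    // count hits zero the node is pushed onto the ready queue, mirroring the
    // "newly eligible" push onto schedule_queue_ above.
    struct UseCounts {
      std::vector<int> unscheduled;  // indexed by node id
      std::queue<int> ready;

      void Increment(int node) { ++unscheduled[node]; }
      void Decrement(int node) {
        if (--unscheduled[node] == 0) ready.push(node);  // newly eligible
      }
    };

    int main() {
      UseCounts counts{std::vector<int>(3, 0), {}};
      counts.Increment(0);
      counts.Increment(0);  // node 0 has two unscheduled uses
      counts.Decrement(0);
      counts.Decrement(0);  // second decrement makes node 0 ready
      return counts.ready.front() == 0 ? 0 : 1;
    }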
     216             : 
     217             : 
     218             : // -----------------------------------------------------------------------------
     219             : // Phase 1: Build control-flow graph.
     220             : 
     221             : 
     222             : // Internal class to build a control flow graph (i.e. the basic blocks and edges
     223             : // between them within a Schedule) from the node graph. Visits control edges of
     224             : // the graph backwards from an end node in order to find the connected control
     225             : // subgraph, needed for scheduling.
     226             : class CFGBuilder : public ZoneObject {
     227             :  public:
     228      962126 :   CFGBuilder(Zone* zone, Scheduler* scheduler)
     229             :       : zone_(zone),
     230             :         scheduler_(scheduler),
     231             :         schedule_(scheduler->schedule_),
     232             :         queued_(scheduler->graph_, 2),
     233             :         queue_(zone),
     234             :         control_(zone),
     235             :         component_entry_(nullptr),
     236             :         component_start_(nullptr),
     237     2886410 :         component_end_(nullptr) {}
     238             : 
     239             :   // Run the control flow graph construction algorithm by walking the graph
     240             :   // backwards from end through control edges, building and connecting the
     241             :   // basic blocks for control nodes.
     242      962155 :   void Run() {
     243             :     ResetDataStructures();
     244      962155 :     Queue(scheduler_->graph_->end());
     245             : 
     246    23655783 :     while (!queue_.empty()) {  // Breadth-first backwards traversal.
     247    21731387 :       Node* node = queue_.front();
     248             :       queue_.pop();
     249    21731401 :       int max = NodeProperties::PastControlIndex(node);
     250    46369002 :       for (int i = NodeProperties::FirstControlIndex(node); i < max; i++) {
     251    24637575 :         Queue(node->InputAt(i));
     252             :       }
     253             :     }
     254             : 
     255    23656073 :     for (NodeVector::iterator i = control_.begin(); i != control_.end(); ++i) {
     256    21731716 :       ConnectBlocks(*i);  // Connect block to its predecessor/successors.
     257             :     }
     258      962139 :   }
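
Run() discovers the control-connected subgraph by walking backwards from End over control inputs, breadth-first, visiting each node exactly once. A self-contained sketch of that traversal, assuming a plain adjacency list control_inputs[n] in place of V8's in-node input edges:

    #include <queue>
    #include <vector>

    // Breadth-first walk backwards from `end`, recording each control node
    // once, as Queue()/Run() above do with queued_ and control_.
    std::vector<int> CollectControlNodes(
        const std::vector<std::vector<int>>& control_inputs, int end) {
      std::vector<bool> queued(control_inputs.size(), false);
      std::vector<int> control;  // discovery order, like control_
      std::queue<int> queue;
      queue.push(end);
      queued[end] = true;
      control.push_back(end);
      while (!queue.empty()) {
        int node = queue.front();
        queue.pop();
        for (int input : control_inputs[node]) {
          if (!queued[input]) {  // mirror of queued_.Get()/Set()
            queued[input] = true;
            control.push_back(input);
            queue.push(input);
          }
        }
      }
      return control;
    }

    int main() {
      // Diamond 0 -> 1 -> {2 via 3}; node 4 is unreachable from end (2).
      std::vector<std::vector<int>> inputs = {{}, {0}, {1, 3}, {1}, {}};
      return CollectControlNodes(inputs, 2).size() == 4 ? 0 : 1;
    }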
     259             : 
     260             :   // Run the control flow graph construction for a minimal control-connected
     261             :   // component ending in {exit} and merge that component into an existing
     262             :   // control flow graph at the bottom of {block}.
     263      215524 :   void Run(BasicBlock* block, Node* exit) {
     264             :     ResetDataStructures();
     265      215524 :     Queue(exit);
     266             : 
     267      215525 :     component_entry_ = nullptr;
     268      215525 :     component_start_ = block;
     269      215525 :     component_end_ = schedule_->block(exit);
     270      215525 :     scheduler_->equivalence_->Run(exit);
     271     1689147 :     while (!queue_.empty()) {  // Breadth-first backwards traversal.
     272     1258097 :       Node* node = queue_.front();
     273             :       queue_.pop();
     274             : 
     275             :       // Use control dependence equivalence to find a canonical single-entry
     276             :       // single-exit region that makes up a minimal component to be scheduled.
     277     1258097 :       if (IsSingleEntrySingleExitRegion(node, exit)) {
     278      215525 :         TRACE("Found SESE at #%d:%s\n", node->id(), node->op()->mnemonic());
     279             :         DCHECK(!component_entry_);
     280      215525 :         component_entry_ = node;
     281      215525 :         continue;
     282             :       }
     283             : 
     284     1042572 :       int max = NodeProperties::PastControlIndex(node);
     285     2399683 :       for (int i = NodeProperties::FirstControlIndex(node); i < max; i++) {
     286     1357111 :         Queue(node->InputAt(i));
     287             :       }
     288             :     }
     289             :     DCHECK(component_entry_);
     290             : 
     291     1689148 :     for (NodeVector::iterator i = control_.begin(); i != control_.end(); ++i) {
     292     1258098 :       ConnectBlocks(*i);  // Connect block to its predecessor/successors.
     293             :     }
     294      215525 :   }
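
The minimal component in this overload is bounded by the first queued node whose control-equivalence class equals that of {exit}: equal classes indicate a single-entry single-exit region, so the backwards search can stop there. A sketch of just that check, assuming class ids have already been computed (standing in for equivalence_->ClassOf above):

    #include <cstddef>
    #include <unordered_map>

    // Hypothetical precomputed control-equivalence classes keyed by node id.
    using EquivalenceClasses = std::unordered_map<int, std::size_t>;

    bool IsSingleEntrySingleExitRegion(const EquivalenceClasses& classes,
                                       int entry, int exit) {
      // Distinct nodes in the same class bound a single-entry single-exit
      // region: {entry} dominates it and {exit} post-dominates it.
      return entry != exit && classes.at(entry) == classes.at(exit);
    }

    int main() {
      EquivalenceClasses classes = {{1, 7}, {2, 7}, {3, 9}};
      return IsSingleEntrySingleExitRegion(classes, 1, 2) ? 0 : 1;
    }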
     295             : 
     296             :  private:
     297             :   friend class ScheduleLateNodeVisitor;
     298             :   friend class Scheduler;
     299             : 
     300    12941413 :   void FixNode(BasicBlock* block, Node* node) {
     301    12941413 :     schedule_->AddNode(block, node);
     302    12941336 :     scheduler_->UpdatePlacement(node, Scheduler::kFixed);
     303    12941338 :   }
     304             : 
     305    27172209 :   void Queue(Node* node) {
     306             :     // Mark the connected control nodes as they are queued.
     307    54344519 :     if (!queued_.Get(node)) {
     308    22989496 :       BuildBlocks(node);
     309             :       queue_.push(node);
     310    22989564 :       queued_.Set(node, true);
     311    22989566 :       control_.push_back(node);
     312             :     }
     313    27172288 :   }
     314             : 
     315    23080330 :   void BuildBlocks(Node* node) {
     316    22989478 :     switch (node->opcode()) {
     317             :       case IrOpcode::kEnd:
     318     1907429 :         FixNode(schedule_->end(), node);
     319      945251 :         break;
     320             :       case IrOpcode::kStart:
     321     1924292 :         FixNode(schedule_->start(), node);
     322      962145 :         break;
     323             :       case IrOpcode::kLoop:
     324             :       case IrOpcode::kMerge:
     325     2823273 :         BuildBlockForNode(node);
     326     2823289 :         break;
     327             :       case IrOpcode::kTerminate: {
     328             :         // Put Terminate in the loop to which it refers.
     329       90852 :         Node* loop = NodeProperties::GetControlInput(node);
     330       90852 :         BasicBlock* block = BuildBlockForNode(loop);
     331       90852 :         FixNode(block, node);
     332       90852 :         break;
     333             :       }
     334             :       case IrOpcode::kBranch:
     335             :       case IrOpcode::kSwitch:
     336     3563905 :         BuildBlocksForSuccessors(node);
     337     3563900 :         break;
     338             : #define BUILD_BLOCK_JS_CASE(Name) case IrOpcode::k##Name:
     339             :         JS_OP_LIST(BUILD_BLOCK_JS_CASE)
     340             : // JS opcodes are just like calls => fall through.
     341             : #undef BUILD_BLOCK_JS_CASE
     342             :       case IrOpcode::kCall:
     343     4802720 :         if (NodeProperties::IsExceptionalCall(node)) {
     344      463576 :           BuildBlocksForSuccessors(node);
     345             :         }
     346             :         break;
     347             :       default:
     348             :         break;
     349             :     }
     350    22989450 :   }
     351             : 
     352    22989797 :   void ConnectBlocks(Node* node) {
     353    22989797 :     switch (node->opcode()) {
     354             :       case IrOpcode::kLoop:
     355             :       case IrOpcode::kMerge:
     356     2823296 :         ConnectMerge(node);
     357     2823276 :         break;
     358             :       case IrOpcode::kBranch:
     359     3545262 :         scheduler_->UpdatePlacement(node, Scheduler::kFixed);
     360     3545267 :         ConnectBranch(node);
     361     3545224 :         break;
     362             :       case IrOpcode::kSwitch:
     363       18660 :         scheduler_->UpdatePlacement(node, Scheduler::kFixed);
     364       18660 :         ConnectSwitch(node);
     365       18660 :         break;
     366             :       case IrOpcode::kDeoptimize:
     367       30409 :         scheduler_->UpdatePlacement(node, Scheduler::kFixed);
     368       30409 :         ConnectDeoptimize(node);
     369       30409 :         break;
     370             :       case IrOpcode::kTailCall:
     371         354 :         scheduler_->UpdatePlacement(node, Scheduler::kFixed);
     372         354 :         ConnectTailCall(node);
     373         354 :         break;
     374             :       case IrOpcode::kReturn:
     375     1306586 :         scheduler_->UpdatePlacement(node, Scheduler::kFixed);
     376     1306562 :         ConnectReturn(node);
     377     1306610 :         break;
     378             :       case IrOpcode::kThrow:
     379       62610 :         scheduler_->UpdatePlacement(node, Scheduler::kFixed);
     380       62610 :         ConnectThrow(node);
     381       62610 :         break;
     382             : #define CONNECT_BLOCK_JS_CASE(Name) case IrOpcode::k##Name:
     383             :         JS_OP_LIST(CONNECT_BLOCK_JS_CASE)
     384             : // JS opcodes are just like calls => fall through.
     385             : #undef CONNECT_BLOCK_JS_CASE
     386             :       case IrOpcode::kCall:
     387     4802700 :         if (NodeProperties::IsExceptionalCall(node)) {
     388      463575 :           scheduler_->UpdatePlacement(node, Scheduler::kFixed);
     389      463575 :           ConnectCall(node);
     390             :         }
     391             :         break;
     392             :       default:
     393             :         break;
     394             :     }
     395    22989770 :   }
     396             : 
     397    21977057 :   BasicBlock* BuildBlockForNode(Node* node) {
     398    11033832 :     BasicBlock* block = schedule_->block(node);
     399    11033839 :     if (block == nullptr) {
     400    10942996 :       block = schedule_->NewBasicBlock();
     401    10943225 :       TRACE("Create block id:%d for #%d:%s\n", block->id().ToInt(), node->id(),
     402             :             node->op()->mnemonic());
     403    10943225 :       FixNode(block, node);
     404             :     }
     405    11034042 :     return block;
     406             :   }
     407             : 
     408     4027471 :   void BuildBlocksForSuccessors(Node* node) {
     409     8054942 :     size_t const successor_cnt = node->op()->ControlOutputCount();
     410     4027471 :     Node** successors = zone_->NewArray<Node*>(successor_cnt);
     411     4027475 :     NodeProperties::CollectControlProjections(node, successors, successor_cnt);
     412    12147373 :     for (size_t index = 0; index < successor_cnt; ++index) {
     413     8119906 :       BuildBlockForNode(successors[index]);
     414             :     }
     415     4027467 :   }
     416             : 
     417     4027462 :   void CollectSuccessorBlocks(Node* node, BasicBlock** successor_blocks,
     418             :                               size_t successor_cnt) {
     419             :     Node** successors = reinterpret_cast<Node**>(successor_blocks);
     420     4027462 :     NodeProperties::CollectControlProjections(node, successors, successor_cnt);
     421     8120008 :     for (size_t index = 0; index < successor_cnt; ++index) {
     422     8119994 :       successor_blocks[index] = schedule_->block(successors[index]);
     423             :     }
     424     4027500 :   }
     425             : 
     426    20836984 :   BasicBlock* FindPredecessorBlock(Node* node) {
     427             :     BasicBlock* predecessor_block = nullptr;
     428             :     while (true) {
     429    29534740 :       predecessor_block = schedule_->block(node);
     430    29534697 :       if (predecessor_block != nullptr) break;
     431     8697767 :       node = NodeProperties::GetControlInput(node);
     432             :     }
     433    20836930 :     return predecessor_block;
     434             :   }
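
FindPredecessorBlock walks up the chain of control inputs until it reaches a node that already owns a basic block, skipping floating control nodes in between. A standalone sketch, assuming one control input per node and -1 for "no block assigned yet":

    #include <vector>

    // control_input[n] is n's single control input; block_of[n] is the block
    // id already assigned to n, or -1 for a floating control node.
    int FindPredecessorBlock(const std::vector<int>& control_input,
                             const std::vector<int>& block_of, int node) {
      while (block_of[node] == -1) {
        node = control_input[node];  // keep walking up the control chain
      }
      return block_of[node];
    }

    int main() {
      // Node 2 floats; its control input 1 already owns block 5.
      std::vector<int> control_input = {0, 0, 1};
      std::vector<int> block_of = {5, 5, -1};
      return FindPredecessorBlock(control_input, block_of, 2) == 5 ? 0 : 1;
    }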
     435             : 
     436      463575 :   void ConnectCall(Node* call) {
     437             :     BasicBlock* successor_blocks[2];
     438      463575 :     CollectSuccessorBlocks(call, successor_blocks, arraysize(successor_blocks));
     439             : 
     440             :     // Consider the exception continuation to be deferred.
     441      463575 :     successor_blocks[1]->set_deferred(true);
     442             : 
     443      463575 :     Node* call_control = NodeProperties::GetControlInput(call);
     444      463574 :     BasicBlock* call_block = FindPredecessorBlock(call_control);
     445      463574 :     TraceConnect(call, call_block, successor_blocks[0]);
     446      463574 :     TraceConnect(call, call_block, successor_blocks[1]);
     447             :     schedule_->AddCall(call_block, call, successor_blocks[0],
     448      463574 :                        successor_blocks[1]);
     449      463573 :   }
     450             : 
     451     7090501 :   void ConnectBranch(Node* branch) {
     452             :     BasicBlock* successor_blocks[2];
     453             :     CollectSuccessorBlocks(branch, successor_blocks,
     454     3545235 :                            arraysize(successor_blocks));
     455             : 
     456             :     // Consider branch hints.
     457     3545266 :     switch (BranchHintOf(branch->op())) {
     458             :       case BranchHint::kNone:
     459             :         break;
     460             :       case BranchHint::kTrue:
     461     1189815 :         successor_blocks[1]->set_deferred(true);
     462             :         break;
     463             :       case BranchHint::kFalse:
     464      421041 :         successor_blocks[0]->set_deferred(true);
     465             :         break;
     466             :     }
     467             : 
     468     3545218 :     if (branch == component_entry_) {
     469      215524 :       TraceConnect(branch, component_start_, successor_blocks[0]);
     470      215524 :       TraceConnect(branch, component_start_, successor_blocks[1]);
     471             :       schedule_->InsertBranch(component_start_, component_end_, branch,
     472      215524 :                               successor_blocks[0], successor_blocks[1]);
     473             :     } else {
     474     3329694 :       Node* branch_control = NodeProperties::GetControlInput(branch);
     475     3329738 :       BasicBlock* branch_block = FindPredecessorBlock(branch_control);
     476     3329728 :       TraceConnect(branch, branch_block, successor_blocks[0]);
     477     3329670 :       TraceConnect(branch, branch_block, successor_blocks[1]);
     478             :       schedule_->AddBranch(branch_block, branch, successor_blocks[0],
     479     3329674 :                            successor_blocks[1]);
     480             :     }
     481     3545225 :   }
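
ConnectBranch consumes the branch hint by marking the unlikely successor deferred: BranchHint::kTrue defers the IfFalse block (index 1), BranchHint::kFalse defers the IfTrue block (index 0). A standalone sketch of that mapping (the helper name is this sketch's own):

    // Mirrors the switch on BranchHintOf(branch->op()) above. Returns which
    // successor projection (0 = IfTrue, 1 = IfFalse) should be marked
    // deferred, or -1 when the branch carries no hint.
    enum class BranchHint { kNone, kTrue, kFalse };

    int DeferredSuccessorIndex(BranchHint hint) {
      switch (hint) {
        case BranchHint::kTrue:  return 1;  // false path is the unlikely one
        case BranchHint::kFalse: return 0;  // true path is the unlikely one
        case BranchHint::kNone:  return -1;
      }
      return -1;  // unreachable, keeps compilers quiet
    }

    int main() { return DeferredSuccessorIndex(BranchHint::kTrue) == 1 ? 0 : 1; }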
     482             : 
     483       18660 :   void ConnectSwitch(Node* sw) {
     484       37320 :     size_t const successor_count = sw->op()->ControlOutputCount();
     485             :     BasicBlock** successor_blocks =
     486       18660 :         zone_->NewArray<BasicBlock*>(successor_count);
     487       18660 :     CollectSuccessorBlocks(sw, successor_blocks, successor_count);
     488             : 
     489       18660 :     if (sw == component_entry_) {
     490           3 :       for (size_t index = 0; index < successor_count; ++index) {
     491           3 :         TraceConnect(sw, component_start_, successor_blocks[index]);
     492             :       }
     493             :       schedule_->InsertSwitch(component_start_, component_end_, sw,
     494           1 :                               successor_blocks, successor_count);
     495             :     } else {
     496       18659 :       Node* switch_control = NodeProperties::GetControlInput(sw);
     497       18659 :       BasicBlock* switch_block = FindPredecessorBlock(switch_control);
     498      121022 :       for (size_t index = 0; index < successor_count; ++index) {
     499      102363 :         TraceConnect(sw, switch_block, successor_blocks[index]);
     500             :       }
     501       18659 :       schedule_->AddSwitch(switch_block, sw, successor_blocks, successor_count);
     502             :     }
     503       18660 :   }
     504             : 
     505     2823262 :   void ConnectMerge(Node* merge) {
     506             :     // Don't connect the special merge at the end to its predecessors.
     507     5646511 :     if (IsFinalMerge(merge)) return;
     508             : 
     509     2823297 :     BasicBlock* block = schedule_->block(merge);
     510             :     DCHECK_NOT_NULL(block);
     511             :     // For all of the merge's control inputs, add a goto at the end to the
     512             :     // merge's basic block.
     513     9301157 :     for (Node* const input : merge->inputs()) {
     514     6477873 :       BasicBlock* predecessor_block = FindPredecessorBlock(input);
     515     6477876 :       TraceConnect(merge, predecessor_block, block);
     516     6477862 :       schedule_->AddGoto(predecessor_block, block);
     517             :     }
     518             :   }
     519             : 
     520         354 :   void ConnectTailCall(Node* call) {
     521         354 :     Node* call_control = NodeProperties::GetControlInput(call);
     522         354 :     BasicBlock* call_block = FindPredecessorBlock(call_control);
     523         354 :     TraceConnect(call, call_block, nullptr);
     524         354 :     schedule_->AddTailCall(call_block, call);
     525         354 :   }
     526             : 
     527     1306566 :   void ConnectReturn(Node* ret) {
     528     1306566 :     Node* return_control = NodeProperties::GetControlInput(ret);
     529     1306620 :     BasicBlock* return_block = FindPredecessorBlock(return_control);
     530     1306629 :     TraceConnect(ret, return_block, nullptr);
     531     1306625 :     schedule_->AddReturn(return_block, ret);
     532     1306604 :   }
     533             : 
     534       30409 :   void ConnectDeoptimize(Node* deopt) {
     535       30409 :     Node* deoptimize_control = NodeProperties::GetControlInput(deopt);
     536       30409 :     BasicBlock* deoptimize_block = FindPredecessorBlock(deoptimize_control);
     537       30409 :     TraceConnect(deopt, deoptimize_block, nullptr);
     538       30409 :     schedule_->AddDeoptimize(deoptimize_block, deopt);
     539       30409 :   }
     540             : 
     541       62610 :   void ConnectThrow(Node* thr) {
     542       62610 :     Node* throw_control = NodeProperties::GetControlInput(thr);
     543       62610 :     BasicBlock* throw_block = FindPredecessorBlock(throw_control);
     544       62610 :     TraceConnect(thr, throw_block, nullptr);
     545       62610 :     schedule_->AddThrow(throw_block, thr);
     546       62610 :   }
     547             : 
     548    15997447 :   void TraceConnect(Node* node, BasicBlock* block, BasicBlock* succ) {
     549             :     DCHECK_NOT_NULL(block);
     550    15997447 :     if (succ == nullptr) {
     551     1399984 :       TRACE("Connect #%d:%s, id:%d -> end\n", node->id(),
     552             :             node->op()->mnemonic(), block->id().ToInt());
     553             :     } else {
     554    14597463 :       TRACE("Connect #%d:%s, id:%d -> id:%d\n", node->id(),
     555             :             node->op()->mnemonic(), block->id().ToInt(), succ->id().ToInt());
     556             :     }
     557    15997447 :   }
     558             : 
     559     2823262 :   bool IsFinalMerge(Node* node) {
     560     5553756 :     return (node->opcode() == IrOpcode::kMerge &&
     561     2730494 :             node == scheduler_->graph_->end()->InputAt(0));
     562             :   }
     563             : 
     564     1258097 :   bool IsSingleEntrySingleExitRegion(Node* entry, Node* exit) const {
     565     1258097 :     size_t entry_class = scheduler_->equivalence_->ClassOf(entry);
     566     1258097 :     size_t exit_class = scheduler_->equivalence_->ClassOf(exit);
     567     1258097 :     return entry != exit && entry_class == exit_class;
     568             :   }
     569             : 
     570             :   void ResetDataStructures() {
     571             :     control_.clear();
     572             :     DCHECK(queue_.empty());
     573             :     DCHECK(control_.empty());
     574             :   }
     575             : 
     576             :   Zone* zone_;
     577             :   Scheduler* scheduler_;
     578             :   Schedule* schedule_;
     579             :   NodeMarker<bool> queued_;      // Mark indicating whether node is queued.
     580             :   ZoneQueue<Node*> queue_;       // Queue used for breadth-first traversal.
     581             :   NodeVector control_;           // List of encountered control nodes.
     582             :   Node* component_entry_;        // Component single-entry node.
     583             :   BasicBlock* component_start_;  // Component single-entry block.
     584             :   BasicBlock* component_end_;    // Component single-exit block.
     585             : };
     586             : 
     587             : 
     588      962125 : void Scheduler::BuildCFG() {
     589      962125 :   TRACE("--- CREATING CFG -------------------------------------------\n");
     590             : 
     591             :   // Instantiate a new control equivalence algorithm for the graph.
     592     1924242 :   equivalence_ = new (zone_) ControlEquivalence(zone_, graph_);
     593             : 
     594             :   // Build a control-flow graph for the main control-connected component that
     595             :   // is being spanned by the graph's start and end nodes.
     596     1924282 :   control_flow_builder_ = new (zone_) CFGBuilder(zone_, this);
     597      962155 :   control_flow_builder_->Run();
     598             : 
     599             :   // Initialize per-block data.
     600             :   // Reserve an extra 10% to avoid resizing vector when fusing floating control.
     601     1924274 :   scheduled_nodes_.reserve(schedule_->BasicBlockCount() * 1.1);
     602     1924264 :   scheduled_nodes_.resize(schedule_->BasicBlockCount());
     603      962078 : }
     604             : 
     605             : 
     606             : // -----------------------------------------------------------------------------
     607             : // Phase 2: Compute special RPO and dominator tree.
     608             : 
     609             : 
     610             : // Compute the special reverse-post-order block ordering, which is essentially
     611             : // an RPO of the graph where loop bodies are contiguous. Properties:
     612             : // 1. If block A is a predecessor of B, then A appears before B in the order,
     613             : //    unless B is a loop header and A is in the loop headed at B
     614             : //    (i.e. A -> B is a backedge).
     615             : // => If block A dominates block B, then A appears before B in the order.
     616             : // => If block A is a loop header, A appears before all blocks in the loop
     617             : //    headed at A.
     618             : // 2. All loops are contiguous in the order (i.e. no intervening blocks that
     619             : //    do not belong to the loop).
     620             : // Note a simple RPO traversal satisfies (1) but not (2).
     621             : class SpecialRPONumberer : public ZoneObject {
     622             :  public:
     623     1310080 :   SpecialRPONumberer(Zone* zone, Schedule* schedule)
     624             :       : zone_(zone),
     625             :         schedule_(schedule),
     626             :         order_(nullptr),
     627             :         beyond_end_(nullptr),
     628             :         loops_(zone),
     629             :         backedges_(zone),
     630             :         stack_(zone),
     631             :         previous_block_count_(0),
     632     3930185 :         empty_(0, zone) {}
     633             : 
     634             :   // Computes the special reverse-post-order for the main control flow graph,
     635             :   // that is for the graph spanned between the schedule's start and end blocks.
     636             :   void ComputeSpecialRPO() {
     637             :     DCHECK(schedule_->end()->SuccessorCount() == 0);
     638             :     DCHECK(!order_);  // Main order does not exist yet.
     639     1310031 :     ComputeAndInsertSpecialRPO(schedule_->start(), schedule_->end());
     640             :   }
     641             : 
     642             :   // Computes the special reverse-post-order for a partial control flow graph,
     643             :   // that is for the graph spanned between the given {entry} and {end} blocks,
     644             :   // then updates the existing ordering with this new information.
     645             :   void UpdateSpecialRPO(BasicBlock* entry, BasicBlock* end) {
     646             :     DCHECK(order_);  // Main order to be updated is present.
     647      215525 :     ComputeAndInsertSpecialRPO(entry, end);
     648             :   }
     649             : 
     650             :   // Serialize the previously computed order as a special reverse-post-order
     651             :   // numbering for basic blocks into the final schedule.
     652     2620163 :   void SerializeRPOIntoSchedule() {
     653             :     int32_t number = 0;
     654    33484630 :     for (BasicBlock* b = order_; b != nullptr; b = b->rpo_next()) {
     655    32174497 :       b->set_rpo_number(number++);
     656    16087256 :       schedule_->rpo_order()->push_back(b);
     657             :     }
     658     1310133 :     BeyondEndSentinel()->set_rpo_number(number);
     659     1310076 :   }
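
Serialization assigns consecutive rpo_numbers along the rpo_next chain, so property (2) above can be restated as: every loop occupies one contiguous interval of numbers starting at its header. A small checker for that invariant, over plain integer block ids (an assumption of this sketch):

    #include <vector>

    // Property (2) of the special RPO: the blocks of a loop (header included)
    // occupy the contiguous interval [rpo(header), rpo(header) + |loop|).
    bool LoopIsContiguous(const std::vector<int>& rpo_number,
                          const std::vector<int>& loop_blocks, int header) {
      int lo = rpo_number[header];
      int hi = lo + static_cast<int>(loop_blocks.size());
      for (int block : loop_blocks) {
        if (rpo_number[block] < lo || rpo_number[block] >= hi) return false;
      }
      return true;
    }

    int main() {
      // Blocks 1, 2, 3 form a loop headed at 1; their numbers are 1, 2, 3.
      std::vector<int> rpo = {0, 1, 2, 3, 4};
      return LoopIsContiguous(rpo, {1, 2, 3}, 1) ? 0 : 1;
    }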
     660             : 
     661             :   // Print and verify the special reverse-post-order.
     662             :   void PrintAndVerifySpecialRPO() {
     663             : #if DEBUG
     664             :     if (FLAG_trace_turbo_scheduler) PrintRPO();
     665             :     VerifySpecialRPO();
     666             : #endif
     667             :   }
     668             : 
     669             :   const ZoneVector<BasicBlock*>& GetOutgoingBlocks(BasicBlock* block) {
     670     6341436 :     if (HasLoopNumber(block)) {
     671     6341512 :       LoopInfo const& loop = loops_[GetLoopNumber(block)];
     672     6341512 :       if (loop.outgoing) return *loop.outgoing;
     673             :     }
     674       73816 :     return empty_;
     675             :   }
     676             : 
     677             :  private:
     678             :   typedef std::pair<BasicBlock*, size_t> Backedge;
     679             : 
     680             :   // Numbering for BasicBlock::rpo_number for this block traversal:
     681             :   static const int kBlockOnStack = -2;
     682             :   static const int kBlockVisited1 = -3;
     683             :   static const int kBlockVisited2 = -4;
     684             :   static const int kBlockUnvisited1 = -1;
     685             :   static const int kBlockUnvisited2 = kBlockVisited1;
     686             : 
     687             :   struct SpecialRPOStackFrame {
     688             :     BasicBlock* block;
     689             :     size_t index;
     690             :   };
     691             : 
     692             :   struct LoopInfo {
     693             :     BasicBlock* header;
     694             :     ZoneVector<BasicBlock*>* outgoing;
     695             :     BitVector* members;
     696             :     LoopInfo* prev;
     697             :     BasicBlock* end;
     698             :     BasicBlock* start;
     699             : 
     700      456427 :     void AddOutgoing(Zone* zone, BasicBlock* block) {
     701      456427 :       if (outgoing == nullptr) {
     702             :         outgoing = new (zone->New(sizeof(ZoneVector<BasicBlock*>)))
     703      286900 :             ZoneVector<BasicBlock*>(zone);
     704             :       }
     705      456427 :       outgoing->push_back(block);
     706      456427 :     }
     707             :   };
     708             : 
     709             :   int Push(ZoneVector<SpecialRPOStackFrame>& stack, int depth,
     710     1608743 :            BasicBlock* child, int unvisited) {
     711    22306988 :     if (child->rpo_number() == unvisited) {
     712    43005098 :       stack[depth].block = child;
     713    22306919 :       stack[depth].index = 0;
     714    22306919 :       child->set_rpo_number(kBlockOnStack);
     715    20698216 :       return depth + 1;
     716             :     }
     717             :     return depth;
     718             :   }
     719             : 
     720             :   BasicBlock* PushFront(BasicBlock* head, BasicBlock* block) {
     721             :     block->set_rpo_next(head);
     722             :     return block;
     723             :   }
     724             : 
     725      767492 :   static int GetLoopNumber(BasicBlock* block) { return block->loop_number(); }
     726             :   static void SetLoopNumber(BasicBlock* block, int loop_number) {
     727             :     return block->set_loop_number(loop_number);
     728             :   }
     729    41277335 :   static bool HasLoopNumber(BasicBlock* block) {
     730             :     return block->loop_number() >= 0;
     731             :   }
     732             : 
     733             :   // TODO(mstarzinger): We only need this special sentinel because some tests
     734             :   // use the schedule's end block in actual control flow (e.g. with end having
     735             :   // successors). Once this has been cleaned up we can use the end block here.
     736     1313825 :   BasicBlock* BeyondEndSentinel() {
     737     1313825 :     if (beyond_end_ == nullptr) {
     738             :       BasicBlock::Id id = BasicBlock::Id::FromInt(-1);
     739     2620263 :       beyond_end_ = new (schedule_->zone()) BasicBlock(schedule_->zone(), id);
     740             :     }
     741     1313763 :     return beyond_end_;
     742             :   }
     743             : 
     744             :   // Compute special RPO for the control flow graph between {entry} and {end},
     745             :   // mutating any existing order so that the result is still valid.
     746     4580311 :   void ComputeAndInsertSpecialRPO(BasicBlock* entry, BasicBlock* end) {
     747             :     // RPO should not have been serialized for this schedule yet.
     748     1525535 :     CHECK_EQ(kBlockUnvisited1, schedule_->start()->loop_number());
     749     1525535 :     CHECK_EQ(kBlockUnvisited1, schedule_->start()->rpo_number());
     750     3051070 :     CHECK_EQ(0, static_cast<int>(schedule_->rpo_order()->size()));
     751             : 
     752             :     // Find correct insertion point within existing order.
     753             :     BasicBlock* insertion_point = entry->rpo_next();
     754             :     BasicBlock* order = insertion_point;
     755             : 
     756             :     // Perform an iterative RPO traversal using an explicit stack,
     757             :     // recording backedges that form cycles. O(|B|).
     758             :     DCHECK_LT(previous_block_count_, schedule_->BasicBlockCount());
     759    52358824 :     stack_.resize(schedule_->BasicBlockCount() - previous_block_count_);
     760     3051254 :     previous_block_count_ = schedule_->BasicBlockCount();
     761             :     int stack_depth = Push(stack_, 0, entry, kBlockUnvisited1);
     762     5622495 :     int num_loops = static_cast<int>(loops_.size());
     763             : 
     764    39241305 :     while (stack_depth > 0) {
     765    36190275 :       int current = stack_depth - 1;
     766    36190275 :       SpecialRPOStackFrame* frame = &stack_[current];
     767             : 
     768    70858864 :       if (frame->block != end &&
     769    34668589 :           frame->index < frame->block->SuccessorCount()) {
     770             :         // Process the next successor.
     771    39775422 :         BasicBlock* succ = frame->block->SuccessorAt(frame->index++);
     772    19887711 :         if (succ->rpo_number() == kBlockVisited1) continue;
     773    14940674 :         if (succ->rpo_number() == kBlockOnStack) {
     774             :           // The successor is on the stack, so this is a backedge (cycle).
     775      327364 :           backedges_.push_back(Backedge(frame->block, frame->index - 1));
     776      163682 :           if (!HasLoopNumber(succ)) {
     777             :             // Assign a new loop number to the header if it doesn't have one.
     778      147381 :             SetLoopNumber(succ, num_loops++);
     779             :           }
     780             :         } else {
     781             :           // Push the successor onto the stack.
     782             :           DCHECK(succ->rpo_number() == kBlockUnvisited1);
     783             :           stack_depth = Push(stack_, stack_depth, succ, kBlockUnvisited1);
     784             :         }
     785             :       } else {
     786             :         // Finished with all successors; pop the stack and add the block.
     787             :         order = PushFront(order, frame->block);
     788    16302564 :         frame->block->set_rpo_number(kBlockVisited1);
     789             :         stack_depth--;
     790             :       }
     791             :     }
     792             : 
     793             :     // If no loops were encountered, then the order we computed was correct.
     794     1525547 :     if (num_loops > static_cast<int>(loops_.size())) {
     795             :       // Otherwise, compute the loop information from the backedges in order
     796             :       // to perform a traversal that groups loop bodies together.
     797       83116 :       ComputeLoopInfo(stack_, num_loops, &backedges_);
     798             : 
     799             :       // Initialize the "loop stack". Note the entry could be a loop header.
     800             :       LoopInfo* loop =
     801       83116 :           HasLoopNumber(entry) ? &loops_[GetLoopNumber(entry)] : nullptr;
     802             :       order = insertion_point;
     803             : 
     804             :       // Perform an iterative post-order traversal, visiting loop bodies before
     805             :       // edges that lead out of loops. Visits each block once, but linking loop
     806             :       // sections together is linear in the loop size, so overall is
     807             :       // sections together is linear in the loop size, so the overall cost is
     808             :       stack_depth = Push(stack_, 0, entry, kBlockUnvisited2);
     809    14809246 :       while (stack_depth > 0) {
     810    14643014 :         SpecialRPOStackFrame* frame = &stack_[stack_depth - 1];
     811    15246823 :         BasicBlock* block = frame->block;
     812     8638645 :         BasicBlock* succ = nullptr;
     813             : 
     814    29206570 :         if (block != end && frame->index < block->SuccessorCount()) {
     815             :           // Process the next normal successor.
     816     8182218 :           succ = block->SuccessorAt(frame->index++);
     817     6460796 :         } else if (HasLoopNumber(block)) {
     818             :           // Process additional outgoing edges from the loop header.
     819      603809 :           if (block->rpo_number() == kBlockOnStack) {
     820             :             // Finish the loop body the first time the header is left on the
     821             :             // stack.
     822             :             DCHECK(loop != nullptr && loop->header == block);
     823      147382 :             loop->start = PushFront(order, block);
     824      147382 :             order = loop->end;
     825      147382 :             block->set_rpo_number(kBlockVisited2);
     826             :             // Pop the loop stack and continue visiting outgoing edges within
     827             :             // the context of the outer loop, if any.
     828      147382 :             loop = loop->prev;
     829             :             // We leave the loop header on the stack; the rest of this iteration
     830             :             // and later iterations will go through its outgoing edges list.
     831             :           }
     832             : 
     833             :           // Use the next outgoing edge if there are any.
     834     1207618 :           size_t outgoing_index = frame->index - block->SuccessorCount();
     835      603809 :           LoopInfo* info = &loops_[GetLoopNumber(block)];
     836             :           DCHECK(loop != info);
     837     1203686 :           if (block != entry && info->outgoing != nullptr &&
     838      599877 :               outgoing_index < info->outgoing->size()) {
     839      912854 :             succ = info->outgoing->at(outgoing_index);
     840      456427 :             frame->index++;
     841             :           }
     842             :         }
     843             : 
     844    14643014 :         if (succ != nullptr) {
     845             :           // Process the next successor.
     846     8638645 :           if (succ->rpo_number() == kBlockOnStack) continue;
     847     8474963 :           if (succ->rpo_number() == kBlockVisited2) continue;
     848             :           DCHECK(succ->rpo_number() == kBlockUnvisited2);
     849    11800294 :           if (loop != nullptr && !loop->members->Contains(succ->id().ToInt())) {
     850             :             // The successor is not in the current loop or any nested loop.
     851             :             // Add it to the outgoing edges of this loop and visit it later.
     852      456427 :             loop->AddOutgoing(zone_, succ);
     853             :           } else {
     854             :             // Push the successor onto the stack.
     855             :             stack_depth = Push(stack_, stack_depth, succ, kBlockUnvisited2);
     856     5921253 :             if (HasLoopNumber(succ)) {
     857             :               // Push the inner loop onto the loop stack.
     858             :               DCHECK(GetLoopNumber(succ) < num_loops);
     859      147378 :               LoopInfo* next = &loops_[GetLoopNumber(succ)];
     860      147378 :               next->end = order;
     861      147378 :               next->prev = loop;
     862             :               loop = next;
     863             :             }
     864             :           }
     865             :         } else {
     866             :           // Finished with all successors of the current block.
     867     6004369 :           if (HasLoopNumber(block)) {
     868             :             // If we are going to pop a loop header, then add its entire body.
     869      147382 :             LoopInfo* info = &loops_[GetLoopNumber(block)];
     870      147382 :             for (BasicBlock* b = info->start; true; b = b->rpo_next()) {
     871     2748165 :               if (b->rpo_next() == info->end) {
     872             :                 b->set_rpo_next(order);
     873      147382 :                 info->end = order;
     874             :                 break;
     875             :               }
     876             :             }
     877      147382 :             order = info->start;
     878             :           } else {
     879             :             // Pop a single node off the stack and add it to the order.
     880             :             order = PushFront(order, block);
     881     5856987 :             block->set_rpo_number(kBlockVisited2);
     882             :           }
     883             :           stack_depth--;
     884             :         }
     885             :       }
     886             :     }
     887             : 
     888             :     // Publish new order the first time.
     889     1525547 :     if (order_ == nullptr) order_ = order;
     890             : 
     891             :     // Compute the correct loop headers and set the correct loop ends.
     892             :     LoopInfo* current_loop = nullptr;
     893     2516876 :     BasicBlock* current_header = entry->loop_header();
     894             :     int32_t loop_depth = entry->loop_depth();
     895     1525547 :     if (entry->IsLoopHeader()) --loop_depth;  // Entry might be a loop header.
     896    17828228 :     for (BasicBlock* b = order; b != insertion_point; b = b->rpo_next()) {
     897    16302663 :       BasicBlock* current = b;
     898             : 
     899             :       // Reset BasicBlock::rpo_number again.
     900    16302646 :       current->set_rpo_number(kBlockUnvisited1);
     901             : 
     902             :       // Finish the previous loop(s) if we just exited them.
     903    35265911 :       while (current_header != nullptr &&
     904             :              current == current_header->loop_end()) {
     905             :         DCHECK(current_header->IsLoopHeader());
     906             :         DCHECK_NOT_NULL(current_loop);
     907      143687 :         current_loop = current_loop->prev;
     908             :         current_header =
     909      143687 :             current_loop == nullptr ? nullptr : current_loop->header;
     910      143687 :         --loop_depth;
     911             :       }
     912    16302674 :       current->set_loop_header(current_header);
     913             : 
     914             :       // Push a new loop onto the stack if this loop is a loop header.
     915    16302683 :       if (HasLoopNumber(current)) {
     916      147409 :         ++loop_depth;
     917      147409 :         current_loop = &loops_[GetLoopNumber(current)];
     918      147409 :         BasicBlock* end = current_loop->end;
     919      151103 :         current->set_loop_end(end == nullptr ? BeyondEndSentinel() : end);
     920      147409 :         current_header = current_loop->header;
     921      147409 :         TRACE("id:%d is a loop header, increment loop depth to %d\n",
     922             :               current->id().ToInt(), loop_depth);
     923             :       }
     924             : 
     925    16302683 :       current->set_loop_depth(loop_depth);
     926             : 
     927    16302663 :       if (current->loop_header() == nullptr) {
     928    13929474 :         TRACE("id:%d is not in a loop (depth == %d)\n", current->id().ToInt(),
     929             :               current->loop_depth());
     930             :       } else {
     931     2373189 :         TRACE("id:%d has loop header id:%d, (depth == %d)\n",
     932             :               current->id().ToInt(), current->loop_header()->id().ToInt(),
     933             :               current->loop_depth());
     934             :       }
     935             :     }
     936     1525582 :   }
     937             : 
     938             :   // Computes loop membership from the backedges of the control flow graph.
     939       83116 :   void ComputeLoopInfo(ZoneVector<SpecialRPOStackFrame>& queue,
     940             :                        size_t num_loops, ZoneVector<Backedge>* backedges) {
     941             :     // Extend existing loop membership vectors.
     942      166233 :     for (LoopInfo& loop : loops_) {
     943           1 :       BitVector* new_members = new (zone_)
     944           3 :           BitVector(static_cast<int>(schedule_->BasicBlockCount()), zone_);
     945           2 :       new_members->CopyFrom(*loop.members);
     946           1 :       loop.members = new_members;
     947             :     }
     948             : 
     949             :     // Extend loop information vector.
     950     3758393 :     loops_.resize(num_loops, LoopInfo());
     951             : 
     952             :     // Compute loop membership starting from backedges.
      953             :     // O(max(loop_depth) * max(|loop|))
     954      493598 :     for (size_t i = 0; i < backedges->size(); i++) {
     955      410482 :       BasicBlock* member = backedges->at(i).first;
     956      163683 :       BasicBlock* header = member->SuccessorAt(backedges->at(i).second);
     957      163683 :       size_t loop_num = GetLoopNumber(header);
     958      163683 :       if (loops_[loop_num].header == nullptr) {
     959      147381 :         loops_[loop_num].header = header;
     960             :         loops_[loop_num].members = new (zone_)
     961      589524 :             BitVector(static_cast<int>(schedule_->BasicBlockCount()), zone_);
     962             :       }
     963             : 
     964             :       int queue_length = 0;
     965      163683 :       if (member != header) {
     966             :         // As long as the header doesn't have a backedge to itself,
     967             :         // Push the member onto the queue and process its predecessors.
     968      327242 :         if (!loops_[loop_num].members->Contains(member->id().ToInt())) {
     969             :           loops_[loop_num].members->Add(member->id().ToInt());
     970             :         }
     971     2764520 :         queue[queue_length++].block = member;
     972             :       }
     973             : 
     974             :       // Propagate loop membership backwards. All predecessors of M up to the
     975             :       // loop header H are members of the loop too. O(|blocks between M and H|).
     976     2764582 :       while (queue_length > 0) {
     977     5201798 :         BasicBlock* block = queue[--queue_length].block;
     978    11978374 :         for (size_t i = 0; i < block->PredecessorCount(); i++) {
     979             :           BasicBlock* pred = block->PredecessorAt(i);
     980     3388288 :           if (pred != header) {
     981     6401184 :             if (!loops_[loop_num].members->Contains(pred->id().ToInt())) {
     982             :               loops_[loop_num].members->Add(pred->id().ToInt());
     983     4874556 :               queue[queue_length++].block = pred;
     984             :             }
     985             :           }
     986             :         }
     987             :       }
     988             :     }
     989       83116 :   }
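                      : 
                      :   // Editor's sketch (illustrative, not part of scheduler.cc): the
                      :   // backward membership propagation above, restated over a toy CFG.
                      :   // All names here (ToyBlock, CollectLoopMembers) are hypothetical.
                      :   // A block M that ends a backedge M->H is a loop member, and so is
                      :   // every predecessor of a member, walking backwards until the
                      :   // header H is reached.
                      :   #include <queue>
                      :   #include <set>
                      :   #include <vector>
                      : 
                      :   struct ToyBlock {
                      :     int id;
                      :     std::vector<ToyBlock*> predecessors;
                      :   };
                      : 
                      :   std::set<int> CollectLoopMembers(ToyBlock* backedge_source,
                      :                                    ToyBlock* header) {
                      :     std::set<int> members;
                      :     std::queue<ToyBlock*> worklist;
                      :     if (backedge_source != header) {  // A self-loop has no other members.
                      :       members.insert(backedge_source->id);
                      :       worklist.push(backedge_source);
                      :     }
                      :     while (!worklist.empty()) {
                      :       ToyBlock* block = worklist.front();
                      :       worklist.pop();
                      :       for (ToyBlock* pred : block->predecessors) {
                      :         // Stop at the header; everything between it and the backedge
                      :         // source belongs to the loop.
                      :         if (pred != header && members.insert(pred->id).second) {
                      :           worklist.push(pred);
                      :         }
                      :       }
                      :     }
                      :     return members;
                      :   }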
     990             : 
     991             : #if DEBUG
     992             :   void PrintRPO() {
     993             :     OFStream os(stdout);
     994             :     os << "RPO with " << loops_.size() << " loops";
     995             :     if (loops_.size() > 0) {
     996             :       os << " (";
     997             :       for (size_t i = 0; i < loops_.size(); i++) {
     998             :         if (i > 0) os << " ";
     999             :         os << "id:" << loops_[i].header->id();
    1000             :       }
    1001             :       os << ")";
    1002             :     }
    1003             :     os << ":\n";
    1004             : 
    1005             :     for (BasicBlock* block = order_; block != nullptr;
    1006             :          block = block->rpo_next()) {
    1007             :       os << std::setw(5) << "B" << block->rpo_number() << ":";
    1008             :       for (size_t i = 0; i < loops_.size(); i++) {
    1009             :         bool range = loops_[i].header->LoopContains(block);
    1010             :         bool membership = loops_[i].header != block && range;
    1011             :         os << (membership ? " |" : "  ");
    1012             :         os << (range ? "x" : " ");
    1013             :       }
    1014             :       os << "  id:" << block->id() << ": ";
    1015             :       if (block->loop_end() != nullptr) {
    1016             :         os << " range: [B" << block->rpo_number() << ", B"
    1017             :            << block->loop_end()->rpo_number() << ")";
    1018             :       }
    1019             :       if (block->loop_header() != nullptr) {
    1020             :         os << " header: id:" << block->loop_header()->id();
    1021             :       }
    1022             :       if (block->loop_depth() > 0) {
    1023             :         os << " depth: " << block->loop_depth();
    1024             :       }
    1025             :       os << "\n";
    1026             :     }
    1027             :   }
    1028             : 
    1029             :   void VerifySpecialRPO() {
    1030             :     BasicBlockVector* order = schedule_->rpo_order();
    1031             :     DCHECK(order->size() > 0);
    1032             :     DCHECK((*order)[0]->id().ToInt() == 0);  // entry should be first.
    1033             : 
    1034             :     for (size_t i = 0; i < loops_.size(); i++) {
    1035             :       LoopInfo* loop = &loops_[i];
    1036             :       BasicBlock* header = loop->header;
    1037             :       BasicBlock* end = header->loop_end();
    1038             : 
    1039             :       DCHECK_NOT_NULL(header);
    1040             :       DCHECK(header->rpo_number() >= 0);
    1041             :       DCHECK(header->rpo_number() < static_cast<int>(order->size()));
    1042             :       DCHECK_NOT_NULL(end);
    1043             :       DCHECK(end->rpo_number() <= static_cast<int>(order->size()));
    1044             :       DCHECK(end->rpo_number() > header->rpo_number());
    1045             :       DCHECK(header->loop_header() != header);
    1046             : 
    1047             :       // Verify the start ... end list relationship.
    1048             :       int links = 0;
    1049             :       BasicBlock* block = loop->start;
    1050             :       DCHECK_EQ(header, block);
    1051             :       bool end_found;
    1052             :       while (true) {
    1053             :         if (block == nullptr || block == loop->end) {
    1054             :           end_found = (loop->end == block);
    1055             :           break;
    1056             :         }
     1057             :         // The list should be in the same order as the final result.
    1058             :         DCHECK(block->rpo_number() == links + header->rpo_number());
    1059             :         links++;
    1060             :         block = block->rpo_next();
    1061             :         DCHECK_LT(links, static_cast<int>(2 * order->size()));  // cycle?
    1062             :       }
    1063             :       DCHECK(links > 0);
    1064             :       DCHECK(links == end->rpo_number() - header->rpo_number());
    1065             :       DCHECK(end_found);
    1066             : 
    1067             :       // Check loop depth of the header.
    1068             :       int loop_depth = 0;
    1069             :       for (LoopInfo* outer = loop; outer != nullptr; outer = outer->prev) {
    1070             :         loop_depth++;
    1071             :       }
    1072             :       DCHECK_EQ(loop_depth, header->loop_depth());
    1073             : 
     1074             :       // Check the contiguity of loops.
    1075             :       int count = 0;
    1076             :       for (int j = 0; j < static_cast<int>(order->size()); j++) {
    1077             :         BasicBlock* block = order->at(j);
    1078             :         DCHECK(block->rpo_number() == j);
    1079             :         if (j < header->rpo_number() || j >= end->rpo_number()) {
    1080             :           DCHECK(!header->LoopContains(block));
    1081             :         } else {
    1082             :           DCHECK(header->LoopContains(block));
    1083             :           DCHECK_GE(block->loop_depth(), loop_depth);
    1084             :           count++;
    1085             :         }
    1086             :       }
    1087             :       DCHECK(links == count);
    1088             :     }
    1089             :   }
    1090             : #endif  // DEBUG
    1091             : 
    1092             :   Zone* zone_;
    1093             :   Schedule* schedule_;
    1094             :   BasicBlock* order_;
    1095             :   BasicBlock* beyond_end_;
    1096             :   ZoneVector<LoopInfo> loops_;
    1097             :   ZoneVector<Backedge> backedges_;
    1098             :   ZoneVector<SpecialRPOStackFrame> stack_;
    1099             :   size_t previous_block_count_;
    1100             :   ZoneVector<BasicBlock*> const empty_;
    1101             : };
    1102             : 
    1103             : 
    1104      347979 : BasicBlockVector* Scheduler::ComputeSpecialRPO(Zone* zone, Schedule* schedule) {
    1105      347979 :   SpecialRPONumberer numberer(zone, schedule);
    1106             :   numberer.ComputeSpecialRPO();
    1107      347979 :   numberer.SerializeRPOIntoSchedule();
    1108             :   numberer.PrintAndVerifySpecialRPO();
    1109      347979 :   return schedule->rpo_order();
    1110             : }
    1111             : 
    1112             : 
    1113      962016 : void Scheduler::ComputeSpecialRPONumbering() {
    1114      962016 :   TRACE("--- COMPUTING SPECIAL RPO ----------------------------------\n");
    1115             : 
    1116             :   // Compute the special reverse-post-order for basic blocks.
    1117     1924109 :   special_rpo_ = new (zone_) SpecialRPONumberer(zone_, schedule_);
    1118             :   special_rpo_->ComputeSpecialRPO();
    1119      962085 : }
    1120             : 
    1121             : 
    1122    49717559 : void Scheduler::PropagateImmediateDominators(BasicBlock* block) {
    1123    26625198 :   for (/*nop*/; block != nullptr; block = block->rpo_next()) {
    1124             :     auto pred = block->predecessors().begin();
    1125             :     auto end = block->predecessors().end();
    1126             :     DCHECK(pred != end);  // All blocks except start have predecessors.
    1127    48539887 :     BasicBlock* dominator = *pred;
    1128             :     bool deferred = dominator->deferred();
    1129             :     // For multiple predecessors, walk up the dominator tree until a common
    1130             :     // dominator is found. Visitation order guarantees that all predecessors
    1131             :     // except for backwards edges have been visited.
    1132    34471918 :     for (++pred; pred != end; ++pred) {
    1133             :       // Don't examine backwards edges.
    1134    10201996 :       if ((*pred)->dominator_depth() < 0) continue;
    1135    10066875 :       dominator = BasicBlock::GetCommonDominator(dominator, *pred);
    1136    10066832 :       deferred = deferred & (*pred)->deferred();
    1137             :     }
    1138             :     block->set_dominator(dominator);
    1139    24269922 :     block->set_dominator_depth(dominator->dominator_depth() + 1);
    1140    24269922 :     block->set_deferred(deferred | block->deferred());
    1141    24269922 :     TRACE("Block id:%d's idom is id:%d, depth = %d\n", block->id().ToInt(),
    1142             :           dominator->id().ToInt(), block->dominator_depth());
    1143             :   }
    1144     1177672 : }
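                      : 
                      : // Editor's sketch (illustrative, not part of scheduler.cc): a typical
                      : // depth-based common-dominator walk, the usual companion of the
                      : // dominator_depth() bookkeeping used above. ToyBlock stands in for
                      : // BasicBlock; how V8 implements GetCommonDominator may differ.
                      : struct ToyBlock {
                      :   int depth;            // Depth in the dominator tree; start block is 0.
                      :   ToyBlock* dominator;  // Immediate dominator; nullptr only at the start.
                      : };
                      : 
                      : ToyBlock* ToyGetCommonDominator(ToyBlock* b1, ToyBlock* b2) {
                      :   // Walk the deeper chain up until both pointers meet; the walk ends
                      :   // at the latest at the start block, which dominates everything.
                      :   while (b1 != b2) {
                      :     if (b1->depth < b2->depth) {
                      :       b2 = b2->dominator;
                      :     } else {
                      :       b1 = b1->dominator;
                      :     }
                      :   }
                      :   return b1;
                      : }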
    1145             : 
    1146             : 
    1147      962065 : void Scheduler::GenerateImmediateDominatorTree() {
    1148      962065 :   TRACE("--- IMMEDIATE BLOCK DOMINATORS -----------------------------\n");
    1149             : 
    1150             :   // Seed start block to be the first dominator.
    1151      962065 :   schedule_->start()->set_dominator_depth(0);
    1152             : 
    1153             :   // Build the block dominator tree resulting from the above seed.
    1154      962065 :   PropagateImmediateDominators(schedule_->start()->rpo_next());
    1155      962138 : }
    1156             : 
    1157             : 
    1158             : // -----------------------------------------------------------------------------
    1159             : // Phase 3: Prepare use counts for nodes.
    1160             : 
    1161             : 
    1162             : class PrepareUsesVisitor {
    1163             :  public:
    1164             :   explicit PrepareUsesVisitor(Scheduler* scheduler)
    1165      962066 :       : scheduler_(scheduler), schedule_(scheduler->schedule_) {}
    1166             : 
    1167    82189883 :   void Pre(Node* node) {
    1168    88079261 :     if (scheduler_->GetPlacement(node) == Scheduler::kFixed) {
    1169             :       // Fixed nodes are always roots for schedule late.
    1170    22999041 :       scheduler_->schedule_root_nodes_.push_back(node);
    1171    25299692 :       if (!schedule_->IsScheduled(node)) {
    1172             :         // Make sure root nodes are scheduled in their respective blocks.
    1173     5889341 :         TRACE("Scheduling fixed position node #%d:%s\n", node->id(),
    1174             :               node->op()->mnemonic());
    1175     5889378 :         IrOpcode::Value opcode = node->opcode();
    1176             :         BasicBlock* block =
    1177             :             opcode == IrOpcode::kParameter
    1178     2300807 :                 ? schedule_->start()
    1179     9477949 :                 : schedule_->block(NodeProperties::GetControlInput(node));
    1180             :         DCHECK_NOT_NULL(block);
    1181     5889376 :         schedule_->AddNode(block, node);
    1182             :       }
    1183             :     }
    1184    82189917 :   }
    1185             : 
    1186   197557438 :   void PostEdge(Node* from, int index, Node* to) {
    1187             :     // If the edge is from an unscheduled node, then tally it in the use count
    1188             :     // for all of its inputs. The same criterion will be used in ScheduleLate
    1189             :     // for decrementing use counts.
    1190   197557438 :     if (!schedule_->IsScheduled(from)) {
    1191             :       DCHECK_NE(Scheduler::kFixed, scheduler_->GetPlacement(from));
    1192   151461768 :       scheduler_->IncrementUnscheduledUseCount(to, index, from);
    1193             :     }
    1194   197557029 :   }
    1195             : 
    1196             :  private:
    1197             :   Scheduler* scheduler_;
    1198             :   Schedule* schedule_;
    1199             : };
    1200             : 
    1201             : 
    1202      962066 : void Scheduler::PrepareUses() {
    1203      962066 :   TRACE("--- PREPARE USES -------------------------------------------\n");
    1204             : 
    1205             :   // Count the uses of every node, which is used to ensure that all of a
    1206             :   // node's uses are scheduled before the node itself.
    1207             :   PrepareUsesVisitor prepare_uses(this);
    1208             : 
    1209             :   // TODO(turbofan): simplify the careful pre/post ordering here.
    1210     1924176 :   BoolVector visited(graph_->NodeCount(), false, zone_);
    1211      962119 :   ZoneStack<Node::InputEdges::iterator> stack(zone_);
    1212     1924114 :   Node* node = graph_->end();
    1213      962110 :   prepare_uses.Pre(node);
    1214      962004 :   visited[node->id()] = true;
    1215      962660 :   stack.push(node->input_edges().begin());
    1216   280690075 :   while (!stack.empty()) {
    1217             :     Edge edge = *stack.top();
    1218   359994118 :     Node* node = edge.to();
    1219   557530538 :     if (visited[node->id()]) {
    1220   197537153 :       prepare_uses.PostEdge(edge.from(), edge.index(), edge.to());
    1221   197565632 :       if (++stack.top() == edge.from()->input_edges().end()) stack.pop();
    1222             :     } else {
    1223    81228116 :       prepare_uses.Pre(node);
    1224    81228849 :       visited[node->id()] = true;
    1225   139597278 :       if (node->InputCount() > 0) stack.push(node->input_edges().begin());
    1226             :     }
    1227             :   }
    1228      962150 : }
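                      : 
                      : // Editor's sketch (illustrative, not part of scheduler.cc) of the
                      : // counting contract set up here, with hypothetical names: every edge
                      : // from an unscheduled user bumps the input's unscheduled-use count,
                      : // and ScheduleLate treats a node as schedulable only once the count
                      : // has drained back to zero.
                      : #include <unordered_map>
                      : #include <vector>
                      : 
                      : struct ToyNode {
                      :   int id;
                      :   std::vector<ToyNode*> inputs;
                      : };
                      : 
                      : using UseCounts = std::unordered_map<int, int>;
                      : 
                      : // Mirror of PostEdge(): tally one pending use per input edge.
                      : void TallyUses(ToyNode* user, UseCounts* counts) {
                      :   for (ToyNode* input : user->inputs) ++(*counts)[input->id];
                      : }
                      : 
                      : // Called when {user} gets placed; returns the inputs whose last
                      : // pending use was just scheduled and which are now schedulable.
                      : std::vector<ToyNode*> ReleaseUses(ToyNode* user, UseCounts* counts) {
                      :   std::vector<ToyNode*> ready;
                      :   for (ToyNode* input : user->inputs) {
                      :     if (--(*counts)[input->id] == 0) ready.push_back(input);
                      :   }
                      :   return ready;
                      : }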
    1229             : 
    1230             : 
    1231             : // -----------------------------------------------------------------------------
    1232             : // Phase 4: Schedule nodes early.
    1233             : 
    1234             : 
    1235             : class ScheduleEarlyNodeVisitor {
    1236             :  public:
    1237             :   ScheduleEarlyNodeVisitor(Zone* zone, Scheduler* scheduler)
    1238     1177661 :       : scheduler_(scheduler), schedule_(scheduler->schedule_), queue_(zone) {}
    1239             : 
    1240             :   // Run the schedule early algorithm on a set of fixed root nodes.
    1241     1177399 :   void Run(NodeVector* roots) {
    1242    27019041 :     for (Node* const root : *roots) {
    1243             :       queue_.push(root);
    1244   107970291 :       while (!queue_.empty()) {
    1245    83306048 :         VisitNode(queue_.front());
    1246             :         queue_.pop();
    1247             :       }
    1248             :     }
    1249     1177682 :   }
    1250             : 
    1251             :  private:
    1252             :   // Visits one node from the queue and propagates its current schedule early
    1253             :   // position to all uses. This in turn might push more nodes onto the queue.
    1254    83305807 :   void VisitNode(Node* node) {
    1255    83305807 :     Scheduler::SchedulerData* data = scheduler_->GetData(node);
    1256             : 
    1257             :     // Fixed nodes already know their schedule early position.
    1258    83305807 :     if (scheduler_->GetPlacement(node) == Scheduler::kFixed) {
    1259   107970481 :       data->minimum_block_ = schedule_->block(node);
    1260    24664199 :       TRACE("Fixing #%d:%s minimum_block = id:%d, dominator_depth = %d\n",
    1261             :             node->id(), node->op()->mnemonic(),
    1262             :             data->minimum_block_->id().ToInt(),
    1263             :             data->minimum_block_->dominator_depth());
    1264             :     }
    1265             : 
    1266             :     // No need to propagate unconstrained schedule early positions.
    1267   249919429 :     if (data->minimum_block_ == schedule_->start()) return;
    1268             : 
    1269             :     // Propagate schedule early position.
    1270             :     DCHECK_NOT_NULL(data->minimum_block_);
    1271   234319580 :     for (auto use : node->uses()) {
    1272   155385425 :       PropagateMinimumPositionToNode(data->minimum_block_, use);
    1273             :     }
    1274             :   }
    1275             : 
    1276             :   // Propagates {block} as another minimum position into the given {node}. This
    1277             :   // has the net effect of computing the minimum dominator block of {node} that
    1278             :   // still post-dominates all inputs to {node} when the queue is processed.
    1279   263738982 :   void PropagateMinimumPositionToNode(BasicBlock* block, Node* node) {
    1280   156830770 :     Scheduler::SchedulerData* data = scheduler_->GetData(node);
    1281             : 
    1282             :     // No need to propagate to fixed node, it's guaranteed to be a root.
    1283   313666497 :     if (scheduler_->GetPlacement(node) == Scheduler::kFixed) return;
    1284             : 
    1285             :     // Coupled nodes influence schedule early position of their control.
    1286   106903442 :     if (scheduler_->GetPlacement(node) == Scheduler::kCoupled) {
    1287     1445524 :       Node* control = NodeProperties::GetControlInput(node);
    1288     1445524 :       PropagateMinimumPositionToNode(block, control);
    1289             :     }
    1290             : 
    1291             :     // Propagate new position if it is deeper down the dominator tree than the
    1292             :     // current. Note that all inputs need to have minimum block position inside
    1293             :     // the dominator chain of {node}'s minimum block position.
    1294             :     DCHECK(InsideSameDominatorChain(block, data->minimum_block_));
    1295   106908212 :     if (block->dominator_depth() > data->minimum_block_->dominator_depth()) {
    1296    58645119 :       data->minimum_block_ = block;
    1297             :       queue_.push(node);
    1298    58645180 :       TRACE("Propagating #%d:%s minimum_block = id:%d, dominator_depth = %d\n",
    1299             :             node->id(), node->op()->mnemonic(),
    1300             :             data->minimum_block_->id().ToInt(),
    1301             :             data->minimum_block_->dominator_depth());
    1302             :     }
    1303             :   }
    1304             : 
    1305             : #if DEBUG
    1306             :   bool InsideSameDominatorChain(BasicBlock* b1, BasicBlock* b2) {
    1307             :     BasicBlock* dominator = BasicBlock::GetCommonDominator(b1, b2);
    1308             :     return dominator == b1 || dominator == b2;
    1309             :   }
    1310             : #endif
    1311             : 
    1312             :   Scheduler* scheduler_;
    1313             :   Schedule* schedule_;
    1314             :   ZoneQueue<Node*> queue_;
    1315             : };
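                      : 
                      : // Editor's sketch (illustrative, not part of scheduler.cc) of the
                      : // fixed point the visitor above computes, with hypothetical names: a
                      : // floating node's earliest legal block is the deepest minimum block
                      : // among its inputs' positions, and an update only ever moves a node
                      : // deeper down the dominator tree, which bounds the number of
                      : // re-visits and guarantees termination.
                      : #include <queue>
                      : #include <vector>
                      : 
                      : struct ToyBlock { int dominator_depth; };
                      : 
                      : struct ToyNode {
                      :   ToyBlock* minimum_block;
                      :   std::vector<ToyNode*> uses;
                      : };
                      : 
                      : void PropagateMinimumPositions(std::queue<ToyNode*>* queue) {
                      :   while (!queue->empty()) {
                      :     ToyNode* node = queue->front();
                      :     queue->pop();
                      :     for (ToyNode* use : node->uses) {
                      :       // Deepen the use's minimum only if {node}'s position is deeper.
                      :       if (node->minimum_block->dominator_depth >
                      :           use->minimum_block->dominator_depth) {
                      :         use->minimum_block = node->minimum_block;
                      :         queue->push(use);
                      :       }
                      :     }
                      :   }
                      : }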
    1316             : 
    1317             : 
    1318      962129 : void Scheduler::ScheduleEarly() {
    1319      962129 :   TRACE("--- SCHEDULE EARLY -----------------------------------------\n");
    1320      962136 :   if (FLAG_trace_turbo_scheduler) {
    1321           0 :     TRACE("roots: ");
    1322           0 :     for (Node* node : schedule_root_nodes_) {
    1323           0 :       TRACE("#%d:%s ", node->id(), node->op()->mnemonic());
    1324             :     }
    1325           0 :     TRACE("\n");
    1326             :   }
    1327             : 
    1328             :   // Compute the minimum block for each node thereby determining the earliest
    1329             :   // position each node could be placed within a valid schedule.
    1330      962136 :   ScheduleEarlyNodeVisitor schedule_early_visitor(zone_, this);
    1331      962104 :   schedule_early_visitor.Run(&schedule_root_nodes_);
    1332      962154 : }
    1333             : 
    1334             : 
    1335             : // -----------------------------------------------------------------------------
    1336             : // Phase 5: Schedule nodes late.
    1337             : 
    1338             : 
    1339             : class ScheduleLateNodeVisitor {
    1340             :  public:
    1341      962106 :   ScheduleLateNodeVisitor(Zone* zone, Scheduler* scheduler)
    1342             :       : zone_(zone),
    1343             :         scheduler_(scheduler),
    1344             :         schedule_(scheduler_->schedule_),
    1345             :         marked_(scheduler->zone_),
    1346     1924181 :         marking_queue_(scheduler->zone_) {}
    1347             : 
    1348             :   // Run the schedule late algorithm on a set of fixed root nodes.
    1349             :   void Run(NodeVector* roots) {
    1350    46961281 :     for (Node* const root : *roots) {
    1351    22999565 :       ProcessQueue(root);
    1352             :     }
    1353             :   }
    1354             : 
    1355             :  private:
    1356    22999577 :   void ProcessQueue(Node* root) {
    1357    22999577 :     ZoneQueue<Node*>* queue = &(scheduler_->schedule_queue_);
    1358   115213580 :     for (Node* node : root->inputs()) {
    1359             :       // Don't schedule coupled nodes on their own.
    1360    46106985 :       if (scheduler_->GetPlacement(node) == Scheduler::kCoupled) {
    1361       16523 :         node = NodeProperties::GetControlInput(node);
    1362             :       }
    1363             : 
    1364             :       // Test schedulability condition by looking at unscheduled use count.
    1365    92222112 :       if (scheduler_->GetData(node)->unscheduled_count_ != 0) continue;
    1366             : 
    1367             :       queue->push(node);
    1368    94369552 :       do {
    1369    94373632 :         Node* const node = queue->front();
    1370             :         queue->pop();
    1371    94369563 :         VisitNode(node);
    1372             :       } while (!queue->empty());
    1373             :     }
    1374    22999610 :   }
    1375             : 
    1376             :   // Visits one node from the queue of schedulable nodes and determines its
     1377             :   // schedule late position. Also hoists nodes out of loops to find a
     1378             :   // better scheduling position.
    1379   154142814 :   void VisitNode(Node* node) {
    1380             :     DCHECK_EQ(0, scheduler_->GetData(node)->unscheduled_count_);
    1381             : 
    1382             :     // Don't schedule nodes that are already scheduled.
    1383   188738917 :     if (schedule_->IsScheduled(node)) return;
    1384             :     DCHECK_EQ(Scheduler::kSchedulable, scheduler_->GetPlacement(node));
    1385             : 
    1386             :     // Determine the dominating block for all of the uses of this node. It is
    1387             :     // the latest block that this node can be scheduled in.
    1388    59558448 :     TRACE("Scheduling #%d:%s\n", node->id(), node->op()->mnemonic());
    1389    59558448 :     BasicBlock* block = GetCommonDominatorOfUses(node);
    1390             :     DCHECK_NOT_NULL(block);
    1391             : 
    1392             :     // The schedule early block dominates the schedule late block.
    1393   119964542 :     BasicBlock* min_block = scheduler_->GetData(node)->minimum_block_;
    1394             :     DCHECK_EQ(min_block, BasicBlock::GetCommonDominator(block, min_block));
    1395    59558037 :     TRACE(
    1396             :         "Schedule late of #%d:%s is id:%d at loop depth %d, minimum = id:%d\n",
    1397             :         node->id(), node->op()->mnemonic(), block->id().ToInt(),
    1398             :         block->loop_depth(), min_block->id().ToInt());
    1399             : 
    1400             :     // Hoist nodes out of loops if possible. Nodes can be hoisted iteratively
     1401             :     // into enclosing loop pre-headers until they would precede their schedule
    1402             :     // early position.
    1403    60406505 :     BasicBlock* hoist_block = GetHoistBlock(block);
    1404    60405047 :     if (hoist_block &&
    1405             :         hoist_block->dominator_depth() >= min_block->dominator_depth()) {
    1406      153686 :       do {
    1407      153686 :         TRACE("  hoisting #%d:%s to block id:%d\n", node->id(),
    1408             :               node->op()->mnemonic(), hoist_block->id().ToInt());
    1409             :         DCHECK_LT(hoist_block->loop_depth(), block->loop_depth());
    1410             :         block = hoist_block;
    1411      153686 :         hoist_block = GetHoistBlock(hoist_block);
    1412      155210 :       } while (hoist_block &&
    1413             :                hoist_block->dominator_depth() >= min_block->dominator_depth());
    1414   118811584 :     } else if (scheduler_->flags_ & Scheduler::kSplitNodes) {
    1415             :       // Split the {node} if beneficial and return the new {block} for it.
    1416    30151033 :       block = SplitNode(block, node);
    1417             :     }
    1418             : 
    1419             :     // Schedule the node or a floating control structure.
    1420    59557573 :     if (IrOpcode::IsMergeOpcode(node->opcode())) {
    1421             :       ScheduleFloatingControl(block, node);
    1422    59342049 :     } else if (node->opcode() == IrOpcode::kFinishRegion) {
    1423      107876 :       ScheduleRegion(block, node);
    1424             :     } else {
    1425    59234173 :       ScheduleNode(block, node);
    1426             :     }
    1427             :   }
    1428             : 
     1429             :   // Mark {block} and push its non-marked predecessors on the marking queue.
    1430    38249501 :   void MarkBlock(BasicBlock* block) {
    1431             :     DCHECK_LT(block->id().ToSize(), marked_.size());
    1432    90206091 :     marked_[block->id().ToSize()] = true;
    1433   128455661 :     for (BasicBlock* pred_block : block->predecessors()) {
    1434             :       DCHECK_LT(pred_block->id().ToSize(), marked_.size());
    1435    51956590 :       if (marked_[pred_block->id().ToSize()]) continue;
    1436    48175004 :       marking_queue_.push_back(pred_block);
    1437             :     }
    1438    38249570 :   }
    1439             : 
    1440    30151026 :   BasicBlock* SplitNode(BasicBlock* block, Node* node) {
    1441             :     // For now, we limit splitting to pure nodes.
    1442    30151026 :     if (!node->op()->HasProperty(Operator::kPure)) return block;
    1443             :     // TODO(titzer): fix the special case of splitting of projections.
    1444    23352077 :     if (node->opcode() == IrOpcode::kProjection) return block;
    1445             : 
     1446             :     // The {block} is the common dominator of all uses of {node}, so we cannot
    1447             :     // split anything unless the {block} has at least two successors.
    1448             :     DCHECK_EQ(block, GetCommonDominatorOfUses(node));
    1449    23213197 :     if (block->SuccessorCount() < 2) return block;
    1450             : 
    1451             :     // Clear marking bits.
    1452             :     DCHECK(marking_queue_.empty());
    1453   107080457 :     std::fill(marked_.begin(), marked_.end(), false);
    1454    26501868 :     marked_.resize(schedule_->BasicBlockCount() + 1, false);
    1455             : 
    1456             :     // Check if the {node} has uses in {block}.
    1457    59967571 :     for (Edge edge : node->use_edges()) {
    1458    28751696 :       BasicBlock* use_block = GetBlockForUse(edge);
    1459    57503383 :       if (use_block == nullptr || marked_[use_block->id().ToSize()]) continue;
    1460    24637411 :       if (use_block == block) {
    1461    10786743 :         TRACE("  not splitting #%d:%s, it is used in id:%d\n", node->id(),
    1462             :               node->op()->mnemonic(), block->id().ToInt());
    1463    10786743 :         marking_queue_.clear();
    1464    10786742 :         return block;
    1465             :       }
    1466    13850668 :       MarkBlock(use_block);
    1467             :     }
    1468             : 
    1469             :     // Compute transitive marking closure; a block is marked if all its
    1470             :     // successors are marked.
    1471    41066515 :     do {
    1472    41066515 :       BasicBlock* top_block = marking_queue_.front();
    1473    41066515 :       marking_queue_.pop_front();
    1474    41066464 :       if (marked_[top_block->id().ToSize()]) continue;
    1475             :       bool marked = true;
    1476   111108700 :       for (BasicBlock* successor : top_block->successors()) {
    1477    50117768 :         if (!marked_[successor->id().ToSize()]) {
    1478             :           marked = false;
    1479             :           break;
    1480             :         }
    1481             :       }
    1482    36591494 :       if (marked) MarkBlock(top_block);
    1483             :     } while (!marking_queue_.empty());
    1484             : 
    1485             :     // If the (common dominator) {block} is marked, we know that all paths from
    1486             :     // {block} to the end contain at least one use of {node}, and hence there's
    1487             :     // no point in splitting the {node} in this case.
    1488     2464179 :     if (marked_[block->id().ToSize()]) {
    1489     1418378 :       TRACE("  not splitting #%d:%s, its common dominator id:%d is perfect\n",
    1490             :             node->id(), node->op()->mnemonic(), block->id().ToInt());
    1491     1418378 :       return block;
    1492             :     }
    1493             : 
    1494             :     // Split {node} for uses according to the previously computed marking
     1495             :     // closure. Every marking partition has a unique dominator, which gets a
     1496             :     // copy of the {node}, with the exception of the first partition, which
     1497             :     // gets the {node} itself.
    1498     1045801 :     ZoneMap<BasicBlock*, Node*> dominators(scheduler_->zone_);
    1499    10797016 :     for (Edge edge : node->use_edges()) {
    1500     4875609 :       BasicBlock* use_block = GetBlockForUse(edge);
    1501    13172034 :       if (use_block == nullptr) continue;
    1502    16592832 :       while (marked_[use_block->dominator()->id().ToSize()]) {
    1503     3420798 :         use_block = use_block->dominator();
    1504             :       }
    1505     4875618 :       auto& use_node = dominators[use_block];
    1506     4875623 :       if (use_node == nullptr) {
    1507     3770220 :         if (dominators.size() == 1u) {
    1508             :           // Place the {node} at {use_block}.
    1509     1045800 :           block = use_block;
    1510     1045800 :           use_node = node;
    1511     1045800 :           TRACE("  pushing #%d:%s down to id:%d\n", node->id(),
    1512             :                 node->op()->mnemonic(), block->id().ToInt());
    1513             :         } else {
    1514             :           // Place a copy of {node} at {use_block}.
    1515     2724420 :           use_node = CloneNode(node);
    1516     2724420 :           TRACE("  cloning #%d:%s for id:%d\n", use_node->id(),
    1517             :                 use_node->op()->mnemonic(), use_block->id().ToInt());
    1518     2724420 :           scheduler_->schedule_queue_.push(use_node);
    1519             :         }
    1520             :       }
    1521     4875622 :       edge.UpdateTo(use_node);
    1522             :     }
    1523             :     return block;
    1524             :   }
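                      : 
                      :   // Editor's sketch (illustrative, not part of scheduler.cc) of the
                      :   // marking closure above, over a toy graph with hypothetical names:
                      :   // seed the blocks that contain uses, then mark any block whose
                      :   // successors are all marked. If the common dominator itself ends up
                      :   // marked, every path from it reaches a use, so splitting cannot
                      :   // avoid any computation.
                      :   #include <deque>
                      :   #include <vector>
                      : 
                      :   struct ToyBlock {
                      :     int id;
                      :     std::vector<ToyBlock*> predecessors;
                      :     std::vector<ToyBlock*> successors;
                      :   };
                      : 
                      :   void ComputeMarkingClosure(const std::vector<ToyBlock*>& use_blocks,
                      :                              std::vector<bool>* marked) {
                      :     std::deque<ToyBlock*> queue;
                      :     for (ToyBlock* b : use_blocks) {
                      :       (*marked)[b->id] = true;
                      :       for (ToyBlock* pred : b->predecessors) queue.push_back(pred);
                      :     }
                      :     while (!queue.empty()) {
                      :       ToyBlock* block = queue.front();
                      :       queue.pop_front();
                      :       if ((*marked)[block->id]) continue;
                      :       bool all_marked = true;
                      :       for (ToyBlock* succ : block->successors) {
                      :         if (!(*marked)[succ->id]) { all_marked = false; break; }
                      :       }
                      :       if (all_marked) {
                      :         (*marked)[block->id] = true;
                      :         for (ToyBlock* pred : block->predecessors) queue.push_back(pred);
                      :       }
                      :     }
                      :   }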
    1525             : 
    1526   119423616 :   BasicBlock* GetHoistBlock(BasicBlock* block) {
    1527    60273318 :     if (block->IsLoopHeader()) return block->dominator();
     1528             :     // We have to check that the {block} dominates all of the loop's
     1529             :     // outgoing blocks. If it doesn't, then there is a path out of the
     1530             :     // loop which does not execute this {block}, so hoisting operations
     1531             :     // from this {block} out of the loop would introduce additional
     1532             :     // computations.
    1533    59437256 :     if (BasicBlock* header_block = block->loop_header()) {
    1534    19705738 :       for (BasicBlock* outgoing_block :
    1535    25760216 :            scheduler_->special_rpo_->GetOutgoingBlocks(header_block)) {
    1536    13077344 :         if (BasicBlock::GetCommonDominator(block, outgoing_block) != block) {
    1537             :           return nullptr;
    1538             :         }
    1539             :       }
    1540      286958 :       return header_block->dominator();
    1541             :     }
    1542             :     return nullptr;
    1543             :   }
    1544             : 
    1545    59892612 :   BasicBlock* GetCommonDominatorOfUses(Node* node) {
    1546             :     BasicBlock* block = nullptr;
    1547   327750487 :     for (Edge edge : node->use_edges()) {
    1548   133929562 :       BasicBlock* use_block = GetBlockForUse(edge);
    1549             :       block = block == nullptr
    1550             :                   ? use_block
    1551             :                   : use_block == nullptr
    1552             :                         ? block
    1553   133928129 :                         : BasicBlock::GetCommonDominator(block, use_block);
    1554             :     }
    1555    59891363 :     return block;
    1556             :   }
    1557             : 
    1558             :   BasicBlock* FindPredecessorBlock(Node* node) {
    1559     9148219 :     return scheduler_->control_flow_builder_->FindPredecessorBlock(node);
    1560             :   }
    1561             : 
    1562   176698758 :   BasicBlock* GetBlockForUse(Edge edge) {
    1563             :     // TODO(titzer): ignore uses from dead nodes (not visited in PrepareUses()).
    1564             :     // Dead uses only occur if the graph is not trimmed before scheduling.
    1565   167550539 :     Node* use = edge.from();
    1566   167550539 :     if (IrOpcode::IsPhiOpcode(use->opcode())) {
    1567             :       // If the use is from a coupled (i.e. floating) phi, compute the common
    1568             :       // dominator of its uses. This will not recurse more than one level.
    1569     8033051 :       if (scheduler_->GetPlacement(use) == Scheduler::kCoupled) {
    1570      333367 :         TRACE("  inspecting uses of coupled #%d:%s\n", use->id(),
    1571             :               use->op()->mnemonic());
     1572             :         // TODO(titzer): re-enable once the above TODO is addressed.
    1573             :         //        DCHECK_EQ(edge.to(), NodeProperties::GetControlInput(use));
    1574      333367 :         return GetCommonDominatorOfUses(use);
    1575             :       }
    1576             :       // If the use is from a fixed (i.e. non-floating) phi, we use the
    1577             :       // predecessor block of the corresponding control input to the merge.
    1578     7699674 :       if (scheduler_->GetPlacement(use) == Scheduler::kFixed) {
    1579     7699672 :         TRACE("  input@%d into a fixed phi #%d:%s\n", edge.index(), use->id(),
    1580             :               use->op()->mnemonic());
    1581     7699672 :         Node* merge = NodeProperties::GetControlInput(use, 0);
    1582             :         DCHECK(IrOpcode::IsMergeOpcode(merge->opcode()));
    1583     7699680 :         Node* input = NodeProperties::GetControlInput(merge, edge.index());
    1584     7699628 :         return FindPredecessorBlock(input);
    1585             :       }
    1586   159517488 :     } else if (IrOpcode::IsMergeOpcode(use->opcode())) {
    1587             :       // If the use is from a fixed (i.e. non-floating) merge, we use the
    1588             :       // predecessor block of the current input to the merge.
    1589     1448597 :       if (scheduler_->GetPlacement(use) == Scheduler::kFixed) {
    1590     1448597 :         TRACE("  input@%d into a fixed merge #%d:%s\n", edge.index(), use->id(),
    1591             :               use->op()->mnemonic());
    1592     2897198 :         return FindPredecessorBlock(edge.to());
    1593             :       }
    1594             :     }
    1595   158068881 :     BasicBlock* result = schedule_->block(use);
    1596   158068806 :     if (result == nullptr) return nullptr;
    1597   158068909 :     TRACE("  must dominate use #%d:%s in id:%d\n", use->id(),
    1598             :           use->op()->mnemonic(), result->id().ToInt());
    1599   158068827 :     return result;
    1600             :   }
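                      : 
                      :   // Editor's sketch (illustrative, not part of scheduler.cc) of the
                      :   // phi-use rule applied above, with hypothetical names: the i-th
                      :   // value input of a fixed phi is consumed at the end of the i-th
                      :   // predecessor of the phi's merge block, so that predecessor, not
                      :   // the merge block itself, is what the scheduled input must dominate.
                      :   #include <vector>
                      : 
                      :   struct ToyBlock { std::vector<ToyBlock*> predecessors; };
                      : 
                      :   ToyBlock* BlockForPhiUse(ToyBlock* merge_block, int input_index) {
                      :     // Value input i flows in along control edge i of the merge.
                      :     return merge_block->predecessors[input_index];
                      :   }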
    1601             : 
    1602             :   void ScheduleFloatingControl(BasicBlock* block, Node* node) {
    1603      215524 :     scheduler_->FuseFloatingControl(block, node);
    1604             :   }
    1605             : 
    1606      107876 :   void ScheduleRegion(BasicBlock* block, Node* region_end) {
     1607             :     // We only allow regions of instructions connected into a linear
     1608             :     // effect chain. The only value that may be produced by a node in
     1609             :     // the chain is the value consumed by the FinishRegion node.
    1610             : 
    1611             :     // We schedule back to front; we first schedule FinishRegion.
    1612      107876 :     CHECK_EQ(IrOpcode::kFinishRegion, region_end->opcode());
    1613      107876 :     ScheduleNode(block, region_end);
    1614             : 
    1615             :     // Schedule the chain.
    1616     1019437 :     Node* node = NodeProperties::GetEffectInput(region_end);
    1617     1019437 :     while (node->opcode() != IrOpcode::kBeginRegion) {
    1618             :       DCHECK_EQ(0, scheduler_->GetData(node)->unscheduled_count_);
    1619             :       DCHECK_EQ(1, node->op()->EffectInputCount());
    1620             :       DCHECK_EQ(1, node->op()->EffectOutputCount());
    1621             :       DCHECK_EQ(0, node->op()->ControlOutputCount());
     1622             :       // The value output (if there is any) must be consumed
     1623             :       // by the FinishRegion node.
    1624             :       DCHECK(node->op()->ValueOutputCount() == 0 ||
    1625             :              node == region_end->InputAt(0));
    1626      803685 :       ScheduleNode(block, node);
    1627      803685 :       node = NodeProperties::GetEffectInput(node);
    1628             :     }
    1629             :     // Schedule the BeginRegion node.
    1630             :     DCHECK_EQ(0, scheduler_->GetData(node)->unscheduled_count_);
    1631      107876 :     ScheduleNode(block, node);
    1632      107876 :   }
    1633             : 
    1634    60253503 :   void ScheduleNode(BasicBlock* block, Node* node) {
    1635    60253503 :     schedule_->PlanNode(block, node);
    1636             :     size_t block_id = block->id().ToSize();
    1637   188603795 :     if (!scheduler_->scheduled_nodes_[block_id]) {
    1638     7841734 :       scheduler_->scheduled_nodes_[block_id] =
    1639    23525321 :           new (zone_->New(sizeof(NodeVector))) NodeVector(zone_);
    1640             :     }
    1641   120507882 :     scheduler_->scheduled_nodes_[block_id]->push_back(node);
    1642    60252928 :     scheduler_->UpdatePlacement(node, Scheduler::kScheduled);
    1643    60253722 :   }
    1644             : 
    1645     5448842 :   Node* CloneNode(Node* node) {
    1646             :     int const input_count = node->InputCount();
    1647     3662254 :     for (int index = 0; index < input_count; ++index) {
    1648             :       Node* const input = node->InputAt(index);
    1649     3662251 :       scheduler_->IncrementUnscheduledUseCount(input, index, node);
    1650             :     }
    1651     8173260 :     Node* const copy = scheduler_->graph_->CloneNode(node);
    1652     2724419 :     TRACE(("clone #%d:%s -> #%d\n"), node->id(), node->op()->mnemonic(),
    1653             :           copy->id());
    1654     2724419 :     scheduler_->node_data_.resize(copy->id() + 1,
    1655    10897676 :                                   scheduler_->DefaultSchedulerData());
    1656     8173257 :     scheduler_->node_data_[copy->id()] = scheduler_->node_data_[node->id()];
    1657     2724419 :     return copy;
    1658             :   }
    1659             : 
    1660             :   Zone* zone_;
    1661             :   Scheduler* scheduler_;
    1662             :   Schedule* schedule_;
    1663             :   BoolVector marked_;
    1664             :   ZoneDeque<BasicBlock*> marking_queue_;
    1665             : };
    1666             : 
    1667             : 
    1668      962080 : void Scheduler::ScheduleLate() {
    1669      962080 :   TRACE("--- SCHEDULE LATE ------------------------------------------\n");
    1670      962098 :   if (FLAG_trace_turbo_scheduler) {
    1671           0 :     TRACE("roots: ");
    1672           0 :     for (Node* node : schedule_root_nodes_) {
    1673           0 :       TRACE("#%d:%s ", node->id(), node->op()->mnemonic());
    1674             :     }
    1675           0 :     TRACE("\n");
    1676             :   }
    1677             : 
    1678             :   // Schedule: Places nodes in dominator block of all their uses.
    1679      962098 :   ScheduleLateNodeVisitor schedule_late_visitor(zone_, this);
    1680             :   schedule_late_visitor.Run(&schedule_root_nodes_);
    1681      962118 : }
    1682             : 
    1683             : 
    1684             : // -----------------------------------------------------------------------------
    1685             : // Phase 6: Seal the final schedule.
    1686             : 
    1687             : 
    1688      962103 : void Scheduler::SealFinalSchedule() {
    1689      962103 :   TRACE("--- SEAL FINAL SCHEDULE ------------------------------------\n");
    1690             : 
    1691             :   // Serialize the assembly order and reverse-post-order numbering.
    1692      962103 :   special_rpo_->SerializeRPOIntoSchedule();
    1693             :   special_rpo_->PrintAndVerifySpecialRPO();
    1694             : 
    1695             :   // Add collected nodes for basic blocks to their blocks in the right order.
    1696             :   int block_num = 0;
    1697    14791673 :   for (NodeVector* nodes : scheduled_nodes_) {
    1698    25734932 :     BasicBlock::Id id = BasicBlock::Id::FromInt(block_num++);
    1699    12867466 :     BasicBlock* block = schedule_->GetBlockById(id);
    1700    12867484 :     if (nodes) {
    1701   128351909 :       for (Node* node : base::Reversed(*nodes)) {
    1702    60254925 :         schedule_->AddNode(block, node);
    1703             :       }
    1704             :     }
    1705             :   }
    1706      962150 : }
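                      : 
                      : // Editor's note with a minimal sketch (hypothetical names, not part
                      : // of scheduler.cc): ScheduleLate visits uses before the nodes that
                      : // feed them, so each per-block vector is filled back to front; the
                      : // base::Reversed walk above restores definition-before-use order.
                      : #include <vector>
                      : 
                      : void EmitBlockBody(const std::vector<int>& planned_in_reverse,
                      :                    std::vector<int>* block_body) {
                      :   block_body->assign(planned_in_reverse.rbegin(),
                      :                      planned_in_reverse.rend());
                      : }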
    1707             : 
    1708             : 
    1709             : // -----------------------------------------------------------------------------
    1710             : 
    1711             : 
    1712      646571 : void Scheduler::FuseFloatingControl(BasicBlock* block, Node* node) {
    1713      215523 :   TRACE("--- FUSE FLOATING CONTROL ----------------------------------\n");
    1714      215523 :   if (FLAG_trace_turbo_scheduler) {
    1715           0 :     OFStream os(stdout);
    1716           0 :     os << "Schedule before control flow fusion:\n" << *schedule_;
    1717             :   }
    1718             : 
    1719             :   // Iterate on phase 1: Build control-flow graph.
    1720      215523 :   control_flow_builder_->Run(block, node);
    1721             : 
    1722             :   // Iterate on phase 2: Compute special RPO and dominator tree.
    1723      215525 :   special_rpo_->UpdateSpecialRPO(block, schedule_->block(node));
    1724             :   // TODO(mstarzinger): Currently "iterate on" means "re-run". Fix that.
    1725    13526608 :   for (BasicBlock* b = block->rpo_next(); b != nullptr; b = b->rpo_next()) {
    1726             :     b->set_dominator_depth(-1);
    1727             :     b->set_dominator(nullptr);
    1728             :   }
    1729      215524 :   PropagateImmediateDominators(block->rpo_next());
    1730             : 
    1731             :   // Iterate on phase 4: Schedule nodes early.
    1732             :   // TODO(mstarzinger): The following loop gathering the propagation roots is a
    1733             :   // temporary solution and should be merged into the rest of the scheduler as
     1734             :   // soon as the approach has settled for all floating loops.
    1735      215525 :   NodeVector propagation_roots(control_flow_builder_->control_);
    1736     1904664 :   for (Node* node : control_flow_builder_->control_) {
    1737     5447416 :     for (Node* use : node->uses()) {
    1738     2094659 :       if (NodeProperties::IsPhi(use)) propagation_roots.push_back(use);
    1739             :     }
    1740             :   }
    1741      215525 :   if (FLAG_trace_turbo_scheduler) {
    1742           0 :     TRACE("propagation roots: ");
    1743           0 :     for (Node* node : propagation_roots) {
    1744           0 :       TRACE("#%d:%s ", node->id(), node->op()->mnemonic());
    1745             :     }
    1746           0 :     TRACE("\n");
    1747             :   }
    1748      215525 :   ScheduleEarlyNodeVisitor schedule_early_visitor(zone_, this);
    1749      215525 :   schedule_early_visitor.Run(&propagation_roots);
    1750             : 
    1751             :   // Move previously planned nodes.
    1752             :   // TODO(mstarzinger): Improve that by supporting bulk moves.
    1753      431050 :   scheduled_nodes_.resize(schedule_->BasicBlockCount());
    1754      215525 :   MovePlannedNodes(block, schedule_->block(node));
    1755             : 
    1756      215525 :   if (FLAG_trace_turbo_scheduler) {
    1757           0 :     OFStream os(stdout);
    1758           0 :     os << "Schedule after control flow fusion:\n" << *schedule_;
    1759             :   }
    1760      215525 : }
    1761             : 
    1762             : 
    1763      215525 : void Scheduler::MovePlannedNodes(BasicBlock* from, BasicBlock* to) {
    1764      215525 :   TRACE("Move planned nodes from id:%d to id:%d\n", from->id().ToInt(),
    1765             :         to->id().ToInt());
    1766      630924 :   NodeVector* from_nodes = scheduled_nodes_[from->id().ToSize()];
    1767      215525 :   NodeVector* to_nodes = scheduled_nodes_[to->id().ToSize()];
    1768      431050 :   if (!from_nodes) return;
    1769             : 
    1770     3601593 :   for (Node* const node : *from_nodes) {
    1771     3201845 :     schedule_->SetBlockForNode(to, node);
    1772             :   }
    1773      199874 :   if (to_nodes) {
    1774           0 :     to_nodes->insert(to_nodes->end(), from_nodes->begin(), from_nodes->end());
    1775             :     from_nodes->clear();
    1776             :   } else {
    1777             :     std::swap(scheduled_nodes_[from->id().ToSize()],
    1778             :               scheduled_nodes_[to->id().ToSize()]);
    1779             :   }
    1780             : }
    1781             : 
    1782             : }  // namespace compiler
    1783             : }  // namespace internal
    1784             : }  // namespace v8

Generated by: LCOV version 1.10