LCOV - code coverage report
Current view: top level - src/compiler - scheduler.cc (source / functions) Hit Total Coverage
Test: app.info Lines: 656 676 97.0 %
Date: 2019-04-17 Functions: 60 62 96.8 %

          Line data    Source code
       1             : // Copyright 2013 the V8 project authors. All rights reserved.
       2             : // Use of this source code is governed by a BSD-style license that can be
       3             : // found in the LICENSE file.
       4             : 
       5             : #include "src/compiler/scheduler.h"
       6             : 
       7             : #include <iomanip>
       8             : 
       9             : #include "src/base/adapters.h"
      10             : #include "src/bit-vector.h"
      11             : #include "src/compiler/common-operator.h"
      12             : #include "src/compiler/control-equivalence.h"
      13             : #include "src/compiler/graph.h"
      14             : #include "src/compiler/node-marker.h"
      15             : #include "src/compiler/node-properties.h"
      16             : #include "src/compiler/node.h"
      17             : #include "src/zone/zone-containers.h"
      18             : 
      19             : namespace v8 {
      20             : namespace internal {
      21             : namespace compiler {
      22             : 
      23             : #define TRACE(...)                                       \
      24             :   do {                                                   \
      25             :     if (FLAG_trace_turbo_scheduler) PrintF(__VA_ARGS__); \
      26             :   } while (false)
      27             : 
      28     2865733 : Scheduler::Scheduler(Zone* zone, Graph* graph, Schedule* schedule, Flags flags,
      29             :                      size_t node_count_hint)
      30             :     : zone_(zone),
      31             :       graph_(graph),
      32             :       schedule_(schedule),
      33             :       flags_(flags),
      34             :       scheduled_nodes_(zone),
      35             :       schedule_root_nodes_(zone),
      36             :       schedule_queue_(zone),
      37     5731466 :       node_data_(zone) {
      38     2868277 :   node_data_.reserve(node_count_hint);
      39     5733384 :   node_data_.resize(graph->NodeCount(), DefaultSchedulerData());
      40     2866330 : }
      41             : 
      42     2865835 : Schedule* Scheduler::ComputeSchedule(Zone* zone, Graph* graph, Flags flags) {
      43             :   Zone* schedule_zone =
      44     2865835 :       (flags & Scheduler::kTempSchedule) ? zone : graph->zone();
      45             : 
      46             :   // Reserve 10% more space for nodes if node splitting is enabled to try to
      47             :   // avoid resizing the vector since that would triple its zone memory usage.
      48     2865835 :   float node_hint_multiplier = (flags & Scheduler::kSplitNodes) ? 1.1 : 1;
      49     2865835 :   size_t node_count_hint = node_hint_multiplier * graph->NodeCount();
      50             : 
      51             :   Schedule* schedule =
      52     2866222 :       new (schedule_zone) Schedule(schedule_zone, node_count_hint);
      53     2867018 :   Scheduler scheduler(zone, graph, schedule, flags, node_count_hint);
      54             : 
      55     2866405 :   scheduler.BuildCFG();
      56     2866818 :   scheduler.ComputeSpecialRPONumbering();
      57     2867271 :   scheduler.GenerateImmediateDominatorTree();
      58             : 
      59     2867709 :   scheduler.PrepareUses();
      60     2868341 :   scheduler.ScheduleEarly();
      61     2867548 :   scheduler.ScheduleLate();
      62             : 
      63     2868094 :   scheduler.SealFinalSchedule();
      64             : 
      65     2868320 :   return schedule;
      66             : }
      67             : 
      68             : 
      69             : Scheduler::SchedulerData Scheduler::DefaultSchedulerData() {
      70             :   SchedulerData def = {schedule_->start(), 0, kUnknown};
      71             :   return def;
      72             : }
      73             : 
      74             : 
      75             : Scheduler::SchedulerData* Scheduler::GetData(Node* node) {
      76  2564975594 :   return &node_data_[node->id()];
      77             : }
      78             : 
      79   140977022 : Scheduler::Placement Scheduler::InitializePlacement(Node* node) {
      80             :   SchedulerData* data = GetData(node);
      81   140977022 :   if (data->placement_ == kFixed) {
      82             :     // Nothing to do for control nodes that have been already fixed in
      83             :     // the schedule.
      84             :     return data->placement_;
      85             :   }
      86             :   DCHECK_EQ(kUnknown, data->placement_);
      87             :   switch (node->opcode()) {
      88             :     case IrOpcode::kParameter:
      89             :     case IrOpcode::kOsrValue:
      90             :       // Parameters and OSR values are always fixed to the start block.
      91     6552002 :       data->placement_ = kFixed;
      92     6552002 :       break;
      93             :     case IrOpcode::kPhi:
      94             :     case IrOpcode::kEffectPhi: {
      95             :       // Phis and effect phis are fixed if their control inputs are, whereas
      96             :       // otherwise they are coupled to a floating control node.
      97     5019873 :       Placement p = GetPlacement(NodeProperties::GetControlInput(node));
      98     5019793 :       data->placement_ = (p == kFixed ? kFixed : kCoupled);
      99     5019793 :       break;
     100             :     }
     101             : #define DEFINE_CONTROL_CASE(V) case IrOpcode::k##V:
     102             :       CONTROL_OP_LIST(DEFINE_CONTROL_CASE)
     103             : #undef DEFINE_CONTROL_CASE
     104             :       {
     105             :         // Control nodes that were not control-reachable from end may float.
     106      630609 :         data->placement_ = kSchedulable;
     107      630609 :         break;
     108             :     }
     109             :     default:
     110    98718028 :       data->placement_ = kSchedulable;
     111    98718028 :       break;
     112             :   }
     113   110920432 :   return data->placement_;
     114             : }
     115             : 
     116           0 : Scheduler::Placement Scheduler::GetPlacement(Node* node) {
     117  1933523437 :   return GetData(node)->placement_;
     118             : }
     119             : 
     120           0 : bool Scheduler::IsLive(Node* node) { return GetPlacement(node) != kUnknown; }
     121             : 
     122   131791128 : void Scheduler::UpdatePlacement(Node* node, Placement placement) {
     123             :   SchedulerData* data = GetData(node);
     124   131791128 :   if (data->placement_ == kUnknown) {
     125             :     // We only update control nodes from {kUnknown} to {kFixed}.  Ideally, we
     126             :     // should check that {node} is a control node (including exceptional calls),
     127             :     // but that is expensive.
     128             :     DCHECK_EQ(Scheduler::kFixed, placement);
     129    30072683 :     data->placement_ = placement;
     130    30072683 :     return;
     131             :   }
     132             : 
     133   101718445 :   switch (node->opcode()) {
     134             :     case IrOpcode::kParameter:
     135             :       // Parameters are fixed once and for all.
     136           0 :       UNREACHABLE();
     137             :       break;
     138             :     case IrOpcode::kPhi:
     139             :     case IrOpcode::kEffectPhi: {
     140             :       // Phis and effect phis are coupled to their respective blocks.
     141             :       DCHECK_EQ(Scheduler::kCoupled, data->placement_);
     142             :       DCHECK_EQ(Scheduler::kFixed, placement);
     143       39381 :       Node* control = NodeProperties::GetControlInput(node);
     144       39382 :       BasicBlock* block = schedule_->block(control);
     145       39382 :       schedule_->AddNode(block, node);
     146       39383 :       break;
     147             :     }
     148             : #define DEFINE_CONTROL_CASE(V) case IrOpcode::k##V:
     149             :       CONTROL_OP_LIST(DEFINE_CONTROL_CASE)
     150             : #undef DEFINE_CONTROL_CASE
     151             :       {
     152             :         // Control nodes force coupled uses to be placed.
     153     2498371 :         for (auto use : node->uses()) {
     154     1867811 :           if (GetPlacement(use) == Scheduler::kCoupled) {
     155             :             DCHECK_EQ(node, NodeProperties::GetControlInput(use));
     156       39382 :             UpdatePlacement(use, placement);
     157             :           }
     158             :       }
     159             :       break;
     160             :     }
     161             :     default:
     162             :       DCHECK_EQ(Scheduler::kSchedulable, data->placement_);
     163             :       DCHECK_EQ(Scheduler::kScheduled, placement);
     164             :       break;
     165             :   }
     166             :   // Reduce the use count of the node's inputs to potentially make them
     167             :   // schedulable. If all the uses of a node have been scheduled, then the node
     168             :   // itself can be scheduled.
     169   347107409 :   for (Edge const edge : node->input_edges()) {
     170   245389122 :     DecrementUnscheduledUseCount(edge.to(), edge.index(), edge.from());
     171             :   }
     172   101718287 :   data->placement_ = placement;
     173             : }
     174             : 
     175             : 
     176             : bool Scheduler::IsCoupledControlEdge(Node* node, int index) {
     177   491101052 :   return GetPlacement(node) == kCoupled &&
     178             :          NodeProperties::FirstControlIndex(node) == index;
     179             : }
     180             : 
     181             : 
     182   245401822 : void Scheduler::IncrementUnscheduledUseCount(Node* node, int index,
     183             :                                              Node* from) {
     184             :   // Make sure that control edges from coupled nodes are not counted.
     185   245430991 :   if (IsCoupledControlEdge(from, index)) return;
     186             : 
     187             :   // Tracking use counts for fixed nodes is useless.
     188   245390275 :   if (GetPlacement(node) == kFixed) return;
     189             : 
     190             :   // Use count for coupled nodes is summed up on their control.
     191   183166750 :   if (GetPlacement(node) == kCoupled) {
     192       29168 :     Node* control = NodeProperties::GetControlInput(node);
     193       29169 :     return IncrementUnscheduledUseCount(control, index, from);
     194             :   }
     195             : 
     196   183137582 :   ++(GetData(node)->unscheduled_count_);
     197   183137582 :   if (FLAG_trace_turbo_scheduler) {
     198           0 :     TRACE("  Use count of #%d:%s (used by #%d:%s)++ = %d\n", node->id(),
     199             :           node->op()->mnemonic(), from->id(), from->op()->mnemonic(),
     200             :           GetData(node)->unscheduled_count_);
     201             :   }
     202             : }
     203             : 
     204             : 
     205   245419804 : void Scheduler::DecrementUnscheduledUseCount(Node* node, int index,
     206             :                                              Node* from) {
     207             :   // Make sure that control edges from coupled nodes are not counted.
     208   245419804 :   if (IsCoupledControlEdge(from, index)) return;
     209             : 
     210             :   // Tracking use counts for fixed nodes is useless.
     211   490766846 :   if (GetPlacement(node) == kFixed) return;
     212             : 
     213             :   // Use count for coupled nodes is summed up on their control.
     214   183145062 :   if (GetPlacement(node) == kCoupled) {
     215       29168 :     Node* control = NodeProperties::GetControlInput(node);
     216       29168 :     return DecrementUnscheduledUseCount(control, index, from);
     217             :   }
     218             : 
     219             :   DCHECK_LT(0, GetData(node)->unscheduled_count_);
     220   183115894 :   --(GetData(node)->unscheduled_count_);
     221   183115894 :   if (FLAG_trace_turbo_scheduler) {
     222           0 :     TRACE("  Use count of #%d:%s (used by #%d:%s)-- = %d\n", node->id(),
     223             :           node->op()->mnemonic(), from->id(), from->op()->mnemonic(),
     224             :           GetData(node)->unscheduled_count_);
     225             :   }
     226   366260064 :   if (GetData(node)->unscheduled_count_ == 0) {
     227    83858953 :     TRACE("    newly eligible #%d:%s\n", node->id(), node->op()->mnemonic());
     228             :     schedule_queue_.push(node);
     229             :   }
     230             : }
     231             : 
     232             : 
     233             : // -----------------------------------------------------------------------------
     234             : // Phase 1: Build control-flow graph.
     235             : 
     236             : 
     237             : // Internal class to build a control flow graph (i.e the basic blocks and edges
     238             : // between them within a Schedule) from the node graph. Visits control edges of
     239             : // the graph backwards from an end node in order to find the connected control
     240             : // subgraph, needed for scheduling.
     241             : class CFGBuilder : public ZoneObject {
     242             :  public:
     243     2865284 :   CFGBuilder(Zone* zone, Scheduler* scheduler)
     244             :       : zone_(zone),
     245             :         scheduler_(scheduler),
     246     2865284 :         schedule_(scheduler->schedule_),
     247             :         queued_(scheduler->graph_, 2),
     248             :         queue_(zone),
     249             :         control_(zone),
     250             :         component_entry_(nullptr),
     251             :         component_start_(nullptr),
     252    11464025 :         component_end_(nullptr) {}
     253             : 
     254             :   // Run the control flow graph construction algorithm by walking the graph
     255             :   // backwards from end through control edges, building and connecting the
     256             :   // basic blocks for control nodes.
     257     2868099 :   void Run() {
     258             :     ResetDataStructures();
     259     2868099 :     Queue(scheduler_->graph_->end());
     260             : 
     261    39202086 :     while (!queue_.empty()) {  // Breadth-first backwards traversal.
     262    36333487 :       Node* node = queue_.front();
     263             :       queue_.pop();
     264    36333806 :       int max = NodeProperties::PastControlIndex(node);
     265   115274601 :       for (int i = NodeProperties::FirstControlIndex(node); i < max; i++) {
     266    39469241 :         Queue(node->InputAt(i));
     267             :       }
     268             :     }
     269             : 
     270    39203195 :     for (NodeVector::iterator i = control_.begin(); i != control_.end(); ++i) {
     271    36335121 :       ConnectBlocks(*i);  // Connect block to its predecessor/successors.
     272             :     }
     273     2868074 :   }
     274             : 
     275             :   // Run the control flow graph construction for a minimal control-connected
     276             :   // component ending in {exit} and merge that component into an existing
     277             :   // control flow graph at the bottom of {block}.
     278       38957 :   void Run(BasicBlock* block, Node* exit) {
     279             :     ResetDataStructures();
     280       38957 :     Queue(exit);
     281             : 
     282       38959 :     component_entry_ = nullptr;
     283       38959 :     component_start_ = block;
     284       38959 :     component_end_ = schedule_->block(exit);
     285       38959 :     scheduler_->equivalence_->Run(exit);
     286      196486 :     while (!queue_.empty()) {  // Breadth-first backwards traversal.
     287      157526 :       Node* node = queue_.front();
     288             :       queue_.pop();
     289             : 
     290             :       // Use control dependence equivalence to find a canonical single-entry
     291             :       // single-exit region that makes up a minimal component to be scheduled.
     292      157526 :       if (IsSingleEntrySingleExitRegion(node, exit)) {
     293       38959 :         TRACE("Found SESE at #%d:%s\n", node->id(), node->op()->mnemonic());
     294             :         DCHECK(!component_entry_);
     295       38959 :         component_entry_ = node;
     296       38959 :         continue;
     297             :       }
     298             : 
     299      118569 :       int max = NodeProperties::PastControlIndex(node);
     300      434466 :       for (int i = NodeProperties::FirstControlIndex(node); i < max; i++) {
     301      157949 :         Queue(node->InputAt(i));
     302             :       }
     303             :     }
     304             :     DCHECK(component_entry_);
     305             : 
     306      196487 :     for (NodeVector::iterator i = control_.begin(); i != control_.end(); ++i) {
     307      157528 :       ConnectBlocks(*i);  // Connect block to its predecessor/successors.
     308             :     }
     309       38959 :   }
     310             : 
     311             :  private:
     312             :   friend class ScheduleLateNodeVisitor;
     313             :   friend class Scheduler;
     314             : 
     315             :   void FixNode(BasicBlock* block, Node* node) {
     316    20985430 :     schedule_->AddNode(block, node);
     317    20985545 :     scheduler_->UpdatePlacement(node, Scheduler::kFixed);
     318             :   }
     319             : 
     320    42530891 :   void Queue(Node* node) {
     321             :     // Mark the connected control nodes as they are queued.
     322    85061782 :     if (!queued_.Get(node)) {
     323    36488947 :       BuildBlocks(node);
     324             :       queue_.push(node);
     325    36490087 :       queued_.Set(node, true);
     326    36490087 :       control_.push_back(node);
     327             :     }
     328    42532460 :   }
     329             : 
     330    36489141 :   void BuildBlocks(Node* node) {
     331    36489141 :     switch (node->opcode()) {
     332             :       case IrOpcode::kEnd:
     333     2856541 :         FixNode(schedule_->end(), node);
     334             :         break;
     335             :       case IrOpcode::kStart:
     336     2868339 :         FixNode(schedule_->start(), node);
     337             :         break;
     338             :       case IrOpcode::kLoop:
     339             :       case IrOpcode::kMerge:
     340     3649998 :         BuildBlockForNode(node);
     341     3650011 :         break;
     342             :       case IrOpcode::kTerminate: {
     343             :         // Put Terminate in the loop to which it refers.
     344      180577 :         Node* loop = NodeProperties::GetControlInput(node);
     345      180576 :         BasicBlock* block = BuildBlockForNode(loop);
     346             :         FixNode(block, node);
     347             :         break;
     348             :       }
     349             :       case IrOpcode::kBranch:
     350             :       case IrOpcode::kSwitch:
     351     5153274 :         BuildBlocksForSuccessors(node);
     352     5153283 :         break;
     353             : #define BUILD_BLOCK_JS_CASE(Name) case IrOpcode::k##Name:
     354             :         JS_OP_LIST(BUILD_BLOCK_JS_CASE)
     355             : // JS opcodes are just like calls => fall through.
     356             : #undef BUILD_BLOCK_JS_CASE
     357             :       case IrOpcode::kCall:
     358             :       case IrOpcode::kCallWithCallerSavedRegisters:
     359     6200692 :         if (NodeProperties::IsExceptionalCall(node)) {
     360      414007 :           BuildBlocksForSuccessors(node);
     361             :         }
     362             :         break;
     363             :       default:
     364             :         break;
     365             :     }
     366    36489691 :   }
     367             : 
     368    36489810 :   void ConnectBlocks(Node* node) {
     369    36489810 :     switch (node->opcode()) {
     370             :       case IrOpcode::kLoop:
     371             :       case IrOpcode::kMerge:
     372     3649986 :         ConnectMerge(node);
     373     3650058 :         break;
     374             :       case IrOpcode::kBranch:
     375     5123993 :         scheduler_->UpdatePlacement(node, Scheduler::kFixed);
     376     5123995 :         ConnectBranch(node);
     377     5123972 :         break;
     378             :       case IrOpcode::kSwitch:
     379       29411 :         scheduler_->UpdatePlacement(node, Scheduler::kFixed);
     380       29411 :         ConnectSwitch(node);
     381       29411 :         break;
     382             :       case IrOpcode::kDeoptimize:
     383       86360 :         scheduler_->UpdatePlacement(node, Scheduler::kFixed);
     384       86360 :         ConnectDeoptimize(node);
     385       86360 :         break;
     386             :       case IrOpcode::kTailCall:
     387       63323 :         scheduler_->UpdatePlacement(node, Scheduler::kFixed);
     388       63323 :         ConnectTailCall(node);
     389       63324 :         break;
     390             :       case IrOpcode::kReturn:
     391     3255617 :         scheduler_->UpdatePlacement(node, Scheduler::kFixed);
     392     3255674 :         ConnectReturn(node);
     393     3257878 :         break;
     394             :       case IrOpcode::kThrow:
     395      273819 :         scheduler_->UpdatePlacement(node, Scheduler::kFixed);
     396      273819 :         ConnectThrow(node);
     397      273821 :         break;
     398             : #define CONNECT_BLOCK_JS_CASE(Name) case IrOpcode::k##Name:
     399             :         JS_OP_LIST(CONNECT_BLOCK_JS_CASE)
     400             : // JS opcodes are just like calls => fall through.
     401             : #undef CONNECT_BLOCK_JS_CASE
     402             :       case IrOpcode::kCall:
     403             :       case IrOpcode::kCallWithCallerSavedRegisters:
     404     6200711 :         if (NodeProperties::IsExceptionalCall(node)) {
     405      414009 :           scheduler_->UpdatePlacement(node, Scheduler::kFixed);
     406      414009 :           ConnectCall(node);
     407             :         }
     408             :         break;
     409             :       default:
     410             :         break;
     411             :     }
     412    36492128 :   }
     413             : 
     414    15260067 :   BasicBlock* BuildBlockForNode(Node* node) {
     415    15260067 :     BasicBlock* block = schedule_->block(node);
     416    15260075 :     if (block == nullptr) {
     417    15079560 :       block = schedule_->NewBasicBlock();
     418    15079968 :       TRACE("Create block id:%d for #%d:%s\n", block->id().ToInt(), node->id(),
     419             :             node->op()->mnemonic());
     420             :       FixNode(block, node);
     421             :     }
     422    15260482 :     return block;
     423             :   }
     424             : 
     425     5567233 :   void BuildBlocksForSuccessors(Node* node) {
     426     5567233 :     size_t const successor_cnt = node->op()->ControlOutputCount();
     427     5567233 :     Node** successors = zone_->NewArray<Node*>(successor_cnt);
     428     5567205 :     NodeProperties::CollectControlProjections(node, successors, successor_cnt);
     429    28427613 :     for (size_t index = 0; index < successor_cnt; ++index) {
     430    11430044 :       BuildBlockForNode(successors[index]);
     431             :     }
     432     5567382 :   }
     433             : 
     434     5567348 :   void CollectSuccessorBlocks(Node* node, BasicBlock** successor_blocks,
     435             :                               size_t successor_cnt) {
     436             :     Node** successors = reinterpret_cast<Node**>(successor_blocks);
     437     5567348 :     NodeProperties::CollectControlProjections(node, successors, successor_cnt);
     438    28428012 :     for (size_t index = 0; index < successor_cnt; ++index) {
     439    11430315 :       successor_blocks[index] = schedule_->block(successors[index]);
     440             :     }
     441     5567355 :   }
     442             : 
     443    29653423 :   BasicBlock* FindPredecessorBlock(Node* node) {
     444             :     BasicBlock* predecessor_block = nullptr;
     445             :     while (true) {
     446    40240746 :       predecessor_block = schedule_->block(node);
     447    40241003 :       if (predecessor_block != nullptr) break;
     448    10587300 :       node = NodeProperties::GetControlInput(node);
     449             :     }
     450    29653703 :     return predecessor_block;
     451             :   }
     452             : 
     453      414009 :   void ConnectCall(Node* call) {
     454             :     BasicBlock* successor_blocks[2];
     455      414009 :     CollectSuccessorBlocks(call, successor_blocks, arraysize(successor_blocks));
     456             : 
     457             :     // Consider the exception continuation to be deferred.
     458      414009 :     successor_blocks[1]->set_deferred(true);
     459             : 
     460      414009 :     Node* call_control = NodeProperties::GetControlInput(call);
     461      414009 :     BasicBlock* call_block = FindPredecessorBlock(call_control);
     462      414009 :     TraceConnect(call, call_block, successor_blocks[0]);
     463      414009 :     TraceConnect(call, call_block, successor_blocks[1]);
     464      414009 :     schedule_->AddCall(call_block, call, successor_blocks[0],
     465      414009 :                        successor_blocks[1]);
     466      414009 :   }
     467             : 
     468     5123860 :   void ConnectBranch(Node* branch) {
     469             :     BasicBlock* successor_blocks[2];
     470             :     CollectSuccessorBlocks(branch, successor_blocks,
     471     5123860 :                            arraysize(successor_blocks));
     472             : 
     473             :     // Consider branch hints.
     474     5123938 :     switch (BranchHintOf(branch->op())) {
     475             :       case BranchHint::kNone:
     476             :         break;
     477             :       case BranchHint::kTrue:
     478     1749802 :         successor_blocks[1]->set_deferred(true);
     479             :         break;
     480             :       case BranchHint::kFalse:
     481      836232 :         successor_blocks[0]->set_deferred(true);
     482             :         break;
     483             :     }
     484             : 
     485     5123660 :     if (branch == component_entry_) {
     486       38956 :       TraceConnect(branch, component_start_, successor_blocks[0]);
     487       38956 :       TraceConnect(branch, component_start_, successor_blocks[1]);
     488       38957 :       schedule_->InsertBranch(component_start_, component_end_, branch,
     489       38957 :                               successor_blocks[0], successor_blocks[1]);
     490             :     } else {
     491     5084704 :       Node* branch_control = NodeProperties::GetControlInput(branch);
     492     5084734 :       BasicBlock* branch_block = FindPredecessorBlock(branch_control);
     493     5084824 :       TraceConnect(branch, branch_block, successor_blocks[0]);
     494     5084849 :       TraceConnect(branch, branch_block, successor_blocks[1]);
     495     5084831 :       schedule_->AddBranch(branch_block, branch, successor_blocks[0],
     496     5084831 :                            successor_blocks[1]);
     497             :     }
     498     5123968 :   }
     499             : 
     500       29411 :   void ConnectSwitch(Node* sw) {
     501       29411 :     size_t const successor_count = sw->op()->ControlOutputCount();
     502             :     BasicBlock** successor_blocks =
     503       29411 :         zone_->NewArray<BasicBlock*>(successor_count);
     504       29411 :     CollectSuccessorBlocks(sw, successor_blocks, successor_count);
     505             : 
     506       29411 :     if (sw == component_entry_) {
     507           7 :       for (size_t index = 0; index < successor_count; ++index) {
     508           3 :         TraceConnect(sw, component_start_, successor_blocks[index]);
     509             :       }
     510           1 :       schedule_->InsertSwitch(component_start_, component_end_, sw,
     511           1 :                               successor_blocks, successor_count);
     512             :     } else {
     513       29410 :       Node* switch_control = NodeProperties::GetControlInput(sw);
     514       29410 :       BasicBlock* switch_block = FindPredecessorBlock(switch_control);
     515      738364 :       for (size_t index = 0; index < successor_count; ++index) {
     516      354477 :         TraceConnect(sw, switch_block, successor_blocks[index]);
     517             :       }
     518       29410 :       schedule_->AddSwitch(switch_block, sw, successor_blocks, successor_count);
     519             :     }
     520      738371 :     for (size_t index = 0; index < successor_count; ++index) {
     521      708960 :       if (BranchHintOf(successor_blocks[index]->front()->op()) ==
     522             :           BranchHint::kFalse) {
     523       12208 :         successor_blocks[index]->set_deferred(true);
     524             :       }
     525             :     }
     526       29411 :   }
     527             : 
     528     3649932 :   void ConnectMerge(Node* merge) {
     529             :     // Don't connect the special merge at the end to its predecessors.
     530     3649932 :     if (IsFinalMerge(merge)) return;
     531             : 
     532     3650005 :     BasicBlock* block = schedule_->block(merge);
     533             :     DCHECK_NOT_NULL(block);
     534             :     // For all of the merge's control inputs, add a goto at the end to the
     535             :     // merge's basic block.
     536    12349525 :     for (Node* const input : merge->inputs()) {
     537     8699455 :       BasicBlock* predecessor_block = FindPredecessorBlock(input);
     538     8699451 :       TraceConnect(merge, predecessor_block, block);
     539     8699457 :       schedule_->AddGoto(predecessor_block, block);
     540             :     }
     541             :   }
     542             : 
     543       63323 :   void ConnectTailCall(Node* call) {
     544       63323 :     Node* call_control = NodeProperties::GetControlInput(call);
     545       63323 :     BasicBlock* call_block = FindPredecessorBlock(call_control);
     546       63323 :     TraceConnect(call, call_block, nullptr);
     547       63323 :     schedule_->AddTailCall(call_block, call);
     548       63324 :   }
     549             : 
     550     3255624 :   void ConnectReturn(Node* ret) {
     551     3255624 :     Node* return_control = NodeProperties::GetControlInput(ret);
     552     3255713 :     BasicBlock* return_block = FindPredecessorBlock(return_control);
     553     3255652 :     TraceConnect(ret, return_block, nullptr);
     554     3255679 :     schedule_->AddReturn(return_block, ret);
     555     3257839 :   }
     556             : 
     557       86360 :   void ConnectDeoptimize(Node* deopt) {
     558       86360 :     Node* deoptimize_control = NodeProperties::GetControlInput(deopt);
     559       86360 :     BasicBlock* deoptimize_block = FindPredecessorBlock(deoptimize_control);
     560       86360 :     TraceConnect(deopt, deoptimize_block, nullptr);
     561       86360 :     schedule_->AddDeoptimize(deoptimize_block, deopt);
     562       86360 :   }
     563             : 
     564      273820 :   void ConnectThrow(Node* thr) {
     565      273820 :     Node* throw_control = NodeProperties::GetControlInput(thr);
     566      273820 :     BasicBlock* throw_block = FindPredecessorBlock(throw_control);
     567      273820 :     TraceConnect(thr, throw_block, nullptr);
     568      273821 :     schedule_->AddThrow(throw_block, thr);
     569      273821 :   }
     570             : 
     571    23808148 :   void TraceConnect(Node* node, BasicBlock* block, BasicBlock* succ) {
     572             :     DCHECK_NOT_NULL(block);
     573    23808148 :     if (succ == nullptr) {
     574     3679036 :       TRACE("Connect #%d:%s, id:%d -> end\n", node->id(),
     575             :             node->op()->mnemonic(), block->id().ToInt());
     576             :     } else {
     577    20129112 :       TRACE("Connect #%d:%s, id:%d -> id:%d\n", node->id(),
     578             :             node->op()->mnemonic(), block->id().ToInt(), succ->id().ToInt());
     579             :     }
     580    23808148 :   }
     581             : 
     582             :   bool IsFinalMerge(Node* node) {
     583     7119378 :     return (node->opcode() == IrOpcode::kMerge &&
     584     3469446 :             node == scheduler_->graph_->end()->InputAt(0));
     585             :   }
     586             : 
     587      157526 :   bool IsSingleEntrySingleExitRegion(Node* entry, Node* exit) const {
     588      157526 :     size_t entry_class = scheduler_->equivalence_->ClassOf(entry);
     589      157526 :     size_t exit_class = scheduler_->equivalence_->ClassOf(exit);
     590      157527 :     return entry != exit && entry_class == exit_class;
     591             :   }
     592             : 
     593             :   void ResetDataStructures() {
     594             :     control_.clear();
     595             :     DCHECK(queue_.empty());
     596             :     DCHECK(control_.empty());
     597             :   }
     598             : 
     599             :   Zone* zone_;
     600             :   Scheduler* scheduler_;
     601             :   Schedule* schedule_;
     602             :   NodeMarker<bool> queued_;      // Mark indicating whether node is queued.
     603             :   ZoneQueue<Node*> queue_;       // Queue used for breadth-first traversal.
     604             :   NodeVector control_;           // List of encountered control nodes.
     605             :   Node* component_entry_;        // Component single-entry node.
     606             :   BasicBlock* component_start_;  // Component single-entry block.
     607             :   BasicBlock* component_end_;    // Component single-exit block.
     608             : };
     609             : 
     610             : 
     611     2865366 : void Scheduler::BuildCFG() {
     612     2865366 :   TRACE("--- CREATING CFG -------------------------------------------\n");
     613             : 
     614             :   // Instantiate a new control equivalence algorithm for the graph.
     615     8597296 :   equivalence_ = new (zone_) ControlEquivalence(zone_, graph_);
     616             : 
     617             :   // Build a control-flow graph for the main control-connected component that
     618             :   // is being spanned by the graph's start and end nodes.
     619     5732649 :   control_flow_builder_ = new (zone_) CFGBuilder(zone_, this);
     620     2868112 :   control_flow_builder_->Run();
     621             : 
     622             :   // Initialize per-block data.
     623             :   // Reserve an extra 10% to avoid resizing vector when fusing floating control.
     624     5736098 :   scheduled_nodes_.reserve(schedule_->BasicBlockCount() * 1.1);
     625     5732894 :   scheduled_nodes_.resize(schedule_->BasicBlockCount());
     626     2866800 : }
     627             : 
     628             : 
     629             : // -----------------------------------------------------------------------------
     630             : // Phase 2: Compute special RPO and dominator tree.
     631             : 
     632             : 
     633             : // Compute the special reverse-post-order block ordering, which is essentially
     634             : // a RPO of the graph where loop bodies are contiguous. Properties:
     635             : // 1. If block A is a predecessor of B, then A appears before B in the order,
     636             : //    unless B is a loop header and A is in the loop headed at B
     637             : //    (i.e. A -> B is a backedge).
     638             : // => If block A dominates block B, then A appears before B in the order.
     639             : // => If block A is a loop header, A appears before all blocks in the loop
     640             : //    headed at A.
     641             : // 2. All loops are contiguous in the order (i.e. no intervening blocks that
     642             : //    do not belong to the loop.)
     643             : // Note a simple RPO traversal satisfies (1) but not (2).
     644      249018 : class SpecialRPONumberer : public ZoneObject {
     645             :  public:
     646     3114902 :   SpecialRPONumberer(Zone* zone, Schedule* schedule)
     647             :       : zone_(zone),
     648             :         schedule_(schedule),
     649             :         order_(nullptr),
     650             :         beyond_end_(nullptr),
     651             :         loops_(zone),
     652             :         backedges_(zone),
     653             :         stack_(zone),
     654             :         previous_block_count_(0),
     655     9344655 :         empty_(0, zone) {}
     656             : 
     657             :   // Computes the special reverse-post-order for the main control flow graph,
     658             :   // that is for the graph spanned between the schedule's start and end blocks.
     659             :   void ComputeSpecialRPO() {
     660             :     DCHECK_EQ(0, schedule_->end()->SuccessorCount());
     661             :     DCHECK(!order_);  // Main order does not exist yet.
     662     3114816 :     ComputeAndInsertSpecialRPO(schedule_->start(), schedule_->end());
     663             :   }
     664             : 
     665             :   // Computes the special reverse-post-order for a partial control flow graph,
     666             :   // that is for the graph spanned between the given {entry} and {end} blocks,
     667             :   // then updates the existing ordering with this new information.
     668             :   void UpdateSpecialRPO(BasicBlock* entry, BasicBlock* end) {
     669             :     DCHECK(order_);  // Main order to be updated is present.
     670       38959 :     ComputeAndInsertSpecialRPO(entry, end);
     671             :   }
     672             : 
     673             :   // Serialize the previously computed order as a special reverse-post-order
     674             :   // numbering for basic blocks into the final schedule.
     675     3116697 :   void SerializeRPOIntoSchedule() {
     676             :     int32_t number = 0;
     677    30746473 :     for (BasicBlock* b = order_; b != nullptr; b = b->rpo_next()) {
     678    27629026 :       b->set_rpo_number(number++);
     679    27629159 :       schedule_->rpo_order()->push_back(b);
     680             :     }
     681     3117447 :     BeyondEndSentinel()->set_rpo_number(number);
     682     3114812 :   }
     683             : 
     684             :   // Print and verify the special reverse-post-order.
     685             :   void PrintAndVerifySpecialRPO() {
     686             : #if DEBUG
     687             :     if (FLAG_trace_turbo_scheduler) PrintRPO();
     688             :     VerifySpecialRPO();
     689             : #endif
     690             :   }
     691             : 
     692             :   const ZoneVector<BasicBlock*>& GetOutgoingBlocks(BasicBlock* block) {
     693     7081015 :     if (HasLoopNumber(block)) {
     694     7081014 :       LoopInfo const& loop = loops_[GetLoopNumber(block)];
     695     7081014 :       if (loop.outgoing) return *loop.outgoing;
     696             :     }
     697       59426 :     return empty_;
     698             :   }
     699             : 
     700             :  private:
     701             :   using Backedge = std::pair<BasicBlock*, size_t>;
     702             : 
     703             :   // Numbering for BasicBlock::rpo_number for this block traversal:
     704             :   static const int kBlockOnStack = -2;
     705             :   static const int kBlockVisited1 = -3;
     706             :   static const int kBlockVisited2 = -4;
     707             :   static const int kBlockUnvisited1 = -1;
     708             :   static const int kBlockUnvisited2 = kBlockVisited1;
     709             : 
     710             :   struct SpecialRPOStackFrame {
     711             :     BasicBlock* block;
     712             :     size_t index;
     713             :   };
     714             : 
     715             :   struct LoopInfo {
     716             :     BasicBlock* header;
     717             :     ZoneVector<BasicBlock*>* outgoing;
     718             :     BitVector* members;
     719             :     LoopInfo* prev;
     720             :     BasicBlock* end;
     721             :     BasicBlock* start;
     722             : 
     723      890929 :     void AddOutgoing(Zone* zone, BasicBlock* block) {
     724      890929 :       if (outgoing == nullptr) {
     725             :         outgoing = new (zone->New(sizeof(ZoneVector<BasicBlock*>)))
     726      539858 :             ZoneVector<BasicBlock*>(zone);
     727             :       }
     728      890928 :       outgoing->push_back(block);
     729      890932 :     }
     730             :   };
     731             : 
     732             :   int Push(ZoneVector<SpecialRPOStackFrame>& stack, int depth,
     733             :            BasicBlock* child, int unvisited) {
     734    40254612 :     if (child->rpo_number() == unvisited) {
     735    77234476 :       stack[depth].block = child;
     736    40254942 :       stack[depth].index = 0;
     737    40254942 :       child->set_rpo_number(kBlockOnStack);
     738    36980028 :       return depth + 1;
     739             :     }
     740             :     return depth;
     741             :   }
     742             : 
     743             :   BasicBlock* PushFront(BasicBlock* head, BasicBlock* block) {
     744             :     block->set_rpo_next(head);
     745             :     return block;
     746             :   }
     747             : 
     748             :   static int GetLoopNumber(BasicBlock* block) { return block->loop_number(); }
     749             :   static void SetLoopNumber(BasicBlock* block, int loop_number) {
     750             :     return block->set_loop_number(loop_number);
     751             :   }
     752             :   static bool HasLoopNumber(BasicBlock* block) {
     753             :     return block->loop_number() >= 0;
     754             :   }
     755             : 
     756             :   // TODO(mstarzinger): We only need this special sentinel because some tests
     757             :   // use the schedule's end block in actual control flow (e.g. with end having
     758             :   // successors). Once this has been cleaned up we can use the end block here.
     759     3119481 :   BasicBlock* BeyondEndSentinel() {
     760     3119481 :     if (beyond_end_ == nullptr) {
     761             :       BasicBlock::Id id = BasicBlock::Id::FromInt(-1);
     762     6234147 :       beyond_end_ = new (schedule_->zone()) BasicBlock(schedule_->zone(), id);
     763             :     }
     764     3116745 :     return beyond_end_;
     765             :   }
     766             : 
     767             :   // Compute special RPO for the control flow graph between {entry} and {end},
     768             :   // mutating any existing order so that the result is still valid.
     769     3153700 :   void ComputeAndInsertSpecialRPO(BasicBlock* entry, BasicBlock* end) {
     770             :     // RPO should not have been serialized for this schedule yet.
     771     3153700 :     CHECK_EQ(kBlockUnvisited1, schedule_->start()->loop_number());
     772     3153700 :     CHECK_EQ(kBlockUnvisited1, schedule_->start()->rpo_number());
     773     3153700 :     CHECK_EQ(0, static_cast<int>(schedule_->rpo_order()->size()));
     774             : 
     775             :     // Find correct insertion point within existing order.
     776             :     BasicBlock* insertion_point = entry->rpo_next();
     777             :     BasicBlock* order = insertion_point;
     778             : 
     779             :     // Perform an iterative RPO traversal using an explicit stack,
     780             :     // recording backedges that form cycles. O(|B|).
     781             :     DCHECK_LT(previous_block_count_, schedule_->BasicBlockCount());
     782     3153700 :     stack_.resize(schedule_->BasicBlockCount() - previous_block_count_);
     783     6307664 :     previous_block_count_ = schedule_->BasicBlockCount();
     784             :     int stack_depth = Push(stack_, 0, entry, kBlockUnvisited1);
     785     3156236 :     int num_loops = static_cast<int>(loops_.size());
     786             : 
     787    62597488 :     while (stack_depth > 0) {
     788    59441669 :       int current = stack_depth - 1;
     789    59441669 :       SpecialRPOStackFrame* frame = &stack_[current];
     790             : 
     791   115729626 :       if (frame->block != end &&
     792    56287957 :           frame->index < frame->block->SuccessorCount()) {
     793             :         // Process the next successor.
     794    31775282 :         BasicBlock* succ = frame->block->SuccessorAt(frame->index++);
     795    31775282 :         if (succ->rpo_number() == kBlockVisited1) continue;
     796    24812515 :         if (succ->rpo_number() == kBlockOnStack) {
     797             :           // The successor is on the stack, so this is a backedge (cycle).
     798      604295 :           backedges_.push_back(Backedge(frame->block, frame->index - 1));
     799      302146 :           if (!HasLoopNumber(succ)) {
     800             :             // Assign a new loop number to the header if it doesn't have one.
     801      272181 :             SetLoopNumber(succ, num_loops++);
     802             :           }
     803             :         } else {
     804             :           // Push the successor onto the stack.
     805             :           DCHECK_EQ(kBlockUnvisited1, succ->rpo_number());
     806             :           stack_depth = Push(stack_, stack_depth, succ, kBlockUnvisited1);
     807             :         }
     808             :       } else {
     809             :         // Finished with all successors; pop the stack and add the block.
     810             :         order = PushFront(order, frame->block);
     811    27666387 :         frame->block->set_rpo_number(kBlockVisited1);
     812             :         stack_depth--;
     813             :       }
     814             :     }
     815             : 
     816             :     // If no loops were encountered, then the order we computed was correct.
     817     3155819 :     if (num_loops > static_cast<int>(loops_.size())) {
     818             :       // Otherwise, compute the loop information from the backedges in order
     819             :       // to perform a traversal that groups loop bodies together.
     820      120888 :       ComputeLoopInfo(stack_, num_loops, &backedges_);
     821             : 
     822             :       // Initialize the "loop stack". Note the entry could be a loop header.
     823             :       LoopInfo* loop =
     824      120888 :           HasLoopNumber(entry) ? &loops_[GetLoopNumber(entry)] : nullptr;
     825             :       order = insertion_point;
     826             : 
     827             :       // Perform an iterative post-order traversal, visiting loop bodies before
     828             :       // edges that lead out of loops. Visits each block once, but linking loop
     829             :       // sections together is linear in the loop size, so overall is
     830             :       // O(|B| + max(loop_depth) * max(|loop|))
     831             :       stack_depth = Push(stack_, 0, entry, kBlockUnvisited2);
     832    30108599 :       while (stack_depth > 0) {
     833    29988091 :         SpecialRPOStackFrame* frame = &stack_[stack_depth - 1];
     834    29988091 :         BasicBlock* block = frame->block;
     835             :         BasicBlock* succ = nullptr;
     836             : 
     837    59857011 :         if (block != end && frame->index < block->SuccessorCount()) {
     838             :           // Process the next normal successor.
     839    16506766 :           succ = block->SuccessorAt(frame->index++);
     840    13481325 :         } else if (HasLoopNumber(block)) {
     841             :           // Process additional outgoing edges from the loop header.
     842     1163114 :           if (block->rpo_number() == kBlockOnStack) {
     843             :             // Finish the loop body the first time the header is left on the
     844             :             // stack.
     845             :             DCHECK(loop != nullptr && loop->header == block);
     846      272185 :             loop->start = PushFront(order, block);
     847      272185 :             order = loop->end;
     848      272185 :             block->set_rpo_number(kBlockVisited2);
     849             :             // Pop the loop stack and continue visiting outgoing edges within
     850             :             // the context of the outer loop, if any.
     851      272184 :             loop = loop->prev;
     852             :             // We leave the loop header on the stack; the rest of this iteration
     853             :             // and later iterations will go through its outgoing edges list.
     854             :           }
     855             : 
     856             :           // Use the next outgoing edge if there are any.
     857     2326226 :           size_t outgoing_index = frame->index - block->SuccessorCount();
     858     1163113 :           LoopInfo* info = &loops_[GetLoopNumber(block)];
     859             :           DCHECK(loop != info);
     860     2323975 :           if (block != entry && info->outgoing != nullptr &&
     861             :               outgoing_index < info->outgoing->size()) {
     862     1781866 :             succ = info->outgoing->at(outgoing_index);
     863      890933 :             frame->index++;
     864             :           }
     865             :         }
     866             : 
     867    29988090 :         if (succ != nullptr) {
     868             :           // Process the next successor.
     869    17397707 :           if (succ->rpo_number() == kBlockOnStack) continue;
     870    17095581 :           if (succ->rpo_number() == kBlockVisited2) continue;
     871             :           DCHECK_EQ(kBlockUnvisited2, succ->rpo_number());
     872    22745749 :           if (loop != nullptr && !loop->members->Contains(succ->id().ToInt())) {
     873             :             // The successor is not in the current loop or any nested loop.
     874             :             // Add it to the outgoing edges of this loop and visit it later.
     875      890931 :             loop->AddOutgoing(zone_, succ);
     876             :           } else {
     877             :             // Push the successor onto the stack.
     878             :             stack_depth = Push(stack_, stack_depth, succ, kBlockUnvisited2);
     879    12469510 :             if (HasLoopNumber(succ)) {
     880             :               // Push the inner loop onto the loop stack.
     881             :               DCHECK(GetLoopNumber(succ) < num_loops);
     882      272180 :               LoopInfo* next = &loops_[GetLoopNumber(succ)];
     883      272180 :               next->end = order;
     884      272180 :               next->prev = loop;
     885             :               loop = next;
     886             :             }
     887             :           }
     888             :         } else {
     889             :           // Finished with all successors of the current block.
     890    12590383 :           if (HasLoopNumber(block)) {
     891             :             // If we are going to pop a loop header, then add its entire body.
     892      272183 :             LoopInfo* info = &loops_[GetLoopNumber(block)];
     893      272183 :             for (BasicBlock* b = info->start; true; b = b->rpo_next()) {
     894     4967690 :               if (b->rpo_next() == info->end) {
     895             :                 b->set_rpo_next(order);
     896      272183 :                 info->end = order;
     897             :                 break;
     898             :               }
     899             :             }
     900      272183 :             order = info->start;
     901             :           } else {
     902             :             // Pop a single node off the stack and add it to the order.
     903             :             order = PushFront(order, block);
     904    12318200 :             block->set_rpo_number(kBlockVisited2);
     905             :           }
     906             :           stack_depth--;
     907             :         }
     908             :       }
     909             :     }
     910             : 
     911             :     // Publish new order the first time.
     912     3155439 :     if (order_ == nullptr) order_ = order;
     913             : 
     914             :     // Compute the correct loop headers and set the correct loop ends.
     915             :     LoopInfo* current_loop = nullptr;
     916             :     BasicBlock* current_header = entry->loop_header();
     917             :     int32_t loop_depth = entry->loop_depth();
     918     3155439 :     if (entry->IsLoopHeader()) --loop_depth;  // Entry might be a loop header.
     919    58486999 :     for (BasicBlock* b = order; b != insertion_point; b = b->rpo_next()) {
     920             :       BasicBlock* current = b;
     921             : 
     922             :       // Reset BasicBlock::rpo_number again.
     923    27665999 :       current->set_rpo_number(kBlockUnvisited1);
     924             : 
     925             :       // Finish the previous loop(s) if we just exited them.
     926    28205870 :       while (current_header != nullptr &&
     927             :              current == current_header->loop_end()) {
     928             :         DCHECK(current_header->IsLoopHeader());
     929             :         DCHECK_NOT_NULL(current_loop);
     930      270096 :         current_loop = current_loop->prev;
     931             :         current_header =
     932      270096 :             current_loop == nullptr ? nullptr : current_loop->header;
     933      270096 :         --loop_depth;
     934             :       }
     935    27665678 :       current->set_loop_header(current_header);
     936             : 
     937             :       // Push a new loop onto the stack if this loop is a loop header.
     938    27666285 :       if (HasLoopNumber(current)) {
     939      272185 :         ++loop_depth;
     940      272185 :         current_loop = &loops_[GetLoopNumber(current)];
     941      272185 :         BasicBlock* end = current_loop->end;
     942      272185 :         current->set_loop_end(end == nullptr ? BeyondEndSentinel() : end);
     943      272186 :         current_header = current_loop->header;
     944      272186 :         TRACE("id:%d is a loop header, increment loop depth to %d\n",
     945             :               current->id().ToInt(), loop_depth);
     946             :       }
     947             : 
     948    27666286 :       current->set_loop_depth(loop_depth);
     949             : 
     950    27665921 :       if (current->loop_header() == nullptr) {
     951    23856754 :         TRACE("id:%d is not in a loop (depth == %d)\n", current->id().ToInt(),
     952             :               current->loop_depth());
     953             :       } else {
     954     3809167 :         TRACE("id:%d has loop header id:%d, (depth == %d)\n",
     955             :               current->id().ToInt(), current->loop_header()->id().ToInt(),
     956             :               current->loop_depth());
     957             :       }
     958             :     }
     959     3155220 :   }
     960             : 
     961             :   // Computes loop membership from the backedges of the control flow graph.
     962      120882 :   void ComputeLoopInfo(ZoneVector<SpecialRPOStackFrame>& queue,
     963             :                        size_t num_loops, ZoneVector<Backedge>* backedges) {
     964             :     // Extend existing loop membership vectors.
     965      120883 :     for (LoopInfo& loop : loops_) {
     966           2 :       loop.members->Resize(static_cast<int>(schedule_->BasicBlockCount()),
     967           1 :                            zone_);
     968             :     }
     969             : 
     970             :     // Extend loop information vector.
     971      120882 :     loops_.resize(num_loops, LoopInfo());
     972             : 
     973             :     // Compute loop membership starting from backedges.
     974             :     // O(max(loop_depth) * max(|loop|))
     975      725185 :     for (size_t i = 0; i < backedges->size(); i++) {
     976      302148 :       BasicBlock* member = backedges->at(i).first;
     977      302148 :       BasicBlock* header = member->SuccessorAt(backedges->at(i).second);
     978      302148 :       size_t loop_num = GetLoopNumber(header);
     979      302148 :       if (loops_[loop_num].header == nullptr) {
     980      272176 :         loops_[loop_num].header = header;
     981             :         loops_[loop_num].members = new (zone_)
     982     1088708 :             BitVector(static_cast<int>(schedule_->BasicBlockCount()), zone_);
     983             :       }
     984             : 
     985             :       int queue_length = 0;
     986      302150 :       if (member != header) {
     987             :         // As long as the header doesn't have a backedge to itself,
     988             :         // push the member onto the queue and process its predecessors.
     989      604140 :         if (!loops_[loop_num].members->Contains(member->id().ToInt())) {
     990             :           loops_[loop_num].members->Add(member->id().ToInt());
     991             :         }
     992      302070 :         queue[queue_length++].block = member;
     993             :       }
     994             : 
     995             :       // Propagate loop membership backwards. All predecessors of M up to the
     996             :       // loop header H are members of the loop too. O(|blocks between M and H|).
     997     4997609 :       while (queue_length > 0) {
     998     9390918 :         BasicBlock* block = queue[--queue_length].block;
     999    16407567 :         for (size_t i = 0; i < block->PredecessorCount(); i++) {
    1000             :           BasicBlock* pred = block->PredecessorAt(i);
    1001     5856054 :           if (pred != header) {
    1002    11036920 :             if (!loops_[loop_num].members->Contains(pred->id().ToInt())) {
    1003             :               loops_[loop_num].members->Add(pred->id().ToInt());
    1004     8786814 :               queue[queue_length++].block = pred;
    1005             :             }
    1006             :           }
    1007             :         }
    1008             :       }
    1009             :     }
    1010      120887 :   }
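                     : 
                     : // A stand-alone sketch of the backward propagation above, assuming a
                     : // hypothetical adjacency-list CFG (MiniCFG, LoopMembers and the integer
                     : // block ids are illustrative, not V8 API): starting from a backedge
                     : // source M with header H, every predecessor reachable without crossing
                     : // H belongs to the loop.
                     : #include <set>
                     : #include <vector>
                     : 
                     : struct MiniCFG {
                     :   std::vector<std::vector<int>> preds;  // preds[b] = predecessor ids
                     : };
                     : 
                     : std::set<int> LoopMembers(const MiniCFG& cfg, int header, int member) {
                     :   std::set<int> members = {header, member};
                     :   std::vector<int> queue;
                     :   if (member != header) queue.push_back(member);
                     :   while (!queue.empty()) {
                     :     int block = queue.back();
                     :     queue.pop_back();
                     :     for (int pred : cfg.preds[block]) {
                     :       // Stop at the header; blocks between M and H are members.
                     :       if (pred != header && members.insert(pred).second) {
                     :         queue.push_back(pred);
                     :       }
                     :     }
                     :   }
                     :   return members;
                     : }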
    1011             : 
    1012             : #if DEBUG
    1013             :   void PrintRPO() {
    1014             :     StdoutStream os;
    1015             :     os << "RPO with " << loops_.size() << " loops";
    1016             :     if (loops_.size() > 0) {
    1017             :       os << " (";
    1018             :       for (size_t i = 0; i < loops_.size(); i++) {
    1019             :         if (i > 0) os << " ";
    1020             :         os << "id:" << loops_[i].header->id();
    1021             :       }
    1022             :       os << ")";
    1023             :     }
    1024             :     os << ":\n";
    1025             : 
    1026             :     for (BasicBlock* block = order_; block != nullptr;
    1027             :          block = block->rpo_next()) {
    1028             :       os << std::setw(5) << "B" << block->rpo_number() << ":";
    1029             :       for (size_t i = 0; i < loops_.size(); i++) {
    1030             :         bool range = loops_[i].header->LoopContains(block);
    1031             :         bool membership = loops_[i].header != block && range;
    1032             :         os << (membership ? " |" : "  ");
    1033             :         os << (range ? "x" : " ");
    1034             :       }
    1035             :       os << "  id:" << block->id() << ": ";
    1036             :       if (block->loop_end() != nullptr) {
    1037             :         os << " range: [B" << block->rpo_number() << ", B"
    1038             :            << block->loop_end()->rpo_number() << ")";
    1039             :       }
    1040             :       if (block->loop_header() != nullptr) {
    1041             :         os << " header: id:" << block->loop_header()->id();
    1042             :       }
    1043             :       if (block->loop_depth() > 0) {
    1044             :         os << " depth: " << block->loop_depth();
    1045             :       }
    1046             :       os << "\n";
    1047             :     }
    1048             :   }
    1049             : 
    1050             :   void VerifySpecialRPO() {
    1051             :     BasicBlockVector* order = schedule_->rpo_order();
    1052             :     DCHECK_LT(0, order->size());
    1053             :     DCHECK_EQ(0, (*order)[0]->id().ToInt());  // entry should be first.
    1054             : 
    1055             :     for (size_t i = 0; i < loops_.size(); i++) {
    1056             :       LoopInfo* loop = &loops_[i];
    1057             :       BasicBlock* header = loop->header;
    1058             :       BasicBlock* end = header->loop_end();
    1059             : 
    1060             :       DCHECK_NOT_NULL(header);
    1061             :       DCHECK_LE(0, header->rpo_number());
    1062             :       DCHECK_LT(header->rpo_number(), order->size());
    1063             :       DCHECK_NOT_NULL(end);
    1064             :       DCHECK_LE(end->rpo_number(), order->size());
    1065             :       DCHECK_GT(end->rpo_number(), header->rpo_number());
    1066             :       DCHECK_NE(header->loop_header(), header);
    1067             : 
    1068             :       // Verify the start ... end list relationship.
    1069             :       int links = 0;
    1070             :       BasicBlock* block = loop->start;
    1071             :       DCHECK_EQ(header, block);
    1072             :       bool end_found;
    1073             :       while (true) {
    1074             :         if (block == nullptr || block == loop->end) {
    1075             :           end_found = (loop->end == block);
    1076             :           break;
    1077             :         }
    1078             :         // The list should be in the same order as the final result.
    1079             :         DCHECK(block->rpo_number() == links + header->rpo_number());
    1080             :         links++;
    1081             :         block = block->rpo_next();
    1082             :         DCHECK_LT(links, static_cast<int>(2 * order->size()));  // cycle?
    1083             :       }
    1084             :       DCHECK_LT(0, links);
    1085             :       DCHECK_EQ(links, end->rpo_number() - header->rpo_number());
    1086             :       DCHECK(end_found);
    1087             : 
    1088             :       // Check loop depth of the header.
    1089             :       int loop_depth = 0;
    1090             :       for (LoopInfo* outer = loop; outer != nullptr; outer = outer->prev) {
    1091             :         loop_depth++;
    1092             :       }
    1093             :       DCHECK_EQ(loop_depth, header->loop_depth());
    1094             : 
    1095             :       // Check the contiguity of loops.
    1096             :       int count = 0;
    1097             :       for (int j = 0; j < static_cast<int>(order->size()); j++) {
    1098             :         BasicBlock* block = order->at(j);
    1099             :         DCHECK_EQ(block->rpo_number(), j);
    1100             :         if (j < header->rpo_number() || j >= end->rpo_number()) {
    1101             :           DCHECK(!header->LoopContains(block));
    1102             :         } else {
    1103             :           DCHECK(header->LoopContains(block));
    1104             :           DCHECK_GE(block->loop_depth(), loop_depth);
    1105             :           count++;
    1106             :         }
    1107             :       }
    1108             :       DCHECK_EQ(links, count);
    1109             :     }
    1110             :   }
    1111             : #endif  // DEBUG
    1112             : 
    1113             :   Zone* zone_;
    1114             :   Schedule* schedule_;
    1115             :   BasicBlock* order_;
    1116             :   BasicBlock* beyond_end_;
    1117             :   ZoneVector<LoopInfo> loops_;
    1118             :   ZoneVector<Backedge> backedges_;
    1119             :   ZoneVector<SpecialRPOStackFrame> stack_;
    1120             :   size_t previous_block_count_;
    1121             :   ZoneVector<BasicBlock*> const empty_;
    1122             : };
    1123             : 
    1124             : 
    1125      249018 : BasicBlockVector* Scheduler::ComputeSpecialRPO(Zone* zone, Schedule* schedule) {
    1126      249018 :   SpecialRPONumberer numberer(zone, schedule);
    1127             :   numberer.ComputeSpecialRPO();
    1128      249018 :   numberer.SerializeRPOIntoSchedule();
    1129             :   numberer.PrintAndVerifySpecialRPO();
    1130      249018 :   return schedule->rpo_order();
    1131             : }
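                     : 
                     : // A hedged usage sketch for the entry point above: it can compute a
                     : // special RPO for an existing schedule without running the whole
                     : // scheduler ({zone} and {schedule} are assumed to exist already):
                     : //
                     : //   BasicBlockVector* rpo = Scheduler::ComputeSpecialRPO(zone, schedule);
                     : //   for (BasicBlock* block : *rpo) {
                     : //     // Blocks of each loop body appear as one contiguous run here.
                     : //   }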
    1132             : 
    1133             : 
    1134     2866122 : void Scheduler::ComputeSpecialRPONumbering() {
    1135     2866122 :   TRACE("--- COMPUTING SPECIAL RPO ----------------------------------\n");
    1136             : 
    1137             :   // Compute the special reverse-post-order for basic blocks.
    1138     5732258 :   special_rpo_ = new (zone_) SpecialRPONumberer(zone_, schedule_);
    1139             :   special_rpo_->ComputeSpecialRPO();
    1140     2867280 : }
    1141             : 
    1142             : 
    1143     2906361 : void Scheduler::PropagateImmediateDominators(BasicBlock* block) {
    1144    44951767 :   for (/*nop*/; block != nullptr; block = block->rpo_next()) {
    1145             :     auto pred = block->predecessors().begin();
    1146             :     auto end = block->predecessors().end();
    1147             :     DCHECK(pred != end);  // All blocks except start have predecessors.
    1148    21022537 :     BasicBlock* dominator = *pred;
    1149             :     bool deferred = dominator->deferred();
    1150             :     // For multiple predecessors, walk up the dominator tree until a common
    1151             :     // dominator is found. Visitation order guarantees that all predecessors
    1152             :     // except for backwards edges have been visited.
    1153    28252522 :     for (++pred; pred != end; ++pred) {
    1154             :       // Don't examine backwards edges.
    1155     7230072 :       if ((*pred)->dominator_depth() < 0) continue;
    1156     7023245 :       dominator = BasicBlock::GetCommonDominator(dominator, *pred);
    1157     7023158 :       deferred = deferred & (*pred)->deferred();
    1158             :     }
    1159             :     block->set_dominator(dominator);
    1160    21022450 :     block->set_dominator_depth(dominator->dominator_depth() + 1);
    1161    21022450 :     block->set_deferred(deferred | block->deferred());
    1162    21022450 :     TRACE("Block id:%d's idom is id:%d, depth = %d\n", block->id().ToInt(),
    1163             :           dominator->id().ToInt(), block->dominator_depth());
    1164             :   }
    1165     2906527 : }
    1166             : 
    1167             : 
    1168     2867207 : void Scheduler::GenerateImmediateDominatorTree() {
    1169     2867207 :   TRACE("--- IMMEDIATE BLOCK DOMINATORS -----------------------------\n");
    1170             : 
    1171             :   // Seed start block to be the first dominator.
    1172     2867207 :   schedule_->start()->set_dominator_depth(0);
    1173             : 
    1174             :   // Build the block dominator tree resulting from the above seed.
    1175     2867207 :   PropagateImmediateDominators(schedule_->start()->rpo_next());
    1176     2867796 : }
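                     : 
                     : // BasicBlock::GetCommonDominator used above is defined elsewhere in V8;
                     : // a minimal stand-alone sketch of the usual depth-based walk such a
                     : // helper performs (DomNode is a hypothetical type, not V8's BasicBlock):
                     : struct DomNode {
                     :   DomNode* idom;  // immediate dominator; nullptr only at the root
                     :   int depth;      // depth in the dominator tree; the root has depth 0
                     : };
                     : 
                     : DomNode* CommonDominator(DomNode* b1, DomNode* b2) {
                     :   // Equalize depths first, then walk both chains up in lockstep.
                     :   while (b1->depth > b2->depth) b1 = b1->idom;
                     :   while (b2->depth > b1->depth) b2 = b2->idom;
                     :   while (b1 != b2) {
                     :     b1 = b1->idom;
                     :     b2 = b2->idom;
                     :   }
                     :   return b1;
                     : }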
    1177             : 
    1178             : 
    1179             : // -----------------------------------------------------------------------------
    1180             : // Phase 3: Prepare use counts for nodes.
    1181             : 
    1182             : 
    1183             : class PrepareUsesVisitor {
    1184             :  public:
    1185             :   explicit PrepareUsesVisitor(Scheduler* scheduler)
    1186     2866699 :       : scheduler_(scheduler), schedule_(scheduler->schedule_) {}
    1187             : 
    1188   140975664 :   void Pre(Node* node) {
    1189   140975664 :     if (scheduler_->InitializePlacement(node) == Scheduler::kFixed) {
    1190             :       // Fixed nodes are always roots for schedule late.
    1191    41610612 :       scheduler_->schedule_root_nodes_.push_back(node);
    1192    41610372 :       if (!schedule_->IsScheduled(node)) {
    1193             :         // Make sure root nodes are scheduled in their respective blocks.
    1194    11536362 :         TRACE("Scheduling fixed position node #%d:%s\n", node->id(),
    1195             :               node->op()->mnemonic());
    1196    11536451 :         IrOpcode::Value opcode = node->opcode();
    1197             :         BasicBlock* block =
    1198             :             opcode == IrOpcode::kParameter
    1199     6513276 :                 ? schedule_->start()
    1200    16559626 :                 : schedule_->block(NodeProperties::GetControlInput(node));
    1201             :         DCHECK_NOT_NULL(block);
    1202    11536476 :         schedule_->AddNode(block, node);
    1203             :       }
    1204             :     }
    1205   140979813 :   }
    1206             : 
    1207   319088521 :   void PostEdge(Node* from, int index, Node* to) {
    1208             :     // If the edge is from an unscheduled node, then tally it in the use count
    1209             :     // for all of its inputs. The same criterion will be used in ScheduleLate
    1210             :     // for decrementing use counts.
    1211   319088521 :     if (!schedule_->IsScheduled(from)) {
    1212             :       DCHECK_NE(Scheduler::kFixed, scheduler_->GetPlacement(from));
    1213   243332269 :       scheduler_->IncrementUnscheduledUseCount(to, index, from);
    1214             :     }
    1215   319088340 :   }
    1216             : 
    1217             :  private:
    1218             :   Scheduler* scheduler_;
    1219             :   Schedule* schedule_;
    1220             : };
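                     : 
                     : // The counts established by this visitor drive phase 5: a node becomes
                     : // schedulable in ScheduleLate exactly when its unscheduled_count_ drops
                     : // back to zero, i.e. when every use tallied here has itself been placed.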
    1221             : 
    1222             : 
    1223     2866699 : void Scheduler::PrepareUses() {
    1224     2866699 :   TRACE("--- PREPARE USES -------------------------------------------\n");
    1225             : 
    1226             :   // Count the uses of every node; the counts are used to ensure that all
    1227             :   // of a node's uses are scheduled before the node itself.
    1228             :   PrepareUsesVisitor prepare_uses(this);
    1229             : 
    1230             :   // TODO(turbofan): simplify the careful pre/post ordering here.
    1231     2866699 :   BoolVector visited(graph_->NodeCount(), false, zone_);
    1232     2867092 :   ZoneStack<Node::InputEdges::iterator> stack(zone_);
    1233     2867219 :   Node* node = graph_->end();
    1234     2867219 :   prepare_uses.Pre(node);
    1235     2867775 :   visited[node->id()] = true;
    1236     5833539 :   stack.push(node->input_edges().begin());
    1237   460071978 :   while (!stack.empty()) {
    1238             :     Edge edge = *stack.top();
    1239             :     Node* node = edge.to();
    1240   914407130 :     if (visited[node->id()]) {
    1241   319092065 :       prepare_uses.PostEdge(edge.from(), edge.index(), edge.to());
    1242   319095171 :       if (++stack.top() == edge.from()->input_edges().end()) stack.pop();
    1243             :     } else {
    1244   138111500 :       prepare_uses.Pre(node);
    1245   138118130 :       visited[node->id()] = true;
    1246   327584689 :       if (node->InputCount() > 0) stack.push(node->input_edges().begin());
    1247             :     }
    1248             :   }
    1249     2868442 : }
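                     : 
                     : // The traversal above is an instance of the generic iterative DFS below,
                     : // sketched over a hypothetical graph given as per-node input-id lists
                     : // (DepthFirst and its parameters are illustrative, not V8 API): Pre
                     : // fires on the first visit of a node, PostEdge fires for an edge once
                     : // its target has been visited.
                     : #include <stack>
                     : #include <utility>
                     : #include <vector>
                     : 
                     : template <typename Pre, typename PostEdge>
                     : void DepthFirst(const std::vector<std::vector<int>>& inputs, int root,
                     :                 Pre pre, PostEdge post_edge) {
                     :   std::vector<bool> visited(inputs.size(), false);
                     :   std::stack<std::pair<int, size_t>> stack;  // (node, next input index)
                     :   pre(root);
                     :   visited[root] = true;
                     :   stack.push({root, 0});
                     :   while (!stack.empty()) {
                     :     auto& [node, index] = stack.top();
                     :     if (index == inputs[node].size()) {
                     :       stack.pop();
                     :       continue;
                     :     }
                     :     int to = inputs[node][index];
                     :     if (visited[to]) {
                     :       post_edge(node, static_cast<int>(index), to);
                     :       ++index;
                     :     } else {
                     :       pre(to);
                     :       visited[to] = true;
                     :       stack.push({to, 0});  // the edge is re-examined once {to} is done
                     :     }
                     :   }
                     : }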
    1250             : 
    1251             : 
    1252             : // -----------------------------------------------------------------------------
    1253             : // Phase 4: Schedule nodes early.
    1254             : 
    1255             : 
    1256     2906504 : class ScheduleEarlyNodeVisitor {
    1257             :  public:
    1258             :   ScheduleEarlyNodeVisitor(Zone* zone, Scheduler* scheduler)
    1259     2906534 :       : scheduler_(scheduler), schedule_(scheduler->schedule_), queue_(zone) {}
    1260             : 
    1261             :   // Run the schedule early algorithm on a set of fixed root nodes.
    1262     2903957 :   void Run(NodeVector* roots) {
    1263    44714675 :     for (Node* const root : *roots) {
    1264             :       queue_.push(root);
    1265   172896911 :       while (!queue_.empty()) {
    1266   131086193 :         VisitNode(queue_.front());
    1267             :         queue_.pop();
    1268             :       }
    1269             :     }
    1270     2907415 :   }
    1271             : 
    1272             :  private:
    1273             :   // Visits one node from the queue and propagates its current schedule early
    1274             :   // position to all uses. This in turn might push more nodes onto the queue.
    1275   131085885 :   void VisitNode(Node* node) {
    1276   131085885 :     Scheduler::SchedulerData* data = scheduler_->GetData(node);
    1277             : 
    1278             :     // Fixed nodes already know their schedule early position.
    1279   131085885 :     if (scheduler_->GetPlacement(node) == Scheduler::kFixed) {
    1280    41808980 :       data->minimum_block_ = schedule_->block(node);
    1281    41810304 :       TRACE("Fixing #%d:%s minimum_block = id:%d, dominator_depth = %d\n",
    1282             :             node->id(), node->op()->mnemonic(),
    1283             :             data->minimum_block_->id().ToInt(),
    1284             :             data->minimum_block_->dominator_depth());
    1285             :     }
    1286             : 
    1287             :     // No need to propagate unconstrained schedule early positions.
    1288   131090879 :     if (data->minimum_block_ == schedule_->start()) return;
    1289             : 
    1290             :     // Propagate schedule early position.
    1291             :     DCHECK_NOT_NULL(data->minimum_block_);
    1292   355424435 :     for (auto use : node->uses()) {
    1293   473264236 :       if (scheduler_->IsLive(use)) {
    1294   236631000 :         PropagateMinimumPositionToNode(data->minimum_block_, use);
    1295             :       }
    1296             :     }
    1297             :   }
    1298             : 
    1299             :   // Propagates {block} as another minimum position into the given {node}.
    1300             :   // Once the queue has been processed, {node}'s minimum block is the deepest
    1301             :   // block that is still dominated by the placements of all of {node}'s inputs.
    1302   236700740 :   void PropagateMinimumPositionToNode(BasicBlock* block, Node* node) {
    1303   236700740 :     Scheduler::SchedulerData* data = scheduler_->GetData(node);
    1304             : 
    1305             :     // No need to propagate to fixed node, it's guaranteed to be a root.
    1306   236700740 :     if (scheduler_->GetPlacement(node) == Scheduler::kFixed) return;
    1307             : 
    1308             :     // Coupled nodes influence schedule early position of their control.
    1309   163021109 :     if (scheduler_->GetPlacement(node) == Scheduler::kCoupled) {
    1310       69044 :       Node* control = NodeProperties::GetControlInput(node);
    1311       69044 :       PropagateMinimumPositionToNode(block, control);
    1312             :     }
    1313             : 
    1314             :     // Propagate new position if it is deeper down the dominator tree than the
    1315             :     // current. Note that all inputs need to have minimum block position inside
    1316             :     // the dominator chain of {node}'s minimum block position.
    1317             :     DCHECK(InsideSameDominatorChain(block, data->minimum_block_));
    1318   163023310 :     if (block->dominator_depth() > data->minimum_block_->dominator_depth()) {
    1319    89282122 :       data->minimum_block_ = block;
    1320             :       queue_.push(node);
    1321    89282322 :       TRACE("Propagating #%d:%s minimum_block = id:%d, dominator_depth = %d\n",
    1322             :             node->id(), node->op()->mnemonic(),
    1323             :             data->minimum_block_->id().ToInt(),
    1324             :             data->minimum_block_->dominator_depth());
    1325             :     }
    1326             :   }
    1327             : 
    1328             : #if DEBUG
    1329             :   bool InsideSameDominatorChain(BasicBlock* b1, BasicBlock* b2) {
    1330             :     BasicBlock* dominator = BasicBlock::GetCommonDominator(b1, b2);
    1331             :     return dominator == b1 || dominator == b2;
    1332             :   }
    1333             : #endif
    1334             : 
    1335             :   Scheduler* scheduler_;
    1336             :   Schedule* schedule_;
    1337             :   ZoneQueue<Node*> queue_;
    1338             : };
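                     : 
                     : // The fixed point computed above, restated on a stand-alone model
                     : // (MiniNode is hypothetical; DomNode is the sketch type introduced after
                     : // the dominator-tree phase): a use may never be placed above one of its
                     : // inputs, so its minimum block is raised to the deepest input position.
                     : #include <queue>
                     : #include <vector>
                     : 
                     : struct MiniNode {
                     :   std::vector<MiniNode*> uses;
                     :   DomNode* minimum_block;  // earliest legal placement found so far
                     : };
                     : 
                     : void PropagateEarlyPositions(MiniNode* root) {
                     :   std::queue<MiniNode*> queue;
                     :   queue.push(root);
                     :   while (!queue.empty()) {
                     :     MiniNode* node = queue.front();
                     :     queue.pop();
                     :     for (MiniNode* use : node->uses) {
                     :       // Raise the use's minimum block if this input sits deeper in
                     :       // the dominator tree; re-queue the use so the change propagates.
                     :       if (node->minimum_block->depth > use->minimum_block->depth) {
                     :         use->minimum_block = node->minimum_block;
                     :         queue.push(use);
                     :       }
                     :     }
                     :   }
                     : }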
    1339             : 
    1340             : 
    1341     2867430 : void Scheduler::ScheduleEarly() {
    1342     2867430 :   TRACE("--- SCHEDULE EARLY -----------------------------------------\n");
    1343     2867575 :   if (FLAG_trace_turbo_scheduler) {
    1344           0 :     TRACE("roots: ");
    1345           0 :     for (Node* node : schedule_root_nodes_) {
    1346           0 :       TRACE("#%d:%s ", node->id(), node->op()->mnemonic());
    1347             :     }
    1348           0 :     TRACE("\n");
    1349             :   }
    1350             : 
    1351             :   // Compute the minimum block for each node thereby determining the earliest
    1352             :   // position each node could be placed within a valid schedule.
    1353     2867575 :   ScheduleEarlyNodeVisitor schedule_early_visitor(zone_, this);
    1354     2867907 :   schedule_early_visitor.Run(&schedule_root_nodes_);
    1355     2867545 : }
    1356             : 
    1357             : 
    1358             : // -----------------------------------------------------------------------------
    1359             : // Phase 5: Schedule nodes late.
    1360             : 
    1361             : 
    1362     2868130 : class ScheduleLateNodeVisitor {
    1363             :  public:
    1364     2866842 :   ScheduleLateNodeVisitor(Zone* zone, Scheduler* scheduler)
    1365             :       : zone_(zone),
    1366             :         scheduler_(scheduler),
    1367     2866842 :         schedule_(scheduler_->schedule_),
    1368             :         marked_(scheduler->zone_),
    1369    11466808 :         marking_queue_(scheduler->zone_) {}
    1370             : 
    1371             :   // Run the schedule late algorithm on a set of fixed root nodes.
    1372             :   void Run(NodeVector* roots) {
    1373    44478180 :     for (Node* const root : *roots) {
    1374    41609695 :       ProcessQueue(root);
    1375             :     }
    1376             :   }
    1377             : 
    1378             :  private:
    1379    41631536 :   void ProcessQueue(Node* root) {
    1380    41631536 :     ZoneQueue<Node*>* queue = &(scheduler_->schedule_queue_);
    1381   193249302 :     for (Node* node : root->inputs()) {
    1382             :       // Don't schedule coupled nodes on their own.
    1383   151636946 :       if (scheduler_->GetPlacement(node) == Scheduler::kCoupled) {
    1384       21300 :         node = NodeProperties::GetControlInput(node);
    1385             :       }
    1386             : 
    1387             :       // Test schedulability condition by looking at unscheduled use count.
    1388   151636946 :       if (scheduler_->GetData(node)->unscheduled_count_ != 0) continue;
    1389             : 
    1390             :       queue->push(node);
    1391   154964330 :       do {
    1392   154984760 :         Node* const node = queue->front();
    1393             :         queue->pop();
    1394   154963166 :         VisitNode(node);
    1395             :       } while (!queue->empty());
    1396             :     }
    1397    41612356 :   }
    1398             : 
    1399             :   // Visits one node from the queue of schedulable nodes and determines its
    1400             :   // schedule late position. Also hoists nodes out of loops to find a
    1401             :   // better scheduling position.
    1402   154962629 :   void VisitNode(Node* node) {
    1403             :     DCHECK_EQ(0, scheduler_->GetData(node)->unscheduled_count_);
    1404             : 
    1405             :     // Don't schedule nodes that are already scheduled.
    1406   154962629 :     if (schedule_->IsScheduled(node)) return;
    1407             :     DCHECK_EQ(Scheduler::kSchedulable, scheduler_->GetPlacement(node));
    1408             : 
    1409             :     // Determine the dominating block for all of the uses of this node. It is
    1410             :     // the latest block that this node can be scheduled in.
    1411    99732327 :     TRACE("Scheduling #%d:%s\n", node->id(), node->op()->mnemonic());
    1412    99732327 :     BasicBlock* block = GetCommonDominatorOfUses(node);
    1413             :     DCHECK_NOT_NULL(block);
    1414             : 
    1415             :     // The schedule early block dominates the schedule late block.
    1416   199471004 :     BasicBlock* min_block = scheduler_->GetData(node)->minimum_block_;
    1417             :     DCHECK_EQ(min_block, BasicBlock::GetCommonDominator(block, min_block));
    1418    99735502 :     TRACE(
    1419             :         "Schedule late of #%d:%s is id:%d at loop depth %d, minimum = id:%d\n",
    1420             :         node->id(), node->op()->mnemonic(), block->id().ToInt(),
    1421             :         block->loop_depth(), min_block->id().ToInt());
    1422             : 
    1423             :     // Hoist nodes out of loops if possible. Nodes can be hoisted iteratively
    1424             :     // into enclosing loop pre-headers until they would precede their schedule
    1425             :     // early position.
    1426    99735502 :     BasicBlock* hoist_block = GetHoistBlock(block);
    1427    99734613 :     if (hoist_block &&
    1428             :         hoist_block->dominator_depth() >= min_block->dominator_depth()) {
    1429      461406 :       do {
    1430      461407 :         TRACE("  hoisting #%d:%s to block id:%d\n", node->id(),
    1431             :               node->op()->mnemonic(), hoist_block->id().ToInt());
    1432             :         DCHECK_LT(hoist_block->loop_depth(), block->loop_depth());
    1433             :         block = hoist_block;
    1434      461407 :         hoist_block = GetHoistBlock(hoist_block);
    1435      461406 :       } while (hoist_block &&
    1436             :                hoist_block->dominator_depth() >= min_block->dominator_depth());
    1437   198554206 :     } else if (scheduler_->flags_ & Scheduler::kSplitNodes) {
    1438             :       // Split the {node} if beneficial and return the new {block} for it.
    1439    41591088 :       block = SplitNode(block, node);
    1440             :     }
    1441             : 
    1442             :     // Schedule the node or a floating control structure.
    1443    99733583 :     if (IrOpcode::IsMergeOpcode(node->opcode())) {
    1444             :       ScheduleFloatingControl(block, node);
    1445    99694624 :     } else if (node->opcode() == IrOpcode::kFinishRegion) {
    1446      132941 :       ScheduleRegion(block, node);
    1447             :     } else {
    1448    99561683 :       ScheduleNode(block, node);
    1449             :     }
    1450             :   }
    1451             : 
    1452             :   bool IsMarked(BasicBlock* block) const {
    1453             :     DCHECK_LT(block->id().ToSize(), marked_.size());
    1454             :     return marked_[block->id().ToSize()];
    1455             :   }
    1456             : 
    1457             :   void Mark(BasicBlock* block) { marked_[block->id().ToSize()] = true; }
    1458             : 
    1459             :   // Mark {block} and push its non-marked predecessors onto the marking queue.
    1460    80094263 :   void MarkBlock(BasicBlock* block) {
    1461             :     DCHECK_LT(block->id().ToSize(), marked_.size());
    1462             :     Mark(block);
    1463   185616480 :     for (BasicBlock* pred_block : block->predecessors()) {
    1464   105522172 :       if (IsMarked(pred_block)) continue;
    1465    99358552 :       marking_queue_.push_back(pred_block);
    1466             :     }
    1467    80094308 :   }
    1468             : 
    1469    41590935 :   BasicBlock* SplitNode(BasicBlock* block, Node* node) {
    1470             :     // For now, we limit splitting to pure nodes.
    1471    41590935 :     if (!node->op()->HasProperty(Operator::kPure)) return block;
    1472             :     // TODO(titzer): fix the special case of splitting of projections.
    1473    28994431 :     if (node->opcode() == IrOpcode::kProjection) return block;
    1474             : 
    1475             :     // The {block} is the common dominator of all uses of {node}, so we cannot
    1476             :     // split anything unless the {block} has at least two successors.
    1477             :     DCHECK_EQ(block, GetCommonDominatorOfUses(node));
    1478    28851532 :     if (block->SuccessorCount() < 2) return block;
    1479             : 
    1480             :     // Clear marking bits.
    1481             :     DCHECK(marking_queue_.empty());
    1482    30229406 :     std::fill(marked_.begin(), marked_.end(), false);
    1483    30229750 :     marked_.resize(schedule_->BasicBlockCount() + 1, false);
    1484             : 
    1485             :     // Check if the {node} has uses in {block}.
    1486    74877561 :     for (Edge edge : node->use_edges()) {
    1487    72004128 :       if (!scheduler_->IsLive(edge.from())) continue;
    1488    36001982 :       BasicBlock* use_block = GetBlockForUse(edge);
    1489    72003727 :       if (use_block == nullptr || IsMarked(use_block)) continue;
    1490    28360187 :       if (use_block == block) {
    1491    12241278 :         TRACE("  not splitting #%d:%s, it is used in id:%d\n", node->id(),
    1492             :               node->op()->mnemonic(), block->id().ToInt());
    1493    12241278 :         marking_queue_.clear();
    1494    12241224 :         return block;
    1495             :       }
    1496    16118909 :       MarkBlock(use_block);
    1497             :     }
    1498             : 
    1499             :     // Compute transitive marking closure; a block is marked if all its
    1500             :     // successors are marked.
    1501    92439647 :     do {
    1502    92439651 :       BasicBlock* top_block = marking_queue_.front();
    1503    92439651 :       marking_queue_.pop_front();
    1504    92439627 :       if (IsMarked(top_block)) continue;
    1505             :       bool marked = true;
    1506   161780898 :       for (BasicBlock* successor : top_block->successors()) {
    1507    97805668 :         if (!IsMarked(successor)) {
    1508             :           marked = false;
    1509             :           break;
    1510             :         }
    1511             :       }
    1512    72708290 :       if (marked) MarkBlock(top_block);
    1513             :     } while (!marking_queue_.empty());
    1514             : 
    1515             :     // If the (common dominator) {block} is marked, we know that all paths from
    1516             :     // {block} to the end contain at least one use of {node}, and hence there's
    1517             :     // no point in splitting the {node} in this case.
    1518     2873429 :     if (IsMarked(block)) {
    1519     1771612 :       TRACE("  not splitting #%d:%s, its common dominator id:%d is perfect\n",
    1520             :             node->id(), node->op()->mnemonic(), block->id().ToInt());
    1521             :       return block;
    1522             :     }
    1523             : 
    1524             :     // Split {node} for uses according to the previously computed marking
    1525             :     // closure. Every marking partition has a unique dominator, which gets a
    1526             :     // copy of the {node}, with the exception of the first partition, which
    1527             :     // gets the {node} itself.
    1528     1101817 :     ZoneMap<BasicBlock*, Node*> dominators(scheduler_->zone_);
    1529    10551447 :     for (Edge edge : node->use_edges()) {
    1530     9449634 :       if (!scheduler_->IsLive(edge.from())) continue;
    1531     4724820 :       BasicBlock* use_block = GetBlockForUse(edge);
    1532     4724818 :       if (use_block == nullptr) continue;
    1533    22097174 :       while (IsMarked(use_block->dominator())) {
    1534     4215848 :         use_block = use_block->dominator();
    1535             :       }
    1536     4724815 :       auto& use_node = dominators[use_block];
    1537     4724809 :       if (use_node == nullptr) {
    1538     3450591 :         if (dominators.size() == 1u) {
    1539             :           // Place the {node} at {use_block}.
    1540     1101810 :           block = use_block;
    1541     1101810 :           use_node = node;
    1542     1101810 :           TRACE("  pushing #%d:%s down to id:%d\n", node->id(),
    1543             :                 node->op()->mnemonic(), block->id().ToInt());
    1544             :         } else {
    1545             :           // Place a copy of {node} at {use_block}.
    1546     2348781 :           use_node = CloneNode(node);
    1547     2348777 :           TRACE("  cloning #%d:%s for id:%d\n", use_node->id(),
    1548             :                 use_node->op()->mnemonic(), use_block->id().ToInt());
    1549     2348777 :           scheduler_->schedule_queue_.push(use_node);
    1550             :         }
    1551             :       }
    1552     4724809 :       edge.UpdateTo(use_node);
    1553             :     }
    1554             :     return block;
    1555             :   }
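                     : 
                     :   // A worked micro-example of the marking logic above (illustrative, not
                     :   // from the source): take a diamond CFG B0 -> {B1, B2} -> B3 in which a
                     :   // pure {node} whose common use dominator is B0 is used only in B1. B1
                     :   // gets marked; the closure cannot mark B0 because its successor B2
                     :   // stays unmarked, so {node} is pushed down into B1 and the path
                     :   // B0 -> B2 -> B3 no longer computes it. Had there also been a use in
                     :   // B3, the closure would have marked B2 and then B0, and splitting
                     :   // would have been skipped as pointless.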
    1556             : 
    1557   100196622 :   BasicBlock* GetHoistBlock(BasicBlock* block) {
    1558   100196622 :     if (block->IsLoopHeader()) return block->dominator();
    1559             :     // We must make sure that the {block} dominates all of the loop's
    1560             :     // outgoing blocks.  If it doesn't, then there is a path out of the
    1561             :     // loop which does not execute this {block}, so we can't hoist
    1562             :     // operations from this {block} out of the loop, as that would
    1563             :     // introduce additional computations.
    1564    99062846 :     if (BasicBlock* header_block = block->loop_header()) {
    1565    12643422 :       for (BasicBlock* outgoing_block :
    1566    19487946 :            scheduler_->special_rpo_->GetOutgoingBlocks(header_block)) {
    1567    12406931 :         if (BasicBlock::GetCommonDominator(block, outgoing_block) != block) {
    1568             :           return nullptr;
    1569             :         }
    1570             :       }
    1571      236491 :       return header_block->dominator();
    1572             :     }
    1573             :     return nullptr;
    1574             :   }
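                     : 
                     :   // In effect, GetHoistBlock returns the candidate loop pre-header (the
                     :   // dominator of the loop header) exactly when hoisting into it is safe,
                     :   // so the do-while loop in VisitNode climbs one loop nesting level per
                     :   // iteration until it would cross the schedule-early bound {min_block}.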
    1575             : 
    1576    99780167 :   BasicBlock* GetCommonDominatorOfUses(Node* node) {
    1577             :     BasicBlock* block = nullptr;
    1578   524213793 :     for (Edge edge : node->use_edges()) {
    1579   424439650 :       if (!scheduler_->IsLive(edge.from())) continue;
    1580   212217611 :       BasicBlock* use_block = GetBlockForUse(edge);
    1581             :       block = block == nullptr
    1582             :                   ? use_block
    1583             :                   : use_block == nullptr
    1584             :                         ? block
    1585   212214005 :                         : BasicBlock::GetCommonDominator(block, use_block);
    1586             :     }
    1587    99774143 :     return block;
    1588             :   }
    1589             : 
    1590             :   BasicBlock* FindPredecessorBlock(Node* node) {
    1591    11748796 :     return scheduler_->control_flow_builder_->FindPredecessorBlock(node);
    1592             :   }
    1593             : 
    1594   252924531 :   BasicBlock* GetBlockForUse(Edge edge) {
    1595             :     // TODO(titzer): ignore uses from dead nodes (not visited in PrepareUses()).
    1596             :     // Dead uses only occur if the graph is not trimmed before scheduling.
    1597             :     Node* use = edge.from();
    1598   252924531 :     if (IrOpcode::IsPhiOpcode(use->opcode())) {
    1599             :       // If the use is from a coupled (i.e. floating) phi, compute the common
    1600             :       // dominator of its uses. This will not recurse more than one level.
    1601    20389334 :       if (scheduler_->GetPlacement(use) == Scheduler::kCoupled) {
    1602       38960 :         TRACE("  inspecting uses of coupled #%d:%s\n", use->id(),
    1603             :               use->op()->mnemonic());
    1604             :         // TODO(titzer): reenable once above TODO is addressed.
    1605             :         //        DCHECK_EQ(edge.to(), NodeProperties::GetControlInput(use));
    1606       38960 :         return GetCommonDominatorOfUses(use);
    1607             :       }
    1608             :       // If the use is from a fixed (i.e. non-floating) phi, we use the
    1609             :       // predecessor block of the corresponding control input to the merge.
    1610    10155707 :       if (scheduler_->GetPlacement(use) == Scheduler::kFixed) {
    1611    10155664 :         TRACE("  input@%d into a fixed phi #%d:%s\n", edge.index(), use->id(),
    1612             :               use->op()->mnemonic());
    1613    10155664 :         Node* merge = NodeProperties::GetControlInput(use, 0);
    1614             :         DCHECK(IrOpcode::IsMergeOpcode(merge->opcode()));
    1615    10155561 :         Node* input = NodeProperties::GetControlInput(merge, edge.index());
    1616    10155466 :         return FindPredecessorBlock(input);
    1617             :       }
    1618   242729864 :     } else if (IrOpcode::IsMergeOpcode(use->opcode())) {
    1619             :       // If the use is from a fixed (i.e. non-floating) merge, we use the
    1620             :       // predecessor block of the current input to the merge.
    1621     3186736 :       if (scheduler_->GetPlacement(use) == Scheduler::kFixed) {
    1622     1593367 :         TRACE("  input@%d into a fixed merge #%d:%s\n", edge.index(), use->id(),
    1623             :               use->op()->mnemonic());
    1624     1593368 :         return FindPredecessorBlock(edge.to());
    1625             :       }
    1626             :     }
    1627   241136540 :     BasicBlock* result = schedule_->block(use);
    1628   241133795 :     if (result == nullptr) return nullptr;
    1629   241134412 :     TRACE("  must dominate use #%d:%s in id:%d\n", use->id(),
    1630             :           use->op()->mnemonic(), result->id().ToInt());
    1631             :     return result;
    1632             :   }
    1633             : 
    1634             :   void ScheduleFloatingControl(BasicBlock* block, Node* node) {
    1635       38959 :     scheduler_->FuseFloatingControl(block, node);
    1636             :   }
    1637             : 
    1638      132941 :   void ScheduleRegion(BasicBlock* block, Node* region_end) {
    1639             :     // We only allow regions of instructions connected into a linear
    1640             :     // effect chain. The only value allowed to be produced by a node
    1641             :     // in the chain must be the value consumed by the FinishRegion node.
    1642             : 
    1643             :     // We schedule back to front; we first schedule FinishRegion.
    1644      132941 :     CHECK_EQ(IrOpcode::kFinishRegion, region_end->opcode());
    1645      132941 :     ScheduleNode(block, region_end);
    1646             : 
    1647             :     // Schedule the chain.
    1648      132941 :     Node* node = NodeProperties::GetEffectInput(region_end);
    1649     3521645 :     while (node->opcode() != IrOpcode::kBeginRegion) {
    1650             :       DCHECK_EQ(0, scheduler_->GetData(node)->unscheduled_count_);
    1651             :       DCHECK_EQ(1, node->op()->EffectInputCount());
    1652             :       DCHECK_EQ(1, node->op()->EffectOutputCount());
    1653             :       DCHECK_EQ(0, node->op()->ControlOutputCount());
    1654             :       // The value output (if there is any) must be consumed
    1655             :       // by the EndRegion node.
    1656             :       DCHECK(node->op()->ValueOutputCount() == 0 ||
    1657             :              node == region_end->InputAt(0));
    1658     1694352 :       ScheduleNode(block, node);
    1659     1694352 :       node = NodeProperties::GetEffectInput(node);
    1660             :     }
    1661             :     // Schedule the BeginRegion node.
    1662             :     DCHECK_EQ(0, scheduler_->GetData(node)->unscheduled_count_);
    1663      132941 :     ScheduleNode(block, node);
    1664      132941 :   }
    1665             : 
    1666   101520496 :   void ScheduleNode(BasicBlock* block, Node* node) {
    1667   101520496 :     schedule_->PlanNode(block, node);
    1668             :     size_t block_id = block->id().ToSize();
    1669   203034904 :     if (!scheduler_->scheduled_nodes_[block_id]) {
    1670    12409998 :       scheduler_->scheduled_nodes_[block_id] =
    1671    37230995 :           new (zone_->New(sizeof(NodeVector))) NodeVector(zone_);
    1672             :     }
    1673   203032902 :     scheduler_->scheduled_nodes_[block_id]->push_back(node);
    1674   101510271 :     scheduler_->UpdatePlacement(node, Scheduler::kScheduled);
    1675   101520519 :   }
    1676             : 
    1677     2348780 :   Node* CloneNode(Node* node) {
    1678             :     int const input_count = node->InputCount();
    1679     6488146 :     for (int index = 0; index < input_count; ++index) {
    1680             :       Node* const input = node->InputAt(index);
    1681     2069681 :       scheduler_->IncrementUnscheduledUseCount(input, index, node);
    1682             :     }
    1683     2348782 :     Node* const copy = scheduler_->graph_->CloneNode(node);
    1684     2348774 :     TRACE(("clone #%d:%s -> #%d\n"), node->id(), node->op()->mnemonic(),
    1685             :           copy->id());
    1686     2348774 :     scheduler_->node_data_.resize(copy->id() + 1,
    1687     4697548 :                                   scheduler_->DefaultSchedulerData());
    1688     7046328 :     scheduler_->node_data_[copy->id()] = scheduler_->node_data_[node->id()];
    1689     2348776 :     return copy;
    1690             :   }
    1691             : 
    1692             :   Zone* zone_;
    1693             :   Scheduler* scheduler_;
    1694             :   Schedule* schedule_;
    1695             :   BoolVector marked_;
    1696             :   ZoneDeque<BasicBlock*> marking_queue_;
    1697             : };
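                     : 
                     : // The core computation of schedule late, restated on the stand-alone
                     : // model (LatestBlock is hypothetical; CommonDominator and DomNode are
                     : // the sketch types introduced after the dominator-tree phase): the
                     : // latest legal position for a node is the common dominator of all of
                     : // its use positions.
                     : #include <vector>
                     : 
                     : DomNode* LatestBlock(const std::vector<DomNode*>& use_blocks) {
                     :   DomNode* block = nullptr;
                     :   for (DomNode* use_block : use_blocks) {
                     :     if (use_block == nullptr) continue;  // unplaced or dead use
                     :     block = block == nullptr ? use_block
                     :                              : CommonDominator(block, use_block);
                     :   }
                     :   return block;
                     : }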
    1698             : 
    1699             : 
    1700     2866713 : void Scheduler::ScheduleLate() {
    1701     2866713 :   TRACE("--- SCHEDULE LATE ------------------------------------------\n");
    1702     2866832 :   if (FLAG_trace_turbo_scheduler) {
    1703           0 :     TRACE("roots: ");
    1704           0 :     for (Node* node : schedule_root_nodes_) {
    1705           0 :       TRACE("#%d:%s ", node->id(), node->op()->mnemonic());
    1706             :     }
    1707           0 :     TRACE("\n");
    1708             :   }
    1709             : 
    1710             :   // Schedule: Places nodes in dominator block of all their uses.
    1711     2866832 :   ScheduleLateNodeVisitor schedule_late_visitor(zone_, this);
    1712             :   schedule_late_visitor.Run(&schedule_root_nodes_);
    1713     2868130 : }
    1714             : 
    1715             : 
    1716             : // -----------------------------------------------------------------------------
    1717             : // Phase 6: Seal the final schedule.
    1718             : 
    1719             : 
    1720     2867971 : void Scheduler::SealFinalSchedule() {
    1721     2867971 :   TRACE("--- SEAL FINAL SCHEDULE ------------------------------------\n");
    1722             : 
    1723             :   // Serialize the assembly order and reverse-post-order numbering.
    1724     2867971 :   special_rpo_->SerializeRPOIntoSchedule();
    1725             :   special_rpo_->PrintAndVerifySpecialRPO();
    1726             : 
    1727             :   // Add the collected nodes to their basic blocks in the right order.
    1728             :   int block_num = 0;
    1729    23682304 :   for (NodeVector* nodes : scheduled_nodes_) {
    1730    41628010 :     BasicBlock::Id id = BasicBlock::Id::FromInt(block_num++);
    1731    20814005 :     BasicBlock* block = schedule_->GetBlockById(id);
    1732    20814713 :     if (nodes) {
    1733   113948817 :       for (Node* node : base::Reversed(*nodes)) {
    1734   101536457 :         schedule_->AddNode(block, node);
    1735             :       }
    1736             :     }
    1737             :   }
    1738     2868299 : }
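                     : 
                     : // Note on base::Reversed above: ScheduleLate places a node only after
                     : // all of its uses, so each per-block vector was filled uses-first;
                     : // iterating it in reverse restores definition-before-use order within
                     : // the block.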
    1739             : 
    1740             : 
    1741             : // -----------------------------------------------------------------------------
    1742             : 
    1743             : 
    1744       38957 : void Scheduler::FuseFloatingControl(BasicBlock* block, Node* node) {
    1745       38957 :   TRACE("--- FUSE FLOATING CONTROL ----------------------------------\n");
    1746       38958 :   if (FLAG_trace_turbo_scheduler) {
    1747           0 :     StdoutStream{} << "Schedule before control flow fusion:\n" << *schedule_;
    1748             :   }
    1749             : 
    1750             :   // Iterate on phase 1: Build control-flow graph.
    1751       38958 :   control_flow_builder_->Run(block, node);
    1752             : 
    1753             :   // Iterate on phase 2: Compute special RPO and dominator tree.
    1754       38958 :   special_rpo_->UpdateSpecialRPO(block, schedule_->block(node));
    1755             :   // TODO(mstarzinger): Currently "iterate on" means "re-run". Fix that.
    1756     6430179 :   for (BasicBlock* b = block->rpo_next(); b != nullptr; b = b->rpo_next()) {
    1757             :     b->set_dominator_depth(-1);
    1758             :     b->set_dominator(nullptr);
    1759             :   }
    1760       38959 :   PropagateImmediateDominators(block->rpo_next());
    1761             : 
    1762             :   // Iterate on phase 4: Schedule nodes early.
    1763             :   // TODO(mstarzinger): The following loop gathering the propagation roots is a
    1764             :   // temporary solution and should be merged into the rest of the scheduler as
    1765             :   // soon as the approach settled for all floating loops.
    1766       38958 :   NodeVector propagation_roots(control_flow_builder_->control_);
    1767      235445 :   for (Node* node : control_flow_builder_->control_) {
    1768      582489 :     for (Node* use : node->uses()) {
    1769      251864 :       if (NodeProperties::IsPhi(use) && IsLive(use)) {
    1770       39383 :         propagation_roots.push_back(use);
    1771             :       }
    1772             :     }
    1773             :   }
    1774       38959 :   if (FLAG_trace_turbo_scheduler) {
    1775           0 :     TRACE("propagation roots: ");
    1776           0 :     for (Node* node : propagation_roots) {
    1777           0 :       TRACE("#%d:%s ", node->id(), node->op()->mnemonic());
    1778             :     }
    1779           0 :     TRACE("\n");
    1780             :   }
    1781       38959 :   ScheduleEarlyNodeVisitor schedule_early_visitor(zone_, this);
    1782       38958 :   schedule_early_visitor.Run(&propagation_roots);
    1783             : 
    1784             :   // Move previously planned nodes.
    1785             :   // TODO(mstarzinger): Improve that by supporting bulk moves.
    1786       77918 :   scheduled_nodes_.resize(schedule_->BasicBlockCount());
    1787       38958 :   MovePlannedNodes(block, schedule_->block(node));
    1788             : 
    1789       38957 :   if (FLAG_trace_turbo_scheduler) {
    1790           0 :     StdoutStream{} << "Schedule after control flow fusion:\n" << *schedule_;
    1791             :   }
    1792       38959 : }
    1793             : 
    1794             : 
    1795       38957 : void Scheduler::MovePlannedNodes(BasicBlock* from, BasicBlock* to) {
    1796       38957 :   TRACE("Move planned nodes from id:%d to id:%d\n", from->id().ToInt(),
    1797             :         to->id().ToInt());
    1798       38959 :   NodeVector* from_nodes = scheduled_nodes_[from->id().ToSize()];
    1799       38959 :   NodeVector* to_nodes = scheduled_nodes_[to->id().ToSize()];
    1800       38959 :   if (!from_nodes) return;
    1801             : 
    1802      189985 :   for (Node* const node : *from_nodes) {
    1803      172344 :     schedule_->SetBlockForNode(to, node);
    1804             :   }
    1805       17641 :   if (to_nodes) {
    1806           0 :     to_nodes->insert(to_nodes->end(), from_nodes->begin(), from_nodes->end());
    1807             :     from_nodes->clear();
    1808             :   } else {
    1809             :     std::swap(scheduled_nodes_[from->id().ToSize()],
    1810             :               scheduled_nodes_[to->id().ToSize()]);
    1811             :   }
    1812             : }
    1813             : 
    1814             : #undef TRACE
    1815             : 
    1816             : }  // namespace compiler
    1817             : }  // namespace internal
    1818      121996 : }  // namespace v8

Generated by: LCOV version 1.10