LCOV - code coverage report
Current view: top level - src/compiler - scheduler.cc (source / functions) Hit Total Coverage
Test: app.info Lines: 674 698 96.6 %
Date: 2017-10-20 Functions: 62 64 96.9 %

          Line data    Source code
       1             : // Copyright 2013 the V8 project authors. All rights reserved.
       2             : // Use of this source code is governed by a BSD-style license that can be
       3             : // found in the LICENSE file.
       4             : 
       5             : #include "src/compiler/scheduler.h"
       6             : 
       7             : #include <iomanip>
       8             : 
       9             : #include "src/base/adapters.h"
      10             : #include "src/bit-vector.h"
      11             : #include "src/compiler/common-operator.h"
      12             : #include "src/compiler/control-equivalence.h"
      13             : #include "src/compiler/graph.h"
      14             : #include "src/compiler/node-marker.h"
      15             : #include "src/compiler/node-properties.h"
      16             : #include "src/compiler/node.h"
      17             : #include "src/zone/zone-containers.h"
      18             : 
      19             : namespace v8 {
      20             : namespace internal {
      21             : namespace compiler {
      22             : 
// Emits scheduler trace output, but only when --trace-turbo-scheduler is on.
// Wrapped in do/while(false) so it behaves like a single statement.
#define TRACE(...)                                       \
  do {                                                   \
    if (FLAG_trace_turbo_scheduler) PrintF(__VA_ARGS__); \
  } while (false)
      27             : 
// Constructs a scheduler over {graph} that fills in {schedule}. All side
// tables live in {zone}. {node_count_hint} pre-sizes the per-node data
// vector so the later resize (and node splitting) is less likely to
// reallocate zone memory.
Scheduler::Scheduler(Zone* zone, Graph* graph, Schedule* schedule, Flags flags,
                     size_t node_count_hint)
    : zone_(zone),
      graph_(graph),
      schedule_(schedule),
      flags_(flags),
      scheduled_nodes_(zone),
      schedule_root_nodes_(zone),
      schedule_queue_(zone),
      node_data_(zone) {
  // Reserve the hinted capacity first, then size to the current node count;
  // every node starts out with default (kUnknown placement) data.
  node_data_.reserve(node_count_hint);
  node_data_.resize(graph->NodeCount(), DefaultSchedulerData());
}
      41             : 
// Entry point: computes a schedule for {graph} by running the scheduling
// phases in order. Temporary schedules are allocated in {zone}; otherwise the
// schedule lives in the graph's zone.
Schedule* Scheduler::ComputeSchedule(Zone* zone, Graph* graph, Flags flags) {
  Zone* schedule_zone =
      (flags & Scheduler::kTempSchedule) ? zone : graph->zone();

  // Reserve 10% more space for nodes if node splitting is enabled to try to
  // avoid resizing the vector since that would triple its zone memory usage.
  float node_hint_multiplier = (flags & Scheduler::kSplitNodes) ? 1.1 : 1;
  size_t node_count_hint = node_hint_multiplier * graph->NodeCount();

  Schedule* schedule =
      new (schedule_zone) Schedule(schedule_zone, node_count_hint);
  Scheduler scheduler(zone, graph, schedule, flags, node_count_hint);

  // Build the control-flow graph, then derive block ordering and dominance.
  scheduler.BuildCFG();
  scheduler.ComputeSpecialRPONumbering();
  scheduler.GenerateImmediateDominatorTree();

  // Count uses, then place floating nodes between their earliest legal and
  // latest profitable positions.
  scheduler.PrepareUses();
  scheduler.ScheduleEarly();
  scheduler.ScheduleLate();

  // Finalize block contents and ordering.
  scheduler.SealFinalSchedule();

  return schedule;
}
      67             : 
      68             : 
      69             : Scheduler::SchedulerData Scheduler::DefaultSchedulerData() {
      70     3513513 :   SchedulerData def = {schedule_->start(), 0, kUnknown};
      71             :   return def;
      72             : }
      73             : 
      74             : 
// Returns the scheduling data for {node}; entries are indexed by node id in
// {node_data_}, which was sized to the graph's node count at construction.
Scheduler::SchedulerData* Scheduler::GetData(Node* node) {
  return &node_data_[node->id()];
}
      78             : 
// Determines (and caches) the initial placement class for {node}: fixed
// nodes stay where the CFG put them, parameters/OSR values pin to the start
// block, phis follow or couple to their control input, and everything else
// starts out freely schedulable.
Scheduler::Placement Scheduler::InitializePlacement(Node* node) {
  SchedulerData* data = GetData(node);
  if (data->placement_ == kFixed) {
    // Nothing to do for control nodes that have been already fixed in
    // the schedule.
    return data->placement_;
  }
  DCHECK_EQ(kUnknown, data->placement_);
  switch (node->opcode()) {
    case IrOpcode::kParameter:
    case IrOpcode::kOsrValue:
      // Parameters and OSR values are always fixed to the start block.
      data->placement_ = kFixed;
      break;
    case IrOpcode::kPhi:
    case IrOpcode::kEffectPhi: {
      // Phis and effect phis are fixed if their control inputs are, whereas
      // otherwise they are coupled to a floating control node.
      Placement p = GetPlacement(NodeProperties::GetControlInput(node));
      data->placement_ = (p == kFixed ? kFixed : kCoupled);
      break;
    }
#define DEFINE_CONTROL_CASE(V) case IrOpcode::k##V:
      CONTROL_OP_LIST(DEFINE_CONTROL_CASE)
#undef DEFINE_CONTROL_CASE
      {
        // Control nodes that were not control-reachable from end may float.
        data->placement_ = kSchedulable;
        break;
    }
    default:
      data->placement_ = kSchedulable;
      break;
  }
  return data->placement_;
}
     115             : 
// Returns the currently recorded placement class for {node} without
// initializing it (may be kUnknown).
Scheduler::Placement Scheduler::GetPlacement(Node* node) {
  return GetData(node)->placement_;
}
     119             : 
     120           0 : bool Scheduler::IsLive(Node* node) { return GetPlacement(node) != kUnknown; }
     121             : 
// Transitions {node} to {placement}. Fixing a control node also places any
// phi uses coupled to it; afterwards every input's unscheduled-use count is
// decremented, which may make those inputs newly eligible for scheduling.
void Scheduler::UpdatePlacement(Node* node, Placement placement) {
  SchedulerData* data = GetData(node);
  if (data->placement_ == kUnknown) {
    // We only update control nodes from {kUnknown} to {kFixed}.  Ideally, we
    // should check that {node} is a control node (including exceptional calls),
    // but that is expensive.
    DCHECK_EQ(Scheduler::kFixed, placement);
    data->placement_ = placement;
    return;
  }

  switch (node->opcode()) {
    case IrOpcode::kParameter:
      // Parameters are fixed once and for all.
      UNREACHABLE();
      break;
    case IrOpcode::kPhi:
    case IrOpcode::kEffectPhi: {
      // Phis and effect phis are coupled to their respective blocks.
      DCHECK_EQ(Scheduler::kCoupled, data->placement_);
      DCHECK_EQ(Scheduler::kFixed, placement);
      // Add the phi to the block of its control input.
      Node* control = NodeProperties::GetControlInput(node);
      BasicBlock* block = schedule_->block(control);
      schedule_->AddNode(block, node);
      break;
    }
#define DEFINE_CONTROL_CASE(V) case IrOpcode::k##V:
      CONTROL_OP_LIST(DEFINE_CONTROL_CASE)
#undef DEFINE_CONTROL_CASE
      {
        // Control nodes force coupled uses to be placed.
        for (auto use : node->uses()) {
          if (GetPlacement(use) == Scheduler::kCoupled) {
            DCHECK_EQ(node, NodeProperties::GetControlInput(use));
            UpdatePlacement(use, placement);
          }
      }
      break;
    }
    default:
      DCHECK_EQ(Scheduler::kSchedulable, data->placement_);
      DCHECK_EQ(Scheduler::kScheduled, placement);
      break;
  }
  // Reduce the use count of the node's inputs to potentially make them
  // schedulable. If all the uses of a node have been scheduled, then the node
  // itself can be scheduled.
  for (Edge const edge : node->input_edges()) {
    DecrementUnscheduledUseCount(edge.to(), edge.index(), edge.from());
  }
  data->placement_ = placement;
}
     174             : 
     175             : 
     176   370395352 : bool Scheduler::IsCoupledControlEdge(Node* node, int index) {
     177   373448693 :   return GetPlacement(node) == kCoupled &&
     178   370395349 :          NodeProperties::FirstControlIndex(node) == index;
     179             : }
     180             : 
     181             : 
// Records one more unscheduled use of {node} (the use is input {index} of
// {from}). Coupled nodes delegate the count to their control input; fixed
// nodes and coupled control edges are not tracked at all.
void Scheduler::IncrementUnscheduledUseCount(Node* node, int index,
                                             Node* from) {
  // Make sure that control edges from coupled nodes are not counted.
  if (IsCoupledControlEdge(from, index)) return;

  // Tracking use counts for fixed nodes is useless.
  if (GetPlacement(node) == kFixed) return;

  // Use count for coupled nodes is summed up on their control.
  if (GetPlacement(node) == kCoupled) {
    Node* control = NodeProperties::GetControlInput(node);
    return IncrementUnscheduledUseCount(control, index, from);
  }

  ++(GetData(node)->unscheduled_count_);
  if (FLAG_trace_turbo_scheduler) {
    TRACE("  Use count of #%d:%s (used by #%d:%s)++ = %d\n", node->id(),
          node->op()->mnemonic(), from->id(), from->op()->mnemonic(),
          GetData(node)->unscheduled_count_);
  }
}
     203             : 
     204             : 
// Removes one unscheduled use of {node} (mirror of the increment above).
// When the count drops to zero the node becomes eligible for scheduling and
// is pushed onto the schedule queue.
void Scheduler::DecrementUnscheduledUseCount(Node* node, int index,
                                             Node* from) {
  // Make sure that control edges from coupled nodes are not counted.
  if (IsCoupledControlEdge(from, index)) return;

  // Tracking use counts for fixed nodes is useless.
  if (GetPlacement(node) == kFixed) return;

  // Use count for coupled nodes is summed up on their control.
  if (GetPlacement(node) == kCoupled) {
    Node* control = NodeProperties::GetControlInput(node);
    return DecrementUnscheduledUseCount(control, index, from);
  }

  DCHECK_LT(0, GetData(node)->unscheduled_count_);
  --(GetData(node)->unscheduled_count_);
  if (FLAG_trace_turbo_scheduler) {
    TRACE("  Use count of #%d:%s (used by #%d:%s)-- = %d\n", node->id(),
          node->op()->mnemonic(), from->id(), from->op()->mnemonic(),
          GetData(node)->unscheduled_count_);
  }
  if (GetData(node)->unscheduled_count_ == 0) {
    TRACE("    newly eligible #%d:%s\n", node->id(), node->op()->mnemonic());
    schedule_queue_.push(node);
  }
}
     231             : 
     232             : 
     233             : // -----------------------------------------------------------------------------
     234             : // Phase 1: Build control-flow graph.
     235             : 
     236             : 
     237             : // Internal class to build a control flow graph (i.e the basic blocks and edges
     238             : // between them within a Schedule) from the node graph. Visits control edges of
     239             : // the graph backwards from an end node in order to find the connected control
     240             : // subgraph, needed for scheduling.
     241             : class CFGBuilder : public ZoneObject {
     242             :  public:
  // Builds CFG construction state over {scheduler}'s graph and schedule.
  // {queued_} marks control nodes already enqueued; the component_* fields
  // are only used by the minimal-component overload of Run().
  CFGBuilder(Zone* zone, Scheduler* scheduler)
      : zone_(zone),
        scheduler_(scheduler),
        schedule_(scheduler->schedule_),
        queued_(scheduler->graph_, 2),
        queue_(zone),
        control_(zone),
        component_entry_(nullptr),
        component_start_(nullptr),
        component_end_(nullptr) {}
     253             : 
     254             :   // Run the control flow graph construction algorithm by walking the graph
     255             :   // backwards from end through control edges, building and connecting the
     256             :   // basic blocks for control nodes.
  // Run the control flow graph construction algorithm by walking the graph
  // backwards from end through control edges, building and connecting the
  // basic blocks for control nodes.
  void Run() {
    ResetDataStructures();
    Queue(scheduler_->graph_->end());

    while (!queue_.empty()) {  // Breadth-first backwards traversal.
      Node* node = queue_.front();
      queue_.pop();
      // Enqueue every control input of the dequeued node.
      int max = NodeProperties::PastControlIndex(node);
      for (int i = NodeProperties::FirstControlIndex(node); i < max; i++) {
        Queue(node->InputAt(i));
      }
    }

    // Second pass: wire up edges once all blocks exist.
    for (NodeVector::iterator i = control_.begin(); i != control_.end(); ++i) {
      ConnectBlocks(*i);  // Connect block to its predecessor/successors.
    }
  }
     274             : 
     275             :   // Run the control flow graph construction for a minimal control-connected
     276             :   // component ending in {exit} and merge that component into an existing
     277             :   // control flow graph at the bottom of {block}.
  // Run the control flow graph construction for a minimal control-connected
  // component ending in {exit} and merge that component into an existing
  // control flow graph at the bottom of {block}.
  void Run(BasicBlock* block, Node* exit) {
    ResetDataStructures();
    Queue(exit);

    component_entry_ = nullptr;
    component_start_ = block;
    component_end_ = schedule_->block(exit);
    // Control-dependence equivalence classes are needed to detect the
    // component's single entry below.
    scheduler_->equivalence_->Run(exit);
    while (!queue_.empty()) {  // Breadth-first backwards traversal.
      Node* node = queue_.front();
      queue_.pop();

      // Use control dependence equivalence to find a canonical single-entry
      // single-exit region that makes up a minimal component to be scheduled.
      if (IsSingleEntrySingleExitRegion(node, exit)) {
        TRACE("Found SESE at #%d:%s\n", node->id(), node->op()->mnemonic());
        DCHECK(!component_entry_);
        component_entry_ = node;
        continue;  // Do not traverse past the component entry.
      }

      int max = NodeProperties::PastControlIndex(node);
      for (int i = NodeProperties::FirstControlIndex(node); i < max; i++) {
        Queue(node->InputAt(i));
      }
    }
    DCHECK(component_entry_);

    for (NodeVector::iterator i = control_.begin(); i != control_.end(); ++i) {
      ConnectBlocks(*i);  // Connect block to its predecessor/successors.
    }
  }
     310             : 
     311             :  private:
     312             :   friend class ScheduleLateNodeVisitor;
     313             :   friend class Scheduler;
     314             : 
  // Places {node} into {block} and marks its placement as fixed so the
  // scheduler will not move it.
  void FixNode(BasicBlock* block, Node* node) {
    schedule_->AddNode(block, node);
    scheduler_->UpdatePlacement(node, Scheduler::kFixed);
  }
     319             : 
     320    29762757 :   void Queue(Node* node) {
     321             :     // Mark the connected control nodes as they are queued.
     322    59525611 :     if (!queued_.Get(node)) {
     323    25478934 :       BuildBlocks(node);
     324             :       queue_.push(node);
     325    25478941 :       queued_.Set(node, true);
     326    25478989 :       control_.push_back(node);
     327             :     }
     328    29762841 :   }
     329             : 
  // Creates the basic block(s) associated with {node}: Start/End map onto the
  // schedule's pre-existing blocks, merges/loops get their own block, branch
  // and switch successors get one block each, and exceptional calls split
  // control flow like branches.
  void BuildBlocks(Node* node) {
    switch (node->opcode()) {
      case IrOpcode::kEnd:
        FixNode(schedule_->end(), node);
        break;
      case IrOpcode::kStart:
        FixNode(schedule_->start(), node);
        break;
      case IrOpcode::kLoop:
      case IrOpcode::kMerge:
        BuildBlockForNode(node);
        break;
      case IrOpcode::kTerminate: {
        // Put Terminate in the loop to which it refers.
        Node* loop = NodeProperties::GetControlInput(node);
        BasicBlock* block = BuildBlockForNode(loop);
        FixNode(block, node);
        break;
      }
      case IrOpcode::kBranch:
      case IrOpcode::kSwitch:
        BuildBlocksForSuccessors(node);
        break;
#define BUILD_BLOCK_JS_CASE(Name) case IrOpcode::k##Name:
        JS_OP_LIST(BUILD_BLOCK_JS_CASE)
// JS opcodes are just like calls => fall through.
#undef BUILD_BLOCK_JS_CASE
      case IrOpcode::kCall:
      case IrOpcode::kCallWithCallerSavedRegisters:
        if (NodeProperties::IsExceptionalCall(node)) {
          BuildBlocksForSuccessors(node);
        }
        break;
      default:
        break;
    }
  }
     367             : 
  // Second pass of CFG construction: wires {node}'s block(s) to predecessors
  // and successors. Block-terminating control nodes are also fixed here so
  // they stay at the end of their block.
  void ConnectBlocks(Node* node) {
    switch (node->opcode()) {
      case IrOpcode::kLoop:
      case IrOpcode::kMerge:
        ConnectMerge(node);
        break;
      case IrOpcode::kBranch:
        scheduler_->UpdatePlacement(node, Scheduler::kFixed);
        ConnectBranch(node);
        break;
      case IrOpcode::kSwitch:
        scheduler_->UpdatePlacement(node, Scheduler::kFixed);
        ConnectSwitch(node);
        break;
      case IrOpcode::kDeoptimize:
        scheduler_->UpdatePlacement(node, Scheduler::kFixed);
        ConnectDeoptimize(node);
        break;
      case IrOpcode::kTailCall:
        scheduler_->UpdatePlacement(node, Scheduler::kFixed);
        ConnectTailCall(node);
        break;
      case IrOpcode::kReturn:
        scheduler_->UpdatePlacement(node, Scheduler::kFixed);
        ConnectReturn(node);
        break;
      case IrOpcode::kThrow:
        scheduler_->UpdatePlacement(node, Scheduler::kFixed);
        ConnectThrow(node);
        break;
#define CONNECT_BLOCK_JS_CASE(Name) case IrOpcode::k##Name:
        JS_OP_LIST(CONNECT_BLOCK_JS_CASE)
// JS opcodes are just like calls => fall through.
#undef CONNECT_BLOCK_JS_CASE
      case IrOpcode::kCall:
      case IrOpcode::kCallWithCallerSavedRegisters:
        if (NodeProperties::IsExceptionalCall(node)) {
          scheduler_->UpdatePlacement(node, Scheduler::kFixed);
          ConnectCall(node);
        }
        break;
      default:
        break;
    }
  }
     413             : 
     414    21859063 :   BasicBlock* BuildBlockForNode(Node* node) {
     415    10972536 :     BasicBlock* block = schedule_->block(node);
     416    10972566 :     if (block == nullptr) {
     417    10886398 :       block = schedule_->NewBasicBlock();
     418    10886527 :       TRACE("Create block id:%d for #%d:%s\n", block->id().ToInt(), node->id(),
     419             :             node->op()->mnemonic());
     420    10886527 :       FixNode(block, node);
     421             :     }
     422    10972788 :     return block;
     423             :   }
     424             : 
  // Creates one basic block per control projection (successor) of {node},
  // e.g. IfTrue/IfFalse of a branch or the case projections of a switch.
  void BuildBlocksForSuccessors(Node* node) {
    size_t const successor_cnt = node->op()->ControlOutputCount();
    Node** successors = zone_->NewArray<Node*>(successor_cnt);
    NodeProperties::CollectControlProjections(node, successors, successor_cnt);
    for (size_t index = 0; index < successor_cnt; ++index) {
      BuildBlockForNode(successors[index]);
    }
  }
     433             : 
  // Fills {successor_blocks} with the blocks of {node}'s control projections.
  // The projection nodes are collected in place: the Node** view aliases the
  // caller's BasicBlock** storage, and each slot is then overwritten with the
  // corresponding block (both are pointer-sized).
  void CollectSuccessorBlocks(Node* node, BasicBlock** successor_blocks,
                              size_t successor_cnt) {
    Node** successors = reinterpret_cast<Node**>(successor_blocks);
    NodeProperties::CollectControlProjections(node, successors, successor_cnt);
    for (size_t index = 0; index < successor_cnt; ++index) {
      successor_blocks[index] = schedule_->block(successors[index]);
    }
  }
     442             : 
     443    20320538 :   BasicBlock* FindPredecessorBlock(Node* node) {
     444             :     BasicBlock* predecessor_block = nullptr;
     445             :     while (true) {
     446    29476673 :       predecessor_block = schedule_->block(node);
     447    29476724 :       if (predecessor_block != nullptr) break;
     448     9156134 :       node = NodeProperties::GetControlInput(node);
     449             :     }
     450    20320590 :     return predecessor_block;
     451             :   }
     452             : 
  // Wires an exceptional call into the CFG: the call terminates its block and
  // branches to a success block and an exception block.
  void ConnectCall(Node* call) {
    BasicBlock* successor_blocks[2];
    CollectSuccessorBlocks(call, successor_blocks, arraysize(successor_blocks));

    // Consider the exception continuation to be deferred.
    successor_blocks[1]->set_deferred(true);

    Node* call_control = NodeProperties::GetControlInput(call);
    BasicBlock* call_block = FindPredecessorBlock(call_control);
    TraceConnect(call, call_block, successor_blocks[0]);
    TraceConnect(call, call_block, successor_blocks[1]);
    schedule_->AddCall(call_block, call, successor_blocks[0],
                       successor_blocks[1]);
  }
     467             : 
  // Wires a branch into the CFG. The branch hint marks the unlikely successor
  // as deferred. If the branch is the entry of the component currently being
  // scheduled, it is inserted between the component's start and end blocks;
  // otherwise it terminates its predecessor block.
  void ConnectBranch(Node* branch) {
    BasicBlock* successor_blocks[2];
    CollectSuccessorBlocks(branch, successor_blocks,
                           arraysize(successor_blocks));

    // Consider branch hints.
    switch (BranchHintOf(branch->op())) {
      case BranchHint::kNone:
        break;
      case BranchHint::kTrue:
        successor_blocks[1]->set_deferred(true);
        break;
      case BranchHint::kFalse:
        successor_blocks[0]->set_deferred(true);
        break;
    }

    if (branch == component_entry_) {
      TraceConnect(branch, component_start_, successor_blocks[0]);
      TraceConnect(branch, component_start_, successor_blocks[1]);
      schedule_->InsertBranch(component_start_, component_end_, branch,
                              successor_blocks[0], successor_blocks[1]);
    } else {
      Node* branch_control = NodeProperties::GetControlInput(branch);
      BasicBlock* branch_block = FindPredecessorBlock(branch_control);
      TraceConnect(branch, branch_block, successor_blocks[0]);
      TraceConnect(branch, branch_block, successor_blocks[1]);
      schedule_->AddBranch(branch_block, branch, successor_blocks[0],
                           successor_blocks[1]);
    }
  }
     499             : 
  // Wires a switch into the CFG, analogous to ConnectBranch but with a
  // variable number of successor blocks (allocated from the zone).
  void ConnectSwitch(Node* sw) {
    size_t const successor_count = sw->op()->ControlOutputCount();
    BasicBlock** successor_blocks =
        zone_->NewArray<BasicBlock*>(successor_count);
    CollectSuccessorBlocks(sw, successor_blocks, successor_count);

    if (sw == component_entry_) {
      // The switch is the entry of the component being scheduled.
      for (size_t index = 0; index < successor_count; ++index) {
        TraceConnect(sw, component_start_, successor_blocks[index]);
      }
      schedule_->InsertSwitch(component_start_, component_end_, sw,
                              successor_blocks, successor_count);
    } else {
      Node* switch_control = NodeProperties::GetControlInput(sw);
      BasicBlock* switch_block = FindPredecessorBlock(switch_control);
      for (size_t index = 0; index < successor_count; ++index) {
        TraceConnect(sw, switch_block, successor_blocks[index]);
      }
      schedule_->AddSwitch(switch_block, sw, successor_blocks, successor_count);
    }
  }
     521             : 
  // Adds a goto from each control input's block to the merge's block.
  void ConnectMerge(Node* merge) {
    // Don't connect the special merge at the end to its predecessors.
    if (IsFinalMerge(merge)) return;

    BasicBlock* block = schedule_->block(merge);
    DCHECK_NOT_NULL(block);
    // For all of the merge's control inputs, add a goto at the end to the
    // merge's basic block.
    for (Node* const input : merge->inputs()) {
      BasicBlock* predecessor_block = FindPredecessorBlock(input);
      TraceConnect(merge, predecessor_block, block);
      schedule_->AddGoto(predecessor_block, block);
    }
  }
     536             : 
     537      135896 :   void ConnectTailCall(Node* call) {
     538      135896 :     Node* call_control = NodeProperties::GetControlInput(call);
     539      135896 :     BasicBlock* call_block = FindPredecessorBlock(call_control);
     540      135896 :     TraceConnect(call, call_block, nullptr);
     541      135896 :     schedule_->AddTailCall(call_block, call);
     542      135896 :   }
     543             : 
     544     1739762 :   void ConnectReturn(Node* ret) {
     545     1739762 :     Node* return_control = NodeProperties::GetControlInput(ret);
     546     1739888 :     BasicBlock* return_block = FindPredecessorBlock(return_control);
     547     1739883 :     TraceConnect(ret, return_block, nullptr);
     548     1739823 :     schedule_->AddReturn(return_block, ret);
     549     1739878 :   }
     550             : 
  // A deoptimize terminates control flow; its block gets no successors.
  void ConnectDeoptimize(Node* deopt) {
    Node* deoptimize_control = NodeProperties::GetControlInput(deopt);
    BasicBlock* deoptimize_block = FindPredecessorBlock(deoptimize_control);
    TraceConnect(deopt, deoptimize_block, nullptr);
    schedule_->AddDeoptimize(deoptimize_block, deopt);
  }
     557             : 
     558      119327 :   void ConnectThrow(Node* thr) {
     559      119327 :     Node* throw_control = NodeProperties::GetControlInput(thr);
     560      119327 :     BasicBlock* throw_block = FindPredecessorBlock(throw_control);
     561      119327 :     TraceConnect(thr, throw_block, nullptr);
     562      119327 :     schedule_->AddThrow(throw_block, thr);
     563      119327 :   }
     564             : 
     565    16540556 :   void TraceConnect(Node* node, BasicBlock* block, BasicBlock* succ) {
     566             :     DCHECK_NOT_NULL(block);
     567    16540556 :     if (succ == nullptr) {
     568     2094313 :       TRACE("Connect #%d:%s, id:%d -> end\n", node->id(),
     569             :             node->op()->mnemonic(), block->id().ToInt());
     570             :     } else {
     571    14446243 :       TRACE("Connect #%d:%s, id:%d -> id:%d\n", node->id(),
     572             :             node->op()->mnemonic(), block->id().ToInt(), succ->id().ToInt());
     573             :     }
     574    16540556 :   }
     575             : 
     576     2781086 :   bool IsFinalMerge(Node* node) {
     577     5471180 :     return (node->opcode() == IrOpcode::kMerge &&
     578     2690094 :             node == scheduler_->graph_->end()->InputAt(0));
     579             :   }
     580             : 
     581     1791347 :   bool IsSingleEntrySingleExitRegion(Node* entry, Node* exit) const {
     582     1791347 :     size_t entry_class = scheduler_->equivalence_->ClassOf(entry);
     583     1791348 :     size_t exit_class = scheduler_->equivalence_->ClassOf(exit);
     584     1791348 :     return entry != exit && entry_class == exit_class;
     585             :   }
     586             : 
  // Clears per-component state so the builder can be reused for the next
  // control-connected component. The queue must already have been drained.
  void ResetDataStructures() {
    control_.clear();
    DCHECK(queue_.empty());
    DCHECK(control_.empty());
  }

  Zone* zone_;                   // Zone for temporary data of this builder.
  Scheduler* scheduler_;         // Owning scheduler (graph, equivalence).
  Schedule* schedule_;           // Schedule whose blocks are being built.
  NodeMarker<bool> queued_;      // Mark indicating whether node is queued.
  ZoneQueue<Node*> queue_;       // Queue used for breadth-first traversal.
  NodeVector control_;           // List of encountered control nodes.
  Node* component_entry_;        // Component single-entry node.
  BasicBlock* component_start_;  // Component single-entry block.
  BasicBlock* component_end_;    // Component single-exit block.
};
     603             : 
     604             : 
// Phase 1 entry point: builds the control-flow graph for the main
// control-connected component and sets up per-block node lists used by the
// later scheduling phases.
void Scheduler::BuildCFG() {
  TRACE("--- CREATING CFG -------------------------------------------\n");

  // Instantiate a new control equivalence algorithm for the graph.
  equivalence_ = new (zone_) ControlEquivalence(zone_, graph_);

  // Build a control-flow graph for the main control-connected component that
  // is being spanned by the graph's start and end nodes.
  control_flow_builder_ = new (zone_) CFGBuilder(zone_, this);
  control_flow_builder_->Run();

  // Initialize per-block data.
  // Reserve an extra 10% to avoid resizing vector when fusing floating control.
  scheduled_nodes_.reserve(schedule_->BasicBlockCount() * 1.1);
  scheduled_nodes_.resize(schedule_->BasicBlockCount());
}
     621             : 
     622             : 
     623             : // -----------------------------------------------------------------------------
     624             : // Phase 2: Compute special RPO and dominator tree.
     625             : 
     626             : 
     627             : // Compute the special reverse-post-order block ordering, which is essentially
     628             : // a RPO of the graph where loop bodies are contiguous. Properties:
     629             : // 1. If block A is a predecessor of B, then A appears before B in the order,
     630             : //    unless B is a loop header and A is in the loop headed at B
     631             : //    (i.e. A -> B is a backedge).
     632             : // => If block A dominates block B, then A appears before B in the order.
     633             : // => If block A is a loop header, A appears before all blocks in the loop
     634             : //    headed at A.
     635             : // 2. All loops are contiguous in the order (i.e. no intervening blocks that
     636             : //    do not belong to the loop.)
     637             : // Note a simple RPO traversal satisfies (1) but not (2).
     638             : class SpecialRPONumberer : public ZoneObject {
     639             :  public:
  // Constructs a numberer for {schedule}; all temporary data lives in
  // {zone}. No order exists until ComputeSpecialRPO() is called.
  SpecialRPONumberer(Zone* zone, Schedule* schedule)
      : zone_(zone),
        schedule_(schedule),
        order_(nullptr),
        beyond_end_(nullptr),
        loops_(zone),
        backedges_(zone),
        stack_(zone),
        previous_block_count_(0),
        empty_(0, zone) {}
     644             :         beyond_end_(nullptr),
     645             :         loops_(zone),
     646             :         backedges_(zone),
     647             :         stack_(zone),
     648             :         previous_block_count_(0),
     649     5166428 :         empty_(0, zone) {}
     650             : 
     651             :   // Computes the special reverse-post-order for the main control flow graph,
     652             :   // that is for the graph spanned between the schedule's start and end blocks.
     653             :   void ComputeSpecialRPO() {
     654             :     DCHECK_EQ(0, schedule_->end()->SuccessorCount());
     655             :     DCHECK(!order_);  // Main order does not exist yet.
     656     1722115 :     ComputeAndInsertSpecialRPO(schedule_->start(), schedule_->end());
     657             :   }
     658             : 
     659             :   // Computes the special reverse-post-order for a partial control flow graph,
     660             :   // that is for the graph spanned between the given {entry} and {end} blocks,
     661             :   // then updates the existing ordering with this new information.
     662             :   void UpdateSpecialRPO(BasicBlock* entry, BasicBlock* end) {
     663             :     DCHECK(order_);  // Main order to be updated is present.
     664      271530 :     ComputeAndInsertSpecialRPO(entry, end);
     665             :   }
     666             : 
     667             :   // Serialize the previously computed order as a special reverse-post-order
     668             :   // numbering for basic blocks into the final schedule.
     669     3444256 :   void SerializeRPOIntoSchedule() {
     670             :     int32_t number = 0;
     671    33479361 :     for (BasicBlock* b = order_; b != nullptr; b = b->rpo_next()) {
     672    31757132 :       b->set_rpo_number(number++);
     673    15878605 :       schedule_->rpo_order()->push_back(b);
     674             :     }
     675     1722229 :     BeyondEndSentinel()->set_rpo_number(number);
     676     1722067 :   }
     677             : 
     678             :   // Print and verify the special reverse-post-order.
     679             :   void PrintAndVerifySpecialRPO() {
     680             : #if DEBUG
     681             :     if (FLAG_trace_turbo_scheduler) PrintRPO();
     682             :     VerifySpecialRPO();
     683             : #endif
     684             :   }
     685             : 
     686             :   const ZoneVector<BasicBlock*>& GetOutgoingBlocks(BasicBlock* block) {
     687     4783112 :     if (HasLoopNumber(block)) {
     688     4783297 :       LoopInfo const& loop = loops_[GetLoopNumber(block)];
     689     4783297 :       if (loop.outgoing) return *loop.outgoing;
     690             :     }
     691       61299 :     return empty_;
     692             :   }
     693             : 
     694             :  private:
     695             :   typedef std::pair<BasicBlock*, size_t> Backedge;
     696             : 
     697             :   // Numbering for BasicBlock::rpo_number for this block traversal:
     698             :   static const int kBlockOnStack = -2;
     699             :   static const int kBlockVisited1 = -3;
     700             :   static const int kBlockVisited2 = -4;
     701             :   static const int kBlockUnvisited1 = -1;
     702             :   static const int kBlockUnvisited2 = kBlockVisited1;
     703             : 
     704             :   struct SpecialRPOStackFrame {
     705             :     BasicBlock* block;
     706             :     size_t index;
     707             :   };
     708             : 
  // Per-loop bookkeeping used while grouping loop bodies together.
  struct LoopInfo {
    BasicBlock* header;                 // The loop's header block.
    ZoneVector<BasicBlock*>* outgoing;  // Targets of edges leaving the loop;
                                        // allocated lazily by AddOutgoing.
    BitVector* members;    // Loop membership, one bit per block id.
    LoopInfo* prev;        // Next-outer loop on the loop stack.
    BasicBlock* end;       // End of this loop's section of the rpo_next list.
    BasicBlock* start;     // Start of this loop's section of that list.

    // Records {block} as the target of an edge that leaves this loop,
    // allocating the vector from {zone} on first use.
    void AddOutgoing(Zone* zone, BasicBlock* block) {
      if (outgoing == nullptr) {
        outgoing = new (zone->New(sizeof(ZoneVector<BasicBlock*>)))
            ZoneVector<BasicBlock*>(zone);
      }
      outgoing->push_back(block);
    }
  };
     725             : 
     726    21128801 :   int Push(ZoneVector<SpecialRPOStackFrame>& stack, int depth,
     727    21128801 :            BasicBlock* child, int unvisited) {
     728    21128801 :     if (child->rpo_number() == unvisited) {
     729    42257458 :       stack[depth].block = child;
     730    21128729 :       stack[depth].index = 0;
     731    21128729 :       child->set_rpo_number(kBlockOnStack);
     732    21128649 :       return depth + 1;
     733             :     }
     734             :     return depth;
     735             :   }
     736             : 
  // Prepends {block} to the intrusive rpo_next list starting at {head} and
  // returns the new head of the list.
  BasicBlock* PushFront(BasicBlock* head, BasicBlock* block) {
    block->set_rpo_next(head);
    return block;
  }
     741             : 
  // Accessors for the loop number stored directly on the block. A negative
  // loop number means the block is not (known to be) a loop header.
  static int GetLoopNumber(BasicBlock* block) { return block->loop_number(); }
  static void SetLoopNumber(BasicBlock* block, int loop_number) {
    return block->set_loop_number(loop_number);
  }
  static bool HasLoopNumber(BasicBlock* block) {
    return block->loop_number() >= 0;
  }
     749             : 
     750             :   // TODO(mstarzinger): We only need this special sentinel because some tests
     751             :   // use the schedule's end block in actual control flow (e.g. with end having
     752             :   // successors). Once this has been cleaned up we can use the end block here.
     753     1725933 :   BasicBlock* BeyondEndSentinel() {
     754     1725933 :     if (beyond_end_ == nullptr) {
     755             :       BasicBlock::Id id = BasicBlock::Id::FromInt(-1);
     756     3444469 :       beyond_end_ = new (schedule_->zone()) BasicBlock(schedule_->zone(), id);
     757             :     }
     758     1725753 :     return beyond_end_;
     759             :   }
     760             : 
  // Compute special RPO for the control flow graph between {entry} and {end},
  // mutating any existing order so that the result is still valid.
  //
  // The algorithm runs in up to three passes over the region:
  //   1. A plain iterative DFS that builds an RPO list and records backedges.
  //   2. Only if backedges (i.e. loops) were found: a second DFS that visits
  //      loop bodies before loop-exiting edges so each loop is contiguous.
  //   3. A final linear walk that assigns loop headers, loop ends and loop
  //      depths to every block in the freshly inserted range.
  void ComputeAndInsertSpecialRPO(BasicBlock* entry, BasicBlock* end) {
    // RPO should not have been serialized for this schedule yet.
    CHECK_EQ(kBlockUnvisited1, schedule_->start()->loop_number());
    CHECK_EQ(kBlockUnvisited1, schedule_->start()->rpo_number());
    CHECK_EQ(0, static_cast<int>(schedule_->rpo_order()->size()));

    // Find correct insertion point within existing order.
    BasicBlock* insertion_point = entry->rpo_next();
    BasicBlock* order = insertion_point;

    // Perform an iterative RPO traversal using an explicit stack,
    // recording backedges that form cycles. O(|B|).
    DCHECK_LT(previous_block_count_, schedule_->BasicBlockCount());
    stack_.resize(schedule_->BasicBlockCount() - previous_block_count_);
    previous_block_count_ = schedule_->BasicBlockCount();
    int stack_depth = Push(stack_, 0, entry, kBlockUnvisited1);
    int num_loops = static_cast<int>(loops_.size());

    while (stack_depth > 0) {
      int current = stack_depth - 1;
      SpecialRPOStackFrame* frame = &stack_[current];

      if (frame->block != end &&
          frame->index < frame->block->SuccessorCount()) {
        // Process the next successor.
        BasicBlock* succ = frame->block->SuccessorAt(frame->index++);
        if (succ->rpo_number() == kBlockVisited1) continue;
        if (succ->rpo_number() == kBlockOnStack) {
          // The successor is on the stack, so this is a backedge (cycle).
          backedges_.push_back(Backedge(frame->block, frame->index - 1));
          if (!HasLoopNumber(succ)) {
            // Assign a new loop number to the header if it doesn't have one.
            SetLoopNumber(succ, num_loops++);
          }
        } else {
          // Push the successor onto the stack.
          DCHECK_EQ(kBlockUnvisited1, succ->rpo_number());
          stack_depth = Push(stack_, stack_depth, succ, kBlockUnvisited1);
        }
      } else {
        // Finished with all successors; pop the stack and add the block.
        order = PushFront(order, frame->block);
        frame->block->set_rpo_number(kBlockVisited1);
        stack_depth--;
      }
    }

    // If no loops were encountered, then the order we computed was correct.
    if (num_loops > static_cast<int>(loops_.size())) {
      // Otherwise, compute the loop information from the backedges in order
      // to perform a traversal that groups loop bodies together.
      ComputeLoopInfo(stack_, num_loops, &backedges_);

      // Initialize the "loop stack". Note the entry could be a loop header.
      LoopInfo* loop =
          HasLoopNumber(entry) ? &loops_[GetLoopNumber(entry)] : nullptr;
      order = insertion_point;

      // Perform an iterative post-order traversal, visiting loop bodies before
      // edges that lead out of loops. Visits each block once, but linking loop
      // sections together is linear in the loop size, so overall is
      // O(|B| + max(loop_depth) * max(|loop|))
      stack_depth = Push(stack_, 0, entry, kBlockUnvisited2);
      while (stack_depth > 0) {
        SpecialRPOStackFrame* frame = &stack_[stack_depth - 1];
        BasicBlock* block = frame->block;
        BasicBlock* succ = nullptr;

        if (block != end && frame->index < block->SuccessorCount()) {
          // Process the next normal successor.
          succ = block->SuccessorAt(frame->index++);
        } else if (HasLoopNumber(block)) {
          // Process additional outgoing edges from the loop header.
          if (block->rpo_number() == kBlockOnStack) {
            // Finish the loop body the first time the header is left on the
            // stack.
            DCHECK(loop != nullptr && loop->header == block);
            loop->start = PushFront(order, block);
            order = loop->end;
            block->set_rpo_number(kBlockVisited2);
            // Pop the loop stack and continue visiting outgoing edges within
            // the context of the outer loop, if any.
            loop = loop->prev;
            // We leave the loop header on the stack; the rest of this iteration
            // and later iterations will go through its outgoing edges list.
          }

          // Use the next outgoing edge if there are any.
          // {frame->index} has run past SuccessorCount(), so the excess
          // indexes into the loop's deferred outgoing-edge list.
          size_t outgoing_index = frame->index - block->SuccessorCount();
          LoopInfo* info = &loops_[GetLoopNumber(block)];
          DCHECK(loop != info);
          if (block != entry && info->outgoing != nullptr &&
              outgoing_index < info->outgoing->size()) {
            succ = info->outgoing->at(outgoing_index);
            frame->index++;
          }
        }

        if (succ != nullptr) {
          // Process the next successor.
          if (succ->rpo_number() == kBlockOnStack) continue;
          if (succ->rpo_number() == kBlockVisited2) continue;
          DCHECK_EQ(kBlockUnvisited2, succ->rpo_number());
          if (loop != nullptr && !loop->members->Contains(succ->id().ToInt())) {
            // The successor is not in the current loop or any nested loop.
            // Add it to the outgoing edges of this loop and visit it later.
            loop->AddOutgoing(zone_, succ);
          } else {
            // Push the successor onto the stack.
            stack_depth = Push(stack_, stack_depth, succ, kBlockUnvisited2);
            if (HasLoopNumber(succ)) {
              // Push the inner loop onto the loop stack.
              DCHECK(GetLoopNumber(succ) < num_loops);
              LoopInfo* next = &loops_[GetLoopNumber(succ)];
              next->end = order;
              next->prev = loop;
              loop = next;
            }
          }
        } else {
          // Finished with all successors of the current block.
          if (HasLoopNumber(block)) {
            // If we are going to pop a loop header, then add its entire body.
            LoopInfo* info = &loops_[GetLoopNumber(block)];
            for (BasicBlock* b = info->start; true; b = b->rpo_next()) {
              if (b->rpo_next() == info->end) {
                b->set_rpo_next(order);
                info->end = order;
                break;
              }
            }
            order = info->start;
          } else {
            // Pop a single node off the stack and add it to the order.
            order = PushFront(order, block);
            block->set_rpo_number(kBlockVisited2);
          }
          stack_depth--;
        }
      }
    }

    // Publish new order the first time.
    if (order_ == nullptr) order_ = order;

    // Compute the correct loop headers and set the correct loop ends.
    LoopInfo* current_loop = nullptr;
    BasicBlock* current_header = entry->loop_header();
    int32_t loop_depth = entry->loop_depth();
    if (entry->IsLoopHeader()) --loop_depth;  // Entry might be a loop header.
    for (BasicBlock* b = order; b != insertion_point; b = b->rpo_next()) {
      BasicBlock* current = b;

      // Reset BasicBlock::rpo_number again.
      current->set_rpo_number(kBlockUnvisited1);

      // Finish the previous loop(s) if we just exited them.
      while (current_header != nullptr &&
             current == current_header->loop_end()) {
        DCHECK(current_header->IsLoopHeader());
        DCHECK_NOT_NULL(current_loop);
        current_loop = current_loop->prev;
        current_header =
            current_loop == nullptr ? nullptr : current_loop->header;
        --loop_depth;
      }
      current->set_loop_header(current_header);

      // Push a new loop onto the stack if this loop is a loop header.
      if (HasLoopNumber(current)) {
        ++loop_depth;
        current_loop = &loops_[GetLoopNumber(current)];
        BasicBlock* end = current_loop->end;
        // A null end means the loop extends to the end of the order; use the
        // sentinel block in that case.
        current->set_loop_end(end == nullptr ? BeyondEndSentinel() : end);
        current_header = current_loop->header;
        TRACE("id:%d is a loop header, increment loop depth to %d\n",
              current->id().ToInt(), loop_depth);
      }

      current->set_loop_depth(loop_depth);

      if (current->loop_header() == nullptr) {
        TRACE("id:%d is not in a loop (depth == %d)\n", current->id().ToInt(),
              current->loop_depth());
      } else {
        TRACE("id:%d has loop header id:%d, (depth == %d)\n",
              current->id().ToInt(), current->loop_header()->id().ToInt(),
              current->loop_depth());
      }
    }
  }
     954             : 
  // Computes loop membership from the backedges of the control flow graph.
  // {queue} is the DFS stack passed in from the caller; its storage is reused
  // here as a scratch worklist. On return, loops_ has one entry per loop with
  // header and membership bit-vector filled in.
  void ComputeLoopInfo(ZoneVector<SpecialRPOStackFrame>& queue,
                       size_t num_loops, ZoneVector<Backedge>* backedges) {
    // Extend existing loop membership vectors.
    for (LoopInfo& loop : loops_) {
      loop.members->Resize(static_cast<int>(schedule_->BasicBlockCount()),
                           zone_);
    }

    // Extend loop information vector.
    loops_.resize(num_loops, LoopInfo());

    // Compute loop membership starting from backedges.
    // O(max(loop_depth) * max(|loop|)
    for (size_t i = 0; i < backedges->size(); i++) {
      BasicBlock* member = backedges->at(i).first;
      BasicBlock* header = member->SuccessorAt(backedges->at(i).second);
      size_t loop_num = GetLoopNumber(header);
      if (loops_[loop_num].header == nullptr) {
        // First backedge seen for this loop: record the header and allocate
        // its membership bit-vector.
        loops_[loop_num].header = header;
        loops_[loop_num].members = new (zone_)
            BitVector(static_cast<int>(schedule_->BasicBlockCount()), zone_);
      }

      int queue_length = 0;
      if (member != header) {
        // As long as the header doesn't have a backedge to itself,
        // Push the member onto the queue and process its predecessors.
        if (!loops_[loop_num].members->Contains(member->id().ToInt())) {
          loops_[loop_num].members->Add(member->id().ToInt());
        }
        queue[queue_length++].block = member;
      }

      // Propagate loop membership backwards. All predecessors of M up to the
      // loop header H are members of the loop too. O(|blocks between M and H|).
      while (queue_length > 0) {
        BasicBlock* block = queue[--queue_length].block;
        for (size_t i = 0; i < block->PredecessorCount(); i++) {
          BasicBlock* pred = block->PredecessorAt(i);
          if (pred != header) {
            if (!loops_[loop_num].members->Contains(pred->id().ToInt())) {
              loops_[loop_num].members->Add(pred->id().ToInt());
              queue[queue_length++].block = pred;
            }
          }
        }
      }
    }
  }
    1005             : 
    1006             : #if DEBUG
    1007             :   void PrintRPO() {
    1008             :     OFStream os(stdout);
    1009             :     os << "RPO with " << loops_.size() << " loops";
    1010             :     if (loops_.size() > 0) {
    1011             :       os << " (";
    1012             :       for (size_t i = 0; i < loops_.size(); i++) {
    1013             :         if (i > 0) os << " ";
    1014             :         os << "id:" << loops_[i].header->id();
    1015             :       }
    1016             :       os << ")";
    1017             :     }
    1018             :     os << ":\n";
    1019             : 
    1020             :     for (BasicBlock* block = order_; block != nullptr;
    1021             :          block = block->rpo_next()) {
    1022             :       os << std::setw(5) << "B" << block->rpo_number() << ":";
    1023             :       for (size_t i = 0; i < loops_.size(); i++) {
    1024             :         bool range = loops_[i].header->LoopContains(block);
    1025             :         bool membership = loops_[i].header != block && range;
    1026             :         os << (membership ? " |" : "  ");
    1027             :         os << (range ? "x" : " ");
    1028             :       }
    1029             :       os << "  id:" << block->id() << ": ";
    1030             :       if (block->loop_end() != nullptr) {
    1031             :         os << " range: [B" << block->rpo_number() << ", B"
    1032             :            << block->loop_end()->rpo_number() << ")";
    1033             :       }
    1034             :       if (block->loop_header() != nullptr) {
    1035             :         os << " header: id:" << block->loop_header()->id();
    1036             :       }
    1037             :       if (block->loop_depth() > 0) {
    1038             :         os << " depth: " << block->loop_depth();
    1039             :       }
    1040             :       os << "\n";
    1041             :     }
    1042             :   }
    1043             : 
  // Checks all invariants of the computed special RPO: valid header/end
  // numbers, intact start..end linked sub-lists, correct loop depths, and
  // contiguity of every loop in the final order. Debug builds only.
  void VerifySpecialRPO() {
    BasicBlockVector* order = schedule_->rpo_order();
    DCHECK_LT(0, order->size());
    DCHECK_EQ(0, (*order)[0]->id().ToInt());  // entry should be first.

    for (size_t i = 0; i < loops_.size(); i++) {
      LoopInfo* loop = &loops_[i];
      BasicBlock* header = loop->header;
      BasicBlock* end = header->loop_end();

      DCHECK_NOT_NULL(header);
      DCHECK_LE(0, header->rpo_number());
      DCHECK_LT(header->rpo_number(), order->size());
      DCHECK_NOT_NULL(end);
      DCHECK_LE(end->rpo_number(), order->size());
      DCHECK_GT(end->rpo_number(), header->rpo_number());
      DCHECK_NE(header->loop_header(), header);

      // Verify the start ... end list relationship.
      int links = 0;
      BasicBlock* block = loop->start;
      DCHECK_EQ(header, block);
      bool end_found;
      while (true) {
        if (block == nullptr || block == loop->end) {
          end_found = (loop->end == block);
          break;
        }
        // The list should be in same order as the final result.
        DCHECK(block->rpo_number() == links + header->rpo_number());
        links++;
        block = block->rpo_next();
        DCHECK_LT(links, static_cast<int>(2 * order->size()));  // cycle?
      }
      DCHECK_LT(0, links);
      DCHECK_EQ(links, end->rpo_number() - header->rpo_number());
      DCHECK(end_found);

      // Check loop depth of the header.
      // The depth equals the number of loops on the header's prev-chain.
      int loop_depth = 0;
      for (LoopInfo* outer = loop; outer != nullptr; outer = outer->prev) {
        loop_depth++;
      }
      DCHECK_EQ(loop_depth, header->loop_depth());

      // Check the contiguousness of loops.
      // Every block inside [header, end) must be a member and vice versa.
      int count = 0;
      for (int j = 0; j < static_cast<int>(order->size()); j++) {
        BasicBlock* block = order->at(j);
        DCHECK_EQ(block->rpo_number(), j);
        if (j < header->rpo_number() || j >= end->rpo_number()) {
          DCHECK(!header->LoopContains(block));
        } else {
          DCHECK(header->LoopContains(block));
          DCHECK_GE(block->loop_depth(), loop_depth);
          count++;
        }
      }
      DCHECK_EQ(links, count);
    }
  }
    1105             : #endif  // DEBUG
    1106             : 
    1107             :   Zone* zone_;
    1108             :   Schedule* schedule_;
    1109             :   BasicBlock* order_;
    1110             :   BasicBlock* beyond_end_;
    1111             :   ZoneVector<LoopInfo> loops_;
    1112             :   ZoneVector<Backedge> backedges_;
    1113             :   ZoneVector<SpecialRPOStackFrame> stack_;
    1114             :   size_t previous_block_count_;
    1115             :   ZoneVector<BasicBlock*> const empty_;
    1116             : };
    1117             : 
    1118             : 
    1119      265944 : BasicBlockVector* Scheduler::ComputeSpecialRPO(Zone* zone, Schedule* schedule) {
    1120      265944 :   SpecialRPONumberer numberer(zone, schedule);
    1121             :   numberer.ComputeSpecialRPO();
    1122      265944 :   numberer.SerializeRPOIntoSchedule();
    1123             :   numberer.PrintAndVerifySpecialRPO();
    1124      265944 :   return schedule->rpo_order();
    1125             : }
    1126             : 
    1127             : 
    1128     1456102 : void Scheduler::ComputeSpecialRPONumbering() {
    1129     1456102 :   TRACE("--- COMPUTING SPECIAL RPO ----------------------------------\n");
    1130             : 
    1131             :   // Compute the special reverse-post-order for basic blocks.
    1132     2912315 :   special_rpo_ = new (zone_) SpecialRPONumberer(zone_, schedule_);
    1133             :   special_rpo_->ComputeSpecialRPO();
    1134     1456272 : }
    1135             : 
    1136             : 
    1137    41351382 : void Scheduler::PropagateImmediateDominators(BasicBlock* block) {
    1138    23267329 :   for (/*nop*/; block != nullptr; block = block->rpo_next()) {
    1139             :     auto pred = block->predecessors().begin();
    1140             :     auto end = block->predecessors().end();
    1141             :     DCHECK(pred != end);  // All blocks except start have predecessors.
    1142    39623610 :     BasicBlock* dominator = *pred;
    1143             :     bool deferred = dominator->deferred();
    1144             :     // For multiple predecessors, walk up the dominator tree until a common
    1145             :     // dominator is found. Visitation order guarantees that all predecessors
    1146             :     // except for backwards edges have been visited.
    1147    27214173 :     for (++pred; pred != end; ++pred) {
    1148             :       // Don't examine backwards edges.
    1149     7402379 :       if ((*pred)->dominator_depth() < 0) continue;
    1150     7271585 :       dominator = BasicBlock::GetCommonDominator(dominator, *pred);
    1151     7271563 :       deferred = deferred & (*pred)->deferred();
    1152             :     }
    1153             :     block->set_dominator(dominator);
    1154    19811794 :     block->set_dominator_depth(dominator->dominator_depth() + 1);
    1155    19811794 :     block->set_deferred(deferred | block->deferred());
    1156    19811794 :     TRACE("Block id:%d's idom is id:%d, depth = %d\n", block->id().ToInt(),
    1157             :           dominator->id().ToInt(), block->dominator_depth());
    1158             :   }
    1159     1727772 : }
    1160             : 
    1161             : 
    1162     1456219 : void Scheduler::GenerateImmediateDominatorTree() {
    1163     1456219 :   TRACE("--- IMMEDIATE BLOCK DOMINATORS -----------------------------\n");
    1164             : 
    1165             :   // Seed start block to be the first dominator.
    1166     2912438 :   schedule_->start()->set_dominator_depth(0);
    1167             : 
    1168             :   // Build the block dominator tree resulting from the above seed.
    1169     2912438 :   PropagateImmediateDominators(schedule_->start()->rpo_next());
    1170     1456255 : }
    1171             : 
    1172             : 
    1173             : // -----------------------------------------------------------------------------
    1174             : // Phase 3: Prepare use counts for nodes.
    1175             : 
    1176             : 
    1177             : class PrepareUsesVisitor {
    1178             :  public:
    1179             :   explicit PrepareUsesVisitor(Scheduler* scheduler)
    1180     1456062 :       : scheduler_(scheduler), schedule_(scheduler->schedule_) {}
    1181             : 
    1182    95320135 :   void Pre(Node* node) {
    1183   103344675 :     if (scheduler_->InitializePlacement(node) == Scheduler::kFixed) {
    1184             :       // Fixed nodes are always roots for schedule late.
    1185    26104710 :       scheduler_->schedule_root_nodes_.push_back(node);
    1186    31023127 :       if (!schedule_->IsScheduled(node)) {
    1187             :         // Make sure root nodes are scheduled in their respective blocks.
    1188     8024537 :         TRACE("Scheduling fixed position node #%d:%s\n", node->id(),
    1189             :               node->op()->mnemonic());
    1190     8024540 :         IrOpcode::Value opcode = node->opcode();
    1191             :         BasicBlock* block =
    1192             :             opcode == IrOpcode::kParameter
    1193     4918528 :                 ? schedule_->start()
    1194    11130552 :                 : schedule_->block(NodeProperties::GetControlInput(node));
    1195             :         DCHECK_NOT_NULL(block);
    1196     8024537 :         schedule_->AddNode(block, node);
    1197             :       }
    1198             :     }
    1199    95320072 :   }
    1200             : 
    1201   230169550 :   void PostEdge(Node* from, int index, Node* to) {
    1202             :     // If the edge is from an unscheduled node, then tally it in the use count
    1203             :     // for all of its inputs. The same criterion will be used in ScheduleLate
    1204             :     // for decrementing use counts.
    1205   230169550 :     if (!schedule_->IsScheduled(from)) {
    1206             :       DCHECK_NE(Scheduler::kFixed, scheduler_->GetPlacement(from));
    1207   182692595 :       scheduler_->IncrementUnscheduledUseCount(to, index, from);
    1208             :     }
    1209   230171095 :   }
    1210             : 
    1211             :  private:
    1212             :   Scheduler* scheduler_;
    1213             :   Schedule* schedule_;
    1214             : };
    1215             : 
    1216             : 
    1217     1456062 : void Scheduler::PrepareUses() {
    1218     1456062 :   TRACE("--- PREPARE USES -------------------------------------------\n");
    1219             : 
    1220             :   // Count the uses of every node, which is used to ensure that all of a
    1221             :   // node's uses are scheduled before the node itself.
    1222             :   PrepareUsesVisitor prepare_uses(this);
    1223             : 
    1224             :   // TODO(turbofan): simplify the careful pre/post ordering here.
    1225     2912278 :   BoolVector visited(graph_->NodeCount(), false, zone_);
    1226     1456228 :   ZoneStack<Node::InputEdges::iterator> stack(zone_);
    1227     2912370 :   Node* node = graph_->end();
    1228     1456216 :   prepare_uses.Pre(node);
    1229     1456154 :   visited[node->id()] = true;
    1230     1457244 :   stack.push(node->input_edges().begin());
    1231   326932590 :   while (!stack.empty()) {
    1232             :     Edge edge = *stack.top();
    1233   417883363 :     Node* node = edge.to();
    1234   648038074 :     if (visited[node->id()]) {
    1235   230154730 :       prepare_uses.PostEdge(edge.from(), edge.index(), edge.to());
    1236   230175190 :       if (++stack.top() == edge.from()->input_edges().end()) stack.pop();
    1237             :     } else {
    1238    93864307 :       prepare_uses.Pre(node);
    1239    93864326 :       visited[node->id()] = true;
    1240   160025070 :       if (node->InputCount() > 0) stack.push(node->input_edges().begin());
    1241             :     }
    1242             :   }
    1243     1456315 : }
    1244             : 
    1245             : 
    1246             : // -----------------------------------------------------------------------------
    1247             : // Phase 4: Schedule nodes early.
    1248             : 
    1249             : 
    1250             : class ScheduleEarlyNodeVisitor {
    1251             :  public:
    1252             :   ScheduleEarlyNodeVisitor(Zone* zone, Scheduler* scheduler)
    1253     1727793 :       : scheduler_(scheduler), schedule_(scheduler->schedule_), queue_(zone) {}
    1254             : 
    1255             :   // Run the schedule early algorithm on a set of fixed root nodes.
    1256     1727485 :   void Run(NodeVector* roots) {
    1257    31830769 :     for (Node* const root : *roots) {
    1258             :       queue_.push(root);
    1259   115176625 :       while (!queue_.empty()) {
    1260    86800826 :         VisitNode(queue_.front());
    1261             :         queue_.pop();
    1262             :       }
    1263             :     }
    1264     1727835 :   }
    1265             : 
    1266             :  private:
    1267             :   // Visits one node from the queue and propagates its current schedule early
    1268             :   // position to all uses. This in turn might push more nodes onto the queue.
    1269    86800850 :   void VisitNode(Node* node) {
    1270    86800850 :     Scheduler::SchedulerData* data = scheduler_->GetData(node);
    1271             : 
    1272             :     // Fixed nodes already know their schedule early position.
    1273    86800850 :     if (scheduler_->GetPlacement(node) == Scheduler::kFixed) {
    1274   115177152 :       data->minimum_block_ = schedule_->block(node);
    1275    28375769 :       TRACE("Fixing #%d:%s minimum_block = id:%d, dominator_depth = %d\n",
    1276             :             node->id(), node->op()->mnemonic(),
    1277             :             data->minimum_block_->id().ToInt(),
    1278             :             data->minimum_block_->dominator_depth());
    1279             :     }
    1280             : 
    1281             :     // No need to propagate unconstrained schedule early positions.
    1282   260404525 :     if (data->minimum_block_ == schedule_->start()) return;
    1283             : 
    1284             :     // Propagate schedule early position.
    1285             :     DCHECK_NOT_NULL(data->minimum_block_);
    1286   231528423 :     for (auto use : node->uses()) {
    1287   305319500 :       if (scheduler_->IsLive(use)) {
    1288   152659437 :         PropagateMinimumPositionToNode(data->minimum_block_, use);
    1289             :       }
    1290             :     }
    1291             :   }
    1292             : 
    1293             :   // Propagates {block} as another minimum position into the given {node}. This
    1294             :   // has the net effect of computing the minimum dominator block of {node} that
    1295             :   // still post-dominates all inputs to {node} when the queue is processed.
    1296   263394051 :   void PropagateMinimumPositionToNode(BasicBlock* block, Node* node) {
    1297   154010145 :     Scheduler::SchedulerData* data = scheduler_->GetData(node);
    1298             : 
    1299             :     // No need to propagate to fixed node, it's guaranteed to be a root.
    1300   308022935 :     if (scheduler_->GetPlacement(node) == Scheduler::kFixed) return;
    1301             : 
    1302             :     // Coupled nodes influence schedule early position of their control.
    1303   109381194 :     if (scheduler_->GetPlacement(node) == Scheduler::kCoupled) {
    1304     1350816 :       Node* control = NodeProperties::GetControlInput(node);
    1305     1350816 :       PropagateMinimumPositionToNode(block, control);
    1306             :     }
    1307             : 
    1308             :     // Propagate new position if it is deeper down the dominator tree than the
    1309             :     // current. Note that all inputs need to have minimum block position inside
    1310             :     // the dominator chain of {node}'s minimum block position.
    1311             :     DCHECK(InsideSameDominatorChain(block, data->minimum_block_));
    1312   109383906 :     if (block->dominator_depth() > data->minimum_block_->dominator_depth()) {
    1313    58429546 :       data->minimum_block_ = block;
    1314             :       queue_.push(node);
    1315    58429479 :       TRACE("Propagating #%d:%s minimum_block = id:%d, dominator_depth = %d\n",
    1316             :             node->id(), node->op()->mnemonic(),
    1317             :             data->minimum_block_->id().ToInt(),
    1318             :             data->minimum_block_->dominator_depth());
    1319             :     }
    1320             :   }
    1321             : 
    1322             : #if DEBUG
    1323             :   bool InsideSameDominatorChain(BasicBlock* b1, BasicBlock* b2) {
    1324             :     BasicBlock* dominator = BasicBlock::GetCommonDominator(b1, b2);
    1325             :     return dominator == b1 || dominator == b2;
    1326             :   }
    1327             : #endif
    1328             : 
    1329             :   Scheduler* scheduler_;
    1330             :   Schedule* schedule_;
    1331             :   ZoneQueue<Node*> queue_;
    1332             : };
    1333             : 
    1334             : 
    1335     1456235 : void Scheduler::ScheduleEarly() {
    1336     1456235 :   TRACE("--- SCHEDULE EARLY -----------------------------------------\n");
    1337     1456263 :   if (FLAG_trace_turbo_scheduler) {
    1338           0 :     TRACE("roots: ");
    1339           0 :     for (Node* node : schedule_root_nodes_) {
    1340           0 :       TRACE("#%d:%s ", node->id(), node->op()->mnemonic());
    1341             :     }
    1342           0 :     TRACE("\n");
    1343             :   }
    1344             : 
    1345             :   // Compute the minimum block for each node thereby determining the earliest
    1346             :   // position each node could be placed within a valid schedule.
    1347     1456263 :   ScheduleEarlyNodeVisitor schedule_early_visitor(zone_, this);
    1348     1456200 :   schedule_early_visitor.Run(&schedule_root_nodes_);
    1349     1456276 : }
    1350             : 
    1351             : 
    1352             : // -----------------------------------------------------------------------------
    1353             : // Phase 5: Schedule nodes late.
    1354             : 
    1355             : 
    1356             : class ScheduleLateNodeVisitor {
    1357             :  public:
    1358     1456166 :   ScheduleLateNodeVisitor(Zone* zone, Scheduler* scheduler)
    1359             :       : zone_(zone),
    1360             :         scheduler_(scheduler),
    1361             :         schedule_(scheduler_->schedule_),
    1362             :         marked_(scheduler->zone_),
    1363     4368505 :         marking_queue_(scheduler->zone_) {}
    1364             : 
    1365             :   // Run the schedule late algorithm on a set of fixed root nodes.
    1366             :   void Run(NodeVector* roots) {
    1367    53666689 :     for (Node* const root : *roots) {
    1368    26105193 :       ProcessQueue(root);
    1369             :     }
    1370             :   }
    1371             : 
    1372             :  private:
    1373    26105132 :   void ProcessQueue(Node* root) {
    1374    26105132 :     ZoneQueue<Node*>* queue = &(scheduler_->schedule_queue_);
    1375   121076533 :     for (Node* node : root->inputs()) {
    1376             :       // Don't schedule coupled nodes on their own.
    1377    94971204 :       if (scheduler_->GetPlacement(node) == Scheduler::kCoupled) {
    1378       17267 :         node = NodeProperties::GetControlInput(node);
    1379             :       }
    1380             : 
    1381             :       // Test schedulability condition by looking at unscheduled use count.
    1382    94978478 :       if (scheduler_->GetData(node)->unscheduled_count_ != 0) continue;
    1383             : 
    1384             :       queue->push(node);
    1385   104515808 :       do {
    1386   104519601 :         Node* const node = queue->front();
    1387             :         queue->pop();
    1388   104515945 :         VisitNode(node);
    1389             :       } while (!queue->empty());
    1390             :     }
    1391    26105329 :   }
    1392             : 
    1393             :   // Visits one node from the queue of schedulable nodes and determines its
    1394             :   // schedule late position. Also hoists nodes out of loops to find a more
    1395             :   // optimal scheduling position.
    1396   173079139 :   void VisitNode(Node* node) {
    1397             :     DCHECK_EQ(0, scheduler_->GetData(node)->unscheduled_count_);
    1398             : 
    1399             :     // Don't schedule nodes that are already scheduled.
    1400   209032068 :     if (schedule_->IsScheduled(node)) return;
    1401             :     DCHECK_EQ(Scheduler::kSchedulable, scheduler_->GetPlacement(node));
    1402             : 
    1403             :     // Determine the dominating block for all of the uses of this node. It is
    1404             :     // the latest block that this node can be scheduled in.
    1405    68291911 :     TRACE("Scheduling #%d:%s\n", node->id(), node->op()->mnemonic());
    1406    68291911 :     BasicBlock* block = GetCommonDominatorOfUses(node);
    1407             :     DCHECK_NOT_NULL(block);
    1408             : 
    1409             :     // The schedule early block dominates the schedule late block.
    1410   137308646 :     BasicBlock* min_block = scheduler_->GetData(node)->minimum_block_;
    1411             :     DCHECK_EQ(min_block, BasicBlock::GetCommonDominator(block, min_block));
    1412    68291555 :     TRACE(
    1413             :         "Schedule late of #%d:%s is id:%d at loop depth %d, minimum = id:%d\n",
    1414             :         node->id(), node->op()->mnemonic(), block->id().ToInt(),
    1415             :         block->loop_depth(), min_block->id().ToInt());
    1416             : 
    1417             :     // Hoist nodes out of loops if possible. Nodes can be hoisted iteratively
    1418             :     // into enclosing loop pre-headers until they would precede their schedule
    1419             :     // early position.
    1420    69017091 :     BasicBlock* hoist_block = GetHoistBlock(block);
    1421    69016124 :     if (hoist_block &&
    1422             :         hoist_block->dominator_depth() >= min_block->dominator_depth()) {
    1423      161412 :       do {
    1424      161414 :         TRACE("  hoisting #%d:%s to block id:%d\n", node->id(),
    1425             :               node->op()->mnemonic(), hoist_block->id().ToInt());
    1426             :         DCHECK_LT(hoist_block->loop_depth(), block->loop_depth());
    1427             :         block = hoist_block;
    1428      161414 :         hoist_block = GetHoistBlock(hoist_block);
    1429      162515 :       } while (hoist_block &&
    1430             :                hoist_block->dominator_depth() >= min_block->dominator_depth());
    1431   136262512 :     } else if (scheduler_->flags_ & Scheduler::kSplitNodes) {
    1432             :       // Split the {node} if beneficial and return the new {block} for it.
    1433    31216571 :       block = SplitNode(block, node);
    1434             :     }
    1435             : 
    1436             :     // Schedule the node or a floating control structure.
    1437    68291399 :     if (IrOpcode::IsMergeOpcode(node->opcode())) {
    1438             :       ScheduleFloatingControl(block, node);
    1439    68019869 :     } else if (node->opcode() == IrOpcode::kFinishRegion) {
    1440      156257 :       ScheduleRegion(block, node);
    1441             :     } else {
    1442    67863612 :       ScheduleNode(block, node);
    1443             :     }
    1444             :   }
    1445             : 
    1446             :   // Mark {block} and push its non-marked predecessor on the marking queue.
    1447    29532830 :   void MarkBlock(BasicBlock* block) {
    1448             :     DCHECK_LT(block->id().ToSize(), marked_.size());
    1449    68352296 :     marked_[block->id().ToSize()] = true;
    1450    97885183 :     for (BasicBlock* pred_block : block->predecessors()) {
    1451             :       DCHECK_LT(pred_block->id().ToSize(), marked_.size());
    1452    38819466 :       if (marked_[pred_block->id().ToSize()]) continue;
    1453    35395597 :       marking_queue_.push_back(pred_block);
    1454             :     }
    1455    29532887 :   }
    1456             : 
    1457    31216580 :   BasicBlock* SplitNode(BasicBlock* block, Node* node) {
    1458             :     // For now, we limit splitting to pure nodes.
    1459    31216580 :     if (!node->op()->HasProperty(Operator::kPure)) return block;
    1460             :     // TODO(titzer): fix the special case of splitting of projections.
    1461    23099484 :     if (node->opcode() == IrOpcode::kProjection) return block;
    1462             : 
    1463             :     // The {block} is common dominator of all uses of {node}, so we cannot
    1464             :     // split anything unless the {block} has at least two successors.
    1465             :     DCHECK_EQ(block, GetCommonDominatorOfUses(node));
    1466    22932887 :     if (block->SuccessorCount() < 2) return block;
    1467             : 
    1468             :     // Clear marking bits.
    1469             :     DCHECK(marking_queue_.empty());
    1470    92101051 :     std::fill(marked_.begin(), marked_.end(), false);
    1471    24834204 :     marked_.resize(schedule_->BasicBlockCount() + 1, false);
    1472             : 
    1473             :     // Check if the {node} has uses in {block}.
    1474    60445336 :     for (Edge edge : node->use_edges()) {
    1475    58042916 :       if (!scheduler_->IsLive(edge.from())) continue;
    1476    29021482 :       BasicBlock* use_block = GetBlockForUse(edge);
    1477    58043088 :       if (use_block == nullptr || marked_[use_block->id().ToSize()]) continue;
    1478    22332590 :       if (use_block == block) {
    1479    10014683 :         TRACE("  not splitting #%d:%s, it is used in id:%d\n", node->id(),
    1480             :               node->op()->mnemonic(), block->id().ToInt());
    1481    10014683 :         marking_queue_.clear();
    1482    10014683 :         return block;
    1483             :       }
    1484    12317907 :       MarkBlock(use_block);
    1485             :     }
    1486             : 
    1487             :     // Compute transitive marking closure; a block is marked if all its
    1488             :     // successors are marked.
    1489    29305871 :     do {
    1490    29305877 :       BasicBlock* top_block = marking_queue_.front();
    1491    29305877 :       marking_queue_.pop_front();
    1492    29305823 :       if (marked_[top_block->id().ToSize()]) continue;
    1493             :       bool marked = true;
    1494    79419451 :       for (BasicBlock* successor : top_block->successors()) {
    1495    36606420 :         if (!marked_[successor->id().ToSize()]) {
    1496             :           marked = false;
    1497             :           break;
    1498             :         }
    1499             :       }
    1500    25597563 :       if (marked) MarkBlock(top_block);
    1501             :     } while (!marking_queue_.empty());
    1502             : 
    1503             :     // If the (common dominator) {block} is marked, we know that all paths from
    1504             :     // {block} to the end contain at least one use of {node}, and hence there's
    1505             :     // no point in splitting the {node} in this case.
    1506     2402414 :     if (marked_[block->id().ToSize()]) {
    1507     1517809 :       TRACE("  not splitting #%d:%s, its common dominator id:%d is perfect\n",
    1508             :             node->id(), node->op()->mnemonic(), block->id().ToInt());
    1509     1517809 :       return block;
    1510             :     }
    1511             : 
    1512             :     // Split {node} for uses according to the previously computed marking
    1513             :     // closure. Every marking partition has a unique dominator, which get's a
    1514             :     // copy of the {node} with the exception of the first partition, which get's
    1515             :     // the {node} itself.
    1516      884605 :     ZoneMap<BasicBlock*, Node*> dominators(scheduler_->zone_);
    1517     8606951 :     for (Edge edge : node->use_edges()) {
    1518     7722344 :       if (!scheduler_->IsLive(edge.from())) continue;
    1519     3861158 :       BasicBlock* use_block = GetBlockForUse(edge);
    1520    10398338 :       if (use_block == nullptr) continue;
    1521    13074308 :       while (marked_[use_block->dominator()->id().ToSize()]) {
    1522     2675956 :         use_block = use_block->dominator();
    1523             :       }
    1524     3861198 :       auto& use_node = dominators[use_block];
    1525     3861187 :       if (use_node == nullptr) {
    1526     2941815 :         if (dominators.size() == 1u) {
    1527             :           // Place the {node} at {use_block}.
    1528      884610 :           block = use_block;
    1529      884610 :           use_node = node;
    1530      884610 :           TRACE("  pushing #%d:%s down to id:%d\n", node->id(),
    1531             :                 node->op()->mnemonic(), block->id().ToInt());
    1532             :         } else {
    1533             :           // Place a copy of {node} at {use_block}.
    1534     2057205 :           use_node = CloneNode(node);
    1535     2057193 :           TRACE("  cloning #%d:%s for id:%d\n", use_node->id(),
    1536             :                 use_node->op()->mnemonic(), use_block->id().ToInt());
    1537     2057193 :           scheduler_->schedule_queue_.push(use_node);
    1538             :         }
    1539             :       }
    1540     3861183 :       edge.UpdateTo(use_node);
    1541             :     }
    1542             :     return block;
    1543             :   }
    1544             : 
    1545   136905914 :   BasicBlock* GetHoistBlock(BasicBlock* block) {
    1546    68937100 :     if (block->IsLoopHeader()) return block->dominator();
    1547             :     // We have to check to make sure that the {block} dominates all
    1548             :     // of the outgoing blocks.  If it doesn't, then there is a path
    1549             :     // out of the loop which does not execute this {block}, so we
    1550             :     // can't hoist operations from this {block} out of the loop, as
    1551             :     // that would introduce additional computations.
    1552    68210207 :     if (BasicBlock* header_block = block->loop_header()) {
    1553    15318082 :       for (BasicBlock* outgoing_block :
    1554    19859801 :            scheduler_->special_rpo_->GetOutgoingBlocks(header_block)) {
    1555    10293577 :         if (BasicBlock::GetCommonDominator(block, outgoing_block) != block) {
    1556             :           return nullptr;
    1557             :         }
    1558             :       }
    1559      241393 :       return header_block->dominator();
    1560             :     }
    1561             :     return nullptr;
    1562             :   }
    1563             : 
    1564    68684350 :   BasicBlock* GetCommonDominatorOfUses(Node* node) {
    1565             :     BasicBlock* block = nullptr;
    1566   389154143 :     for (Edge edge : node->use_edges()) {
    1567   320470608 :       if (!scheduler_->IsLive(edge.from())) continue;
    1568   160235386 :       BasicBlock* use_block = GetBlockForUse(edge);
    1569             :       block = block == nullptr
    1570             :                   ? use_block
    1571             :                   : use_block == nullptr
    1572             :                         ? block
    1573   160234520 :                         : BasicBlock::GetCommonDominator(block, use_block);
    1574             :     }
    1575    68683535 :     return block;
    1576             :   }
    1577             : 
    1578             :   BasicBlock* FindPredecessorBlock(Node* node) {
    1579     8250051 :     return scheduler_->control_flow_builder_->FindPredecessorBlock(node);
    1580             :   }
    1581             : 
  // Determines the basic block that dominates the given use edge, i.e. the
  // block that the node on the defining end of {edge} must (at the latest)
  // be scheduled into so that this particular use is dominated. Returns
  // nullptr when the using node has not been placed into a block yet.
  BasicBlock* GetBlockForUse(Edge edge) {
    // TODO(titzer): ignore uses from dead nodes (not visited in PrepareUses()).
    // Dead uses only occur if the graph is not trimmed before scheduling.
    Node* use = edge.from();
    if (IrOpcode::IsPhiOpcode(use->opcode())) {
      // If the use is from a coupled (i.e. floating) phi, compute the common
      // dominator of its uses. This will not recurse more than one level.
      if (scheduler_->GetPlacement(use) == Scheduler::kCoupled) {
        TRACE("  inspecting uses of coupled #%d:%s\n", use->id(),
              use->op()->mnemonic());
        // TODO(titzer): reenable once above TODO is addressed.
        //        DCHECK_EQ(edge.to(), NodeProperties::GetControlInput(use));
        return GetCommonDominatorOfUses(use);
      }
      // If the use is from a fixed (i.e. non-floating) phi, we use the
      // predecessor block of the corresponding control input to the merge.
      // A value flowing into input {i} of a phi only needs to dominate the
      // {i}-th predecessor of the phi's merge, not the merge itself.
      if (scheduler_->GetPlacement(use) == Scheduler::kFixed) {
        TRACE("  input@%d into a fixed phi #%d:%s\n", edge.index(), use->id(),
              use->op()->mnemonic());
        Node* merge = NodeProperties::GetControlInput(use, 0);
        DCHECK(IrOpcode::IsMergeOpcode(merge->opcode()));
        Node* input = NodeProperties::GetControlInput(merge, edge.index());
        return FindPredecessorBlock(input);
      }
    } else if (IrOpcode::IsMergeOpcode(use->opcode())) {
      // If the use is from a fixed (i.e. non-floating) merge, we use the
      // predecessor block of the current input to the merge.
      if (scheduler_->GetPlacement(use) == Scheduler::kFixed) {
        TRACE("  input@%d into a fixed merge #%d:%s\n", edge.index(), use->id(),
              use->op()->mnemonic());
        return FindPredecessorBlock(edge.to());
      }
    }
    // Default case: the value must dominate the block the use itself was
    // scheduled into (nullptr if the use has no block yet).
    BasicBlock* result = schedule_->block(use);
    if (result == nullptr) return nullptr;
    TRACE("  must dominate use #%d:%s in id:%d\n", use->id(),
          use->op()->mnemonic(), result->id().ToInt());
    return result;
  }
    1621             : 
    1622             :   void ScheduleFloatingControl(BasicBlock* block, Node* node) {
    1623      271530 :     scheduler_->FuseFloatingControl(block, node);
    1624             :   }
    1625             : 
    1626      156257 :   void ScheduleRegion(BasicBlock* block, Node* region_end) {
    1627             :     // We only allow regions of instructions connected into a linear
    1628             :     // effect chain. The only value allowed to be produced by a node
    1629             :     // in the chain must be the value consumed by the FinishRegion node.
    1630             : 
    1631             :     // We schedule back to front; we first schedule FinishRegion.
    1632      156257 :     CHECK_EQ(IrOpcode::kFinishRegion, region_end->opcode());
    1633      156257 :     ScheduleNode(block, region_end);
    1634             : 
    1635             :     // Schedule the chain.
    1636     1141313 :     Node* node = NodeProperties::GetEffectInput(region_end);
    1637     1141313 :     while (node->opcode() != IrOpcode::kBeginRegion) {
    1638             :       DCHECK_EQ(0, scheduler_->GetData(node)->unscheduled_count_);
    1639             :       DCHECK_EQ(1, node->op()->EffectInputCount());
    1640             :       DCHECK_EQ(1, node->op()->EffectOutputCount());
    1641             :       DCHECK_EQ(0, node->op()->ControlOutputCount());
    1642             :       // The value output (if there is any) must be consumed
    1643             :       // by the EndRegion node.
    1644             :       DCHECK(node->op()->ValueOutputCount() == 0 ||
    1645             :              node == region_end->InputAt(0));
    1646      828799 :       ScheduleNode(block, node);
    1647      828799 :       node = NodeProperties::GetEffectInput(node);
    1648             :     }
    1649             :     // Schedule the BeginRegion node.
    1650             :     DCHECK_EQ(0, scheduler_->GetData(node)->unscheduled_count_);
    1651      156257 :     ScheduleNode(block, node);
    1652      156257 :   }
    1653             : 
    1654    69004877 :   void ScheduleNode(BasicBlock* block, Node* node) {
    1655    69004877 :     schedule_->PlanNode(block, node);
    1656             :     size_t block_id = block->id().ToSize();
    1657   215242713 :     if (!scheduler_->scheduled_nodes_[block_id]) {
    1658     8227106 :       scheduler_->scheduled_nodes_[block_id] =
    1659    24681488 :           new (zone_->New(sizeof(NodeVector))) NodeVector(zone_);
    1660             :     }
    1661   138010178 :     scheduler_->scheduled_nodes_[block_id]->push_back(node);
    1662    69003909 :     scheduler_->UpdatePlacement(node, Scheduler::kScheduled);
    1663    69004763 :   }
    1664             : 
    1665     4114394 :   Node* CloneNode(Node* node) {
    1666             :     int const input_count = node->InputCount();
    1667     3975977 :     for (int index = 0; index < input_count; ++index) {
    1668             :       Node* const input = node->InputAt(index);
    1669     3975975 :       scheduler_->IncrementUnscheduledUseCount(input, index, node);
    1670             :     }
    1671     6171595 :     Node* const copy = scheduler_->graph_->CloneNode(node);
    1672     2057200 :     TRACE(("clone #%d:%s -> #%d\n"), node->id(), node->op()->mnemonic(),
    1673             :           copy->id());
    1674     2057200 :     scheduler_->node_data_.resize(copy->id() + 1,
    1675     8228793 :                                   scheduler_->DefaultSchedulerData());
    1676     6171579 :     scheduler_->node_data_[copy->id()] = scheduler_->node_data_[node->id()];
    1677     2057193 :     return copy;
    1678             :   }
    1679             : 
  Zone* zone_;            // Zone used for temporary allocations (see ScheduleNode).
  Scheduler* scheduler_;  // Back-reference to the owning scheduler.
  Schedule* schedule_;    // Schedule being populated.
  // Presumably per-block mark bits and the worklist for the hoisting/marking
  // walk earlier in this class — usage not visible in this chunk; confirm.
  BoolVector marked_;
  ZoneDeque<BasicBlock*> marking_queue_;
};
    1686             : 
    1687             : 
    1688     1456119 : void Scheduler::ScheduleLate() {
    1689     1456119 :   TRACE("--- SCHEDULE LATE ------------------------------------------\n");
    1690     1456166 :   if (FLAG_trace_turbo_scheduler) {
    1691           0 :     TRACE("roots: ");
    1692           0 :     for (Node* node : schedule_root_nodes_) {
    1693           0 :       TRACE("#%d:%s ", node->id(), node->op()->mnemonic());
    1694             :     }
    1695           0 :     TRACE("\n");
    1696             :   }
    1697             : 
    1698             :   // Schedule: Places nodes in dominator block of all their uses.
    1699     1456166 :   ScheduleLateNodeVisitor schedule_late_visitor(zone_, this);
    1700             :   schedule_late_visitor.Run(&schedule_root_nodes_);
    1701     1456211 : }
    1702             : 
    1703             : 
    1704             : // -----------------------------------------------------------------------------
    1705             : // Phase 6: Seal the final schedule.
    1706             : 
    1707             : 
    1708     1456183 : void Scheduler::SealFinalSchedule() {
    1709     1456183 :   TRACE("--- SEAL FINAL SCHEDULE ------------------------------------\n");
    1710             : 
    1711             :   // Serialize the assembly order and reverse-post-order numbering.
    1712     1456183 :   special_rpo_->SerializeRPOIntoSchedule();
    1713             :   special_rpo_->PrintAndVerifySpecialRPO();
    1714             : 
    1715             :   // Add collected nodes for basic blocks to their blocks in the right order.
    1716             :   int block_num = 0;
    1717    16711432 :   for (NodeVector* nodes : scheduled_nodes_) {
    1718    27597994 :     BasicBlock::Id id = BasicBlock::Id::FromInt(block_num++);
    1719    13798997 :     BasicBlock* block = schedule_->GetBlockById(id);
    1720    13798926 :     if (nodes) {
    1721   146236980 :       for (Node* node : base::Reversed(*nodes)) {
    1722    69004717 :         schedule_->AddNode(block, node);
    1723             :       }
    1724             :     }
    1725             :   }
    1726     1456311 : }
    1727             : 
    1728             : 
    1729             : // -----------------------------------------------------------------------------
    1730             : 
    1731             : 
    1732      814589 : void Scheduler::FuseFloatingControl(BasicBlock* block, Node* node) {
    1733      271529 :   TRACE("--- FUSE FLOATING CONTROL ----------------------------------\n");
    1734      271530 :   if (FLAG_trace_turbo_scheduler) {
    1735           0 :     OFStream os(stdout);
    1736           0 :     os << "Schedule before control flow fusion:\n" << *schedule_;
    1737             :   }
    1738             : 
    1739             :   // Iterate on phase 1: Build control-flow graph.
    1740      271530 :   control_flow_builder_->Run(block, node);
    1741             : 
    1742             :   // Iterate on phase 2: Compute special RPO and dominator tree.
    1743      271530 :   special_rpo_->UpdateSpecialRPO(block, schedule_->block(node));
    1744             :   // TODO(mstarzinger): Currently "iterate on" means "re-run". Fix that.
    1745     9087994 :   for (BasicBlock* b = block->rpo_next(); b != nullptr; b = b->rpo_next()) {
    1746             :     b->set_dominator_depth(-1);
    1747             :     b->set_dominator(nullptr);
    1748             :   }
    1749      271530 :   PropagateImmediateDominators(block->rpo_next());
    1750             : 
    1751             :   // Iterate on phase 4: Schedule nodes early.
    1752             :   // TODO(mstarzinger): The following loop gathering the propagation roots is a
    1753             :   // temporary solution and should be merged into the rest of the scheduler as
    1754             :   // soon as the approach settled for all floating loops.
    1755      271530 :   NodeVector propagation_roots(control_flow_builder_->control_);
    1756     2605936 :   for (Node* node : control_flow_builder_->control_) {
    1757     7517019 :     for (Node* use : node->uses()) {
    1758     3342179 :       if (NodeProperties::IsPhi(use) && IsLive(use)) {
    1759      479344 :         propagation_roots.push_back(use);
    1760             :       }
    1761             :     }
    1762             :   }
    1763      271530 :   if (FLAG_trace_turbo_scheduler) {
    1764           0 :     TRACE("propagation roots: ");
    1765           0 :     for (Node* node : propagation_roots) {
    1766           0 :       TRACE("#%d:%s ", node->id(), node->op()->mnemonic());
    1767             :     }
    1768           0 :     TRACE("\n");
    1769             :   }
    1770      271530 :   ScheduleEarlyNodeVisitor schedule_early_visitor(zone_, this);
    1771      271529 :   schedule_early_visitor.Run(&propagation_roots);
    1772             : 
    1773             :   // Move previously planned nodes.
    1774             :   // TODO(mstarzinger): Improve that by supporting bulk moves.
    1775      543060 :   scheduled_nodes_.resize(schedule_->BasicBlockCount());
    1776      271530 :   MovePlannedNodes(block, schedule_->block(node));
    1777             : 
    1778      271530 :   if (FLAG_trace_turbo_scheduler) {
    1779           0 :     OFStream os(stdout);
    1780           0 :     os << "Schedule after control flow fusion:\n" << *schedule_;
    1781             :   }
    1782      271530 : }
    1783             : 
    1784             : 
    1785      271530 : void Scheduler::MovePlannedNodes(BasicBlock* from, BasicBlock* to) {
    1786      271530 :   TRACE("Move planned nodes from id:%d to id:%d\n", from->id().ToInt(),
    1787             :         to->id().ToInt());
    1788      796521 :   NodeVector* from_nodes = scheduled_nodes_[from->id().ToSize()];
    1789      271530 :   NodeVector* to_nodes = scheduled_nodes_[to->id().ToSize()];
    1790      543060 :   if (!from_nodes) return;
    1791             : 
    1792     7077442 :   for (Node* const node : *from_nodes) {
    1793     6570520 :     schedule_->SetBlockForNode(to, node);
    1794             :   }
    1795      253461 :   if (to_nodes) {
    1796           0 :     to_nodes->insert(to_nodes->end(), from_nodes->begin(), from_nodes->end());
    1797             :     from_nodes->clear();
    1798             :   } else {
    1799             :     std::swap(scheduled_nodes_[from->id().ToSize()],
    1800             :               scheduled_nodes_[to->id().ToSize()]);
    1801             :   }
    1802             : }
    1803             : 
    1804             : #undef TRACE
    1805             : 
    1806             : }  // namespace compiler
    1807             : }  // namespace internal
    1808             : }  // namespace v8

Generated by: LCOV version 1.10