// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/compiler/scheduler.h"

#include <iomanip>

#include "src/base/adapters.h"
#include "src/bit-vector.h"
#include "src/compiler/common-operator.h"
#include "src/compiler/control-equivalence.h"
#include "src/compiler/graph.h"
#include "src/compiler/node-marker.h"
#include "src/compiler/node-properties.h"
#include "src/compiler/node.h"
#include "src/zone/zone-containers.h"

namespace v8 {
namespace internal {
namespace compiler {

#define TRACE(...)                                       \
  do {                                                   \
    if (FLAG_trace_turbo_scheduler) PrintF(__VA_ARGS__); \
  } while (false)

Scheduler::Scheduler(Zone* zone, Graph* graph, Schedule* schedule, Flags flags,
                     size_t node_count_hint)
    : zone_(zone),
      graph_(graph),
      schedule_(schedule),
      flags_(flags),
      scheduled_nodes_(zone),
      schedule_root_nodes_(zone),
      schedule_queue_(zone),
      node_data_(zone) {
  node_data_.reserve(node_count_hint);
  node_data_.resize(graph->NodeCount(), DefaultSchedulerData());
}

Schedule* Scheduler::ComputeSchedule(Zone* zone, Graph* graph, Flags flags) {
  Zone* schedule_zone =
      (flags & Scheduler::kTempSchedule) ? zone : graph->zone();

  // Reserve 10% more space for nodes if node splitting is enabled to try to
  // avoid resizing the vector since that would triple its zone memory usage.
  float node_hint_multiplier = (flags & Scheduler::kSplitNodes) ? 1.1 : 1;
  size_t node_count_hint = node_hint_multiplier * graph->NodeCount();

  Schedule* schedule =
      new (schedule_zone) Schedule(schedule_zone, node_count_hint);
  Scheduler scheduler(zone, graph, schedule, flags, node_count_hint);

  scheduler.BuildCFG();
  scheduler.ComputeSpecialRPONumbering();
  scheduler.GenerateImmediateDominatorTree();

  scheduler.PrepareUses();
  scheduler.ScheduleEarly();
  scheduler.ScheduleLate();

  scheduler.SealFinalSchedule();

  return schedule;
}
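
// Illustrative sketch of a call site (hypothetical, not from this file; in
// practice the TurboFan pipeline owns the zone, the graph and the flags):
#if 0
Schedule* schedule =
    Scheduler::ComputeSchedule(temp_zone, graph, Scheduler::kSplitNodes);
// {schedule} now carries the basic blocks, their RPO order and all node
// placements computed by the phases listed above.
#endif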


Scheduler::SchedulerData Scheduler::DefaultSchedulerData() {
  SchedulerData def = {schedule_->start(), 0, kUnknown};
  return def;
}


Scheduler::SchedulerData* Scheduler::GetData(Node* node) {
  return &node_data_[node->id()];
}


Scheduler::Placement Scheduler::GetPlacement(Node* node) {
  SchedulerData* data = GetData(node);
  if (data->placement_ == kUnknown) {  // Compute placement, once, on demand.
    switch (node->opcode()) {
      case IrOpcode::kParameter:
      case IrOpcode::kOsrValue:
        // Parameters and OSR values are always fixed to the start block.
        data->placement_ = kFixed;
        break;
      case IrOpcode::kPhi:
      case IrOpcode::kEffectPhi: {
        // Phis and effect phis are fixed if their control inputs are;
        // otherwise they are coupled to a floating control node.
        Placement p = GetPlacement(NodeProperties::GetControlInput(node));
        data->placement_ = (p == kFixed ? kFixed : kCoupled);
        break;
      }
#define DEFINE_CONTROL_CASE(V) case IrOpcode::k##V:
      CONTROL_OP_LIST(DEFINE_CONTROL_CASE)
#undef DEFINE_CONTROL_CASE
      {
        // Control nodes that were not control-reachable from end may float.
        data->placement_ = kSchedulable;
        break;
      }
      default:
        data->placement_ = kSchedulable;
        break;
    }
  }
  return data->placement_;
}


void Scheduler::UpdatePlacement(Node* node, Placement placement) {
  SchedulerData* data = GetData(node);
  if (data->placement_ != kUnknown) {  // Trap on mutation, not initialization.
    switch (node->opcode()) {
      case IrOpcode::kParameter:
        // Parameters are fixed once and for all.
        UNREACHABLE();
        break;
      case IrOpcode::kPhi:
      case IrOpcode::kEffectPhi: {
        // Phis and effect phis are coupled to their respective blocks.
        DCHECK_EQ(Scheduler::kCoupled, data->placement_);
        DCHECK_EQ(Scheduler::kFixed, placement);
        Node* control = NodeProperties::GetControlInput(node);
        BasicBlock* block = schedule_->block(control);
        schedule_->AddNode(block, node);
        break;
      }
#define DEFINE_CONTROL_CASE(V) case IrOpcode::k##V:
      CONTROL_OP_LIST(DEFINE_CONTROL_CASE)
#undef DEFINE_CONTROL_CASE
      {
        // Control nodes force coupled uses to be placed.
        for (auto use : node->uses()) {
          if (GetPlacement(use) == Scheduler::kCoupled) {
            DCHECK_EQ(node, NodeProperties::GetControlInput(use));
            UpdatePlacement(use, placement);
          }
        }
        break;
      }
      default:
        DCHECK_EQ(Scheduler::kSchedulable, data->placement_);
        DCHECK_EQ(Scheduler::kScheduled, placement);
        break;
    }
    // Reduce the use count of the node's inputs to potentially make them
    // schedulable. If all the uses of a node have been scheduled, then the
    // node itself can be scheduled.
    for (Edge const edge : node->input_edges()) {
      DecrementUnscheduledUseCount(edge.to(), edge.index(), edge.from());
    }
  }
  data->placement_ = placement;
}


bool Scheduler::IsCoupledControlEdge(Node* node, int index) {
  return GetPlacement(node) == kCoupled &&
         NodeProperties::FirstControlIndex(node) == index;
}


void Scheduler::IncrementUnscheduledUseCount(Node* node, int index,
                                             Node* from) {
  // Make sure that control edges from coupled nodes are not counted.
  if (IsCoupledControlEdge(from, index)) return;

  // Tracking use counts for fixed nodes is useless.
  if (GetPlacement(node) == kFixed) return;

  // Use count for coupled nodes is summed up on their control.
  if (GetPlacement(node) == kCoupled) {
    Node* control = NodeProperties::GetControlInput(node);
    return IncrementUnscheduledUseCount(control, index, from);
  }

  ++(GetData(node)->unscheduled_count_);
  if (FLAG_trace_turbo_scheduler) {
    TRACE("  Use count of #%d:%s (used by #%d:%s)++ = %d\n", node->id(),
          node->op()->mnemonic(), from->id(), from->op()->mnemonic(),
          GetData(node)->unscheduled_count_);
  }
}


void Scheduler::DecrementUnscheduledUseCount(Node* node, int index,
                                             Node* from) {
  // Make sure that control edges from coupled nodes are not counted.
  if (IsCoupledControlEdge(from, index)) return;

  // Tracking use counts for fixed nodes is useless.
  if (GetPlacement(node) == kFixed) return;

  // Use count for coupled nodes is summed up on their control.
  if (GetPlacement(node) == kCoupled) {
    Node* control = NodeProperties::GetControlInput(node);
    return DecrementUnscheduledUseCount(control, index, from);
  }

  DCHECK(GetData(node)->unscheduled_count_ > 0);
  --(GetData(node)->unscheduled_count_);
  if (FLAG_trace_turbo_scheduler) {
    TRACE("  Use count of #%d:%s (used by #%d:%s)-- = %d\n", node->id(),
          node->op()->mnemonic(), from->id(), from->op()->mnemonic(),
          GetData(node)->unscheduled_count_);
  }
  if (GetData(node)->unscheduled_count_ == 0) {
    TRACE("  newly eligible #%d:%s\n", node->id(), node->op()->mnemonic());
    schedule_queue_.push(node);
  }
}


// -----------------------------------------------------------------------------
// Phase 1: Build control-flow graph.


// Internal class to build a control flow graph (i.e. the basic blocks and
// edges between them within a Schedule) from the node graph. Visits control
// edges of the graph backwards from an end node in order to find the
// connected control subgraph, needed for scheduling. (An illustrative sketch
// of this traversal follows the class.)
class CFGBuilder : public ZoneObject {
 public:
  CFGBuilder(Zone* zone, Scheduler* scheduler)
      : zone_(zone),
        scheduler_(scheduler),
        schedule_(scheduler->schedule_),
        queued_(scheduler->graph_, 2),
        queue_(zone),
        control_(zone),
        component_entry_(nullptr),
        component_start_(nullptr),
        component_end_(nullptr) {}

  // Run the control flow graph construction algorithm by walking the graph
  // backwards from end through control edges, building and connecting the
  // basic blocks for control nodes.
  void Run() {
    ResetDataStructures();
    Queue(scheduler_->graph_->end());

    while (!queue_.empty()) {  // Breadth-first backwards traversal.
      Node* node = queue_.front();
      queue_.pop();
      int max = NodeProperties::PastControlIndex(node);
      for (int i = NodeProperties::FirstControlIndex(node); i < max; i++) {
        Queue(node->InputAt(i));
      }
    }

    for (NodeVector::iterator i = control_.begin(); i != control_.end(); ++i) {
      ConnectBlocks(*i);  // Connect block to its predecessor/successors.
    }
  }

  // Run the control flow graph construction for a minimal control-connected
  // component ending in {exit} and merge that component into an existing
  // control flow graph at the bottom of {block}.
  void Run(BasicBlock* block, Node* exit) {
    ResetDataStructures();
    Queue(exit);

    component_entry_ = nullptr;
    component_start_ = block;
    component_end_ = schedule_->block(exit);
    scheduler_->equivalence_->Run(exit);
    while (!queue_.empty()) {  // Breadth-first backwards traversal.
      Node* node = queue_.front();
      queue_.pop();

      // Use control dependence equivalence to find a canonical single-entry
      // single-exit region that makes up a minimal component to be scheduled.
      if (IsSingleEntrySingleExitRegion(node, exit)) {
        TRACE("Found SESE at #%d:%s\n", node->id(), node->op()->mnemonic());
        DCHECK(!component_entry_);
        component_entry_ = node;
        continue;
      }

      int max = NodeProperties::PastControlIndex(node);
      for (int i = NodeProperties::FirstControlIndex(node); i < max; i++) {
        Queue(node->InputAt(i));
      }
    }
    DCHECK(component_entry_);

    for (NodeVector::iterator i = control_.begin(); i != control_.end(); ++i) {
      ConnectBlocks(*i);  // Connect block to its predecessor/successors.
    }
  }

 private:
  friend class ScheduleLateNodeVisitor;
  friend class Scheduler;

  void FixNode(BasicBlock* block, Node* node) {
    schedule_->AddNode(block, node);
    scheduler_->UpdatePlacement(node, Scheduler::kFixed);
  }

  void Queue(Node* node) {
    // Mark the connected control nodes as they are queued.
    if (!queued_.Get(node)) {
      BuildBlocks(node);
      queue_.push(node);
      queued_.Set(node, true);
      control_.push_back(node);
    }
  }

  void BuildBlocks(Node* node) {
    switch (node->opcode()) {
      case IrOpcode::kEnd:
        FixNode(schedule_->end(), node);
        break;
      case IrOpcode::kStart:
        FixNode(schedule_->start(), node);
        break;
      case IrOpcode::kLoop:
      case IrOpcode::kMerge:
        BuildBlockForNode(node);
        break;
      case IrOpcode::kTerminate: {
        // Put Terminate in the loop to which it refers.
        Node* loop = NodeProperties::GetControlInput(node);
        BasicBlock* block = BuildBlockForNode(loop);
        FixNode(block, node);
        break;
      }
      case IrOpcode::kBranch:
      case IrOpcode::kSwitch:
        BuildBlocksForSuccessors(node);
        break;
#define BUILD_BLOCK_JS_CASE(Name) case IrOpcode::k##Name:
      JS_OP_LIST(BUILD_BLOCK_JS_CASE)
      // JS opcodes are just like calls => fall through.
#undef BUILD_BLOCK_JS_CASE
      case IrOpcode::kCall:
        if (NodeProperties::IsExceptionalCall(node)) {
          BuildBlocksForSuccessors(node);
        }
        break;
      default:
        break;
    }
  }

  void ConnectBlocks(Node* node) {
    switch (node->opcode()) {
      case IrOpcode::kLoop:
      case IrOpcode::kMerge:
        ConnectMerge(node);
        break;
      case IrOpcode::kBranch:
        scheduler_->UpdatePlacement(node, Scheduler::kFixed);
        ConnectBranch(node);
        break;
      case IrOpcode::kSwitch:
        scheduler_->UpdatePlacement(node, Scheduler::kFixed);
        ConnectSwitch(node);
        break;
      case IrOpcode::kDeoptimize:
        scheduler_->UpdatePlacement(node, Scheduler::kFixed);
        ConnectDeoptimize(node);
        break;
      case IrOpcode::kTailCall:
        scheduler_->UpdatePlacement(node, Scheduler::kFixed);
        ConnectTailCall(node);
        break;
      case IrOpcode::kReturn:
        scheduler_->UpdatePlacement(node, Scheduler::kFixed);
        ConnectReturn(node);
        break;
      case IrOpcode::kThrow:
        scheduler_->UpdatePlacement(node, Scheduler::kFixed);
        ConnectThrow(node);
        break;
#define CONNECT_BLOCK_JS_CASE(Name) case IrOpcode::k##Name:
      JS_OP_LIST(CONNECT_BLOCK_JS_CASE)
      // JS opcodes are just like calls => fall through.
#undef CONNECT_BLOCK_JS_CASE
      case IrOpcode::kCall:
        if (NodeProperties::IsExceptionalCall(node)) {
          scheduler_->UpdatePlacement(node, Scheduler::kFixed);
          ConnectCall(node);
        }
        break;
      default:
        break;
    }
  }

  BasicBlock* BuildBlockForNode(Node* node) {
    BasicBlock* block = schedule_->block(node);
    if (block == nullptr) {
      block = schedule_->NewBasicBlock();
      TRACE("Create block id:%d for #%d:%s\n", block->id().ToInt(), node->id(),
            node->op()->mnemonic());
      FixNode(block, node);
    }
    return block;
  }

  void BuildBlocksForSuccessors(Node* node) {
    size_t const successor_cnt = node->op()->ControlOutputCount();
    Node** successors = zone_->NewArray<Node*>(successor_cnt);
    NodeProperties::CollectControlProjections(node, successors, successor_cnt);
    for (size_t index = 0; index < successor_cnt; ++index) {
      BuildBlockForNode(successors[index]);
    }
  }

  void CollectSuccessorBlocks(Node* node, BasicBlock** successor_blocks,
                              size_t successor_cnt) {
    Node** successors = reinterpret_cast<Node**>(successor_blocks);
    NodeProperties::CollectControlProjections(node, successors, successor_cnt);
    for (size_t index = 0; index < successor_cnt; ++index) {
      successor_blocks[index] = schedule_->block(successors[index]);
    }
  }

  BasicBlock* FindPredecessorBlock(Node* node) {
    BasicBlock* predecessor_block = nullptr;
    while (true) {
      predecessor_block = schedule_->block(node);
      if (predecessor_block != nullptr) break;
      node = NodeProperties::GetControlInput(node);
    }
    return predecessor_block;
  }

  void ConnectCall(Node* call) {
    BasicBlock* successor_blocks[2];
    CollectSuccessorBlocks(call, successor_blocks, arraysize(successor_blocks));

    // Consider the exception continuation to be deferred.
    successor_blocks[1]->set_deferred(true);

    Node* call_control = NodeProperties::GetControlInput(call);
    BasicBlock* call_block = FindPredecessorBlock(call_control);
    TraceConnect(call, call_block, successor_blocks[0]);
    TraceConnect(call, call_block, successor_blocks[1]);
    schedule_->AddCall(call_block, call, successor_blocks[0],
                       successor_blocks[1]);
  }

  void ConnectBranch(Node* branch) {
    BasicBlock* successor_blocks[2];
    CollectSuccessorBlocks(branch, successor_blocks,
                           arraysize(successor_blocks));

    // Consider branch hints.
    switch (BranchHintOf(branch->op())) {
      case BranchHint::kNone:
        break;
      case BranchHint::kTrue:
        successor_blocks[1]->set_deferred(true);
        break;
      case BranchHint::kFalse:
        successor_blocks[0]->set_deferred(true);
        break;
    }

    if (branch == component_entry_) {
      TraceConnect(branch, component_start_, successor_blocks[0]);
      TraceConnect(branch, component_start_, successor_blocks[1]);
      schedule_->InsertBranch(component_start_, component_end_, branch,
                              successor_blocks[0], successor_blocks[1]);
    } else {
      Node* branch_control = NodeProperties::GetControlInput(branch);
      BasicBlock* branch_block = FindPredecessorBlock(branch_control);
      TraceConnect(branch, branch_block, successor_blocks[0]);
      TraceConnect(branch, branch_block, successor_blocks[1]);
      schedule_->AddBranch(branch_block, branch, successor_blocks[0],
                           successor_blocks[1]);
    }
  }

  void ConnectSwitch(Node* sw) {
    size_t const successor_count = sw->op()->ControlOutputCount();
    BasicBlock** successor_blocks =
        zone_->NewArray<BasicBlock*>(successor_count);
    CollectSuccessorBlocks(sw, successor_blocks, successor_count);

    if (sw == component_entry_) {
      for (size_t index = 0; index < successor_count; ++index) {
        TraceConnect(sw, component_start_, successor_blocks[index]);
      }
      schedule_->InsertSwitch(component_start_, component_end_, sw,
                              successor_blocks, successor_count);
    } else {
      Node* switch_control = NodeProperties::GetControlInput(sw);
      BasicBlock* switch_block = FindPredecessorBlock(switch_control);
      for (size_t index = 0; index < successor_count; ++index) {
        TraceConnect(sw, switch_block, successor_blocks[index]);
      }
      schedule_->AddSwitch(switch_block, sw, successor_blocks, successor_count);
    }
  }

  void ConnectMerge(Node* merge) {
    // Don't connect the special merge at the end to its predecessors.
    if (IsFinalMerge(merge)) return;

    BasicBlock* block = schedule_->block(merge);
    DCHECK_NOT_NULL(block);
    // For all of the merge's control inputs, add a goto at the end to the
    // merge's basic block.
    for (Node* const input : merge->inputs()) {
      BasicBlock* predecessor_block = FindPredecessorBlock(input);
      TraceConnect(merge, predecessor_block, block);
      schedule_->AddGoto(predecessor_block, block);
    }
  }

  void ConnectTailCall(Node* call) {
    Node* call_control = NodeProperties::GetControlInput(call);
    BasicBlock* call_block = FindPredecessorBlock(call_control);
    TraceConnect(call, call_block, nullptr);
    schedule_->AddTailCall(call_block, call);
  }

  void ConnectReturn(Node* ret) {
    Node* return_control = NodeProperties::GetControlInput(ret);
    BasicBlock* return_block = FindPredecessorBlock(return_control);
    TraceConnect(ret, return_block, nullptr);
    schedule_->AddReturn(return_block, ret);
  }

  void ConnectDeoptimize(Node* deopt) {
    Node* deoptimize_control = NodeProperties::GetControlInput(deopt);
    BasicBlock* deoptimize_block = FindPredecessorBlock(deoptimize_control);
    TraceConnect(deopt, deoptimize_block, nullptr);
    schedule_->AddDeoptimize(deoptimize_block, deopt);
  }

  void ConnectThrow(Node* thr) {
    Node* throw_control = NodeProperties::GetControlInput(thr);
    BasicBlock* throw_block = FindPredecessorBlock(throw_control);
    TraceConnect(thr, throw_block, nullptr);
    schedule_->AddThrow(throw_block, thr);
  }

  void TraceConnect(Node* node, BasicBlock* block, BasicBlock* succ) {
    DCHECK_NOT_NULL(block);
    if (succ == nullptr) {
      TRACE("Connect #%d:%s, id:%d -> end\n", node->id(),
            node->op()->mnemonic(), block->id().ToInt());
    } else {
      TRACE("Connect #%d:%s, id:%d -> id:%d\n", node->id(),
            node->op()->mnemonic(), block->id().ToInt(), succ->id().ToInt());
    }
  }

  bool IsFinalMerge(Node* node) {
    return (node->opcode() == IrOpcode::kMerge &&
            node == scheduler_->graph_->end()->InputAt(0));
  }

  bool IsSingleEntrySingleExitRegion(Node* entry, Node* exit) const {
    size_t entry_class = scheduler_->equivalence_->ClassOf(entry);
    size_t exit_class = scheduler_->equivalence_->ClassOf(exit);
    return entry != exit && entry_class == exit_class;
  }

  void ResetDataStructures() {
    control_.clear();
    DCHECK(queue_.empty());
    DCHECK(control_.empty());
  }

  Zone* zone_;
  Scheduler* scheduler_;
  Schedule* schedule_;
  NodeMarker<bool> queued_;      // Mark indicating whether node is queued.
  ZoneQueue<Node*> queue_;       // Queue used for breadth-first traversal.
  NodeVector control_;           // List of encountered control nodes.
  Node* component_entry_;        // Component single-entry node.
  BasicBlock* component_start_;  // Component single-entry block.
  BasicBlock* component_end_;    // Component single-exit block.
};
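
// Illustrative sketch of the backwards breadth-first traversal performed by
// CFGBuilder::Run() above, reduced to plain STL types. ToyNode and
// CollectControlComponent are stand-ins for illustration only, not part of
// V8:
#if 0
#include <queue>
#include <unordered_set>
#include <vector>

struct ToyNode {
  std::vector<ToyNode*> control_inputs;
};

// Collects the control-connected component that reaches {end}, visiting
// control inputs breadth-first, just as Run() does via Queue()/queued_.
std::vector<ToyNode*> CollectControlComponent(ToyNode* end) {
  std::vector<ToyNode*> component;
  std::unordered_set<ToyNode*> queued;
  std::queue<ToyNode*> queue;
  queue.push(end);
  queued.insert(end);
  while (!queue.empty()) {
    ToyNode* node = queue.front();
    queue.pop();
    component.push_back(node);
    for (ToyNode* input : node->control_inputs) {
      if (queued.insert(input).second) queue.push(input);
    }
  }
  return component;
}
#endif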


void Scheduler::BuildCFG() {
  TRACE("--- CREATING CFG -------------------------------------------\n");

  // Instantiate a new control equivalence algorithm for the graph.
  equivalence_ = new (zone_) ControlEquivalence(zone_, graph_);

  // Build a control-flow graph for the main control-connected component that
  // is being spanned by the graph's start and end nodes.
  control_flow_builder_ = new (zone_) CFGBuilder(zone_, this);
  control_flow_builder_->Run();

  // Initialize per-block data.
  // Reserve an extra 10% to avoid resizing the vector when fusing floating
  // control.
  scheduled_nodes_.reserve(schedule_->BasicBlockCount() * 1.1);
  scheduled_nodes_.resize(schedule_->BasicBlockCount());
}


// -----------------------------------------------------------------------------
// Phase 2: Compute special RPO and dominator tree.


// Compute the special reverse-post-order block ordering, which is essentially
// a RPO of the graph where loop bodies are contiguous. Properties:
// 1. If block A is a predecessor of B, then A appears before B in the order,
//    unless B is a loop header and A is in the loop headed at B
//    (i.e. A -> B is a backedge).
//    => If block A dominates block B, then A appears before B in the order.
//    => If block A is a loop header, A appears before all blocks in the loop
//       headed at A.
// 2. All loops are contiguous in the order (i.e. no intervening blocks that
//    do not belong to the loop.)
// Note that a simple RPO traversal satisfies (1) but not (2).
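//
// For example (an illustrative case, not taken from the original comment):
// given the edges B0->B1, B1->B2, B2->B1 (backedge) and B1->B3, the plain
// RPO [B0, B1, B3, B2] satisfies (1) but interleaves B3 with the loop
// {B1, B2}; the special RPO [B0, B1, B2, B3] keeps the loop contiguous.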
class SpecialRPONumberer : public ZoneObject {
 public:
  SpecialRPONumberer(Zone* zone, Schedule* schedule)
      : zone_(zone),
        schedule_(schedule),
        order_(nullptr),
        beyond_end_(nullptr),
        loops_(zone),
        backedges_(zone),
        stack_(zone),
        previous_block_count_(0),
        empty_(0, zone) {}

  // Computes the special reverse-post-order for the main control flow graph,
  // that is for the graph spanned between the schedule's start and end blocks.
  void ComputeSpecialRPO() {
    DCHECK(schedule_->end()->SuccessorCount() == 0);
    DCHECK(!order_);  // Main order does not exist yet.
    ComputeAndInsertSpecialRPO(schedule_->start(), schedule_->end());
  }

  // Computes the special reverse-post-order for a partial control flow graph,
  // that is for the graph spanned between the given {entry} and {end} blocks,
  // then updates the existing ordering with this new information.
  void UpdateSpecialRPO(BasicBlock* entry, BasicBlock* end) {
    DCHECK(order_);  // Main order to be updated is present.
    ComputeAndInsertSpecialRPO(entry, end);
  }

  // Serialize the previously computed order as a special reverse-post-order
  // numbering for basic blocks into the final schedule.
  void SerializeRPOIntoSchedule() {
    int32_t number = 0;
    for (BasicBlock* b = order_; b != nullptr; b = b->rpo_next()) {
      b->set_rpo_number(number++);
      schedule_->rpo_order()->push_back(b);
    }
    BeyondEndSentinel()->set_rpo_number(number);
  }

  // Print and verify the special reverse-post-order.
  void PrintAndVerifySpecialRPO() {
#if DEBUG
    if (FLAG_trace_turbo_scheduler) PrintRPO();
    VerifySpecialRPO();
#endif
  }

  const ZoneVector<BasicBlock*>& GetOutgoingBlocks(BasicBlock* block) {
    if (HasLoopNumber(block)) {
      LoopInfo const& loop = loops_[GetLoopNumber(block)];
      if (loop.outgoing) return *loop.outgoing;
    }
    return empty_;
  }

 private:
  typedef std::pair<BasicBlock*, size_t> Backedge;

  // Numbering for BasicBlock::rpo_number for this block traversal:
  static const int kBlockOnStack = -2;
  static const int kBlockVisited1 = -3;
  static const int kBlockVisited2 = -4;
  static const int kBlockUnvisited1 = -1;
  static const int kBlockUnvisited2 = kBlockVisited1;

  struct SpecialRPOStackFrame {
    BasicBlock* block;
    size_t index;
  };

  struct LoopInfo {
    BasicBlock* header;
    ZoneVector<BasicBlock*>* outgoing;
    BitVector* members;
    LoopInfo* prev;
    BasicBlock* end;
    BasicBlock* start;

    void AddOutgoing(Zone* zone, BasicBlock* block) {
      if (outgoing == nullptr) {
        outgoing = new (zone->New(sizeof(ZoneVector<BasicBlock*>)))
            ZoneVector<BasicBlock*>(zone);
      }
      outgoing->push_back(block);
    }
  };

  int Push(ZoneVector<SpecialRPOStackFrame>& stack, int depth,
           BasicBlock* child, int unvisited) {
    if (child->rpo_number() == unvisited) {
      stack[depth].block = child;
      stack[depth].index = 0;
      child->set_rpo_number(kBlockOnStack);
      return depth + 1;
    }
    return depth;
  }

  BasicBlock* PushFront(BasicBlock* head, BasicBlock* block) {
    block->set_rpo_next(head);
    return block;
  }

  static int GetLoopNumber(BasicBlock* block) { return block->loop_number(); }
  static void SetLoopNumber(BasicBlock* block, int loop_number) {
    return block->set_loop_number(loop_number);
  }
  static bool HasLoopNumber(BasicBlock* block) {
    return block->loop_number() >= 0;
  }

  // TODO(mstarzinger): We only need this special sentinel because some tests
  // use the schedule's end block in actual control flow (e.g. with end having
  // successors). Once this has been cleaned up we can use the end block here.
  BasicBlock* BeyondEndSentinel() {
    if (beyond_end_ == nullptr) {
      BasicBlock::Id id = BasicBlock::Id::FromInt(-1);
      beyond_end_ = new (schedule_->zone()) BasicBlock(schedule_->zone(), id);
    }
    return beyond_end_;
  }

  // Compute special RPO for the control flow graph between {entry} and {end},
  // mutating any existing order so that the result is still valid.
  void ComputeAndInsertSpecialRPO(BasicBlock* entry, BasicBlock* end) {
    // RPO should not have been serialized for this schedule yet.
    CHECK_EQ(kBlockUnvisited1, schedule_->start()->loop_number());
    CHECK_EQ(kBlockUnvisited1, schedule_->start()->rpo_number());
    CHECK_EQ(0, static_cast<int>(schedule_->rpo_order()->size()));

    // Find correct insertion point within existing order.
    BasicBlock* insertion_point = entry->rpo_next();
    BasicBlock* order = insertion_point;

    // Perform an iterative RPO traversal using an explicit stack,
    // recording backedges that form cycles. O(|B|).
    DCHECK_LT(previous_block_count_, schedule_->BasicBlockCount());
    stack_.resize(schedule_->BasicBlockCount() - previous_block_count_);
    previous_block_count_ = schedule_->BasicBlockCount();
    int stack_depth = Push(stack_, 0, entry, kBlockUnvisited1);
    int num_loops = static_cast<int>(loops_.size());

    while (stack_depth > 0) {
      int current = stack_depth - 1;
      SpecialRPOStackFrame* frame = &stack_[current];

      if (frame->block != end &&
          frame->index < frame->block->SuccessorCount()) {
        // Process the next successor.
        BasicBlock* succ = frame->block->SuccessorAt(frame->index++);
        if (succ->rpo_number() == kBlockVisited1) continue;
        if (succ->rpo_number() == kBlockOnStack) {
          // The successor is on the stack, so this is a backedge (cycle).
          backedges_.push_back(Backedge(frame->block, frame->index - 1));
          if (!HasLoopNumber(succ)) {
            // Assign a new loop number to the header if it doesn't have one.
            SetLoopNumber(succ, num_loops++);
          }
        } else {
          // Push the successor onto the stack.
          DCHECK(succ->rpo_number() == kBlockUnvisited1);
          stack_depth = Push(stack_, stack_depth, succ, kBlockUnvisited1);
        }
      } else {
        // Finished with all successors; pop the stack and add the block.
        order = PushFront(order, frame->block);
        frame->block->set_rpo_number(kBlockVisited1);
        stack_depth--;
      }
    }

    // If no loops were encountered, then the order we computed was correct.
    if (num_loops > static_cast<int>(loops_.size())) {
      // Otherwise, compute the loop information from the backedges in order
      // to perform a traversal that groups loop bodies together.
      ComputeLoopInfo(stack_, num_loops, &backedges_);

      // Initialize the "loop stack". Note the entry could be a loop header.
      LoopInfo* loop =
          HasLoopNumber(entry) ? &loops_[GetLoopNumber(entry)] : nullptr;
      order = insertion_point;

      // Perform an iterative post-order traversal, visiting loop bodies
      // before edges that lead out of loops. Visits each block once, but
      // linking loop sections together is linear in the loop size, so overall
      // is O(|B| + max(loop_depth) * max(|loop|)).
      stack_depth = Push(stack_, 0, entry, kBlockUnvisited2);
      while (stack_depth > 0) {
        SpecialRPOStackFrame* frame = &stack_[stack_depth - 1];
        BasicBlock* block = frame->block;
        BasicBlock* succ = nullptr;

        if (block != end && frame->index < block->SuccessorCount()) {
          // Process the next normal successor.
          succ = block->SuccessorAt(frame->index++);
        } else if (HasLoopNumber(block)) {
          // Process additional outgoing edges from the loop header.
          if (block->rpo_number() == kBlockOnStack) {
            // Finish the loop body the first time the header is left on the
            // stack.
            DCHECK(loop != nullptr && loop->header == block);
            loop->start = PushFront(order, block);
            order = loop->end;
            block->set_rpo_number(kBlockVisited2);
            // Pop the loop stack and continue visiting outgoing edges within
            // the context of the outer loop, if any.
            loop = loop->prev;
            // We leave the loop header on the stack; the rest of this
            // iteration and later iterations will go through its outgoing
            // edges list.
          }

          // Use the next outgoing edge if there are any.
          size_t outgoing_index = frame->index - block->SuccessorCount();
          LoopInfo* info = &loops_[GetLoopNumber(block)];
          DCHECK(loop != info);
          if (block != entry && info->outgoing != nullptr &&
              outgoing_index < info->outgoing->size()) {
            succ = info->outgoing->at(outgoing_index);
            frame->index++;
          }
        }

        if (succ != nullptr) {
          // Process the next successor.
          if (succ->rpo_number() == kBlockOnStack) continue;
          if (succ->rpo_number() == kBlockVisited2) continue;
          DCHECK(succ->rpo_number() == kBlockUnvisited2);
          if (loop != nullptr && !loop->members->Contains(succ->id().ToInt())) {
            // The successor is not in the current loop or any nested loop.
            // Add it to the outgoing edges of this loop and visit it later.
            loop->AddOutgoing(zone_, succ);
          } else {
            // Push the successor onto the stack.
            stack_depth = Push(stack_, stack_depth, succ, kBlockUnvisited2);
            if (HasLoopNumber(succ)) {
              // Push the inner loop onto the loop stack.
              DCHECK(GetLoopNumber(succ) < num_loops);
              LoopInfo* next = &loops_[GetLoopNumber(succ)];
              next->end = order;
              next->prev = loop;
              loop = next;
            }
          }
        } else {
          // Finished with all successors of the current block.
          if (HasLoopNumber(block)) {
            // If we are going to pop a loop header, then add its entire body.
            LoopInfo* info = &loops_[GetLoopNumber(block)];
            for (BasicBlock* b = info->start; true; b = b->rpo_next()) {
              if (b->rpo_next() == info->end) {
                b->set_rpo_next(order);
                info->end = order;
                break;
              }
            }
            order = info->start;
          } else {
            // Pop a single node off the stack and add it to the order.
            order = PushFront(order, block);
            block->set_rpo_number(kBlockVisited2);
          }
          stack_depth--;
        }
      }
    }

    // Publish new order the first time.
    if (order_ == nullptr) order_ = order;

    // Compute the correct loop headers and set the correct loop ends.
    LoopInfo* current_loop = nullptr;
    BasicBlock* current_header = entry->loop_header();
    int32_t loop_depth = entry->loop_depth();
    if (entry->IsLoopHeader()) --loop_depth;  // Entry might be a loop header.
    for (BasicBlock* b = order; b != insertion_point; b = b->rpo_next()) {
      BasicBlock* current = b;

      // Reset BasicBlock::rpo_number again.
      current->set_rpo_number(kBlockUnvisited1);

      // Finish the previous loop(s) if we just exited them.
      while (current_header != nullptr &&
             current == current_header->loop_end()) {
        DCHECK(current_header->IsLoopHeader());
        DCHECK_NOT_NULL(current_loop);
        current_loop = current_loop->prev;
        current_header =
            current_loop == nullptr ? nullptr : current_loop->header;
        --loop_depth;
      }
      current->set_loop_header(current_header);

      // Push a new loop onto the stack if this loop is a loop header.
      if (HasLoopNumber(current)) {
        ++loop_depth;
        current_loop = &loops_[GetLoopNumber(current)];
        BasicBlock* end = current_loop->end;
        current->set_loop_end(end == nullptr ? BeyondEndSentinel() : end);
        current_header = current_loop->header;
        TRACE("id:%d is a loop header, increment loop depth to %d\n",
              current->id().ToInt(), loop_depth);
      }

      current->set_loop_depth(loop_depth);

      if (current->loop_header() == nullptr) {
        TRACE("id:%d is not in a loop (depth == %d)\n", current->id().ToInt(),
              current->loop_depth());
      } else {
        TRACE("id:%d has loop header id:%d, (depth == %d)\n",
              current->id().ToInt(), current->loop_header()->id().ToInt(),
              current->loop_depth());
      }
    }
  }

  // Computes loop membership from the backedges of the control flow graph.
  void ComputeLoopInfo(ZoneVector<SpecialRPOStackFrame>& queue,
                       size_t num_loops, ZoneVector<Backedge>* backedges) {
    // Extend existing loop membership vectors.
    for (LoopInfo& loop : loops_) {
      BitVector* new_members = new (zone_)
          BitVector(static_cast<int>(schedule_->BasicBlockCount()), zone_);
      new_members->CopyFrom(*loop.members);
      loop.members = new_members;
    }

    // Extend loop information vector.
    loops_.resize(num_loops, LoopInfo());

    // Compute loop membership starting from backedges.
    // O(max(loop_depth) * max(|loop|))
    for (size_t i = 0; i < backedges->size(); i++) {
      BasicBlock* member = backedges->at(i).first;
      BasicBlock* header = member->SuccessorAt(backedges->at(i).second);
      size_t loop_num = GetLoopNumber(header);
      if (loops_[loop_num].header == nullptr) {
        loops_[loop_num].header = header;
        loops_[loop_num].members = new (zone_)
            BitVector(static_cast<int>(schedule_->BasicBlockCount()), zone_);
      }

      int queue_length = 0;
      if (member != header) {
        // As long as the header doesn't have a backedge to itself, push the
        // member onto the queue and process its predecessors.
        if (!loops_[loop_num].members->Contains(member->id().ToInt())) {
          loops_[loop_num].members->Add(member->id().ToInt());
        }
        queue[queue_length++].block = member;
      }

      // Propagate loop membership backwards. All predecessors of M up to the
      // loop header H are members of the loop too. O(|blocks between M and H|).
      while (queue_length > 0) {
        BasicBlock* block = queue[--queue_length].block;
        for (size_t i = 0; i < block->PredecessorCount(); i++) {
          BasicBlock* pred = block->PredecessorAt(i);
          if (pred != header) {
            if (!loops_[loop_num].members->Contains(pred->id().ToInt())) {
              loops_[loop_num].members->Add(pred->id().ToInt());
              queue[queue_length++].block = pred;
            }
          }
        }
      }
    }
  }
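
  // Illustrative sketch of the membership propagation above, with plain STL
  // stand-ins (ToyBlock and LoopMembers are for illustration only, not part
  // of V8; assumes <unordered_set> and <vector>): every block on a backwards
  // path from a backedge source {member} to the loop {header} is a member of
  // the loop.
#if 0
  struct ToyBlock {
    std::vector<ToyBlock*> predecessors;
  };

  static std::unordered_set<ToyBlock*> LoopMembers(ToyBlock* header,
                                                   ToyBlock* member) {
    std::unordered_set<ToyBlock*> members;
    std::vector<ToyBlock*> worklist;
    if (member != header) {  // A self-loop has no other members.
      members.insert(member);
      worklist.push_back(member);
    }
    while (!worklist.empty()) {
      ToyBlock* block = worklist.back();
      worklist.pop_back();
      for (ToyBlock* pred : block->predecessors) {
        // Stop at the header; every block reached before it joins the loop.
        if (pred != header && members.insert(pred).second) {
          worklist.push_back(pred);
        }
      }
    }
    return members;
  }
#endif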

#if DEBUG
  void PrintRPO() {
    OFStream os(stdout);
    os << "RPO with " << loops_.size() << " loops";
    if (loops_.size() > 0) {
      os << " (";
      for (size_t i = 0; i < loops_.size(); i++) {
        if (i > 0) os << " ";
        os << "id:" << loops_[i].header->id();
      }
      os << ")";
    }
    os << ":\n";

    for (BasicBlock* block = order_; block != nullptr;
         block = block->rpo_next()) {
      os << std::setw(5) << "B" << block->rpo_number() << ":";
      for (size_t i = 0; i < loops_.size(); i++) {
        bool range = loops_[i].header->LoopContains(block);
        bool membership = loops_[i].header != block && range;
        os << (membership ? " |" : "  ");
        os << (range ? "x" : " ");
      }
      os << "  id:" << block->id() << ": ";
      if (block->loop_end() != nullptr) {
        os << " range: [B" << block->rpo_number() << ", B"
           << block->loop_end()->rpo_number() << ")";
      }
      if (block->loop_header() != nullptr) {
        os << " header: id:" << block->loop_header()->id();
      }
      if (block->loop_depth() > 0) {
        os << " depth: " << block->loop_depth();
      }
      os << "\n";
    }
  }

  void VerifySpecialRPO() {
    BasicBlockVector* order = schedule_->rpo_order();
    DCHECK(order->size() > 0);
    DCHECK((*order)[0]->id().ToInt() == 0);  // entry should be first.

    for (size_t i = 0; i < loops_.size(); i++) {
      LoopInfo* loop = &loops_[i];
      BasicBlock* header = loop->header;
      BasicBlock* end = header->loop_end();

      DCHECK_NOT_NULL(header);
      DCHECK(header->rpo_number() >= 0);
      DCHECK(header->rpo_number() < static_cast<int>(order->size()));
      DCHECK_NOT_NULL(end);
      DCHECK(end->rpo_number() <= static_cast<int>(order->size()));
      DCHECK(end->rpo_number() > header->rpo_number());
      DCHECK(header->loop_header() != header);

      // Verify the start ... end list relationship.
      int links = 0;
      BasicBlock* block = loop->start;
      DCHECK_EQ(header, block);
      bool end_found;
      while (true) {
        if (block == nullptr || block == loop->end) {
          end_found = (loop->end == block);
          break;
        }
        // The list should be in the same order as the final result.
        DCHECK(block->rpo_number() == links + header->rpo_number());
        links++;
        block = block->rpo_next();
        DCHECK_LT(links, static_cast<int>(2 * order->size()));  // cycle?
      }
      DCHECK(links > 0);
      DCHECK(links == end->rpo_number() - header->rpo_number());
      DCHECK(end_found);

      // Check loop depth of the header.
      int loop_depth = 0;
      for (LoopInfo* outer = loop; outer != nullptr; outer = outer->prev) {
        loop_depth++;
      }
      DCHECK_EQ(loop_depth, header->loop_depth());

      // Check the contiguousness of loops.
      int count = 0;
      for (int j = 0; j < static_cast<int>(order->size()); j++) {
        BasicBlock* block = order->at(j);
        DCHECK(block->rpo_number() == j);
        if (j < header->rpo_number() || j >= end->rpo_number()) {
          DCHECK(!header->LoopContains(block));
        } else {
          DCHECK(header->LoopContains(block));
          DCHECK_GE(block->loop_depth(), loop_depth);
          count++;
        }
      }
      DCHECK(links == count);
    }
  }
#endif  // DEBUG

  Zone* zone_;
  Schedule* schedule_;
  BasicBlock* order_;
  BasicBlock* beyond_end_;
  ZoneVector<LoopInfo> loops_;
  ZoneVector<Backedge> backedges_;
  ZoneVector<SpecialRPOStackFrame> stack_;
  size_t previous_block_count_;
  ZoneVector<BasicBlock*> const empty_;
};


BasicBlockVector* Scheduler::ComputeSpecialRPO(Zone* zone, Schedule* schedule) {
  SpecialRPONumberer numberer(zone, schedule);
  numberer.ComputeSpecialRPO();
  numberer.SerializeRPOIntoSchedule();
  numberer.PrintAndVerifySpecialRPO();
  return schedule->rpo_order();
}


void Scheduler::ComputeSpecialRPONumbering() {
  TRACE("--- COMPUTING SPECIAL RPO ----------------------------------\n");

  // Compute the special reverse-post-order for basic blocks.
  special_rpo_ = new (zone_) SpecialRPONumberer(zone_, schedule_);
  special_rpo_->ComputeSpecialRPO();
}


void Scheduler::PropagateImmediateDominators(BasicBlock* block) {
  for (/*nop*/; block != nullptr; block = block->rpo_next()) {
    auto pred = block->predecessors().begin();
    auto end = block->predecessors().end();
    DCHECK(pred != end);  // All blocks except start have predecessors.
    BasicBlock* dominator = *pred;
    bool deferred = dominator->deferred();
    // For multiple predecessors, walk up the dominator tree until a common
    // dominator is found. Visitation order guarantees that all predecessors
    // except for backwards edges have been visited.
    for (++pred; pred != end; ++pred) {
      // Don't examine backwards edges.
      if ((*pred)->dominator_depth() < 0) continue;
      dominator = BasicBlock::GetCommonDominator(dominator, *pred);
      deferred = deferred & (*pred)->deferred();
    }
    block->set_dominator(dominator);
    block->set_dominator_depth(dominator->dominator_depth() + 1);
    block->set_deferred(deferred | block->deferred());
    TRACE("Block id:%d's idom is id:%d, depth = %d\n", block->id().ToInt(),
          dominator->id().ToInt(), block->dominator_depth());
  }
}
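
// Illustrative sketch of the common-dominator walk relied upon above (a
// stand-in for BasicBlock::GetCommonDominator; ToyBlock is for illustration
// only, not part of V8):
#if 0
struct ToyBlock {
  ToyBlock* dominator;
  int dominator_depth;
};

// Walks the deeper chain upward until both chains meet, as in the classic
// iterative dominance intersection.
ToyBlock* CommonDominator(ToyBlock* b1, ToyBlock* b2) {
  while (b1 != b2) {
    if (b1->dominator_depth < b2->dominator_depth) {
      b2 = b2->dominator;
    } else {
      b1 = b1->dominator;
    }
  }
  return b1;
}
#endif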


void Scheduler::GenerateImmediateDominatorTree() {
  TRACE("--- IMMEDIATE BLOCK DOMINATORS -----------------------------\n");

  // Seed start block to be the first dominator.
  schedule_->start()->set_dominator_depth(0);

  // Build the block dominator tree resulting from the above seed.
  PropagateImmediateDominators(schedule_->start()->rpo_next());
}


// -----------------------------------------------------------------------------
// Phase 3: Prepare use counts for nodes.


class PrepareUsesVisitor {
 public:
  explicit PrepareUsesVisitor(Scheduler* scheduler)
      : scheduler_(scheduler), schedule_(scheduler->schedule_) {}

  void Pre(Node* node) {
    if (scheduler_->GetPlacement(node) == Scheduler::kFixed) {
      // Fixed nodes are always roots for schedule late.
      scheduler_->schedule_root_nodes_.push_back(node);
      if (!schedule_->IsScheduled(node)) {
        // Make sure root nodes are scheduled in their respective blocks.
        TRACE("Scheduling fixed position node #%d:%s\n", node->id(),
              node->op()->mnemonic());
        IrOpcode::Value opcode = node->opcode();
        BasicBlock* block =
            opcode == IrOpcode::kParameter
                ? schedule_->start()
                : schedule_->block(NodeProperties::GetControlInput(node));
        DCHECK_NOT_NULL(block);
        schedule_->AddNode(block, node);
      }
    }
  }

  void PostEdge(Node* from, int index, Node* to) {
    // If the edge is from an unscheduled node, then tally it in the use count
    // for all of its inputs. The same criterion will be used in ScheduleLate
    // for decrementing use counts.
    if (!schedule_->IsScheduled(from)) {
      DCHECK_NE(Scheduler::kFixed, scheduler_->GetPlacement(from));
      scheduler_->IncrementUnscheduledUseCount(to, index, from);
    }
  }

 private:
  Scheduler* scheduler_;
  Schedule* schedule_;
};


void Scheduler::PrepareUses() {
  TRACE("--- PREPARE USES -------------------------------------------\n");

  // Count the uses of every node; this ensures that all of a node's uses are
  // scheduled before the node itself.
  PrepareUsesVisitor prepare_uses(this);

  // TODO(turbofan): simplify the careful pre/post ordering here.
  BoolVector visited(graph_->NodeCount(), false, zone_);
  ZoneStack<Node::InputEdges::iterator> stack(zone_);
  Node* node = graph_->end();
  prepare_uses.Pre(node);
  visited[node->id()] = true;
  stack.push(node->input_edges().begin());
  while (!stack.empty()) {
    Edge edge = *stack.top();
    Node* node = edge.to();
    if (visited[node->id()]) {
      prepare_uses.PostEdge(edge.from(), edge.index(), edge.to());
      if (++stack.top() == edge.from()->input_edges().end()) stack.pop();
    } else {
      prepare_uses.Pre(node);
      visited[node->id()] = true;
      if (node->InputCount() > 0) stack.push(node->input_edges().begin());
    }
  }
}
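
// Illustrative sketch of the use-count discipline set up here (ToyNode and
// OnNodeScheduled are stand-ins for illustration only, not part of V8): once
// every use of a node has been placed, the node becomes eligible for
// scheduling, which is exactly what DecrementUnscheduledUseCount() detects
// when the counter reaches zero.
#if 0
#include <queue>
#include <unordered_map>
#include <vector>

struct ToyNode {
  std::vector<ToyNode*> inputs;
};

// Called whenever {node} is placed into a block; inputs whose last
// unscheduled use this was become newly eligible for scheduling.
void OnNodeScheduled(ToyNode* node,
                     std::unordered_map<ToyNode*, int>* use_counts,
                     std::queue<ToyNode*>* schedule_queue) {
  for (ToyNode* input : node->inputs) {
    if (--(*use_counts)[input] == 0) schedule_queue->push(input);
  }
}
#endif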


// -----------------------------------------------------------------------------
// Phase 4: Schedule nodes early.


class ScheduleEarlyNodeVisitor {
 public:
  ScheduleEarlyNodeVisitor(Zone* zone, Scheduler* scheduler)
      : scheduler_(scheduler), schedule_(scheduler->schedule_), queue_(zone) {}

  // Run the schedule early algorithm on a set of fixed root nodes.
  void Run(NodeVector* roots) {
    for (Node* const root : *roots) {
      queue_.push(root);
      while (!queue_.empty()) {
        VisitNode(queue_.front());
        queue_.pop();
      }
    }
  }

 private:
  // Visits one node from the queue and propagates its current schedule early
  // position to all uses. This in turn might push more nodes onto the queue.
  void VisitNode(Node* node) {
    Scheduler::SchedulerData* data = scheduler_->GetData(node);

    // Fixed nodes already know their schedule early position.
    if (scheduler_->GetPlacement(node) == Scheduler::kFixed) {
      data->minimum_block_ = schedule_->block(node);
      TRACE("Fixing #%d:%s minimum_block = id:%d, dominator_depth = %d\n",
            node->id(), node->op()->mnemonic(),
            data->minimum_block_->id().ToInt(),
            data->minimum_block_->dominator_depth());
    }

    // No need to propagate unconstrained schedule early positions.
    if (data->minimum_block_ == schedule_->start()) return;

    // Propagate schedule early position.
    DCHECK_NOT_NULL(data->minimum_block_);
    for (auto use : node->uses()) {
      PropagateMinimumPositionToNode(data->minimum_block_, use);
    }
  }

  // Propagates {block} as another minimum position into the given {node}. This
  // has the net effect of computing the minimum dominator block of {node} that
  // still post-dominates all inputs to {node} when the queue is processed.
  void PropagateMinimumPositionToNode(BasicBlock* block, Node* node) {
    Scheduler::SchedulerData* data = scheduler_->GetData(node);

    // No need to propagate to a fixed node; it is guaranteed to be a root.
    if (scheduler_->GetPlacement(node) == Scheduler::kFixed) return;

    // Coupled nodes influence schedule early position of their control.
    if (scheduler_->GetPlacement(node) == Scheduler::kCoupled) {
      Node* control = NodeProperties::GetControlInput(node);
      PropagateMinimumPositionToNode(block, control);
    }

    // Propagate new position if it is deeper down the dominator tree than the
    // current. Note that all inputs need to have minimum block position inside
    // the dominator chain of {node}'s minimum block position.
    DCHECK(InsideSameDominatorChain(block, data->minimum_block_));
    if (block->dominator_depth() > data->minimum_block_->dominator_depth()) {
      data->minimum_block_ = block;
      queue_.push(node);
      TRACE("Propagating #%d:%s minimum_block = id:%d, dominator_depth = %d\n",
            node->id(), node->op()->mnemonic(),
            data->minimum_block_->id().ToInt(),
            data->minimum_block_->dominator_depth());
    }
  }

#if DEBUG
  bool InsideSameDominatorChain(BasicBlock* b1, BasicBlock* b2) {
    BasicBlock* dominator = BasicBlock::GetCommonDominator(b1, b2);
    return dominator == b1 || dominator == b2;
  }
#endif

  Scheduler* scheduler_;
  Schedule* schedule_;
  ZoneQueue<Node*> queue_;
};
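
// Illustrative sketch of the fixed point computed above (ToyBlock, ToyNode
// and PropagateMinimumPositions are stand-ins for illustration only, not
// part of V8): each node ends up with the deepest dominator-tree position
// among those demanded by its (transitive) inputs.
#if 0
#include <queue>
#include <unordered_map>
#include <vector>

struct ToyBlock {
  int dominator_depth;
};
struct ToyNode {
  std::vector<ToyNode*> uses;
};

void PropagateMinimumPositions(
    const std::vector<ToyNode*>& roots,
    std::unordered_map<ToyNode*, ToyBlock*>* minimum_block) {
  std::queue<ToyNode*> queue;
  for (ToyNode* root : roots) queue.push(root);  // Roots have fixed blocks.
  while (!queue.empty()) {
    ToyNode* node = queue.front();
    queue.pop();
    for (ToyNode* use : node->uses) {
      ToyBlock* candidate = (*minimum_block)[node];
      ToyBlock*& current = (*minimum_block)[use];
      if (current == nullptr ||
          candidate->dominator_depth > current->dominator_depth) {
        current = candidate;  // Deeper in the dominator tree.
        queue.push(use);      // Re-propagate from {use}.
      }
    }
  }
}
#endif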
1316 :
1317 :
1318 963565 : void Scheduler::ScheduleEarly() {
1319 963565 : TRACE("--- SCHEDULE EARLY -----------------------------------------\n");
1320 963574 : if (FLAG_trace_turbo_scheduler) {
1321 0 : TRACE("roots: ");
1322 0 : for (Node* node : schedule_root_nodes_) {
1323 0 : TRACE("#%d:%s ", node->id(), node->op()->mnemonic());
1324 : }
1325 0 : TRACE("\n");
1326 : }
1327 :
1328 : // Compute the minimum block for each node thereby determining the earliest
1329 : // position each node could be placed within a valid schedule.
1330 963574 : ScheduleEarlyNodeVisitor schedule_early_visitor(zone_, this);
1331 963562 : schedule_early_visitor.Run(&schedule_root_nodes_);
1332 963573 : }
1333 :
1334 :
1335 : // -----------------------------------------------------------------------------
1336 : // Phase 5: Schedule nodes late.
1337 :
1338 :
1339 : class ScheduleLateNodeVisitor {
1340 : public:
1341 963571 : ScheduleLateNodeVisitor(Zone* zone, Scheduler* scheduler)
1342 : : zone_(zone),
1343 : scheduler_(scheduler),
1344 : schedule_(scheduler_->schedule_),
1345 : marked_(scheduler->zone_),
1346 1927121 : marking_queue_(scheduler->zone_) {}
1347 :
1348 : // Run the schedule late algorithm on a set of fixed root nodes.
1349 : void Run(NodeVector* roots) {
1350 46977964 : for (Node* const root : *roots) {
1351 23007182 : ProcessQueue(root);
1352 : }
1353 : }
1354 :
1355 : private:
1356 23007276 : void ProcessQueue(Node* root) {
1357 23007276 : ZoneQueue<Node*>* queue = &(scheduler_->schedule_queue_);
1358 115219512 : for (Node* node : root->inputs()) {
1359 : // Don't schedule coupled nodes on their own.
1360 46106123 : if (scheduler_->GetPlacement(node) == Scheduler::kCoupled) {
1361 16500 : node = NodeProperties::GetControlInput(node);
1362 : }
1363 :
1364 : // Test schedulability condition by looking at unscheduled use count.
1365 92221882 : if (scheduler_->GetData(node)->unscheduled_count_ != 0) continue;
1366 :
1367 : queue->push(node);
1368 94384155 : do {
1369 94388630 : Node* const node = queue->front();
1370 : queue->pop();
1371 94384224 : VisitNode(node);
1372 : } while (!queue->empty());
1373 : }
1374 23007266 : }
1375 :
1376 : // Visits one node from the queue of schedulable nodes and determines its
1377 : // schedule late position. Also hoists nodes out of loops to find a more
1378 : // optimal scheduling position.
1379 154177191 : void VisitNode(Node* node) {
1380 : DCHECK_EQ(0, scheduler_->GetData(node)->unscheduled_count_);
1381 :
1382 : // Don't schedule nodes that are already scheduled.
1383 188768644 : if (schedule_->IsScheduled(node)) return;
1384 : DCHECK_EQ(Scheduler::kSchedulable, scheduler_->GetPlacement(node));
1385 :
1386 : // Determine the dominating block for all of the uses of this node. It is
1387 : // the latest block that this node can be scheduled in.
1388 59578997 : TRACE("Scheduling #%d:%s\n", node->id(), node->op()->mnemonic());
1389 59578997 : BasicBlock* block = GetCommonDominatorOfUses(node);
1390 : DCHECK_NOT_NULL(block);
1391 :
1392 : // The schedule early block dominates the schedule late block.
1393 120004714 : BasicBlock* min_block = scheduler_->GetData(node)->minimum_block_;
1394 : DCHECK_EQ(min_block, BasicBlock::GetCommonDominator(block, min_block));
1395 59578306 : TRACE(
1396 : "Schedule late of #%d:%s is id:%d at loop depth %d, minimum = id:%d\n",
1397 : node->id(), node->op()->mnemonic(), block->id().ToInt(),
1398 : block->loop_depth(), min_block->id().ToInt());
1399 :
1400 : // Hoist nodes out of loops if possible. Nodes can be hoisted iteratively
1401 : // into enclosing loop pre-headers until they would precede their
1402 : // schedule-early position.
1403 60426408 : BasicBlock* hoist_block = GetHoistBlock(block);
1404 60425024 : if (hoist_block &&
1405 : hoist_block->dominator_depth() >= min_block->dominator_depth()) {
1406 153159 : do {
1407 153159 : TRACE(" hoisting #%d:%s to block id:%d\n", node->id(),
1408 : node->op()->mnemonic(), hoist_block->id().ToInt());
1409 : DCHECK_LT(hoist_block->loop_depth(), block->loop_depth());
1410 : block = hoist_block;
1411 153159 : hoist_block = GetHoistBlock(hoist_block);
1412 154685 : } while (hoist_block &&
1413 : hoist_block->dominator_depth() >= min_block->dominator_depth());
1414 118853332 : } else if (scheduler_->flags_ & Scheduler::kSplitNodes) {
1415 : // Split the {node} if beneficial and return the new {block} for it.
1416 30154369 : block = SplitNode(block, node);
1417 : }
1418 :
1419 : // Schedule the node or a floating control structure.
1420 59577613 : if (IrOpcode::IsMergeOpcode(node->opcode())) {
1421 : ScheduleFloatingControl(block, node);
1422 59362582 : } else if (node->opcode() == IrOpcode::kFinishRegion) {
1423 107392 : ScheduleRegion(block, node);
1424 : } else {
1425 59255190 : ScheduleNode(block, node);
1426 : }
1427 : }
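// The hoisting loop above, distilled onto a stand-in type: starting from
// the latest legal block, keep stepping to the enclosing pre-header as long
// as that block is still dominated by the schedule-early block, so
// correctness is preserved while the node leaves as many loops as possible.
struct Block {
  int dominator_depth;
  Block* hoist_target;  // enclosing pre-header, or nullptr; stands in
                        // for what GetHoistBlock below computes
};

static Block* HoistOutOfLoops(Block* latest, Block* min_block) {
  Block* block = latest;
  while (block->hoist_target != nullptr &&
         block->hoist_target->dominator_depth >=
             min_block->dominator_depth) {
    block = block->hoist_target;  // one loop level further out
  }
  return block;
}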
1428 :
1429 : // Mark {block} and push its unmarked predecessors onto the marking queue.
1430 38242939 : void MarkBlock(BasicBlock* block) {
1431 : DCHECK_LT(block->id().ToSize(), marked_.size());
1432 90194000 : marked_[block->id().ToSize()] = true;
1433 128437121 : for (BasicBlock* pred_block : block->predecessors()) {
1434 : DCHECK_LT(pred_block->id().ToSize(), marked_.size());
1435 51951061 : if (marked_[pred_block->id().ToSize()]) continue;
1436 48171003 : marking_queue_.push_back(pred_block);
1437 : }
1438 38243121 : }
1439 :
1440 30154337 : BasicBlock* SplitNode(BasicBlock* block, Node* node) {
1441 : // For now, we limit splitting to pure nodes.
1442 30154337 : if (!node->op()->HasProperty(Operator::kPure)) return block;
1443 : // TODO(titzer): fix the special case of splitting of projections.
1444 23362791 : if (node->opcode() == IrOpcode::kProjection) return block;
1445 :
1446 : // The {block} is the common dominator of all uses of {node}, so we cannot
1447 : // split anything unless the {block} has at least two successors.
1448 : DCHECK_EQ(block, GetCommonDominatorOfUses(node));
1449 23224128 : if (block->SuccessorCount() < 2) return block;
1450 :
1451 : // Clear marking bits.
1452 : DCHECK(marking_queue_.empty());
1453 107065086 : std::fill(marked_.begin(), marked_.end(), false);
1454 26509970 : marked_.resize(schedule_->BasicBlockCount() + 1, false);
1455 :
1456 : // Check if the {node} has uses in {block}.
1457 59952233 : for (Edge edge : node->use_edges()) {
1458 28744125 : BasicBlock* use_block = GetBlockForUse(edge);
1459 57488343 : if (use_block == nullptr || marked_[use_block->id().ToSize()]) continue;
1460 24632163 : if (use_block == block) {
1461 10790977 : TRACE(" not splitting #%d:%s, it is used in id:%d\n", node->id(),
1462 : node->op()->mnemonic(), block->id().ToInt());
1463 10790977 : marking_queue_.clear();
1464 10790980 : return block;
1465 : }
1466 13841186 : MarkBlock(use_block);
1467 : }
1468 :
1469 : // Compute transitive marking closure; a block is marked if all its
1470 : // successors are marked.
1471 41060958 : do {
1472 41060945 : BasicBlock* top_block = marking_queue_.front();
1473 41060945 : marking_queue_.pop_front();
1474 41060860 : if (marked_[top_block->id().ToSize()]) continue;
1475 : bool marked = true;
1476 111104933 : for (BasicBlock* successor : top_block->successors()) {
1477 50113552 : if (!marked_[successor->id().ToSize()]) {
1478 : marked = false;
1479 : break;
1480 : }
1481 : }
1482 36588669 : if (marked) MarkBlock(top_block);
1483 : } while (!marking_queue_.empty());
1484 :
1485 : // If the (common dominator) {block} is marked, we know that all paths from
1486 : // {block} to the end contain at least one use of {node}, and hence there's
1487 : // no point in splitting the {node} in this case.
1488 2463996 : if (marked_[block->id().ToSize()]) {
1489 1419631 : TRACE(" not splitting #%d:%s, its common dominator id:%d is perfect\n",
1490 : node->id(), node->op()->mnemonic(), block->id().ToInt());
1491 1419631 : return block;
1492 : }
1493 :
1494 : // Split {node} for uses according to the previously computed marking
1495 : // closure. Every marking partition has a unique dominator, which gets a
1496 : // copy of the {node}, with the exception of the first partition, which
1497 : // gets the {node} itself.
1498 1044365 : ZoneMap<BasicBlock*, Node*> dominators(scheduler_->zone_);
1499 10763454 : for (Edge edge : node->use_edges()) {
1500 4859549 : BasicBlock* use_block = GetBlockForUse(edge);
1501 13145914 : if (use_block == nullptr) continue;
1502 16572716 : while (marked_[use_block->dominator()->id().ToSize()]) {
1503 3426802 : use_block = use_block->dominator();
1504 : }
1505 4859556 : auto& use_node = dominators[use_block];
1506 4859539 : if (use_node == nullptr) {
1507 3758953 : if (dominators.size() == 1u) {
1508 : // Place the {node} at {use_block}.
1509 1044358 : block = use_block;
1510 1044358 : use_node = node;
1511 1044358 : TRACE(" pushing #%d:%s down to id:%d\n", node->id(),
1512 : node->op()->mnemonic(), block->id().ToInt());
1513 : } else {
1514 : // Place a copy of {node} at {use_block}.
1515 2714595 : use_node = CloneNode(node);
1516 2714588 : TRACE(" cloning #%d:%s for id:%d\n", use_node->id(),
1517 : use_node->op()->mnemonic(), use_block->id().ToInt());
1518 2714588 : scheduler_->schedule_queue_.push(use_node);
1519 : }
1520 : }
1521 4859532 : edge.UpdateTo(use_node);
1522 : }
1523 : return block;
1524 : }
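// The marking closure above on a toy CFG, as a self-contained sketch
// (TBlock is a stand-in type): seed the blocks containing uses, then mark
// every block whose successors are all marked. If the common dominator ends
// up marked, every path from it reaches a use and splitting the node cannot
// avoid any work.
#include <deque>
#include <vector>

struct TBlock {
  std::vector<TBlock*> successors;
  std::vector<TBlock*> predecessors;
  bool marked = false;
};

static void ComputeMarkingClosure(const std::vector<TBlock*>& use_blocks) {
  std::deque<TBlock*> queue;
  auto mark = [&queue](TBlock* b) {
    b->marked = true;
    for (TBlock* pred : b->predecessors) {
      if (!pred->marked) queue.push_back(pred);
    }
  };
  for (TBlock* b : use_blocks) mark(b);
  while (!queue.empty()) {
    TBlock* top = queue.front();
    queue.pop_front();
    if (top->marked) continue;
    bool all_marked = true;
    for (TBlock* succ : top->successors) {
      if (!succ->marked) {
        all_marked = false;
        break;
      }
    }
    if (all_marked) mark(top);
  }
}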
1525 :
1526 119463058 : BasicBlock* GetHoistBlock(BasicBlock* block) {
1527 60293161 : if (block->IsLoopHeader()) return block->dominator();
1528 : // We have to check that the {block} dominates all of the loop's
1529 : // outgoing blocks. If it doesn't, there is a path out of the loop
1530 : // that does not execute this {block}, so hoisting operations from
1531 : // this {block} out of the loop would introduce additional
1532 : // computations on paths that skip this {block}.
1533 59456367 : if (BasicBlock* header_block = block->loop_header()) {
1534 19656978 : for (BasicBlock* outgoing_block :
1535 25696673 : scheduler_->special_rpo_->GetOutgoingBlocks(header_block)) {
1536 13044343 : if (BasicBlock::GetCommonDominator(block, outgoing_block) != block) {
1537 : return nullptr;
1538 : }
1539 : }
1540 286470 : return header_block->dominator();
1541 : }
1542 : return nullptr;
1543 : }
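// GetHoistBlock's rule on stand-in types (LBlock is hypothetical): a loop
// header hoists to its dominator (the pre-header), while a loop body block
// may hoist only if it dominates every outgoing block of the loop, i.e. no
// path can leave the loop without executing it.
#include <vector>

struct LBlock {
  LBlock* dominator = nullptr;      // immediate dominator
  LBlock* loop_header = nullptr;    // enclosing loop header, if any
  bool is_loop_header = false;
  std::vector<LBlock*> loop_exits;  // outgoing blocks, if a header
};

static bool Dominates(const LBlock* a, const LBlock* b) {
  for (const LBlock* c = b; c != nullptr; c = c->dominator) {
    if (c == a) return true;
  }
  return false;
}

static LBlock* HoistTarget(LBlock* block) {
  if (block->is_loop_header) return block->dominator;  // pre-header
  if (LBlock* header = block->loop_header) {
    for (LBlock* exit : header->loop_exits) {
      // Hoisting past a block that misses some loop exit would put the
      // computation on paths that currently skip it.
      if (!Dominates(block, exit)) return nullptr;
    }
    return header->dominator;
  }
  return nullptr;  // not inside any loop
}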
1544 :
1545 59912864 : BasicBlock* GetCommonDominatorOfUses(Node* node) {
1546 : BasicBlock* block = nullptr;
1547 327761609 : for (Edge edge : node->use_edges()) {
1548 133925428 : BasicBlock* use_block = GetBlockForUse(edge);
1549 : block = block == nullptr
1550 : ? use_block
1551 : : use_block == nullptr
1552 : ? block
1553 133922834 : : BasicBlock::GetCommonDominator(block, use_block);
1554 : }
1555 59910753 : return block;
1556 : }
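// A minimal sketch of the classic common-dominator walk that
// BasicBlock::GetCommonDominator presumably performs (its definition lives
// outside this file): lift the deeper of the two blocks to its immediate
// dominator until the two walks meet.
struct DBlock {
  int depth;          // dominator_depth()
  DBlock* dominator;  // immediate dominator; nullptr at the root
};

static DBlock* CommonDominator(DBlock* b1, DBlock* b2) {
  while (b1 != b2) {
    if (b1->depth < b2->depth) {
      b2 = b2->dominator;
    } else {
      b1 = b1->dominator;
    }
  }
  return b1;
}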
1557 :
1558 : BasicBlock* FindPredecessorBlock(Node* node) {
1559 9142569 : return scheduler_->control_flow_builder_->FindPredecessorBlock(node);
1560 : }
1561 :
1562 176663040 : BasicBlock* GetBlockForUse(Edge edge) {
1563 : // TODO(titzer): ignore uses from dead nodes (not visited in PrepareUses()).
1564 : // Dead uses only occur if the graph is not trimmed before scheduling.
1565 167520471 : Node* use = edge.from();
1566 167520471 : if (IrOpcode::IsPhiOpcode(use->opcode())) {
1567 : // If the use is from a coupled (i.e. floating) phi, compute the common
1568 : // dominator of its uses. This will not recurse more than one level.
1569 8024679 : if (scheduler_->GetPlacement(use) == Scheduler::kCoupled) {
1570 332377 : TRACE(" inspecting uses of coupled #%d:%s\n", use->id(),
1571 : use->op()->mnemonic());
1572 : // TODO(titzer): reenable once above TODO is addressed.
1573 : // DCHECK_EQ(edge.to(), NodeProperties::GetControlInput(use));
1574 332377 : return GetCommonDominatorOfUses(use);
1575 : }
1576 : // If the use is from a fixed (i.e. non-floating) phi, we use the
1577 : // predecessor block of the corresponding control input to the merge.
1578 7692294 : if (scheduler_->GetPlacement(use) == Scheduler::kFixed) {
1579 7692294 : TRACE(" input@%d into a fixed phi #%d:%s\n", edge.index(), use->id(),
1580 : use->op()->mnemonic());
1581 7692294 : Node* merge = NodeProperties::GetControlInput(use, 0);
1582 : DCHECK(IrOpcode::IsMergeOpcode(merge->opcode()));
1583 7692300 : Node* input = NodeProperties::GetControlInput(merge, edge.index());
1584 7692244 : return FindPredecessorBlock(input);
1585 : }
1586 159495792 : } else if (IrOpcode::IsMergeOpcode(use->opcode())) {
1587 : // If the use is from a fixed (i.e. non-floating) merge, we use the
1588 : // predecessor block of the current input to the merge.
1589 1450333 : if (scheduler_->GetPlacement(use) == Scheduler::kFixed) {
1590 1450333 : TRACE(" input@%d into a fixed merge #%d:%s\n", edge.index(), use->id(),
1591 : use->op()->mnemonic());
1592 2900665 : return FindPredecessorBlock(edge.to());
1593 : }
1594 : }
1595 158045460 : BasicBlock* result = schedule_->block(use);
1596 158044820 : if (result == nullptr) return nullptr;
1597 158044962 : TRACE(" must dominate use #%d:%s in id:%d\n", use->id(),
1598 : use->op()->mnemonic(), result->id().ToInt());
1599 158045149 : return result;
1600 : }
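// The fixed-phi rule above, distilled (PBlock is a stand-in type): a value
// flowing into input i of a phi is really consumed at the end of the i-th
// predecessor of the phi's merge block, so that predecessor, not the merge
// block itself, is what the value's placement must dominate.
#include <cstddef>
#include <vector>

struct PBlock {
  std::vector<PBlock*> predecessors;
};

static PBlock* BlockForPhiUse(PBlock* merge_block, int input_index) {
  // A merge's control inputs correspond 1:1 to its predecessors.
  return merge_block->predecessors[static_cast<std::size_t>(input_index)];
}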
1601 :
1602 : void ScheduleFloatingControl(BasicBlock* block, Node* node) {
1603 215031 : scheduler_->FuseFloatingControl(block, node);
1604 : }
1605 :
1606 107392 : void ScheduleRegion(BasicBlock* block, Node* region_end) {
1607 : // We only allow regions of instructions connected into a linear
1608 : // effect chain. The only value that may be produced by a node in
1609 : // the chain is the value consumed by the FinishRegion node.
1610 :
1611 : // We schedule back to front; we first schedule FinishRegion.
1612 107392 : CHECK_EQ(IrOpcode::kFinishRegion, region_end->opcode());
1613 107392 : ScheduleNode(block, region_end);
1614 :
1615 : // Schedule the chain.
1616 1014957 : Node* node = NodeProperties::GetEffectInput(region_end);
1617 1014957 : while (node->opcode() != IrOpcode::kBeginRegion) {
1618 : DCHECK_EQ(0, scheduler_->GetData(node)->unscheduled_count_);
1619 : DCHECK_EQ(1, node->op()->EffectInputCount());
1620 : DCHECK_EQ(1, node->op()->EffectOutputCount());
1621 : DCHECK_EQ(0, node->op()->ControlOutputCount());
1622 : // The value output (if there is any) must be consumed
1623 : // by the EndRegion node.
1624 : DCHECK(node->op()->ValueOutputCount() == 0 ||
1625 : node == region_end->InputAt(0));
1626 800173 : ScheduleNode(block, node);
1627 800173 : node = NodeProperties::GetEffectInput(node);
1628 : }
1629 : // Schedule the BeginRegion node.
1630 : DCHECK_EQ(0, scheduler_->GetData(node)->unscheduled_count_);
1631 107392 : ScheduleNode(block, node);
1632 107392 : }
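// The region walk above, distilled (RNode/Op are stand-in types): starting
// from the FinishRegion node, follow the single effect input back to the
// BeginRegion node, placing every node of the chain into the same block.
#include <functional>

enum class Op { kBeginRegion, kFinishRegion, kOther };

struct RNode {
  Op op;
  RNode* effect_input;  // the single effect input inside a region
};

static void PlaceRegion(RNode* region_end,
                        const std::function<void(RNode*)>& place) {
  place(region_end);  // FinishRegion first, scheduling back to front
  RNode* node = region_end->effect_input;
  while (node->op != Op::kBeginRegion) {
    place(node);
    node = node->effect_input;
  }
  place(node);  // finally the BeginRegion node
}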
1633 :
1634 60270086 : void ScheduleNode(BasicBlock* block, Node* node) {
1635 60270086 : schedule_->PlanNode(block, node);
1636 : size_t block_id = block->id().ToSize();
1637 188655530 : if (!scheduler_->scheduled_nodes_[block_id]) {
1638 7842829 : scheduler_->scheduled_nodes_[block_id] =
1639 23528558 : new (zone_->New(sizeof(NodeVector))) NodeVector(zone_);
1640 : }
1641 120541706 : scheduler_->scheduled_nodes_[block_id]->push_back(node);
1642 60269780 : scheduler_->UpdatePlacement(node, Scheduler::kScheduled);
1643 60270817 : }
1644 :
1645 5429186 : Node* CloneNode(Node* node) {
1646 : int const input_count = node->InputCount();
1647 3651216 : for (int index = 0; index < input_count; ++index) {
1648 : Node* const input = node->InputAt(index);
1649 3651208 : scheduler_->IncrementUnscheduledUseCount(input, index, node);
1650 : }
1651 8143773 : Node* const copy = scheduler_->graph_->CloneNode(node);
1652 2714589 : TRACE(("clone #%d:%s -> #%d\n"), node->id(), node->op()->mnemonic(),
1653 : copy->id());
1654 2714589 : scheduler_->node_data_.resize(copy->id() + 1,
1655 10858354 : scheduler_->DefaultSchedulerData());
1656 8143761 : scheduler_->node_data_[copy->id()] = scheduler_->node_data_[node->id()];
1657 2714587 : return copy;
1658 : }
1659 :
1660 : Zone* zone_;
1661 : Scheduler* scheduler_;
1662 : Schedule* schedule_;
1663 : BoolVector marked_;
1664 : ZoneDeque<BasicBlock*> marking_queue_;
1665 : };
1666 :
1667 :
1668 963566 : void Scheduler::ScheduleLate() {
1669 963566 : TRACE("--- SCHEDULE LATE ------------------------------------------\n");
1670 963573 : if (FLAG_trace_turbo_scheduler) {
1671 0 : TRACE("roots: ");
1672 0 : for (Node* node : schedule_root_nodes_) {
1673 0 : TRACE("#%d:%s ", node->id(), node->op()->mnemonic());
1674 : }
1675 0 : TRACE("\n");
1676 : }
1677 :
1678 : // Schedule: place each node in the common dominator block of all its uses.
1679 963573 : ScheduleLateNodeVisitor schedule_late_visitor(zone_, this);
1680 : schedule_late_visitor.Run(&schedule_root_nodes_);
1681 963582 : }
1682 :
1683 :
1684 : // -----------------------------------------------------------------------------
1685 : // Phase 6: Seal the final schedule.
1686 :
1687 :
1688 963570 : void Scheduler::SealFinalSchedule() {
1689 963570 : TRACE("--- SEAL FINAL SCHEDULE ------------------------------------\n");
1690 :
1691 : // Serialize the assembly order and reverse-post-order numbering.
1692 963570 : special_rpo_->SerializeRPOIntoSchedule();
1693 : special_rpo_->PrintAndVerifySpecialRPO();
1694 :
1695 : // Add collected nodes for basic blocks to their blocks in the right order.
1696 : int block_num = 0;
1697 14796644 : for (NodeVector* nodes : scheduled_nodes_) {
1698 25739180 : BasicBlock::Id id = BasicBlock::Id::FromInt(block_num++);
1699 12869590 : BasicBlock* block = schedule_->GetBlockById(id);
1700 12869311 : if (nodes) {
1701 128379539 : for (Node* node : base::Reversed(*nodes)) {
1702 60268177 : schedule_->AddNode(block, node);
1703 : }
1704 : }
1705 : }
1706 963590 : }
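// Why base::Reversed above: the late scheduler places a node only once all
// of its uses are placed, so each block's vector accumulates nodes in
// reverse order (uses before definitions); iterating backwards restores
// definition-before-use order. A toy illustration:
#include <cstdio>
#include <vector>

static void EmitInScheduleOrder(const std::vector<const char*>& planned) {
  // e.g. planned = {"use", "def"} as appended during schedule-late.
  for (auto it = planned.rbegin(); it != planned.rend(); ++it) {
    std::printf("%s\n", *it);  // prints "def" before "use"
  }
}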
1707 :
1708 :
1709 : // -----------------------------------------------------------------------------
1710 :
1711 :
1712 645094 : void Scheduler::FuseFloatingControl(BasicBlock* block, Node* node) {
1713 215030 : TRACE("--- FUSE FLOATING CONTROL ----------------------------------\n");
1714 215030 : if (FLAG_trace_turbo_scheduler) {
1715 0 : OFStream os(stdout);
1716 0 : os << "Schedule before control flow fusion:\n" << *schedule_;
1717 : }
1718 :
1719 : // Iterate on phase 1: Build control-flow graph.
1720 215030 : control_flow_builder_->Run(block, node);
1721 :
1722 : // Iterate on phase 2: Compute special RPO and dominator tree.
1723 215026 : special_rpo_->UpdateSpecialRPO(block, schedule_->block(node));
1724 : // TODO(mstarzinger): Currently "iterate on" means "re-run". Fix that.
1725 13336729 : for (BasicBlock* b = block->rpo_next(); b != nullptr; b = b->rpo_next()) {
1726 : b->set_dominator_depth(-1);
1727 : b->set_dominator(nullptr);
1728 : }
1729 215032 : PropagateImmediateDominators(block->rpo_next());
1730 :
1731 : // Iterate on phase 4: Schedule nodes early.
1732 : // TODO(mstarzinger): The following loop gathering the propagation roots is a
1733 : // temporary solution and should be merged into the rest of the scheduler as
1734 : // soon as the approach has settled for all floating loops.
1735 215032 : NodeVector propagation_roots(control_flow_builder_->control_);
1736 1901199 : for (Node* node : control_flow_builder_->control_) {
1737 5437386 : for (Node* use : node->uses()) {
1738 2090637 : if (NodeProperties::IsPhi(use)) propagation_roots.push_back(use);
1739 : }
1740 : }
1741 215031 : if (FLAG_trace_turbo_scheduler) {
1742 0 : TRACE("propagation roots: ");
1743 0 : for (Node* node : propagation_roots) {
1744 0 : TRACE("#%d:%s ", node->id(), node->op()->mnemonic());
1745 : }
1746 0 : TRACE("\n");
1747 : }
1748 215031 : ScheduleEarlyNodeVisitor schedule_early_visitor(zone_, this);
1749 215032 : schedule_early_visitor.Run(&propagation_roots);
1750 :
1751 : // Move previously planned nodes.
1752 : // TODO(mstarzinger): Improve that by supporting bulk moves.
1753 430064 : scheduled_nodes_.resize(schedule_->BasicBlockCount());
1754 215032 : MovePlannedNodes(block, schedule_->block(node));
1755 :
1756 215032 : if (FLAG_trace_turbo_scheduler) {
1757 0 : OFStream os(stdout);
1758 0 : os << "Schedule after control flow fusion:\n" << *schedule_;
1759 : }
1760 215031 : }
1761 :
1762 :
1763 215031 : void Scheduler::MovePlannedNodes(BasicBlock* from, BasicBlock* to) {
1764 215031 : TRACE("Move planned nodes from id:%d to id:%d\n", from->id().ToInt(),
1765 : to->id().ToInt());
1766 629467 : NodeVector* from_nodes = scheduled_nodes_[from->id().ToSize()];
1767 215031 : NodeVector* to_nodes = scheduled_nodes_[to->id().ToSize()];
1768 430063 : if (!from_nodes) return;
1769 :
1770 3591212 : for (Node* const node : *from_nodes) {
1771 3192403 : schedule_->SetBlockForNode(to, node);
1772 : }
1773 199405 : if (to_nodes) {
1774 0 : to_nodes->insert(to_nodes->end(), from_nodes->begin(), from_nodes->end());
1775 : from_nodes->clear();
1776 : } else {
1777 : std::swap(scheduled_nodes_[from->id().ToSize()],
1778 : scheduled_nodes_[to->id().ToSize()]);
1779 : }
1780 : }
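// The move above avoids copying node lists when the destination block has
// nothing planned yet: the two vector slots are simply swapped. A toy
// version of that choice, over plain int vectors:
#include <utility>
#include <vector>

static void MoveNodes(std::vector<int>*& from, std::vector<int>*& to) {
  if (from == nullptr) return;  // nothing planned in the source block
  if (to != nullptr) {
    // Destination already has nodes: append and empty the source.
    to->insert(to->end(), from->begin(), from->end());
    from->clear();
  } else {
    std::swap(from, to);  // O(1): exchange the pointers
  }
}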
1781 :
1782 : } // namespace compiler
1783 : } // namespace internal
1784 : } // namespace v8