Line data Source code
1 : // Copyright 2015 the V8 project authors. All rights reserved.
2 : // Use of this source code is governed by a BSD-style license that can be
3 : // found in the LICENSE file.
4 :
5 : #include "src/compiler/backend/live-range-separator.h"
6 : #include "src/compiler/backend/register-allocator.h"
7 :
8 : namespace v8 {
9 : namespace internal {
10 : namespace compiler {
11 :
12 : #define TRACE(...) \
13 : do { \
14 : if (FLAG_trace_alloc) PrintF(__VA_ARGS__); \
15 : } while (false)
16 :
17 : namespace {
18 :
19 45251988 : void CreateSplinter(TopLevelLiveRange* range, RegisterAllocationData* data,
20 : LifetimePosition first_cut, LifetimePosition last_cut) {
21 : DCHECK(!range->IsSplinter());
22 : // We can ignore ranges that live solely in deferred blocks.
23 : // If a range ends right at the end of a deferred block, it is marked by
24 : // the range builder as ending at gap start of the next block - since the
25 : // end is a position where the variable isn't live. We need to take that
26 : // into consideration.
27 : LifetimePosition max_allowed_end = last_cut.NextFullStart();
28 :
29 41591827 : if (first_cut <= range->Start() && max_allowed_end >= range->End()) {
30 18111236 : return;
31 : }
32 :
33 : LifetimePosition start = Max(first_cut, range->Start());
34 : LifetimePosition end = Min(last_cut, range->End());
35 :
36 18111223 : if (start < end) {
37 : // Ensure the original range has a spill range associated, before it gets
38 : // splintered. Splinters will point to it. This way, when attempting to
39 : // reuse spill slots of splinters, during allocation, we avoid clobbering
40 : // such slots.
41 13570389 : if (range->MayRequireSpillRange()) {
42 2224444 : data->CreateSpillRangeForLiveRange(range);
43 : }
44 13570391 : if (range->splinter() == nullptr) {
45 5369379 : TopLevelLiveRange* splinter =
46 5369394 : data->NextLiveRange(range->representation());
47 : DCHECK_NULL(data->live_ranges()[splinter->vreg()]);
48 10738758 : data->live_ranges()[splinter->vreg()] = splinter;
49 5369379 : range->SetSplinter(splinter);
50 : }
51 : Zone* zone = data->allocation_zone();
52 13570387 : TRACE("creating splinter %d for range %d between %d and %d\n",
53 : range->splinter()->vreg(), range->vreg(), start.ToInstructionIndex(),
54 : end.ToInstructionIndex());
55 13570387 : range->Splinter(start, end, zone);
56 : }
57 : }
58 :
59 13277864 : void SetSlotUse(TopLevelLiveRange* range) {
60 : range->set_has_slot_use(false);
61 19812154 : for (const UsePosition* pos = range->first_pos();
62 9906077 : !range->has_slot_use() && pos != nullptr; pos = pos->next()) {
63 6534290 : if (pos->type() == UsePositionType::kRequiresSlot) {
64 : range->set_has_slot_use(true);
65 : }
66 : }
67 3371787 : }
68 :
// Walks |range|'s use intervals block by block and splinters off every
// maximal run of deferred blocks, so the deferred portions can be allocated
// separately from the hot path. Statement order here is load-bearing: see
// the caching comment inside the loop.
void SplinterLiveRange(TopLevelLiveRange* range, RegisterAllocationData* data) {
  const InstructionSequence* code = data->code();
  UseInterval* interval = range->first_interval();

  // first_cut/last_cut bracket the current run of deferred blocks; both are
  // Invalid while we are not inside such a run.
  LifetimePosition first_cut = LifetimePosition::Invalid();
  LifetimePosition last_cut = LifetimePosition::Invalid();

  while (interval != nullptr) {
    // We have to cache these here, as splintering might destroy the original
    // interval below.
    UseInterval* next_interval = interval->next();
    LifetimePosition interval_end = interval->end();
    const InstructionBlock* first_block =
        code->GetInstructionBlock(interval->FirstGapIndex());
    const InstructionBlock* last_block =
        code->GetInstructionBlock(interval->LastGapIndex());
    int first_block_nr = first_block->rpo_number().ToInt();
    int last_block_nr = last_block->rpo_number().ToInt();
    // Scan every block the interval touches, in RPO order.
    for (int block_id = first_block_nr; block_id <= last_block_nr; ++block_id) {
      const InstructionBlock* current_block =
          code->InstructionBlockAt(RpoNumber::FromInt(block_id));
      if (current_block->IsDeferred()) {
        if (!first_cut.IsValid()) {
          // Opening a new deferred run: cut from this block's first gap.
          first_cut = LifetimePosition::GapFromInstructionIndex(
              current_block->first_instruction_index());
        }
        // We splinter until the last gap in the block. I assume this is done to
        // leave a little range to be allocated by normal register allocation
        // and then use that range to connect when splinters are merged back.
        // This might be done as control flow resolution does not insert moves
        // if two consecutive blocks in rpo order are also consecutive in
        // control flow.
        last_cut = LifetimePosition::GapFromInstructionIndex(
            current_block->last_instruction_index());
      } else {
        // Leaving a deferred run: splinter it out and reset the cut markers.
        if (first_cut.IsValid()) {
          CreateSplinter(range, data, first_cut, last_cut);
          first_cut = LifetimePosition::Invalid();
          last_cut = LifetimePosition::Invalid();
        }
      }
    }
    // If we reach the end of an interval with a first_cut and last_cut set, it
    // means that we can splinter to the end of the interval, as the value dies
    // in this control flow branch or is not live in the next block. In the
    // former case, we won't need to reload the value, so we can splinter to the
    // end of its lifetime. In the latter case, control flow resolution will
    // have to connect blocks anyway, so we can also splinter to the end of the
    // block, too.
    if (first_cut.IsValid()) {
      CreateSplinter(range, data, first_cut, interval_end);
      first_cut = LifetimePosition::Invalid();
      last_cut = LifetimePosition::Invalid();
    }
    interval = next_interval;
  }

  // Redo has_slot_use: splintering moved use positions between the original
  // range and its splinter, so the cached flag on each must be recomputed.
  if (range->has_slot_use() && range->splinter() != nullptr) {
    SetSlotUse(range);
    SetSlotUse(range->splinter());
  }
}
132 :
133 : } // namespace
134 :
135 113544325 : void LiveRangeSeparator::Splinter() {
136 2141320 : size_t virt_reg_count = data()->live_ranges().size();
137 82855144 : for (size_t vreg = 0; vreg < virt_reg_count; ++vreg) {
138 202707020 : TopLevelLiveRange* range = data()->live_ranges()[vreg];
139 165104377 : if (range == nullptr || range->IsEmpty() || range->IsSplinter()) {
140 : continue;
141 : }
142 : int first_instr = range->first_interval()->FirstGapIndex();
143 35910588 : if (!data()->code()->GetInstructionBlock(first_instr)->IsDeferred()) {
144 30689378 : SplinterLiveRange(range, data());
145 : }
146 : }
147 2141517 : }
148 :
149 3020742 : void LiveRangeMerger::MarkRangesSpilledInDeferredBlocks() {
150 3020742 : const InstructionSequence* code = data()->code();
151 126276139 : for (TopLevelLiveRange* top : data()->live_ranges()) {
152 170473184 : if (top == nullptr || top->IsEmpty() || top->splinter() == nullptr ||
153 83723810 : top->HasSpillOperand() || !top->splinter()->HasSpillRange()) {
154 : continue;
155 : }
156 :
157 6213212 : LiveRange* child = top;
158 4842882 : for (; child != nullptr; child = child->next()) {
159 6213794 : if (child->spilled() ||
160 2250049 : child->NextSlotPosition(child->Start()) != nullptr) {
161 : break;
162 : }
163 : }
164 2593415 : if (child == nullptr) {
165 : top->TreatAsSpilledInDeferredBlock(data()->allocation_zone(),
166 879137 : code->InstructionBlockCount());
167 : }
168 : }
169 2141514 : }
170 :
171 90366775 : void LiveRangeMerger::Merge() {
172 2141465 : MarkRangesSpilledInDeferredBlocks();
173 :
174 4283728 : int live_range_count = static_cast<int>(data()->live_ranges().size());
175 82855589 : for (int i = 0; i < live_range_count; ++i) {
176 288791760 : TopLevelLiveRange* range = data()->live_ranges()[i];
177 165105099 : if (range == nullptr || range->IsEmpty() || !range->IsSplinter()) {
178 : continue;
179 : }
180 : TopLevelLiveRange* splinter_parent = range->splintered_from();
181 :
182 : int to_remove = range->vreg();
183 5369721 : splinter_parent->Merge(range, data()->allocation_zone());
184 16108221 : data()->live_ranges()[to_remove] = nullptr;
185 : }
186 2141550 : }
187 :
188 : #undef TRACE
189 :
190 : } // namespace compiler
191 : } // namespace internal
192 178779 : } // namespace v8
|