Line data Source code
1 : // Copyright 2016 the V8 project authors. All rights reserved.
2 : // Use of this source code is governed by a BSD-style license that can be
3 : // found in the LICENSE file.
4 :
5 : #include "src/compiler/memory-optimizer.h"
6 :
7 : #include "src/compiler/js-graph.h"
8 : #include "src/compiler/linkage.h"
9 : #include "src/compiler/node-matchers.h"
10 : #include "src/compiler/node-properties.h"
11 : #include "src/compiler/node.h"
12 : #include "src/compiler/simplified-operator.h"
13 : #include "src/interface-descriptors.h"
14 :
15 : namespace v8 {
16 : namespace internal {
17 : namespace compiler {
18 :
19 523167 : MemoryOptimizer::MemoryOptimizer(JSGraph* jsgraph, Zone* zone,
20 : PoisoningMitigationLevel poisoning_level,
21 : AllocationFolding allocation_folding)
22 : : jsgraph_(jsgraph),
23 : empty_state_(AllocationState::Empty(zone)),
24 : pending_(zone),
25 : tokens_(zone),
26 : zone_(zone),
27 : graph_assembler_(jsgraph, nullptr, nullptr, zone),
28 : poisoning_level_(poisoning_level),
29 1569509 : allocation_folding_(allocation_folding) {}
30 :
31 523168 : void MemoryOptimizer::Optimize() {
32 523168 : EnqueueUses(graph()->start(), empty_state());
33 15017381 : while (!tokens_.empty()) {
34 13971041 : Token const token = tokens_.front();
35 : tokens_.pop();
36 13971045 : VisitNode(token.node, token.state);
37 : }
38 : DCHECK(pending_.empty());
39 : DCHECK(tokens_.empty());
40 523167 : }
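// Optimize() drives the whole pass as a worklist algorithm: starting from the
// graph's Start node it walks the effect chains, carrying an AllocationState
// that describes the currently open allocation group, if any. Each Token
// pairs an effect-consuming node with the state that reaches it; VisitNode()
// lowers that node and enqueues its effect uses with the resulting state.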
41 :
42 31406 : MemoryOptimizer::AllocationGroup::AllocationGroup(Node* node,
43 : PretenureFlag pretenure,
44 : Zone* zone)
45 15703 : : node_ids_(zone), pretenure_(pretenure), size_(nullptr) {
46 31406 : node_ids_.insert(node->id());
47 15703 : }
48 :
49 359056 : MemoryOptimizer::AllocationGroup::AllocationGroup(Node* node,
50 : PretenureFlag pretenure,
51 : Node* size, Zone* zone)
52 179528 : : node_ids_(zone), pretenure_(pretenure), size_(size) {
53 359056 : node_ids_.insert(node->id());
54 179528 : }
55 :
56 41201 : void MemoryOptimizer::AllocationGroup::Add(Node* node) {
57 82402 : node_ids_.insert(node->id());
58 0 : }
59 :
60 1959800 : bool MemoryOptimizer::AllocationGroup::Contains(Node* node) const {
61 3919601 : return node_ids_.find(node->id()) != node_ids_.end();
62 : }
63 :
64 0 : MemoryOptimizer::AllocationState::AllocationState()
65 523171 : : group_(nullptr), size_(std::numeric_limits<int>::max()), top_(nullptr) {}
66 :
67 0 : MemoryOptimizer::AllocationState::AllocationState(AllocationGroup* group)
68 15942 : : group_(group), size_(std::numeric_limits<int>::max()), top_(nullptr) {}
69 :
70 0 : MemoryOptimizer::AllocationState::AllocationState(AllocationGroup* group,
71 : intptr_t size, Node* top)
72 220729 : : group_(group), size_(size), top_(top) {}
73 :
74 2746520 : bool MemoryOptimizer::AllocationState::IsNewSpaceAllocation() const {
75 4711113 : return group() && group()->IsNewSpaceAllocation();
76 : }
77 :
78 13971039 : void MemoryOptimizer::VisitNode(Node* node, AllocationState const* state) {
79 : DCHECK(!node->IsDead());
80 : DCHECK_LT(0, node->op()->EffectInputCount());
81 13971039 : switch (node->opcode()) {
82 : case IrOpcode::kAllocate:
83 : // Allocate nodes were purged from the graph in effect-control
84 : // linearization.
85 0 : UNREACHABLE();
86 : case IrOpcode::kAllocateRaw:
87 236432 : return VisitAllocateRaw(node, state);
88 : case IrOpcode::kCall:
89 4127632 : return VisitCall(node, state);
90 : case IrOpcode::kCallWithCallerSavedRegisters:
91 340 : return VisitCallWithCallerSavedRegisters(node, state);
92 : case IrOpcode::kLoadElement:
93 28028 : return VisitLoadElement(node, state);
94 : case IrOpcode::kLoadField:
95 1898308 : return VisitLoadField(node, state);
96 : case IrOpcode::kStoreElement:
97 42564 : return VisitStoreElement(node, state);
98 : case IrOpcode::kStoreField:
99 2440556 : return VisitStoreField(node, state);
100 : case IrOpcode::kStore:
101 263400 : return VisitStore(node, state);
102 : case IrOpcode::kBitcastTaggedToWord:
103 : case IrOpcode::kBitcastWordToTagged:
104 : case IrOpcode::kComment:
105 : case IrOpcode::kDebugAbort:
106 : case IrOpcode::kDebugBreak:
107 : case IrOpcode::kDeoptimizeIf:
108 : case IrOpcode::kDeoptimizeUnless:
109 : case IrOpcode::kIfException:
110 : case IrOpcode::kLoad:
111 : case IrOpcode::kPoisonedLoad:
112 : case IrOpcode::kProtectedLoad:
113 : case IrOpcode::kProtectedStore:
114 : case IrOpcode::kRetain:
115 : case IrOpcode::kTaggedPoisonOnSpeculation:
116 : case IrOpcode::kUnalignedLoad:
117 : case IrOpcode::kUnalignedStore:
118 : case IrOpcode::kUnsafePointerAdd:
119 : case IrOpcode::kUnreachable:
120 : case IrOpcode::kWord32AtomicAdd:
121 : case IrOpcode::kWord32AtomicAnd:
122 : case IrOpcode::kWord32AtomicCompareExchange:
123 : case IrOpcode::kWord32AtomicExchange:
124 : case IrOpcode::kWord32AtomicLoad:
125 : case IrOpcode::kWord32AtomicOr:
126 : case IrOpcode::kWord32AtomicPairAdd:
127 : case IrOpcode::kWord32AtomicPairAnd:
128 : case IrOpcode::kWord32AtomicPairCompareExchange:
129 : case IrOpcode::kWord32AtomicPairExchange:
130 : case IrOpcode::kWord32AtomicPairLoad:
131 : case IrOpcode::kWord32AtomicPairOr:
132 : case IrOpcode::kWord32AtomicPairStore:
133 : case IrOpcode::kWord32AtomicPairSub:
134 : case IrOpcode::kWord32AtomicPairXor:
135 : case IrOpcode::kWord32AtomicStore:
136 : case IrOpcode::kWord32AtomicSub:
137 : case IrOpcode::kWord32AtomicXor:
138 : case IrOpcode::kWord32PoisonOnSpeculation:
139 : case IrOpcode::kWord64AtomicAdd:
140 : case IrOpcode::kWord64AtomicAnd:
141 : case IrOpcode::kWord64AtomicCompareExchange:
142 : case IrOpcode::kWord64AtomicExchange:
143 : case IrOpcode::kWord64AtomicLoad:
144 : case IrOpcode::kWord64AtomicOr:
145 : case IrOpcode::kWord64AtomicStore:
146 : case IrOpcode::kWord64AtomicSub:
147 : case IrOpcode::kWord64AtomicXor:
148 : case IrOpcode::kWord64PoisonOnSpeculation:
149 : // These operations cannot trigger GC.
150 : return VisitOtherEffect(node, state);
151 : default:
152 : break;
153 : }
154 : DCHECK_EQ(0, node->op()->EffectOutputCount());
155 : }
156 :
157 : #define __ gasm()->
158 :
159 472864 : void MemoryOptimizer::VisitAllocateRaw(Node* node,
160 810632 : AllocationState const* state) {
161 : DCHECK_EQ(IrOpcode::kAllocateRaw, node->opcode());
162 : Node* value;
163 : Node* size = node->InputAt(0);
164 : Node* effect = node->InputAt(1);
165 : Node* control = node->InputAt(2);
166 :
167 236432 : gasm()->Reset(effect, control);
168 :
169 236432 : PretenureFlag pretenure = PretenureFlagOf(node->op());
170 :
171 : // Propagate tenuring from outer allocations to inner allocations, i.e.
172 : // when we allocate an object in old space and store a newly allocated
173 : // child object into the pretenured object, then the newly allocated
174 : // child object should also get pretenured to old space.
175 236432 : if (pretenure == TENURED) {
176 13588 : for (Edge const edge : node->use_edges()) {
177 12396 : Node* const user = edge.from();
178 12396 : if (user->opcode() == IrOpcode::kStoreField && edge.index() == 0) {
179 3652 : Node* const child = user->InputAt(1);
180 3652 : if (child->opcode() == IrOpcode::kAllocateRaw &&
181 0 : PretenureFlagOf(child->op()) == NOT_TENURED) {
182 0 : NodeProperties::ChangeOp(child, node->op());
183 0 : break;
184 : }
185 : }
186 : }
187 : } else {
188 : DCHECK_EQ(NOT_TENURED, pretenure);
189 5876384 : for (Edge const edge : node->use_edges()) {
190 5404712 : Node* const user = edge.from();
191 5404712 : if (user->opcode() == IrOpcode::kStoreField && edge.index() == 1) {
192 70699 : Node* const parent = user->InputAt(0);
193 114282 : if (parent->opcode() == IrOpcode::kAllocateRaw &&
194 43583 : PretenureFlagOf(parent->op()) == TENURED) {
195 : pretenure = TENURED;
196 : break;
197 : }
198 : }
199 : }
200 : }
201 :
202 : // Determine the top/limit addresses.
203 : Node* top_address = __ ExternalConstant(
204 : pretenure == NOT_TENURED
205 : ? ExternalReference::new_space_allocation_top_address(isolate())
206 472864 : : ExternalReference::old_space_allocation_top_address(isolate()));
207 : Node* limit_address = __ ExternalConstant(
208 : pretenure == NOT_TENURED
209 : ? ExternalReference::new_space_allocation_limit_address(isolate())
210 472862 : : ExternalReference::old_space_allocation_limit_address(isolate()));
211 :
212 : // Check if we can fold this allocation into a previous allocation represented
213 : // by the incoming {state}.
214 : IntPtrMatcher m(size);
215 236430 : if (m.IsInRange(0, kMaxRegularHeapObjectSize)) {
216 : intptr_t const object_size = m.Value();
217 614418 : if (allocation_folding_ == AllocationFolding::kDoAllocationFolding &&
218 434892 : state->size() <= kMaxRegularHeapObjectSize - object_size &&
219 41201 : state->group()->pretenure() == pretenure) {
220 : // We can fold this Allocate {node} into the allocation {group}
221 : // represented by the given {state}. Compute the upper bound for
222 : // the new {state}.
223 41201 : intptr_t const state_size = state->size() + object_size;
224 :
225 : // Update the reservation check to the actual maximum upper bound.
226 82402 : AllocationGroup* const group = state->group();
227 41201 : if (machine()->Is64()) {
228 41201 : if (OpParameter<int64_t>(group->size()->op()) < state_size) {
229 : NodeProperties::ChangeOp(group->size(),
230 82402 : common()->Int64Constant(state_size));
231 : }
232 : } else {
233 0 : if (OpParameter<int32_t>(group->size()->op()) < state_size) {
234 : NodeProperties::ChangeOp(
235 : group->size(),
236 0 : common()->Int32Constant(static_cast<int32_t>(state_size)));
237 : }
238 : }
239 :
240 : // Update the allocation top with the new object allocation.
241 : // TODO(bmeurer): Defer writing back top as much as possible.
242 41201 : Node* top = __ IntAdd(state->top(), size);
243 : __ Store(StoreRepresentation(MachineType::PointerRepresentation(),
244 : kNoWriteBarrier),
245 82402 : top_address, __ IntPtrConstant(0), top);
246 :
247 : // Compute the effective inner allocated address.
248 : value = __ BitcastWordToTagged(
249 82402 : __ IntAdd(state->top(), __ IntPtrConstant(kHeapObjectTag)));
250 :
251 : // Extend the allocation {group}.
252 : group->Add(value);
253 : state = AllocationState::Open(group, state_size, top, zone());
254 : } else {
255 179526 : auto call_runtime = __ MakeDeferredLabel();
256 179526 : auto done = __ MakeLabel(MachineType::PointerRepresentation());
257 :
258 : // Setup a mutable reservation size node; will be patched as we fold
259 : // additional allocations into this new group.
260 179526 : Node* size = __ UniqueIntPtrConstant(object_size);
261 :
262 : // Load allocation top and limit.
263 : Node* top =
264 179528 : __ Load(MachineType::Pointer(), top_address, __ IntPtrConstant(0));
265 : Node* limit =
266 179527 : __ Load(MachineType::Pointer(), limit_address, __ IntPtrConstant(0));
267 :
268 : // Check if we need to collect garbage before we can start bump pointer
269 : // allocation (always done for folded allocations).
270 179527 : Node* check = __ UintLessThan(__ IntAdd(top, size), limit);
271 :
272 179528 : __ GotoIfNot(check, &call_runtime);
273 : __ Goto(&done, top);
274 :
275 : __ Bind(&call_runtime);
276 : {
277 : Node* target =
278 : pretenure == NOT_TENURED ? __ AllocateInNewSpaceStubConstant()
279 : : __
280 179527 : AllocateInOldSpaceStubConstant();
281 179526 : if (!allocate_operator_.is_set()) {
282 : auto descriptor = AllocateDescriptor{};
283 : auto call_descriptor = Linkage::GetStubCallDescriptor(
284 : graph()->zone(), descriptor, descriptor.GetStackParameterCount(),
285 106877 : CallDescriptor::kCanUseRoots, Operator::kNoThrow);
286 106879 : allocate_operator_.set(common()->Call(call_descriptor));
287 : }
288 : Node* vfalse = __ BitcastTaggedToWord(
289 179528 : __ Call(allocate_operator_.get(), target, size));
290 179528 : vfalse = __ IntSub(vfalse, __ IntPtrConstant(kHeapObjectTag));
291 : __ Goto(&done, vfalse);
292 : }
293 :
294 : __ Bind(&done);
295 :
296 : // Compute the new top and write it back.
297 359055 : top = __ IntAdd(done.PhiAt(0), __ IntPtrConstant(object_size));
298 : __ Store(StoreRepresentation(MachineType::PointerRepresentation(),
299 : kNoWriteBarrier),
300 359055 : top_address, __ IntPtrConstant(0), top);
301 :
302 : // Compute the initial object address.
303 : value = __ BitcastWordToTagged(
304 359056 : __ IntAdd(done.PhiAt(0), __ IntPtrConstant(kHeapObjectTag)));
305 :
306 : // Start a new allocation group.
307 : AllocationGroup* group =
308 179528 : new (zone()) AllocationGroup(value, pretenure, size, zone());
309 : state = AllocationState::Open(group, object_size, top, zone());
310 : }
311 : } else {
312 15703 : auto call_runtime = __ MakeDeferredLabel();
313 15703 : auto done = __ MakeLabel(MachineRepresentation::kTaggedPointer);
314 :
315 : // Load allocation top and limit.
316 : Node* top =
317 15703 : __ Load(MachineType::Pointer(), top_address, __ IntPtrConstant(0));
318 : Node* limit =
319 15703 : __ Load(MachineType::Pointer(), limit_address, __ IntPtrConstant(0));
320 :
321 : // Compute the new top.
322 15703 : Node* new_top = __ IntAdd(top, size);
323 :
324 : // Check if we can do bump pointer allocation here.
325 15703 : Node* check = __ UintLessThan(new_top, limit);
326 15703 : __ GotoIfNot(check, &call_runtime);
327 : __ Store(StoreRepresentation(MachineType::PointerRepresentation(),
328 : kNoWriteBarrier),
329 31406 : top_address, __ IntPtrConstant(0), new_top);
330 : __ Goto(&done, __ BitcastWordToTagged(
331 15703 : __ IntAdd(top, __ IntPtrConstant(kHeapObjectTag))));
332 :
333 : __ Bind(&call_runtime);
334 : Node* target =
335 : pretenure == NOT_TENURED ? __ AllocateInNewSpaceStubConstant()
336 : : __
337 15703 : AllocateInOldSpaceStubConstant();
338 15703 : if (!allocate_operator_.is_set()) {
339 : auto descriptor = AllocateDescriptor{};
340 : auto call_descriptor = Linkage::GetStubCallDescriptor(
341 : graph()->zone(), descriptor, descriptor.GetStackParameterCount(),
342 4824 : CallDescriptor::kCanUseRoots, Operator::kNoThrow);
343 4824 : allocate_operator_.set(common()->Call(call_descriptor));
344 : }
345 15703 : __ Goto(&done, __ Call(allocate_operator_.get(), target, size));
346 :
347 : __ Bind(&done);
348 : value = done.PhiAt(0);
349 :
350 : // Create an unfoldable allocation group.
351 : AllocationGroup* group =
352 15703 : new (zone()) AllocationGroup(value, pretenure, zone());
353 : state = AllocationState::Closed(group, zone());
354 : }
355 :
356 236432 : effect = __ ExtractCurrentEffect();
357 236431 : control = __ ExtractCurrentControl();
358 :
359 : // Replace all effect uses of {node} with the {effect}, enqueue the
360 : // effect uses for further processing, and replace all value uses of
361 : // {node} with the {value}.
362 11307079 : for (Edge edge : node->use_edges()) {
363 5417108 : if (NodeProperties::IsEffectEdge(edge)) {
364 473088 : EnqueueUse(edge.from(), edge.index(), state);
365 236544 : edge.UpdateTo(effect);
366 5180564 : } else if (NodeProperties::IsValueEdge(edge)) {
367 2878607 : edge.UpdateTo(value);
368 : } else {
369 : DCHECK(NodeProperties::IsControlEdge(edge));
370 2301958 : edge.UpdateTo(control);
371 : }
372 : }
373 :
374 : // Kill the {node} to make sure we don't leave dangling dead uses.
375 236432 : node->Kill();
376 236432 : }
377 :
378 : #undef __
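// A minimal standalone sketch of the bump-pointer fast path that
// VisitAllocateRaw() emits as graph nodes, assuming <cstdint>'s uintptr_t for
// raw addresses and an illustrative allocation-stub callback; the real
// lowering builds the equivalent Load/Store/Call nodes through the
// GraphAssembler above, and kTagSketch stands in for kHeapObjectTag.
static uintptr_t AllocateRawSketch(uintptr_t size, uintptr_t* top_address,
                                   uintptr_t limit,
                                   uintptr_t (*call_allocate_stub)(uintptr_t)) {
  constexpr uintptr_t kTagSketch = 1;
  uintptr_t top = *top_address;
  uintptr_t new_top = top + size;
  if (new_top < limit) {
    // Enough linear space left: bump the top pointer and tag the old top.
    *top_address = new_top;
    return top + kTagSketch;
  }
  // Not enough space: fall back to the allocation stub, which may trigger GC
  // and already returns a tagged pointer.
  return call_allocate_stub(size);
}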
379 :
380 8233757 : void MemoryOptimizer::VisitCall(Node* node, AllocationState const* state) {
381 : DCHECK_EQ(IrOpcode::kCall, node->opcode());
382 : // If the call can allocate, we start with a fresh state.
383 8255246 : if (!(CallDescriptorOf(node->op())->flags() & CallDescriptor::kNoAllocate)) {
384 : state = empty_state();
385 : }
386 4127623 : EnqueueUses(node, state);
387 4127628 : }
388 :
389 340 : void MemoryOptimizer::VisitCallWithCallerSavedRegisters(
390 680 : Node* node, AllocationState const* state) {
391 : DCHECK_EQ(IrOpcode::kCallWithCallerSavedRegisters, node->opcode());
392 : // If the call can allocate, we start with a fresh state.
393 680 : if (!(CallDescriptorOf(node->op())->flags() & CallDescriptor::kNoAllocate)) {
394 : state = empty_state();
395 : }
396 340 : EnqueueUses(node, state);
397 340 : }
398 :
399 28028 : void MemoryOptimizer::VisitLoadElement(Node* node,
400 : AllocationState const* state) {
401 : DCHECK_EQ(IrOpcode::kLoadElement, node->opcode());
402 28028 : ElementAccess const& access = ElementAccessOf(node->op());
403 : Node* index = node->InputAt(1);
404 28028 : node->ReplaceInput(1, ComputeIndex(access, index));
405 28028 : if (NeedsPoisoning(access.load_sensitivity) &&
406 0 : access.machine_type.representation() !=
407 : MachineRepresentation::kTaggedPointer) {
408 : NodeProperties::ChangeOp(node,
409 0 : machine()->PoisonedLoad(access.machine_type));
410 : } else {
411 28028 : NodeProperties::ChangeOp(node, machine()->Load(access.machine_type));
412 : }
413 28028 : EnqueueUses(node, state);
414 28028 : }
415 :
416 3796616 : void MemoryOptimizer::VisitLoadField(Node* node, AllocationState const* state) {
417 : DCHECK_EQ(IrOpcode::kLoadField, node->opcode());
418 3796616 : FieldAccess const& access = FieldAccessOf(node->op());
419 5694924 : Node* offset = jsgraph()->IntPtrConstant(access.offset - access.tag());
420 1898307 : node->InsertInput(graph()->zone(), 1, offset);
421 1898308 : if (NeedsPoisoning(access.load_sensitivity) &&
422 0 : access.machine_type.representation() !=
423 : MachineRepresentation::kTaggedPointer) {
424 : NodeProperties::ChangeOp(node,
425 0 : machine()->PoisonedLoad(access.machine_type));
426 : } else {
427 1898308 : NodeProperties::ChangeOp(node, machine()->Load(access.machine_type));
428 : }
429 1898308 : EnqueueUses(node, state);
430 1898308 : }
431 :
432 42564 : void MemoryOptimizer::VisitStoreElement(Node* node,
433 : AllocationState const* state) {
434 : DCHECK_EQ(IrOpcode::kStoreElement, node->opcode());
435 42564 : ElementAccess const& access = ElementAccessOf(node->op());
436 : Node* object = node->InputAt(0);
437 : Node* index = node->InputAt(1);
438 : WriteBarrierKind write_barrier_kind =
439 42564 : ComputeWriteBarrierKind(object, state, access.write_barrier_kind);
440 42564 : node->ReplaceInput(1, ComputeIndex(access, index));
441 : NodeProperties::ChangeOp(
442 : node, machine()->Store(StoreRepresentation(
443 85128 : access.machine_type.representation(), write_barrier_kind)));
444 42564 : EnqueueUses(node, state);
445 42564 : }
446 :
447 2440555 : void MemoryOptimizer::VisitStoreField(Node* node,
448 2440556 : AllocationState const* state) {
449 : DCHECK_EQ(IrOpcode::kStoreField, node->opcode());
450 4881111 : FieldAccess const& access = FieldAccessOf(node->op());
451 : Node* object = node->InputAt(0);
452 : WriteBarrierKind write_barrier_kind =
453 2440556 : ComputeWriteBarrierKind(object, state, access.write_barrier_kind);
454 7321668 : Node* offset = jsgraph()->IntPtrConstant(access.offset - access.tag());
455 2440558 : node->InsertInput(graph()->zone(), 1, offset);
456 : NodeProperties::ChangeOp(
457 : node, machine()->Store(StoreRepresentation(
458 4881114 : access.machine_type.representation(), write_barrier_kind)));
459 2440557 : EnqueueUses(node, state);
460 2440557 : }
461 :
462 263400 : void MemoryOptimizer::VisitStore(Node* node, AllocationState const* state) {
463 : DCHECK_EQ(IrOpcode::kStore, node->opcode());
464 263400 : StoreRepresentation representation = StoreRepresentationOf(node->op());
465 : Node* object = node->InputAt(0);
466 : WriteBarrierKind write_barrier_kind = ComputeWriteBarrierKind(
467 263400 : object, state, representation.write_barrier_kind());
468 263400 : if (write_barrier_kind != representation.write_barrier_kind()) {
469 : NodeProperties::ChangeOp(
470 : node, machine()->Store(StoreRepresentation(
471 1136 : representation.representation(), write_barrier_kind)));
472 : }
473 263400 : EnqueueUses(node, state);
474 263400 : }
475 :
476 0 : void MemoryOptimizer::VisitOtherEffect(Node* node,
477 : AllocationState const* state) {
478 3847749 : EnqueueUses(node, state);
479 0 : }
480 :
481 268395 : Node* MemoryOptimizer::ComputeIndex(ElementAccess const& access, Node* index) {
482 : int const element_size_shift =
483 70592 : ElementSizeLog2Of(access.machine_type.representation());
484 70592 : if (element_size_shift) {
485 : index = graph()->NewNode(machine()->WordShl(), index,
486 196041 : jsgraph()->IntPtrConstant(element_size_shift));
487 : }
488 141184 : int const fixed_offset = access.header_size - access.tag();
489 70592 : if (fixed_offset) {
490 : index = graph()->NewNode(machine()->IntAdd(), index,
491 185592 : jsgraph()->IntPtrConstant(fixed_offset));
492 : }
493 70592 : return index;
494 : }
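// For example, with 8-byte elements (element_size_shift == 3), a 16-byte
// header and a tag of 1, element index 2 is rewritten to
// (2 << 3) + (16 - 1) = 31, i.e. the raw byte offset of that element from the
// tagged object pointer.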
495 :
496 2746520 : WriteBarrierKind MemoryOptimizer::ComputeWriteBarrierKind(
497 : Node* object, AllocationState const* state,
498 : WriteBarrierKind write_barrier_kind) {
499 4706321 : if (state->IsNewSpaceAllocation() && state->group()->Contains(object)) {
500 : write_barrier_kind = kNoWriteBarrier;
501 : }
502 2746521 : return write_barrier_kind;
503 : }
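// Rationale: the generational write barrier records pointers stored into
// old-space objects so the scavenger can find them later; if {object} was
// created by the current new-space allocation group (and no allocating call
// has intervened, which would have reset the state), it is known to still be
// in new space and the barrier can be omitted.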
504 :
505 1774090 : MemoryOptimizer::AllocationState const* MemoryOptimizer::MergeStates(
506 85916 : AllocationStates const& states) {
507 : // Check if all states are the same; or at least if all allocation
508 : // states belong to the same allocation group.
509 6357572 : AllocationState const* state = states.front();
510 : AllocationGroup* group = state->group();
511 9166964 : for (size_t i = 1; i < states.size(); ++i) {
512 2809392 : if (states[i] != state) state = nullptr;
513 2809392 : if (states[i]->group() != group) group = nullptr;
514 : }
515 1774090 : if (state == nullptr) {
516 85916 : if (group != nullptr) {
517 : // We cannot fold any more allocations into this group, but we can still
518 : // eliminate write barriers on stores to this group.
519 : // TODO(bmeurer): We could potentially just create a Phi here to merge
520 : // the various tops; but we need to pay special attention not to create
521 : // an unschedulable graph.
522 : state = AllocationState::Closed(group, zone());
523 : } else {
524 : // The states are from different allocation groups.
525 : state = empty_state();
526 : }
527 : }
528 1774090 : return state;
529 : }
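// The merge is effectively a three-level lattice: if every predecessor carries
// the identical state it is reused as-is; if the states differ but all belong
// to the same allocation group, the group survives in a Closed state (write
// barriers can still be elided, but no further folding into it happens);
// otherwise the result is the empty state.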
530 :
531 4866373 : void MemoryOptimizer::EnqueueMerge(Node* node, int index,
532 1915528 : AllocationState const* state) {
533 : DCHECK_EQ(IrOpcode::kEffectPhi, node->opcode());
534 4866373 : int const input_count = node->InputCount() - 1;
535 : DCHECK_LT(0, input_count);
536 4866373 : Node* const control = node->InputAt(input_count);
537 4866373 : if (control->opcode() == IrOpcode::kLoop) {
538 : // For loops we always start with an empty state at the beginning.
539 424320 : if (index == 0) EnqueueUses(node, empty_state());
540 : } else {
541 : DCHECK_EQ(IrOpcode::kMerge, control->opcode());
542 : // Check if we already know about this pending merge.
543 4583493 : NodeId const id = node->id();
544 : auto it = pending_.find(id);
545 4583493 : if (it == pending_.end()) {
546 : // Insert a new pending merge.
547 1774094 : it = pending_.insert(std::make_pair(id, AllocationStates(zone()))).first;
548 : }
549 : // Add the next input state.
550 4583499 : it->second.push_back(state);
551 : // Check if states for all inputs are available by now.
552 9167004 : if (it->second.size() == static_cast<size_t>(input_count)) {
553 : // All inputs to this effect merge are done; merge the states given all
554 : // input constraints, drop the pending merge and enqueue uses of the
555 : // EffectPhi {node}.
556 1774096 : state = MergeStates(it->second);
557 1774089 : EnqueueUses(node, state);
558 : pending_.erase(it);
559 : }
560 : }
561 4866372 : }
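// Loops are handled conservatively: only the state arriving over the forward
// edge (index 0) triggers propagation, and it is reset to the empty state,
// because the states on the loop's back edges are not yet known when the
// header is first reached.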
562 :
563 15087271 : void MemoryOptimizer::EnqueueUses(Node* node, AllocationState const* state) {
564 120483247 : for (Edge const edge : node->use_edges()) {
565 45154348 : if (NodeProperties::IsEffectEdge(edge)) {
566 18600881 : EnqueueUse(edge.from(), edge.index(), state);
567 : }
568 : }
569 15087280 : }
570 :
571 18837427 : void MemoryOptimizer::EnqueueUse(Node* node, int index,
572 : AllocationState const* state) {
573 18837427 : if (node->opcode() == IrOpcode::kEffectPhi) {
574 : // An EffectPhi represents a merge of different effect chains, which
575 : // needs special handling depending on whether the merge is part of a
576 : // loop or just a normal control join.
577 4866382 : EnqueueMerge(node, index, state);
578 : } else {
579 13971045 : Token token = {node, state};
580 : tokens_.push(token);
581 : }
582 18837424 : }
583 :
584 5100945 : Graph* MemoryOptimizer::graph() const { return jsgraph()->graph(); }
585 :
586 472863 : Isolate* MemoryOptimizer::isolate() const { return jsgraph()->isolate(); }
587 :
588 111703 : CommonOperatorBuilder* MemoryOptimizer::common() const {
589 152904 : return jsgraph()->common();
590 : }
591 :
592 4579005 : MachineOperatorBuilder* MemoryOptimizer::machine() const {
593 4579005 : return jsgraph()->machine();
594 : }
595 :
596 1926336 : bool MemoryOptimizer::NeedsPoisoning(LoadSensitivity load_sensitivity) const {
597 : // Safe loads do not need poisoning.
598 1926336 : if (load_sensitivity == LoadSensitivity::kSafe) return false;
599 :
600 1926336 : switch (poisoning_level_) {
601 : case PoisoningMitigationLevel::kDontPoison:
602 : return false;
603 : case PoisoningMitigationLevel::kPoisonAll:
604 0 : return true;
605 : case PoisoningMitigationLevel::kPoisonCriticalOnly:
606 0 : return load_sensitivity == LoadSensitivity::kCritical;
607 : }
608 0 : UNREACHABLE();
609 : }
610 :
611 : } // namespace compiler
612 : } // namespace internal
613 178779 : } // namespace v8