// Copyright 2016 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/compiler/memory-optimizer.h"

#include "src/compiler/js-graph.h"
#include "src/compiler/linkage.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/node-properties.h"
#include "src/compiler/node.h"
#include "src/compiler/simplified-operator.h"
#include "src/interface-descriptors.h"

namespace v8 {
namespace internal {
namespace compiler {

MemoryOptimizer::MemoryOptimizer(JSGraph* jsgraph, Zone* zone,
                                 PoisoningMitigationLevel poisoning_level,
                                 AllocationFolding allocation_folding)
    : jsgraph_(jsgraph),
      empty_state_(AllocationState::Empty(zone)),
      pending_(zone),
      tokens_(zone),
      zone_(zone),
      graph_assembler_(jsgraph, nullptr, nullptr, zone),
      poisoning_level_(poisoning_level),
      allocation_folding_(allocation_folding) {}

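// Performs the actual optimization: seeds the worklist with the effect uses
// of the graph's start node and processes tokens until none remain.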
void MemoryOptimizer::Optimize() {
  EnqueueUses(graph()->start(), empty_state());
  while (!tokens_.empty()) {
    Token const token = tokens_.front();
    tokens_.pop();
    VisitNode(token.node, token.state);
  }
  DCHECK(pending_.empty());
  DCHECK(tokens_.empty());
}

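// An AllocationGroup tracks the nodes that belong to a single (possibly
// folded) allocation, together with its pretenuring decision and, for
// foldable groups, the node holding the reserved size.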
MemoryOptimizer::AllocationGroup::AllocationGroup(Node* node,
                                                  PretenureFlag pretenure,
                                                  Zone* zone)
    : node_ids_(zone), pretenure_(pretenure), size_(nullptr) {
  node_ids_.insert(node->id());
}

MemoryOptimizer::AllocationGroup::AllocationGroup(Node* node,
                                                  PretenureFlag pretenure,
                                                  Node* size, Zone* zone)
    : node_ids_(zone), pretenure_(pretenure), size_(size) {
  node_ids_.insert(node->id());
}

void MemoryOptimizer::AllocationGroup::Add(Node* node) {
  node_ids_.insert(node->id());
}

bool MemoryOptimizer::AllocationGroup::Contains(Node* node) const {
  return node_ids_.find(node->id()) != node_ids_.end();
}

MemoryOptimizer::AllocationState::AllocationState()
    : group_(nullptr), size_(std::numeric_limits<int>::max()), top_(nullptr) {}

MemoryOptimizer::AllocationState::AllocationState(AllocationGroup* group)
    : group_(group), size_(std::numeric_limits<int>::max()), top_(nullptr) {}

MemoryOptimizer::AllocationState::AllocationState(AllocationGroup* group,
                                                  intptr_t size, Node* top)
    : group_(group), size_(size), top_(top) {}

bool MemoryOptimizer::AllocationState::IsNewSpaceAllocation() const {
  return group() && group()->IsNewSpaceAllocation();
}

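// Dispatches an effect chain node to the matching visitor. Opcodes that
// cannot trigger GC just propagate the current allocation state; anything
// not handled here must not produce an effect output.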
void MemoryOptimizer::VisitNode(Node* node, AllocationState const* state) {
  DCHECK(!node->IsDead());
  DCHECK_LT(0, node->op()->EffectInputCount());
  switch (node->opcode()) {
    case IrOpcode::kAllocate:
      // Allocate nodes were purged from the graph in effect-control
      // linearization.
      UNREACHABLE();
    case IrOpcode::kAllocateRaw:
      return VisitAllocateRaw(node, state);
    case IrOpcode::kCall:
      return VisitCall(node, state);
    case IrOpcode::kCallWithCallerSavedRegisters:
      return VisitCallWithCallerSavedRegisters(node, state);
    case IrOpcode::kLoadElement:
      return VisitLoadElement(node, state);
    case IrOpcode::kLoadField:
      return VisitLoadField(node, state);
    case IrOpcode::kStoreElement:
      return VisitStoreElement(node, state);
    case IrOpcode::kStoreField:
      return VisitStoreField(node, state);
    case IrOpcode::kBitcastTaggedToWord:
    case IrOpcode::kBitcastWordToTagged:
    case IrOpcode::kComment:
    case IrOpcode::kDebugAbort:
    case IrOpcode::kDebugBreak:
    case IrOpcode::kDeoptimizeIf:
    case IrOpcode::kDeoptimizeUnless:
    case IrOpcode::kIfException:
    case IrOpcode::kLoad:
    case IrOpcode::kPoisonedLoad:
    case IrOpcode::kProtectedLoad:
    case IrOpcode::kProtectedStore:
    case IrOpcode::kRetain:
    case IrOpcode::kStore:
    case IrOpcode::kTaggedPoisonOnSpeculation:
    case IrOpcode::kUnalignedLoad:
    case IrOpcode::kUnalignedStore:
    case IrOpcode::kUnsafePointerAdd:
    case IrOpcode::kUnreachable:
    case IrOpcode::kWord32AtomicAdd:
    case IrOpcode::kWord32AtomicAnd:
    case IrOpcode::kWord32AtomicCompareExchange:
    case IrOpcode::kWord32AtomicExchange:
    case IrOpcode::kWord32AtomicLoad:
    case IrOpcode::kWord32AtomicOr:
    case IrOpcode::kWord32AtomicPairAdd:
    case IrOpcode::kWord32AtomicPairAnd:
    case IrOpcode::kWord32AtomicPairCompareExchange:
    case IrOpcode::kWord32AtomicPairExchange:
    case IrOpcode::kWord32AtomicPairLoad:
    case IrOpcode::kWord32AtomicPairOr:
    case IrOpcode::kWord32AtomicPairStore:
    case IrOpcode::kWord32AtomicPairSub:
    case IrOpcode::kWord32AtomicPairXor:
    case IrOpcode::kWord32AtomicStore:
    case IrOpcode::kWord32AtomicSub:
    case IrOpcode::kWord32AtomicXor:
    case IrOpcode::kWord32PoisonOnSpeculation:
    case IrOpcode::kWord64AtomicAdd:
    case IrOpcode::kWord64AtomicAnd:
    case IrOpcode::kWord64AtomicCompareExchange:
    case IrOpcode::kWord64AtomicExchange:
    case IrOpcode::kWord64AtomicLoad:
    case IrOpcode::kWord64AtomicOr:
    case IrOpcode::kWord64AtomicStore:
    case IrOpcode::kWord64AtomicSub:
    case IrOpcode::kWord64AtomicXor:
    case IrOpcode::kWord64PoisonOnSpeculation:
      // These operations cannot trigger GC.
      return VisitOtherEffect(node, state);
    default:
      break;
  }
  DCHECK_EQ(0, node->op()->EffectOutputCount());
}

#define __ gasm()->

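// Lowers a kAllocateRaw node into inline bump-pointer allocation with a
// fallback call to the allocation stub. Small constant-size allocations may
// be folded into the allocation group of the incoming {state} or start a new
// open group; dynamically sized allocations end up in a closed group.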
void MemoryOptimizer::VisitAllocateRaw(Node* node,
                                       AllocationState const* state) {
  DCHECK_EQ(IrOpcode::kAllocateRaw, node->opcode());
  Node* value;
  Node* size = node->InputAt(0);
  Node* effect = node->InputAt(1);
  Node* control = node->InputAt(2);

  gasm()->Reset(effect, control);

  PretenureFlag pretenure = PretenureFlagOf(node->op());

  // Propagate tenuring from outer allocations to inner allocations, i.e.
  // when we allocate an object in old space and store a newly allocated
  // child object into the pretenured object, then the newly allocated
  // child object also should get pretenured to old space.
  if (pretenure == TENURED) {
    for (Edge const edge : node->use_edges()) {
      Node* const user = edge.from();
      if (user->opcode() == IrOpcode::kStoreField && edge.index() == 0) {
        Node* const child = user->InputAt(1);
        if (child->opcode() == IrOpcode::kAllocateRaw &&
            PretenureFlagOf(child->op()) == NOT_TENURED) {
          NodeProperties::ChangeOp(child, node->op());
          break;
        }
      }
    }
  } else {
    DCHECK_EQ(NOT_TENURED, pretenure);
    for (Edge const edge : node->use_edges()) {
      Node* const user = edge.from();
      if (user->opcode() == IrOpcode::kStoreField && edge.index() == 1) {
        Node* const parent = user->InputAt(0);
        if (parent->opcode() == IrOpcode::kAllocateRaw &&
            PretenureFlagOf(parent->op()) == TENURED) {
          pretenure = TENURED;
          break;
        }
      }
    }
  }

  // Determine the top/limit addresses.
  Node* top_address = __ ExternalConstant(
      pretenure == NOT_TENURED
          ? ExternalReference::new_space_allocation_top_address(isolate())
          : ExternalReference::old_space_allocation_top_address(isolate()));
  Node* limit_address = __ ExternalConstant(
      pretenure == NOT_TENURED
          ? ExternalReference::new_space_allocation_limit_address(isolate())
          : ExternalReference::old_space_allocation_limit_address(isolate()));

  // Check if we can fold this allocation into a previous allocation
  // represented by the incoming {state}.
  IntPtrMatcher m(size);
  if (m.IsInRange(0, kMaxRegularHeapObjectSize)) {
    intptr_t const object_size = m.Value();
    if (allocation_folding_ == AllocationFolding::kDoAllocationFolding &&
        state->size() <= kMaxRegularHeapObjectSize - object_size &&
        state->group()->pretenure() == pretenure) {
      // We can fold this Allocate {node} into the allocation {group}
      // represented by the given {state}. Compute the upper bound for
      // the new {state}.
      intptr_t const state_size = state->size() + object_size;

      // Update the reservation check to the actual maximum upper bound.
      AllocationGroup* const group = state->group();
      if (machine()->Is64()) {
        if (OpParameter<int64_t>(group->size()->op()) < state_size) {
          NodeProperties::ChangeOp(group->size(),
                                   common()->Int64Constant(state_size));
        }
      } else {
        if (OpParameter<int32_t>(group->size()->op()) < state_size) {
          NodeProperties::ChangeOp(
              group->size(),
              common()->Int32Constant(static_cast<int32_t>(state_size)));
        }
      }

      // Update the allocation top with the new object allocation.
      // TODO(bmeurer): Defer writing back top as much as possible.
      Node* top = __ IntAdd(state->top(), size);
      __ Store(StoreRepresentation(MachineType::PointerRepresentation(),
                                   kNoWriteBarrier),
               top_address, __ IntPtrConstant(0), top);

      // Compute the effective inner allocated address.
      value = __ BitcastWordToTagged(
          __ IntAdd(state->top(), __ IntPtrConstant(kHeapObjectTag)));

      // Extend the allocation {group}.
      group->Add(value);
      state = AllocationState::Open(group, state_size, top, zone());
    } else {
      auto call_runtime = __ MakeDeferredLabel();
      auto done = __ MakeLabel(MachineType::PointerRepresentation());

      // Setup a mutable reservation size node; will be patched as we fold
      // additional allocations into this new group.
      Node* size = __ UniqueIntPtrConstant(object_size);

      // Load allocation top and limit.
      Node* top =
          __ Load(MachineType::Pointer(), top_address, __ IntPtrConstant(0));
      Node* limit =
          __ Load(MachineType::Pointer(), limit_address, __ IntPtrConstant(0));

      // Check if we need to collect garbage before we can start bump pointer
      // allocation (always done for folded allocations).
      Node* check = __ UintLessThan(__ IntAdd(top, size), limit);

      __ GotoIfNot(check, &call_runtime);
      __ Goto(&done, top);

      __ Bind(&call_runtime);
      {
        Node* target =
            pretenure == NOT_TENURED ? __ AllocateInNewSpaceStubConstant()
                                     : __
                                       AllocateInOldSpaceStubConstant();
        if (!allocate_operator_.is_set()) {
          auto descriptor = AllocateDescriptor{};
          auto call_descriptor = Linkage::GetStubCallDescriptor(
              graph()->zone(), descriptor, descriptor.GetStackParameterCount(),
              CallDescriptor::kCanUseRoots, Operator::kNoThrow);
          allocate_operator_.set(common()->Call(call_descriptor));
        }
        Node* vfalse = __ BitcastTaggedToWord(
            __ Call(allocate_operator_.get(), target, size));
        vfalse = __ IntSub(vfalse, __ IntPtrConstant(kHeapObjectTag));
        __ Goto(&done, vfalse);
      }

      __ Bind(&done);

      // Compute the new top and write it back.
      top = __ IntAdd(done.PhiAt(0), __ IntPtrConstant(object_size));
      __ Store(StoreRepresentation(MachineType::PointerRepresentation(),
                                   kNoWriteBarrier),
               top_address, __ IntPtrConstant(0), top);

      // Compute the initial object address.
      value = __ BitcastWordToTagged(
          __ IntAdd(done.PhiAt(0), __ IntPtrConstant(kHeapObjectTag)));

      // Start a new allocation group.
      AllocationGroup* group =
          new (zone()) AllocationGroup(value, pretenure, size, zone());
      state = AllocationState::Open(group, object_size, top, zone());
    }
  } else {
    auto call_runtime = __ MakeDeferredLabel();
    auto done = __ MakeLabel(MachineRepresentation::kTaggedPointer);

    // Load allocation top and limit.
    Node* top =
        __ Load(MachineType::Pointer(), top_address, __ IntPtrConstant(0));
    Node* limit =
        __ Load(MachineType::Pointer(), limit_address, __ IntPtrConstant(0));

    // Compute the new top.
    Node* new_top = __ IntAdd(top, size);

    // Check if we can do bump pointer allocation here.
    Node* check = __ UintLessThan(new_top, limit);
    __ GotoIfNot(check, &call_runtime);
    __ Store(StoreRepresentation(MachineType::PointerRepresentation(),
                                 kNoWriteBarrier),
             top_address, __ IntPtrConstant(0), new_top);
    __ Goto(&done, __ BitcastWordToTagged(
                       __ IntAdd(top, __ IntPtrConstant(kHeapObjectTag))));

    __ Bind(&call_runtime);
    Node* target =
        pretenure == NOT_TENURED ? __ AllocateInNewSpaceStubConstant()
                                 : __
                                   AllocateInOldSpaceStubConstant();
    if (!allocate_operator_.is_set()) {
      auto descriptor = AllocateDescriptor{};
      auto call_descriptor = Linkage::GetStubCallDescriptor(
          graph()->zone(), descriptor, descriptor.GetStackParameterCount(),
          CallDescriptor::kCanUseRoots, Operator::kNoThrow);
      allocate_operator_.set(common()->Call(call_descriptor));
    }
    __ Goto(&done, __ Call(allocate_operator_.get(), target, size));

    __ Bind(&done);
    value = done.PhiAt(0);

    // Create an unfoldable allocation group.
    AllocationGroup* group =
        new (zone()) AllocationGroup(value, pretenure, zone());
    state = AllocationState::Closed(group, zone());
  }

  effect = __ ExtractCurrentEffect();
  control = __ ExtractCurrentControl();

  // Replace all effect uses of {node} with the {effect}, enqueue the
  // effect uses for further processing, and replace all value uses of
  // {node} with the {value}.
  for (Edge edge : node->use_edges()) {
    if (NodeProperties::IsEffectEdge(edge)) {
      EnqueueUse(edge.from(), edge.index(), state);
      edge.UpdateTo(effect);
    } else if (NodeProperties::IsValueEdge(edge)) {
      edge.UpdateTo(value);
    } else {
      DCHECK(NodeProperties::IsControlEdge(edge));
      edge.UpdateTo(control);
    }
  }

  // Kill the {node} to make sure we don't leave dangling dead uses.
  node->Kill();
}

#undef __

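// Calls that may allocate invalidate the current allocation state, so stores
// following them conservatively keep their write barriers.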
void MemoryOptimizer::VisitCall(Node* node, AllocationState const* state) {
  DCHECK_EQ(IrOpcode::kCall, node->opcode());
  // If the call can allocate, we start with a fresh state.
  if (!(CallDescriptorOf(node->op())->flags() & CallDescriptor::kNoAllocate)) {
    state = empty_state();
  }
  EnqueueUses(node, state);
}

void MemoryOptimizer::VisitCallWithCallerSavedRegisters(
    Node* node, AllocationState const* state) {
  DCHECK_EQ(IrOpcode::kCallWithCallerSavedRegisters, node->opcode());
  // If the call can allocate, we start with a fresh state.
  if (!(CallDescriptorOf(node->op())->flags() & CallDescriptor::kNoAllocate)) {
    state = empty_state();
  }
  EnqueueUses(node, state);
}

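// The two load visitors below lower LoadElement/LoadField to machine loads,
// computing the effective offset and switching to PoisonedLoad when
// speculation poisoning is required.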
void MemoryOptimizer::VisitLoadElement(Node* node,
                                       AllocationState const* state) {
  DCHECK_EQ(IrOpcode::kLoadElement, node->opcode());
  ElementAccess const& access = ElementAccessOf(node->op());
  Node* index = node->InputAt(1);
  node->ReplaceInput(1, ComputeIndex(access, index));
  if (NeedsPoisoning(access.load_sensitivity) &&
      access.machine_type.representation() !=
          MachineRepresentation::kTaggedPointer) {
    NodeProperties::ChangeOp(node,
                             machine()->PoisonedLoad(access.machine_type));
  } else {
    NodeProperties::ChangeOp(node, machine()->Load(access.machine_type));
  }
  EnqueueUses(node, state);
}

void MemoryOptimizer::VisitLoadField(Node* node, AllocationState const* state) {
  DCHECK_EQ(IrOpcode::kLoadField, node->opcode());
  FieldAccess const& access = FieldAccessOf(node->op());
  Node* offset = jsgraph()->IntPtrConstant(access.offset - access.tag());
  node->InsertInput(graph()->zone(), 1, offset);
  if (NeedsPoisoning(access.load_sensitivity) &&
      access.machine_type.representation() !=
          MachineRepresentation::kTaggedPointer) {
    NodeProperties::ChangeOp(node,
                             machine()->PoisonedLoad(access.machine_type));
  } else {
    NodeProperties::ChangeOp(node, machine()->Load(access.machine_type));
  }
  EnqueueUses(node, state);
}

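// The two store visitors below lower StoreElement/StoreField to machine
// stores, using the allocation state to elide write barriers for stores into
// freshly allocated new-space objects.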
void MemoryOptimizer::VisitStoreElement(Node* node,
                                        AllocationState const* state) {
  DCHECK_EQ(IrOpcode::kStoreElement, node->opcode());
  ElementAccess const& access = ElementAccessOf(node->op());
  Node* object = node->InputAt(0);
  Node* index = node->InputAt(1);
  WriteBarrierKind write_barrier_kind =
      ComputeWriteBarrierKind(object, state, access.write_barrier_kind);
  node->ReplaceInput(1, ComputeIndex(access, index));
  NodeProperties::ChangeOp(
      node, machine()->Store(StoreRepresentation(
                access.machine_type.representation(), write_barrier_kind)));
  EnqueueUses(node, state);
}

void MemoryOptimizer::VisitStoreField(Node* node,
                                      AllocationState const* state) {
  DCHECK_EQ(IrOpcode::kStoreField, node->opcode());
  FieldAccess const& access = FieldAccessOf(node->op());
  Node* object = node->InputAt(0);
  WriteBarrierKind write_barrier_kind =
      ComputeWriteBarrierKind(object, state, access.write_barrier_kind);
  Node* offset = jsgraph()->IntPtrConstant(access.offset - access.tag());
  node->InsertInput(graph()->zone(), 1, offset);
  NodeProperties::ChangeOp(
      node, machine()->Store(StoreRepresentation(
                access.machine_type.representation(), write_barrier_kind)));
  EnqueueUses(node, state);
}

void MemoryOptimizer::VisitOtherEffect(Node* node,
                                       AllocationState const* state) {
  EnqueueUses(node, state);
}

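// Turns an element index into a byte offset, scaling by the element size and
// adding the header size minus the tag.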
Node* MemoryOptimizer::ComputeIndex(ElementAccess const& access, Node* index) {
  int const element_size_shift =
      ElementSizeLog2Of(access.machine_type.representation());
  if (element_size_shift) {
    index = graph()->NewNode(machine()->WordShl(), index,
                             jsgraph()->IntPtrConstant(element_size_shift));
  }
  int const fixed_offset = access.header_size - access.tag();
  if (fixed_offset) {
    index = graph()->NewNode(machine()->IntAdd(), index,
                             jsgraph()->IntPtrConstant(fixed_offset));
  }
  return index;
}

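// A store into an object that the current state knows to be a new-space
// allocation needs no write barrier.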
WriteBarrierKind MemoryOptimizer::ComputeWriteBarrierKind(
    Node* object, AllocationState const* state,
    WriteBarrierKind write_barrier_kind) {
  if (state->IsNewSpaceAllocation() && state->group()->Contains(object)) {
    write_barrier_kind = kNoWriteBarrier;
  }
  return write_barrier_kind;
}

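// Merges the allocation states arriving at an EffectPhi: identical states are
// kept, states from the same group collapse to a closed state, and anything
// else falls back to the empty state.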
MemoryOptimizer::AllocationState const* MemoryOptimizer::MergeStates(
    AllocationStates const& states) {
  // Check if all states are the same; or at least if all allocation
  // states belong to the same allocation group.
  AllocationState const* state = states.front();
  AllocationGroup* group = state->group();
  for (size_t i = 1; i < states.size(); ++i) {
    if (states[i] != state) state = nullptr;
    if (states[i]->group() != group) group = nullptr;
  }
  if (state == nullptr) {
    if (group != nullptr) {
      // We cannot fold any more allocations into this group, but we can still
      // eliminate write barriers on stores to this group.
      // TODO(bmeurer): We could potentially just create a Phi here to merge
      // the various tops; but we need to pay special attention not to create
      // an unschedulable graph.
      state = AllocationState::Closed(group, zone());
    } else {
      // The states are from different allocation groups.
      state = empty_state();
    }
  }
  return state;
}

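// Handles an effect input reaching an EffectPhi. Loop headers restart with an
// empty state; for ordinary merges the states of all inputs are collected and
// merged once the last one arrives.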
void MemoryOptimizer::EnqueueMerge(Node* node, int index,
                                   AllocationState const* state) {
  DCHECK_EQ(IrOpcode::kEffectPhi, node->opcode());
  int const input_count = node->InputCount() - 1;
  DCHECK_LT(0, input_count);
  Node* const control = node->InputAt(input_count);
  if (control->opcode() == IrOpcode::kLoop) {
    // For loops we always start with an empty state at the beginning.
    if (index == 0) EnqueueUses(node, empty_state());
  } else {
    DCHECK_EQ(IrOpcode::kMerge, control->opcode());
    // Check if we already know about this pending merge.
    NodeId const id = node->id();
    auto it = pending_.find(id);
    if (it == pending_.end()) {
      // Insert a new pending merge.
      it = pending_.insert(std::make_pair(id, AllocationStates(zone()))).first;
    }
    // Add the next input state.
    it->second.push_back(state);
    // Check if states for all inputs are available by now.
    if (it->second.size() == static_cast<size_t>(input_count)) {
      // All inputs to this effect merge are done, merge the states given all
      // input constraints, drop the pending merge and enqueue uses of the
      // EffectPhi {node}.
      state = MergeStates(it->second);
      EnqueueUses(node, state);
      pending_.erase(it);
    }
  }
}

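// EnqueueUses/EnqueueUse push the effect uses of {node} onto the worklist,
// routing EffectPhi uses through EnqueueMerge.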
void MemoryOptimizer::EnqueueUses(Node* node, AllocationState const* state) {
  for (Edge const edge : node->use_edges()) {
    if (NodeProperties::IsEffectEdge(edge)) {
      EnqueueUse(edge.from(), edge.index(), state);
    }
  }
}

void MemoryOptimizer::EnqueueUse(Node* node, int index,
                                 AllocationState const* state) {
  if (node->opcode() == IrOpcode::kEffectPhi) {
    // An EffectPhi represents a merge of different effect chains, which
    // needs special handling depending on whether the merge is part of a
    // loop or just a normal control join.
    EnqueueMerge(node, index, state);
  } else {
    Token token = {node, state};
    tokens_.push(token);
  }
}

Graph* MemoryOptimizer::graph() const { return jsgraph()->graph(); }

Isolate* MemoryOptimizer::isolate() const { return jsgraph()->isolate(); }

CommonOperatorBuilder* MemoryOptimizer::common() const {
  return jsgraph()->common();
}

MachineOperatorBuilder* MemoryOptimizer::machine() const {
  return jsgraph()->machine();
}

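// Decides whether a load must be poisoned, based on the configured mitigation
// level and the load's sensitivity.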
bool MemoryOptimizer::NeedsPoisoning(LoadSensitivity load_sensitivity) const {
  // Safe loads do not need poisoning.
  if (load_sensitivity == LoadSensitivity::kSafe) return false;

  switch (poisoning_level_) {
    case PoisoningMitigationLevel::kDontPoison:
      return false;
    case PoisoningMitigationLevel::kPoisonAll:
      return true;
    case PoisoningMitigationLevel::kPoisonCriticalOnly:
      return load_sensitivity == LoadSensitivity::kCritical;
  }
  UNREACHABLE();
}

}  // namespace compiler
}  // namespace internal
}  // namespace v8