// Copyright 2016 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/compiler/memory-optimizer.h"

#include "src/compiler/js-graph.h"
#include "src/compiler/linkage.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/node-properties.h"
#include "src/compiler/node.h"
#include "src/compiler/simplified-operator.h"
#include "src/interface-descriptors.h"

namespace v8 {
namespace internal {
namespace compiler {

MemoryOptimizer::MemoryOptimizer(JSGraph* jsgraph, Zone* zone,
                                 PoisoningMitigationLevel poisoning_level,
                                 AllocationFolding allocation_folding)
    : jsgraph_(jsgraph),
      empty_state_(AllocationState::Empty(zone)),
      pending_(zone),
      tokens_(zone),
      zone_(zone),
      graph_assembler_(jsgraph, nullptr, nullptr, zone),
      poisoning_level_(poisoning_level),
      allocation_folding_(allocation_folding) {}

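// Performs the optimization as a single forward pass over the effect chains:
// the uses of the start node are enqueued with the empty allocation state,
// and tokens are then processed until the worklist is drained.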
void MemoryOptimizer::Optimize() {
  EnqueueUses(graph()->start(), empty_state());
  while (!tokens_.empty()) {
    Token const token = tokens_.front();
    tokens_.pop();
    VisitNode(token.node, token.state);
  }
  DCHECK(pending_.empty());
  DCHECK(tokens_.empty());
}

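// An AllocationGroup collects the nodes that belong to one (possibly folded)
// allocation region, together with its AllocationType and, for foldable
// groups, the node holding the reserved size.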
MemoryOptimizer::AllocationGroup::AllocationGroup(Node* node,
                                                  AllocationType allocation,
                                                  Zone* zone)
    : node_ids_(zone), allocation_(allocation), size_(nullptr) {
  node_ids_.insert(node->id());
}

MemoryOptimizer::AllocationGroup::AllocationGroup(Node* node,
                                                  AllocationType allocation,
                                                  Node* size, Zone* zone)
    : node_ids_(zone), allocation_(allocation), size_(size) {
  node_ids_.insert(node->id());
}

void MemoryOptimizer::AllocationGroup::Add(Node* node) {
  node_ids_.insert(node->id());
}

bool MemoryOptimizer::AllocationGroup::Contains(Node* node) const {
  return node_ids_.find(node->id()) != node_ids_.end();
}

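// AllocationState flavors: the default state tracks no allocation group, a
// state with only a group is "closed" (no further folding possible), and a
// state that also carries a size and top node is "open" for allocation
// folding.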
MemoryOptimizer::AllocationState::AllocationState()
    : group_(nullptr), size_(std::numeric_limits<int>::max()), top_(nullptr) {}

MemoryOptimizer::AllocationState::AllocationState(AllocationGroup* group)
    : group_(group), size_(std::numeric_limits<int>::max()), top_(nullptr) {}

MemoryOptimizer::AllocationState::AllocationState(AllocationGroup* group,
                                                  intptr_t size, Node* top)
    : group_(group), size_(size), top_(top) {}

bool MemoryOptimizer::AllocationState::IsYoungGenerationAllocation() const {
  return group() && group()->IsYoungGenerationAllocation();
}

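// Dispatches on the opcode: allocations, calls and memory accesses have
// dedicated handlers, operations that cannot trigger GC simply propagate the
// current state, and all remaining opcodes must not have an effect output.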
void MemoryOptimizer::VisitNode(Node* node, AllocationState const* state) {
  DCHECK(!node->IsDead());
  DCHECK_LT(0, node->op()->EffectInputCount());
  switch (node->opcode()) {
    case IrOpcode::kAllocate:
      // Allocate nodes were purged from the graph in effect-control
      // linearization.
      UNREACHABLE();
    case IrOpcode::kAllocateRaw:
      return VisitAllocateRaw(node, state);
    case IrOpcode::kCall:
      return VisitCall(node, state);
    case IrOpcode::kCallWithCallerSavedRegisters:
      return VisitCallWithCallerSavedRegisters(node, state);
    case IrOpcode::kLoadElement:
      return VisitLoadElement(node, state);
    case IrOpcode::kLoadField:
      return VisitLoadField(node, state);
    case IrOpcode::kStoreElement:
      return VisitStoreElement(node, state);
    case IrOpcode::kStoreField:
      return VisitStoreField(node, state);
    case IrOpcode::kStore:
      return VisitStore(node, state);
    case IrOpcode::kBitcastTaggedToWord:
    case IrOpcode::kBitcastWordToTagged:
    case IrOpcode::kComment:
    case IrOpcode::kDebugAbort:
    case IrOpcode::kDebugBreak:
    case IrOpcode::kDeoptimizeIf:
    case IrOpcode::kDeoptimizeUnless:
    case IrOpcode::kIfException:
    case IrOpcode::kLoad:
    case IrOpcode::kPoisonedLoad:
    case IrOpcode::kProtectedLoad:
    case IrOpcode::kProtectedStore:
    case IrOpcode::kRetain:
    case IrOpcode::kTaggedPoisonOnSpeculation:
    case IrOpcode::kUnalignedLoad:
    case IrOpcode::kUnalignedStore:
    case IrOpcode::kUnsafePointerAdd:
    case IrOpcode::kUnreachable:
    case IrOpcode::kWord32AtomicAdd:
    case IrOpcode::kWord32AtomicAnd:
    case IrOpcode::kWord32AtomicCompareExchange:
    case IrOpcode::kWord32AtomicExchange:
    case IrOpcode::kWord32AtomicLoad:
    case IrOpcode::kWord32AtomicOr:
    case IrOpcode::kWord32AtomicPairAdd:
    case IrOpcode::kWord32AtomicPairAnd:
    case IrOpcode::kWord32AtomicPairCompareExchange:
    case IrOpcode::kWord32AtomicPairExchange:
    case IrOpcode::kWord32AtomicPairLoad:
    case IrOpcode::kWord32AtomicPairOr:
    case IrOpcode::kWord32AtomicPairStore:
    case IrOpcode::kWord32AtomicPairSub:
    case IrOpcode::kWord32AtomicPairXor:
    case IrOpcode::kWord32AtomicStore:
    case IrOpcode::kWord32AtomicSub:
    case IrOpcode::kWord32AtomicXor:
    case IrOpcode::kWord32PoisonOnSpeculation:
    case IrOpcode::kWord64AtomicAdd:
    case IrOpcode::kWord64AtomicAnd:
    case IrOpcode::kWord64AtomicCompareExchange:
    case IrOpcode::kWord64AtomicExchange:
    case IrOpcode::kWord64AtomicLoad:
    case IrOpcode::kWord64AtomicOr:
    case IrOpcode::kWord64AtomicStore:
    case IrOpcode::kWord64AtomicSub:
    case IrOpcode::kWord64AtomicXor:
    case IrOpcode::kWord64PoisonOnSpeculation:
      // These operations cannot trigger GC.
      return VisitOtherEffect(node, state);
    default:
      break;
  }
  DCHECK_EQ(0, node->op()->EffectOutputCount());
}

#define __ gasm()->

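// Lowers a kAllocateRaw node into a bump-pointer fast path against the
// space's top/limit addresses with a runtime stub call as the slow path.
// Small constant-size allocations may additionally be folded into the
// allocation group of the incoming {state}, in which case only the group's
// reservation size constant is patched and no new limit check is emitted.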
void MemoryOptimizer::VisitAllocateRaw(Node* node,
                                       AllocationState const* state) {
  DCHECK_EQ(IrOpcode::kAllocateRaw, node->opcode());
  Node* value;
  Node* size = node->InputAt(0);
  Node* effect = node->InputAt(1);
  Node* control = node->InputAt(2);

  gasm()->Reset(effect, control);

  AllocationType allocation = AllocationTypeOf(node->op());

  // Propagate tenuring from outer allocations to inner allocations, i.e.
  // when we allocate an object in old space and store a newly allocated
  // child object into the pretenured object, then the newly allocated
  // child object should also get pretenured to old space.
  if (allocation == AllocationType::kOld) {
    for (Edge const edge : node->use_edges()) {
      Node* const user = edge.from();
      if (user->opcode() == IrOpcode::kStoreField && edge.index() == 0) {
        Node* const child = user->InputAt(1);
        if (child->opcode() == IrOpcode::kAllocateRaw &&
            AllocationTypeOf(child->op()) == AllocationType::kYoung) {
          NodeProperties::ChangeOp(child, node->op());
          break;
        }
      }
    }
  } else {
    DCHECK_EQ(AllocationType::kYoung, allocation);
    for (Edge const edge : node->use_edges()) {
      Node* const user = edge.from();
      if (user->opcode() == IrOpcode::kStoreField && edge.index() == 1) {
        Node* const parent = user->InputAt(0);
        if (parent->opcode() == IrOpcode::kAllocateRaw &&
            AllocationTypeOf(parent->op()) == AllocationType::kOld) {
          allocation = AllocationType::kOld;
          break;
        }
      }
    }
  }

  // Determine the top/limit addresses.
  Node* top_address = __ ExternalConstant(
      allocation == AllocationType::kYoung
          ? ExternalReference::new_space_allocation_top_address(isolate())
          : ExternalReference::old_space_allocation_top_address(isolate()));
  Node* limit_address = __ ExternalConstant(
      allocation == AllocationType::kYoung
          ? ExternalReference::new_space_allocation_limit_address(isolate())
          : ExternalReference::old_space_allocation_limit_address(isolate()));

  // Check if we can fold this allocation into a previous allocation
  // represented by the incoming {state}.
  IntPtrMatcher m(size);
  if (m.IsInRange(0, kMaxRegularHeapObjectSize)) {
    intptr_t const object_size = m.Value();
    if (allocation_folding_ == AllocationFolding::kDoAllocationFolding &&
        state->size() <= kMaxRegularHeapObjectSize - object_size &&
        state->group()->allocation() == allocation) {
      // We can fold this Allocate {node} into the allocation {group}
      // represented by the given {state}. Compute the upper bound for
      // the new {state}.
      intptr_t const state_size = state->size() + object_size;

      // Update the reservation check to the actual maximum upper bound.
      AllocationGroup* const group = state->group();
      if (machine()->Is64()) {
        if (OpParameter<int64_t>(group->size()->op()) < state_size) {
          NodeProperties::ChangeOp(group->size(),
                                   common()->Int64Constant(state_size));
        }
      } else {
        if (OpParameter<int32_t>(group->size()->op()) < state_size) {
          NodeProperties::ChangeOp(
              group->size(),
              common()->Int32Constant(static_cast<int32_t>(state_size)));
        }
      }

      // Update the allocation top with the new object allocation.
      // TODO(bmeurer): Defer writing back top as much as possible.
      Node* top = __ IntAdd(state->top(), size);
      __ Store(StoreRepresentation(MachineType::PointerRepresentation(),
                                   kNoWriteBarrier),
               top_address, __ IntPtrConstant(0), top);

      // Compute the effective inner allocated address.
      value = __ BitcastWordToTagged(
          __ IntAdd(state->top(), __ IntPtrConstant(kHeapObjectTag)));

      // Extend the allocation {group}.
      group->Add(value);
      state = AllocationState::Open(group, state_size, top, zone());
    } else {
      auto call_runtime = __ MakeDeferredLabel();
      auto done = __ MakeLabel(MachineType::PointerRepresentation());

      // Set up a mutable reservation size node; it will be patched as we fold
      // additional allocations into this new group.
      Node* size = __ UniqueIntPtrConstant(object_size);

      // Load allocation top and limit.
      Node* top =
          __ Load(MachineType::Pointer(), top_address, __ IntPtrConstant(0));
      Node* limit =
          __ Load(MachineType::Pointer(), limit_address, __ IntPtrConstant(0));

      // Check if we need to collect garbage before we can start bump pointer
      // allocation (always done for folded allocations).
      Node* check = __ UintLessThan(__ IntAdd(top, size), limit);

      __ GotoIfNot(check, &call_runtime);
      __ Goto(&done, top);

      __ Bind(&call_runtime);
      {
        Node* target = allocation == AllocationType::kYoung
                           ? __ AllocateInYoungGenerationStubConstant()
                           : __ AllocateInOldGenerationStubConstant();
        if (!allocate_operator_.is_set()) {
          auto descriptor = AllocateDescriptor{};
          auto call_descriptor = Linkage::GetStubCallDescriptor(
              graph()->zone(), descriptor, descriptor.GetStackParameterCount(),
              CallDescriptor::kCanUseRoots, Operator::kNoThrow);
          allocate_operator_.set(common()->Call(call_descriptor));
        }
        Node* vfalse = __ BitcastTaggedToWord(
            __ Call(allocate_operator_.get(), target, size));
        vfalse = __ IntSub(vfalse, __ IntPtrConstant(kHeapObjectTag));
        __ Goto(&done, vfalse);
      }

      __ Bind(&done);

      // Compute the new top and write it back.
      top = __ IntAdd(done.PhiAt(0), __ IntPtrConstant(object_size));
      __ Store(StoreRepresentation(MachineType::PointerRepresentation(),
                                   kNoWriteBarrier),
               top_address, __ IntPtrConstant(0), top);

      // Compute the initial object address.
      value = __ BitcastWordToTagged(
          __ IntAdd(done.PhiAt(0), __ IntPtrConstant(kHeapObjectTag)));

      // Start a new allocation group.
      AllocationGroup* group =
          new (zone()) AllocationGroup(value, allocation, size, zone());
      state = AllocationState::Open(group, object_size, top, zone());
    }
  } else {
    auto call_runtime = __ MakeDeferredLabel();
    auto done = __ MakeLabel(MachineRepresentation::kTaggedPointer);

    // Load allocation top and limit.
    Node* top =
        __ Load(MachineType::Pointer(), top_address, __ IntPtrConstant(0));
    Node* limit =
        __ Load(MachineType::Pointer(), limit_address, __ IntPtrConstant(0));

    // Compute the new top.
    Node* new_top = __ IntAdd(top, size);

    // Check if we can do bump pointer allocation here.
    Node* check = __ UintLessThan(new_top, limit);
    __ GotoIfNot(check, &call_runtime);
    __ Store(StoreRepresentation(MachineType::PointerRepresentation(),
                                 kNoWriteBarrier),
             top_address, __ IntPtrConstant(0), new_top);
    __ Goto(&done, __ BitcastWordToTagged(
                       __ IntAdd(top, __ IntPtrConstant(kHeapObjectTag))));

    __ Bind(&call_runtime);
    Node* target = allocation == AllocationType::kYoung
                       ? __ AllocateInYoungGenerationStubConstant()
                       : __ AllocateInOldGenerationStubConstant();
    if (!allocate_operator_.is_set()) {
      auto descriptor = AllocateDescriptor{};
      auto call_descriptor = Linkage::GetStubCallDescriptor(
          graph()->zone(), descriptor, descriptor.GetStackParameterCount(),
          CallDescriptor::kCanUseRoots, Operator::kNoThrow);
      allocate_operator_.set(common()->Call(call_descriptor));
    }
    __ Goto(&done, __ Call(allocate_operator_.get(), target, size));

    __ Bind(&done);
    value = done.PhiAt(0);

    // Create an unfoldable allocation group.
    AllocationGroup* group =
        new (zone()) AllocationGroup(value, allocation, zone());
    state = AllocationState::Closed(group, zone());
  }

  effect = __ ExtractCurrentEffect();
  control = __ ExtractCurrentControl();

  // Replace all effect uses of {node} with the {effect}, enqueue the
  // effect uses for further processing, and replace all value uses of
  // {node} with the {value}.
  for (Edge edge : node->use_edges()) {
    if (NodeProperties::IsEffectEdge(edge)) {
      EnqueueUse(edge.from(), edge.index(), state);
      edge.UpdateTo(effect);
    } else if (NodeProperties::IsValueEdge(edge)) {
      edge.UpdateTo(value);
    } else {
      DCHECK(NodeProperties::IsControlEdge(edge));
      edge.UpdateTo(control);
    }
  }

  // Kill the {node} to make sure we don't leave dangling dead uses.
  node->Kill();
}

#undef __

void MemoryOptimizer::VisitCall(Node* node, AllocationState const* state) {
  DCHECK_EQ(IrOpcode::kCall, node->opcode());
  // If the call can allocate, we start with a fresh state.
  if (!(CallDescriptorOf(node->op())->flags() & CallDescriptor::kNoAllocate)) {
    state = empty_state();
  }
  EnqueueUses(node, state);
}

void MemoryOptimizer::VisitCallWithCallerSavedRegisters(
    Node* node, AllocationState const* state) {
  DCHECK_EQ(IrOpcode::kCallWithCallerSavedRegisters, node->opcode());
  // If the call can allocate, we start with a fresh state.
  if (!(CallDescriptorOf(node->op())->flags() & CallDescriptor::kNoAllocate)) {
    state = empty_state();
  }
  EnqueueUses(node, state);
}

void MemoryOptimizer::VisitLoadElement(Node* node,
                                       AllocationState const* state) {
  DCHECK_EQ(IrOpcode::kLoadElement, node->opcode());
  ElementAccess const& access = ElementAccessOf(node->op());
  Node* index = node->InputAt(1);
  node->ReplaceInput(1, ComputeIndex(access, index));
  if (NeedsPoisoning(access.load_sensitivity) &&
      access.machine_type.representation() !=
          MachineRepresentation::kTaggedPointer) {
    NodeProperties::ChangeOp(node,
                             machine()->PoisonedLoad(access.machine_type));
  } else {
    NodeProperties::ChangeOp(node, machine()->Load(access.machine_type));
  }
  EnqueueUses(node, state);
}

void MemoryOptimizer::VisitLoadField(Node* node, AllocationState const* state) {
  DCHECK_EQ(IrOpcode::kLoadField, node->opcode());
  FieldAccess const& access = FieldAccessOf(node->op());
  Node* offset = jsgraph()->IntPtrConstant(access.offset - access.tag());
  node->InsertInput(graph()->zone(), 1, offset);
  if (NeedsPoisoning(access.load_sensitivity) &&
      access.machine_type.representation() !=
          MachineRepresentation::kTaggedPointer) {
    NodeProperties::ChangeOp(node,
                             machine()->PoisonedLoad(access.machine_type));
  } else {
    NodeProperties::ChangeOp(node, machine()->Load(access.machine_type));
  }
  EnqueueUses(node, state);
}

void MemoryOptimizer::VisitStoreElement(Node* node,
                                        AllocationState const* state) {
  DCHECK_EQ(IrOpcode::kStoreElement, node->opcode());
  ElementAccess const& access = ElementAccessOf(node->op());
  Node* object = node->InputAt(0);
  Node* index = node->InputAt(1);
  WriteBarrierKind write_barrier_kind =
      ComputeWriteBarrierKind(object, state, access.write_barrier_kind);
  node->ReplaceInput(1, ComputeIndex(access, index));
  NodeProperties::ChangeOp(
      node, machine()->Store(StoreRepresentation(
                access.machine_type.representation(), write_barrier_kind)));
  EnqueueUses(node, state);
}

void MemoryOptimizer::VisitStoreField(Node* node,
                                      AllocationState const* state) {
  DCHECK_EQ(IrOpcode::kStoreField, node->opcode());
  FieldAccess const& access = FieldAccessOf(node->op());
  Node* object = node->InputAt(0);
  WriteBarrierKind write_barrier_kind =
      ComputeWriteBarrierKind(object, state, access.write_barrier_kind);
  Node* offset = jsgraph()->IntPtrConstant(access.offset - access.tag());
  node->InsertInput(graph()->zone(), 1, offset);
  NodeProperties::ChangeOp(
      node, machine()->Store(StoreRepresentation(
                access.machine_type.representation(), write_barrier_kind)));
  EnqueueUses(node, state);
}

void MemoryOptimizer::VisitStore(Node* node, AllocationState const* state) {
  DCHECK_EQ(IrOpcode::kStore, node->opcode());
  StoreRepresentation representation = StoreRepresentationOf(node->op());
  Node* object = node->InputAt(0);
  WriteBarrierKind write_barrier_kind = ComputeWriteBarrierKind(
      object, state, representation.write_barrier_kind());
  if (write_barrier_kind != representation.write_barrier_kind()) {
    NodeProperties::ChangeOp(
        node, machine()->Store(StoreRepresentation(
                  representation.representation(), write_barrier_kind)));
  }
  EnqueueUses(node, state);
}

void MemoryOptimizer::VisitOtherEffect(Node* node,
                                       AllocationState const* state) {
  EnqueueUses(node, state);
}

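// Converts an element index into a byte offset: scale by the element size
// and add the header size minus the tag adjustment.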
Node* MemoryOptimizer::ComputeIndex(ElementAccess const& access, Node* index) {
  int const element_size_shift =
      ElementSizeLog2Of(access.machine_type.representation());
  if (element_size_shift) {
    index = graph()->NewNode(machine()->WordShl(), index,
                             jsgraph()->IntPtrConstant(element_size_shift));
  }
  int const fixed_offset = access.header_size - access.tag();
  if (fixed_offset) {
    index = graph()->NewNode(machine()->IntAdd(), index,
                             jsgraph()->IntPtrConstant(fixed_offset));
  }
  return index;
}

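// A store needs no generational write barrier if the object written to was
// allocated in the young generation by the current allocation group: nothing
// that could trigger a GC has happened since that allocation, so the object
// is still guaranteed to be in new space.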
WriteBarrierKind MemoryOptimizer::ComputeWriteBarrierKind(
    Node* object, AllocationState const* state,
    WriteBarrierKind write_barrier_kind) {
  if (state->IsYoungGenerationAllocation() &&
      state->group()->Contains(object)) {
    write_barrier_kind = kNoWriteBarrier;
  }
  return write_barrier_kind;
}

MemoryOptimizer::AllocationState const* MemoryOptimizer::MergeStates(
    AllocationStates const& states) {
  // Check if all states are the same; or at least if all allocation
  // states belong to the same allocation group.
  AllocationState const* state = states.front();
  AllocationGroup* group = state->group();
  for (size_t i = 1; i < states.size(); ++i) {
    if (states[i] != state) state = nullptr;
    if (states[i]->group() != group) group = nullptr;
  }
  if (state == nullptr) {
    if (group != nullptr) {
      // We cannot fold any more allocations into this group, but we can still
      // eliminate write barriers on stores to this group.
      // TODO(bmeurer): We could potentially just create a Phi here to merge
      // the various tops; but we need to pay special attention not to create
      // an unschedulable graph.
      state = AllocationState::Closed(group, zone());
    } else {
      // The states are from different allocation groups.
      state = empty_state();
    }
  }
  return state;
}

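// Handles the {index}-th effect input of an EffectPhi. Loop headers restart
// with the empty state; for ordinary merges the incoming states are buffered
// in {pending_} and only merged and propagated once all inputs have been
// seen.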
void MemoryOptimizer::EnqueueMerge(Node* node, int index,
                                   AllocationState const* state) {
  DCHECK_EQ(IrOpcode::kEffectPhi, node->opcode());
  int const input_count = node->InputCount() - 1;
  DCHECK_LT(0, input_count);
  Node* const control = node->InputAt(input_count);
  if (control->opcode() == IrOpcode::kLoop) {
    // For loops we always start with an empty state at the beginning.
    if (index == 0) EnqueueUses(node, empty_state());
  } else {
    DCHECK_EQ(IrOpcode::kMerge, control->opcode());
    // Check if we already know about this pending merge.
    NodeId const id = node->id();
    auto it = pending_.find(id);
    if (it == pending_.end()) {
      // Insert a new pending merge.
      it = pending_.insert(std::make_pair(id, AllocationStates(zone()))).first;
    }
    // Add the next input state.
    it->second.push_back(state);
    // Check if states for all inputs are available by now.
    if (it->second.size() == static_cast<size_t>(input_count)) {
      // All inputs to this effect merge are done, merge the states given all
      // input constraints, drop the pending merge and enqueue uses of the
      // EffectPhi {node}.
      state = MergeStates(it->second);
      EnqueueUses(node, state);
      pending_.erase(it);
    }
  }
}

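// Enqueues every effect use of {node} for processing under the given {state}.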
void MemoryOptimizer::EnqueueUses(Node* node, AllocationState const* state) {
  for (Edge const edge : node->use_edges()) {
    if (NodeProperties::IsEffectEdge(edge)) {
      EnqueueUse(edge.from(), edge.index(), state);
    }
  }
}

void MemoryOptimizer::EnqueueUse(Node* node, int index,
                                 AllocationState const* state) {
  if (node->opcode() == IrOpcode::kEffectPhi) {
    // An EffectPhi represents a merge of different effect chains, which
    // needs special handling depending on whether the merge is part of a
    // loop or just a normal control join.
    EnqueueMerge(node, index, state);
  } else {
    Token token = {node, state};
    tokens_.push(token);
  }
}

Graph* MemoryOptimizer::graph() const { return jsgraph()->graph(); }

Isolate* MemoryOptimizer::isolate() const { return jsgraph()->isolate(); }

CommonOperatorBuilder* MemoryOptimizer::common() const {
  return jsgraph()->common();
}

MachineOperatorBuilder* MemoryOptimizer::machine() const {
  return jsgraph()->machine();
}

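// Decides whether a load must be poisoned, based on the configured poisoning
// level and the load's sensitivity.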
bool MemoryOptimizer::NeedsPoisoning(LoadSensitivity load_sensitivity) const {
  // Safe loads do not need poisoning.
  if (load_sensitivity == LoadSensitivity::kSafe) return false;

  switch (poisoning_level_) {
    case PoisoningMitigationLevel::kDontPoison:
      return false;
    case PoisoningMitigationLevel::kPoisonAll:
      return true;
    case PoisoningMitigationLevel::kPoisonCriticalOnly:
      return load_sensitivity == LoadSensitivity::kCritical;
  }
  UNREACHABLE();
}

}  // namespace compiler
}  // namespace internal
}  // namespace v8