// Copyright 2016 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/compiler/memory-optimizer.h"

#include "src/compiler/js-graph.h"
#include "src/compiler/linkage.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/node-properties.h"
#include "src/compiler/node.h"
#include "src/compiler/simplified-operator.h"
#include "src/interface-descriptors.h"

namespace v8 {
namespace internal {
namespace compiler {

MemoryOptimizer::MemoryOptimizer(JSGraph* jsgraph, Zone* zone,
                                 PoisoningMitigationLevel poisoning_level,
                                 AllocationFolding allocation_folding)
    : jsgraph_(jsgraph),
      empty_state_(AllocationState::Empty(zone)),
      pending_(zone),
      tokens_(zone),
      zone_(zone),
      graph_assembler_(jsgraph, nullptr, nullptr, zone),
      poisoning_level_(poisoning_level),
      allocation_folding_(allocation_folding) {}

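// Drives a worklist-based walk over the effect chain, starting at the graph's
// start node, and lowers allocation, load, and store nodes as it goes.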
void MemoryOptimizer::Optimize() {
  EnqueueUses(graph()->start(), empty_state());
  while (!tokens_.empty()) {
    Token const token = tokens_.front();
    tokens_.pop();
    VisitNode(token.node, token.state);
  }
  DCHECK(pending_.empty());
  DCHECK(tokens_.empty());
}

MemoryOptimizer::AllocationGroup::AllocationGroup(Node* node,
                                                  AllocationType allocation,
                                                  Zone* zone)
    : node_ids_(zone), allocation_(allocation), size_(nullptr) {
  node_ids_.insert(node->id());
}

MemoryOptimizer::AllocationGroup::AllocationGroup(Node* node,
                                                  AllocationType allocation,
                                                  Node* size, Zone* zone)
    : node_ids_(zone), allocation_(allocation), size_(size) {
  node_ids_.insert(node->id());
}

void MemoryOptimizer::AllocationGroup::Add(Node* node) {
  node_ids_.insert(node->id());
}

bool MemoryOptimizer::AllocationGroup::Contains(Node* node) const {
  return node_ids_.find(node->id()) != node_ids_.end();
}

MemoryOptimizer::AllocationState::AllocationState()
    : group_(nullptr), size_(std::numeric_limits<int>::max()), top_(nullptr) {}

MemoryOptimizer::AllocationState::AllocationState(AllocationGroup* group)
    : group_(group), size_(std::numeric_limits<int>::max()), top_(nullptr) {}

MemoryOptimizer::AllocationState::AllocationState(AllocationGroup* group,
                                                  intptr_t size, Node* top)
    : group_(group), size_(size), top_(top) {}

bool MemoryOptimizer::AllocationState::IsYoungGenerationAllocation() const {
  return group() && group()->IsYoungGenerationAllocation();
}

namespace {

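// Returns false if {node} is known not to allocate on the heap (and therefore
// cannot trigger a GC); conservatively returns true otherwise.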
bool CanAllocate(const Node* node) {
  switch (node->opcode()) {
    case IrOpcode::kBitcastTaggedToWord:
    case IrOpcode::kBitcastWordToTagged:
    case IrOpcode::kComment:
    case IrOpcode::kDebugAbort:
    case IrOpcode::kDebugBreak:
    case IrOpcode::kDeoptimizeIf:
    case IrOpcode::kDeoptimizeUnless:
    case IrOpcode::kIfException:
    case IrOpcode::kLoad:
    case IrOpcode::kLoadElement:
    case IrOpcode::kLoadField:
    case IrOpcode::kPoisonedLoad:
    case IrOpcode::kProtectedLoad:
    case IrOpcode::kProtectedStore:
    case IrOpcode::kRetain:
    case IrOpcode::kStoreElement:
    case IrOpcode::kStoreField:
    case IrOpcode::kTaggedPoisonOnSpeculation:
    case IrOpcode::kUnalignedLoad:
    case IrOpcode::kUnalignedStore:
    case IrOpcode::kUnsafePointerAdd:
    case IrOpcode::kUnreachable:
    case IrOpcode::kWord32AtomicAdd:
    case IrOpcode::kWord32AtomicAnd:
    case IrOpcode::kWord32AtomicCompareExchange:
    case IrOpcode::kWord32AtomicExchange:
    case IrOpcode::kWord32AtomicLoad:
    case IrOpcode::kWord32AtomicOr:
    case IrOpcode::kWord32AtomicPairAdd:
    case IrOpcode::kWord32AtomicPairAnd:
    case IrOpcode::kWord32AtomicPairCompareExchange:
    case IrOpcode::kWord32AtomicPairExchange:
    case IrOpcode::kWord32AtomicPairLoad:
    case IrOpcode::kWord32AtomicPairOr:
    case IrOpcode::kWord32AtomicPairStore:
    case IrOpcode::kWord32AtomicPairSub:
    case IrOpcode::kWord32AtomicPairXor:
    case IrOpcode::kWord32AtomicStore:
    case IrOpcode::kWord32AtomicSub:
    case IrOpcode::kWord32AtomicXor:
    case IrOpcode::kWord32PoisonOnSpeculation:
    case IrOpcode::kWord64AtomicAdd:
    case IrOpcode::kWord64AtomicAnd:
    case IrOpcode::kWord64AtomicCompareExchange:
    case IrOpcode::kWord64AtomicExchange:
    case IrOpcode::kWord64AtomicLoad:
    case IrOpcode::kWord64AtomicOr:
    case IrOpcode::kWord64AtomicStore:
    case IrOpcode::kWord64AtomicSub:
    case IrOpcode::kWord64AtomicXor:
    case IrOpcode::kWord64PoisonOnSpeculation:
      return false;

    case IrOpcode::kCall:
    case IrOpcode::kCallWithCallerSavedRegisters:
      return !(CallDescriptorOf(node->op())->flags() &
               CallDescriptor::kNoAllocate);

    case IrOpcode::kStore:
      // Store is not safe because it could be part of CSA's bump pointer
      // allocation(?).
      return true;

    default:
      break;
  }
  return true;
}

bool CanLoopAllocate(Node* loop_effect_phi, Zone* temp_zone) {
  Node* const control = NodeProperties::GetControlInput(loop_effect_phi);

  ZoneQueue<Node*> queue(temp_zone);
  ZoneSet<Node*> visited(temp_zone);
  visited.insert(loop_effect_phi);

  // Start the effect chain walk from the loop back edges.
  for (int i = 1; i < control->InputCount(); ++i) {
    queue.push(loop_effect_phi->InputAt(i));
  }

  while (!queue.empty()) {
    Node* const current = queue.front();
    queue.pop();
    if (visited.find(current) == visited.end()) {
      visited.insert(current);

      if (CanAllocate(current)) return true;

      for (int i = 0; i < current->op()->EffectInputCount(); ++i) {
        queue.push(NodeProperties::GetEffectInput(current, i));
      }
    }
  }
  return false;
}

}  // namespace

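// Dispatches on the node's opcode; operations that cannot allocate simply
// propagate the incoming allocation {state} to their effect uses.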
void MemoryOptimizer::VisitNode(Node* node, AllocationState const* state) {
  DCHECK(!node->IsDead());
  DCHECK_LT(0, node->op()->EffectInputCount());
  switch (node->opcode()) {
    case IrOpcode::kAllocate:
      // Allocate nodes were purged from the graph in effect-control
      // linearization.
      UNREACHABLE();
    case IrOpcode::kAllocateRaw:
      return VisitAllocateRaw(node, state);
    case IrOpcode::kCall:
      return VisitCall(node, state);
    case IrOpcode::kCallWithCallerSavedRegisters:
      return VisitCallWithCallerSavedRegisters(node, state);
    case IrOpcode::kLoadElement:
      return VisitLoadElement(node, state);
    case IrOpcode::kLoadField:
      return VisitLoadField(node, state);
    case IrOpcode::kStoreElement:
      return VisitStoreElement(node, state);
    case IrOpcode::kStoreField:
      return VisitStoreField(node, state);
    case IrOpcode::kStore:
      return VisitStore(node, state);
    default:
      if (!CanAllocate(node)) {
        // These operations cannot trigger GC.
        return VisitOtherEffect(node, state);
      }
  }
  DCHECK_EQ(0, node->op()->EffectOutputCount());
}

#define __ gasm()->

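// Lowers an AllocateRaw node to an inline bump-pointer allocation with a
// runtime call as fallback, folding the allocation into the previous one
// from {state} whenever allocation folding is enabled and the sizes permit.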
void MemoryOptimizer::VisitAllocateRaw(Node* node,
                                       AllocationState const* state) {
  DCHECK_EQ(IrOpcode::kAllocateRaw, node->opcode());
  Node* value;
  Node* size = node->InputAt(0);
  Node* effect = node->InputAt(1);
  Node* control = node->InputAt(2);

  gasm()->Reset(effect, control);

  AllocationType allocation = AllocationTypeOf(node->op());

  // Propagate tenuring from outer allocations to inner allocations, i.e.
  // when we allocate an object in old space and store a newly allocated
  // child object into the pretenured object, then the newly allocated
  // child object also should get pretenured to old space.
  if (allocation == AllocationType::kOld) {
    for (Edge const edge : node->use_edges()) {
      Node* const user = edge.from();
      if (user->opcode() == IrOpcode::kStoreField && edge.index() == 0) {
        Node* const child = user->InputAt(1);
        if (child->opcode() == IrOpcode::kAllocateRaw &&
            AllocationTypeOf(child->op()) == AllocationType::kYoung) {
          NodeProperties::ChangeOp(child, node->op());
          break;
        }
      }
    }
  } else {
    DCHECK_EQ(AllocationType::kYoung, allocation);
    for (Edge const edge : node->use_edges()) {
      Node* const user = edge.from();
      if (user->opcode() == IrOpcode::kStoreField && edge.index() == 1) {
        Node* const parent = user->InputAt(0);
        if (parent->opcode() == IrOpcode::kAllocateRaw &&
            AllocationTypeOf(parent->op()) == AllocationType::kOld) {
          allocation = AllocationType::kOld;
          break;
        }
      }
    }
  }

  // Determine the top/limit addresses.
  Node* top_address = __ ExternalConstant(
      allocation == AllocationType::kYoung
          ? ExternalReference::new_space_allocation_top_address(isolate())
          : ExternalReference::old_space_allocation_top_address(isolate()));
  Node* limit_address = __ ExternalConstant(
      allocation == AllocationType::kYoung
          ? ExternalReference::new_space_allocation_limit_address(isolate())
          : ExternalReference::old_space_allocation_limit_address(isolate()));

  // Check if we can fold this allocation into a previous allocation
  // represented by the incoming {state}.
  IntPtrMatcher m(size);
  if (m.IsInRange(0, kMaxRegularHeapObjectSize)) {
    intptr_t const object_size = m.Value();
    if (allocation_folding_ == AllocationFolding::kDoAllocationFolding &&
        state->size() <= kMaxRegularHeapObjectSize - object_size &&
        state->group()->allocation() == allocation) {
      // We can fold this Allocate {node} into the allocation {group}
      // represented by the given {state}. Compute the upper bound for
      // the new {state}.
      intptr_t const state_size = state->size() + object_size;

      // Update the reservation check to the actual maximum upper bound.
      AllocationGroup* const group = state->group();
      if (machine()->Is64()) {
        if (OpParameter<int64_t>(group->size()->op()) < state_size) {
          NodeProperties::ChangeOp(group->size(),
                                   common()->Int64Constant(state_size));
        }
      } else {
        if (OpParameter<int32_t>(group->size()->op()) < state_size) {
          NodeProperties::ChangeOp(
              group->size(),
              common()->Int32Constant(static_cast<int32_t>(state_size)));
        }
      }

      // Update the allocation top with the new object allocation.
      // TODO(bmeurer): Defer writing back top as much as possible.
      Node* top = __ IntAdd(state->top(), size);
      __ Store(StoreRepresentation(MachineType::PointerRepresentation(),
                                   kNoWriteBarrier),
               top_address, __ IntPtrConstant(0), top);

      // Compute the effective inner allocated address.
      value = __ BitcastWordToTagged(
          __ IntAdd(state->top(), __ IntPtrConstant(kHeapObjectTag)));

      // Extend the allocation {group}.
      group->Add(value);
      state = AllocationState::Open(group, state_size, top, zone());
    } else {
      auto call_runtime = __ MakeDeferredLabel();
      auto done = __ MakeLabel(MachineType::PointerRepresentation());

      // Setup a mutable reservation size node; will be patched as we fold
      // additional allocations into this new group.
      Node* size = __ UniqueIntPtrConstant(object_size);

      // Load allocation top and limit.
      Node* top =
          __ Load(MachineType::Pointer(), top_address, __ IntPtrConstant(0));
      Node* limit =
          __ Load(MachineType::Pointer(), limit_address, __ IntPtrConstant(0));

      // Check if we need to collect garbage before we can start bump pointer
      // allocation (always done for folded allocations).
      Node* check = __ UintLessThan(__ IntAdd(top, size), limit);

      __ GotoIfNot(check, &call_runtime);
      __ Goto(&done, top);

      __ Bind(&call_runtime);
      {
        Node* target = allocation == AllocationType::kYoung
                           ? __ AllocateInYoungGenerationStubConstant()
                           : __ AllocateInOldGenerationStubConstant();
        if (!allocate_operator_.is_set()) {
          auto descriptor = AllocateDescriptor{};
          auto call_descriptor = Linkage::GetStubCallDescriptor(
              graph()->zone(), descriptor, descriptor.GetStackParameterCount(),
              CallDescriptor::kCanUseRoots, Operator::kNoThrow);
          allocate_operator_.set(common()->Call(call_descriptor));
        }
        Node* vfalse = __ BitcastTaggedToWord(
            __ Call(allocate_operator_.get(), target, size));
        vfalse = __ IntSub(vfalse, __ IntPtrConstant(kHeapObjectTag));
        __ Goto(&done, vfalse);
      }

      __ Bind(&done);

      // Compute the new top and write it back.
      top = __ IntAdd(done.PhiAt(0), __ IntPtrConstant(object_size));
      __ Store(StoreRepresentation(MachineType::PointerRepresentation(),
                                   kNoWriteBarrier),
               top_address, __ IntPtrConstant(0), top);

      // Compute the initial object address.
      value = __ BitcastWordToTagged(
          __ IntAdd(done.PhiAt(0), __ IntPtrConstant(kHeapObjectTag)));

      // Start a new allocation group.
      AllocationGroup* group =
          new (zone()) AllocationGroup(value, allocation, size, zone());
      state = AllocationState::Open(group, object_size, top, zone());
    }
  } else {
    auto call_runtime = __ MakeDeferredLabel();
    auto done = __ MakeLabel(MachineRepresentation::kTaggedPointer);

    // Load allocation top and limit.
    Node* top =
        __ Load(MachineType::Pointer(), top_address, __ IntPtrConstant(0));
    Node* limit =
        __ Load(MachineType::Pointer(), limit_address, __ IntPtrConstant(0));

    // Compute the new top.
    Node* new_top = __ IntAdd(top, size);

    // Check if we can do bump pointer allocation here.
    Node* check = __ UintLessThan(new_top, limit);
    __ GotoIfNot(check, &call_runtime);
    __ Store(StoreRepresentation(MachineType::PointerRepresentation(),
                                 kNoWriteBarrier),
             top_address, __ IntPtrConstant(0), new_top);
    __ Goto(&done, __ BitcastWordToTagged(
                       __ IntAdd(top, __ IntPtrConstant(kHeapObjectTag))));

    __ Bind(&call_runtime);
    Node* target = allocation == AllocationType::kYoung
                       ? __ AllocateInYoungGenerationStubConstant()
                       : __ AllocateInOldGenerationStubConstant();
    if (!allocate_operator_.is_set()) {
      auto descriptor = AllocateDescriptor{};
      auto call_descriptor = Linkage::GetStubCallDescriptor(
          graph()->zone(), descriptor, descriptor.GetStackParameterCount(),
          CallDescriptor::kCanUseRoots, Operator::kNoThrow);
      allocate_operator_.set(common()->Call(call_descriptor));
    }
    __ Goto(&done, __ Call(allocate_operator_.get(), target, size));

    __ Bind(&done);
    value = done.PhiAt(0);

    // Create an unfoldable allocation group.
    AllocationGroup* group =
        new (zone()) AllocationGroup(value, allocation, zone());
    state = AllocationState::Closed(group, zone());
  }

  effect = __ ExtractCurrentEffect();
  control = __ ExtractCurrentControl();

  // Replace all effect uses of {node} with the {effect}, enqueue the
  // effect uses for further processing, and replace all value uses of
  // {node} with the {value}.
  for (Edge edge : node->use_edges()) {
    if (NodeProperties::IsEffectEdge(edge)) {
      EnqueueUse(edge.from(), edge.index(), state);
      edge.UpdateTo(effect);
    } else if (NodeProperties::IsValueEdge(edge)) {
      edge.UpdateTo(value);
    } else {
      DCHECK(NodeProperties::IsControlEdge(edge));
      edge.UpdateTo(control);
    }
  }

  // Kill the {node} to make sure we don't leave dangling dead uses.
  node->Kill();
}

#undef __

void MemoryOptimizer::VisitCall(Node* node, AllocationState const* state) {
  DCHECK_EQ(IrOpcode::kCall, node->opcode());
  // If the call can allocate, we start with a fresh state.
  if (!(CallDescriptorOf(node->op())->flags() & CallDescriptor::kNoAllocate)) {
    state = empty_state();
  }
  EnqueueUses(node, state);
}

void MemoryOptimizer::VisitCallWithCallerSavedRegisters(
    Node* node, AllocationState const* state) {
  DCHECK_EQ(IrOpcode::kCallWithCallerSavedRegisters, node->opcode());
  // If the call can allocate, we start with a fresh state.
  if (!(CallDescriptorOf(node->op())->flags() & CallDescriptor::kNoAllocate)) {
    state = empty_state();
  }
  EnqueueUses(node, state);
}

void MemoryOptimizer::VisitLoadElement(Node* node,
                                       AllocationState const* state) {
  DCHECK_EQ(IrOpcode::kLoadElement, node->opcode());
  ElementAccess const& access = ElementAccessOf(node->op());
  Node* index = node->InputAt(1);
  node->ReplaceInput(1, ComputeIndex(access, index));
  if (NeedsPoisoning(access.load_sensitivity) &&
      access.machine_type.representation() !=
          MachineRepresentation::kTaggedPointer) {
    NodeProperties::ChangeOp(node,
                             machine()->PoisonedLoad(access.machine_type));
  } else {
    NodeProperties::ChangeOp(node, machine()->Load(access.machine_type));
  }
  EnqueueUses(node, state);
}

void MemoryOptimizer::VisitLoadField(Node* node, AllocationState const* state) {
  DCHECK_EQ(IrOpcode::kLoadField, node->opcode());
  FieldAccess const& access = FieldAccessOf(node->op());
  Node* offset = jsgraph()->IntPtrConstant(access.offset - access.tag());
  node->InsertInput(graph()->zone(), 1, offset);
  if (NeedsPoisoning(access.load_sensitivity) &&
      access.machine_type.representation() !=
          MachineRepresentation::kTaggedPointer) {
    NodeProperties::ChangeOp(node,
                             machine()->PoisonedLoad(access.machine_type));
  } else {
    NodeProperties::ChangeOp(node, machine()->Load(access.machine_type));
  }
  EnqueueUses(node, state);
}

void MemoryOptimizer::VisitStoreElement(Node* node,
                                        AllocationState const* state) {
  DCHECK_EQ(IrOpcode::kStoreElement, node->opcode());
  ElementAccess const& access = ElementAccessOf(node->op());
  Node* object = node->InputAt(0);
  Node* index = node->InputAt(1);
  WriteBarrierKind write_barrier_kind =
      ComputeWriteBarrierKind(object, state, access.write_barrier_kind);
  node->ReplaceInput(1, ComputeIndex(access, index));
  NodeProperties::ChangeOp(
      node, machine()->Store(StoreRepresentation(
                access.machine_type.representation(), write_barrier_kind)));
  EnqueueUses(node, state);
}

void MemoryOptimizer::VisitStoreField(Node* node,
                                      AllocationState const* state) {
  DCHECK_EQ(IrOpcode::kStoreField, node->opcode());
  FieldAccess const& access = FieldAccessOf(node->op());
  Node* object = node->InputAt(0);
  WriteBarrierKind write_barrier_kind =
      ComputeWriteBarrierKind(object, state, access.write_barrier_kind);
  Node* offset = jsgraph()->IntPtrConstant(access.offset - access.tag());
  node->InsertInput(graph()->zone(), 1, offset);
  NodeProperties::ChangeOp(
      node, machine()->Store(StoreRepresentation(
                access.machine_type.representation(), write_barrier_kind)));
  EnqueueUses(node, state);
}

void MemoryOptimizer::VisitStore(Node* node, AllocationState const* state) {
  DCHECK_EQ(IrOpcode::kStore, node->opcode());
  StoreRepresentation representation = StoreRepresentationOf(node->op());
  Node* object = node->InputAt(0);
  WriteBarrierKind write_barrier_kind = ComputeWriteBarrierKind(
      object, state, representation.write_barrier_kind());
  if (write_barrier_kind != representation.write_barrier_kind()) {
    NodeProperties::ChangeOp(
        node, machine()->Store(StoreRepresentation(
                  representation.representation(), write_barrier_kind)));
  }
  EnqueueUses(node, state);
}

void MemoryOptimizer::VisitOtherEffect(Node* node,
                                       AllocationState const* state) {
  EnqueueUses(node, state);
}

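// Turns an element index into a machine-level byte offset: the index is
// scaled by the element size and the (untagged) header size is added.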
Node* MemoryOptimizer::ComputeIndex(ElementAccess const& access, Node* index) {
  int const element_size_shift =
      ElementSizeLog2Of(access.machine_type.representation());
  if (element_size_shift) {
    index = graph()->NewNode(machine()->WordShl(), index,
                             jsgraph()->IntPtrConstant(element_size_shift));
  }
  int const fixed_offset = access.header_size - access.tag();
  if (fixed_offset) {
    index = graph()->NewNode(machine()->IntAdd(), index,
                             jsgraph()->IntPtrConstant(fixed_offset));
  }
  return index;
}

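// Write barriers can be removed for stores into objects that were freshly
// allocated in the young generation by the current allocation group.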
WriteBarrierKind MemoryOptimizer::ComputeWriteBarrierKind(
    Node* object, AllocationState const* state,
    WriteBarrierKind write_barrier_kind) {
  if (state->IsYoungGenerationAllocation() &&
      state->group()->Contains(object)) {
    write_barrier_kind = kNoWriteBarrier;
  }
  return write_barrier_kind;
}

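// Merges the allocation states arriving at an EffectPhi into a single state
// that is valid after the control join.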
MemoryOptimizer::AllocationState const* MemoryOptimizer::MergeStates(
    AllocationStates const& states) {
  // Check if all states are the same; or at least if all allocation
  // states belong to the same allocation group.
  AllocationState const* state = states.front();
  AllocationGroup* group = state->group();
  for (size_t i = 1; i < states.size(); ++i) {
    if (states[i] != state) state = nullptr;
    if (states[i]->group() != group) group = nullptr;
  }
  if (state == nullptr) {
    if (group != nullptr) {
      // We cannot fold any more allocations into this group, but we can still
      // eliminate write barriers on stores to this group.
      // TODO(bmeurer): We could potentially just create a Phi here to merge
      // the various tops; but we need to pay special attention not to create
      // an unschedulable graph.
      state = AllocationState::Closed(group, zone());
    } else {
      // The states are from different allocation groups.
      state = empty_state();
    }
  }
  return state;
}

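// Handles an effect edge flowing into an EffectPhi: for loop headers the
// state is reset if the loop body can allocate, while for ordinary merges
// processing is deferred until the states of all inputs are known.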
void MemoryOptimizer::EnqueueMerge(Node* node, int index,
                                   AllocationState const* state) {
  DCHECK_EQ(IrOpcode::kEffectPhi, node->opcode());
  int const input_count = node->InputCount() - 1;
  DCHECK_LT(0, input_count);
  Node* const control = node->InputAt(input_count);
  if (control->opcode() == IrOpcode::kLoop) {
    if (index == 0) {
      if (CanLoopAllocate(node, zone())) {
        // If the loop can allocate, we start with an empty state at the
        // beginning.
        EnqueueUses(node, empty_state());
      } else {
        // If the loop cannot allocate, we can just propagate the state from
        // before the loop.
        EnqueueUses(node, state);
      }
    } else {
      // Do not revisit backedges.
    }
  } else {
    DCHECK_EQ(IrOpcode::kMerge, control->opcode());
    // Check if we already know about this pending merge.
    NodeId const id = node->id();
    auto it = pending_.find(id);
    if (it == pending_.end()) {
      // Insert a new pending merge.
      it = pending_.insert(std::make_pair(id, AllocationStates(zone()))).first;
    }
    // Add the next input state.
    it->second.push_back(state);
    // Check if states for all inputs are available by now.
    if (it->second.size() == static_cast<size_t>(input_count)) {
      // All inputs to this effect merge are done, merge the states given all
      // input constraints, drop the pending merge and enqueue uses of the
      // EffectPhi {node}.
      state = MergeStates(it->second);
      EnqueueUses(node, state);
      pending_.erase(it);
    }
  }
}

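// Enqueues all effect uses of {node} for processing with the given {state}.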
void MemoryOptimizer::EnqueueUses(Node* node, AllocationState const* state) {
  for (Edge const edge : node->use_edges()) {
    if (NodeProperties::IsEffectEdge(edge)) {
      EnqueueUse(edge.from(), edge.index(), state);
    }
  }
}

void MemoryOptimizer::EnqueueUse(Node* node, int index,
                                 AllocationState const* state) {
  if (node->opcode() == IrOpcode::kEffectPhi) {
    // An EffectPhi represents a merge of different effect chains, which
    // needs special handling depending on whether the merge is part of a
    // loop or just a normal control join.
    EnqueueMerge(node, index, state);
  } else {
    Token token = {node, state};
    tokens_.push(token);
  }
}

Graph* MemoryOptimizer::graph() const { return jsgraph()->graph(); }

Isolate* MemoryOptimizer::isolate() const { return jsgraph()->isolate(); }

CommonOperatorBuilder* MemoryOptimizer::common() const {
  return jsgraph()->common();
}

MachineOperatorBuilder* MemoryOptimizer::machine() const {
  return jsgraph()->machine();
}

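// Decides, based on the configured poisoning mitigation level and the load's
// sensitivity, whether a load has to be lowered to a PoisonedLoad.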
bool MemoryOptimizer::NeedsPoisoning(LoadSensitivity load_sensitivity) const {
  // Safe loads do not need poisoning.
  if (load_sensitivity == LoadSensitivity::kSafe) return false;

  switch (poisoning_level_) {
    case PoisoningMitigationLevel::kDontPoison:
      return false;
    case PoisoningMitigationLevel::kPoisonAll:
      return true;
    case PoisoningMitigationLevel::kPoisonCriticalOnly:
      return load_sensitivity == LoadSensitivity::kCritical;
  }
  UNREACHABLE();
}

}  // namespace compiler
}  // namespace internal
}  // namespace v8