Line data Source code
1 : // Copyright 2012 the V8 project authors. All rights reserved.
2 : // Use of this source code is governed by a BSD-style license that can be
3 : // found in the LICENSE file.
4 :
5 : #include "src/heap/heap.h"
6 :
7 : #include <unordered_map>
8 : #include <unordered_set>
9 :
10 : #include "src/accessors.h"
11 : #include "src/api.h"
12 : #include "src/assembler-inl.h"
13 : #include "src/ast/context-slot-cache.h"
14 : #include "src/base/bits.h"
15 : #include "src/base/once.h"
16 : #include "src/base/utils/random-number-generator.h"
17 : #include "src/bootstrapper.h"
18 : #include "src/code-stubs.h"
19 : #include "src/compilation-cache.h"
20 : #include "src/compiler-dispatcher/optimizing-compile-dispatcher.h"
21 : #include "src/conversions.h"
22 : #include "src/debug/debug.h"
23 : #include "src/deoptimizer.h"
24 : #include "src/feedback-vector.h"
25 : #include "src/global-handles.h"
26 : #include "src/heap/array-buffer-tracker-inl.h"
27 : #include "src/heap/barrier.h"
28 : #include "src/heap/code-stats.h"
29 : #include "src/heap/concurrent-marking.h"
30 : #include "src/heap/embedder-tracing.h"
31 : #include "src/heap/gc-idle-time-handler.h"
32 : #include "src/heap/gc-tracer.h"
33 : #include "src/heap/incremental-marking.h"
34 : #include "src/heap/item-parallel-job.h"
35 : #include "src/heap/mark-compact-inl.h"
36 : #include "src/heap/mark-compact.h"
37 : #include "src/heap/memory-reducer.h"
38 : #include "src/heap/object-stats.h"
39 : #include "src/heap/objects-visiting-inl.h"
40 : #include "src/heap/objects-visiting.h"
41 : #include "src/heap/remembered-set.h"
42 : #include "src/heap/scavenge-job.h"
43 : #include "src/heap/scavenger-inl.h"
44 : #include "src/heap/store-buffer.h"
45 : #include "src/interpreter/interpreter.h"
46 : #include "src/objects/object-macros.h"
47 : #include "src/objects/shared-function-info.h"
48 : #include "src/regexp/jsregexp.h"
49 : #include "src/runtime-profiler.h"
50 : #include "src/snapshot/natives.h"
51 : #include "src/snapshot/serializer-common.h"
52 : #include "src/snapshot/snapshot.h"
53 : #include "src/tracing/trace-event.h"
54 : #include "src/trap-handler/trap-handler.h"
55 : #include "src/unicode-inl.h"
56 : #include "src/utils-inl.h"
57 : #include "src/utils.h"
58 : #include "src/v8.h"
59 : #include "src/vm-state-inl.h"
60 :
61 : namespace v8 {
62 : namespace internal {
63 :
64 31 : void Heap::SetArgumentsAdaptorDeoptPCOffset(int pc_offset) {
65 : DCHECK_EQ(Smi::kZero, arguments_adaptor_deopt_pc_offset());
66 : set_arguments_adaptor_deopt_pc_offset(Smi::FromInt(pc_offset));
67 31 : }
68 :
69 62 : void Heap::SetConstructStubCreateDeoptPCOffset(int pc_offset) {
70 : // TODO(tebbi): Remove second half of DCHECK once
71 : // FLAG_harmony_restrict_constructor_return is gone.
72 : DCHECK(construct_stub_create_deopt_pc_offset() == Smi::kZero ||
73 : construct_stub_create_deopt_pc_offset() == Smi::FromInt(pc_offset));
74 : set_construct_stub_create_deopt_pc_offset(Smi::FromInt(pc_offset));
75 62 : }
76 :
77 62 : void Heap::SetConstructStubInvokeDeoptPCOffset(int pc_offset) {
78 : // TODO(tebbi): Remove second half of DCHECK once
79 : // FLAG_harmony_restrict_constructor_return is gone.
80 : DCHECK(construct_stub_invoke_deopt_pc_offset() == Smi::kZero ||
81 : construct_stub_invoke_deopt_pc_offset() == Smi::FromInt(pc_offset));
82 : set_construct_stub_invoke_deopt_pc_offset(Smi::FromInt(pc_offset));
83 62 : }
84 :
85 31 : void Heap::SetGetterStubDeoptPCOffset(int pc_offset) {
86 : DCHECK_EQ(Smi::kZero, getter_stub_deopt_pc_offset());
87 : set_getter_stub_deopt_pc_offset(Smi::FromInt(pc_offset));
88 31 : }
89 :
90 31 : void Heap::SetSetterStubDeoptPCOffset(int pc_offset) {
91 : DCHECK_EQ(Smi::kZero, setter_stub_deopt_pc_offset());
92 : set_setter_stub_deopt_pc_offset(Smi::FromInt(pc_offset));
93 31 : }
94 :
95 31 : void Heap::SetInterpreterEntryReturnPCOffset(int pc_offset) {
96 : DCHECK_EQ(Smi::kZero, interpreter_entry_return_pc_offset());
97 : set_interpreter_entry_return_pc_offset(Smi::FromInt(pc_offset));
98 31 : }
99 :
100 121 : void Heap::SetSerializedTemplates(FixedArray* templates) {
101 : DCHECK_EQ(empty_fixed_array(), serialized_templates());
102 : DCHECK(isolate()->serializer_enabled());
103 : set_serialized_templates(templates);
104 121 : }
105 :
106 121 : void Heap::SetSerializedGlobalProxySizes(FixedArray* sizes) {
107 : DCHECK_EQ(empty_fixed_array(), serialized_global_proxy_sizes());
108 : DCHECK(isolate()->serializer_enabled());
109 : set_serialized_global_proxy_sizes(sizes);
110 121 : }
111 :
// Two tuples compare equal when callback and data match. gc_type is
// deliberately not compared — presumably so a registered callback can be
// located for removal regardless of the GC types it was registered for
// (confirm against the add/remove callback call sites).
bool Heap::GCCallbackTuple::operator==(
    const Heap::GCCallbackTuple& other) const {
  return other.callback == callback && other.data == data;
}
116 :
117 0 : Heap::GCCallbackTuple& Heap::GCCallbackTuple::operator=(
118 : const Heap::GCCallbackTuple& other) {
119 70 : callback = other.callback;
120 70 : gc_type = other.gc_type;
121 70 : data = other.data;
122 0 : return *this;
123 : }
124 :
// Node of an intrusive singly-linked list describing a [start, end) range of
// externally registered strong root slots.
struct Heap::StrongRootsList {
  Object** start;  // First slot of the range.
  Object** end;    // One past the last slot of the range.
  StrongRootsList* next;  // Next registered range, or nullptr.
};
130 :
// Allocation observer that, every |step_size| allocated bytes, asks the heap
// to schedule an idle-time scavenge if one is needed.
class IdleScavengeObserver : public AllocationObserver {
 public:
  IdleScavengeObserver(Heap& heap, intptr_t step_size)
      : AllocationObserver(step_size), heap_(heap) {}

  // Invoked by the allocation-observer machinery after roughly |step_size|
  // bytes have been allocated since the previous step.
  void Step(int bytes_allocated, Address, size_t) override {
    heap_.ScheduleIdleScavengeIfNeeded(bytes_allocated);
  }

 private:
  Heap& heap_;  // The observed heap; outlives this observer.
};
143 :
// Constructs a heap with all subsystems unallocated and all statistics
// zeroed. Actual space/collector setup happens later (see ConfigureHeap and
// the SetUp path); this constructor must not allocate on the V8 heap.
Heap::Heap()
    : external_memory_(0),
      external_memory_limit_(kExternalAllocationSoftLimit),
      external_memory_at_last_mark_compact_(0),
      isolate_(nullptr),
      code_range_size_(0),
      // semispace_size_ should be a power of 2 and old_generation_size_ should
      // be a multiple of Page::kPageSize.
      max_semi_space_size_(8 * (kPointerSize / 4) * MB),
      initial_semispace_size_(kMinSemiSpaceSizeInKB * KB),
      max_old_generation_size_(700ul * (kPointerSize / 4) * MB),
      initial_max_old_generation_size_(max_old_generation_size_),
      initial_old_generation_size_(max_old_generation_size_ /
                                   kInitalOldGenerationLimitFactor),
      old_generation_size_configured_(false),
      // Variables set based on semispace_size_ and old_generation_size_ in
      // ConfigureHeap.
      // Will be 4 * reserved_semispace_size_ to ensure that young
      // generation can be aligned to its size.
      maximum_committed_(0),
      survived_since_last_expansion_(0),
      survived_last_scavenge_(0),
      always_allocate_scope_count_(0),
      memory_pressure_level_(MemoryPressureLevel::kNone),
      out_of_memory_callback_(nullptr),
      out_of_memory_callback_data_(nullptr),
      contexts_disposed_(0),
      number_of_disposed_maps_(0),
      new_space_(nullptr),
      old_space_(nullptr),
      code_space_(nullptr),
      map_space_(nullptr),
      lo_space_(nullptr),
      gc_state_(NOT_IN_GC),
      gc_post_processing_depth_(0),
      allocations_count_(0),
      raw_allocations_hash_(0),
      ms_count_(0),
      gc_count_(0),
      mmap_region_base_(0),
      remembered_unmapped_pages_index_(0),
#ifdef DEBUG
      allocation_timeout_(0),
#endif  // DEBUG
      old_generation_allocation_limit_(initial_old_generation_size_),
      inline_allocation_disabled_(false),
      tracer_(nullptr),
      promoted_objects_size_(0),
      promotion_ratio_(0),
      semi_space_copied_object_size_(0),
      previous_semi_space_copied_object_size_(0),
      semi_space_copied_rate_(0),
      nodes_died_in_new_space_(0),
      nodes_copied_in_new_space_(0),
      nodes_promoted_(0),
      maximum_size_scavenges_(0),
      last_idle_notification_time_(0.0),
      last_gc_time_(0.0),
      mark_compact_collector_(nullptr),
      minor_mark_compact_collector_(nullptr),
      memory_allocator_(nullptr),
      store_buffer_(nullptr),
      incremental_marking_(nullptr),
      concurrent_marking_(nullptr),
      gc_idle_time_handler_(nullptr),
      memory_reducer_(nullptr),
      live_object_stats_(nullptr),
      dead_object_stats_(nullptr),
      scavenge_job_(nullptr),
      parallel_scavenge_semaphore_(0),
      idle_scavenge_observer_(nullptr),
      new_space_allocation_counter_(0),
      old_generation_allocation_counter_at_last_gc_(0),
      old_generation_size_at_last_gc_(0),
      global_pretenuring_feedback_(kInitialFeedbackCapacity),
      is_marking_flag_(false),
      ring_buffer_full_(false),
      ring_buffer_end_(0),
      configured_(false),
      current_gc_flags_(Heap::kNoGCFlags),
      current_gc_callback_flags_(GCCallbackFlags::kNoGCCallbackFlags),
      external_string_table_(this),
      gc_callbacks_depth_(0),
      deserialization_complete_(false),
      strong_roots_list_(nullptr),
      heap_iterator_depth_(0),
      local_embedder_heap_tracer_(nullptr),
      fast_promotion_mode_(false),
      use_tasks_(true),
      force_oom_(false),
      delay_sweeper_tasks_for_testing_(false),
      pending_layout_change_object_(nullptr) {
  // Ensure old_generation_size_ is a multiple of kPageSize.
  DCHECK_EQ(0, max_old_generation_size_ & (Page::kPageSize - 1));

  memset(roots_, 0, sizeof(roots_[0]) * kRootListLength);
  set_native_contexts_list(nullptr);
  set_allocation_sites_list(Smi::kZero);
  set_encountered_weak_collections(Smi::kZero);
  // Put a dummy entry in the remembered pages so we can find the list in the
  // minidump even if there are no real unmapped pages.
  RememberUnmappedPage(nullptr, false);
}
247 :
248 21 : size_t Heap::Capacity() {
249 21 : if (!HasBeenSetUp()) return 0;
250 :
251 42 : return new_space_->Capacity() + OldGenerationCapacity();
252 : }
253 :
254 497882 : size_t Heap::OldGenerationCapacity() {
255 497882 : if (!HasBeenSetUp()) return 0;
256 :
257 1493646 : return old_space_->Capacity() + code_space_->Capacity() +
258 995764 : map_space_->Capacity() + lo_space_->SizeOfObjects();
259 : }
260 :
261 777647 : size_t Heap::CommittedOldGenerationMemory() {
262 777647 : if (!HasBeenSetUp()) return 0;
263 :
264 1555294 : return old_space_->CommittedMemory() + code_space_->CommittedMemory() +
265 1555294 : map_space_->CommittedMemory() + lo_space_->Size();
266 : }
267 :
268 664038 : size_t Heap::CommittedMemory() {
269 664038 : if (!HasBeenSetUp()) return 0;
270 :
271 664038 : return new_space_->CommittedMemory() + CommittedOldGenerationMemory();
272 : }
273 :
274 :
275 6 : size_t Heap::CommittedPhysicalMemory() {
276 6 : if (!HasBeenSetUp()) return 0;
277 :
278 12 : return new_space_->CommittedPhysicalMemory() +
279 12 : old_space_->CommittedPhysicalMemory() +
280 12 : code_space_->CommittedPhysicalMemory() +
281 6 : map_space_->CommittedPhysicalMemory() +
282 6 : lo_space_->CommittedPhysicalMemory();
283 : }
284 :
285 256200 : size_t Heap::CommittedMemoryExecutable() {
286 128100 : if (!HasBeenSetUp()) return 0;
287 :
288 128100 : return static_cast<size_t>(memory_allocator()->SizeExecutable());
289 : }
290 :
291 :
292 226269 : void Heap::UpdateMaximumCommitted() {
293 452538 : if (!HasBeenSetUp()) return;
294 :
295 226269 : const size_t current_committed_memory = CommittedMemory();
296 226269 : if (current_committed_memory > maximum_committed_) {
297 82497 : maximum_committed_ = current_committed_memory;
298 : }
299 : }
300 :
301 27 : size_t Heap::Available() {
302 27 : if (!HasBeenSetUp()) return 0;
303 :
304 : size_t total = 0;
305 : AllSpaces spaces(this);
306 162 : for (Space* space = spaces.next(); space != nullptr; space = spaces.next()) {
307 135 : total += space->Available();
308 : }
309 : return total;
310 : }
311 :
312 :
313 18 : bool Heap::HasBeenSetUp() {
314 15148029 : return old_space_ != nullptr && code_space_ != nullptr &&
315 15148029 : map_space_ != nullptr && lo_space_ != nullptr;
316 : }
317 :
318 :
319 86452 : GarbageCollector Heap::SelectGarbageCollector(AllocationSpace space,
320 59310 : const char** reason) {
321 : // Is global GC requested?
322 86452 : if (space != NEW_SPACE) {
323 112846 : isolate_->counters()->gc_compactor_caused_by_request()->Increment();
324 56423 : *reason = "GC in old space requested";
325 56423 : return MARK_COMPACTOR;
326 : }
327 :
328 30029 : if (FLAG_gc_global || (FLAG_stress_compaction && (gc_count_ & 1) != 0)) {
329 371 : *reason = "GC in old space forced by flags";
330 371 : return MARK_COMPACTOR;
331 : }
332 :
333 30721 : if (incremental_marking()->NeedsFinalization() &&
334 1063 : AllocationLimitOvershotByLargeMargin()) {
335 6 : *reason = "Incremental marking needs finalization";
336 6 : return MARK_COMPACTOR;
337 : }
338 :
339 : // Is there enough space left in OLD to guarantee that a scavenge can
340 : // succeed?
341 : //
342 : // Note that MemoryAllocator->MaxAvailable() undercounts the memory available
343 : // for object promotion. It counts only the bytes that the memory
344 : // allocator has not yet allocated from the OS and assigned to any space,
345 : // and does not count available bytes already in the old space or code
346 : // space. Undercounting is safe---we may get an unrequested full GC when
347 : // a scavenge would have succeeded.
348 29652 : if (memory_allocator()->MaxAvailable() <= new_space_->Size()) {
349 : isolate_->counters()
350 : ->gc_compactor_caused_by_oldspace_exhaustion()
351 0 : ->Increment();
352 0 : *reason = "scavenge might not succeed";
353 0 : return MARK_COMPACTOR;
354 : }
355 :
356 : // Default
357 29652 : *reason = nullptr;
358 29652 : return YoungGenerationCollector();
359 : }
360 :
// Sets the heap's current GC phase.
void Heap::SetGCState(HeapState state) {
  gc_state_ = state;
}
364 :
365 : // TODO(1238405): Combine the infrastructure for --heap-stats and
366 : // --log-gc to avoid the complicated preprocessor and flag testing.
// Logs NewSpace statistics ahead of a GC when --heap-stats or --log-gc is
// enabled; companion to ReportStatisticsAfterGC().
void Heap::ReportStatisticsBeforeGC() {
  // Heap::ReportHeapStatistics will also log NewSpace statistics when
  // compiled --log-gc is set. The following logic is used to avoid
  // double logging.
#ifdef DEBUG
  if (FLAG_heap_stats || FLAG_log_gc) new_space_->CollectStatistics();
  if (FLAG_heap_stats) {
    ReportHeapStatistics("Before GC");
  } else if (FLAG_log_gc) {
    new_space_->ReportStatistics();
  }
  if (FLAG_heap_stats || FLAG_log_gc) new_space_->ClearHistograms();
#else
  if (FLAG_log_gc) {
    new_space_->CollectStatistics();
    new_space_->ReportStatistics();
    new_space_->ClearHistograms();
  }
#endif  // DEBUG
}
387 :
388 :
// Prints a one-line-per-space summary of used/available/committed memory (in
// KB) plus external memory and total GC time. No-op unless
// --trace-gc-verbose is enabled.
void Heap::PrintShortHeapStatistics() {
  if (!FLAG_trace_gc_verbose) return;
  PrintIsolate(isolate_, "Memory allocator, used: %6" PRIuS
                         " KB,"
                         " available: %6" PRIuS " KB\n",
               memory_allocator()->Size() / KB,
               memory_allocator()->Available() / KB);
  PrintIsolate(isolate_, "New space, used: %6" PRIuS
                         " KB"
                         ", available: %6" PRIuS
                         " KB"
                         ", committed: %6" PRIuS " KB\n",
               new_space_->Size() / KB, new_space_->Available() / KB,
               new_space_->CommittedMemory() / KB);
  PrintIsolate(isolate_, "Old space, used: %6" PRIuS
                         " KB"
                         ", available: %6" PRIuS
                         " KB"
                         ", committed: %6" PRIuS " KB\n",
               old_space_->SizeOfObjects() / KB, old_space_->Available() / KB,
               old_space_->CommittedMemory() / KB);
  PrintIsolate(isolate_, "Code space, used: %6" PRIuS
                         " KB"
                         ", available: %6" PRIuS
                         " KB"
                         ", committed: %6" PRIuS "KB\n",
               code_space_->SizeOfObjects() / KB, code_space_->Available() / KB,
               code_space_->CommittedMemory() / KB);
  PrintIsolate(isolate_, "Map space, used: %6" PRIuS
                         " KB"
                         ", available: %6" PRIuS
                         " KB"
                         ", committed: %6" PRIuS " KB\n",
               map_space_->SizeOfObjects() / KB, map_space_->Available() / KB,
               map_space_->CommittedMemory() / KB);
  PrintIsolate(isolate_, "Large object space, used: %6" PRIuS
                         " KB"
                         ", available: %6" PRIuS
                         " KB"
                         ", committed: %6" PRIuS " KB\n",
               lo_space_->SizeOfObjects() / KB, lo_space_->Available() / KB,
               lo_space_->CommittedMemory() / KB);
  PrintIsolate(isolate_, "All spaces, used: %6" PRIuS
                         " KB"
                         ", available: %6" PRIuS
                         " KB"
                         ", committed: %6" PRIuS "KB\n",
               this->SizeOfObjects() / KB, this->Available() / KB,
               this->CommittedMemory() / KB);
  PrintIsolate(isolate_, "External memory reported: %6" PRId64 " KB\n",
               external_memory_ / KB);
  PrintIsolate(isolate_, "External memory global %zu KB\n",
               external_memory_callback_() / KB);
  PrintIsolate(isolate_, "Total time spent in GC : %.1f ms\n",
               total_gc_time_ms_);
}
445 :
446 : // TODO(1238405): Combine the infrastructure for --heap-stats and
447 : // --log-gc to avoid the complicated preprocessor and flag testing.
// Logs NewSpace statistics after a GC and flushes use-counter events that
// were deferred via IncrementDeferredCount() while GC was in progress.
void Heap::ReportStatisticsAfterGC() {
  // Similar to the before GC, we use some complicated logic to ensure that
  // NewSpace statistics are logged exactly once when --log-gc is turned on.
#if defined(DEBUG)
  if (FLAG_heap_stats) {
    new_space_->CollectStatistics();
    ReportHeapStatistics("After GC");
  } else if (FLAG_log_gc) {
    new_space_->ReportStatistics();
  }
#else
  if (FLAG_log_gc) new_space_->ReportStatistics();
#endif  // DEBUG
  // Replay each deferred use-counter event now that embedder callbacks are
  // allowed again.
  for (int i = 0; i < static_cast<int>(v8::Isolate::kUseCounterFeatureCount);
       ++i) {
    int count = deferred_counters_[i];
    deferred_counters_[i] = 0;
    while (count > 0) {
      count--;
      isolate()->CountUsage(static_cast<v8::Isolate::UseCounterFeature>(i));
    }
  }
}
471 :
472 0 : void Heap::AddRetainingPathTarget(Handle<HeapObject> object) {
473 0 : if (!FLAG_track_retaining_path) {
474 0 : PrintF("Retaining path tracking requires --trace-retaining-path\n");
475 : } else {
476 : Handle<WeakFixedArray> array = WeakFixedArray::Add(
477 0 : handle(retaining_path_targets(), isolate()), object);
478 : set_retaining_path_targets(*array);
479 : }
480 0 : }
481 :
482 0 : bool Heap::IsRetainingPathTarget(HeapObject* object) {
483 : WeakFixedArray::Iterator it(retaining_path_targets());
484 : HeapObject* target;
485 0 : while ((target = it.Next<HeapObject>()) != nullptr) {
486 0 : if (target == object) return true;
487 : }
488 : return false;
489 : }
490 :
namespace {
// Maps a Root enum value to a human-readable description for retaining-path
// dumps. The ROOT_ID_LIST entries expand to one case per named root.
const char* RootToString(Root root) {
  switch (root) {
#define ROOT_CASE(root_id, ignore, description) \
  case Root::root_id:                           \
    return description;
    ROOT_ID_LIST(ROOT_CASE)
#undef ROOT_CASE
    case Root::kCodeFlusher:
      return "(Code flusher)";
    case Root::kPartialSnapshotCache:
      return "(Partial snapshot cache)";
    case Root::kWeakCollections:
      return "(Weak collections)";
    case Root::kWrapperTracing:
      return "(Wrapper tracing)";
    case Root::kUnknown:
      return "(Unknown)";
  }
  UNREACHABLE();
  return nullptr;
}
}  // namespace
514 :
515 0 : void Heap::PrintRetainingPath(HeapObject* target) {
516 0 : PrintF("\n\n\n");
517 0 : PrintF("#################################################\n");
518 0 : PrintF("Retaining path for %p:\n", static_cast<void*>(target));
519 0 : HeapObject* object = target;
520 : std::vector<HeapObject*> retaining_path;
521 : Root root = Root::kUnknown;
522 : while (true) {
523 0 : retaining_path.push_back(object);
524 0 : if (retainer_.count(object)) {
525 0 : object = retainer_[object];
526 : } else {
527 0 : if (retaining_root_.count(object)) {
528 0 : root = retaining_root_[object];
529 : }
530 : break;
531 : }
532 : }
533 0 : int distance = static_cast<int>(retaining_path.size());
534 0 : for (auto object : retaining_path) {
535 0 : PrintF("\n");
536 0 : PrintF("^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n");
537 0 : PrintF("Distance from root %d: ", distance);
538 0 : object->ShortPrint();
539 0 : PrintF("\n");
540 : #ifdef OBJECT_PRINT
541 : object->Print();
542 : PrintF("\n");
543 : #endif
544 0 : --distance;
545 : }
546 0 : PrintF("\n");
547 0 : PrintF("^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n");
548 0 : PrintF("Root: %s\n", RootToString(root));
549 0 : PrintF("-------------------------------------------------\n");
550 0 : }
551 :
552 0 : void Heap::AddRetainer(HeapObject* retainer, HeapObject* object) {
553 0 : retainer_[object] = retainer;
554 0 : if (IsRetainingPathTarget(object)) {
555 0 : PrintRetainingPath(object);
556 : }
557 0 : }
558 :
559 0 : void Heap::AddRetainingRoot(Root root, HeapObject* object) {
560 0 : retaining_root_[object] = root;
561 0 : if (IsRetainingPathTarget(object)) {
562 0 : PrintRetainingPath(object);
563 : }
564 0 : }
565 :
// Defers a use-counter event; the accumulated counts are replayed to the
// embedder in ReportStatisticsAfterGC().
void Heap::IncrementDeferredCount(v8::Isolate::UseCounterFeature feature) {
  deferred_counters_[feature]++;
}
569 :
// Uncommits the memory backing new space's from-semispace; returns whether
// the operation succeeded.
bool Heap::UncommitFromSpace() { return new_space_->UncommitFromSpace(); }
571 :
// Runs at the start of every GC cycle: bumps the GC count, resets per-GC
// statistics, updates the committed-memory high-water mark, tracks scavenges
// at maximum new-space capacity (input to pretenuring), and clears
// retaining-path bookkeeping.
void Heap::GarbageCollectionPrologue() {
  TRACE_GC(tracer(), GCTracer::Scope::HEAP_PROLOGUE);
  {
    AllowHeapAllocation for_the_first_part_of_prologue;
    gc_count_++;

#ifdef VERIFY_HEAP
    if (FLAG_verify_heap) {
      Verify();
    }
#endif
  }

  // Reset GC statistics.
  promoted_objects_size_ = 0;
  previous_semi_space_copied_object_size_ = semi_space_copied_object_size_;
  semi_space_copied_object_size_ = 0;
  nodes_died_in_new_space_ = 0;
  nodes_copied_in_new_space_ = 0;
  nodes_promoted_ = 0;

  UpdateMaximumCommitted();

#ifdef DEBUG
  DCHECK(!AllowHeapAllocation::IsAllowed() && gc_state_ == NOT_IN_GC);

  if (FLAG_gc_verbose) Print();

  ReportStatisticsBeforeGC();
#endif  // DEBUG

  // Count consecutive scavenges at maximum new-space capacity; the counter
  // is consumed by the pretenuring heuristics.
  if (new_space_->IsAtMaximumCapacity()) {
    maximum_size_scavenges_++;
  } else {
    maximum_size_scavenges_ = 0;
  }
  CheckNewSpaceExpansionCriteria();
  UpdateNewSpaceAllocationCounter();
  if (FLAG_track_retaining_path) {
    retainer_.clear();
    retaining_root_.clear();
  }
}
615 :
616 558506 : size_t Heap::SizeOfObjects() {
617 : size_t total = 0;
618 : AllSpaces spaces(this);
619 3351023 : for (Space* space = spaces.next(); space != nullptr; space = spaces.next()) {
620 2792516 : total += space->SizeOfObjects();
621 : }
622 558504 : return total;
623 : }
624 :
625 :
626 0 : const char* Heap::GetSpaceName(int idx) {
627 0 : switch (idx) {
628 : case NEW_SPACE:
629 : return "new_space";
630 : case OLD_SPACE:
631 0 : return "old_space";
632 : case MAP_SPACE:
633 0 : return "map_space";
634 : case CODE_SPACE:
635 0 : return "code_space";
636 : case LO_SPACE:
637 0 : return "large_object_space";
638 : default:
639 0 : UNREACHABLE();
640 : }
641 : return nullptr;
642 : }
643 :
// Installs |value| as the code-stubs entry of the root list.
void Heap::SetRootCodeStubs(UnseededNumberDictionary* value) {
  roots_[kCodeStubsRootIndex] = value;
}
647 :
648 54968 : void Heap::RepairFreeListsAfterDeserialization() {
649 : PagedSpaces spaces(this);
650 219872 : for (PagedSpace* space = spaces.next(); space != nullptr;
651 : space = spaces.next()) {
652 164904 : space->RepairFreeListsAfterDeserialization();
653 : }
654 54968 : }
655 :
// Folds a worker's local pretenuring feedback into the global feedback map.
// Sites may have been moved by the GC (forwarding pointers are followed) or
// invalidated (non-site / zombie entries are skipped).
void Heap::MergeAllocationSitePretenuringFeedback(
    const PretenuringFeedbackMap& local_pretenuring_feedback) {
  AllocationSite* site = nullptr;
  for (auto& site_and_count : local_pretenuring_feedback) {
    site = site_and_count.first;
    MapWord map_word = site_and_count.first->map_word();
    if (map_word.IsForwardingAddress()) {
      // The site was evacuated during this GC; use its new location.
      site = AllocationSite::cast(map_word.ToForwardingAddress());
    }

    // We have not validated the allocation site yet, since we have not
    // dereferenced the site during collecting information.
    // This is an inlined check of AllocationMemento::IsValid.
    if (!site->IsAllocationSite() || site->IsZombie()) continue;

    const int value = static_cast<int>(site_and_count.second);
    DCHECK_LT(0, value);
    if (site->IncrementMementoFoundCount(value)) {
      // For sites in the global map the count is accessed through the site.
      global_pretenuring_feedback_.insert(std::make_pair(site, 0));
    }
  }
}
679 :
// RAII scope that drains the store buffer into the remembered set and keeps
// the buffer in IN_GC mode for the scope's duration; restores NOT_IN_GC mode
// (after checking the buffer stayed empty) on destruction.
class Heap::SkipStoreBufferScope {
 public:
  explicit SkipStoreBufferScope(StoreBuffer* store_buffer)
      : store_buffer_(store_buffer) {
    store_buffer_->MoveAllEntriesToRememberedSet();
    store_buffer_->SetMode(StoreBuffer::IN_GC);
  }

  ~SkipStoreBufferScope() {
    DCHECK(store_buffer_->Empty());
    store_buffer_->SetMode(StoreBuffer::NOT_IN_GC);
  }

 private:
  StoreBuffer* store_buffer_;  // Not owned; must outlive this scope.
};
696 :
namespace {
// Advances |site|'s pretenuring decision based on the memento hit |ratio|.
// Only undecided/maybe-tenure sites transition; a transition to kTenure is
// made only while scavenges run at maximum new-space capacity. Returns true
// when dependent code must be deoptimized (i.e. on the kTenure transition).
inline bool MakePretenureDecision(
    AllocationSite* site, AllocationSite::PretenureDecision current_decision,
    double ratio, bool maximum_size_scavenge) {
  // Here we just allow state transitions from undecided or maybe tenure
  // to don't tenure, maybe tenure, or tenure.
  if ((current_decision == AllocationSite::kUndecided ||
       current_decision == AllocationSite::kMaybeTenure)) {
    if (ratio >= AllocationSite::kPretenureRatio) {
      // We just transition into tenure state when the semi-space was at
      // maximum capacity.
      if (maximum_size_scavenge) {
        site->set_deopt_dependent_code(true);
        site->set_pretenure_decision(AllocationSite::kTenure);
        // Currently we just need to deopt when we make a state transition to
        // tenure.
        return true;
      }
      site->set_pretenure_decision(AllocationSite::kMaybeTenure);
    } else {
      site->set_pretenure_decision(AllocationSite::kDontTenure);
    }
  }
  return false;
}

// Digests one site's memento counters into a pretenuring decision, then
// resets the counters for the next GC. Returns true if dependent code needs
// to be deoptimized.
inline bool DigestPretenuringFeedback(Isolate* isolate, AllocationSite* site,
                                      bool maximum_size_scavenge) {
  bool deopt = false;
  int create_count = site->memento_create_count();
  int found_count = site->memento_found_count();
  bool minimum_mementos_created =
      create_count >= AllocationSite::kPretenureMinimumCreated;
  // NOTE(review): with --trace-pretenuring-statistics and create_count == 0
  // this divides by zero (IEEE inf/NaN); the ratio is only acted upon when
  // enough mementos exist, but confirm the tracing output is acceptable.
  double ratio = minimum_mementos_created || FLAG_trace_pretenuring_statistics
                     ? static_cast<double>(found_count) / create_count
                     : 0.0;
  AllocationSite::PretenureDecision current_decision =
      site->pretenure_decision();

  if (minimum_mementos_created) {
    deopt = MakePretenureDecision(site, current_decision, ratio,
                                  maximum_size_scavenge);
  }

  if (FLAG_trace_pretenuring_statistics) {
    PrintIsolate(isolate,
                 "pretenuring: AllocationSite(%p): (created, found, ratio) "
                 "(%d, %d, %f) %s => %s\n",
                 static_cast<void*>(site), create_count, found_count, ratio,
                 site->PretenureDecisionName(current_decision),
                 site->PretenureDecisionName(site->pretenure_decision()));
  }

  // Clear feedback calculation fields until the next gc.
  site->set_memento_found_count(0);
  site->set_memento_create_count(0);
  return deopt;
}
}  // namespace
// Drops |site| from the global pretenuring feedback map.
void Heap::RemoveAllocationSitePretenuringFeedback(AllocationSite* site) {
  global_pretenuring_feedback_.erase(site);
}
760 :
// True when new space is at maximum capacity but no maximum-capacity
// scavenges have been counted; ProcessPretenuringFeedback() uses this to
// deopt sites stuck in the maybe-tenure state.
bool Heap::DeoptMaybeTenuredAllocationSites() {
  return new_space_->IsAtMaximumCapacity() && maximum_size_scavenges_ == 0;
}
764 :
// Digests all pretenuring feedback gathered during the GC: updates every
// recorded allocation site's decision, optionally marks maybe-tenured sites
// for deopt when scavenges stop running at maximum capacity, requests
// deoptimization where decisions changed, and resets the feedback storage.
void Heap::ProcessPretenuringFeedback() {
  bool trigger_deoptimization = false;
  if (FLAG_allocation_site_pretenuring) {
    int tenure_decisions = 0;
    int dont_tenure_decisions = 0;
    int allocation_mementos_found = 0;
    int allocation_sites = 0;
    int active_allocation_sites = 0;

    AllocationSite* site = nullptr;

    // Step 1: Digest feedback for recorded allocation sites.
    bool maximum_size_scavenge = MaximumSizeScavenge();
    for (auto& site_and_count : global_pretenuring_feedback_) {
      allocation_sites++;
      site = site_and_count.first;
      // Count is always accessed through the site.
      DCHECK_EQ(0, site_and_count.second);
      int found_count = site->memento_found_count();
      // An entry in the storage does not imply that the count is > 0 because
      // allocation sites might have been reset due to too many objects dying
      // in old space.
      if (found_count > 0) {
        DCHECK(site->IsAllocationSite());
        active_allocation_sites++;
        allocation_mementos_found += found_count;
        if (DigestPretenuringFeedback(isolate_, site, maximum_size_scavenge)) {
          trigger_deoptimization = true;
        }
        if (site->GetPretenureMode() == TENURED) {
          tenure_decisions++;
        } else {
          dont_tenure_decisions++;
        }
      }
    }

    // Step 2: Deopt maybe tenured allocation sites if necessary.
    bool deopt_maybe_tenured = DeoptMaybeTenuredAllocationSites();
    if (deopt_maybe_tenured) {
      Object* list_element = allocation_sites_list();
      while (list_element->IsAllocationSite()) {
        site = AllocationSite::cast(list_element);
        DCHECK(site->IsAllocationSite());
        allocation_sites++;
        if (site->IsMaybeTenure()) {
          site->set_deopt_dependent_code(true);
          trigger_deoptimization = true;
        }
        list_element = site->weak_next();
      }
    }

    if (trigger_deoptimization) {
      isolate_->stack_guard()->RequestDeoptMarkedAllocationSites();
    }

    if (FLAG_trace_pretenuring_statistics &&
        (allocation_mementos_found > 0 || tenure_decisions > 0 ||
         dont_tenure_decisions > 0)) {
      PrintIsolate(isolate(),
                   "pretenuring: deopt_maybe_tenured=%d visited_sites=%d "
                   "active_sites=%d "
                   "mementos=%d tenured=%d not_tenured=%d\n",
                   deopt_maybe_tenured ? 1 : 0, allocation_sites,
                   active_allocation_sites, allocation_mementos_found,
                   tenure_decisions, dont_tenure_decisions);
    }

    // Start the next GC cycle with a fresh, pre-sized feedback map.
    global_pretenuring_feedback_.clear();
    global_pretenuring_feedback_.reserve(kInitialFeedbackCapacity);
  }
}
838 :
839 :
840 32 : void Heap::DeoptMarkedAllocationSites() {
841 : // TODO(hpayer): If iterating over the allocation sites list becomes a
842 : // performance issue, use a cache data structure in heap instead.
843 : Object* list_element = allocation_sites_list();
844 273 : while (list_element->IsAllocationSite()) {
845 : AllocationSite* site = AllocationSite::cast(list_element);
846 209 : if (site->deopt_dependent_code()) {
847 : site->dependent_code()->MarkCodeForDeoptimization(
848 186 : isolate_, DependentCode::kAllocationSiteTenuringChangedGroup);
849 93 : site->set_deopt_dependent_code(false);
850 : }
851 : list_element = site->weak_next();
852 : }
853 32 : Deoptimizer::DeoptimizeMarkedCode(isolate_);
854 32 : }
855 :
856 :
 : // Runs after every garbage collection: optional heap zapping/verification,
 : // then updates a large set of isolate counters (sizes, committed memory,
 : // fragmentation per space) and finally considers shrinking new space.
857 2772321 : void Heap::GarbageCollectionEpilogue() {
858 345808 : TRACE_GC(tracer(), GCTracer::Scope::HEAP_EPILOGUE);
859 : // In release mode, we only zap the from space under heap verification.
860 : if (Heap::ShouldZapGarbage()) {
861 : ZapFromSpace();
862 : }
863 :
864 : #ifdef VERIFY_HEAP
865 : if (FLAG_verify_heap) {
866 : Verify();
867 : }
868 : #endif
869 :
 : // Counter updates below may allocate; GC proper is over at this point.
870 : AllowHeapAllocation for_the_rest_of_the_epilogue;
871 :
872 : #ifdef DEBUG
873 : if (FLAG_print_global_handles) isolate_->global_handles()->Print();
874 : if (FLAG_print_handles) PrintHandles();
875 : if (FLAG_gc_verbose) Print();
876 : if (FLAG_code_stats) ReportCodeStatistics("After GC");
877 : if (FLAG_check_handle_count) CheckHandleCount();
878 : #endif
879 :
880 86452 : UpdateMaximumCommitted();
881 :
882 : isolate_->counters()->alive_after_last_gc()->Set(
883 172904 : static_cast<int>(SizeOfObjects()));
884 :
885 : isolate_->counters()->string_table_capacity()->Set(
886 172904 : string_table()->Capacity());
887 : isolate_->counters()->number_of_symbols()->Set(
888 86452 : string_table()->NumberOfElements());
889 :
890 86452 : if (CommittedMemory() > 0) {
 : // Fragmentation sample: percentage of committed memory not used by
 : // live objects.
891 : isolate_->counters()->external_fragmentation_total()->AddSample(
892 172904 : static_cast<int>(100 - (SizeOfObjects() * 100.0) / CommittedMemory()));
893 :
894 : isolate_->counters()->heap_sample_total_committed()->AddSample(
895 172904 : static_cast<int>(CommittedMemory() / KB));
896 : isolate_->counters()->heap_sample_total_used()->AddSample(
897 172903 : static_cast<int>(SizeOfObjects() / KB));
898 : isolate_->counters()->heap_sample_map_space_committed()->AddSample(
899 172902 : static_cast<int>(map_space()->CommittedMemory() / KB));
900 : isolate_->counters()->heap_sample_code_space_committed()->AddSample(
901 172902 : static_cast<int>(code_space()->CommittedMemory() / KB));
902 :
903 : isolate_->counters()->heap_sample_maximum_committed()->AddSample(
904 172902 : static_cast<int>(MaximumCommittedMemory() / KB));
905 : }
906 :
 : // Per-space counter/fragmentation helpers, expanded for each space below.
907 : #define UPDATE_COUNTERS_FOR_SPACE(space) \
908 : isolate_->counters()->space##_bytes_available()->Set( \
909 : static_cast<int>(space()->Available())); \
910 : isolate_->counters()->space##_bytes_committed()->Set( \
911 : static_cast<int>(space()->CommittedMemory())); \
912 : isolate_->counters()->space##_bytes_used()->Set( \
913 : static_cast<int>(space()->SizeOfObjects()));
914 : #define UPDATE_FRAGMENTATION_FOR_SPACE(space) \
915 : if (space()->CommittedMemory() > 0) { \
916 : isolate_->counters()->external_fragmentation_##space()->AddSample( \
917 : static_cast<int>(100 - \
918 : (space()->SizeOfObjects() * 100.0) / \
919 : space()->CommittedMemory())); \
920 : }
921 : #define UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(space) \
922 : UPDATE_COUNTERS_FOR_SPACE(space) \
923 : UPDATE_FRAGMENTATION_FOR_SPACE(space)
924 :
925 518706 : UPDATE_COUNTERS_FOR_SPACE(new_space)
926 864519 : UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(old_space)
927 864520 : UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(code_space)
928 864520 : UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(map_space)
929 613960 : UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(lo_space)
930 : #undef UPDATE_COUNTERS_FOR_SPACE
931 : #undef UPDATE_FRAGMENTATION_FOR_SPACE
932 : #undef UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE
933 :
934 : #ifdef DEBUG
935 : ReportStatisticsAfterGC();
936 : #endif // DEBUG
937 :
938 86452 : last_gc_time_ = MonotonicallyIncreasingTimeInMs();
939 :
940 : {
941 345808 : TRACE_GC(tracer(), GCTracer::Scope::HEAP_EPILOGUE_REDUCE_NEW_SPACE);
942 172904 : ReduceNewSpaceSize();
943 86452 : }
944 86452 : }
945 :
946 :
 : // Replaces AbstractCode references in captured stack traces with resolved
 : // source positions, so the code objects become collectible. Stack-trace
 : // entries appear to be stored in groups of 4 starting at index 1, with the
 : // code at offset +2 and the code offset at +3 (assumed layout — matches the
 : // j += 4 stride; confirm against the stack-trace capture code).
947 56800 : void Heap::PreprocessStackTraces() {
948 : WeakFixedArray::Iterator iterator(weak_stack_trace_list());
949 : FixedArray* elements;
950 56800 : while ((elements = iterator.Next<FixedArray>()) != nullptr) {
951 0 : for (int j = 1; j < elements->length(); j += 4) {
952 0 : Object* maybe_code = elements->get(j + 2);
953 : // If GC happens while adding a stack trace to the weak fixed array,
954 : // which has been copied into a larger backing store, we may run into
955 : // a stack trace that has already been preprocessed. Guard against this.
956 0 : if (!maybe_code->IsAbstractCode()) break;
957 : AbstractCode* abstract_code = AbstractCode::cast(maybe_code);
958 0 : int offset = Smi::ToInt(elements->get(j + 3));
959 0 : int pos = abstract_code->SourcePosition(offset);
 : // Overwrite the code slot with the resolved source position (a Smi).
960 : elements->set(j + 2, Smi::FromInt(pos));
961 : }
962 : }
963 : // We must not compact the weak fixed list here, as we may be in the middle
964 : // of writing to it, when the GC triggered. Instead, we reset the root value.
965 : set_weak_stack_trace_list(Smi::kZero);
966 56800 : }
967 :
968 :
 : // RAII scope that counts nesting of GC callback invocation. CheckReenter()
 : // is true only for the outermost scope, which prevents user GC callbacks
 : // from being re-entered when they themselves trigger a nested GC.
969 : class GCCallbacksScope {
970 : public:
971 : explicit GCCallbacksScope(Heap* heap) : heap_(heap) {
972 202975 : heap_->gc_callbacks_depth_++;
973 : }
974 202976 : ~GCCallbacksScope() { heap_->gc_callbacks_depth_--; }
975 :
 : // True iff this is the outermost (depth == 1) callback scope.
976 : bool CheckReenter() { return heap_->gc_callbacks_depth_ == 1; }
977 :
978 : private:
979 : Heap* heap_;
980 : };
981 :
982 :
 : // Services a pending GC request raised via the stack guard, in priority
 : // order: memory pressure first, then an incremental-marking completion
 : // request (full GC), then a finalization request. The request type is reset
 : // before acting so it is handled at most once.
983 3720 : void Heap::HandleGCRequest() {
984 1860 : if (HighMemoryPressure()) {
985 : incremental_marking()->reset_request_type();
986 5 : CheckMemoryPressure();
987 1855 : } else if (incremental_marking()->request_type() ==
988 : IncrementalMarking::COMPLETE_MARKING) {
989 : incremental_marking()->reset_request_type();
990 : CollectAllGarbage(current_gc_flags_,
991 : GarbageCollectionReason::kFinalizeMarkingViaStackGuard,
992 862 : current_gc_callback_flags_);
993 993 : } else if (incremental_marking()->request_type() ==
994 958 : IncrementalMarking::FINALIZATION &&
995 1951 : incremental_marking()->IsMarking() &&
996 958 : !incremental_marking()->finalize_marking_completed()) {
997 : incremental_marking()->reset_request_type();
998 : FinalizeIncrementalMarking(
999 958 : GarbageCollectionReason::kFinalizeMarkingViaStackGuard);
1000 : }
1001 1860 : }
1002 :
1003 :
 : // Forwards to the scavenge job, which may schedule an idle-time scavenge
 : // task based on the number of bytes allocated since the last scavenge.
1004 0 : void Heap::ScheduleIdleScavengeIfNeeded(int bytes_allocated) {
1005 53650 : scavenge_job_->ScheduleIdleTaskIfNeeded(this, bytes_allocated);
1006 0 : }
1007 :
 : // Performs an incremental-marking finalization step, bracketed by the
 : // embedder's GC prologue/epilogue callbacks (invoked only at the outermost
 : // GCCallbacksScope nesting level to avoid callback re-entrancy).
1008 75180 : void Heap::FinalizeIncrementalMarking(GarbageCollectionReason gc_reason) {
1009 15036 : if (FLAG_trace_incremental_marking) {
1010 : isolate()->PrintWithTimestamp(
1011 : "[IncrementalMarking] (%s).\n",
1012 0 : Heap::GarbageCollectionReasonToString(gc_reason));
1013 : }
1014 :
1015 : HistogramTimerScope incremental_marking_scope(
1016 15036 : isolate()->counters()->gc_incremental_marking_finalize());
1017 45108 : TRACE_EVENT0("v8", "V8.GCIncrementalMarkingFinalize");
1018 60144 : TRACE_GC(tracer(), GCTracer::Scope::MC_INCREMENTAL_FINALIZE);
1019 :
1020 : {
1021 : GCCallbacksScope scope(this);
1022 15036 : if (scope.CheckReenter()) {
1023 : AllowHeapAllocation allow_allocation;
1024 60144 : TRACE_GC(tracer(), GCTracer::Scope::MC_INCREMENTAL_EXTERNAL_PROLOGUE);
1025 30072 : VMState<EXTERNAL> state(isolate_);
1026 15036 : HandleScope handle_scope(isolate_);
1027 30072 : CallGCPrologueCallbacks(kGCTypeIncrementalMarking, kNoGCCallbackFlags);
1028 : }
1029 : }
 : // The actual finalization work happens between the two callback phases.
1030 15036 : incremental_marking()->FinalizeIncrementally();
1031 : {
1032 : GCCallbacksScope scope(this);
1033 15036 : if (scope.CheckReenter()) {
1034 : AllowHeapAllocation allow_allocation;
1035 60144 : TRACE_GC(tracer(), GCTracer::Scope::MC_INCREMENTAL_EXTERNAL_EPILOGUE);
1036 30072 : VMState<EXTERNAL> state(isolate_);
1037 15036 : HandleScope handle_scope(isolate_);
1038 30072 : CallGCEpilogueCallbacks(kGCTypeIncrementalMarking, kNoGCCallbackFlags);
1039 : }
1040 : }
1041 15036 : }
1042 :
1043 :
 : // Selects the histogram timer used to time this GC cycle: scavenger for
 : // young-generation collections; for full GCs, "finalize" variants when
 : // incremental marking is already in progress, plain compactor otherwise.
1044 162140 : HistogramTimer* Heap::GCTypeTimer(GarbageCollector collector) {
1045 86452 : if (IsYoungGenerationCollector(collector)) {
1046 59304 : return isolate_->counters()->gc_scavenger();
1047 : } else {
1048 56800 : if (!incremental_marking()->IsStopped()) {
1049 18888 : if (ShouldReduceMemory()) {
1050 364 : return isolate_->counters()->gc_finalize_reduce_memory();
1051 : } else {
1052 37412 : return isolate_->counters()->gc_finalize();
1053 : }
1054 : } else {
1055 75824 : return isolate_->counters()->gc_compactor();
1056 : }
1057 : }
1058 : }
1059 :
1060 27064 : void Heap::CollectAllGarbage(int flags, GarbageCollectionReason gc_reason,
1061 : const v8::GCCallbackFlags gc_callback_flags) {
1062 : // Since we are ignoring the return value, the exact choice of space does
1063 : // not matter, so long as we do not specify NEW_SPACE, which would not
1064 : // cause a full GC.
1065 : set_current_gc_flags(flags);
1066 42839 : CollectGarbage(OLD_SPACE, gc_reason, gc_callback_flags);
1067 : set_current_gc_flags(kNoGCFlags);
1068 27064 : }
1069 :
 : // Aggressive collection: repeatedly runs full GCs (between 2 and 7 times)
 : // until a cycle frees no more weakly-held objects, also flushing the
 : // compilation cache, serializer data, and the optimizing compiler's queue.
1070 6583 : void Heap::CollectAllAvailableGarbage(GarbageCollectionReason gc_reason) {
1071 : // Since we are ignoring the return value, the exact choice of space does
1072 : // not matter, so long as we do not specify NEW_SPACE, which would not
1073 : // cause a full GC.
1074 : // Major GC would invoke weak handle callbacks on weakly reachable
1075 : // handles, but won't collect weakly reachable objects until next
1076 : // major GC. Therefore if we collect aggressively and weak handle callback
1077 : // has been invoked, we rerun major GC to release objects which become
1078 : // garbage.
1079 : // Note: as weak callbacks can execute arbitrary code, we cannot
1080 : // hope that eventually there will be no weak callbacks invocations.
1081 : // Therefore stop recollecting after several attempts.
1082 6583 : if (gc_reason == GarbageCollectionReason::kLastResort) {
1083 : InvokeOutOfMemoryCallback();
1084 : }
1085 : RuntimeCallTimerScope runtime_timer(
1086 6583 : isolate(), &RuntimeCallStats::GC_Custom_AllAvailableGarbage);
1087 6583 : if (isolate()->concurrent_recompilation_enabled()) {
1088 : // The optimizing compiler may be unnecessarily holding on to memory.
1089 : DisallowHeapAllocation no_recursive_gc;
1090 : isolate()->optimizing_compile_dispatcher()->Flush(
1091 6520 : OptimizingCompileDispatcher::BlockingBehavior::kDontBlock);
1092 : }
1093 6583 : isolate()->ClearSerializerData();
1094 : set_current_gc_flags(kMakeHeapIterableMask | kReduceMemoryFootprintMask);
1095 6583 : isolate_->compilation_cache()->Clear();
1096 : const int kMaxNumberOfAttempts = 7;
1097 : const int kMinNumberOfAttempts = 2;
 : // CollectGarbage returns true when another cycle may free more memory;
 : // stop once it returns false (after the minimum attempt count).
1098 13166 : for (int attempt = 0; attempt < kMaxNumberOfAttempts; attempt++) {
1099 13166 : if (!CollectGarbage(OLD_SPACE, gc_reason,
1100 13166 : v8::kGCCallbackFlagCollectAllAvailableGarbage) &&
1101 : attempt + 1 >= kMinNumberOfAttempts) {
1102 : break;
1103 : }
1104 : }
1105 :
1106 : set_current_gc_flags(kNoGCFlags);
 : // Give memory back to the OS now that the heap is as small as it gets.
1107 6583 : new_space_->Shrink();
1108 : UncommitFromSpace();
1109 6583 : }
1110 :
 : // Reacts to growth of embedder-reported external memory: above the hard
 : // limit a full GC is forced immediately; otherwise incremental marking is
 : // started (or a full GC run if it cannot be), or — if marking is already
 : // underway — advanced by a step sized proportionally to the overshoot.
1111 735 : void Heap::ReportExternalMemoryPressure() {
1112 : const GCCallbackFlags kGCCallbackFlagsForExternalMemory =
1113 : static_cast<GCCallbackFlags>(
1114 : kGCCallbackFlagSynchronousPhantomCallbackProcessing |
1115 : kGCCallbackFlagCollectAllExternalMemory);
1116 708 : if (external_memory_ >
1117 708 : (external_memory_at_last_mark_compact_ + external_memory_hard_limit())) {
1118 : CollectAllGarbage(
1119 : kReduceMemoryFootprintMask | kFinalizeIncrementalMarkingMask,
1120 : GarbageCollectionReason::kExternalMemoryPressure,
1121 : static_cast<GCCallbackFlags>(kGCCallbackFlagCollectAllAvailableGarbage |
1122 : kGCCallbackFlagsForExternalMemory));
1123 354 : return;
1124 : }
1125 255 : if (incremental_marking()->IsStopped()) {
1126 129 : if (incremental_marking()->CanBeActivated()) {
1127 : StartIncrementalMarking(i::Heap::kNoGCFlags,
1128 : GarbageCollectionReason::kExternalMemoryPressure,
1129 : kGCCallbackFlagsForExternalMemory);
1130 : } else {
1131 : CollectAllGarbage(i::Heap::kNoGCFlags,
1132 : GarbageCollectionReason::kExternalMemoryPressure,
1133 : kGCCallbackFlagsForExternalMemory);
1134 : }
1135 : } else {
1136 : // Incremental marking is turned on and has already been started.
1137 : const double kMinStepSize = 5;
1138 : const double kMaxStepSize = 10;
 : // Step duration (ms) scales with the external-memory overshoot ratio,
 : // clamped to [kMinStepSize, kMaxStepSize].
1139 : const double ms_step =
1140 : Min(kMaxStepSize,
1141 126 : Max(kMinStepSize, static_cast<double>(external_memory_) /
1142 126 : external_memory_limit_ * kMinStepSize));
1143 126 : const double deadline = MonotonicallyIncreasingTimeInMs() + ms_step;
1144 : // Extend the gc callback flags with external memory flags.
1145 : current_gc_callback_flags_ = static_cast<GCCallbackFlags>(
1146 126 : current_gc_callback_flags_ | kGCCallbackFlagsForExternalMemory);
1147 : incremental_marking()->AdvanceIncrementalMarking(
1148 126 : deadline, IncrementalMarking::GC_VIA_STACK_GUARD, StepOrigin::kV8);
1149 : }
1150 : }
1151 :
1152 86452 : void Heap::EnsureFillerObjectAtTop() {
1153 : // There may be an allocation memento behind objects in new space. Upon
1154 : // evacuation of a non-full new space (or if we are on the last page) there
1155 : // may be uninitialized memory behind top. We fill the remainder of the page
1156 : // with a filler.
 : // `to_top - kPointerSize` maps top to the page containing the last
 : // allocated word; if top itself is still inside that page's area, pad the
 : // rest of the page so heap iteration never sees uninitialized memory.
1157 86452 : Address to_top = new_space_->top();
1158 86452 : Page* page = Page::FromAddress(to_top - kPointerSize);
1159 86452 : if (page->Contains(to_top)) {
1160 75637 : int remaining_in_page = static_cast<int>(page->area_end() - to_top);
1161 75637 : CreateFillerObjectAt(to_top, remaining_in_page, ClearRecordedSlots::kNo);
1162 : }
1163 86452 : }
1164 :
 : // Main GC entry point. Selects a collector for `space`, runs the full
 : // prologue / collection / epilogue sequence under the GC tracer, feeds the
 : // memory reducer after mark-compact, and may start incremental marking for
 : // the next cycle. Returns true when a subsequent GC is likely to free more
 : // memory (propagated from weak-handle processing).
1165 86452 : bool Heap::CollectGarbage(AllocationSpace space,
1166 : GarbageCollectionReason gc_reason,
1167 288226 : const v8::GCCallbackFlags gc_callback_flags) {
1168 : // The VM is in the GC state until exiting this function.
1169 : VMState<GC> state(isolate());
1170 :
1171 86452 : const char* collector_reason = nullptr;
1172 86452 : GarbageCollector collector = SelectGarbageCollector(space, &collector_reason);
1173 :
1174 : #ifdef DEBUG
1175 : // Reset the allocation timeout to the GC interval, but make sure to
1176 : // allow at least a few allocations after a collection. The reason
1177 : // for this is that we have a lot of allocation sequences and we
1178 : // assume that a garbage collection will allow the subsequent
1179 : // allocation attempts to go through.
1180 : allocation_timeout_ = Max(6, FLAG_gc_interval);
1181 : #endif
1182 :
1183 86452 : EnsureFillerObjectAtTop();
1184 :
1185 116104 : if (IsYoungGenerationCollector(collector) &&
1186 : !incremental_marking()->IsStopped()) {
1187 2662 : if (FLAG_trace_incremental_marking) {
1188 : isolate()->PrintWithTimestamp(
1189 0 : "[IncrementalMarking] Scavenge during marking.\n");
1190 : }
1191 : }
1192 :
1193 : bool next_gc_likely_to_collect_more = false;
1194 : size_t committed_memory_before = 0;
1195 :
1196 86452 : if (collector == MARK_COMPACTOR) {
 : // Snapshot committed memory so the memory reducer can see whether this
 : // cycle shrank the heap.
1197 56800 : committed_memory_before = CommittedOldGenerationMemory();
1198 : }
1199 :
1200 : {
1201 172904 : tracer()->Start(collector, gc_reason, collector_reason);
1202 : DCHECK(AllowHeapAllocation::IsAllowed());
1203 : DisallowHeapAllocation no_allocation_during_gc;
1204 86452 : GarbageCollectionPrologue();
1205 :
1206 : {
1207 86452 : HistogramTimer* gc_type_timer = GCTypeTimer(collector);
1208 : HistogramTimerScope histogram_timer_scope(gc_type_timer);
1209 259356 : TRACE_EVENT0("v8", gc_type_timer->name());
1210 :
1211 : next_gc_likely_to_collect_more =
1212 86452 : PerformGarbageCollection(collector, gc_callback_flags);
1213 : }
1214 :
1215 86452 : GarbageCollectionEpilogue();
1216 86452 : if (collector == MARK_COMPACTOR && FLAG_track_detached_contexts) {
1217 56800 : isolate()->CheckDetachedContextsAfterGC();
1218 : }
1219 :
1220 86452 : if (collector == MARK_COMPACTOR) {
1221 56800 : size_t committed_memory_after = CommittedOldGenerationMemory();
1222 56800 : size_t used_memory_after = PromotedSpaceSizeOfObjects();
1223 : MemoryReducer::Event event;
1224 56800 : event.type = MemoryReducer::kMarkCompact;
1225 56800 : event.time_ms = MonotonicallyIncreasingTimeInMs();
1226 : // Trigger one more GC if
1227 : // - this GC decreased committed memory,
1228 : // - there is high fragmentation,
1229 : // - there are live detached contexts.
1230 : event.next_gc_likely_to_collect_more =
1231 112818 : (committed_memory_before > committed_memory_after + MB) ||
1232 112818 : HasHighFragmentation(used_memory_after, committed_memory_after) ||
1233 56800 : (detached_contexts()->length() > 0);
1234 56800 : event.committed_memory = committed_memory_after;
1235 56800 : if (deserialization_complete_) {
1236 56800 : memory_reducer_->NotifyMarkCompact(event);
1237 : }
 : // A completed full GC clears any outstanding memory-pressure signal.
1238 : memory_pressure_level_.SetValue(MemoryPressureLevel::kNone);
1239 : }
1240 :
1241 86452 : tracer()->Stop(collector);
1242 : }
1243 :
1244 143250 : if (collector == MARK_COMPACTOR &&
1245 56799 : (gc_callback_flags & (kGCCallbackFlagForced |
1246 : kGCCallbackFlagCollectAllAvailableGarbage)) != 0) {
1247 22686 : isolate()->CountUsage(v8::Isolate::kForcedGC);
1248 : }
1249 :
1250 : // Start incremental marking for the next cycle. The heap snapshot
1251 : // generator needs incremental marking to stay off after it aborted.
1252 : // We do this only for scavenger to avoid a loop where mark-compact
1253 : // causes another mark-compact.
1254 116103 : if (IsYoungGenerationCollector(collector) &&
1255 : !ShouldAbortIncrementalMarking()) {
1256 : StartIncrementalMarkingIfAllocationLimitIsReached(
1257 29642 : kNoGCFlags, kGCCallbackScheduleIdleGarbageCollection);
1258 : }
1259 :
1260 86451 : return next_gc_likely_to_collect_more;
1261 : }
1262 :
1263 :
 : // Records that a context was disposed and returns the running count of
 : // disposed contexts. For a top-level (non-dependent) context it also resets
 : // survival statistics and pings the memory reducer, since the heap's
 : // allocation behavior is expected to change.
1264 18178 : int Heap::NotifyContextDisposed(bool dependant_context) {
1265 6056 : if (!dependant_context) {
1266 10 : tracer()->ResetSurvivalEvents();
1267 10 : old_generation_size_configured_ = false;
1268 : MemoryReducer::Event event;
1269 10 : event.type = MemoryReducer::kPossibleGarbage;
1270 10 : event.time_ms = MonotonicallyIncreasingTimeInMs();
1271 10 : memory_reducer_->NotifyPossibleGarbage(event);
1272 : }
1273 6056 : if (isolate()->concurrent_recompilation_enabled()) {
1274 : // Flush the queued recompilation tasks.
1275 : isolate()->optimizing_compile_dispatcher()->Flush(
1276 6042 : OptimizingCompileDispatcher::BlockingBehavior::kDontBlock);
1277 : }
1278 6056 : number_of_disposed_maps_ = retained_maps()->Length();
1279 12112 : tracer()->AddContextDisposalTime(MonotonicallyIncreasingTimeInMs());
1280 6056 : return ++contexts_disposed_;
1281 : }
1282 :
 : // Starts incremental marking with the given flags; callers must ensure
 : // marking is currently stopped (checked by the DCHECK). The callback flags
 : // are remembered for the eventual finalizing GC.
1283 633 : void Heap::StartIncrementalMarking(int gc_flags,
1284 : GarbageCollectionReason gc_reason,
1285 19388 : GCCallbackFlags gc_callback_flags) {
1286 : DCHECK(incremental_marking()->IsStopped());
1287 : set_current_gc_flags(gc_flags);
1288 19388 : current_gc_callback_flags_ = gc_callback_flags;
1289 19388 : incremental_marking()->Start(gc_reason);
1290 633 : }
1291 :
 : // Starts incremental marking when the allocation limit has been reached:
 : // at the soft limit a marking task is merely scheduled; at the hard limit
 : // marking starts immediately. No-op while marking is already running.
1292 1837149 : void Heap::StartIncrementalMarkingIfAllocationLimitIsReached(
1293 1844450 : int gc_flags, const GCCallbackFlags gc_callback_flags) {
1294 1837149 : if (incremental_marking()->IsStopped()) {
1295 1588419 : IncrementalMarkingLimit reached_limit = IncrementalMarkingLimitReached();
1296 1588419 : if (reached_limit == IncrementalMarkingLimit::kSoftLimit) {
1297 7301 : incremental_marking()->incremental_marking_job()->ScheduleTask(this);
1298 1581118 : } else if (reached_limit == IncrementalMarkingLimit::kHardLimit) {
1299 : StartIncrementalMarking(gc_flags,
1300 : GarbageCollectionReason::kAllocationLimit,
1301 : gc_callback_flags);
1302 : }
1303 : }
1304 1837149 : }
1305 :
 : // Starts incremental marking from the idle-time handler, with the
 : // reduce-memory flag set and the no-progress counter reset.
1306 5 : void Heap::StartIdleIncrementalMarking(
1307 : GarbageCollectionReason gc_reason,
1308 : const GCCallbackFlags gc_callback_flags) {
1309 5 : gc_idle_time_handler_->ResetNoProgressCounter();
1310 : StartIncrementalMarking(kReduceMemoryFootprintMask, gc_reason,
1311 : gc_callback_flags);
1312 5 : }
1313 :
1314 :
 : // Moves `len` elements within `array` from src_index to dst_index (ranges
 : // may overlap). While the concurrent marker is active the copy is done
 : // slot-by-slot with relaxed atomics — in the direction that is safe for the
 : // overlap — so the marker never observes a torn pointer; otherwise a plain
 : // MemMove suffices. Finishes with the element write barrier.
1315 3055 : void Heap::MoveElements(FixedArray* array, int dst_index, int src_index,
1316 6103 : int len) {
1317 6110 : if (len == 0) return;
1318 :
1319 : DCHECK(array->map() != fixed_cow_array_map());
1320 3055 : Object** dst = array->data_start() + dst_index;
1321 3055 : Object** src = array->data_start() + src_index;
1322 6103 : if (FLAG_concurrent_marking && incremental_marking()->IsMarking()) {
1323 370 : if (dst < src) {
 : // Moving down: iterate forward so not-yet-copied source slots are
 : // never overwritten.
1324 100481 : for (int i = 0; i < len; i++) {
1325 : base::AsAtomicPointer::Relaxed_Store(
1326 200962 : dst + i, base::AsAtomicPointer::Relaxed_Load(src + i));
1327 : }
1328 : } else {
 : // Moving up (or equal): iterate backward for the same reason.
1329 456 : for (int i = len - 1; i >= 0; i--) {
1330 : base::AsAtomicPointer::Relaxed_Store(
1331 594 : dst + i, base::AsAtomicPointer::Relaxed_Load(src + i));
1332 : }
1333 : }
1334 : } else {
1335 2685 : MemMove(dst, src, len * kPointerSize);
1336 : }
1337 3055 : FIXED_ARRAY_ELEMENTS_WRITE_BARRIER(this, array, dst_index, len);
1338 : }
1339 :
1340 :
1341 : #ifdef VERIFY_HEAP
1342 : // Helper class for verifying the string table.
1343 : class StringTableVerifier : public ObjectVisitor {
1344 : public:
1345 : void VisitPointers(HeapObject* host, Object** start, Object** end) override {
1346 : // Visit all HeapObject pointers in [start, end).
1347 : for (Object** p = start; p < end; p++) {
1348 : if ((*p)->IsHeapObject()) {
1349 : HeapObject* object = HeapObject::cast(*p);
1350 : Isolate* isolate = object->GetIsolate();
1351 : // Check that the string is actually internalized.
1352 : CHECK(object->IsTheHole(isolate) || object->IsUndefined(isolate) ||
1353 : object->IsInternalizedString());
1354 : }
1355 : }
1356 : }
1357 : };
1358 :
1359 :
1360 : static void VerifyStringTable(Heap* heap) {
1361 : StringTableVerifier verifier;
1362 : heap->string_table()->IterateElements(&verifier);
1363 : }
1364 : #endif // VERIFY_HEAP
1365 :
 : // Pre-reserves memory for deserialization. For each space, allocates the
 : // requested chunks (maps individually, to avoid fragmentation; large-object
 : // space is only capacity-checked) and marks them with filler objects so an
 : // intervening GC sees a valid heap. On allocation failure a GC is run and
 : // the whole pass retried, up to kThreshold times. Returns true on success.
1366 29357963 : bool Heap::ReserveSpace(Reservation* reservations, std::vector<Address>* maps) {
1367 : bool gc_performed = true;
1368 : int counter = 0;
1369 : static const int kThreshold = 20;
1370 416460 : while (gc_performed && counter++ < kThreshold) {
1371 : gc_performed = false;
1372 694098 : for (int space = NEW_SPACE; space < SerializerDeserializer::kNumberOfSpaces;
1373 : space++) {
1374 694101 : Reservation* reservation = &reservations[space];
1375 : DCHECK_LE(1, reservation->size());
 : // An empty first chunk means nothing was requested for this space.
1376 694108 : if (reservation->at(0).size == 0) continue;
1377 : bool perform_gc = false;
1378 332694 : if (space == MAP_SPACE) {
1379 : // We allocate each map individually to avoid fragmentation.
1380 : maps->clear();
1381 : DCHECK_LE(reservation->size(), 2);
1382 : int reserved_size = 0;
1383 415557 : for (const Chunk& c : *reservation) reserved_size += c.size;
1384 : DCHECK_EQ(0, reserved_size % Map::kSize);
1385 138519 : int num_maps = reserved_size / Map::kSize;
1386 58714826 : for (int i = 0; i < num_maps; i++) {
1387 : // The deserializer will update the skip list.
1388 : AllocationResult allocation = map_space()->AllocateRawUnaligned(
1389 29218895 : Map::kSize, PagedSpace::IGNORE_SKIP_LIST);
1390 : HeapObject* free_space = nullptr;
1391 29218894 : if (allocation.To(&free_space)) {
1392 : // Mark with a free list node, in case we have a GC before
1393 : // deserializing.
1394 29218894 : Address free_space_address = free_space->address();
1395 : CreateFillerObjectAt(free_space_address, Map::kSize,
1396 29218894 : ClearRecordedSlots::kNo);
1397 29218903 : maps->push_back(free_space_address);
1398 : } else {
1399 : perform_gc = true;
1400 0 : break;
1401 : }
1402 : }
1403 194175 : } else if (space == LO_SPACE) {
1404 : // Just check that we can allocate during deserialization.
1405 : DCHECK_LE(reservation->size(), 2);
1406 : int reserved_size = 0;
1407 105 : for (const Chunk& c : *reservation) reserved_size += c.size;
1408 70 : perform_gc = !CanExpandOldGeneration(reserved_size);
1409 : } else {
 : // New space and the remaining paged spaces: allocate each chunk and
 : // record its [start, end) for the deserializer.
1410 25329536 : for (auto& chunk : *reservation) {
1411 : AllocationResult allocation;
1412 25135402 : int size = chunk.size;
1413 : DCHECK_LE(static_cast<size_t>(size),
1414 : MemoryAllocator::PageAreaSize(
1415 : static_cast<AllocationSpace>(space)));
1416 25135402 : if (space == NEW_SPACE) {
1417 : allocation = new_space()->AllocateRawUnaligned(size);
1418 : } else {
1419 : // The deserializer will update the skip list.
1420 : allocation = paged_space(space)->AllocateRawUnaligned(
1421 25135153 : size, PagedSpace::IGNORE_SKIP_LIST);
1422 : }
1423 : HeapObject* free_space = nullptr;
1424 25135438 : if (allocation.To(&free_space)) {
1425 : // Mark with a free list node, in case we have a GC before
1426 : // deserializing.
1427 25135438 : Address free_space_address = free_space->address();
1428 : CreateFillerObjectAt(free_space_address, size,
1429 25135438 : ClearRecordedSlots::kNo);
1430 : DCHECK_GT(SerializerDeserializer::kNumberOfPreallocatedSpaces,
1431 : space);
1432 25135396 : chunk.start = free_space_address;
1433 25135396 : chunk.end = free_space_address + size;
1434 : } else {
1435 : perform_gc = true;
1436 : break;
1437 : }
1438 : }
1439 : }
1440 332687 : if (perform_gc) {
1441 3 : if (space == NEW_SPACE) {
1442 0 : CollectGarbage(NEW_SPACE, GarbageCollectionReason::kDeserializer);
1443 : } else {
 : // Escalate to a memory-reducing GC on the second and later retries.
1444 3 : if (counter > 1) {
1445 : CollectAllGarbage(
1446 : kReduceMemoryFootprintMask | kAbortIncrementalMarkingMask,
1447 : GarbageCollectionReason::kDeserializer);
1448 : } else {
1449 : CollectAllGarbage(kAbortIncrementalMarkingMask,
1450 : GarbageCollectionReason::kDeserializer);
1451 : }
1452 : }
1453 : gc_performed = true;
1454 : break; // Abort for-loop over spaces and retry.
1455 : }
1456 : }
1457 : }
1458 :
1459 138819 : return !gc_performed;
1460 : }
1461 :
1462 :
 : // Makes sure new space's from-semispace is committed before a GC; failure
 : // to commit is treated as fatal out-of-memory.
1463 86452 : void Heap::EnsureFromSpaceIsCommitted() {
1464 259356 : if (new_space_->CommitFromSpaceIfNeeded()) return;
1465 :
1466 : // Committing memory to from space failed.
1467 : // Memory is exhausted and we will die.
1468 0 : V8::FatalProcessOutOfMemory("Committing semi space failed.");
1469 : }
1470 :
1471 :
 : // Recomputes survival statistics after a GC from the new-space size at the
 : // start of the cycle: promotion ratio/rate and semi-space copy rate (all
 : // percentages), feeding the combined survival rate to the GC tracer.
 : // No-op when new space was empty.
1472 162431 : void Heap::UpdateSurvivalStatistics(int start_new_space_size) {
1473 172903 : if (start_new_space_size == 0) return;
1474 :
1475 75979 : promotion_ratio_ = (static_cast<double>(promoted_objects_size_) /
1476 75979 : static_cast<double>(start_new_space_size) * 100);
1477 :
 : // promotion_rate_ relates this cycle's promotions to the previous cycle's
 : // survivors; undefined (0) when there were none.
1478 75979 : if (previous_semi_space_copied_object_size_ > 0) {
1479 : promotion_rate_ =
1480 52461 : (static_cast<double>(promoted_objects_size_) /
1481 52461 : static_cast<double>(previous_semi_space_copied_object_size_) * 100);
1482 : } else {
1483 23518 : promotion_rate_ = 0;
1484 : }
1485 :
1486 : semi_space_copied_rate_ =
1487 75979 : (static_cast<double>(semi_space_copied_object_size_) /
1488 75979 : static_cast<double>(start_new_space_size) * 100);
1489 :
1490 75979 : double survival_rate = promotion_ratio_ + semi_space_copied_rate_;
1491 75979 : tracer()->AddSurvivalRatio(survival_rate);
1492 : }
1493 :
 : // Core of a GC cycle: invokes embedder prologue callbacks, dispatches to
 : // the selected collector (mark-compact, minor mark-compact, or scavenger),
 : // processes pretenuring feedback and weak global handles, recomputes
 : // allocation limits, and invokes epilogue callbacks. Returns true when
 : // weak-handle processing freed handles (another GC may free more).
1494 86452 : bool Heap::PerformGarbageCollection(
1495 548302 : GarbageCollector collector, const v8::GCCallbackFlags gc_callback_flags) {
1496 : int freed_global_handles = 0;
1497 :
1498 86452 : if (!IsYoungGenerationCollector(collector)) {
1499 286504 : PROFILE(isolate_, CodeMovingGCEvent());
1500 : }
1501 :
1502 : #ifdef VERIFY_HEAP
1503 : if (FLAG_verify_heap) {
1504 : VerifyStringTable(this);
1505 : }
1506 : #endif
1507 :
1508 : GCType gc_type =
1509 86452 : collector == MARK_COMPACTOR ? kGCTypeMarkSweepCompact : kGCTypeScavenge;
1510 :
1511 : {
 : // Embedder prologue callbacks run only at the outermost nesting level.
1512 : GCCallbacksScope scope(this);
1513 86452 : if (scope.CheckReenter()) {
1514 : AllowHeapAllocation allow_allocation;
1515 345688 : TRACE_GC(tracer(), GCTracer::Scope::HEAP_EXTERNAL_PROLOGUE);
1516 172844 : VMState<EXTERNAL> state(isolate_);
1517 86422 : HandleScope handle_scope(isolate_);
1518 172844 : CallGCPrologueCallbacks(gc_type, kNoGCCallbackFlags);
1519 : }
1520 : }
1521 :
1522 86452 : EnsureFromSpaceIsCommitted();
1523 :
1524 86452 : int start_new_space_size = static_cast<int>(Heap::new_space()->Size());
1525 :
1526 : {
1527 86452 : Heap::SkipStoreBufferScope skip_store_buffer_scope(store_buffer_);
1528 :
1529 86452 : switch (collector) {
1530 : case MARK_COMPACTOR:
1531 : UpdateOldGenerationAllocationCounter();
1532 : // Perform mark-sweep with optional compaction.
1533 56800 : MarkCompact();
1534 56800 : old_generation_size_configured_ = true;
1535 : // This should be updated before PostGarbageCollectionProcessing, which
1536 : // can cause another GC. Take into account the objects promoted during
1537 : // GC.
1538 : old_generation_allocation_counter_at_last_gc_ +=
1539 56800 : static_cast<size_t>(promoted_objects_size_);
1540 56800 : old_generation_size_at_last_gc_ = PromotedSpaceSizeOfObjects();
1541 56800 : break;
1542 : case MINOR_MARK_COMPACTOR:
1543 0 : MinorMarkCompact();
1544 0 : break;
1545 : case SCAVENGER:
 : // In fast-promotion mode, evacuate the whole young generation
 : // instead of scavenging, provided old space can absorb it.
1546 29652 : if ((fast_promotion_mode_ &&
1547 0 : CanExpandOldGeneration(new_space()->Size()))) {
1548 : tracer()->NotifyYoungGenerationHandling(
1549 0 : YoungGenerationHandling::kFastPromotionDuringScavenge);
1550 0 : EvacuateYoungGeneration();
1551 : } else {
1552 : tracer()->NotifyYoungGenerationHandling(
1553 29652 : YoungGenerationHandling::kRegularScavenge);
1554 :
1555 29652 : Scavenge();
1556 : }
1557 : break;
1558 : }
1559 :
1560 86452 : ProcessPretenuringFeedback();
1561 : }
1562 :
1563 86452 : UpdateSurvivalStatistics(start_new_space_size);
1564 86451 : ConfigureInitialOldGenerationSize();
1565 :
1566 86451 : if (!fast_promotion_mode_ || collector == MARK_COMPACTOR) {
1567 86451 : ComputeFastPromotionMode(promotion_ratio_ + semi_space_copied_rate_);
1568 : }
1569 :
1570 172904 : isolate_->counters()->objs_since_last_young()->Set(0);
1571 :
 : // Weak-handle callbacks may allocate and even trigger a nested GC; the
 : // depth counter flags that state.
1572 86452 : gc_post_processing_depth_++;
1573 : {
1574 : AllowHeapAllocation allow_allocation;
1575 345808 : TRACE_GC(tracer(), GCTracer::Scope::HEAP_EXTERNAL_WEAK_GLOBAL_HANDLES);
1576 : freed_global_handles =
1577 : isolate_->global_handles()->PostGarbageCollectionProcessing(
1578 259356 : collector, gc_callback_flags);
1579 : }
1580 86452 : gc_post_processing_depth_--;
1581 :
1582 172904 : isolate_->eternal_handles()->PostGarbageCollectionProcessing(this);
1583 :
1584 : // Update relocatables.
1585 86452 : Relocatable::PostGarbageCollectionProcessing(isolate_);
1586 :
1587 86451 : double gc_speed = tracer()->CombinedMarkCompactSpeedInBytesPerMillisecond();
1588 : double mutator_speed =
1589 86452 : tracer()->CurrentOldGenerationAllocationThroughputInBytesPerMillisecond();
1590 86452 : size_t old_gen_size = PromotedSpaceSizeOfObjects();
1591 86452 : if (collector == MARK_COMPACTOR) {
1592 : // Register the amount of external allocated memory.
1593 56800 : external_memory_at_last_mark_compact_ = external_memory_;
1594 56800 : external_memory_limit_ = external_memory_ + kExternalAllocationSoftLimit;
1595 56800 : SetOldGenerationAllocationLimit(old_gen_size, gc_speed, mutator_speed);
1596 29652 : } else if (HasLowYoungGenerationAllocationRate() &&
1597 : old_generation_size_configured_) {
1598 83 : DampenOldGenerationAllocationLimit(old_gen_size, gc_speed, mutator_speed);
1599 : }
1600 :
1601 : {
1602 : GCCallbacksScope scope(this);
1603 86451 : if (scope.CheckReenter()) {
1604 : AllowHeapAllocation allow_allocation;
1605 345687 : TRACE_GC(tracer(), GCTracer::Scope::HEAP_EXTERNAL_EPILOGUE);
1606 172844 : VMState<EXTERNAL> state(isolate_);
1607 86422 : HandleScope handle_scope(isolate_);
1608 172844 : CallGCEpilogueCallbacks(gc_type, gc_callback_flags);
1609 : }
1610 : }
1611 :
1612 : #ifdef VERIFY_HEAP
1613 : if (FLAG_verify_heap) {
1614 : VerifyStringTable(this);
1615 : }
1616 : #endif
1617 :
1618 86452 : return freed_global_handles > 0;
1619 : }
1620 :
1621 :
 : // Invokes every registered GC prologue callback whose registered gc_type
 : // mask matches this collection's type.
1622 101545 : void Heap::CallGCPrologueCallbacks(GCType gc_type, GCCallbackFlags flags) {
1623 : RuntimeCallTimerScope runtime_timer(isolate(),
1624 101545 : &RuntimeCallStats::GCPrologueCallback);
1625 203157 : for (const GCCallbackTuple& info : gc_prologue_callbacks_) {
1626 67 : if (gc_type & info.gc_type) {
1627 : v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(this->isolate());
1628 67 : info.callback(isolate, gc_type, flags, info.data);
1629 : }
1630 : }
1631 101545 : }
1632 :
 : // Invokes every registered GC epilogue callback whose registered gc_type
 : // mask matches this collection's type.
1633 101545 : void Heap::CallGCEpilogueCallbacks(GCType gc_type, GCCallbackFlags flags) {
1634 : RuntimeCallTimerScope runtime_timer(isolate(),
1635 101545 : &RuntimeCallStats::GCEpilogueCallback);
1636 203155 : for (const GCCallbackTuple& info : gc_epilogue_callbacks_) {
1637 65 : if (gc_type & info.gc_type) {
1638 : v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(this->isolate());
1639 65 : info.callback(isolate, gc_type, flags, info.data);
1640 : }
1641 : }
1642 101545 : }
1643 :
1644 :
 : // Runs a full mark-compact collection: prologue (cache clearing), the
 : // collector itself, epilogue, and — when allocation-site pretenuring is
 : // enabled — re-evaluation of local pretenuring decisions against the heap
 : // size recorded before the GC. Allocation observers are paused throughout.
1645 170400 : void Heap::MarkCompact() {
1646 56800 : PauseAllocationObserversScope pause_observers(this);
1647 :
1648 : SetGCState(MARK_COMPACT);
1649 :
1650 113600 : LOG(isolate_, ResourceEvent("markcompact", "begin"));
1651 :
1652 56800 : uint64_t size_of_objects_before_gc = SizeOfObjects();
1653 :
1654 56800 : mark_compact_collector()->Prepare();
1655 :
1656 56800 : ms_count_++;
1657 :
1658 56800 : MarkCompactPrologue();
1659 :
1660 56800 : mark_compact_collector()->CollectGarbage();
1661 :
1662 113600 : LOG(isolate_, ResourceEvent("markcompact", "end"));
1663 :
1664 56800 : MarkCompactEpilogue();
1665 :
1666 56800 : if (FLAG_allocation_site_pretenuring) {
1667 56800 : EvaluateOldSpaceLocalPretenuring(size_of_objects_before_gc);
1668 56800 : }
1669 56800 : }
1670 :
// Runs a minor (young generation) mark-compact collection. Only reachable
// when --minor-mc is enabled.
void Heap::MinorMarkCompact() {
  DCHECK(FLAG_minor_mc);

  SetGCState(MINOR_MARK_COMPACT);
  LOG(isolate_, ResourceEvent("MinorMarkCompact", "begin"));

  TRACE_GC(tracer(), GCTracer::Scope::MINOR_MC);
  // Collector-internal allocations must always succeed, must not trigger
  // allocation observers, and must not be black-allocated.
  AlwaysAllocateScope always_allocate(isolate());
  PauseAllocationObserversScope pause_observers(this);
  IncrementalMarking::PauseBlackAllocationScope pause_black_allocation(
      incremental_marking());

  minor_mark_compact_collector()->CollectGarbage();

  LOG(isolate_, ResourceEvent("MinorMarkCompact", "end"));
  SetGCState(NOT_IN_GC);
}
1688 :
// Post-mark-compact bookkeeping: leaves the GC state, resets counters and
// finishes the incremental-marking cycle.
void Heap::MarkCompactEpilogue() {
  TRACE_GC(tracer(), GCTracer::Scope::MC_EPILOGUE);
  SetGCState(NOT_IN_GC);

  // A full GC just completed; restart the objs-since-last-full counter.
  isolate_->counters()->objs_since_last_full()->Set(0);

  incremental_marking()->Epilogue();

  PreprocessStackTraces();
  // Incremental marking must have been wound down by the collection.
  DCHECK(incremental_marking()->IsStopped());
}
1700 :
1701 :
// Drops per-isolate caches ahead of a mark-compact collection so they do
// not keep stale entries across the compaction.
void Heap::MarkCompactPrologue() {
  TRACE_GC(tracer(), GCTracer::Scope::MC_PROLOGUE);
  isolate_->context_slot_cache()->Clear();
  isolate_->descriptor_lookup_cache()->Clear();
  RegExpResultsCache::Clear(string_split_cache());
  RegExpResultsCache::Clear(regexp_multiple_cache());

  isolate_->compilation_cache()->MarkCompactPrologue();

  FlushNumberStringCache();
}
1713 :
1714 :
1715 86452 : void Heap::CheckNewSpaceExpansionCriteria() {
1716 86452 : if (FLAG_experimental_new_space_growth_heuristic) {
1717 0 : if (new_space_->TotalCapacity() < new_space_->MaximumCapacity() &&
1718 0 : survived_last_scavenge_ * 100 / new_space_->TotalCapacity() >= 10) {
1719 : // Grow the size of new space if there is room to grow, and more than 10%
1720 : // have survived the last scavenge.
1721 0 : new_space_->Grow();
1722 0 : survived_since_last_expansion_ = 0;
1723 : }
1724 257027 : } else if (new_space_->TotalCapacity() < new_space_->MaximumCapacity() &&
1725 84123 : survived_since_last_expansion_ > new_space_->TotalCapacity()) {
1726 : // Grow the size of new space if there is room to grow, and enough data
1727 : // has survived scavenge since the last expansion.
1728 1850 : new_space_->Grow();
1729 1850 : survived_since_last_expansion_ = 0;
1730 : }
1731 86452 : }
1732 :
1733 17003 : static bool IsUnscavengedHeapObject(Heap* heap, Object** p) {
1734 50993 : return heap->InNewSpace(*p) &&
1735 17003 : !HeapObject::cast(*p)->map_word().IsForwardingAddress();
1736 : }
1737 :
1738 29652 : class ScavengeWeakObjectRetainer : public WeakObjectRetainer {
1739 : public:
1740 29652 : explicit ScavengeWeakObjectRetainer(Heap* heap) : heap_(heap) {}
1741 :
1742 69303 : virtual Object* RetainAs(Object* object) {
1743 69303 : if (!heap_->InFromSpace(object)) {
1744 : return object;
1745 : }
1746 :
1747 : MapWord map_word = HeapObject::cast(object)->map_word();
1748 0 : if (map_word.IsForwardingAddress()) {
1749 0 : return map_word.ToForwardingAddress();
1750 : }
1751 : return nullptr;
1752 : }
1753 :
1754 : private:
1755 : Heap* heap_;
1756 : };
1757 :
// Fast-promotion path of a young-generation GC: instead of copying live
// objects, every new-space page is converted wholesale into an old-space
// page.
void Heap::EvacuateYoungGeneration() {
  TRACE_GC(tracer(), GCTracer::Scope::SCAVENGER_FAST_PROMOTE);
  base::LockGuard<base::Mutex> guard(relocation_mutex());
  ConcurrentMarking::PauseScope pause_scope(concurrent_marking());
  if (!FLAG_concurrent_marking) {
    DCHECK(fast_promotion_mode_);
    DCHECK(CanExpandOldGeneration(new_space()->Size()));
  }

  mark_compact_collector()->sweeper().EnsureNewSpaceCompleted();

  SetGCState(SCAVENGE);
  LOG(isolate_, ResourceEvent("scavenge", "begin"));

  // Move pages from new->old generation.
  PageRange range(new_space()->bottom(), new_space()->top());
  for (auto it = range.begin(); it != range.end();) {
    // Advance the iterator before unlinking/converting the page so the
    // conversion cannot invalidate the iterator being incremented.
    Page* p = (*++it)->prev_page();
    p->Unlink();
    Page::ConvertNewToOld(p);
    if (incremental_marking()->IsMarking())
      mark_compact_collector()->RecordLiveSlotsOnPage(p);
  }

  // Reset new space.
  if (!new_space()->Rebalance()) {
    FatalProcessOutOfMemory("NewSpace::Rebalance");
  }
  new_space()->ResetAllocationInfo();
  new_space()->set_age_mark(new_space()->top());

  // Fix up special trackers.
  external_string_table_.PromoteAllNewSpaceStrings();
  // GlobalHandles are updated in PostGarbageCollectonProcessing

  // Everything was promoted; nothing was semispace-copied.
  IncrementYoungSurvivorsCounter(new_space()->Size());
  IncrementPromotedObjectsSize(new_space()->Size());
  IncrementSemiSpaceCopiedObjectSize(0);

  LOG(isolate_, ResourceEvent("scavenge", "end"));
  SetGCState(NOT_IN_GC);
}
1800 :
1801 88591 : static bool IsLogging(Isolate* isolate) {
1802 59299 : return FLAG_verify_predictable || isolate->logger()->is_logging() ||
1803 58944 : isolate->is_profiling() ||
1804 29292 : (isolate->heap_profiler() != nullptr &&
1805 29292 : isolate->heap_profiler()->is_tracking_object_moves());
1806 : }
1807 :
// One unit of parallel scavenging work: a memory chunk whose old-to-new
// slots are to be processed by a Scavenger.
class PageScavengingItem final : public ItemParallelJob::Item {
 public:
  explicit PageScavengingItem(MemoryChunk* chunk) : chunk_(chunk) {}
  virtual ~PageScavengingItem() {}

  // Lets |scavenger| scavenge all references recorded on this chunk.
  void Process(Scavenger* scavenger) { scavenger->ScavengePage(chunk_); }

 private:
  MemoryChunk* const chunk_;
};
1818 :
1819 88769 : class ScavengingTask final : public ItemParallelJob::Task {
1820 : public:
1821 44406 : ScavengingTask(Heap* heap, Scavenger* scavenger, OneshotBarrier* barrier)
1822 : : ItemParallelJob::Task(heap->isolate()),
1823 : heap_(heap),
1824 : scavenger_(scavenger),
1825 44406 : barrier_(barrier) {}
1826 :
1827 44126 : void RunInParallel() final {
1828 44126 : double scavenging_time = 0.0;
1829 : {
1830 44126 : barrier_->Start();
1831 : TimedScope scope(&scavenging_time);
1832 145624 : PageScavengingItem* item = nullptr;
1833 233937 : while ((item = GetItem<PageScavengingItem>()) != nullptr) {
1834 145624 : item->Process(scavenger_);
1835 145642 : item->MarkFinished();
1836 : }
1837 45677 : do {
1838 45674 : scavenger_->Process(barrier_);
1839 45668 : } while (!barrier_->Wait());
1840 44156 : scavenger_->Process();
1841 : }
1842 44158 : if (FLAG_trace_parallel_scavenge) {
1843 0 : PrintIsolate(heap_->isolate(),
1844 : "scavenge[%p]: time=%.2f copied=%zu promoted=%zu\n",
1845 : static_cast<void*>(this), scavenging_time,
1846 0 : scavenger_->bytes_copied(), scavenger_->bytes_promoted());
1847 : }
1848 44158 : };
1849 :
1850 : private:
1851 : Heap* const heap_;
1852 : Scavenger* const scavenger_;
1853 : OneshotBarrier* const barrier_;
1854 : };
1855 :
1856 58865 : int Heap::NumberOfScavengeTasks() {
1857 29652 : if (!FLAG_parallel_scavenge) return 1;
1858 : const int num_scavenge_tasks =
1859 29213 : static_cast<int>(new_space()->TotalCapacity()) / MB;
1860 : return Max(
1861 : 1,
1862 : Min(Min(num_scavenge_tasks, kMaxScavengerTasks),
1863 : static_cast<int>(
1864 58426 : V8::GetCurrentPlatform()->NumberOfAvailableBackgroundThreads())));
1865 : }
1866 :
// Young-generation GC: semispace copying (Cheney-style), parallelized over
// up to kMaxScavengerTasks scavenger tasks.
void Heap::Scavenge() {
  TRACE_GC(tracer(), GCTracer::Scope::SCAVENGER_SCAVENGE);
  base::LockGuard<base::Mutex> guard(relocation_mutex());
  ConcurrentMarking::PauseScope pause_scope(concurrent_marking());
  // There are soft limits in the allocation code, designed to trigger a mark
  // sweep collection by failing allocations. There is no sense in trying to
  // trigger one during scavenge: scavenges allocation should always succeed.
  AlwaysAllocateScope scope(isolate());

  // Bump-pointer allocations done during scavenge are not real allocations.
  // Pause the inline allocation steps.
  PauseAllocationObserversScope pause_observers(this);

  IncrementalMarking::PauseBlackAllocationScope pause_black_allocation(
      incremental_marking());

  // If the unmapper has queued up too many delayed chunks, finish sweeping
  // first so they can actually be released.
  if (mark_compact_collector()->sweeper().sweeping_in_progress() &&
      memory_allocator_->unmapper()->NumberOfDelayedChunks() >
          static_cast<int>(new_space_->MaximumCapacity() / Page::kPageSize)) {
    mark_compact_collector()->EnsureSweepingCompleted();
  }

  // TODO(mlippautz): Untangle the dependency of the unmapper from the sweeper.
  mark_compact_collector()->sweeper().EnsureNewSpaceCompleted();

  SetGCState(SCAVENGE);

  // Implements Cheney's copying algorithm
  LOG(isolate_, ResourceEvent("scavenge", "begin"));

  // Flip the semispaces. After flipping, to space is empty, from space has
  // live objects.
  new_space_->Flip();
  new_space_->ResetAllocationInfo();

  ItemParallelJob job(isolate()->cancelable_task_manager(),
                      &parallel_scavenge_semaphore_);
  const int kMainThreadId = 0;
  Scavenger* scavengers[kMaxScavengerTasks];
  const bool is_logging = IsLogging(isolate());
  const int num_scavenge_tasks = NumberOfScavengeTasks();
  OneshotBarrier barrier;
  Scavenger::CopiedList copied_list(num_scavenge_tasks);
  Scavenger::PromotionList promotion_list(num_scavenge_tasks);
  for (int i = 0; i < num_scavenge_tasks; i++) {
    scavengers[i] =
        new Scavenger(this, is_logging, &copied_list, &promotion_list, i);
    job.AddTask(new ScavengingTask(this, scavengers[i], &barrier));
  }

  CodeSpaceMemoryModificationScope code_modification(this);

  // Every page carrying old-to-new slots becomes one unit of parallel work.
  RememberedSet<OLD_TO_NEW>::IterateMemoryChunks(
      this, [&job](MemoryChunk* chunk) {
        job.AddItem(new PageScavengingItem(chunk));
      });

  {
    MarkCompactCollector::Sweeper::PauseOrCompleteScope sweeper_scope(
        &mark_compact_collector()->sweeper());
    RootScavengeVisitor root_scavenge_visitor(this, scavengers[kMainThreadId]);

    {
      // Identify weak unmodified handles. Requires an unmodified graph.
      TRACE_GC(
          tracer(),
          GCTracer::Scope::SCAVENGER_SCAVENGE_WEAK_GLOBAL_HANDLES_IDENTIFY);
      isolate()->global_handles()->IdentifyWeakUnmodifiedObjects(
          &JSObject::IsUnmodifiedApiObject);
    }
    {
      // Copy roots.
      TRACE_GC(tracer(), GCTracer::Scope::SCAVENGER_SCAVENGE_ROOTS);
      IterateRoots(&root_scavenge_visitor, VISIT_ALL_IN_SCAVENGE);
    }
    {
      // Weak collections are held strongly by the Scavenger.
      TRACE_GC(tracer(), GCTracer::Scope::SCAVENGER_SCAVENGE_WEAK);
      IterateEncounteredWeakCollections(&root_scavenge_visitor);
    }
    {
      // Parallel phase scavenging all copied and promoted objects.
      TRACE_GC(tracer(), GCTracer::Scope::SCAVENGER_SCAVENGE_PARALLEL);
      job.Run();
      DCHECK(copied_list.IsGlobalEmpty());
      DCHECK(promotion_list.IsGlobalEmpty());
    }
    {
      // Scavenge weak global handles.
      TRACE_GC(tracer(),
               GCTracer::Scope::SCAVENGER_SCAVENGE_WEAK_GLOBAL_HANDLES_PROCESS);
      isolate()->global_handles()->MarkNewSpaceWeakUnmodifiedObjectsPending(
          &IsUnscavengedHeapObject);
      isolate()->global_handles()->IterateNewSpaceWeakUnmodifiedRoots(
          &root_scavenge_visitor);
      // Processing the weak roots may have produced more copy work; drain
      // it on the main thread.
      scavengers[kMainThreadId]->Process();
    }
  }

  for (int i = 0; i < num_scavenge_tasks; i++) {
    scavengers[i]->Finalize();
    delete scavengers[i];
  }

  UpdateNewSpaceReferencesInExternalStringTable(
      &UpdateNewSpaceReferenceInExternalStringTableEntry);

  incremental_marking()->UpdateMarkingWorklistAfterScavenge();

  if (FLAG_concurrent_marking) {
    // Ensure that concurrent marker does not track pages that are
    // going to be unmapped.
    for (Page* p : PageRange(new_space()->FromSpaceStart(),
                             new_space()->FromSpaceEnd())) {
      concurrent_marking()->ClearLiveness(p);
    }
  }

  ScavengeWeakObjectRetainer weak_object_retainer(this);
  ProcessYoungWeakReferences(&weak_object_retainer);

  // Set age mark.
  new_space_->set_age_mark(new_space_->top());

  ArrayBufferTracker::FreeDeadInNewSpace(this);

  RememberedSet<OLD_TO_NEW>::IterateMemoryChunks(this, [](MemoryChunk* chunk) {
    if (chunk->SweepingDone()) {
      RememberedSet<OLD_TO_NEW>::FreeEmptyBuckets(chunk);
    } else {
      RememberedSet<OLD_TO_NEW>::PreFreeEmptyBuckets(chunk);
    }
  });

  // Update how much has survived scavenge.
  IncrementYoungSurvivorsCounter(SurvivedNewSpaceObjectSize());

  // Scavenger may find new wrappers by iterating objects promoted onto a black
  // page.
  local_embedder_heap_tracer()->RegisterWrappersWithRemoteTracer();

  LOG(isolate_, ResourceEvent("scavenge", "end"));

  SetGCState(NOT_IN_GC);
}
2012 :
// Decides whether the next young-generation GC may use fast promotion
// (EvacuateYoungGeneration): only when the new space is at maximum capacity
// and at least kMinPromotedPercentForFastPromotionMode percent of it
// survived the last scavenge. |survival_rate| is currently unused.
void Heap::ComputeFastPromotionMode(double survival_rate) {
  const size_t survived_in_new_space =
      survived_last_scavenge_ * 100 / new_space_->Capacity();
  fast_promotion_mode_ =
      !FLAG_optimize_for_size && FLAG_fast_promotion_new_space &&
      !ShouldReduceMemory() && new_space_->IsAtMaximumCapacity() &&
      survived_in_new_space >= kMinPromotedPercentForFastPromotionMode;
  if (FLAG_trace_gc_verbose) {
    PrintIsolate(
        isolate(), "Fast promotion mode: %s survival rate: %" PRIuS "%%\n",
        fast_promotion_mode_ ? "true" : "false", survived_in_new_space);
  }
}
2026 :
2027 1411 : String* Heap::UpdateNewSpaceReferenceInExternalStringTableEntry(Heap* heap,
2028 : Object** p) {
2029 1411 : MapWord first_word = HeapObject::cast(*p)->map_word();
2030 :
2031 1411 : if (!first_word.IsForwardingAddress()) {
2032 : // Unreachable external string can be finalized.
2033 227 : String* string = String::cast(*p);
2034 227 : if (!string->IsExternalString()) {
2035 : // Original external string has been internalized.
2036 : DCHECK(string->IsThinString());
2037 : return nullptr;
2038 : }
2039 : heap->FinalizeExternalString(string);
2040 : return nullptr;
2041 : }
2042 :
2043 : // String is still reachable.
2044 1184 : String* string = String::cast(first_word.ToForwardingAddress());
2045 1184 : if (string->IsThinString()) string = ThinString::cast(string)->actual();
2046 : // Internalization can replace external strings with non-external strings.
2047 1184 : return string->IsExternalString() ? string : nullptr;
2048 : }
2049 :
2050 0 : void Heap::ExternalStringTable::Verify() {
2051 : #ifdef DEBUG
2052 : for (size_t i = 0; i < new_space_strings_.size(); ++i) {
2053 : Object* obj = Object::cast(new_space_strings_[i]);
2054 : DCHECK(heap_->InNewSpace(obj));
2055 : DCHECK(!obj->IsTheHole(heap_->isolate()));
2056 : }
2057 : for (size_t i = 0; i < old_space_strings_.size(); ++i) {
2058 : Object* obj = Object::cast(old_space_strings_[i]);
2059 : DCHECK(!heap_->InNewSpace(obj));
2060 : DCHECK(!obj->IsTheHole(heap_->isolate()));
2061 : }
2062 : #endif
2063 0 : }
2064 :
// Rewrites the new-space portion of the table after a scavenge: dead
// entries are dropped, surviving new-space strings are compacted in place,
// and promoted strings move to the old-space list.
void Heap::ExternalStringTable::UpdateNewSpaceReferences(
    Heap::ExternalStringTableUpdaterCallback updater_func) {
  if (new_space_strings_.empty()) return;

  Object** start = new_space_strings_.data();
  Object** end = start + new_space_strings_.size();
  Object** last = start;  // Write cursor for in-place compaction.

  for (Object** p = start; p < end; ++p) {
    String* target = updater_func(heap_, p);

    // nullptr means the entry is dead or no longer external; drop it.
    if (target == nullptr) continue;

    DCHECK(target->IsExternalString());

    if (heap_->InNewSpace(target)) {
      // String is still in new space. Update the table entry.
      *last = target;
      ++last;
    } else {
      // String got promoted. Move it to the old string list.
      old_space_strings_.push_back(target);
    }
  }

  DCHECK_LE(last, end);
  new_space_strings_.resize(static_cast<size_t>(last - start));
#ifdef VERIFY_HEAP
  if (FLAG_verify_heap) {
    Verify();
  }
#endif
}
2098 :
2099 0 : void Heap::ExternalStringTable::PromoteAllNewSpaceStrings() {
2100 0 : old_space_strings_.reserve(old_space_strings_.size() +
2101 0 : new_space_strings_.size());
2102 : std::move(std::begin(new_space_strings_), std::end(new_space_strings_),
2103 : std::back_inserter(old_space_strings_));
2104 : new_space_strings_.clear();
2105 0 : }
2106 :
2107 113919 : void Heap::ExternalStringTable::IterateNewSpaceStrings(RootVisitor* v) {
2108 113919 : if (!new_space_strings_.empty()) {
2109 : v->VisitRootPointers(Root::kExternalStringsTable, new_space_strings_.data(),
2110 342 : new_space_strings_.data() + new_space_strings_.size());
2111 : }
2112 113919 : }
2113 :
2114 113919 : void Heap::ExternalStringTable::IterateAll(RootVisitor* v) {
2115 113919 : IterateNewSpaceStrings(v);
2116 113919 : if (!old_space_strings_.empty()) {
2117 : v->VisitRootPointers(Root::kExternalStringsTable, old_space_strings_.data(),
2118 227678 : old_space_strings_.data() + old_space_strings_.size());
2119 : }
2120 113919 : }
2121 :
// Updates the new-space portion of the external string table using
// |updater_func| to resolve each entry.
void Heap::UpdateNewSpaceReferencesInExternalStringTable(
    ExternalStringTableUpdaterCallback updater_func) {
  external_string_table_.UpdateNewSpaceReferences(updater_func);
}
2126 :
2127 56800 : void Heap::ExternalStringTable::UpdateReferences(
2128 : Heap::ExternalStringTableUpdaterCallback updater_func) {
2129 113600 : if (old_space_strings_.size() > 0) {
2130 : Object** start = old_space_strings_.data();
2131 56740 : Object** end = start + old_space_strings_.size();
2132 56740 : for (Object** p = start; p < end; ++p) *p = updater_func(heap_, p);
2133 : }
2134 :
2135 56800 : UpdateNewSpaceReferences(updater_func);
2136 56800 : }
2137 :
// Updates all entries (old- and new-space) of the external string table
// using |updater_func| to resolve each entry.
void Heap::UpdateReferencesInExternalStringTable(
    ExternalStringTableUpdaterCallback updater_func) {
  external_string_table_.UpdateReferences(updater_func);
}
2142 :
2143 :
// Processes both weak lists (native contexts and allocation sites) with the
// given retainer.
void Heap::ProcessAllWeakReferences(WeakObjectRetainer* retainer) {
  ProcessNativeContexts(retainer);
  ProcessAllocationSites(retainer);
}
2148 :
2149 :
// Young-generation variant: only the native contexts list is processed; the
// allocation sites list is left untouched.
void Heap::ProcessYoungWeakReferences(WeakObjectRetainer* retainer) {
  ProcessNativeContexts(retainer);
}
2153 :
2154 :
// Walks the weak list of native contexts, letting |retainer| decide which
// entries survive, and installs the (possibly new) list head.
void Heap::ProcessNativeContexts(WeakObjectRetainer* retainer) {
  Object* head = VisitWeakList<Context>(this, native_contexts_list(), retainer);
  // Update the head of the list of contexts.
  set_native_contexts_list(head);
}
2160 :
2161 :
// Walks the weak list of allocation sites, letting |retainer| decide which
// entries survive, and installs the (possibly new) list head.
void Heap::ProcessAllocationSites(WeakObjectRetainer* retainer) {
  Object* allocation_site_obj =
      VisitWeakList<AllocationSite>(this, allocation_sites_list(), retainer);
  set_allocation_sites_list(allocation_site_obj);
}
2167 :
// Applies |retainer| only to the heads of the weak lists, without walking
// the list elements themselves.
void Heap::ProcessWeakListRoots(WeakObjectRetainer* retainer) {
  set_native_contexts_list(retainer->RetainAs(native_contexts_list()));
  set_allocation_sites_list(retainer->RetainAs(allocation_sites_list()));
}
2172 :
2173 113 : void Heap::ResetAllAllocationSitesDependentCode(PretenureFlag flag) {
2174 : DisallowHeapAllocation no_allocation_scope;
2175 : Object* cur = allocation_sites_list();
2176 : bool marked = false;
2177 1496 : while (cur->IsAllocationSite()) {
2178 : AllocationSite* casted = AllocationSite::cast(cur);
2179 1270 : if (casted->GetPretenureMode() == flag) {
2180 0 : casted->ResetPretenureDecision();
2181 0 : casted->set_deopt_dependent_code(true);
2182 : marked = true;
2183 : RemoveAllocationSitePretenuringFeedback(casted);
2184 : }
2185 : cur = casted->weak_next();
2186 : }
2187 113 : if (marked) isolate_->stack_guard()->RequestDeoptMarkedAllocationSites();
2188 113 : }
2189 :
2190 :
2191 56800 : void Heap::EvaluateOldSpaceLocalPretenuring(
2192 : uint64_t size_of_objects_before_gc) {
2193 56800 : uint64_t size_of_objects_after_gc = SizeOfObjects();
2194 : double old_generation_survival_rate =
2195 56800 : (static_cast<double>(size_of_objects_after_gc) * 100) /
2196 56800 : static_cast<double>(size_of_objects_before_gc);
2197 :
2198 56800 : if (old_generation_survival_rate < kOldSurvivalRateLowThreshold) {
2199 : // Too many objects died in the old generation, pretenuring of wrong
2200 : // allocation sites may be the cause for that. We have to deopt all
2201 : // dependent code registered in the allocation sites to re-evaluate
2202 : // our pretenuring decisions.
2203 113 : ResetAllAllocationSitesDependentCode(TENURED);
2204 113 : if (FLAG_trace_pretenuring) {
2205 : PrintF(
2206 : "Deopt all allocation sites dependent code due to low survival "
2207 : "rate in the old generation %f\n",
2208 0 : old_generation_survival_rate);
2209 : }
2210 : }
2211 56800 : }
2212 :
2213 :
// Reports every external string in the heap to the embedder-supplied
// |visitor|.
void Heap::VisitExternalResources(v8::ExternalResourceVisitor* visitor) {
  DisallowHeapAllocation no_allocation;
  // All external strings are listed in the external string table.

  // Adapts the internal RootVisitor interface to the public
  // ExternalResourceVisitor API, wrapping each entry in a local handle.
  class ExternalStringTableVisitorAdapter : public RootVisitor {
   public:
    explicit ExternalStringTableVisitorAdapter(
        v8::ExternalResourceVisitor* visitor)
        : visitor_(visitor) {}
    virtual void VisitRootPointers(Root root, Object** start, Object** end) {
      for (Object** p = start; p < end; p++) {
        DCHECK((*p)->IsExternalString());
        visitor_->VisitExternalString(
            Utils::ToLocal(Handle<String>(String::cast(*p))));
      }
    }

   private:
    v8::ExternalResourceVisitor* visitor_;
  } external_string_table_visitor(visitor);

  external_string_table_.IterateAll(&external_string_table_visitor);
}
2237 :
// Compile-time layout checks: the double payloads of FixedDoubleArray and
// FixedTypedArrayBase must start at double-aligned offsets. On 32-bit hosts
// HeapNumber::kValueOffset must NOT be double-aligned, matching the
// kDoubleUnaligned allocation used in AllocateHeapNumber.
STATIC_ASSERT((FixedDoubleArray::kHeaderSize & kDoubleAlignmentMask) ==
              0);  // NOLINT
STATIC_ASSERT((FixedTypedArrayBase::kDataOffset & kDoubleAlignmentMask) ==
              0);  // NOLINT
#ifdef V8_HOST_ARCH_32_BIT
STATIC_ASSERT((HeapNumber::kValueOffset & kDoubleAlignmentMask) !=
              0);  // NOLINT
#endif
2246 :
2247 :
2248 15 : int Heap::GetMaximumFillToAlign(AllocationAlignment alignment) {
2249 15 : switch (alignment) {
2250 : case kWordAligned:
2251 : return 0;
2252 : case kDoubleAligned:
2253 : case kDoubleUnaligned:
2254 : return kDoubleSize - kPointerSize;
2255 : default:
2256 0 : UNREACHABLE();
2257 : }
2258 : return 0;
2259 : }
2260 :
2261 :
2262 78024033 : int Heap::GetFillToAlign(Address address, AllocationAlignment alignment) {
2263 : intptr_t offset = OffsetFrom(address);
2264 78024033 : if (alignment == kDoubleAligned && (offset & kDoubleAlignmentMask) != 0)
2265 : return kPointerSize;
2266 : if (alignment == kDoubleUnaligned && (offset & kDoubleAlignmentMask) == 0)
2267 : return kDoubleSize - kPointerSize; // No fill if double is always aligned.
2268 : return 0;
2269 : }
2270 :
2271 :
2272 0 : HeapObject* Heap::PrecedeWithFiller(HeapObject* object, int filler_size) {
2273 0 : CreateFillerObjectAt(object->address(), filler_size, ClearRecordedSlots::kNo);
2274 0 : return HeapObject::FromAddress(object->address() + filler_size);
2275 : }
2276 :
2277 :
2278 0 : HeapObject* Heap::AlignWithFiller(HeapObject* object, int object_size,
2279 : int allocation_size,
2280 : AllocationAlignment alignment) {
2281 0 : int filler_size = allocation_size - object_size;
2282 : DCHECK_LT(0, filler_size);
2283 0 : int pre_filler = GetFillToAlign(object->address(), alignment);
2284 0 : if (pre_filler) {
2285 0 : object = PrecedeWithFiller(object, pre_filler);
2286 0 : filler_size -= pre_filler;
2287 : }
2288 0 : if (filler_size)
2289 0 : CreateFillerObjectAt(object->address() + object_size, filler_size,
2290 0 : ClearRecordedSlots::kNo);
2291 0 : return object;
2292 : }
2293 :
// Registers a freshly created JSArrayBuffer with the array-buffer tracker.
void Heap::RegisterNewArrayBuffer(JSArrayBuffer* buffer) {
  ArrayBufferTracker::RegisterNew(this, buffer);
}
2297 :
2298 :
// Removes a JSArrayBuffer from the array-buffer tracker.
void Heap::UnregisterArrayBuffer(JSArrayBuffer* buffer) {
  ArrayBufferTracker::Unregister(this, buffer);
}
2302 :
2303 128003 : void Heap::ConfigureInitialOldGenerationSize() {
2304 107227 : if (!old_generation_size_configured_ && tracer()->SurvivalEventsRecorded()) {
2305 : old_generation_allocation_limit_ =
2306 : Max(MinimumAllocationLimitGrowingStep(),
2307 : static_cast<size_t>(
2308 41552 : static_cast<double>(old_generation_allocation_limit_) *
2309 62328 : (tracer()->AverageSurvivalRatio() / 100)));
2310 : }
2311 86451 : }
2312 :
// Allocates a minimally initialized Map during bootstrapping, before the
// roots a full map needs (null value, empty descriptor array, ...) exist.
AllocationResult Heap::AllocatePartialMap(InstanceType instance_type,
                                          int instance_size) {
  Object* result = nullptr;
  AllocationResult allocation = AllocateRaw(Map::kSize, MAP_SPACE);
  if (!allocation.To(&result)) return allocation;
  // Map::cast cannot be used due to uninitialized map field.
  Map* map = reinterpret_cast<Map*>(result);
  map->set_map_after_allocation(reinterpret_cast<Map*>(root(kMetaMapRootIndex)),
                                SKIP_WRITE_BARRIER);
  map->set_instance_type(instance_type);
  map->set_instance_size(instance_size);
  // Initialize to only containing tagged fields.
  if (FLAG_unbox_double_fields) {
    map->set_layout_descriptor(LayoutDescriptor::FastPointerLayout());
  }
  // GetVisitorId requires a properly initialized LayoutDescriptor.
  map->set_visitor_id(Map::GetVisitorId(map));
  map->set_inobject_properties_or_constructor_function_index(0);
  map->SetInObjectUnusedPropertyFields(0);
  map->set_bit_field(0);
  map->set_bit_field2(0);
  int bit_field3 = Map::EnumLengthBits::encode(kInvalidEnumCacheSentinel) |
                   Map::OwnsDescriptors::encode(true) |
                   Map::ConstructionCounter::encode(Map::kNoSlackTracking);
  map->set_bit_field3(bit_field3);
  map->set_weak_cell_cache(Smi::kZero);
  return map;
}
2341 :
// Allocates and fully initializes a Map in map space. Returns a retry
// result if the raw allocation fails.
AllocationResult Heap::AllocateMap(InstanceType instance_type,
                                   int instance_size,
                                   ElementsKind elements_kind,
                                   int inobject_properties) {
  HeapObject* result = nullptr;
  AllocationResult allocation = AllocateRaw(Map::kSize, MAP_SPACE);
  if (!allocation.To(&result)) return allocation;

  isolate()->counters()->maps_created()->Increment();
  result->set_map_after_allocation(meta_map(), SKIP_WRITE_BARRIER);
  Map* map = Map::cast(result);
  map->set_instance_type(instance_type);
  map->set_prototype(null_value(), SKIP_WRITE_BARRIER);
  map->set_constructor_or_backpointer(null_value(), SKIP_WRITE_BARRIER);
  map->set_instance_size(instance_size);
  map->set_inobject_properties_or_constructor_function_index(
      inobject_properties);
  map->set_dependent_code(DependentCode::cast(empty_fixed_array()),
                          SKIP_WRITE_BARRIER);
  map->set_weak_cell_cache(Smi::kZero);
  map->set_raw_transitions(Smi::kZero);
  map->SetInObjectUnusedPropertyFields(inobject_properties);
  map->set_instance_descriptors(empty_descriptor_array());
  if (FLAG_unbox_double_fields) {
    map->set_layout_descriptor(LayoutDescriptor::FastPointerLayout());
  }
  // Must be called only after |instance_type|, |instance_size| and
  // |layout_descriptor| are set.
  map->set_visitor_id(Map::GetVisitorId(map));
  map->set_bit_field(0);
  map->set_bit_field2(1 << Map::kIsExtensible);
  int bit_field3 = Map::EnumLengthBits::encode(kInvalidEnumCacheSentinel) |
                   Map::OwnsDescriptors::encode(true) |
                   Map::ConstructionCounter::encode(Map::kNoSlackTracking);
  map->set_bit_field3(bit_field3);
  map->set_elements_kind(elements_kind);
  map->set_new_target_is_base(true);

  return map;
}
2382 :
2383 :
// Allocates |size| bytes in |space| (optionally double-aligned) and
// immediately turns the allocation into a filler object.
AllocationResult Heap::AllocateFillerObject(int size, bool double_align,
                                            AllocationSpace space) {
  HeapObject* obj = nullptr;
  {
    AllocationAlignment align = double_align ? kDoubleAligned : kWordAligned;
    AllocationResult allocation = AllocateRaw(size, space, align);
    if (!allocation.To(&obj)) return allocation;
  }
#ifdef DEBUG
  // The allocation must have ended up in the requested space.
  MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
  DCHECK(chunk->owner()->identity() == space);
#endif
  CreateFillerObjectAt(obj->address(), size, ClearRecordedSlots::kNo);
  return obj;
}
2399 :
2400 :
// Allocates a HeapNumber (mutable or immutable map depending on |mode|).
// The numeric value field is not initialized here.
AllocationResult Heap::AllocateHeapNumber(MutableMode mode,
                                          PretenureFlag pretenure) {
  // Statically ensure that it is safe to allocate heap numbers in paged
  // spaces.
  int size = HeapNumber::kSize;
  STATIC_ASSERT(HeapNumber::kSize <= kMaxRegularHeapObjectSize);

  AllocationSpace space = SelectSpace(pretenure);

  HeapObject* result = nullptr;
  {
    // kDoubleUnaligned aligns the value field rather than the object start
    // (see the HeapNumber::kValueOffset STATIC_ASSERT for 32-bit hosts).
    AllocationResult allocation = AllocateRaw(size, space, kDoubleUnaligned);
    if (!allocation.To(&result)) return allocation;
  }

  Map* map = mode == MUTABLE ? mutable_heap_number_map() : heap_number_map();
  HeapObject::cast(result)->set_map_after_allocation(map, SKIP_WRITE_BARRIER);
  return result;
}
2420 :
// Allocates a BigInt with room for |length| digits; |zero_initialize|
// controls whether the digit storage is cleared. Out-of-range lengths abort
// via FatalProcessOutOfMemory.
AllocationResult Heap::AllocateBigInt(int length, bool zero_initialize,
                                      PretenureFlag pretenure) {
  if (length < 0 || length > BigInt::kMaxLength) {
    v8::internal::Heap::FatalProcessOutOfMemory("invalid BigInt length", true);
  }
  int size = BigInt::SizeFor(length);
  AllocationSpace space = SelectSpace(pretenure);
  HeapObject* result = nullptr;
  {
    AllocationResult allocation = AllocateRaw(size, space);
    if (!allocation.To(&result)) return allocation;
  }
  result->set_map_after_allocation(bigint_map(), SKIP_WRITE_BARRIER);
  BigInt::cast(result)->Initialize(length, zero_initialize);
  return result;
}
2437 :
2438 22312374 : AllocationResult Heap::AllocateCell(Object* value) {
2439 : int size = Cell::kSize;
2440 : STATIC_ASSERT(Cell::kSize <= kMaxRegularHeapObjectSize);
2441 :
2442 : HeapObject* result = nullptr;
2443 : {
2444 11156187 : AllocationResult allocation = AllocateRaw(size, OLD_SPACE);
2445 11156187 : if (!allocation.To(&result)) return allocation;
2446 : }
2447 11156187 : result->set_map_after_allocation(cell_map(), SKIP_WRITE_BARRIER);
2448 11156187 : Cell::cast(result)->set_value(value);
2449 11156187 : return result;
2450 : }
2451 :
// Allocates a PropertyCell for |name| in old space, initialized with empty
// dependent code, empty property details, and the-hole as value.
AllocationResult Heap::AllocatePropertyCell(Name* name) {
  DCHECK(name->IsUniqueName());
  int size = PropertyCell::kSize;
  STATIC_ASSERT(PropertyCell::kSize <= kMaxRegularHeapObjectSize);

  HeapObject* result = nullptr;
  AllocationResult allocation = AllocateRaw(size, OLD_SPACE);
  if (!allocation.To(&result)) return allocation;

  result->set_map_after_allocation(global_property_cell_map(),
                                   SKIP_WRITE_BARRIER);
  PropertyCell* cell = PropertyCell::cast(result);
  cell->set_dependent_code(DependentCode::cast(empty_fixed_array()),
                           SKIP_WRITE_BARRIER);
  cell->set_property_details(PropertyDetails(Smi::kZero));
  cell->set_name(name);
  cell->set_value(the_hole_value());
  return result;
}
2471 :
2472 :
2473 59956353 : AllocationResult Heap::AllocateWeakCell(HeapObject* value) {
2474 : int size = WeakCell::kSize;
2475 : STATIC_ASSERT(WeakCell::kSize <= kMaxRegularHeapObjectSize);
2476 : HeapObject* result = nullptr;
2477 : {
2478 29978174 : AllocationResult allocation = AllocateRaw(size, OLD_SPACE);
2479 29978180 : if (!allocation.To(&result)) return allocation;
2480 : }
2481 29978179 : result->set_map_after_allocation(weak_cell_map(), SKIP_WRITE_BARRIER);
2482 29978179 : WeakCell::cast(result)->initialize(value);
2483 29978180 : return result;
2484 : }
2485 :
2486 :
2487 2168231 : AllocationResult Heap::AllocateTransitionArray(int capacity) {
2488 : DCHECK_LT(0, capacity);
2489 : HeapObject* raw_array = nullptr;
2490 : {
2491 531041 : AllocationResult allocation = AllocateRawFixedArray(capacity, TENURED);
2492 531041 : if (!allocation.To(&raw_array)) return allocation;
2493 : }
2494 : raw_array->set_map_after_allocation(transition_array_map(),
2495 531041 : SKIP_WRITE_BARRIER);
2496 : TransitionArray* array = TransitionArray::cast(raw_array);
2497 : array->set_length(capacity);
2498 531041 : MemsetPointer(array->data_start(), undefined_value(), capacity);
2499 : // Transition arrays are tenured. When black allocation is on we have to
2500 : // add the transition array to the list of encountered_transition_arrays.
2501 531041 : if (incremental_marking()->black_allocation()) {
2502 : mark_compact_collector()->AddTransitionArray(array);
2503 : }
2504 531041 : return array;
2505 : }
2506 :
// Generates the JS entry stub and installs it as the js_entry_code heap root.
// Kept as a separate non-inlined function to work around a gcc-4.4 codegen
// bug (see the comment in CreateFixedStubs).
void Heap::CreateJSEntryStub() {
  JSEntryStub stub(isolate(), StackFrame::ENTRY);
  set_js_entry_code(*stub.GetCode());
}
2511 :
2512 :
// Generates the JS construct entry stub and installs it as the
// js_construct_entry_code heap root. Separate non-inlined function for the
// same gcc-4.4 workaround as CreateJSEntryStub.
void Heap::CreateJSConstructEntryStub() {
  JSEntryStub stub(isolate(), StackFrame::CONSTRUCT_ENTRY);
  set_js_construct_entry_code(*stub.GetCode());
}
2517 :
2518 :
// Pre-generates the stubs that are installed as heap roots. Runs once during
// heap setup; the call order below is load-bearing (see comments).
void Heap::CreateFixedStubs() {
  // Here we create roots for fixed stubs. They are needed at GC
  // for cooking and uncooking (check out frames.cc).
  // This eliminates the need for doing dictionary lookup in the
  // stub cache for these stubs.
  HandleScope scope(isolate());
  // Canonicalize handles, so that we can share constant pool entries pointing
  // to code targets without dereferencing their handles.
  CanonicalHandleScope canonical(isolate());

  // Create stubs that should be there, so we don't unexpectedly have to
  // create them if we need them during the creation of another stub.
  // Stub creation mixes raw pointers and handles in an unsafe manner so
  // we cannot create stubs while we are creating stubs.
  CodeStub::GenerateStubsAheadOfTime(isolate());

  // MacroAssembler::Abort calls (usually enabled with --debug-code) depend on
  // CEntryStub, so we need to call GenerateStubsAheadOfTime before JSEntryStub
  // is created.

  // gcc-4.4 has problem generating correct code of following snippet:
  // { JSEntryStub stub;
  //   js_entry_code_ = *stub.GetCode();
  // }
  // { JSConstructEntryStub stub;
  //   js_construct_entry_code_ = *stub.GetCode();
  // }
  // To workaround the problem, make separate functions without inlining.
  Heap::CreateJSEntryStub();
  Heap::CreateJSConstructEntryStub();
}
2550 :
// Returns true for roots that are mutated after heap setup (caches, growable
// lists, tables, and all Smi roots). Such roots must never be embedded as
// constants; everything not listed here is written once during bootstrapping.
bool Heap::RootCanBeWrittenAfterInitialization(Heap::RootListIndex root_index) {
  switch (root_index) {
    case kNumberStringCacheRootIndex:
    case kCodeStubsRootIndex:
    case kScriptListRootIndex:
    case kMaterializedObjectsRootIndex:
    case kMicrotaskQueueRootIndex:
    case kDetachedContextsRootIndex:
    case kWeakObjectToCodeTableRootIndex:
    case kWeakNewSpaceObjectToCodeListRootIndex:
    case kRetainedMapsRootIndex:
    case kRetainingPathTargetsRootIndex:
    case kFeedbackVectorsForProfilingToolsRootIndex:
    case kNoScriptSharedFunctionInfosRootIndex:
    case kWeakStackTraceListRootIndex:
    case kSerializedTemplatesRootIndex:
    case kSerializedGlobalProxySizesRootIndex:
    case kPublicSymbolTableRootIndex:
    case kApiSymbolTableRootIndex:
    case kApiPrivateSymbolTableRootIndex:
    case kMessageListenersRootIndex:
// Smi values
#define SMI_ENTRY(type, name, Name) case k##Name##RootIndex:
      SMI_ROOT_LIST(SMI_ENTRY)
#undef SMI_ENTRY
    // String table
    case kStringTableRootIndex:
      return true;

    default:
      return false;
  }
}
2584 :
2585 7929639 : bool Heap::RootCanBeTreatedAsConstant(RootListIndex root_index) {
2586 15535477 : bool can_be = !RootCanBeWrittenAfterInitialization(root_index) &&
2587 : !InNewSpace(root(root_index));
2588 : DCHECK_IMPLIES(can_be, IsImmovable(HeapObject::cast(root(root_index))));
2589 7929639 : return can_be;
2590 : }
2591 :
2592 29646612 : int Heap::FullSizeNumberStringCacheLength() {
2593 : // Compute the size of the number string cache based on the max newspace size.
2594 : // The number string cache has a minimum size based on twice the initial cache
2595 : // size to ensure that it is bigger after being made 'full size'.
2596 29646612 : size_t number_string_cache_size = max_semi_space_size_ / 512;
2597 : number_string_cache_size =
2598 : Max(static_cast<size_t>(kInitialNumberStringCacheSize * 2),
2599 : Min<size_t>(0x4000u, number_string_cache_size));
2600 : // There is a string and a number per entry so the length is twice the number
2601 : // of entries.
2602 29646612 : return static_cast<int>(number_string_cache_size * 2);
2603 : }
2604 :
2605 :
2606 264650208 : void Heap::FlushNumberStringCache() {
2607 : // Flush the number to string cache.
2608 : int len = number_string_cache()->length();
2609 264650208 : for (int i = 0; i < len; i++) {
2610 264593408 : number_string_cache()->set_undefined(i);
2611 : }
2612 56800 : }
2613 :
2614 :
2615 558 : Map* Heap::MapForFixedTypedArray(ExternalArrayType array_type) {
2616 13739 : return Map::cast(roots_[RootIndexForFixedTypedArray(array_type)]);
2617 : }
2618 :
2619 :
// Maps an ExternalArrayType to the root-list index of the corresponding
// fixed typed array map (one case per entry in TYPED_ARRAYS).
Heap::RootListIndex Heap::RootIndexForFixedTypedArray(
    ExternalArrayType array_type) {
  switch (array_type) {
#define ARRAY_TYPE_TO_ROOT_INDEX(Type, type, TYPE, ctype, size) \
  case kExternal##Type##Array:                                  \
    return kFixed##Type##ArrayMapRootIndex;

    TYPED_ARRAYS(ARRAY_TYPE_TO_ROOT_INDEX)
#undef ARRAY_TYPE_TO_ROOT_INDEX

    default:
      UNREACHABLE();
  }
}
2634 :
2635 :
// Maps an ElementsKind to the root-list index of the canonical empty fixed
// typed array for that kind (one case per entry in TYPED_ARRAYS).
Heap::RootListIndex Heap::RootIndexForEmptyFixedTypedArray(
    ElementsKind elementsKind) {
  switch (elementsKind) {
#define ELEMENT_KIND_TO_ROOT_INDEX(Type, type, TYPE, ctype, size) \
  case TYPE##_ELEMENTS:                                           \
    return kEmptyFixed##Type##ArrayRootIndex;

    TYPED_ARRAYS(ELEMENT_KIND_TO_ROOT_INDEX)
#undef ELEMENT_KIND_TO_ROOT_INDEX
    default:
      UNREACHABLE();
  }
}
2649 :
2650 0 : FixedTypedArrayBase* Heap::EmptyFixedTypedArrayForMap(const Map* map) {
2651 : return FixedTypedArrayBase::cast(
2652 2978 : roots_[RootIndexForEmptyFixedTypedArray(map->elements_kind())]);
2653 : }
2654 :
2655 :
2656 4019441 : AllocationResult Heap::AllocateForeign(Address address,
2657 4019441 : PretenureFlag pretenure) {
2658 : // Statically ensure that it is safe to allocate foreigns in paged spaces.
2659 : STATIC_ASSERT(Foreign::kSize <= kMaxRegularHeapObjectSize);
2660 4019441 : AllocationSpace space = (pretenure == TENURED) ? OLD_SPACE : NEW_SPACE;
2661 : Foreign* result = nullptr;
2662 4019441 : AllocationResult allocation = Allocate(foreign_map(), space);
2663 4019441 : if (!allocation.To(&result)) return allocation;
2664 : result->set_foreign_address(address);
2665 4019432 : return result;
2666 : }
2667 :
2668 54 : AllocationResult Heap::AllocateSmallOrderedHashSet(int capacity,
2669 54 : PretenureFlag pretenure) {
2670 : DCHECK_EQ(0, capacity % SmallOrderedHashSet::kLoadFactor);
2671 54 : CHECK_GE(SmallOrderedHashSet::kMaxCapacity, capacity);
2672 :
2673 : int size = SmallOrderedHashSet::Size(capacity);
2674 : AllocationSpace space = SelectSpace(pretenure);
2675 : HeapObject* result = nullptr;
2676 : {
2677 54 : AllocationResult allocation = AllocateRaw(size, space);
2678 54 : if (!allocation.To(&result)) return allocation;
2679 : }
2680 :
2681 : result->set_map_after_allocation(small_ordered_hash_set_map(),
2682 54 : SKIP_WRITE_BARRIER);
2683 : Handle<SmallOrderedHashSet> table(SmallOrderedHashSet::cast(result));
2684 54 : table->Initialize(isolate(), capacity);
2685 54 : return result;
2686 : }
2687 :
2688 54 : AllocationResult Heap::AllocateSmallOrderedHashMap(int capacity,
2689 54 : PretenureFlag pretenure) {
2690 : DCHECK_EQ(0, capacity % SmallOrderedHashMap::kLoadFactor);
2691 54 : CHECK_GE(SmallOrderedHashMap::kMaxCapacity, capacity);
2692 :
2693 : int size = SmallOrderedHashMap::Size(capacity);
2694 : AllocationSpace space = SelectSpace(pretenure);
2695 : HeapObject* result = nullptr;
2696 : {
2697 54 : AllocationResult allocation = AllocateRaw(size, space);
2698 54 : if (!allocation.To(&result)) return allocation;
2699 : }
2700 :
2701 : result->set_map_after_allocation(small_ordered_hash_map_map(),
2702 54 : SKIP_WRITE_BARRIER);
2703 : Handle<SmallOrderedHashMap> table(SmallOrderedHashMap::cast(result));
2704 54 : table->Initialize(isolate(), capacity);
2705 54 : return result;
2706 : }
2707 :
2708 9860564 : AllocationResult Heap::AllocateByteArray(int length, PretenureFlag pretenure) {
2709 4930287 : if (length < 0 || length > ByteArray::kMaxLength) {
2710 : v8::internal::Heap::FatalProcessOutOfMemory("invalid array length", true);
2711 : }
2712 : int size = ByteArray::SizeFor(length);
2713 : AllocationSpace space = SelectSpace(pretenure);
2714 : HeapObject* result = nullptr;
2715 : {
2716 4930289 : AllocationResult allocation = AllocateRaw(size, space);
2717 4930289 : if (!allocation.To(&result)) return allocation;
2718 : }
2719 :
2720 4930277 : result->set_map_after_allocation(byte_array_map(), SKIP_WRITE_BARRIER);
2721 : ByteArray::cast(result)->set_length(length);
2722 4930278 : ByteArray::cast(result)->clear_padding();
2723 4930277 : return result;
2724 : }
2725 :
2726 :
// Allocates a tenured BytecodeArray of |length| bytes and copies
// |raw_bytecodes| into it; aborts the process on an invalid length. All
// header fields are initialized to their defaults except the caller-supplied
// frame size, parameter count, and constant pool.
AllocationResult Heap::AllocateBytecodeArray(int length,
                                             const byte* const raw_bytecodes,
                                             int frame_size,
                                             int parameter_count,
                                             FixedArray* constant_pool) {
  if (length < 0 || length > BytecodeArray::kMaxLength) {
    v8::internal::Heap::FatalProcessOutOfMemory("invalid array length", true);
  }
  // Bytecode array is pretenured, so constant pool array should be too.
  DCHECK(!InNewSpace(constant_pool));

  int size = BytecodeArray::SizeFor(length);
  HeapObject* result = nullptr;
  {
    AllocationResult allocation = AllocateRaw(size, OLD_SPACE);
    if (!allocation.To(&result)) return allocation;
  }

  result->set_map_after_allocation(bytecode_array_map(), SKIP_WRITE_BARRIER);
  BytecodeArray* instance = BytecodeArray::cast(result);
  instance->set_length(length);
  instance->set_frame_size(frame_size);
  instance->set_parameter_count(parameter_count);
  instance->set_incoming_new_target_or_generator_register(
      interpreter::Register::invalid_value());
  instance->set_interrupt_budget(interpreter::Interpreter::kInterruptBudget);
  instance->set_osr_loop_nesting_level(0);
  instance->set_bytecode_age(BytecodeArray::kNoAgeBytecodeAge);
  instance->set_constant_pool(constant_pool);
  instance->set_handler_table(empty_fixed_array());
  instance->set_source_position_table(empty_byte_array());
  // Copy the bytecodes after all header fields are in place.
  CopyBytes(instance->GetFirstBytecodeAddress(), raw_bytecodes, length);
  instance->clear_padding();

  return result;
}
2763 :
// Writes a filler (free-space) object of |size| bytes at |addr| so that heap
// iteration stays well-formed. Picks the one-pointer, two-pointer, or
// variable-size free-space map depending on |size|; for |size| == 0 no object
// is written and nullptr is returned. When |mode| is kYes, remembered-set
// slots covering the range are also cleared.
HeapObject* Heap::CreateFillerObjectAt(Address addr, int size,
                                       ClearRecordedSlots mode) {
  if (size == 0) return nullptr;
  HeapObject* filler = HeapObject::FromAddress(addr);
  if (size == kPointerSize) {
    filler->set_map_after_allocation(
        reinterpret_cast<Map*>(root(kOnePointerFillerMapRootIndex)),
        SKIP_WRITE_BARRIER);
  } else if (size == 2 * kPointerSize) {
    filler->set_map_after_allocation(
        reinterpret_cast<Map*>(root(kTwoPointerFillerMapRootIndex)),
        SKIP_WRITE_BARRIER);
  } else {
    DCHECK_GT(size, 2 * kPointerSize);
    filler->set_map_after_allocation(
        reinterpret_cast<Map*>(root(kFreeSpaceMapRootIndex)),
        SKIP_WRITE_BARRIER);
    // Only FreeSpace objects carry an explicit size field.
    FreeSpace::cast(filler)->relaxed_write_size(size);
  }
  if (mode == ClearRecordedSlots::kYes) {
    ClearRecordedSlotRange(addr, addr + size);
  }

  // At this point, we may be deserializing the heap from a snapshot, and
  // none of the maps have been created yet and are nullptr.
  DCHECK((filler->map() == nullptr && !deserialization_complete_) ||
         filler->map()->IsMap());
  return filler;
}
2793 :
2794 :
2795 369138 : bool Heap::CanMoveObjectStart(HeapObject* object) {
2796 184569 : if (!FLAG_move_object_start) return false;
2797 :
2798 : // Sampling heap profiler may have a reference to the object.
2799 369138 : if (isolate()->heap_profiler()->is_sampling_allocations()) return false;
2800 :
2801 184569 : Address address = object->address();
2802 :
2803 184569 : if (lo_space()->Contains(object)) return false;
2804 :
2805 : // We can move the object start if the page was already swept.
2806 184554 : return Page::FromAddress(address)->SweepingDone();
2807 : }
2808 :
2809 10 : bool Heap::IsImmovable(HeapObject* object) {
2810 10 : MemoryChunk* chunk = MemoryChunk::FromAddress(object->address());
2811 20 : return chunk->NeverEvacuate() || chunk->owner()->identity() == LO_SPACE;
2812 : }
2813 :
// Removes the first |elements_to_trim| elements of |object| by moving the
// array's start forward: a filler is written over the trimmed prefix and a
// fresh header (map + length) is written at the new start. Returns the
// relocated array. The statement order below matters: the incremental marker
// is notified before the filler is created, and the new length is written
// with relaxed stores only after the filler exists.
FixedArrayBase* Heap::LeftTrimFixedArray(FixedArrayBase* object,
                                         int elements_to_trim) {
  CHECK_NOT_NULL(object);
  DCHECK(CanMoveObjectStart(object));
  DCHECK(!object->IsFixedTypedArrayBase());
  DCHECK(!object->IsByteArray());
  const int element_size = object->IsFixedArray() ? kPointerSize : kDoubleSize;
  const int bytes_to_trim = elements_to_trim * element_size;
  Map* map = object->map();

  // For now this trick is only applied to objects in new and paged space.
  // In large object space the object's start must coincide with chunk
  // and thus the trick is just not applicable.
  DCHECK(!lo_space()->Contains(object));
  DCHECK(object->map() != fixed_cow_array_map());

  STATIC_ASSERT(FixedArrayBase::kMapOffset == 0);
  STATIC_ASSERT(FixedArrayBase::kLengthOffset == kPointerSize);
  STATIC_ASSERT(FixedArrayBase::kHeaderSize == 2 * kPointerSize);

  const int len = object->length();
  DCHECK(elements_to_trim <= len);

  // Calculate location of new array start.
  Address old_start = object->address();
  Address new_start = old_start + bytes_to_trim;

  if (incremental_marking()->IsMarking()) {
    incremental_marking()->NotifyLeftTrimming(
        object, HeapObject::FromAddress(new_start));
  }

  // Technically in new space this write might be omitted (except for
  // debug mode which iterates through the heap), but to play safer
  // we still do it.
  CreateFillerObjectAt(old_start, bytes_to_trim, ClearRecordedSlots::kYes);

  // Initialize header of the trimmed array. Since left trimming is only
  // performed on pages which are not concurrently swept creating a filler
  // object does not require synchronization.
  RELAXED_WRITE_FIELD(object, bytes_to_trim, map);
  RELAXED_WRITE_FIELD(object, bytes_to_trim + kPointerSize,
                      Smi::FromInt(len - elements_to_trim));

  FixedArrayBase* new_object =
      FixedArrayBase::cast(HeapObject::FromAddress(new_start));

  // Remove recorded slots for the new map and length offset.
  ClearRecordedSlot(new_object, HeapObject::RawField(new_object, 0));
  ClearRecordedSlot(new_object, HeapObject::RawField(
                                    new_object, FixedArrayBase::kLengthOffset));

  // Notify the heap profiler of change in object layout.
  OnMoveEvent(new_object, object, new_object->Size());
  return new_object;
}
2870 :
// Shrinks |object| in place by |elements_to_trim| elements from the end,
// writing a filler over the freed tail. The element width depends on the
// concrete array type (byte, pointer, or double). The new length is written
// with a release store only after the filler exists, to avoid races with the
// concurrent sweeper.
void Heap::RightTrimFixedArray(FixedArrayBase* object, int elements_to_trim) {
  const int len = object->length();
  DCHECK_LE(elements_to_trim, len);
  DCHECK_GE(elements_to_trim, 0);

  int bytes_to_trim;
  DCHECK(!object->IsFixedTypedArrayBase());
  if (object->IsByteArray()) {
    // ByteArray sizes are rounded up, so trim by the difference of the
    // rounded sizes rather than raw element counts.
    int new_size = ByteArray::SizeFor(len - elements_to_trim);
    bytes_to_trim = ByteArray::SizeFor(len) - new_size;
    DCHECK_GE(bytes_to_trim, 0);
  } else if (object->IsFixedArray() || object->IsTransitionArray()) {
    bytes_to_trim = elements_to_trim * kPointerSize;
  } else {
    DCHECK(object->IsFixedDoubleArray());
    bytes_to_trim = elements_to_trim * kDoubleSize;
  }

  // For now this trick is only applied to objects in new and paged space.
  DCHECK(object->map() != fixed_cow_array_map());

  if (bytes_to_trim == 0) {
    // No need to create filler and update live bytes counters, just initialize
    // header of the trimmed array.
    object->synchronized_set_length(len - elements_to_trim);
    return;
  }

  // Calculate location of new array end.
  Address old_end = object->address() + object->Size();
  Address new_end = old_end - bytes_to_trim;

  // Technically in new space this write might be omitted (except for
  // debug mode which iterates through the heap), but to play safer
  // we still do it.
  // We do not create a filler for objects in large object space.
  // TODO(hpayer): We should shrink the large object page if the size
  // of the object changed significantly.
  if (!lo_space()->Contains(object)) {
    HeapObject* filler =
        CreateFillerObjectAt(new_end, bytes_to_trim, ClearRecordedSlots::kYes);
    DCHECK_NOT_NULL(filler);
    // Clear the mark bits of the black area that belongs now to the filler.
    // This is an optimization. The sweeper will release black fillers anyway.
    if (incremental_marking()->black_allocation() &&
        incremental_marking()->marking_state()->IsBlackOrGrey(filler)) {
      Page* page = Page::FromAddress(new_end);
      incremental_marking()->marking_state()->bitmap(page)->ClearRange(
          page->AddressToMarkbitIndex(new_end),
          page->AddressToMarkbitIndex(new_end + bytes_to_trim));
    }
  }

  // Initialize header of the trimmed array. We are storing the new length
  // using release store after creating a filler for the left-over space to
  // avoid races with the sweeper thread.
  object->synchronized_set_length(len - elements_to_trim);

  // Notify the heap profiler of change in object layout. The array may not be
  // moved during GC, and size has to be adjusted nevertheless.
  HeapProfiler* profiler = isolate()->heap_profiler();
  if (profiler->is_tracking_allocations()) {
    profiler->UpdateObjectSizeEvent(object->address(), object->Size());
  }
}
2937 :
2938 :
2939 12592 : AllocationResult Heap::AllocateFixedTypedArrayWithExternalPointer(
2940 : int length, ExternalArrayType array_type, void* external_pointer,
2941 : PretenureFlag pretenure) {
2942 : int size = FixedTypedArrayBase::kHeaderSize;
2943 : AllocationSpace space = SelectSpace(pretenure);
2944 : HeapObject* result = nullptr;
2945 : {
2946 12592 : AllocationResult allocation = AllocateRaw(size, space);
2947 12592 : if (!allocation.To(&result)) return allocation;
2948 : }
2949 :
2950 : result->set_map_after_allocation(MapForFixedTypedArray(array_type),
2951 12592 : SKIP_WRITE_BARRIER);
2952 : FixedTypedArrayBase* elements = FixedTypedArrayBase::cast(result);
2953 12592 : elements->set_base_pointer(Smi::kZero, SKIP_WRITE_BARRIER);
2954 : elements->set_external_pointer(external_pointer, SKIP_WRITE_BARRIER);
2955 : elements->set_length(length);
2956 12592 : return elements;
2957 : }
2958 :
// File-local helper: maps an ExternalArrayType to its per-element byte size
// and ElementsKind via out parameters (one case per entry in TYPED_ARRAYS).
static void ForFixedTypedArray(ExternalArrayType array_type, int* element_size,
                               ElementsKind* element_kind) {
  switch (array_type) {
#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
  case kExternal##Type##Array:                          \
    *element_size = size;                               \
    *element_kind = TYPE##_ELEMENTS;                    \
    return;

    TYPED_ARRAYS(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE

    default:
      *element_size = 0;               // Bogus
      *element_kind = UINT8_ELEMENTS;  // Bogus
      UNREACHABLE();
  }
}
2977 :
2978 :
2979 589 : AllocationResult Heap::AllocateFixedTypedArray(int length,
2980 : ExternalArrayType array_type,
2981 : bool initialize,
2982 : PretenureFlag pretenure) {
2983 : int element_size;
2984 : ElementsKind elements_kind;
2985 589 : ForFixedTypedArray(array_type, &element_size, &elements_kind);
2986 589 : int size = OBJECT_POINTER_ALIGN(length * element_size +
2987 : FixedTypedArrayBase::kDataOffset);
2988 : AllocationSpace space = SelectSpace(pretenure);
2989 :
2990 : HeapObject* object = nullptr;
2991 : AllocationResult allocation = AllocateRaw(
2992 : size, space,
2993 589 : array_type == kExternalFloat64Array ? kDoubleAligned : kWordAligned);
2994 589 : if (!allocation.To(&object)) return allocation;
2995 :
2996 : object->set_map_after_allocation(MapForFixedTypedArray(array_type),
2997 589 : SKIP_WRITE_BARRIER);
2998 : FixedTypedArrayBase* elements = FixedTypedArrayBase::cast(object);
2999 589 : elements->set_base_pointer(elements, SKIP_WRITE_BARRIER);
3000 : elements->set_external_pointer(
3001 1178 : ExternalReference::fixed_typed_array_base_data_offset().address(),
3002 : SKIP_WRITE_BARRIER);
3003 : elements->set_length(length);
3004 589 : if (initialize) memset(elements->DataPtr(), 0, elements->DataSize());
3005 589 : return elements;
3006 : }
3007 :
3008 :
// Allocates a Code object of |object_size| bytes in code space. When
// |immovable| is requested and the first allocation landed on a movable page,
// the result is either pinned (while serializing) or discarded and
// re-allocated in large-object space.
AllocationResult Heap::AllocateCode(int object_size, bool immovable) {
  DCHECK(IsAligned(static_cast<intptr_t>(object_size), kCodeAlignment));
  AllocationResult allocation = AllocateRaw(object_size, CODE_SPACE);

  HeapObject* result = nullptr;
  if (!allocation.To(&result)) return allocation;
  if (immovable) {
    Address address = result->address();
    MemoryChunk* chunk = MemoryChunk::FromAddress(address);
    // Code objects which should stay at a fixed address are allocated either
    // in the first page of code space (objects on the first page of each space
    // are never moved), in large object space, or (during snapshot creation)
    // the containing page is marked as immovable.
    if (!Heap::IsImmovable(result) &&
        !code_space_->FirstPage()->Contains(address)) {
      if (isolate()->serializer_enabled()) {
        chunk->MarkNeverEvacuate();
      } else {
        // Discard the first code allocation, which was on a page where it could
        // be moved.
        CreateFillerObjectAt(result->address(), object_size,
                             ClearRecordedSlots::kNo);
        allocation = lo_space_->AllocateRaw(object_size, EXECUTABLE);
        if (!allocation.To(&result)) return allocation;
        OnAllocationEvent(result, object_size);
      }
    }
  }

  result->set_map_after_allocation(code_map(), SKIP_WRITE_BARRIER);
  Code* code = Code::cast(result);
  DCHECK(IsAligned(bit_cast<intptr_t>(code->address()), kCodeAlignment));
  DCHECK(!memory_allocator()->code_range()->valid() ||
         memory_allocator()->code_range()->contains(code->address()) ||
         object_size <= code_space()->AreaSize());
  return code;
}
3046 :
3047 :
// Creates a byte-for-byte copy of |code| in code space, then relocates it to
// its new address and records its embedded-object references for the write
// barrier / incremental marker.
AllocationResult Heap::CopyCode(Code* code) {
  CodeSpaceMemoryModificationScope code_modification(this);
  AllocationResult allocation;

  HeapObject* result = nullptr;
  // Allocate an object the same size as the code object.
  int obj_size = code->Size();
  allocation = AllocateRaw(obj_size, CODE_SPACE);
  if (!allocation.To(&result)) return allocation;

  // Copy code object.
  Address old_addr = code->address();
  Address new_addr = result->address();
  CopyBlock(new_addr, old_addr, obj_size);
  Code* new_code = Code::cast(result);

  // Relocate the copy.
  DCHECK(IsAligned(bit_cast<intptr_t>(new_code->address()), kCodeAlignment));
  DCHECK(!memory_allocator()->code_range()->valid() ||
         memory_allocator()->code_range()->contains(code->address()) ||
         obj_size <= code_space()->AreaSize());

  // Clear the trap handler index since they can't be shared between code. We
  // have to do this before calling Relocate because relocate would adjust the
  // base pointer for the old code.
  new_code->set_trap_handler_index(Smi::FromInt(trap_handler::kInvalidIndex));

  new_code->Relocate(new_addr - old_addr);
  // We have to iterate over the object and process its pointers when black
  // allocation is on.
  incremental_marking()->ProcessBlackAllocatedObject(new_code);
  // Record all references to embedded objects in the new code object.
  RecordWritesIntoCode(new_code);
  return new_code;
}
3083 :
// Allocates a tenured copy of |bytecode_array|, duplicating its header fields
// and bytecodes. The constant pool, handler table, and source position table
// are shared with the original (pointer copies), not deep-copied.
AllocationResult Heap::CopyBytecodeArray(BytecodeArray* bytecode_array) {
  int size = BytecodeArray::SizeFor(bytecode_array->length());
  HeapObject* result = nullptr;
  {
    AllocationResult allocation = AllocateRaw(size, OLD_SPACE);
    if (!allocation.To(&result)) return allocation;
  }

  result->set_map_after_allocation(bytecode_array_map(), SKIP_WRITE_BARRIER);
  BytecodeArray* copy = BytecodeArray::cast(result);
  copy->set_length(bytecode_array->length());
  copy->set_frame_size(bytecode_array->frame_size());
  copy->set_parameter_count(bytecode_array->parameter_count());
  copy->set_incoming_new_target_or_generator_register(
      bytecode_array->incoming_new_target_or_generator_register());
  copy->set_constant_pool(bytecode_array->constant_pool());
  copy->set_handler_table(bytecode_array->handler_table());
  copy->set_source_position_table(bytecode_array->source_position_table());
  copy->set_interrupt_budget(bytecode_array->interrupt_budget());
  copy->set_osr_loop_nesting_level(bytecode_array->osr_loop_nesting_level());
  copy->set_bytecode_age(bytecode_array->bytecode_age());
  bytecode_array->CopyBytecodesTo(copy);
  return copy;
}
3108 :
3109 2132406 : void Heap::InitializeAllocationMemento(AllocationMemento* memento,
3110 2132406 : AllocationSite* allocation_site) {
3111 : memento->set_map_after_allocation(allocation_memento_map(),
3112 2132406 : SKIP_WRITE_BARRIER);
3113 : DCHECK(allocation_site->map() == allocation_site_map());
3114 2132406 : memento->set_allocation_site(allocation_site, SKIP_WRITE_BARRIER);
3115 2132406 : if (FLAG_allocation_site_pretenuring) {
3116 2132406 : allocation_site->IncrementMementoCreateCount();
3117 : }
3118 2132406 : }
3119 :
3120 :
3121 84883987 : AllocationResult Heap::Allocate(Map* map, AllocationSpace space,
3122 : AllocationSite* allocation_site) {
3123 : DCHECK(gc_state_ == NOT_IN_GC);
3124 : DCHECK(map->instance_type() != MAP_TYPE);
3125 : int size = map->instance_size();
3126 84883987 : if (allocation_site != nullptr) {
3127 11581 : size += AllocationMemento::kSize;
3128 : }
3129 : HeapObject* result = nullptr;
3130 84883987 : AllocationResult allocation = AllocateRaw(size, space);
3131 84883984 : if (!allocation.To(&result)) return allocation;
3132 : // New space objects are allocated white.
3133 : WriteBarrierMode write_barrier_mode =
3134 84882966 : space == NEW_SPACE ? SKIP_WRITE_BARRIER : UPDATE_WRITE_BARRIER;
3135 84882966 : result->set_map_after_allocation(map, write_barrier_mode);
3136 84882951 : if (allocation_site != nullptr) {
3137 : AllocationMemento* alloc_memento = reinterpret_cast<AllocationMemento*>(
3138 11581 : reinterpret_cast<Address>(result) + map->instance_size());
3139 11581 : InitializeAllocationMemento(alloc_memento, allocation_site);
3140 : }
3141 84882951 : return result;
3142 : }
3143 :
// Initializes a freshly allocated JSObject: installs the property backing
// store, sets up elements from the map, and fills the in-object fields.
void Heap::InitializeJSObjectFromMap(JSObject* obj, Object* properties,
                                     Map* map) {
  obj->set_raw_properties_or_hash(properties);
  obj->initialize_elements();
  // TODO(1240798): Initialize the object's body using valid initial values
  // according to the object's initial map. For example, if the map's
  // instance type is JS_ARRAY_TYPE, the length field should be initialized
  // to a number (e.g. Smi::kZero) and the elements initialized to a
  // fixed array (e.g. Heap::empty_fixed_array()). Currently, the object
  // verification code has to cope with (temporarily) invalid objects. See
  // for example, JSArray::JSArrayVerify).
  InitializeJSObjectBody(obj, map, JSObject::kHeaderSize);
}
3157 :
3158 :
// Fills the in-object fields of |obj| from |start_offset| up to the map's
// instance size.  Fields are filled with undefined, except that while
// in-object slack tracking is in progress the (potentially trimmable) tail
// is filled with one-pointer filler maps.
void Heap::InitializeJSObjectBody(JSObject* obj, Map* map, int start_offset) {
  if (start_offset == map->instance_size()) return;
  DCHECK_LT(start_offset, map->instance_size());

  // We cannot always fill with one_pointer_filler_map because objects
  // created from API functions expect their embedder fields to be initialized
  // with undefined_value.
  // Pre-allocated fields need to be initialized with undefined_value as well
  // so that object accesses before the constructor completes (e.g. in the
  // debugger) will not cause a crash.

  // In case of Array subclassing the |map| could already be transitioned
  // to different elements kind from the initial map on which we track slack.
  bool in_progress = map->IsInobjectSlackTrackingInProgress();
  Object* filler;
  if (in_progress) {
    filler = one_pointer_filler_map();
  } else {
    filler = undefined_value();
  }
  obj->InitializeBody(map, start_offset, Heap::undefined_value(), filler);
  if (in_progress) {
    // The slack-tracking counter lives on the root map of the transition
    // tree, which may differ from |map| (see comment above).
    map->FindRootMap()->InobjectSlackTrackingStep();
  }
}
3184 :
3185 :
// Allocates and fully initializes a JSObject described by |map|.  Callers
// needing allocation-site tracking pass a non-null |allocation_site| so a
// memento is appended by Allocate().
AllocationResult Heap::AllocateJSObjectFromMap(
    Map* map, PretenureFlag pretenure, AllocationSite* allocation_site) {
  // JSFunctions should be allocated using AllocateFunction to be
  // properly initialized.
  DCHECK(map->instance_type() != JS_FUNCTION_TYPE);

  // Both types of global objects should be allocated using
  // AllocateGlobalObject to be properly initialized.
  DCHECK(map->instance_type() != JS_GLOBAL_OBJECT_TYPE);

  // Allocate the backing storage for the properties.
  FixedArray* properties = empty_fixed_array();

  // Allocate the JSObject.
  AllocationSpace space = SelectSpace(pretenure);
  JSObject* js_obj = nullptr;
  AllocationResult allocation = Allocate(map, space, allocation_site);
  if (!allocation.To(&js_obj)) return allocation;

  // Initialize the JSObject.
  InitializeJSObjectFromMap(js_obj, properties, map);
  DCHECK(js_obj->HasFastElements() || js_obj->HasFixedTypedArrayElements() ||
         js_obj->HasFastStringWrapperElements() ||
         js_obj->HasFastArgumentsElements());
  return js_obj;
}
3212 :
3213 :
// Allocates a JSObject using |constructor|'s initial map.  The constructor
// must already have an initial map (DCHECKed below).
AllocationResult Heap::AllocateJSObject(JSFunction* constructor,
                                        PretenureFlag pretenure,
                                        AllocationSite* allocation_site) {
  DCHECK(constructor->has_initial_map());

  // Allocate the object based on the constructors initial map.
  AllocationResult allocation = AllocateJSObjectFromMap(
      constructor->initial_map(), pretenure, allocation_site);
#ifdef DEBUG
  // Make sure result is NOT a global object if valid.
  HeapObject* obj = nullptr;
  DCHECK(!allocation.To(&obj) || !obj->IsJSGlobalObject());
#endif
  return allocation;
}
3229 :
3230 :
// Clones |source| into new space (optionally followed by an allocation
// memento for |site|), then deep-copies the elements and property backing
// stores so the clone does not share mutable state with the original.
AllocationResult Heap::CopyJSObject(JSObject* source, AllocationSite* site) {
  // Make the clone.
  Map* map = source->map();

  // We can only clone regexps, normal objects, api objects, errors or arrays.
  // Copying anything else will break invariants.
  CHECK(map->instance_type() == JS_REGEXP_TYPE ||
        map->instance_type() == JS_OBJECT_TYPE ||
        map->instance_type() == JS_ERROR_TYPE ||
        map->instance_type() == JS_ARRAY_TYPE ||
        map->instance_type() == JS_API_OBJECT_TYPE ||
        map->instance_type() == WASM_INSTANCE_TYPE ||
        map->instance_type() == WASM_MEMORY_TYPE ||
        map->instance_type() == WASM_MODULE_TYPE ||
        map->instance_type() == WASM_TABLE_TYPE ||
        map->instance_type() == JS_SPECIAL_API_OBJECT_TYPE);

  int object_size = map->instance_size();
  HeapObject* clone = nullptr;

  DCHECK(site == nullptr || AllocationSite::CanTrack(map->instance_type()));

  // Reserve extra room for the allocation memento directly after the object
  // when allocation-site tracking is requested.
  int adjusted_object_size =
      site != nullptr ? object_size + AllocationMemento::kSize : object_size;
  AllocationResult allocation = AllocateRaw(adjusted_object_size, NEW_SPACE);
  if (!allocation.To(&clone)) return allocation;

  SLOW_DCHECK(InNewSpace(clone));
  // Since we know the clone is allocated in new space, we can copy
  // the contents without worrying about updating the write barrier.
  CopyBlock(clone->address(), source->address(), object_size);

  if (site != nullptr) {
    AllocationMemento* alloc_memento = reinterpret_cast<AllocationMemento*>(
        reinterpret_cast<Address>(clone) + object_size);
    InitializeAllocationMemento(alloc_memento, site);
  }

  SLOW_DCHECK(JSObject::cast(clone)->GetElementsKind() ==
              source->GetElementsKind());
  FixedArrayBase* elements = FixedArrayBase::cast(source->elements());
  // Update elements if necessary.
  if (elements->length() > 0) {
    FixedArrayBase* elem = nullptr;
    {
      AllocationResult allocation;
      if (elements->map() == fixed_cow_array_map()) {
        // Copy-on-write arrays can be shared as-is.
        allocation = FixedArray::cast(elements);
      } else if (source->HasDoubleElements()) {
        allocation = CopyFixedDoubleArray(FixedDoubleArray::cast(elements));
      } else {
        allocation = CopyFixedArray(FixedArray::cast(elements));
      }
      if (!allocation.To(&elem)) return allocation;
    }
    // Clone is in new space, so the barrier can be skipped.
    JSObject::cast(clone)->set_elements(elem, SKIP_WRITE_BARRIER);
  }

  // Update properties if necessary.
  if (source->HasFastProperties()) {
    if (source->property_array()->length() > 0) {
      PropertyArray* properties = source->property_array();
      PropertyArray* prop = nullptr;
      {
        // TODO(gsathya): Do not copy hash code.
        AllocationResult allocation = CopyPropertyArray(properties);
        if (!allocation.To(&prop)) return allocation;
      }
      JSObject::cast(clone)->set_raw_properties_or_hash(prop,
                                                        SKIP_WRITE_BARRIER);
    }
  } else {
    FixedArray* properties = FixedArray::cast(source->property_dictionary());
    FixedArray* prop = nullptr;
    {
      AllocationResult allocation = CopyFixedArray(properties);
      if (!allocation.To(&prop)) return allocation;
    }
    JSObject::cast(clone)->set_raw_properties_or_hash(prop, SKIP_WRITE_BARRIER);
  }
  // Return the new clone.
  return clone;
}
3314 :
3315 :
// Copies |len| one-byte characters from |vector| into |chars|.
static inline void WriteOneByteData(Vector<const char> vector, uint8_t* chars,
                                    int len) {
  // Only works for one byte strings.
  DCHECK(vector.length() == len);
  MemCopy(chars, vector.start(), len);
}
3322 :
// Decodes the UTF-8 bytes in |vector| and writes exactly |len| UTF-16 code
// units into |chars|.  Supplementary-plane characters are written as a
// surrogate pair (two code units).
static inline void WriteTwoByteData(Vector<const char> vector, uint16_t* chars,
                                    int len) {
  const uint8_t* stream = reinterpret_cast<const uint8_t*>(vector.start());
  size_t stream_length = vector.length();
  while (stream_length != 0) {
    size_t consumed = 0;
    uint32_t c = unibrow::Utf8::ValueOf(stream, stream_length, &consumed);
    DCHECK_NE(unibrow::Utf8::kBadChar, c);
    DCHECK(consumed <= stream_length);
    stream_length -= consumed;
    stream += consumed;
    if (c > unibrow::Utf16::kMaxNonSurrogateCharCode) {
      // Needs two code units; bail out (DCHECK below) if |len| would be
      // exceeded rather than writing past the buffer.
      len -= 2;
      if (len < 0) break;
      *chars++ = unibrow::Utf16::LeadSurrogate(c);
      *chars++ = unibrow::Utf16::TrailSurrogate(c);
    } else {
      len -= 1;
      if (len < 0) break;
      *chars++ = c;
    }
  }
  // Input must have been fully consumed and must have produced exactly |len|
  // code units.
  DCHECK_EQ(0, stream_length);
  DCHECK_EQ(0, len);
}
3348 :
3349 :
// Flattens |s| (one-byte content) into the raw character buffer |chars|.
static inline void WriteOneByteData(String* s, uint8_t* chars, int len) {
  DCHECK(s->length() == len);
  String::WriteToFlat(s, chars, 0, len);
}
3354 :
3355 :
// Flattens |s| (two-byte content) into the raw character buffer |chars|.
static inline void WriteTwoByteData(String* s, uint16_t* chars, int len) {
  DCHECK(s->length() == len);
  String::WriteToFlat(s, chars, 0, len);
}
3360 :
3361 :
// Allocates a sequential internalized string of |chars| characters in old
// space and fills it from |t| (either a String* or a Vector<const char>,
// dispatched through the WriteOneByteData/WriteTwoByteData overloads).
template <bool is_one_byte, typename T>
AllocationResult Heap::AllocateInternalizedStringImpl(T t, int chars,
                                                      uint32_t hash_field) {
  DCHECK_LE(0, chars);
  // Compute map and object size.
  int size;
  Map* map;

  DCHECK_LE(0, chars);
  DCHECK_GE(String::kMaxLength, chars);
  if (is_one_byte) {
    map = one_byte_internalized_string_map();
    size = SeqOneByteString::SizeFor(chars);
  } else {
    map = internalized_string_map();
    size = SeqTwoByteString::SizeFor(chars);
  }

  // Allocate string.  Internalized strings are always tenured.
  HeapObject* result = nullptr;
  {
    AllocationResult allocation = AllocateRaw(size, OLD_SPACE);
    if (!allocation.To(&result)) return allocation;
  }

  result->set_map_after_allocation(map, SKIP_WRITE_BARRIER);
  // Set length and hash fields of the allocated string.
  String* answer = String::cast(result);
  answer->set_length(chars);
  answer->set_hash_field(hash_field);

  DCHECK_EQ(size, answer->Size());

  if (is_one_byte) {
    WriteOneByteData(t, SeqOneByteString::cast(answer)->GetChars(), chars);
  } else {
    WriteTwoByteData(t, SeqTwoByteString::cast(answer)->GetChars(), chars);
  }
  return answer;
}


// Need explicit instantiations.
template AllocationResult Heap::AllocateInternalizedStringImpl<true>(String*,
                                                                     int,
                                                                     uint32_t);
template AllocationResult Heap::AllocateInternalizedStringImpl<false>(String*,
                                                                      int,
                                                                      uint32_t);
template AllocationResult Heap::AllocateInternalizedStringImpl<false>(
    Vector<const char>, int, uint32_t);
3413 :
3414 :
// Allocates an uninitialized sequential one-byte string of |length|
// characters.  Only map, length, and hash field are set; the character
// payload is left for the caller to fill.
AllocationResult Heap::AllocateRawOneByteString(int length,
                                                PretenureFlag pretenure) {
  DCHECK_LE(0, length);
  DCHECK_GE(String::kMaxLength, length);
  int size = SeqOneByteString::SizeFor(length);
  DCHECK_GE(SeqOneByteString::kMaxSize, size);
  AllocationSpace space = SelectSpace(pretenure);

  HeapObject* result = nullptr;
  {
    AllocationResult allocation = AllocateRaw(size, space);
    if (!allocation.To(&result)) return allocation;
  }

  // Partially initialize the object.
  result->set_map_after_allocation(one_byte_string_map(), SKIP_WRITE_BARRIER);
  String::cast(result)->set_length(length);
  String::cast(result)->set_hash_field(String::kEmptyHashField);
  DCHECK_EQ(size, HeapObject::cast(result)->Size());

  return result;
}
3437 :
3438 :
// Allocates an uninitialized sequential two-byte string of |length|
// characters.  Only map, length, and hash field are set; the character
// payload is left for the caller to fill.
AllocationResult Heap::AllocateRawTwoByteString(int length,
                                                PretenureFlag pretenure) {
  DCHECK_LE(0, length);
  DCHECK_GE(String::kMaxLength, length);
  int size = SeqTwoByteString::SizeFor(length);
  DCHECK_GE(SeqTwoByteString::kMaxSize, size);
  AllocationSpace space = SelectSpace(pretenure);

  HeapObject* result = nullptr;
  {
    AllocationResult allocation = AllocateRaw(size, space);
    if (!allocation.To(&result)) return allocation;
  }

  // Partially initialize the object.
  result->set_map_after_allocation(string_map(), SKIP_WRITE_BARRIER);
  String::cast(result)->set_length(length);
  String::cast(result)->set_hash_field(String::kEmptyHashField);
  DCHECK_EQ(size, HeapObject::cast(result)->Size());
  return result;
}
3460 :
3461 :
// Allocates the canonical zero-length FixedArray (tenured).
AllocationResult Heap::AllocateEmptyFixedArray() {
  int size = FixedArray::SizeFor(0);
  HeapObject* result = nullptr;
  {
    AllocationResult allocation = AllocateRaw(size, OLD_SPACE);
    if (!allocation.To(&result)) return allocation;
  }
  // Initialize the object.
  result->set_map_after_allocation(fixed_array_map(), SKIP_WRITE_BARRIER);
  FixedArray::cast(result)->set_length(0);
  return result;
}
3474 :
// Allocates the canonical zero-length ScopeInfo (tenured).  Layout is a
// FixedArray with the scope_info map.
AllocationResult Heap::AllocateEmptyScopeInfo() {
  int size = FixedArray::SizeFor(0);
  HeapObject* result = nullptr;
  {
    AllocationResult allocation = AllocateRaw(size, OLD_SPACE);
    if (!allocation.To(&result)) return allocation;
  }
  // Initialize the object.
  result->set_map_after_allocation(scope_info_map(), SKIP_WRITE_BARRIER);
  FixedArray::cast(result)->set_length(0);
  return result;
}
3487 :
// Copies a copy-on-write FixedArray into old space.  If |src| is already
// outside new space it is returned unchanged (COW arrays are shareable).
AllocationResult Heap::CopyAndTenureFixedCOWArray(FixedArray* src) {
  if (!InNewSpace(src)) {
    return src;
  }

  int len = src->length();
  HeapObject* obj = nullptr;
  {
    AllocationResult allocation = AllocateRawFixedArray(len, TENURED);
    if (!allocation.To(&obj)) return allocation;
  }
  obj->set_map_after_allocation(fixed_array_map(), SKIP_WRITE_BARRIER);
  FixedArray* result = FixedArray::cast(obj);
  result->set_length(len);

  // Copy the content.
  DisallowHeapAllocation no_gc;
  WriteBarrierMode mode = result->GetWriteBarrierMode(no_gc);
  for (int i = 0; i < len; i++) result->set(i, src->get(i), mode);

  // TODO(mvstanton): The map is set twice because of protection against calling
  // set() on a COW FixedArray. Issue v8:3221 created to track this, and
  // we might then be able to remove this whole method.
  HeapObject::cast(obj)->set_map_after_allocation(fixed_cow_array_map(),
                                                  SKIP_WRITE_BARRIER);
  return result;
}
3515 :
3516 :
// Allocates the canonical empty fixed typed array for |array_type| (tenured).
AllocationResult Heap::AllocateEmptyFixedTypedArray(
    ExternalArrayType array_type) {
  return AllocateFixedTypedArray(0, array_type, false, TENURED);
}
3521 :
namespace {
// Sets the length field of a freshly allocated array-like object.  The
// PropertyArray specialization is needed because PropertyArray packs its
// length together with other bits and uses a distinct initializer.
template <typename T>
void initialize_length(T* array, int length) {
  array->set_length(length);
}

template <>
void initialize_length<PropertyArray>(PropertyArray* array, int length) {
  array->initialize_length(length);
}

}  // namespace
3534 :
// Copies |src| into a new array of length |old_len + grow_by|, filling the
// newly added tail with undefined.  Works for FixedArray and PropertyArray
// (see the explicit instantiations below).
template <typename T>
AllocationResult Heap::CopyArrayAndGrow(T* src, int grow_by,
                                        PretenureFlag pretenure) {
  int old_len = src->length();
  int new_len = old_len + grow_by;
  DCHECK(new_len >= old_len);
  HeapObject* obj = nullptr;
  {
    AllocationResult allocation = AllocateRawFixedArray(new_len, pretenure);
    if (!allocation.To(&obj)) return allocation;
  }

  obj->set_map_after_allocation(src->map(), SKIP_WRITE_BARRIER);
  T* result = T::cast(obj);
  initialize_length(result, new_len);

  // Copy the content.
  DisallowHeapAllocation no_gc;
  WriteBarrierMode mode = obj->GetWriteBarrierMode(no_gc);
  for (int i = 0; i < old_len; i++) result->set(i, src->get(i), mode);
  MemsetPointer(result->data_start() + old_len, undefined_value(), grow_by);
  return result;
}

template AllocationResult Heap::CopyArrayAndGrow(FixedArray* src, int grow_by,
                                                 PretenureFlag pretenure);
template AllocationResult Heap::CopyArrayAndGrow(PropertyArray* src,
                                                 int grow_by,
                                                 PretenureFlag pretenure);
3564 :
// Copies the first |new_len| elements of |src| into a new FixedArray.
AllocationResult Heap::CopyFixedArrayUpTo(FixedArray* src, int new_len,
                                          PretenureFlag pretenure) {
  if (new_len == 0) return empty_fixed_array();

  DCHECK_LE(new_len, src->length());

  HeapObject* obj = nullptr;
  {
    AllocationResult allocation = AllocateRawFixedArray(new_len, pretenure);
    if (!allocation.To(&obj)) return allocation;
  }
  obj->set_map_after_allocation(fixed_array_map(), SKIP_WRITE_BARRIER);

  FixedArray* result = FixedArray::cast(obj);
  result->set_length(new_len);

  // Copy the content.
  DisallowHeapAllocation no_gc;
  WriteBarrierMode mode = result->GetWriteBarrierMode(no_gc);
  for (int i = 0; i < new_len; i++) result->set(i, src->get(i), mode);
  return result;
}
3587 :
// Copies |src| into a new (non-tenured) array carrying |map|.  Uses a raw
// block copy when no write barrier is needed, otherwise copies element by
// element.  Works for FixedArray and PropertyArray (instantiated below).
template <typename T>
AllocationResult Heap::CopyArrayWithMap(T* src, Map* map) {
  int len = src->length();
  HeapObject* obj = nullptr;
  {
    AllocationResult allocation = AllocateRawFixedArray(len, NOT_TENURED);
    if (!allocation.To(&obj)) return allocation;
  }
  obj->set_map_after_allocation(map, SKIP_WRITE_BARRIER);

  T* result = T::cast(obj);
  DisallowHeapAllocation no_gc;
  WriteBarrierMode mode = result->GetWriteBarrierMode(no_gc);

  // Eliminate the write barrier if possible.
  if (mode == SKIP_WRITE_BARRIER) {
    // Skip the map word (already set above); the length field is copied
    // from |src| along with the payload.
    CopyBlock(obj->address() + kPointerSize, src->address() + kPointerSize,
              T::SizeFor(len) - kPointerSize);
    return obj;
  }

  // Slow case: Just copy the content one-by-one.
  initialize_length(result, len);
  for (int i = 0; i < len; i++) result->set(i, src->get(i), mode);
  return result;
}

template AllocationResult Heap::CopyArrayWithMap(FixedArray* src, Map* map);
template AllocationResult Heap::CopyArrayWithMap(PropertyArray* src, Map* map);
3617 :
// Convenience wrapper: copy a FixedArray under an explicit map.
AllocationResult Heap::CopyFixedArrayWithMap(FixedArray* src, Map* map) {
  return CopyArrayWithMap(src, map);
}
3621 :
// Convenience wrapper: copy a PropertyArray under the property_array map.
AllocationResult Heap::CopyPropertyArray(PropertyArray* src) {
  return CopyArrayWithMap(src, property_array_map());
}
3625 :
// Copies a FixedDoubleArray under |map|.  Doubles contain no pointers, so a
// raw block copy starting at the length field is always safe.
AllocationResult Heap::CopyFixedDoubleArrayWithMap(FixedDoubleArray* src,
                                                   Map* map) {
  int len = src->length();
  HeapObject* obj = nullptr;
  {
    AllocationResult allocation = AllocateRawFixedDoubleArray(len, NOT_TENURED);
    if (!allocation.To(&obj)) return allocation;
  }
  obj->set_map_after_allocation(map, SKIP_WRITE_BARRIER);
  CopyBlock(obj->address() + FixedDoubleArray::kLengthOffset,
            src->address() + FixedDoubleArray::kLengthOffset,
            FixedDoubleArray::SizeFor(len) - FixedDoubleArray::kLengthOffset);
  return obj;
}
3640 :
3641 0 : AllocationResult Heap::CopyFeedbackVector(FeedbackVector* src) {
3642 : int len = src->length();
3643 : HeapObject* obj = nullptr;
3644 : {
3645 0 : AllocationResult allocation = AllocateRawFeedbackVector(len, NOT_TENURED);
3646 0 : if (!allocation.To(&obj)) return allocation;
3647 : }
3648 0 : obj->set_map_after_allocation(feedback_vector_map(), SKIP_WRITE_BARRIER);
3649 :
3650 : FeedbackVector* result = FeedbackVector::cast(obj);
3651 :
3652 : DisallowHeapAllocation no_gc;
3653 0 : WriteBarrierMode mode = result->GetWriteBarrierMode(no_gc);
3654 :
3655 : // Eliminate the write barrier if possible.
3656 0 : if (mode == SKIP_WRITE_BARRIER) {
3657 : CopyBlock(result->address() + kPointerSize,
3658 : result->address() + kPointerSize,
3659 0 : FeedbackVector::SizeFor(len) - kPointerSize);
3660 0 : return result;
3661 : }
3662 :
3663 : // Slow case: Just copy the content one-by-one.
3664 0 : result->set_shared_function_info(src->shared_function_info());
3665 0 : result->set_optimized_code_cell(src->optimized_code_cell());
3666 : result->set_invocation_count(src->invocation_count());
3667 : result->set_profiler_ticks(src->profiler_ticks());
3668 : result->set_deopt_count(src->deopt_count());
3669 0 : for (int i = 0; i < len; i++) result->set(i, src->get(i), mode);
3670 0 : return result;
3671 : }
3672 :
// Allocates raw storage for a FixedArray of |length| elements; map and
// length are NOT initialized here.  Aborts the process on an invalid length.
AllocationResult Heap::AllocateRawFixedArray(int length,
                                             PretenureFlag pretenure) {
  if (length < 0 || length > FixedArray::kMaxLength) {
    v8::internal::Heap::FatalProcessOutOfMemory("invalid array length", true);
  }
  int size = FixedArray::SizeFor(length);
  AllocationSpace space = SelectSpace(pretenure);

  AllocationResult result = AllocateRaw(size, space);
  // Large arrays get a progress bar so incremental marking can scan them in
  // pieces.
  if (!result.IsRetry() && size > kMaxRegularHeapObjectSize &&
      FLAG_use_marking_progress_bar) {
    MemoryChunk* chunk =
        MemoryChunk::FromAddress(result.ToObjectChecked()->address());
    chunk->SetFlag<AccessMode::ATOMIC>(MemoryChunk::HAS_PROGRESS_BAR);
  }
  return result;
}
3690 :
3691 :
// Allocates a FixedArray of |length| elements with every slot set to
// |filler|.  Returns the canonical empty array for length 0.
AllocationResult Heap::AllocateFixedArrayWithFiller(int length,
                                                    PretenureFlag pretenure,
                                                    Object* filler) {
  DCHECK_LE(0, length);
  DCHECK(empty_fixed_array()->IsFixedArray());
  if (length == 0) return empty_fixed_array();

  // The filler must be old-space so MemsetPointer can skip write barriers.
  DCHECK(!InNewSpace(filler));
  HeapObject* result = nullptr;
  {
    AllocationResult allocation = AllocateRawFixedArray(length, pretenure);
    if (!allocation.To(&result)) return allocation;
  }

  result->set_map_after_allocation(fixed_array_map(), SKIP_WRITE_BARRIER);
  FixedArray* array = FixedArray::cast(result);
  array->set_length(length);
  MemsetPointer(array->data_start(), filler, length);
  return array;
}
3712 :
// Allocates a PropertyArray of |length| slots, all initialized to undefined.
AllocationResult Heap::AllocatePropertyArray(int length,
                                             PretenureFlag pretenure) {
  DCHECK_LE(0, length);
  DCHECK(!InNewSpace(undefined_value()));
  HeapObject* result = nullptr;
  {
    AllocationResult allocation = AllocateRawFixedArray(length, pretenure);
    if (!allocation.To(&result)) return allocation;
  }

  result->set_map_after_allocation(property_array_map(), SKIP_WRITE_BARRIER);
  PropertyArray* array = PropertyArray::cast(result);
  array->initialize_length(length);
  MemsetPointer(array->data_start(), undefined_value(), length);
  return result;
}
3729 :
// Allocates a FixedArray whose element slots are left uninitialized; the
// caller is responsible for filling every slot before the next GC point.
AllocationResult Heap::AllocateUninitializedFixedArray(
    int length, PretenureFlag pretenure) {
  if (length == 0) return empty_fixed_array();

  HeapObject* obj = nullptr;
  {
    AllocationResult allocation = AllocateRawFixedArray(length, pretenure);
    if (!allocation.To(&obj)) return allocation;
  }

  obj->set_map_after_allocation(fixed_array_map(), SKIP_WRITE_BARRIER);
  FixedArray::cast(obj)->set_length(length);
  return obj;
}
3744 :
3745 :
// Allocates a FixedDoubleArray with uninitialized element payload; only map
// and length are set.
AllocationResult Heap::AllocateUninitializedFixedDoubleArray(
    int length, PretenureFlag pretenure) {
  if (length == 0) return empty_fixed_array();

  HeapObject* elements = nullptr;
  AllocationResult allocation = AllocateRawFixedDoubleArray(length, pretenure);
  if (!allocation.To(&elements)) return allocation;

  elements->set_map_after_allocation(fixed_double_array_map(),
                                     SKIP_WRITE_BARRIER);
  FixedDoubleArray::cast(elements)->set_length(length);
  return elements;
}
3759 :
3760 :
// Allocates raw, double-aligned storage for a FixedDoubleArray; neither map
// nor length is initialized.  Aborts the process on an invalid length.
AllocationResult Heap::AllocateRawFixedDoubleArray(int length,
                                                   PretenureFlag pretenure) {
  if (length < 0 || length > FixedDoubleArray::kMaxLength) {
    v8::internal::Heap::FatalProcessOutOfMemory("invalid array length", true);
  }
  int size = FixedDoubleArray::SizeFor(length);
  AllocationSpace space = SelectSpace(pretenure);

  HeapObject* object = nullptr;
  {
    // kDoubleAligned guarantees 8-byte alignment of the element payload.
    AllocationResult allocation = AllocateRaw(size, space, kDoubleAligned);
    if (!allocation.To(&object)) return allocation;
  }

  return object;
}
3777 :
// Allocates raw storage for a FeedbackVector of |length| slots; map and
// header fields are NOT initialized here.
AllocationResult Heap::AllocateRawFeedbackVector(int length,
                                                 PretenureFlag pretenure) {
  DCHECK_LE(0, length);

  int size = FeedbackVector::SizeFor(length);
  AllocationSpace space = SelectSpace(pretenure);

  HeapObject* object = nullptr;
  {
    AllocationResult allocation = AllocateRaw(size, space);
    if (!allocation.To(&object)) return allocation;
  }

  return object;
}
3793 :
// Allocates and initializes a FeedbackVector for |shared|, sized by the
// shared function info's feedback metadata.  All slots start as undefined
// and all counters start at zero.
AllocationResult Heap::AllocateFeedbackVector(SharedFunctionInfo* shared,
                                              PretenureFlag pretenure) {
  int length = shared->feedback_metadata()->slot_count();

  HeapObject* result = nullptr;
  {
    AllocationResult allocation = AllocateRawFeedbackVector(length, pretenure);
    if (!allocation.To(&result)) return allocation;
  }

  // Initialize the object's map.
  result->set_map_after_allocation(feedback_vector_map(), SKIP_WRITE_BARRIER);
  FeedbackVector* vector = FeedbackVector::cast(result);
  vector->set_shared_function_info(shared);
  vector->set_optimized_code_cell(Smi::FromEnum(OptimizationMarker::kNone));
  vector->set_length(length);
  vector->set_invocation_count(0);
  vector->set_profiler_ticks(0);
  vector->set_deopt_count(0);
  // TODO(leszeks): Initialize based on the feedback metadata.
  MemsetPointer(vector->slots_start(), undefined_value(), length);
  return vector;
}
3817 :
// Allocates a new Symbol in old space with a freshly generated identity
// hash, undefined name, and cleared flags.
AllocationResult Heap::AllocateSymbol() {
  // Statically ensure that it is safe to allocate symbols in paged spaces.
  STATIC_ASSERT(Symbol::kSize <= kMaxRegularHeapObjectSize);

  HeapObject* result = nullptr;
  AllocationResult allocation = AllocateRaw(Symbol::kSize, OLD_SPACE);
  if (!allocation.To(&result)) return allocation;

  result->set_map_after_allocation(symbol_map(), SKIP_WRITE_BARRIER);

  // Generate a random hash value.
  int hash = isolate()->GenerateIdentityHash(Name::kHashBitMask);

  // Symbols are never array indices, hence the kIsNotArrayIndexMask bit.
  Symbol::cast(result)
      ->set_hash_field(Name::kIsNotArrayIndexMask | (hash << Name::kHashShift));
  Symbol::cast(result)->set_name(undefined_value());
  Symbol::cast(result)->set_flags(0);

  DCHECK(!Symbol::cast(result)->is_private());
  return result;
}
3839 :
// Allocates a Struct instance of the given |type|, mapping the instance type
// to its map via the STRUCT_LIST macro, and zero/undefined-initializes its
// body.
AllocationResult Heap::AllocateStruct(InstanceType type,
                                      PretenureFlag pretenure) {
  Map* map;
  switch (type) {
#define MAKE_CASE(NAME, Name, name) \
  case NAME##_TYPE:                 \
    map = name##_map();             \
    break;
    STRUCT_LIST(MAKE_CASE)
#undef MAKE_CASE
    default:
      UNREACHABLE();
  }
  int size = map->instance_size();
  Struct* result = nullptr;
  {
    AllocationSpace space = SelectSpace(pretenure);
    AllocationResult allocation = Allocate(map, space);
    if (!allocation.To(&result)) return allocation;
  }
  result->InitializeBody(size);
  return result;
}
3863 :
3864 :
// Ensures the heap can be safely iterated by finishing any concurrent
// sweeping that is still in progress.
void Heap::MakeHeapIterable() {
  mark_compact_collector()->EnsureSweepingCompleted();
}
3868 :
3869 :
// Estimates the fraction of time spent in the mutator (as opposed to GC)
// from the two throughput measurements.
//
// Derivation:
//   mutator_utilization = mutator_time / (mutator_time + gc_time)
//                       = (1/mutator_speed) / (1/mutator_speed + 1/gc_speed)
//                       = gc_speed / (mutator_speed + gc_speed)
static double ComputeMutatorUtilization(double mutator_speed, double gc_speed) {
  // Without a mutator throughput sample the ratio is undefined; report the
  // minimum utilization.
  if (mutator_speed == 0) {
    return 0.0;
  }
  // An unknown GC speed is replaced by a conservative (slow) estimate of
  // 200000 bytes/ms so the result errs on the low side.
  const double effective_gc_speed = (gc_speed == 0) ? 200000 : gc_speed;
  return effective_gc_speed / (mutator_speed + effective_gc_speed);
}
3884 :
3885 :
// Returns the mutator utilization computed from new-space allocation
// throughput vs. scavenge speed; optionally traces the result.
double Heap::YoungGenerationMutatorUtilization() {
  double mutator_speed = static_cast<double>(
      tracer()->NewSpaceAllocationThroughputInBytesPerMillisecond());
  double gc_speed =
      tracer()->ScavengeSpeedInBytesPerMillisecond(kForSurvivedObjects);
  double result = ComputeMutatorUtilization(mutator_speed, gc_speed);
  if (FLAG_trace_mutator_utilization) {
    isolate()->PrintWithTimestamp(
        "Young generation mutator utilization = %.3f ("
        "mutator_speed=%.f, gc_speed=%.f)\n",
        result, mutator_speed, gc_speed);
  }
  return result;
}
3900 :
3901 :
// Returns the mutator utilization computed from old-generation allocation
// throughput vs. mark-compact speed; optionally traces the result.
double Heap::OldGenerationMutatorUtilization() {
  double mutator_speed = static_cast<double>(
      tracer()->OldGenerationAllocationThroughputInBytesPerMillisecond());
  double gc_speed = static_cast<double>(
      tracer()->CombinedMarkCompactSpeedInBytesPerMillisecond());
  double result = ComputeMutatorUtilization(mutator_speed, gc_speed);
  if (FLAG_trace_mutator_utilization) {
    isolate()->PrintWithTimestamp(
        "Old generation mutator utilization = %.3f ("
        "mutator_speed=%.f, gc_speed=%.f)\n",
        result, mutator_speed, gc_speed);
  }
  return result;
}
3916 :
3917 :
// True when the young generation spends almost all time in the mutator
// (utilization above 99.3%), i.e. scavenging pressure is low.
bool Heap::HasLowYoungGenerationAllocationRate() {
  const double high_mutator_utilization = 0.993;
  return YoungGenerationMutatorUtilization() > high_mutator_utilization;
}
3922 :
3923 :
// True when the old generation spends almost all time in the mutator
// (utilization above 99.3%), i.e. full-GC pressure is low.
bool Heap::HasLowOldGenerationAllocationRate() {
  const double high_mutator_utilization = 0.993;
  return OldGenerationMutatorUtilization() > high_mutator_utilization;
}
3928 :
3929 :
// True only when both generations show a low allocation rate.
bool Heap::HasLowAllocationRate() {
  return HasLowYoungGenerationAllocationRate() &&
         HasLowOldGenerationAllocationRate();
}
3934 :
3935 :
// Convenience overload using the current promoted size and committed
// old-generation memory.
bool Heap::HasHighFragmentation() {
  size_t used = PromotedSpaceSizeOfObjects();
  size_t committed = CommittedOldGenerationMemory();
  return HasHighFragmentation(used, committed);
}
3941 :
// Heuristic: fragmentation is considered high when committed memory exceeds
// twice the live size plus a 16 MB slack.
bool Heap::HasHighFragmentation(size_t used, size_t committed) {
  const size_t kSlack = 16 * MB;
  // Fragmentation is high if committed > 2 * used + kSlack.
  // Rewrite the expression to avoid overflow.
  DCHECK_GE(committed, used);
  return committed - used > used + kSlack;
}
3948 : }
3949 :
// True when the heap should trade throughput for smaller footprint: the
// --optimize-for-size flag is set, the isolate is backgrounded, or the
// embedder signaled high memory pressure.
bool Heap::ShouldOptimizeForMemoryUsage() {
  return FLAG_optimize_for_size || isolate()->IsIsolateInBackground() ||
         HighMemoryPressure();
}
3954 :
// Notifies the memory reducer of possible garbage when the isolate moves to
// the background before any mark-compact has happened.
void Heap::ActivateMemoryReducerIfNeeded() {
  // Activate memory reducer when switching to background if
  // - there was no mark compact since the start.
  // - the committed memory can be potentially reduced.
  // 2 pages for the old, code, and map space + 1 page for new space.
  const int kMinCommittedMemory = 7 * Page::kPageSize;
  if (ms_count_ == 0 && CommittedMemory() > kMinCommittedMemory &&
      isolate()->IsIsolateInBackground()) {
    MemoryReducer::Event event;
    event.type = MemoryReducer::kPossibleGarbage;
    event.time_ms = MonotonicallyIncreasingTimeInMs();
    memory_reducer_->NotifyPossibleGarbage(event);
  }
}
3969 :
// Shrinks new space and uncommits the from-space when memory should be
// reduced or the allocation throughput has dropped very low.  Disabled under
// --predictable to keep behavior deterministic.
void Heap::ReduceNewSpaceSize() {
  // TODO(ulan): Unify this constant with the similar constant in
  // GCIdleTimeHandler once the change is merged to 4.5.
  static const size_t kLowAllocationThroughput = 1000;
  const double allocation_throughput =
      tracer()->CurrentAllocationThroughputInBytesPerMillisecond();

  if (FLAG_predictable) return;

  if (ShouldReduceMemory() ||
      ((allocation_throughput != 0) &&
       (allocation_throughput < kLowAllocationThroughput))) {
    new_space_->Shrink();
    UncommitFromSpace();
  }
}
3986 :
// If incremental marking has progressed far enough, either finalizes the
// marking phase (weak-closure over-approximation ready, or the worklist is
// drained and the embedder agrees) or, when marking is fully complete,
// triggers the full collection.
void Heap::FinalizeIncrementalMarkingIfComplete(
    GarbageCollectionReason gc_reason) {
  if (incremental_marking()->IsMarking() &&
      (incremental_marking()->IsReadyToOverApproximateWeakClosure() ||
       (!incremental_marking()->finalize_marking_completed() &&
        mark_compact_collector()->marking_worklist()->IsEmpty() &&
        local_embedder_heap_tracer()->ShouldFinalizeIncrementalMarking()))) {
    FinalizeIncrementalMarking(gc_reason);
  } else if (incremental_marking()->IsComplete() ||
             (mark_compact_collector()->marking_worklist()->IsEmpty() &&
              local_embedder_heap_tracer()
                  ->ShouldFinalizeIncrementalMarking())) {
    CollectAllGarbage(current_gc_flags_, gc_reason, current_gc_callback_flags_);
  }
}
4002 :
// After deserialization, re-visits all freshly deserialized objects so that
// black-allocated objects get the processing the incremental marker would
// normally perform at allocation time. No-op unless black allocation is on.
void Heap::RegisterDeserializedObjectsForBlackAllocation(
    Reservation* reservations, const std::vector<HeapObject*>& large_objects,
    const std::vector<Address>& maps) {
  // TODO(ulan): pause black allocation during deserialization to avoid
  // iterating all these objects in one go.

  if (!incremental_marking()->black_allocation()) return;

  // Iterate black objects in old space, code space, map space, and large
  // object space for side effects.
  IncrementalMarking::MarkingState* marking_state =
      incremental_marking()->marking_state();
  for (int i = OLD_SPACE; i < Serializer<>::kNumberOfSpaces; i++) {
    const Heap::Reservation& res = reservations[i];
    for (auto& chunk : res) {
      Address addr = chunk.start;
      // Walk the reserved chunk object by object using each object's size.
      while (addr < chunk.end) {
        HeapObject* obj = HeapObject::FromAddress(addr);
        // Objects can have any color because incremental marking can
        // start in the middle of Heap::ReserveSpace().
        if (marking_state->IsBlack(obj)) {
          incremental_marking()->ProcessBlackAllocatedObject(obj);
        }
        addr += obj->Size();
      }
    }
  }
  // We potentially deserialized wrappers which require registering with the
  // embedder as the marker will not find them.
  local_embedder_heap_tracer()->RegisterWrappersWithRemoteTracer();

  // Large object space doesn't use reservations, so it needs custom handling.
  for (HeapObject* object : large_objects) {
    incremental_marking()->ProcessBlackAllocatedObject(object);
  }

  // Map space doesn't use reservations, so it needs custom handling.
  for (Address addr : maps) {
    incremental_marking()->ProcessBlackAllocatedObject(
        HeapObject::FromAddress(addr));
  }
}
4045 :
// Must be called before an in-place layout change of |object| (e.g. a map
// transition that is unsafe for the concurrent marker). Marks the object
// black and, when compacting, registers it as invalidated so slots the
// concurrent marker may already have recorded for it are filtered out.
void Heap::NotifyObjectLayoutChange(HeapObject* object, int size,
                                    const DisallowHeapAllocation&) {
  DCHECK(InOldSpace(object) || InNewSpace(object) ||
         (lo_space()->Contains(object) && object->IsString()));
  if (FLAG_incremental_marking && incremental_marking()->IsMarking()) {
    incremental_marking()->MarkBlackAndPush(object);
    if (InOldSpace(object) && incremental_marking()->IsCompacting()) {
      // The concurrent marker might have recorded slots for the object.
      // Register this object as invalidated to filter out the slots.
      MemoryChunk* chunk = MemoryChunk::FromAddress(object->address());
      chunk->RegisterObjectWithInvalidatedSlots(object, size);
    }
  }
#ifdef VERIFY_HEAP
  if (FLAG_verify_heap) {
    // Remember the object so VerifyObjectLayoutChange can match the
    // notification with the actual map change.
    DCHECK_NULL(pending_layout_change_object_);
    pending_layout_change_object_ = object;
  }
#endif
}
4066 :
4067 : #ifdef VERIFY_HEAP
// Helper class for collecting slot addresses.
// Records the address of every pointer slot it visits so that the slot sets
// of an object before and after a map change can be compared.
class SlotCollectingVisitor final : public ObjectVisitor {
 public:
  void VisitPointers(HeapObject* host, Object** start, Object** end) override {
    for (Object** p = start; p < end; p++) {
      slots_.push_back(p);
    }
  }

  // Number of slots collected so far.
  int number_of_slots() { return static_cast<int>(slots_.size()); }

  // Address of the i-th collected slot.
  Object** slot(int i) { return slots_[i]; }

 private:
  std::vector<Object**> slots_;
};
4084 :
// Verifies that map transitions which require a NotifyObjectLayoutChange
// notification actually issued one: either the pending notification matches
// this object, or the transition is provably safe (same slot layout before
// and after the map change).
void Heap::VerifyObjectLayoutChange(HeapObject* object, Map* new_map) {
  if (!FLAG_verify_heap) return;

  // Check that Heap::NotifyObjectLayoutChange was called for object
  // transitions that are not safe for concurrent marking.
  // If you see this check triggering for a freshly allocated object,
  // use object->set_map_after_allocation() to initialize its map.
  if (pending_layout_change_object_ == nullptr) {
    if (object->IsJSObject()) {
      DCHECK(!object->map()->TransitionRequiresSynchronizationWithGC(new_map));
    } else {
      // Check that the set of slots before and after the transition match.
      SlotCollectingVisitor old_visitor;
      object->IterateFast(&old_visitor);
      MapWord old_map_word = object->map_word();
      // Temporarily set the new map to iterate new slots.
      object->set_map_word(MapWord::FromMap(new_map));
      SlotCollectingVisitor new_visitor;
      object->IterateFast(&new_visitor);
      // Restore the old map.
      object->set_map_word(old_map_word);
      DCHECK_EQ(new_visitor.number_of_slots(), old_visitor.number_of_slots());
      for (int i = 0; i < new_visitor.number_of_slots(); i++) {
        DCHECK_EQ(new_visitor.slot(i), old_visitor.slot(i));
      }
    }
  } else {
    // A notification was issued; it must have been for this object.
    DCHECK_EQ(pending_layout_change_object_, object);
    pending_layout_change_object_ = nullptr;
  }
}
4116 : #endif
4117 :
// Snapshots the heap state the GC idle-time handler needs to decide what
// action (incremental step, full GC, nothing) to take during idle time.
GCIdleTimeHeapState Heap::ComputeHeapState() {
  GCIdleTimeHeapState heap_state;
  heap_state.contexts_disposed = contexts_disposed_;
  heap_state.contexts_disposal_rate =
      tracer()->ContextDisposalRateInMilliseconds();
  heap_state.size_of_objects = static_cast<size_t>(SizeOfObjects());
  heap_state.incremental_marking_stopped = incremental_marking()->IsStopped();
  return heap_state;
}
4127 :
4128 :
// Executes the action chosen by the GC idle-time handler within the given
// deadline. Returns true when idle work is done (DONE, or an incremental
// step that finished marking); DO_FULL_GC and DO_NOTHING return false.
bool Heap::PerformIdleTimeAction(GCIdleTimeAction action,
                                 GCIdleTimeHeapState heap_state,
                                 double deadline_in_ms) {
  bool result = false;
  switch (action.type) {
    case DONE:
      result = true;
      break;
    case DO_INCREMENTAL_STEP: {
      const double remaining_idle_time_in_ms =
          incremental_marking()->AdvanceIncrementalMarking(
              deadline_in_ms, IncrementalMarking::NO_GC_VIA_STACK_GUARD,
              StepOrigin::kTask);
      // If there is idle time left after the step, try to finalize marking.
      if (remaining_idle_time_in_ms > 0.0) {
        FinalizeIncrementalMarkingIfComplete(
            GarbageCollectionReason::kFinalizeMarkingViaTask);
      }
      result = incremental_marking()->IsStopped();
      break;
    }
    case DO_FULL_GC: {
      // Full GC triggered by context disposal.
      DCHECK_LT(0, contexts_disposed_);
      HistogramTimerScope scope(isolate_->counters()->gc_context());
      TRACE_EVENT0("v8", "V8.GCContext");
      CollectAllGarbage(kNoGCFlags, GarbageCollectionReason::kContextDisposal);
      break;
    }
    case DO_NOTHING:
      break;
  }

  return result;
}
4162 :
// Bookkeeping after an idle notification: records the notification time,
// resets the disposed-contexts counter, samples memory counters for long
// idle periods, and optionally traces what was done.
void Heap::IdleNotificationEpilogue(GCIdleTimeAction action,
                                    GCIdleTimeHeapState heap_state,
                                    double start_ms, double deadline_in_ms) {
  double idle_time_in_ms = deadline_in_ms - start_ms;
  double current_time = MonotonicallyIncreasingTimeInMs();
  last_idle_notification_time_ = current_time;
  // Negative when the deadline was overrun.
  double deadline_difference = deadline_in_ms - current_time;

  contexts_disposed_ = 0;

  if (deadline_in_ms - start_ms >
      GCIdleTimeHandler::kMaxFrameRenderingIdleTime) {
    int committed_memory = static_cast<int>(CommittedMemory() / KB);
    int used_memory = static_cast<int>(heap_state.size_of_objects / KB);
    isolate()->counters()->aggregated_memory_heap_committed()->AddSample(
        start_ms, committed_memory);
    isolate()->counters()->aggregated_memory_heap_used()->AddSample(
        start_ms, used_memory);
  }

  if ((FLAG_trace_idle_notification && action.type > DO_NOTHING) ||
      FLAG_trace_idle_notification_verbose) {
    isolate_->PrintWithTimestamp(
        "Idle notification: requested idle time %.2f ms, used idle time %.2f "
        "ms, deadline usage %.2f ms [",
        idle_time_in_ms, idle_time_in_ms - deadline_difference,
        deadline_difference);
    action.Print();
    PrintF("]");
    if (FLAG_trace_idle_notification_verbose) {
      PrintF("[");
      heap_state.Print();
      PrintF("]");
    }
    PrintF("\n");
  }
}
4200 :
4201 :
4202 6768388 : double Heap::MonotonicallyIncreasingTimeInMs() {
4203 6768388 : return V8::GetCurrentPlatform()->MonotonicallyIncreasingTime() *
4204 6768390 : static_cast<double>(base::Time::kMillisecondsPerSecond);
4205 : }
4206 :
4207 :
4208 0 : bool Heap::IdleNotification(int idle_time_in_ms) {
4209 : return IdleNotification(
4210 0 : V8::GetCurrentPlatform()->MonotonicallyIncreasingTime() +
4211 0 : (static_cast<double>(idle_time_in_ms) /
4212 0 : static_cast<double>(base::Time::kMillisecondsPerSecond)));
4213 : }
4214 :
4215 :
// Entry point for embedder idle notifications. Computes the available idle
// time, asks the idle-time handler what to do, performs that action, and
// records the outcome. Returns true when idle GC work is finished.
bool Heap::IdleNotification(double deadline_in_seconds) {
  CHECK(HasBeenSetUp());
  double deadline_in_ms =
      deadline_in_seconds *
      static_cast<double>(base::Time::kMillisecondsPerSecond);
  HistogramTimerScope idle_notification_scope(
      isolate_->counters()->gc_idle_notification());
  TRACE_EVENT0("v8", "V8.GCIdleNotification");
  double start_ms = MonotonicallyIncreasingTimeInMs();
  double idle_time_in_ms = deadline_in_ms - start_ms;

  // Feed the allocation tracer so throughput estimates stay current.
  tracer()->SampleAllocation(start_ms, NewSpaceAllocationCounter(),
                             OldGenerationAllocationCounter());

  GCIdleTimeHeapState heap_state = ComputeHeapState();

  GCIdleTimeAction action =
      gc_idle_time_handler_->Compute(idle_time_in_ms, heap_state);

  bool result = PerformIdleTimeAction(action, heap_state, deadline_in_ms);

  IdleNotificationEpilogue(action, heap_state, start_ms, deadline_in_ms);
  return result;
}
4240 :
4241 :
4242 0 : bool Heap::RecentIdleNotificationHappened() {
4243 0 : return (last_idle_notification_time_ +
4244 : GCIdleTimeHandler::kMaxScheduledIdleTime) >
4245 0 : MonotonicallyIncreasingTimeInMs();
4246 : }
4247 :
// Foreground task posted when a memory-pressure notification arrives from a
// thread that does not hold the isolate lock; it re-enters
// Heap::CheckMemoryPressure on the isolate's thread.
class MemoryPressureInterruptTask : public CancelableTask {
 public:
  explicit MemoryPressureInterruptTask(Heap* heap)
      : CancelableTask(heap->isolate()), heap_(heap) {}

  virtual ~MemoryPressureInterruptTask() {}

 private:
  // v8::internal::CancelableTask overrides.
  void RunInternal() override { heap_->CheckMemoryPressure(); }

  Heap* heap_;
  DISALLOW_COPY_AND_ASSIGN(MemoryPressureInterruptTask);
};
4262 :
// Reacts to the current memory-pressure level: flushes the optimizing
// compiler queue, triggers a GC (full on critical, incremental start on
// moderate), and pings the memory reducer.
void Heap::CheckMemoryPressure() {
  if (HighMemoryPressure()) {
    if (isolate()->concurrent_recompilation_enabled()) {
      // The optimizing compiler may be unnecessarily holding on to memory.
      DisallowHeapAllocation no_recursive_gc;
      isolate()->optimizing_compile_dispatcher()->Flush(
          OptimizingCompileDispatcher::BlockingBehavior::kDontBlock);
    }
  }
  if (memory_pressure_level_.Value() == MemoryPressureLevel::kCritical) {
    CollectGarbageOnMemoryPressure();
  } else if (memory_pressure_level_.Value() == MemoryPressureLevel::kModerate) {
    if (FLAG_incremental_marking && incremental_marking()->IsStopped()) {
      StartIncrementalMarking(kReduceMemoryFootprintMask,
                              GarbageCollectionReason::kMemoryPressure);
    }
  }
  if (memory_reducer_) {
    // Let the memory reducer schedule follow-up GCs.
    MemoryReducer::Event event;
    event.type = MemoryReducer::kPossibleGarbage;
    event.time_ms = MonotonicallyIncreasingTimeInMs();
    memory_reducer_->NotifyPossibleGarbage(event);
  }
}
4287 :
// Handles critical memory pressure: performs one full GC immediately and, if
// a lot of memory still looks reclaimable, either performs a second full GC
// (when the first one was fast) or starts incremental marking.
void Heap::CollectGarbageOnMemoryPressure() {
  const int kGarbageThresholdInBytes = 8 * MB;
  const double kGarbageThresholdAsFractionOfTotalMemory = 0.1;
  // This constant is the maximum response time in RAIL performance model.
  const double kMaxMemoryPressurePauseMs = 100;

  double start = MonotonicallyIncreasingTimeInMs();
  CollectAllGarbage(kReduceMemoryFootprintMask | kAbortIncrementalMarkingMask,
                    GarbageCollectionReason::kMemoryPressure,
                    kGCCallbackFlagCollectAllAvailableGarbage);
  double end = MonotonicallyIncreasingTimeInMs();

  // Estimate how much memory we can free.
  int64_t potential_garbage =
      (CommittedMemory() - SizeOfObjects()) + external_memory_;
  // If we can potentially free large amount of memory, then start GC right
  // away instead of waiting for memory reducer.
  if (potential_garbage >= kGarbageThresholdInBytes &&
      potential_garbage >=
          CommittedMemory() * kGarbageThresholdAsFractionOfTotalMemory) {
    // If we spent less than half of the time budget, then perform full GC
    // Otherwise, start incremental marking.
    if (end - start < kMaxMemoryPressurePauseMs / 2) {
      CollectAllGarbage(
          kReduceMemoryFootprintMask | kAbortIncrementalMarkingMask,
          GarbageCollectionReason::kMemoryPressure,
          kGCCallbackFlagCollectAllAvailableGarbage);
    } else {
      if (FLAG_incremental_marking && incremental_marking()->IsStopped()) {
        StartIncrementalMarking(kReduceMemoryFootprintMask,
                                GarbageCollectionReason::kMemoryPressure);
      }
    }
  }
}
4323 :
// Records a new memory-pressure level from the embedder. When the level is
// escalating (to critical, or none->moderate), reacts immediately on this
// thread if the isolate is locked; otherwise requests a GC interrupt and
// posts a foreground task, since GC must run on the isolate's thread.
void Heap::MemoryPressureNotification(MemoryPressureLevel level,
                                      bool is_isolate_locked) {
  MemoryPressureLevel previous = memory_pressure_level_.Value();
  memory_pressure_level_.SetValue(level);
  if ((previous != MemoryPressureLevel::kCritical &&
       level == MemoryPressureLevel::kCritical) ||
      (previous == MemoryPressureLevel::kNone &&
       level == MemoryPressureLevel::kModerate)) {
    if (is_isolate_locked) {
      CheckMemoryPressure();
    } else {
      ExecutionAccess access(isolate());
      isolate()->stack_guard()->RequestGC();
      V8::GetCurrentPlatform()->CallOnForegroundThread(
          reinterpret_cast<v8::Isolate*>(isolate()),
          new MemoryPressureInterruptTask(this));
    }
  }
}
4343 :
// Registers the embedder's out-of-memory callback and its opaque data
// pointer; invoked via InvokeOutOfMemoryCallback.
void Heap::SetOutOfMemoryCallback(v8::debug::OutOfMemoryCallback callback,
                                  void* data) {
  out_of_memory_callback_ = callback;
  out_of_memory_callback_data_ = data;
}
4349 :
// Calls the registered out-of-memory callback, if any.
void Heap::InvokeOutOfMemoryCallback() {
  if (out_of_memory_callback_) {
    out_of_memory_callback_(out_of_memory_callback_data_);
  }
}
4355 :
// Resets and recollects code/metadata statistics over the spaces that can
// contain code objects.
void Heap::CollectCodeStatistics() {
  CodeStatistics::ResetCodeAndMetadataStatistics(isolate());
  // We do not look for code in new space, or map space. If code
  // somehow ends up in those spaces, we would miss it here.
  CodeStatistics::CollectCodeStatistics(code_space_, isolate());
  CodeStatistics::CollectCodeStatistics(old_space_, isolate());
  CodeStatistics::CollectCodeStatistics(lo_space_, isolate());
}
4364 :
4365 : #ifdef DEBUG
4366 :
// Debug-only: prints the current stack and per-space heap contents to stdout.
void Heap::Print() {
  if (!HasBeenSetUp()) return;
  isolate()->PrintStack(stdout);
  AllSpaces spaces(this);
  for (Space* space = spaces.next(); space != nullptr; space = spaces.next()) {
    space->Print();
  }
}
4375 :
4376 :
// Debug-only: collects and prints code statistics with a section title.
void Heap::ReportCodeStatistics(const char* title) {
  PrintF(">>>>>> Code Stats (%s) >>>>>>\n", title);
  CollectCodeStatistics();
  CodeStatistics::ReportCodeStatistics(isolate());
}
4382 :
4383 :
// This function expects that NewSpace's allocated objects histogram is
// populated (via a call to CollectStatistics or else as a side effect of a
// just-completed scavenge collection).
// Debug-only: dumps handle counts, allocator statistics, and per-space
// statistics to stdout.
void Heap::ReportHeapStatistics(const char* title) {
  USE(title);
  PrintF(">>>>>> =============== %s (%d) =============== >>>>>>\n", title,
         gc_count_);
  PrintF("old_generation_allocation_limit_ %" PRIuS "\n",
         old_generation_allocation_limit_);

  PrintF("\n");
  PrintF("Number of handles : %d\n", HandleScope::NumberOfHandles(isolate_));
  isolate_->global_handles()->PrintStats();
  PrintF("\n");

  PrintF("Heap statistics : ");
  memory_allocator()->ReportStatistics();
  PrintF("To space : ");
  new_space_->ReportStatistics();
  PrintF("Old space : ");
  old_space_->ReportStatistics();
  PrintF("Code space : ");
  code_space_->ReportStatistics();
  PrintF("Map space : ");
  map_space_->ReportStatistics();
  PrintF("Large object space : ");
  lo_space_->ReportStatistics();
  PrintF(">>>>>> ========================================= >>>>>>\n");
}
4413 :
4414 : #endif // DEBUG
4415 :
// Maps a GarbageCollectionReason to the human-readable string used in GC
// tracing output. The strings are part of the trace format; do not change
// them casually.
const char* Heap::GarbageCollectionReasonToString(
    GarbageCollectionReason gc_reason) {
  switch (gc_reason) {
    case GarbageCollectionReason::kAllocationFailure:
      return "allocation failure";
    case GarbageCollectionReason::kAllocationLimit:
      return "allocation limit";
    case GarbageCollectionReason::kContextDisposal:
      return "context disposal";
    case GarbageCollectionReason::kCountersExtension:
      return "counters extension";
    case GarbageCollectionReason::kDebugger:
      return "debugger";
    case GarbageCollectionReason::kDeserializer:
      return "deserialize";
    case GarbageCollectionReason::kExternalMemoryPressure:
      return "external memory pressure";
    case GarbageCollectionReason::kFinalizeMarkingViaStackGuard:
      return "finalize incremental marking via stack guard";
    case GarbageCollectionReason::kFinalizeMarkingViaTask:
      return "finalize incremental marking via task";
    case GarbageCollectionReason::kFullHashtable:
      return "full hash-table";
    case GarbageCollectionReason::kHeapProfiler:
      return "heap profiler";
    case GarbageCollectionReason::kIdleTask:
      return "idle task";
    case GarbageCollectionReason::kLastResort:
      return "last resort";
    case GarbageCollectionReason::kLowMemoryNotification:
      return "low memory notification";
    case GarbageCollectionReason::kMakeHeapIterable:
      return "make heap iterable";
    case GarbageCollectionReason::kMemoryPressure:
      return "memory pressure";
    case GarbageCollectionReason::kMemoryReducer:
      return "memory reducer";
    case GarbageCollectionReason::kRuntime:
      return "runtime";
    case GarbageCollectionReason::kSamplingProfiler:
      return "sampling profiler";
    case GarbageCollectionReason::kSnapshotCreator:
      return "snapshot creator";
    case GarbageCollectionReason::kTesting:
      return "testing";
    case GarbageCollectionReason::kUnknown:
      return "unknown";
  }
  UNREACHABLE();
}
4466 :
// Returns true if |value| lives in any of this heap's spaces (to-space of
// new space, old, code, map, or large object space).
bool Heap::Contains(HeapObject* value) {
  // Fast reject for addresses outside the allocator's reserved ranges.
  if (memory_allocator()->IsOutsideAllocatedSpace(value->address())) {
    return false;
  }
  return HasBeenSetUp() &&
         (new_space_->ToSpaceContains(value) || old_space_->Contains(value) ||
          code_space_->Contains(value) || map_space_->Contains(value) ||
          lo_space_->Contains(value));
}
4476 :
// Address-based variant of Contains; uses the spaces' slow containment
// checks, so it works for arbitrary addresses, not just object starts.
bool Heap::ContainsSlow(Address addr) {
  if (memory_allocator()->IsOutsideAllocatedSpace(addr)) {
    return false;
  }
  return HasBeenSetUp() &&
         (new_space_->ToSpaceContainsSlow(addr) ||
          old_space_->ContainsSlow(addr) || code_space_->ContainsSlow(addr) ||
          map_space_->ContainsSlow(addr) || lo_space_->ContainsSlow(addr));
}
4486 :
// Returns true if |value| lives in the given allocation space.
bool Heap::InSpace(HeapObject* value, AllocationSpace space) {
  if (memory_allocator()->IsOutsideAllocatedSpace(value->address())) {
    return false;
  }
  if (!HasBeenSetUp()) return false;

  switch (space) {
    case NEW_SPACE:
      return new_space_->ToSpaceContains(value);
    case OLD_SPACE:
      return old_space_->Contains(value);
    case CODE_SPACE:
      return code_space_->Contains(value);
    case MAP_SPACE:
      return map_space_->Contains(value);
    case LO_SPACE:
      return lo_space_->Contains(value);
  }
  UNREACHABLE();
}
4507 :
// Address-based variant of InSpace; uses the spaces' slow containment
// checks, so it works for arbitrary addresses, not just object starts.
bool Heap::InSpaceSlow(Address addr, AllocationSpace space) {
  if (memory_allocator()->IsOutsideAllocatedSpace(addr)) {
    return false;
  }
  if (!HasBeenSetUp()) return false;

  switch (space) {
    case NEW_SPACE:
      return new_space_->ToSpaceContainsSlow(addr);
    case OLD_SPACE:
      return old_space_->ContainsSlow(addr);
    case CODE_SPACE:
      return code_space_->ContainsSlow(addr);
    case MAP_SPACE:
      return map_space_->ContainsSlow(addr);
    case LO_SPACE:
      return lo_space_->ContainsSlow(addr);
  }
  UNREACHABLE();
}
4528 :
4529 :
4530 0 : bool Heap::IsValidAllocationSpace(AllocationSpace space) {
4531 0 : switch (space) {
4532 : case NEW_SPACE:
4533 : case OLD_SPACE:
4534 : case CODE_SPACE:
4535 : case MAP_SPACE:
4536 : case LO_SPACE:
4537 : return true;
4538 : default:
4539 0 : return false;
4540 : }
4541 : }
4542 :
4543 :
// Returns true for root-list entries that never move and never die:
// the immortal-immovable roots, the internalized-string roots, and the
// string-type maps. The case labels are generated by macro expansion.
bool Heap::RootIsImmortalImmovable(int root_index) {
  switch (root_index) {
#define IMMORTAL_IMMOVABLE_ROOT(name) case Heap::k##name##RootIndex:
    IMMORTAL_IMMOVABLE_ROOT_LIST(IMMORTAL_IMMOVABLE_ROOT)
#undef IMMORTAL_IMMOVABLE_ROOT
#define INTERNALIZED_STRING(name, value) case Heap::k##name##RootIndex:
    INTERNALIZED_STRING_LIST(INTERNALIZED_STRING)
#undef INTERNALIZED_STRING
#define STRING_TYPE(NAME, size, name, Name) case Heap::k##Name##MapRootIndex:
    STRING_TYPE_LIST(STRING_TYPE)
#undef STRING_TYPE
      return true;
    default:
      return false;
  }
}
4560 :
4561 : #ifdef VERIFY_HEAP
// VERIFY_HEAP-only: full heap verification — checks roots, smi roots, and
// every space's invariants after ensuring sweeping has finished.
void Heap::Verify() {
  CHECK(HasBeenSetUp());
  HandleScope scope(isolate());

  // We have to wait here for the sweeper threads to have an iterable heap.
  mark_compact_collector()->EnsureSweepingCompleted();

  VerifyPointersVisitor visitor;
  IterateRoots(&visitor, VISIT_ONLY_STRONG);

  VerifySmisVisitor smis_visitor;
  IterateSmiRoots(&smis_visitor);

  new_space_->Verify();

  old_space_->Verify(&visitor);
  map_space_->Verify(&visitor);

  // Code space is verified without the dirty-region checks.
  VerifyPointersVisitor no_dirty_regions_visitor;
  code_space_->Verify(&no_dirty_regions_visitor);

  lo_space_->Verify();

  mark_compact_collector()->VerifyWeakEmbeddedObjectsInCode();
}
4587 :
// VERIFY_HEAP-only base visitor: for every slot that a subclass says should
// have been recorded in a remembered set, CHECKs that the slot is indeed
// present in the given untyped/typed slot collections.
class SlotVerifyingVisitor : public ObjectVisitor {
 public:
  SlotVerifyingVisitor(std::set<Address>* untyped,
                       std::set<std::pair<SlotType, Address> >* typed)
      : untyped_(untyped), typed_(typed) {}

  // Subclasses define which host->target references require a recorded slot.
  virtual bool ShouldHaveBeenRecorded(HeapObject* host, Object* target) = 0;

  void VisitPointers(HeapObject* host, Object** start, Object** end) override {
    for (Object** p = start; p < end; p++) {
      if (ShouldHaveBeenRecorded(host, *p)) {
        CHECK_GT(untyped_->count(reinterpret_cast<Address>(p)), 0);
      }
    }
  }

  void VisitCodeTarget(Code* host, RelocInfo* rinfo) override {
    Object* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
    if (ShouldHaveBeenRecorded(host, target)) {
      // Code targets live either at the pc or in the constant pool.
      CHECK(
          InTypedSet(CODE_TARGET_SLOT, rinfo->pc()) ||
          (rinfo->IsInConstantPool() &&
           InTypedSet(CODE_ENTRY_SLOT, rinfo->constant_pool_entry_address())));
    }
  }

  void VisitEmbeddedPointer(Code* host, RelocInfo* rinfo) override {
    Object* target = rinfo->target_object();
    if (ShouldHaveBeenRecorded(host, target)) {
      CHECK(InTypedSet(EMBEDDED_OBJECT_SLOT, rinfo->pc()) ||
            (rinfo->IsInConstantPool() &&
             InTypedSet(OBJECT_SLOT, rinfo->constant_pool_entry_address())));
    }
  }

 private:
  bool InTypedSet(SlotType type, Address slot) {
    return typed_->count(std::make_pair(type, slot)) > 0;
  }
  std::set<Address>* untyped_;
  std::set<std::pair<SlotType, Address> >* typed_;
};
4630 :
// VERIFY_HEAP-only: verifies the OLD_TO_NEW remembered set — a slot must be
// recorded whenever an object outside new space points into new space.
class OldToNewSlotVerifyingVisitor : public SlotVerifyingVisitor {
 public:
  OldToNewSlotVerifyingVisitor(Heap* heap, std::set<Address>* untyped,
                               std::set<std::pair<SlotType, Address> >* typed)
      : SlotVerifyingVisitor(untyped, typed), heap_(heap) {}

  bool ShouldHaveBeenRecorded(HeapObject* host, Object* target) override {
    // Any new-space target must already have been evacuated to to-space.
    DCHECK_IMPLIES(target->IsHeapObject() && heap_->InNewSpace(target),
                   heap_->InToSpace(target));
    return target->IsHeapObject() && heap_->InNewSpace(target) &&
           !heap_->InNewSpace(host);
  }

 private:
  Heap* heap_;
};
4647 :
// VERIFY_HEAP-only helper: collects all untyped and typed remembered-set
// entries of the given direction that fall into [start, end) on |chunk|.
template <RememberedSetType direction>
void CollectSlots(MemoryChunk* chunk, Address start, Address end,
                  std::set<Address>* untyped,
                  std::set<std::pair<SlotType, Address> >* typed) {
  RememberedSet<direction>::Iterate(chunk,
                                    [start, end, untyped](Address slot) {
                                      if (start <= slot && slot < end) {
                                        untyped->insert(slot);
                                      }
                                      return KEEP_SLOT;
                                    },
                                    SlotSet::PREFREE_EMPTY_BUCKETS);
  RememberedSet<direction>::IterateTyped(
      chunk, [start, end, typed](SlotType type, Address host, Address slot) {
        if (start <= slot && slot < end) {
          typed->insert(std::make_pair(type, slot));
        }
        return KEEP_SLOT;
      });
}
4668 :
// VERIFY_HEAP-only: checks that every old-to-new pointer inside |object| has
// a corresponding entry in the OLD_TO_NEW remembered set of its page.
void Heap::VerifyRememberedSetFor(HeapObject* object) {
  MemoryChunk* chunk = MemoryChunk::FromAddress(object->address());
  // Guard against concurrent modification of the chunk's slot sets.
  base::LockGuard<base::RecursiveMutex> lock_guard(chunk->mutex());
  Address start = object->address();
  Address end = start + object->Size();
  std::set<Address> old_to_new;
  std::set<std::pair<SlotType, Address> > typed_old_to_new;
  if (!InNewSpace(object)) {
    // Drain the store buffer first so all pending entries are visible.
    store_buffer()->MoveAllEntriesToRememberedSet();
    CollectSlots<OLD_TO_NEW>(chunk, start, end, &old_to_new, &typed_old_to_new);
    OldToNewSlotVerifyingVisitor visitor(this, &old_to_new, &typed_old_to_new);
    object->IterateBody(&visitor);
  }
  // TODO(ulan): Add old to old slot set verification once all weak objects
  // have their own instance types and slots are recorded for all weak fields.
}
4685 : #endif
4686 :
4687 : #ifdef DEBUG
// DEBUG-only: re-checks allocation counters of all paged spaces after
// sweeping has completed.
void Heap::VerifyCountersAfterSweeping() {
  PagedSpaces spaces(this);
  for (PagedSpace* space = spaces.next(); space != nullptr;
       space = spaces.next()) {
    space->VerifyCountersAfterSweeping();
  }
}
4695 :
// DEBUG-only: checks allocation counters of all paged spaces before
// concurrent sweeping starts.
void Heap::VerifyCountersBeforeConcurrentSweeping() {
  PagedSpaces spaces(this);
  for (PagedSpace* space = spaces.next(); space != nullptr;
       space = spaces.next()) {
    space->VerifyCountersBeforeConcurrentSweeping();
  }
}
4703 : #endif
4704 :
// Overwrites every word of from-space with kFromSpaceZapValue so that stale
// reads into evacuated memory are caught quickly. No-op if from-space is not
// committed.
void Heap::ZapFromSpace() {
  if (!new_space_->IsFromSpaceCommitted()) return;
  for (Page* page :
       PageRange(new_space_->FromSpaceStart(), new_space_->FromSpaceEnd())) {
    for (Address cursor = page->area_start(), limit = page->area_end();
         cursor < limit; cursor += kPointerSize) {
      Memory::Address_at(cursor) = kFromSpaceZapValue;
    }
  }
}
4715 :
// Visits both strong and weak roots with the given visitor and visit mode.
void Heap::IterateRoots(RootVisitor* v, VisitMode mode) {
  IterateStrongRoots(v, mode);
  IterateWeakRoots(v, mode);
}
4720 :
// Visits the weak roots: the string table and (except for minor GCs and
// sweep-newspace visits, which handle it specially) the external string
// table.
void Heap::IterateWeakRoots(RootVisitor* v, VisitMode mode) {
  const bool isMinorGC = mode == VISIT_ALL_IN_SCAVENGE ||
                         mode == VISIT_ALL_IN_MINOR_MC_MARK ||
                         mode == VISIT_ALL_IN_MINOR_MC_UPDATE;
  v->VisitRootPointer(Root::kStringTable, reinterpret_cast<Object**>(
                                              &roots_[kStringTableRootIndex]));
  v->Synchronize(VisitorSynchronization::kStringTable);
  if (!isMinorGC && mode != VISIT_ALL_IN_SWEEP_NEWSPACE) {
    // Scavenge collections have special processing for this.
    external_string_table_.IterateAll(v);
  }
  v->Synchronize(VisitorSynchronization::kExternalStringsTable);
}
4734 :
// Visits the Smi-valued portion of the root list.
void Heap::IterateSmiRoots(RootVisitor* v) {
  // Acquire execution access since we are going to read stack limit values.
  ExecutionAccess access(isolate());
  v->VisitRootPointers(Root::kSmiRootList, &roots_[kSmiRootsStart],
                       &roots_[kRootListLength]);
  v->Synchronize(VisitorSynchronization::kSmiRootList);
}
4742 :
// Visits the head of the list of weak collections encountered during marking.
void Heap::IterateEncounteredWeakCollections(RootVisitor* visitor) {
  visitor->VisitRootPointer(Root::kWeakCollections,
                            &encountered_weak_collections_);
}
4747 :
// We cannot avoid stale handles to left-trimmed objects, but can only make
// sure all handles still needed are updated. Filter out a stale pointer
// and clear the slot to allow post processing of handles (needed because
// the sweeper might actually free the underlying page).
class FixStaleLeftTrimmedHandlesVisitor : public RootVisitor {
 public:
  explicit FixStaleLeftTrimmedHandlesVisitor(Heap* heap) : heap_(heap) {
    USE(heap_);
  }

  void VisitRootPointer(Root root, Object** p) override { FixHandle(p); }

  void VisitRootPointers(Root root, Object** start, Object** end) override {
    for (Object** p = start; p < end; p++) FixHandle(p);
  }

 private:
  // Clears *p when it points at filler left behind by left-trimming; such a
  // handle no longer references a live object.
  inline void FixHandle(Object** p) {
    if (!(*p)->IsHeapObject()) return;
    HeapObject* current = reinterpret_cast<HeapObject*>(*p);
    const MapWord map_word = current->map_word();
    if (!map_word.IsForwardingAddress() && current->IsFiller()) {
#ifdef DEBUG
      // We need to find a FixedArrayBase map after walking the fillers.
      while (current->IsFiller()) {
        Address next = reinterpret_cast<Address>(current);
        if (current->map() == heap_->one_pointer_filler_map()) {
          next += kPointerSize;
        } else if (current->map() == heap_->two_pointer_filler_map()) {
          next += 2 * kPointerSize;
        } else {
          next += current->Size();
        }
        current = reinterpret_cast<HeapObject*>(next);
      }
      DCHECK(current->IsFixedArrayBase());
#endif  // DEBUG
      *p = nullptr;
    }
  }

  Heap* heap_;
};
4791 :
// Visits all strong roots of the heap with |v|. |mode| selects which subsets
// are visited (e.g. only the root list for serialization, or only
// new-space-relevant global/eternal handles for minor GCs). The visitation
// order and the Synchronize() tags are part of the snapshot format (see the
// comment at the end), so do not reorder the sections below.
void Heap::IterateStrongRoots(RootVisitor* v, VisitMode mode) {
  const bool isMinorGC = mode == VISIT_ALL_IN_SCAVENGE ||
                         mode == VISIT_ALL_IN_MINOR_MC_MARK ||
                         mode == VISIT_ALL_IN_MINOR_MC_UPDATE;
  v->VisitRootPointers(Root::kStrongRootList, &roots_[0],
                       &roots_[kStrongRootListLength]);
  v->Synchronize(VisitorSynchronization::kStrongRootList);
  // The serializer/deserializer iterates the root list twice, first to pick
  // off immortal immovable roots to make sure they end up on the first page,
  // and then again for the rest.
  if (mode == VISIT_ONLY_STRONG_ROOT_LIST) return;

  isolate_->bootstrapper()->Iterate(v);
  v->Synchronize(VisitorSynchronization::kBootstrapper);
  isolate_->Iterate(v);
  v->Synchronize(VisitorSynchronization::kTop);
  Relocatable::Iterate(isolate_, v);
  v->Synchronize(VisitorSynchronization::kRelocatable);
  isolate_->debug()->Iterate(v);
  v->Synchronize(VisitorSynchronization::kDebug);

  isolate_->compilation_cache()->Iterate(v);
  v->Synchronize(VisitorSynchronization::kCompilationCache);

  // Iterate over local handles in handle scopes.
  // The left-trim visitor runs first so stale handles to left-trimmed
  // objects are cleared before |v| sees them.
  FixStaleLeftTrimmedHandlesVisitor left_trim_visitor(this);
  isolate_->handle_scope_implementer()->Iterate(&left_trim_visitor);
  isolate_->handle_scope_implementer()->Iterate(v);
  isolate_->IterateDeferredHandles(v);
  v->Synchronize(VisitorSynchronization::kHandleScope);

  // Iterate over the builtin code objects and code stubs in the
  // heap. Note that it is not necessary to iterate over code objects
  // on scavenge collections.
  if (!isMinorGC) {
    isolate_->builtins()->IterateBuiltins(v);
    v->Synchronize(VisitorSynchronization::kBuiltins);
    isolate_->interpreter()->IterateDispatchTable(v);
    v->Synchronize(VisitorSynchronization::kDispatchTable);
  }

  // Iterate over global handles.
  switch (mode) {
    case VISIT_ONLY_STRONG_ROOT_LIST:
      UNREACHABLE();
      break;
    case VISIT_ONLY_STRONG_FOR_SERIALIZATION:
      break;
    case VISIT_ONLY_STRONG:
      isolate_->global_handles()->IterateStrongRoots(v);
      break;
    case VISIT_ALL_IN_SCAVENGE:
      isolate_->global_handles()->IterateNewSpaceStrongAndDependentRoots(v);
      break;
    case VISIT_ALL_IN_MINOR_MC_MARK:
      // Global handles are processed manually by the minor MC.
      break;
    case VISIT_ALL_IN_MINOR_MC_UPDATE:
      // Global handles are processed manually by the minor MC.
      break;
    case VISIT_ALL_IN_SWEEP_NEWSPACE:
    case VISIT_ALL:
      isolate_->global_handles()->IterateAllRoots(v);
      break;
  }
  v->Synchronize(VisitorSynchronization::kGlobalHandles);

  // Iterate over eternal handles.
  if (isMinorGC) {
    isolate_->eternal_handles()->IterateNewSpaceRoots(v);
  } else {
    isolate_->eternal_handles()->IterateAllRoots(v);
  }
  v->Synchronize(VisitorSynchronization::kEternalHandles);

  // Iterate over pointers being held by inactive threads.
  isolate_->thread_manager()->Iterate(v);
  v->Synchronize(VisitorSynchronization::kThreadManager);

  // Iterate over other strong roots (currently only identity maps).
  for (StrongRootsList* list = strong_roots_list_; list; list = list->next) {
    v->VisitRootPointers(Root::kStrongRoots, list->start, list->end);
  }
  v->Synchronize(VisitorSynchronization::kStrongRoots);

  // Iterate over the partial snapshot cache unless serializing.
  if (mode != VISIT_ONLY_STRONG_FOR_SERIALIZATION) {
    SerializerDeserializer::Iterate(isolate_, v);
  }
  // We don't do a v->Synchronize call here, because in debug mode that will
  // output a flag to the snapshot. However at this point the serializer and
  // deserializer are deliberately a little unsynchronized (see above) so the
  // checking of the sync flag in the snapshot would fail.
}
4886 :
4887 :
4888 : // TODO(1236194): Since the heap size is configurable on the command line
4889 : // and through the API, we should gracefully handle the case that the heap
4890 : // size is not big enough to fit all the initial objects.
// Configures heap sizes before SetUp(). A zero argument keeps the current
// (default) value; command-line flags then override both. Returns false if
// the heap has already been set up. Sizes: semi-space in KB, old generation
// and code range in MB.
bool Heap::ConfigureHeap(size_t max_semi_space_size_in_kb,
                         size_t max_old_generation_size_in_mb,
                         size_t code_range_size_in_mb) {
  if (HasBeenSetUp()) return false;

  // Overwrite default configuration.
  if (max_semi_space_size_in_kb != 0) {
    max_semi_space_size_ =
        ROUND_UP(max_semi_space_size_in_kb * KB, Page::kPageSize);
  }
  if (max_old_generation_size_in_mb != 0) {
    max_old_generation_size_ = max_old_generation_size_in_mb * MB;
  }

  // If max space size flags are specified overwrite the configuration.
  if (FLAG_max_semi_space_size > 0) {
    max_semi_space_size_ = static_cast<size_t>(FLAG_max_semi_space_size) * MB;
  }
  if (FLAG_max_old_space_size > 0) {
    max_old_generation_size_ =
        static_cast<size_t>(FLAG_max_old_space_size) * MB;
  }

  // Align both limits to the page size on large-page configurations.
  if (Page::kPageSize > MB) {
    max_semi_space_size_ = ROUND_UP(max_semi_space_size_, Page::kPageSize);
    max_old_generation_size_ =
        ROUND_UP(max_old_generation_size_, Page::kPageSize);
  }

  if (FLAG_stress_compaction) {
    // This will cause more frequent GCs when stressing.
    max_semi_space_size_ = MB;
  }

  // The new space size must be a power of two to support single-bit testing
  // for containment.
  max_semi_space_size_ = base::bits::RoundUpToPowerOfTwo32(
      static_cast<uint32_t>(max_semi_space_size_));

  if (max_semi_space_size_ == kMaxSemiSpaceSizeInKB * KB) {
    // Start with at least 1*MB semi-space on machines with a lot of memory.
    initial_semispace_size_ =
        Max(initial_semispace_size_, static_cast<size_t>(1 * MB));
  }

  if (FLAG_min_semi_space_size > 0) {
    size_t initial_semispace_size =
        static_cast<size_t>(FLAG_min_semi_space_size) * MB;
    if (initial_semispace_size > max_semi_space_size_) {
      // The flag asked for more than the maximum allows; clamp and warn.
      initial_semispace_size_ = max_semi_space_size_;
      if (FLAG_trace_gc) {
        PrintIsolate(isolate_,
                     "Min semi-space size cannot be more than the maximum "
                     "semi-space size of %" PRIuS " MB\n",
                     max_semi_space_size_ / MB);
      }
    } else {
      initial_semispace_size_ =
          ROUND_UP(initial_semispace_size, Page::kPageSize);
    }
  }

  initial_semispace_size_ = Min(initial_semispace_size_, max_semi_space_size_);

  if (FLAG_semi_space_growth_factor < 2) {
    FLAG_semi_space_growth_factor = 2;
  }

  // The old generation is paged and needs at least one page for each space.
  int paged_space_count = LAST_PAGED_SPACE - FIRST_PAGED_SPACE + 1;
  initial_max_old_generation_size_ = max_old_generation_size_ =
      Max(static_cast<size_t>(paged_space_count * Page::kPageSize),
          max_old_generation_size_);

  if (FLAG_initial_old_space_size > 0) {
    initial_old_generation_size_ = FLAG_initial_old_space_size * MB;
  } else {
    initial_old_generation_size_ =
        max_old_generation_size_ / kInitalOldGenerationLimitFactor;
  }
  old_generation_allocation_limit_ = initial_old_generation_size_;

  // We rely on being able to allocate new arrays in paged spaces.
  DCHECK(kMaxRegularHeapObjectSize >=
         (JSArray::kSize +
          FixedArray::SizeFor(JSArray::kInitialMaxFastElementArray) +
          AllocationMemento::kSize));

  code_range_size_ = code_range_size_in_mb * MB;

  configured_ = true;
  return true;
}
4984 :
4985 :
4986 86430 : void Heap::AddToRingBuffer(const char* string) {
4987 : size_t first_part =
4988 86430 : Min(strlen(string), kTraceRingBufferSize - ring_buffer_end_);
4989 86430 : memcpy(trace_ring_buffer_ + ring_buffer_end_, string, first_part);
4990 86430 : ring_buffer_end_ += first_part;
4991 86430 : if (first_part < strlen(string)) {
4992 15788 : ring_buffer_full_ = true;
4993 15788 : size_t second_part = strlen(string) - first_part;
4994 15788 : memcpy(trace_ring_buffer_, string + first_part, second_part);
4995 15788 : ring_buffer_end_ = second_part;
4996 : }
4997 86430 : }
4998 :
4999 :
5000 18 : void Heap::GetFromRingBuffer(char* buffer) {
5001 : size_t copied = 0;
5002 18 : if (ring_buffer_full_) {
5003 0 : copied = kTraceRingBufferSize - ring_buffer_end_;
5004 0 : memcpy(buffer, trace_ring_buffer_ + ring_buffer_end_, copied);
5005 : }
5006 18 : memcpy(buffer + copied, trace_ring_buffer_, ring_buffer_end_);
5007 18 : }
5008 :
5009 28191 : bool Heap::ConfigureHeapDefault() { return ConfigureHeap(0, 0, 0); }
5010 :
// Fills |stats| with a snapshot of heap/space sizes and allocator state,
// typically on near-OOM for crash reporting. When |take_snapshot| is set,
// additionally walks the whole heap to record per-instance-type object
// counts and sizes. Also captures the trace ring buffer and, when not in GC,
// a JS stack trace into the caller-provided buffers.
void Heap::RecordStats(HeapStats* stats, bool take_snapshot) {
  *stats->start_marker = HeapStats::kStartMarker;
  *stats->end_marker = HeapStats::kEndMarker;
  *stats->new_space_size = new_space_->Size();
  *stats->new_space_capacity = new_space_->Capacity();
  *stats->old_space_size = old_space_->SizeOfObjects();
  *stats->old_space_capacity = old_space_->Capacity();
  *stats->code_space_size = code_space_->SizeOfObjects();
  *stats->code_space_capacity = code_space_->Capacity();
  *stats->map_space_size = map_space_->SizeOfObjects();
  *stats->map_space_capacity = map_space_->Capacity();
  *stats->lo_space_size = lo_space_->Size();
  isolate_->global_handles()->RecordStats(stats);
  *stats->memory_allocator_size = memory_allocator()->Size();
  *stats->memory_allocator_capacity =
      memory_allocator()->Size() + memory_allocator()->Available();
  *stats->os_error = base::OS::GetLastError();
  *stats->malloced_memory = isolate_->allocator()->GetCurrentMemoryUsage();
  *stats->malloced_peak_memory = isolate_->allocator()->GetMaxMemoryUsage();
  if (take_snapshot) {
    HeapIterator iterator(this);
    for (HeapObject* obj = iterator.next(); obj != nullptr;
         obj = iterator.next()) {
      InstanceType type = obj->map()->instance_type();
      DCHECK(0 <= type && type <= LAST_TYPE);
      stats->objects_per_type[type]++;
      stats->size_per_type[type] += obj->Size();
    }
  }
  if (stats->last_few_messages != nullptr)
    GetFromRingBuffer(stats->last_few_messages);
  if (stats->js_stacktrace != nullptr) {
    FixedStringAllocator fixed(stats->js_stacktrace, kStacktraceBufferSize - 1);
    StringStream accumulator(&fixed, StringStream::kPrintObjectConcise);
    if (gc_state() == Heap::NOT_IN_GC) {
      isolate()->PrintStack(&accumulator, Isolate::kPrintStackVerbose);
    } else {
      // Walking the JS stack during GC would observe an inconsistent heap.
      accumulator.Add("Cannot get stack trace in GC.");
    }
  }
}
5052 :
5053 2076201 : size_t Heap::PromotedSpaceSizeOfObjects() {
5054 4152402 : return old_space_->SizeOfObjects() + code_space_->SizeOfObjects() +
5055 4152402 : map_space_->SizeOfObjects() + lo_space_->SizeOfObjects();
5056 : }
5057 :
5058 202 : uint64_t Heap::PromotedExternalMemorySize() {
5059 486174 : if (external_memory_ <= external_memory_at_last_mark_compact_) return 0;
5060 11825 : return static_cast<uint64_t>(external_memory_ -
5061 11825 : external_memory_at_last_mark_compact_);
5062 : }
5063 :
5064 :
// Bounds and tuning constants for the heap growing factor computed by
// HeapGrowingFactor() and consumed by the allocation-limit logic below.
const double Heap::kMinHeapGrowingFactor = 1.1;
const double Heap::kMaxHeapGrowingFactor = 4.0;
const double Heap::kMaxHeapGrowingFactorMemoryConstrained = 2.0;
const double Heap::kMaxHeapGrowingFactorIdle = 1.5;
// Cap applied when the memory reducer asks to grow slowly or memory usage
// should be optimized (see SetOldGenerationAllocationLimit).
const double Heap::kConservativeHeapGrowingFactor = 1.3;
// Target fraction of time spent in the mutator (vs. GC); see the derivation
// comment below.
const double Heap::kTargetMutatorUtilization = 0.97;
5071 :
5072 : // Given GC speed in bytes per ms, the allocation throughput in bytes per ms
5073 : // (mutator speed), this function returns the heap growing factor that will
5074 : // achieve the kTargetMutatorUtilisation if the GC speed and the mutator speed
5075 : // remain the same until the next GC.
5076 : //
5077 : // For a fixed time-frame T = TM + TG, the mutator utilization is the ratio
5078 : // TM / (TM + TG), where TM is the time spent in the mutator and TG is the
5079 : // time spent in the garbage collector.
5080 : //
5081 : // Let MU be kTargetMutatorUtilisation, the desired mutator utilization for the
5082 : // time-frame from the end of the current GC to the end of the next GC. Based
5083 : // on the MU we can compute the heap growing factor F as
5084 : //
5085 : // F = R * (1 - MU) / (R * (1 - MU) - MU), where R = gc_speed / mutator_speed.
5086 : //
5087 : // This formula can be derived as follows.
5088 : //
5089 : // F = Limit / Live by definition, where the Limit is the allocation limit,
5090 : // and the Live is size of live objects.
5091 : // Let’s assume that we already know the Limit. Then:
5092 : // TG = Limit / gc_speed
5093 : // TM = (TM + TG) * MU, by definition of MU.
5094 : // TM = TG * MU / (1 - MU)
5095 : // TM = Limit * MU / (gc_speed * (1 - MU))
5096 : // On the other hand, if the allocation throughput remains constant:
5097 : // Limit = Live + TM * allocation_throughput = Live + TM * mutator_speed
5098 : // Solving it for TM, we get
5099 : // TM = (Limit - Live) / mutator_speed
5100 : // Combining the two equation for TM:
5101 : // (Limit - Live) / mutator_speed = Limit * MU / (gc_speed * (1 - MU))
5102 : // (Limit - Live) = Limit * MU * mutator_speed / (gc_speed * (1 - MU))
5103 : // substitute R = gc_speed / mutator_speed
5104 : // (Limit - Live) = Limit * MU / (R * (1 - MU))
5105 : // substitute F = Limit / Live
5106 : // F - 1 = F * MU / (R * (1 - MU))
5107 : // F - F * MU / (R * (1 - MU)) = 1
5108 : // F * (1 - MU / (R * (1 - MU))) = 1
5109 : // F * (R * (1 - MU) - MU) / (R * (1 - MU)) = 1
5110 : // F = R * (1 - MU) / (R * (1 - MU) - MU)
5111 9 : double Heap::HeapGrowingFactor(double gc_speed, double mutator_speed,
5112 : double max_factor) {
5113 : DCHECK_LE(kMinHeapGrowingFactor, max_factor);
5114 : DCHECK_GE(kMaxHeapGrowingFactor, max_factor);
5115 56892 : if (gc_speed == 0 || mutator_speed == 0) return max_factor;
5116 :
5117 41059 : const double speed_ratio = gc_speed / mutator_speed;
5118 : const double mu = kTargetMutatorUtilization;
5119 :
5120 41059 : const double a = speed_ratio * (1 - mu);
5121 41059 : const double b = speed_ratio * (1 - mu) - mu;
5122 :
5123 : // The factor is a / b, but we need to check for small b first.
5124 41059 : double factor = (a < b * max_factor) ? a / b : max_factor;
5125 : factor = Min(factor, max_factor);
5126 : factor = Max(factor, kMinHeapGrowingFactor);
5127 9 : return factor;
5128 : }
5129 :
5130 4 : double Heap::MaxHeapGrowingFactor(size_t max_old_generation_size) {
5131 : const double min_small_factor = 1.3;
5132 : const double max_small_factor = 2.0;
5133 : const double high_factor = 4.0;
5134 :
5135 56887 : size_t max_old_generation_size_in_mb = max_old_generation_size / MB;
5136 : max_old_generation_size_in_mb =
5137 : Max(max_old_generation_size_in_mb,
5138 : static_cast<size_t>(kMinOldGenerationSize));
5139 :
5140 : // If we are on a device with lots of memory, we allow a high heap
5141 : // growing factor.
5142 56887 : if (max_old_generation_size_in_mb >= kMaxOldGenerationSize) {
5143 : return high_factor;
5144 : }
5145 :
5146 : DCHECK_GE(max_old_generation_size_in_mb, kMinOldGenerationSize);
5147 : DCHECK_LT(max_old_generation_size_in_mb, kMaxOldGenerationSize);
5148 :
5149 : // On smaller devices we linearly scale the factor: (X-A)/(B-A)*(D-C)+C
5150 14970 : double factor = (max_old_generation_size_in_mb - kMinOldGenerationSize) *
5151 14970 : (max_small_factor - min_small_factor) /
5152 : (kMaxOldGenerationSize - kMinOldGenerationSize) +
5153 14970 : min_small_factor;
5154 3 : return factor;
5155 : }
5156 :
// Computes the next old-generation allocation limit from the growing
// |factor| and the current |old_gen_size|, using 64-bit arithmetic to avoid
// overflow on 32-bit size_t, and capping the result halfway between the
// current size and the configured maximum.
size_t Heap::CalculateOldGenerationAllocationLimit(double factor,
                                                   size_t old_gen_size) {
  CHECK_LT(1.0, factor);
  CHECK_LT(0, old_gen_size);
  uint64_t limit = static_cast<uint64_t>(old_gen_size * factor);
  // Grow by at least the minimum step even when the factor is very small.
  limit = Max(limit, static_cast<uint64_t>(old_gen_size) +
                         MinimumAllocationLimitGrowingStep());
  limit += new_space_->Capacity();
  uint64_t halfway_to_the_max =
      (static_cast<uint64_t>(old_gen_size) + max_old_generation_size_) / 2;
  return static_cast<size_t>(Min(limit, halfway_to_the_max));
}
5169 :
5170 0 : size_t Heap::MinimumAllocationLimitGrowingStep() {
5171 : const size_t kRegularAllocationLimitGrowingStep = 8;
5172 : const size_t kLowMemoryAllocationLimitGrowingStep = 2;
5173 : size_t limit = (Page::kPageSize > MB ? Page::kPageSize : MB);
5174 77658 : return limit * (ShouldOptimizeForMemoryUsage()
5175 : ? kLowMemoryAllocationLimitGrowingStep
5176 77658 : : kRegularAllocationLimitGrowingStep);
5177 : }
5178 :
// Recomputes old_generation_allocation_limit_ after a GC from the measured
// GC and mutator speeds, applying conservative caps under memory pressure
// and honoring the --heap-growing-percent override.
void Heap::SetOldGenerationAllocationLimit(size_t old_gen_size, double gc_speed,
                                           double mutator_speed) {
  double max_factor = MaxHeapGrowingFactor(max_old_generation_size_);
  double factor = HeapGrowingFactor(gc_speed, mutator_speed, max_factor);

  if (FLAG_trace_gc_verbose) {
    isolate_->PrintWithTimestamp(
        "Heap growing factor %.1f based on mu=%.3f, speed_ratio=%.f "
        "(gc=%.f, mutator=%.f)\n",
        factor, kTargetMutatorUtilization, gc_speed / mutator_speed, gc_speed,
        mutator_speed);
  }

  // Cap the factor when memory should be grown cautiously.
  if (memory_reducer_->ShouldGrowHeapSlowly() ||
      ShouldOptimizeForMemoryUsage()) {
    factor = Min(factor, kConservativeHeapGrowingFactor);
  }

  // Force the minimum factor when compaction stress testing or memory
  // reduction is requested.
  if (FLAG_stress_compaction || ShouldReduceMemory()) {
    factor = kMinHeapGrowingFactor;
  }

  // Explicit flag override takes precedence over everything above.
  if (FLAG_heap_growing_percent > 0) {
    factor = 1.0 + FLAG_heap_growing_percent / 100.0;
  }

  old_generation_allocation_limit_ =
      CalculateOldGenerationAllocationLimit(factor, old_gen_size);

  if (FLAG_trace_gc_verbose) {
    isolate_->PrintWithTimestamp(
        "Grow: old size: %" PRIuS " KB, new limit: %" PRIuS " KB (%.1f)\n",
        old_gen_size / KB, old_generation_allocation_limit_ / KB, factor);
  }
}
5214 :
// Lowers old_generation_allocation_limit_ if the freshly computed limit is
// smaller than the current one; never raises it (in contrast to
// SetOldGenerationAllocationLimit).
void Heap::DampenOldGenerationAllocationLimit(size_t old_gen_size,
                                              double gc_speed,
                                              double mutator_speed) {
  double max_factor = MaxHeapGrowingFactor(max_old_generation_size_);
  double factor = HeapGrowingFactor(gc_speed, mutator_speed, max_factor);
  size_t limit = CalculateOldGenerationAllocationLimit(factor, old_gen_size);
  if (limit < old_generation_allocation_limit_) {
    if (FLAG_trace_gc_verbose) {
      isolate_->PrintWithTimestamp(
          "Dampen: old size: %" PRIuS " KB, old limit: %" PRIuS
          " KB, "
          "new limit: %" PRIuS " KB (%.1f)\n",
          old_gen_size / KB, old_generation_allocation_limit_ / KB, limit / KB,
          factor);
    }
    old_generation_allocation_limit_ = limit;
  }
}
5233 :
// True while the embedder reports a page load in progress (RAIL "load"
// mode), the allocation limit has not been overshot by a large margin, and
// we are still within kMaxLoadTimeMs of the load start. Used to defer GC
// work during loading. Note the short-circuit order keeps the time query
// off the common path.
bool Heap::ShouldOptimizeForLoadTime() {
  return isolate()->rail_mode() == PERFORMANCE_LOAD &&
         !AllocationLimitOvershotByLargeMargin() &&
         MonotonicallyIncreasingTimeInMs() <
             isolate()->LoadStartTimeMs() + kMaxLoadTimeMs;
}
5240 :
5241 : // This predicate is called when an old generation space cannot allocated from
5242 : // the free list and is about to add a new page. Returning false will cause a
5243 : // major GC. It happens when the old generation allocation limit is reached and
5244 : // - either we need to optimize for memory usage,
5245 : // - or the incremental marking is not in progress and we cannot start it.
// Decides whether an old-generation space may add a new page instead of
// triggering a major GC (see the comment above for the policy summary).
bool Heap::ShouldExpandOldGenerationOnSlowAllocation() {
  // Fast path: still under the limit, or GC is temporarily forbidden.
  if (always_allocate() || OldGenerationSpaceAvailable() > 0) return true;
  // We reached the old generation allocation limit.

  if (ShouldOptimizeForMemoryUsage()) return false;

  // During page load, keep expanding rather than pausing for GC.
  if (ShouldOptimizeForLoadTime()) return true;

  if (incremental_marking()->NeedsFinalization()) {
    // Expand only if we have not overshot the limit by a large margin;
    // otherwise finalize the marking cycle via a GC now.
    return !AllocationLimitOvershotByLargeMargin();
  }

  if (incremental_marking()->IsStopped() &&
      IncrementalMarkingLimitReached() == IncrementalMarkingLimit::kNoLimit) {
    // We cannot start incremental marking.
    return false;
  }
  return true;
}
5265 :
5266 : // This function returns either kNoLimit, kSoftLimit, or kHardLimit.
5267 : // The kNoLimit means that either incremental marking is disabled or it is too
5268 : // early to start incremental marking.
5269 : // The kSoftLimit means that incremental marking should be started soon.
5270 : // The kHardLimit means that incremental marking should be started immediately.
// Classifies how urgently incremental marking should start (see the comment
// above): kNoLimit (don't start yet), kSoftLimit (start soon), kHardLimit
// (start immediately). Checks are ordered from cheapest/most decisive to
// most expensive.
Heap::IncrementalMarkingLimit Heap::IncrementalMarkingLimitReached() {
  // Code using an AlwaysAllocateScope assumes that the GC state does not
  // change; that implies that no marking steps must be performed.
  if (!incremental_marking()->CanBeActivated() || always_allocate()) {
    // Incremental marking is disabled or it is too early to start.
    return IncrementalMarkingLimit::kNoLimit;
  }
  if (FLAG_stress_incremental_marking) {
    return IncrementalMarkingLimit::kHardLimit;
  }
  if (PromotedSpaceSizeOfObjects() <=
      IncrementalMarking::kActivationThreshold) {
    // Incremental marking is disabled or it is too early to start.
    return IncrementalMarkingLimit::kNoLimit;
  }
  if ((FLAG_stress_compaction && (gc_count_ & 1) != 0) ||
      HighMemoryPressure()) {
    // If there is high memory pressure or stress testing is enabled, then
    // start marking immediately.
    return IncrementalMarkingLimit::kHardLimit;
  }
  size_t old_generation_space_available = OldGenerationSpaceAvailable();
  // Plenty of headroom left (more than a full new space): no need to mark.
  if (old_generation_space_available > new_space_->Capacity()) {
    return IncrementalMarkingLimit::kNoLimit;
  }
  if (ShouldOptimizeForMemoryUsage()) {
    return IncrementalMarkingLimit::kHardLimit;
  }
  // Defer marking while a page load is in progress.
  if (ShouldOptimizeForLoadTime()) {
    return IncrementalMarkingLimit::kNoLimit;
  }
  if (old_generation_space_available == 0) {
    return IncrementalMarkingLimit::kHardLimit;
  }
  return IncrementalMarkingLimit::kSoftLimit;
}
5307 :
5308 80 : void Heap::EnableInlineAllocation() {
5309 80 : if (!inline_allocation_disabled_) return;
5310 40 : inline_allocation_disabled_ = false;
5311 :
5312 : // Update inline allocation limit for new space.
5313 40 : new_space()->UpdateInlineAllocationLimit(0);
5314 : }
5315 :
5316 :
5317 124 : void Heap::DisableInlineAllocation() {
5318 62 : if (inline_allocation_disabled_) return;
5319 62 : inline_allocation_disabled_ = true;
5320 :
5321 : // Update inline allocation limit for new space.
5322 62 : new_space()->UpdateInlineAllocationLimit(0);
5323 :
5324 : // Update inline allocation limit for old spaces.
5325 : PagedSpaces spaces(this);
5326 248 : for (PagedSpace* space = spaces.next(); space != nullptr;
5327 : space = spaces.next()) {
5328 186 : space->EmptyAllocationInfo();
5329 : }
5330 : }
5331 :
// One-time heap initialization: allocates the memory allocator, collectors,
// all spaces, and the auxiliary components (tracer, idle handlers, etc.).
// Returns false on any failure; the caller is expected to invoke TearDown()
// to release whatever was allocated. The initialization order below matters
// (e.g. the mark-compact collector must exist before incremental marking).
bool Heap::SetUp() {
#ifdef DEBUG
  allocation_timeout_ = FLAG_gc_interval;
#endif

  // Initialize heap spaces and initial maps and objects. Whenever something
  // goes wrong, just return false. The caller should check the results and
  // call Heap::TearDown() to release allocated memory.
  //
  // If the heap is not yet configured (e.g. through the API), configure it.
  // Configuration is based on the flags new-space-size (really the semispace
  // size) and old-space-size if set or the initial values of semispace_size_
  // and old_generation_size_ otherwise.
  if (!configured_) {
    if (!ConfigureHeapDefault()) return false;
  }

  mmap_region_base_ =
      reinterpret_cast<uintptr_t>(base::OS::GetRandomMmapAddr()) &
      ~kMmapRegionMask;

  // Set up memory allocator.
  memory_allocator_ = new MemoryAllocator(isolate_);
  if (!memory_allocator_->SetUp(MaxReserved(), code_range_size_)) return false;

  store_buffer_ = new StoreBuffer(this);

  mark_compact_collector_ = new MarkCompactCollector(this);
  incremental_marking_ =
      new IncrementalMarking(this, mark_compact_collector_->marking_worklist());

  if (FLAG_concurrent_marking) {
    MarkCompactCollector::MarkingWorklist* marking_worklist =
        mark_compact_collector_->marking_worklist();
    concurrent_marking_ = new ConcurrentMarking(
        this, marking_worklist->shared(), marking_worklist->bailout(),
        marking_worklist->on_hold(), mark_compact_collector_->weak_objects());
  } else {
    // Concurrent marking disabled: construct an inert instance.
    concurrent_marking_ =
        new ConcurrentMarking(this, nullptr, nullptr, nullptr, nullptr);
  }

  for (int i = 0; i <= LAST_SPACE; i++) {
    space_[i] = nullptr;
  }

  space_[NEW_SPACE] = new_space_ = new NewSpace(this);
  if (!new_space_->SetUp(initial_semispace_size_, max_semi_space_size_)) {
    return false;
  }

  space_[OLD_SPACE] = old_space_ =
      new OldSpace(this, OLD_SPACE, NOT_EXECUTABLE);
  if (!old_space_->SetUp()) return false;

  space_[CODE_SPACE] = code_space_ = new OldSpace(this, CODE_SPACE, EXECUTABLE);
  if (!code_space_->SetUp()) return false;

  space_[MAP_SPACE] = map_space_ = new MapSpace(this, MAP_SPACE);
  if (!map_space_->SetUp()) return false;

  // The large object code space may contain code or data. We set the memory
  // to be non-executable here for safety, but this means we need to enable it
  // explicitly when allocating large code objects.
  space_[LO_SPACE] = lo_space_ = new LargeObjectSpace(this, LO_SPACE);
  if (!lo_space_->SetUp()) return false;

  // Set up the seed that is used to randomize the string hash function.
  DCHECK_EQ(Smi::kZero, hash_seed());
  if (FLAG_randomize_hashes) InitializeHashSeed();

  for (int i = 0; i < static_cast<int>(v8::Isolate::kUseCounterFeatureCount);
       i++) {
    deferred_counters_[i] = 0;
  }

  tracer_ = new GCTracer(this);
  minor_mark_compact_collector_ = new MinorMarkCompactCollector(this);
  gc_idle_time_handler_ = new GCIdleTimeHandler();
  memory_reducer_ = new MemoryReducer(this);
  if (V8_UNLIKELY(FLAG_gc_stats)) {
    live_object_stats_ = new ObjectStats(this);
    dead_object_stats_ = new ObjectStats(this);
  }
  scavenge_job_ = new ScavengeJob();
  local_embedder_heap_tracer_ = new LocalEmbedderHeapTracer();

  LOG(isolate_, IntPtrTEvent("heap-capacity", Capacity()));
  LOG(isolate_, IntPtrTEvent("heap-available", Available()));

  store_buffer()->SetUp();

  mark_compact_collector()->SetUp();
  if (minor_mark_compact_collector() != nullptr) {
    minor_mark_compact_collector()->SetUp();
  }

  idle_scavenge_observer_ = new IdleScavengeObserver(
      *this, ScavengeJob::kBytesAllocatedBeforeNextIdleTask);
  new_space()->AddAllocationObserver(idle_scavenge_observer_);

  SetGetExternallyAllocatedMemoryInBytesCallback(
      DefaultGetExternallyAllocatedMemoryInBytesCallback);

  return true;
}
5438 :
5439 109907 : void Heap::InitializeHashSeed() {
5440 109907 : if (FLAG_hash_seed == 0) {
5441 109872 : int rnd = isolate()->random_number_generator()->NextInt();
5442 109872 : set_hash_seed(Smi::FromInt(rnd & Name::kHashBitMask));
5443 : } else {
5444 : set_hash_seed(Smi::FromInt(FLAG_hash_seed));
5445 : }
5446 109907 : }
5447 :
// Publishes the isolate's JS stack limits into the root array as Smi-tagged
// values, so generated code can read them as ordinary roots.
void Heap::SetStackLimits() {
  DCHECK_NOT_NULL(isolate_);
  DCHECK(isolate_ == isolate());
  // On 64 bit machines, pointers are generally out of range of Smis. We write
  // something that looks like an out of range Smi to the GC.

  // Set up the special root array entries containing the stack limits.
  // These are actually addresses, but the tag makes the GC ignore it.
  roots_[kStackLimitRootIndex] = reinterpret_cast<Object*>(
      (isolate_->stack_guard()->jslimit() & ~kSmiTagMask) | kSmiTag);
  roots_[kRealStackLimitRootIndex] = reinterpret_cast<Object*>(
      (isolate_->stack_guard()->real_jslimit() & ~kSmiTagMask) | kSmiTag);
}
5461 :
// Resets the stack-limit roots to the zero Smi (the counterpart of
// SetStackLimits above).
void Heap::ClearStackLimits() {
  roots_[kStackLimitRootIndex] = Smi::kZero;
  roots_[kRealStackLimitRootIndex] = Smi::kZero;
}
5466 :
// Prints the running allocation count and its finalized hash; used with
// --verify-predictable to compare allocation sequences across runs.
void Heap::PrintAllocationsHash() {
  uint32_t hash = StringHasher::GetHashCore(raw_allocations_hash_);
  PrintF("\n### Allocations = %u, hash = 0x%08x\n", allocations_count(), hash);
}
5471 :
5472 :
// Called once snapshot deserialization is finished: shrinks the pages that
// hold immortal immovable objects (when a snapshot was used) and flips the
// deserialization_complete_ flag.
void Heap::NotifyDeserializationComplete() {
  PagedSpaces spaces(this);
  for (PagedSpace* s = spaces.next(); s != nullptr; s = spaces.next()) {
    if (isolate()->snapshot_available()) s->ShrinkImmortalImmovablePages();
#ifdef DEBUG
    // All pages right after bootstrapping must be marked as never-evacuate.
    for (Page* p : *s) {
      DCHECK(p->NeverEvacuate());
    }
#endif  // DEBUG
  }

  deserialization_complete_ = true;
}
5487 :
// Installs the embedder-provided heap tracer; must not be called while a GC
// is in progress.
void Heap::SetEmbedderHeapTracer(EmbedderHeapTracer* tracer) {
  DCHECK_EQ(gc_state_, HeapState::NOT_IN_GC);
  local_embedder_heap_tracer()->SetRemoteTracer(tracer);
}
5492 :
// Forwards an API-constructed object to the embedder heap tracer when both
// of its first two embedder fields are set (i.e. it looks like a wrapper
// with native data attached).
void Heap::TracePossibleWrapper(JSObject* js_object) {
  DCHECK(js_object->WasConstructedFromApiFunction());
  if (js_object->GetEmbedderFieldCount() >= 2 &&
      js_object->GetEmbedderField(0) &&
      js_object->GetEmbedderField(0) != undefined_value() &&
      js_object->GetEmbedderField(1) != undefined_value()) {
    // Field 0 must be aligned (not Smi-tagged) to be a valid native pointer.
    DCHECK_EQ(0,
              reinterpret_cast<intptr_t>(js_object->GetEmbedderField(0)) % 2);
    local_embedder_heap_tracer()->AddWrapperToTrace(std::pair<void*, void*>(
        reinterpret_cast<void*>(js_object->GetEmbedderField(0)),
        reinterpret_cast<void*>(js_object->GetEmbedderField(1))));
  }
}
5506 :
// Marks an object reported live by the embedder: pushed onto the
// incremental-marking worklist while marking is active, otherwise handed
// directly to the mark-compact collector.
void Heap::RegisterExternallyReferencedObject(Object** object) {
  // The embedder is not aware of whether numbers are materialized as heap
  // objects or just passed around as Smis.
  if (!(*object)->IsHeapObject()) return;
  HeapObject* heap_object = HeapObject::cast(*object);
  DCHECK(Contains(heap_object));
  if (FLAG_incremental_marking_wrappers && incremental_marking()->IsMarking()) {
    incremental_marking()->WhiteToGreyAndPush(heap_object);
  } else {
    DCHECK(mark_compact_collector()->in_use());
    mark_compact_collector()->MarkExternallyReferencedObject(heap_object);
  }
}
5520 :
// Tears the heap down: first the GC machinery (collectors, marking,
// idle-time handlers), then auxiliary components, then the spaces, and
// finally the store buffer and memory allocator. The order mirrors the
// reverse of SetUp(); after this call the heap must not be used.
void Heap::TearDown() {
  use_tasks_ = false;
#ifdef VERIFY_HEAP
  if (FLAG_verify_heap) {
    Verify();
  }
#endif

  UpdateMaximumCommitted();

  if (FLAG_verify_predictable) {
    PrintAllocationsHash();
  }

  // Detach the idle scavenge observer before destroying it.
  new_space()->RemoveAllocationObserver(idle_scavenge_observer_);
  delete idle_scavenge_observer_;
  idle_scavenge_observer_ = nullptr;

  if (mark_compact_collector_ != nullptr) {
    mark_compact_collector_->TearDown();
    delete mark_compact_collector_;
    mark_compact_collector_ = nullptr;
  }

  if (minor_mark_compact_collector_ != nullptr) {
    minor_mark_compact_collector_->TearDown();
    delete minor_mark_compact_collector_;
    minor_mark_compact_collector_ = nullptr;
  }

  delete incremental_marking_;
  incremental_marking_ = nullptr;

  delete concurrent_marking_;
  concurrent_marking_ = nullptr;

  delete gc_idle_time_handler_;
  gc_idle_time_handler_ = nullptr;

  if (memory_reducer_ != nullptr) {
    memory_reducer_->TearDown();
    delete memory_reducer_;
    memory_reducer_ = nullptr;
  }

  if (live_object_stats_ != nullptr) {
    delete live_object_stats_;
    live_object_stats_ = nullptr;
  }

  if (dead_object_stats_ != nullptr) {
    delete dead_object_stats_;
    dead_object_stats_ = nullptr;
  }

  delete local_embedder_heap_tracer_;
  local_embedder_heap_tracer_ = nullptr;

  delete scavenge_job_;
  scavenge_job_ = nullptr;

  // Global handles and external strings must go before the spaces that hold
  // the objects they reference.
  isolate_->global_handles()->TearDown();

  external_string_table_.TearDown();

  delete tracer_;
  tracer_ = nullptr;

  new_space_->TearDown();
  delete new_space_;
  new_space_ = nullptr;

  if (old_space_ != nullptr) {
    delete old_space_;
    old_space_ = nullptr;
  }

  if (code_space_ != nullptr) {
    delete code_space_;
    code_space_ = nullptr;
  }

  if (map_space_ != nullptr) {
    delete map_space_;
    map_space_ = nullptr;
  }

  if (lo_space_ != nullptr) {
    lo_space_->TearDown();
    delete lo_space_;
    lo_space_ = nullptr;
  }

  store_buffer()->TearDown();

  memory_allocator()->TearDown();

  // Free any strong-roots registrations that were never unregistered.
  StrongRootsList* next = nullptr;
  for (StrongRootsList* list = strong_roots_list_; list; list = next) {
    next = list->next;
    delete list;
  }
  strong_roots_list_ = nullptr;

  delete store_buffer_;
  store_buffer_ = nullptr;

  delete memory_allocator_;
  memory_allocator_ = nullptr;
}
5631 :
5632 40 : void Heap::AddGCPrologueCallback(v8::Isolate::GCCallbackWithData callback,
5633 : GCType gc_type, void* data) {
5634 : DCHECK_NOT_NULL(callback);
5635 : DCHECK(gc_prologue_callbacks_.end() ==
5636 : std::find(gc_prologue_callbacks_.begin(), gc_prologue_callbacks_.end(),
5637 : GCCallbackTuple(callback, gc_type, data)));
5638 40 : gc_prologue_callbacks_.emplace_back(callback, gc_type, data);
5639 40 : }
5640 :
5641 35 : void Heap::RemoveGCPrologueCallback(v8::Isolate::GCCallbackWithData callback,
5642 : void* data) {
5643 : DCHECK_NOT_NULL(callback);
5644 70 : for (size_t i = 0; i < gc_prologue_callbacks_.size(); i++) {
5645 105 : if (gc_prologue_callbacks_[i].callback == callback &&
5646 35 : gc_prologue_callbacks_[i].data == data) {
5647 : gc_prologue_callbacks_[i] = gc_prologue_callbacks_.back();
5648 : gc_prologue_callbacks_.pop_back();
5649 35 : return;
5650 : }
5651 : }
5652 0 : UNREACHABLE();
5653 : }
5654 :
5655 35 : void Heap::AddGCEpilogueCallback(v8::Isolate::GCCallbackWithData callback,
5656 : GCType gc_type, void* data) {
5657 : DCHECK_NOT_NULL(callback);
5658 : DCHECK(gc_epilogue_callbacks_.end() ==
5659 : std::find(gc_epilogue_callbacks_.begin(), gc_epilogue_callbacks_.end(),
5660 : GCCallbackTuple(callback, gc_type, data)));
5661 35 : gc_epilogue_callbacks_.emplace_back(callback, gc_type, data);
5662 35 : }
5663 :
5664 35 : void Heap::RemoveGCEpilogueCallback(v8::Isolate::GCCallbackWithData callback,
5665 : void* data) {
5666 : DCHECK_NOT_NULL(callback);
5667 70 : for (size_t i = 0; i < gc_epilogue_callbacks_.size(); i++) {
5668 105 : if (gc_epilogue_callbacks_[i].callback == callback &&
5669 35 : gc_epilogue_callbacks_[i].data == data) {
5670 : gc_epilogue_callbacks_[i] = gc_epilogue_callbacks_.back();
5671 : gc_epilogue_callbacks_.pop_back();
5672 35 : return;
5673 : }
5674 : }
5675 0 : UNREACHABLE();
5676 : }
5677 :
// TODO(ishell): Find a better place for this.
// Records that |code| weakly depends on the new-space object |obj| by
// appending a (WeakCell(obj), code) pair to the dedicated list. The root is
// only updated when ArrayList::Add had to allocate a new backing store.
void Heap::AddWeakNewSpaceObjectToCodeDependency(Handle<HeapObject> obj,
                                                 Handle<WeakCell> code) {
  DCHECK(InNewSpace(*obj));
  DCHECK(!InNewSpace(*code));
  Handle<ArrayList> list(weak_new_space_object_to_code_list(), isolate());
  list = ArrayList::Add(list, isolate()->factory()->NewWeakCell(obj), code);
  if (*list != weak_new_space_object_to_code_list()) {
    set_weak_new_space_object_to_code_list(*list);
  }
}
5689 :
// TODO(ishell): Find a better place for this.
// Records dependent code for the old-space object |obj| in the weak
// object-to-code hash table. The root is only updated when Put had to
// reallocate the table.
void Heap::AddWeakObjectToCodeDependency(Handle<HeapObject> obj,
                                         Handle<DependentCode> dep) {
  DCHECK(!InNewSpace(*obj));
  DCHECK(!InNewSpace(*dep));
  Handle<WeakHashTable> table(weak_object_to_code_table(), isolate());
  table = WeakHashTable::Put(table, obj, dep);
  if (*table != weak_object_to_code_table())
    set_weak_object_to_code_table(*table);
  DCHECK_EQ(*dep, LookupWeakObjectToCodeDependency(obj));
}
5701 :
5702 :
5703 602766 : DependentCode* Heap::LookupWeakObjectToCodeDependency(Handle<HeapObject> obj) {
5704 491672 : Object* dep = weak_object_to_code_table()->Lookup(obj);
5705 491672 : if (dep->IsDependentCode()) return DependentCode::cast(dep);
5706 111094 : return DependentCode::cast(empty_fixed_array());
5707 : }
5708 :
5709 : namespace {
5710 363 : void CompactWeakFixedArray(Object* object) {
5711 363 : if (object->IsWeakFixedArray()) {
5712 : WeakFixedArray* array = WeakFixedArray::cast(object);
5713 242 : array->Compact<WeakFixedArray::NullCallback>();
5714 : }
5715 363 : }
5716 : } // anonymous namespace
5717 :
// Walks the whole heap and compacts known WeakFixedArrays: the
// prototype-user registries of all PrototypeInfo objects (with the
// registry-aware compaction callback) plus three well-known root lists.
void Heap::CompactWeakFixedArrays() {
  // Find known WeakFixedArrays and compact them.
  HeapIterator iterator(this);
  for (HeapObject* o = iterator.next(); o != nullptr; o = iterator.next()) {
    if (o->IsPrototypeInfo()) {
      Object* prototype_users = PrototypeInfo::cast(o)->prototype_users();
      if (prototype_users->IsWeakFixedArray()) {
        WeakFixedArray* array = WeakFixedArray::cast(prototype_users);
        array->Compact<JSObject::PrototypeRegistryCompactionCallback>();
      }
    }
  }
  CompactWeakFixedArray(noscript_shared_function_infos());
  CompactWeakFixedArray(script_list());
  CompactWeakFixedArray(weak_stack_trace_list());
}
5734 :
// Appends (WeakCell(map), remaining-GC-count) to the retained-maps list so
// the map survives the next FLAG_retain_maps_for_n_gc collections. Compacts
// the list first when it is full; updates the root only if Add reallocated.
void Heap::AddRetainedMap(Handle<Map> map) {
  Handle<WeakCell> cell = Map::WeakCellForMap(map);
  Handle<ArrayList> array(retained_maps(), isolate());
  if (array->IsFull()) {
    CompactRetainedMaps(*array);
  }
  array = ArrayList::Add(
      array, cell, handle(Smi::FromInt(FLAG_retain_maps_for_n_gc), isolate()),
      ArrayList::kReloadLengthAfterAllocation);
  if (*array != retained_maps()) {
    set_retained_maps(*array);
  }
}
5748 :
5749 :
// Compacts the retained-maps list in place by dropping (cell, age) pairs
// whose weak cell has been cleared, sliding surviving pairs to the front.
// Also recomputes number_of_disposed_maps_ for the compacted layout and
// clears the now-unused tail slots.
void Heap::CompactRetainedMaps(ArrayList* retained_maps) {
  DCHECK_EQ(retained_maps, this->retained_maps());
  int length = retained_maps->Length();
  int new_length = 0;
  int new_number_of_disposed_maps = 0;
  // This loop compacts the array by removing cleared weak cells.
  for (int i = 0; i < length; i += 2) {
    DCHECK(retained_maps->Get(i)->IsWeakCell());
    WeakCell* cell = WeakCell::cast(retained_maps->Get(i));
    Object* age = retained_maps->Get(i + 1);
    if (cell->cleared()) continue;
    if (i != new_length) {
      retained_maps->Set(new_length, cell);
      retained_maps->Set(new_length + 1, age);
    }
    // Pairs that were in the disposed prefix stay in the disposed prefix.
    if (i < number_of_disposed_maps_) {
      new_number_of_disposed_maps += 2;
    }
    new_length += 2;
  }
  number_of_disposed_maps_ = new_number_of_disposed_maps;
  Object* undefined = undefined_value();
  // Clear the tail so it holds no stale references.
  for (int i = new_length; i < length; i++) {
    retained_maps->Clear(i, undefined);
  }
  if (new_length != length) retained_maps->SetLength(new_length);
}
5777 :
// Forwards an out-of-memory condition at |location| to the process-fatal
// handler; does not return.
void Heap::FatalProcessOutOfMemory(const char* location, bool is_heap_oom) {
  v8::internal::V8::FatalProcessOutOfMemory(location, is_heap_oom);
}
5781 :
5782 : #ifdef DEBUG
5783 :
5784 : class PrintHandleVisitor : public RootVisitor {
5785 : public:
5786 : void VisitRootPointers(Root root, Object** start, Object** end) override {
5787 : for (Object** p = start; p < end; p++)
5788 : PrintF(" handle %p to %p\n", reinterpret_cast<void*>(p),
5789 : reinterpret_cast<void*>(*p));
5790 : }
5791 : };
5792 :
5793 :
5794 : void Heap::PrintHandles() {
5795 : PrintF("Handles:\n");
5796 : PrintHandleVisitor v;
5797 : isolate_->handle_scope_implementer()->Iterate(&v);
5798 : }
5799 :
5800 : #endif
5801 :
// Visitor that tallies handles as it walks the handle scopes and, in its
// destructor, CHECK-fails when the total reaches
// HandleScope::kCheckHandleThreshold.
class CheckHandleCountVisitor : public RootVisitor {
 public:
  CheckHandleCountVisitor() : handle_count_(0) {}
  ~CheckHandleCountVisitor() override {
    // The check deliberately runs after the full iteration has finished.
    CHECK_GT(HandleScope::kCheckHandleThreshold, handle_count_);
  }
  void VisitRootPointers(Root root, Object** start, Object** end) override {
    handle_count_ += end - start;
  }

 private:
  ptrdiff_t handle_count_;
};
5815 :
5816 :
5817 0 : void Heap::CheckHandleCount() {
5818 : CheckHandleCountVisitor v;
5819 0 : isolate_->handle_scope_implementer()->Iterate(&v);
5820 0 : }
5821 :
// Removes |slot| from the old-to-new remembered set. New-space pages carry
// no recorded slots, so those are skipped.
void Heap::ClearRecordedSlot(HeapObject* object, Object** slot) {
  Address slot_addr = reinterpret_cast<Address>(slot);
  Page* page = Page::FromAddress(slot_addr);
  if (!page->InNewSpace()) {
    DCHECK_EQ(page->owner()->identity(), OLD_SPACE);
    store_buffer()->DeleteEntry(slot_addr);
  }
}
5830 :
// Returns whether |slot| is present in either the old-to-new or old-to-old
// remembered set. Flushes the store buffer first so pending entries are
// visible; new-space objects never have recorded slots.
bool Heap::HasRecordedSlot(HeapObject* object, Object** slot) {
  if (InNewSpace(object)) {
    return false;
  }
  Address slot_addr = reinterpret_cast<Address>(slot);
  Page* page = Page::FromAddress(slot_addr);
  DCHECK_EQ(page->owner()->identity(), OLD_SPACE);
  store_buffer()->MoveAllEntriesToRememberedSet();
  return RememberedSet<OLD_TO_NEW>::Contains(page, slot_addr) ||
         RememberedSet<OLD_TO_OLD>::Contains(page, slot_addr);
}
5842 :
// Removes all recorded old-to-new slots in [start, end). Like
// ClearRecordedSlot, new-space pages are skipped since they carry no
// recorded slots.
void Heap::ClearRecordedSlotRange(Address start, Address end) {
  Page* page = Page::FromAddress(start);
  if (!page->InNewSpace()) {
    DCHECK_EQ(page->owner()->identity(), OLD_SPACE);
    store_buffer()->DeleteEntry(start, end);
  }
}
5850 :
// Records a write of the new-space object |value| into the code object
// |host| by inserting a typed slot into the old-to-new remembered set.
// For constant-pool entries the recorded address and slot type are adjusted
// to point at the pool entry rather than the instruction.
void Heap::RecordWriteIntoCodeSlow(Code* host, RelocInfo* rinfo,
                                   Object* value) {
  DCHECK(InNewSpace(value));
  Page* source_page = Page::FromAddress(reinterpret_cast<Address>(host));
  RelocInfo::Mode rmode = rinfo->rmode();
  Address addr = rinfo->pc();
  SlotType slot_type = SlotTypeForRelocInfoMode(rmode);
  if (rinfo->IsInConstantPool()) {
    addr = rinfo->constant_pool_entry_address();
    if (RelocInfo::IsCodeTarget(rmode)) {
      slot_type = CODE_ENTRY_SLOT;
    } else {
      DCHECK(RelocInfo::IsEmbeddedObject(rmode));
      slot_type = OBJECT_SLOT;
    }
  }
  RememberedSet<OLD_TO_NEW>::InsertTyped(
      source_page, reinterpret_cast<Address>(host), slot_type, addr);
}
5870 :
// Re-records write-barrier entries for every embedded object reference in
// |code|, e.g. after the code object has been freshly written.
void Heap::RecordWritesIntoCode(Code* code) {
  for (RelocIterator it(code, RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT));
       !it.done(); it.next()) {
    RecordWriteIntoCode(code, it.rinfo(), it.rinfo()->target_object());
  }
}
5877 :
5878 4389800 : Space* AllSpaces::next() {
5879 4389800 : switch (counter_++) {
5880 : case NEW_SPACE:
5881 3658180 : return heap_->new_space();
5882 : case OLD_SPACE:
5883 1463272 : return heap_->old_space();
5884 : case CODE_SPACE:
5885 1463272 : return heap_->code_space();
5886 : case MAP_SPACE:
5887 1463272 : return heap_->map_space();
5888 : case LO_SPACE:
5889 1463272 : return heap_->lo_space();
5890 : default:
5891 : return nullptr;
5892 : }
5893 : }
5894 :
5895 667316 : PagedSpace* PagedSpaces::next() {
5896 667316 : switch (counter_++) {
5897 : case OLD_SPACE:
5898 500487 : return heap_->old_space();
5899 : case CODE_SPACE:
5900 333658 : return heap_->code_space();
5901 : case MAP_SPACE:
5902 333658 : return heap_->map_space();
5903 : default:
5904 : return nullptr;
5905 : }
5906 : }
5907 :
5908 :
5909 518583 : OldSpace* OldSpaces::next() {
5910 518583 : switch (counter_++) {
5911 : case OLD_SPACE:
5912 345722 : return heap_->old_space();
5913 : case CODE_SPACE:
5914 345722 : return heap_->code_space();
5915 : default:
5916 : return nullptr;
5917 : }
5918 : }
5919 :
// Positions the iterator just before FIRST_SPACE so the first next() call
// yields the first space.
SpaceIterator::SpaceIterator(Heap* heap)
    : heap_(heap), current_space_(FIRST_SPACE - 1) {}
5922 :
// Nothing to release; defined out-of-line to anchor the class.
SpaceIterator::~SpaceIterator() {
}
5925 :
5926 :
// Returns true while next() has not yet produced the last space.
bool SpaceIterator::has_next() {
  // Iterate until no more spaces.
  return current_space_ != LAST_SPACE;
}
5931 :
// Advances to and returns the next space; only valid while has_next().
Space* SpaceIterator::next() {
  DCHECK(has_next());
  return heap_->space(++current_space_);
}
5936 :
5937 :
// Abstract predicate used by HeapIterator to decide which heap objects to
// hide from the caller.
class HeapObjectsFilter {
 public:
  virtual ~HeapObjectsFilter() {}
  // Returns true if |object| should be skipped by the iteration.
  virtual bool SkipObject(HeapObject* object) = 0;
};
5943 :
5944 :
5945 : class UnreachableObjectsFilter : public HeapObjectsFilter {
5946 : public:
5947 4041 : explicit UnreachableObjectsFilter(Heap* heap) : heap_(heap) {
5948 1347 : MarkReachableObjects();
5949 1347 : }
5950 :
5951 2694 : ~UnreachableObjectsFilter() {
5952 13383 : for (auto it : reachable_) {
5953 21378 : delete it.second;
5954 : it.second = nullptr;
5955 : }
5956 2694 : }
5957 :
5958 17455107 : bool SkipObject(HeapObject* object) {
5959 17455107 : if (object->IsFiller()) return true;
5960 34910214 : MemoryChunk* chunk = MemoryChunk::FromAddress(object->address());
5961 17455107 : if (reachable_.count(chunk) == 0) return true;
5962 34910214 : return reachable_[chunk]->count(object) == 0;
5963 : }
5964 :
5965 : private:
5966 107798545 : bool MarkAsReachable(HeapObject* object) {
5967 215597090 : MemoryChunk* chunk = MemoryChunk::FromAddress(object->address());
5968 107798545 : if (reachable_.count(chunk) == 0) {
5969 21378 : reachable_[chunk] = new std::unordered_set<HeapObject*>();
5970 : }
5971 215597090 : if (reachable_[chunk]->count(object)) return false;
5972 16927994 : reachable_[chunk]->insert(object);
5973 16927994 : return true;
5974 : }
5975 :
5976 1347 : class MarkingVisitor : public ObjectVisitor, public RootVisitor {
5977 : public:
5978 : explicit MarkingVisitor(UnreachableObjectsFilter* filter)
5979 1347 : : filter_(filter) {}
5980 :
5981 38143311 : void VisitPointers(HeapObject* host, Object** start,
5982 : Object** end) override {
5983 38143311 : MarkPointers(start, end);
5984 38143311 : }
5985 :
5986 4050326 : void VisitRootPointers(Root root, Object** start, Object** end) override {
5987 4050326 : MarkPointers(start, end);
5988 4050326 : }
5989 :
5990 1347 : void TransitiveClosure() {
5991 16930688 : while (!marking_stack_.empty()) {
5992 16927994 : HeapObject* obj = marking_stack_.back();
5993 : marking_stack_.pop_back();
5994 16927994 : obj->Iterate(this);
5995 : }
5996 1347 : }
5997 :
5998 : private:
5999 42193637 : void MarkPointers(Object** start, Object** end) {
6000 167578498 : for (Object** p = start; p < end; p++) {
6001 268356038 : if (!(*p)->IsHeapObject()) continue;
6002 107798545 : HeapObject* obj = HeapObject::cast(*p);
6003 107798545 : if (filter_->MarkAsReachable(obj)) {
6004 16927994 : marking_stack_.push_back(obj);
6005 : }
6006 : }
6007 42193637 : }
6008 : UnreachableObjectsFilter* filter_;
6009 : std::vector<HeapObject*> marking_stack_;
6010 : };
6011 :
6012 : friend class MarkingVisitor;
6013 :
6014 1347 : void MarkReachableObjects() {
6015 : MarkingVisitor visitor(this);
6016 1347 : heap_->IterateRoots(&visitor, VISIT_ALL);
6017 1347 : visitor.TransitiveClosure();
6018 1347 : }
6019 :
6020 : Heap* heap_;
6021 : DisallowHeapAllocation no_allocation_;
6022 : std::unordered_map<MemoryChunk*, std::unordered_set<HeapObject*>*> reachable_;
6023 : };
6024 :
// Makes the heap iterable, optionally builds the reachability filter, and
// positions the object iterator at the first space. While a HeapIterator is
// alive, heap allocation is disallowed.
HeapIterator::HeapIterator(Heap* heap,
                           HeapIterator::HeapObjectsFiltering filtering)
    : no_heap_allocation_(),
      heap_(heap),
      filtering_(filtering),
      filter_(nullptr),
      space_iterator_(nullptr),
      object_iterator_(nullptr) {
  heap_->MakeHeapIterable();
  heap_->heap_iterator_start();
  // Start the iteration.
  space_iterator_ = new SpaceIterator(heap_);
  switch (filtering_) {
    case kFilterUnreachable:
      filter_ = new UnreachableObjectsFilter(heap_);
      break;
    default:
      break;
  }
  object_iterator_ = space_iterator_->next()->GetObjectIterator();
}
6046 :
6047 :
// Releases the space iterator and filter and notifies the heap that
// iteration has ended.
HeapIterator::~HeapIterator() {
  heap_->heap_iterator_end();
#ifdef DEBUG
  // Assert that in filtering mode we have iterated through all
  // objects. Otherwise, heap will be left in an inconsistent state.
  if (filtering_ != kNoFiltering) {
    DCHECK_NULL(object_iterator_);
  }
#endif
  delete space_iterator_;
  delete filter_;
}
6060 :
6061 :
6062 99515697 : HeapObject* HeapIterator::next() {
6063 99515697 : if (filter_ == nullptr) return NextObject();
6064 :
6065 16929341 : HeapObject* obj = NextObject();
6066 16929341 : while ((obj != nullptr) && (filter_->SkipObject(obj))) obj = NextObject();
6067 : return obj;
6068 : }
6069 :
6070 :
// Returns the next unfiltered heap object, advancing to the next space's
// object iterator when the current one is exhausted; nullptr at the end.
HeapObject* HeapIterator::NextObject() {
  // No iterator means we are done.
  if (object_iterator_.get() == nullptr) return nullptr;

  if (HeapObject* obj = object_iterator_.get()->Next()) {
    // If the current iterator has more objects we are fine.
    return obj;
  } else {
    // Go though the spaces looking for one that has objects.
    while (space_iterator_->has_next()) {
      object_iterator_ = space_iterator_->next()->GetObjectIterator();
      if (HeapObject* obj = object_iterator_.get()->Next()) {
        return obj;
      }
    }
  }
  // Done with the last space.
  object_iterator_.reset(nullptr);
  return nullptr;
}
6091 :
6092 :
6093 86430 : void Heap::UpdateTotalGCTime(double duration) {
6094 86430 : if (FLAG_trace_gc_verbose) {
6095 0 : total_gc_time_ms_ += duration;
6096 : }
6097 86430 : }
6098 :
// Compacts the new-space external string list in place: drops holes,
// resolves thin strings to their actual target, keeps entries still in new
// space, and promotes the rest to the old-space list.
void Heap::ExternalStringTable::CleanUpNewSpaceStrings() {
  int last = 0;
  Isolate* isolate = heap_->isolate();
  for (size_t i = 0; i < new_space_strings_.size(); ++i) {
    Object* o = new_space_strings_[i];
    if (o->IsTheHole(isolate)) {
      continue;
    }
    // A thin string's target may no longer be external; drop it then.
    if (o->IsThinString()) {
      o = ThinString::cast(o)->actual();
      if (!o->IsExternalString()) continue;
    }
    DCHECK(o->IsExternalString());
    if (heap_->InNewSpace(o)) {
      new_space_strings_[last++] = o;
    } else {
      old_space_strings_.push_back(o);
    }
  }
  new_space_strings_.resize(last);
}
6120 :
// Cleans up both lists: first the new-space list (which may promote entries
// into the old-space list), then compacts the old-space list the same way.
void Heap::ExternalStringTable::CleanUpAll() {
  CleanUpNewSpaceStrings();
  int last = 0;
  Isolate* isolate = heap_->isolate();
  for (size_t i = 0; i < old_space_strings_.size(); ++i) {
    Object* o = old_space_strings_[i];
    if (o->IsTheHole(isolate)) {
      continue;
    }
    // A thin string's target may no longer be external; drop it then.
    if (o->IsThinString()) {
      o = ThinString::cast(o)->actual();
      if (!o->IsExternalString()) continue;
    }
    DCHECK(o->IsExternalString());
    DCHECK(!heap_->InNewSpace(o));
    old_space_strings_[last++] = o;
  }
  old_space_strings_.resize(last);
#ifdef VERIFY_HEAP
  if (FLAG_verify_heap) {
    Verify();
  }
#endif
}
6145 :
// Finalizes (releases the external resource of) every string still tracked
// in either list, then empties both lists.
void Heap::ExternalStringTable::TearDown() {
  for (size_t i = 0; i < new_space_strings_.size(); ++i) {
    Object* o = new_space_strings_[i];
    // Thin strings whose target is no longer external need no finalization.
    if (o->IsThinString()) {
      o = ThinString::cast(o)->actual();
      if (!o->IsExternalString()) continue;
    }
    heap_->FinalizeExternalString(ExternalString::cast(o));
  }
  new_space_strings_.clear();
  for (size_t i = 0; i < old_space_strings_.size(); ++i) {
    Object* o = old_space_strings_[i];
    if (o->IsThinString()) {
      o = ThinString::cast(o)->actual();
      if (!o->IsExternalString()) continue;
    }
    heap_->FinalizeExternalString(ExternalString::cast(o));
  }
  old_space_strings_.clear();
}
6166 :
6167 :
// Records a recently unmapped page address in a small ring buffer so crash
// dumps can show which pages were freed. The low bits are XOR-tagged to
// make the entries recognizable in a dump.
void Heap::RememberUnmappedPage(Address page, bool compacted) {
  uintptr_t p = reinterpret_cast<uintptr_t>(page);
  // Tag the page pointer to make it findable in the dump file.
  if (compacted) {
    p ^= 0xc1ead & (Page::kPageSize - 1);  // Cleared.
  } else {
    p ^= 0x1d1ed & (Page::kPageSize - 1);  // I died.
  }
  remembered_unmapped_pages_[remembered_unmapped_pages_index_] =
      reinterpret_cast<Address>(p);
  // Advance the ring-buffer cursor, wrapping at capacity.
  remembered_unmapped_pages_index_++;
  remembered_unmapped_pages_index_ %= kRememberedUnmappedPages;
}
6181 :
6182 1625070 : void Heap::RegisterStrongRoots(Object** start, Object** end) {
6183 1625070 : StrongRootsList* list = new StrongRootsList();
6184 1625071 : list->next = strong_roots_list_;
6185 1625071 : list->start = start;
6186 1625071 : list->end = end;
6187 1625071 : strong_roots_list_ = list;
6188 1625071 : }
6189 :
6190 :
// Removes every strong-roots registration whose range begins at |start|
// from the singly-linked list, unlinking and freeing the matching nodes.
void Heap::UnregisterStrongRoots(Object** start) {
  StrongRootsList* prev = nullptr;
  StrongRootsList* list = strong_roots_list_;
  while (list != nullptr) {
    StrongRootsList* next = list->next;
    if (list->start == start) {
      if (prev) {
        prev->next = next;
      } else {
        // Matched the head; advance the list head instead.
        strong_roots_list_ = next;
      }
      delete list;
    } else {
      prev = list;
    }
    list = next;
  }
}
6209 :
6210 :
// Returns how many object-type buckets the object-stats machinery tracks.
size_t Heap::NumberOfTrackedHeapObjectTypes() {
  return ObjectStats::OBJECT_STATS_COUNT;
}
6214 :
6215 :
6216 0 : size_t Heap::ObjectCountAtLastGC(size_t index) {
6217 0 : if (live_object_stats_ == nullptr || index >= ObjectStats::OBJECT_STATS_COUNT)
6218 : return 0;
6219 0 : return live_object_stats_->object_count_last_gc(index);
6220 : }
6221 :
6222 :
6223 0 : size_t Heap::ObjectSizeAtLastGC(size_t index) {
6224 0 : if (live_object_stats_ == nullptr || index >= ObjectStats::OBJECT_STATS_COUNT)
6225 : return 0;
6226 0 : return live_object_stats_->object_size_last_gc(index);
6227 : }
6228 :
6229 :
// Translates an object-stats bucket |index| into human-readable type and
// sub-type names via the instance-type, code-kind, and fixed-array-sub-type
// macro lists. Returns false when |index| maps to no known bucket.
bool Heap::GetObjectTypeName(size_t index, const char** object_type,
                             const char** object_sub_type) {
  if (index >= ObjectStats::OBJECT_STATS_COUNT) return false;

  switch (static_cast<int>(index)) {
#define COMPARE_AND_RETURN_NAME(name) \
  case name:                          \
    *object_type = #name;             \
    *object_sub_type = "";            \
    return true;
    INSTANCE_TYPE_LIST(COMPARE_AND_RETURN_NAME)
#undef COMPARE_AND_RETURN_NAME
#define COMPARE_AND_RETURN_NAME(name)                      \
  case ObjectStats::FIRST_CODE_KIND_SUB_TYPE + Code::name: \
    *object_type = "CODE_TYPE";                            \
    *object_sub_type = "CODE_KIND/" #name;                 \
    return true;
    CODE_KIND_LIST(COMPARE_AND_RETURN_NAME)
#undef COMPARE_AND_RETURN_NAME
#define COMPARE_AND_RETURN_NAME(name)                  \
  case ObjectStats::FIRST_FIXED_ARRAY_SUB_TYPE + name: \
    *object_type = "FIXED_ARRAY_TYPE";                 \
    *object_sub_type = #name;                          \
    return true;
    FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(COMPARE_AND_RETURN_NAME)
#undef COMPARE_AND_RETURN_NAME
  }
  return false;
}
6259 :
6260 1 : const char* AllocationSpaceName(AllocationSpace space) {
6261 1 : switch (space) {
6262 : case NEW_SPACE:
6263 : return "NEW_SPACE";
6264 : case OLD_SPACE:
6265 1 : return "OLD_SPACE";
6266 : case CODE_SPACE:
6267 0 : return "CODE_SPACE";
6268 : case MAP_SPACE:
6269 0 : return "MAP_SPACE";
6270 : case LO_SPACE:
6271 0 : return "LO_SPACE";
6272 : default:
6273 0 : UNREACHABLE();
6274 : }
6275 : return nullptr;
6276 : }
6277 :
// Forwards object slot ranges to the shared verification helper.
void VerifyPointersVisitor::VisitPointers(HeapObject* host, Object** start,
                                          Object** end) {
  VerifyPointers(start, end);
}
6282 :
// Forwards root slot ranges to the shared verification helper.
void VerifyPointersVisitor::VisitRootPointers(Root root, Object** start,
                                              Object** end) {
  VerifyPointers(start, end);
}
6287 :
6288 0 : void VerifyPointersVisitor::VerifyPointers(Object** start, Object** end) {
6289 0 : for (Object** current = start; current < end; current++) {
6290 0 : if ((*current)->IsHeapObject()) {
6291 : HeapObject* object = HeapObject::cast(*current);
6292 0 : CHECK(object->GetIsolate()->heap()->Contains(object));
6293 0 : CHECK(object->map()->IsMap());
6294 : } else {
6295 0 : CHECK((*current)->IsSmi());
6296 : }
6297 : }
6298 0 : }
6299 :
6300 0 : void VerifySmisVisitor::VisitRootPointers(Root root, Object** start,
6301 : Object** end) {
6302 0 : for (Object** current = start; current < end; current++) {
6303 0 : CHECK((*current)->IsSmi());
6304 : }
6305 0 : }
6306 :
// Debug helper: returns whether migrating |obj| from its current space to
// |dst| is a legal move under the rules listed below.
bool Heap::AllowedToBeMigrated(HeapObject* obj, AllocationSpace dst) {
  // Object migration is governed by the following rules:
  //
  // 1) Objects in new-space can be migrated to the old space
  //    that matches their target space or they stay in new-space.
  // 2) Objects in old-space stay in the same space when migrating.
  // 3) Fillers (two or more words) can migrate due to left-trimming of
  //    fixed arrays in new-space or old space.
  // 4) Fillers (one word) can never migrate, they are skipped by
  //    incremental marking explicitly to prevent invalid pattern.
  //
  // Since this function is used for debugging only, we do not place
  // asserts here, but check everything explicitly.
  if (obj->map() == one_pointer_filler_map()) return false;
  InstanceType type = obj->map()->instance_type();
  MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
  AllocationSpace src = chunk->owner()->identity();
  switch (src) {
    case NEW_SPACE:
      return dst == src || dst == OLD_SPACE;
    case OLD_SPACE:
      return dst == src &&
             (dst == OLD_SPACE || obj->IsFiller() || obj->IsExternalString());
    case CODE_SPACE:
      return dst == src && type == CODE_TYPE;
    case MAP_SPACE:
    case LO_SPACE:
      return false;
  }
  UNREACHABLE();
}
6338 :
6339 0 : void Heap::CreateObjectStats() {
6340 0 : if (V8_LIKELY(FLAG_gc_stats == 0)) return;
6341 0 : if (!live_object_stats_) {
6342 0 : live_object_stats_ = new ObjectStats(this);
6343 : }
6344 0 : if (!dead_object_stats_) {
6345 0 : dead_object_stats_ = new ObjectStats(this);
6346 : }
6347 : }
6348 :
6349 : } // namespace internal
6350 : } // namespace v8
|