LCOV - code coverage report
Current view: top level - src/heap - heap.cc (source / functions)
Test: app.info
Date: 2019-01-20

                  Hit    Total   Coverage
Lines:           1760     2274     77.4 %
Functions:        232      310     74.8 %

          Line data    Source code
       1             : // Copyright 2012 the V8 project authors. All rights reserved.
       2             : // Use of this source code is governed by a BSD-style license that can be
       3             : // found in the LICENSE file.
       4             : 
       5             : #include "src/heap/heap.h"
       6             : 
       7             : #include <unordered_map>
       8             : #include <unordered_set>
       9             : 
      10             : #include "src/accessors.h"
      11             : #include "src/api-inl.h"
      12             : #include "src/assembler-inl.h"
      13             : #include "src/base/bits.h"
      14             : #include "src/base/once.h"
      15             : #include "src/base/utils/random-number-generator.h"
      16             : #include "src/bootstrapper.h"
      17             : #include "src/compilation-cache.h"
      18             : #include "src/conversions.h"
      19             : #include "src/debug/debug.h"
      20             : #include "src/deoptimizer.h"
      21             : #include "src/feedback-vector.h"
      22             : #include "src/global-handles.h"
      23             : #include "src/heap/array-buffer-collector.h"
      24             : #include "src/heap/array-buffer-tracker-inl.h"
      25             : #include "src/heap/barrier.h"
      26             : #include "src/heap/code-stats.h"
      27             : #include "src/heap/concurrent-marking.h"
      28             : #include "src/heap/embedder-tracing.h"
      29             : #include "src/heap/gc-idle-time-handler.h"
      30             : #include "src/heap/gc-tracer.h"
      31             : #include "src/heap/heap-controller.h"
      32             : #include "src/heap/heap-write-barrier-inl.h"
      33             : #include "src/heap/incremental-marking.h"
      34             : #include "src/heap/mark-compact-inl.h"
      35             : #include "src/heap/mark-compact.h"
      36             : #include "src/heap/memory-reducer.h"
      37             : #include "src/heap/object-stats.h"
      38             : #include "src/heap/objects-visiting-inl.h"
      39             : #include "src/heap/objects-visiting.h"
      40             : #include "src/heap/remembered-set.h"
      41             : #include "src/heap/scavenge-job.h"
      42             : #include "src/heap/scavenger-inl.h"
      43             : #include "src/heap/store-buffer.h"
      44             : #include "src/heap/stress-marking-observer.h"
      45             : #include "src/heap/stress-scavenge-observer.h"
      46             : #include "src/heap/sweeper.h"
      47             : #include "src/interpreter/interpreter.h"
      48             : #include "src/microtask-queue.h"
      49             : #include "src/objects/data-handler.h"
      50             : #include "src/objects/free-space-inl.h"
      51             : #include "src/objects/hash-table-inl.h"
      52             : #include "src/objects/maybe-object.h"
      53             : #include "src/objects/shared-function-info.h"
      54             : #include "src/objects/slots-inl.h"
      55             : #include "src/regexp/jsregexp.h"
      56             : #include "src/runtime-profiler.h"
      57             : #include "src/snapshot/embedded-data.h"
      58             : #include "src/snapshot/natives.h"
      59             : #include "src/snapshot/serializer-common.h"
      60             : #include "src/snapshot/snapshot.h"
      61             : #include "src/tracing/trace-event.h"
      62             : #include "src/unicode-decoder.h"
      63             : #include "src/unicode-inl.h"
      64             : #include "src/utils-inl.h"
      65             : #include "src/utils.h"
      66             : #include "src/v8.h"
      67             : #include "src/vm-state-inl.h"
      68             : 
      69             : // Has to be the last include (doesn't have include guards):
      70             : #include "src/objects/object-macros.h"
      71             : 
      72             : namespace v8 {
      73             : namespace internal {
      74             : 
      75          56 : void Heap::SetArgumentsAdaptorDeoptPCOffset(int pc_offset) {
      76             :   DCHECK_EQ(Smi::kZero, arguments_adaptor_deopt_pc_offset());
      77          56 :   set_arguments_adaptor_deopt_pc_offset(Smi::FromInt(pc_offset));
      78          56 : }
      79             : 
      80          56 : void Heap::SetConstructStubCreateDeoptPCOffset(int pc_offset) {
      81             :   DCHECK(construct_stub_create_deopt_pc_offset() == Smi::kZero);
      82          56 :   set_construct_stub_create_deopt_pc_offset(Smi::FromInt(pc_offset));
      83          56 : }
      84             : 
      85          56 : void Heap::SetConstructStubInvokeDeoptPCOffset(int pc_offset) {
      86             :   DCHECK(construct_stub_invoke_deopt_pc_offset() == Smi::kZero);
      87          56 :   set_construct_stub_invoke_deopt_pc_offset(Smi::FromInt(pc_offset));
      88          56 : }
      89             : 
      90          56 : void Heap::SetInterpreterEntryReturnPCOffset(int pc_offset) {
      91             :   DCHECK_EQ(Smi::kZero, interpreter_entry_return_pc_offset());
      92          56 :   set_interpreter_entry_return_pc_offset(Smi::FromInt(pc_offset));
      93          56 : }
      94             : 
      95         236 : void Heap::SetSerializedObjects(FixedArray objects) {
      96             :   DCHECK(isolate()->serializer_enabled());
      97         236 :   set_serialized_objects(objects);
      98         236 : }
      99             : 
     100         186 : void Heap::SetSerializedGlobalProxySizes(FixedArray sizes) {
     101             :   DCHECK(isolate()->serializer_enabled());
     102         186 :   set_serialized_global_proxy_sizes(sizes);
     103         186 : }
     104             : 
     105           0 : bool Heap::GCCallbackTuple::operator==(
     106             :     const Heap::GCCallbackTuple& other) const {
     107           0 :   return other.callback == callback && other.data == data;
     108             : }
     109             : 
     110             : Heap::GCCallbackTuple& Heap::GCCallbackTuple::operator=(
     111             :     const Heap::GCCallbackTuple& other) V8_NOEXCEPT = default;
     112             : 
     113             : struct Heap::StrongRootsList {
     114             :   FullObjectSlot start;
     115             :   FullObjectSlot end;
     116             :   StrongRootsList* next;
     117             : };
     118             : 
     119      125732 : class IdleScavengeObserver : public AllocationObserver {
     120             :  public:
     121             :   IdleScavengeObserver(Heap& heap, intptr_t step_size)
     122       62883 :       : AllocationObserver(step_size), heap_(heap) {}
     123             : 
     124       32432 :   void Step(int bytes_allocated, Address, size_t) override {
     125       32432 :     heap_.ScheduleIdleScavengeIfNeeded(bytes_allocated);
     126       32432 :   }
     127             : 
     128             :  private:
     129             :   Heap& heap_;
     130             : };
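
An AllocationObserver such as IdleScavengeObserver is stepped by the space it
is attached to roughly once per step_size allocated bytes. A minimal sketch of
the same pattern, assuming only the AllocationObserver interface visible above
(the observer name, step size, and log message are illustrative, not part of
heap.cc):

    // Hypothetical observer; notified about allocation progress in a space.
    class LogAllocationObserver : public AllocationObserver {
     public:
      explicit LogAllocationObserver(intptr_t step_size)
          : AllocationObserver(step_size) {}

      // Invoked by the space roughly once per |step_size| allocated bytes.
      void Step(int bytes_allocated, Address soon_object, size_t size) override {
        PrintF("observed %d bytes allocated\n", bytes_allocated);
      }
    };
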
     131             : 
     132       62883 : Heap::Heap()
     133             :     : isolate_(isolate()),
     134             :       initial_max_old_generation_size_(max_old_generation_size_),
     135             :       initial_max_old_generation_size_threshold_(0),
     136             :       initial_old_generation_size_(max_old_generation_size_ /
     137             :                                    kInitalOldGenerationLimitFactor),
     138             :       memory_pressure_level_(MemoryPressureLevel::kNone),
     139             :       old_generation_allocation_limit_(initial_old_generation_size_),
     140             :       global_pretenuring_feedback_(kInitialFeedbackCapacity),
     141             :       current_gc_callback_flags_(GCCallbackFlags::kNoGCCallbackFlags),
     142             :       is_current_gc_forced_(false),
     143      503063 :       external_string_table_(this) {
     144             :   // Ensure old_generation_size_ is a multiple of kPageSize.
     145             :   DCHECK_EQ(0, max_old_generation_size_ & (Page::kPageSize - 1));
     146             : 
     147             :   set_native_contexts_list(Smi::kZero);
     148             :   set_allocation_sites_list(Smi::kZero);
      149             :   // Put a dummy entry in the remembered pages so we can find the list in
      150             :   // the minidump even if there are no real unmapped pages.
     151             :   RememberUnmappedPage(kNullAddress, false);
     152       62882 : }
     153             : 
     154        1261 : size_t Heap::MaxReserved() {
     155     2788838 :   return static_cast<size_t>(2 * max_semi_space_size_ +
     156     2788838 :                              max_old_generation_size_);
     157             : }
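
The factor of two in MaxReserved() covers the young generation's two
semi-spaces (from-space and to-space), each of which may grow to
max_semi_space_size_; the old generation maximum is added on top. For example,
with an assumed max_semi_space_size_ of 8 MB and max_old_generation_size_ of
700 MB, MaxReserved() yields 2 * 8 + 700 = 716 MB.
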
     158             : 
     159       28779 : size_t Heap::ComputeMaxOldGenerationSize(uint64_t physical_memory) {
     160             :   const size_t old_space_physical_memory_factor = 4;
     161       28779 :   size_t computed_size = static_cast<size_t>(physical_memory / i::MB /
     162             :                                              old_space_physical_memory_factor *
     163       28779 :                                              kPointerMultiplier);
     164             :   return Max(Min(computed_size, HeapController::kMaxSize),
     165       28779 :              HeapController::kMinSize);
     166             : }
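
As a worked example of the computation above (the concrete clamp bounds are
defined elsewhere and only assumed here): on a machine with 8 GB of physical
memory and kPointerMultiplier == 1,

    computed_size = 8192 MB / 4 * 1 = 2048 MB

which Max/Min then clamp into the range [HeapController::kMinSize,
HeapController::kMaxSize].
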
     167             : 
     168          61 : size_t Heap::Capacity() {
     169          61 :   if (!HasBeenSetUp()) return 0;
     170             : 
     171          61 :   return new_space_->Capacity() + OldGenerationCapacity();
     172             : }
     173             : 
     174     2726806 : size_t Heap::OldGenerationCapacity() {
     175     2726806 :   if (!HasBeenSetUp()) return 0;
     176             :   PagedSpaces spaces(this, PagedSpaces::SpacesSpecifier::kAllPagedSpaces);
     177             :   size_t total = 0;
     178    13634028 :   for (PagedSpace* space = spaces.next(); space != nullptr;
     179             :        space = spaces.next()) {
     180    10907221 :     total += space->Capacity();
     181             :   }
     182     2726808 :   return total + lo_space_->SizeOfObjects() + code_lo_space_->SizeOfObjects();
     183             : }
     184             : 
     185      777534 : size_t Heap::CommittedOldGenerationMemory() {
     186      777534 :   if (!HasBeenSetUp()) return 0;
     187             : 
     188             :   PagedSpaces spaces(this, PagedSpaces::SpacesSpecifier::kAllPagedSpaces);
     189             :   size_t total = 0;
     190     3887667 :   for (PagedSpace* space = spaces.next(); space != nullptr;
     191             :        space = spaces.next()) {
     192     3110134 :     total += space->CommittedMemory();
     193             :   }
     194      777533 :   return total + lo_space_->Size() + code_lo_space_->Size();
     195             : }
     196             : 
     197           0 : size_t Heap::CommittedMemoryOfUnmapper() {
     198           0 :   if (!HasBeenSetUp()) return 0;
     199             : 
     200           0 :   return memory_allocator()->unmapper()->CommittedBufferedMemory();
     201             : }
     202             : 
     203      610529 : size_t Heap::CommittedMemory() {
     204      610529 :   if (!HasBeenSetUp()) return 0;
     205             : 
     206      610529 :   return new_space_->CommittedMemory() + CommittedOldGenerationMemory();
     207             : }
     208             : 
     209             : 
     210         246 : size_t Heap::CommittedPhysicalMemory() {
     211         246 :   if (!HasBeenSetUp()) return 0;
     212             : 
     213             :   size_t total = 0;
     214        2214 :   for (SpaceIterator it(this); it.has_next();) {
     215        1968 :     total += it.next()->CommittedPhysicalMemory();
     216             :   }
     217             : 
     218         246 :   return total;
     219             : }
     220             : 
     221      257598 : size_t Heap::CommittedMemoryExecutable() {
     222      128799 :   if (!HasBeenSetUp()) return 0;
     223             : 
     224      128799 :   return static_cast<size_t>(memory_allocator()->SizeExecutable());
     225             : }
     226             : 
     227             : 
     228      277039 : void Heap::UpdateMaximumCommitted() {
     229      554078 :   if (!HasBeenSetUp()) return;
     230             : 
     231      277040 :   const size_t current_committed_memory = CommittedMemory();
     232      277040 :   if (current_committed_memory > maximum_committed_) {
     233       99382 :     maximum_committed_ = current_committed_memory;
     234             :   }
     235             : }
     236             : 
     237         614 : size_t Heap::Available() {
     238         307 :   if (!HasBeenSetUp()) return 0;
     239             : 
     240             :   size_t total = 0;
     241             : 
     242        2763 :   for (SpaceIterator it(this); it.has_next();) {
     243        2456 :     total += it.next()->Available();
     244             :   }
     245             : 
     246         307 :   total += memory_allocator()->Available();
     247         307 :   return total;
     248             : }
     249             : 
     250     8178395 : bool Heap::CanExpandOldGeneration(size_t size) {
     251     2726967 :   if (force_oom_) return false;
     252     5453468 :   if (OldGenerationCapacity() + size > MaxOldGenerationSize()) return false;
      253             :   // OldGenerationCapacity does not account for compaction spaces used
      254             :   // during evacuation. Ensure that expanding the old generation does not
      255             :   // push the total allocated memory size over the maximum heap size.
     256     5449388 :   return memory_allocator()->Size() + size <= MaxReserved();
     257             : }
     258             : 
     259          15 : bool Heap::HasBeenSetUp() {
     260             :   // We will always have a new space when the heap is set up.
     261     6475214 :   return new_space_ != nullptr;
     262             : }
     263             : 
     264             : 
     265      107086 : GarbageCollector Heap::SelectGarbageCollector(AllocationSpace space,
     266       23602 :                                               const char** reason) {
     267             :   // Is global GC requested?
     268      107086 :   if (space != NEW_SPACE) {
     269      165586 :     isolate_->counters()->gc_compactor_caused_by_request()->Increment();
     270       82793 :     *reason = "GC in old space requested";
     271       82793 :     return MARK_COMPACTOR;
     272             :   }
     273             : 
     274       24293 :   if (FLAG_gc_global || (FLAG_stress_compaction && (gc_count_ & 1) != 0)) {
     275         691 :     *reason = "GC in old space forced by flags";
     276         691 :     return MARK_COMPACTOR;
     277             :   }
     278             : 
     279       23921 :   if (incremental_marking()->NeedsFinalization() &&
     280         319 :       AllocationLimitOvershotByLargeMargin()) {
     281           3 :     *reason = "Incremental marking needs finalization";
     282           3 :     return MARK_COMPACTOR;
     283             :   }
     284             : 
     285             :   // Over-estimate the new space size using capacity to allow some slack.
     286       47198 :   if (!CanExpandOldGeneration(new_space_->TotalCapacity())) {
     287             :     isolate_->counters()
     288             :         ->gc_compactor_caused_by_oldspace_exhaustion()
     289          10 :         ->Increment();
     290           5 :     *reason = "scavenge might not succeed";
     291           5 :     return MARK_COMPACTOR;
     292             :   }
     293             : 
     294             :   // Default
     295       23594 :   *reason = nullptr;
     296       23594 :   return YoungGenerationCollector();
     297             : }
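
In summary, the selection above prefers the full collector and falls back to a
young-generation GC only when nothing forces a full one:

    1. request for a non-NEW_SPACE space          -> MARK_COMPACTOR
    2. --gc-global or stress compaction           -> MARK_COMPACTOR
    3. incremental marking needs finalization and
       the allocation limit is overshot by a
       large margin                               -> MARK_COMPACTOR
    4. the old generation could not absorb a
       worst-case scavenge (promotion of the
       entire new space capacity)                 -> MARK_COMPACTOR
    5. otherwise                                  -> YoungGenerationCollector()
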
     298             : 
     299           0 : void Heap::SetGCState(HeapState state) {
     300      277037 :   gc_state_ = state;
     301           0 : }
     302             : 
     303          25 : void Heap::PrintShortHeapStatistics() {
     304          50 :   if (!FLAG_trace_gc_verbose) return;
     305             :   PrintIsolate(isolate_,
     306             :                "Memory allocator,       used: %6" PRIuS
     307             :                " KB,"
     308             :                " available: %6" PRIuS " KB\n",
     309             :                memory_allocator()->Size() / KB,
     310           0 :                memory_allocator()->Available() / KB);
     311             :   PrintIsolate(isolate_,
     312             :                "Read-only space,        used: %6" PRIuS
     313             :                " KB"
     314             :                ", available: %6" PRIuS
     315             :                " KB"
     316             :                ", committed: %6" PRIuS " KB\n",
     317           0 :                read_only_space_->Size() / KB,
     318           0 :                read_only_space_->Available() / KB,
     319           0 :                read_only_space_->CommittedMemory() / KB);
     320             :   PrintIsolate(isolate_,
     321             :                "New space,              used: %6" PRIuS
     322             :                " KB"
     323             :                ", available: %6" PRIuS
     324             :                " KB"
     325             :                ", committed: %6" PRIuS " KB\n",
     326           0 :                new_space_->Size() / KB, new_space_->Available() / KB,
     327           0 :                new_space_->CommittedMemory() / KB);
     328             :   PrintIsolate(isolate_,
     329             :                "New large object space, used: %6" PRIuS
     330             :                " KB"
     331             :                ", available: %6" PRIuS
     332             :                " KB"
     333             :                ", committed: %6" PRIuS " KB\n",
     334           0 :                new_lo_space_->SizeOfObjects() / KB,
     335           0 :                new_lo_space_->Available() / KB,
     336           0 :                new_lo_space_->CommittedMemory() / KB);
     337             :   PrintIsolate(isolate_,
     338             :                "Old space,              used: %6" PRIuS
     339             :                " KB"
     340             :                ", available: %6" PRIuS
     341             :                " KB"
     342             :                ", committed: %6" PRIuS " KB\n",
     343           0 :                old_space_->SizeOfObjects() / KB, old_space_->Available() / KB,
     344           0 :                old_space_->CommittedMemory() / KB);
     345             :   PrintIsolate(isolate_,
     346             :                "Code space,             used: %6" PRIuS
     347             :                " KB"
     348             :                ", available: %6" PRIuS
     349             :                " KB"
      350             :                ", committed: %6" PRIuS " KB\n",
     351           0 :                code_space_->SizeOfObjects() / KB, code_space_->Available() / KB,
     352           0 :                code_space_->CommittedMemory() / KB);
     353             :   PrintIsolate(isolate_,
     354             :                "Map space,              used: %6" PRIuS
     355             :                " KB"
     356             :                ", available: %6" PRIuS
     357             :                " KB"
     358             :                ", committed: %6" PRIuS " KB\n",
     359           0 :                map_space_->SizeOfObjects() / KB, map_space_->Available() / KB,
     360           0 :                map_space_->CommittedMemory() / KB);
     361             :   PrintIsolate(isolate_,
     362             :                "Large object space,     used: %6" PRIuS
     363             :                " KB"
     364             :                ", available: %6" PRIuS
     365             :                " KB"
     366             :                ", committed: %6" PRIuS " KB\n",
     367           0 :                lo_space_->SizeOfObjects() / KB, lo_space_->Available() / KB,
     368           0 :                lo_space_->CommittedMemory() / KB);
     369             :   PrintIsolate(isolate_,
     370             :                "Code large object space,     used: %6" PRIuS
     371             :                " KB"
     372             :                ", available: %6" PRIuS
     373             :                " KB"
     374             :                ", committed: %6" PRIuS " KB\n",
     375           0 :                lo_space_->SizeOfObjects() / KB,
     376           0 :                code_lo_space_->Available() / KB,
     377           0 :                code_lo_space_->CommittedMemory() / KB);
     378             :   PrintIsolate(isolate_,
     379             :                "All spaces,             used: %6" PRIuS
     380             :                " KB"
     381             :                ", available: %6" PRIuS
     382             :                " KB"
      383             :                ", committed: %6" PRIuS " KB\n",
     384           0 :                this->SizeOfObjects() / KB, this->Available() / KB,
     385           0 :                this->CommittedMemory() / KB);
     386             :   PrintIsolate(isolate_,
     387             :                "Unmapper buffering %zu chunks of committed: %6" PRIuS " KB\n",
     388             :                memory_allocator()->unmapper()->NumberOfCommittedChunks(),
     389           0 :                CommittedMemoryOfUnmapper() / KB);
     390             :   PrintIsolate(isolate_, "External memory reported: %6" PRId64 " KB\n",
     391           0 :                isolate()->isolate_data()->external_memory_ / KB);
     392             :   PrintIsolate(isolate_, "Backing store memory: %6" PRIuS " KB\n",
     393           0 :                backing_store_bytes_ / KB);
     394             :   PrintIsolate(isolate_, "External memory global %zu KB\n",
     395           0 :                external_memory_callback_() / KB);
     396             :   PrintIsolate(isolate_, "Total time spent in GC  : %.1f ms\n",
     397           0 :                total_gc_time_ms_);
     398             : }
     399             : 
     400           0 : void Heap::ReportStatisticsAfterGC() {
     401           0 :   for (int i = 0; i < static_cast<int>(v8::Isolate::kUseCounterFeatureCount);
     402             :        ++i) {
     403           0 :     int count = deferred_counters_[i];
     404           0 :     deferred_counters_[i] = 0;
     405           0 :     while (count > 0) {
     406           0 :       count--;
     407           0 :       isolate()->CountUsage(static_cast<v8::Isolate::UseCounterFeature>(i));
     408             :     }
     409             :   }
     410           0 : }
     411             : 
     412        8133 : void Heap::AddHeapObjectAllocationTracker(
     413             :     HeapObjectAllocationTracker* tracker) {
     414        8133 :   if (allocation_trackers_.empty()) DisableInlineAllocation();
     415        8133 :   allocation_trackers_.push_back(tracker);
     416        8133 : }
     417             : 
     418        8128 : void Heap::RemoveHeapObjectAllocationTracker(
     419             :     HeapObjectAllocationTracker* tracker) {
     420             :   allocation_trackers_.erase(std::remove(allocation_trackers_.begin(),
     421             :                                          allocation_trackers_.end(), tracker),
     422        8128 :                              allocation_trackers_.end());
     423        8128 :   if (allocation_trackers_.empty()) EnableInlineAllocation();
     424        8128 : }
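
The erase(std::remove(...), end()) call above is the standard C++ erase-remove
idiom: std::remove shifts the surviving trackers to the front of the vector and
returns the new logical end, which erase() then trims off. Note the symmetry
with AddHeapObjectAllocationTracker: inline (bump-pointer) allocation is
disabled while at least one tracker is installed, presumably so allocations
take a path the trackers can observe.
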
     425             : 
     426           0 : void Heap::AddRetainingPathTarget(Handle<HeapObject> object,
     427             :                                   RetainingPathOption option) {
     428           0 :   if (!FLAG_track_retaining_path) {
     429           0 :     PrintF("Retaining path tracking requires --track-retaining-path\n");
     430             :   } else {
     431           0 :     Handle<WeakArrayList> array(retaining_path_targets(), isolate());
     432           0 :     int index = array->length();
     433             :     array = WeakArrayList::AddToEnd(isolate(), array,
     434           0 :                                     MaybeObjectHandle::Weak(object));
     435           0 :     set_retaining_path_targets(*array);
     436             :     DCHECK_EQ(array->length(), index + 1);
     437           0 :     retaining_path_target_option_[index] = option;
     438             :   }
     439           0 : }
     440             : 
     441           0 : bool Heap::IsRetainingPathTarget(HeapObject object,
     442             :                                  RetainingPathOption* option) {
     443           0 :   WeakArrayList targets = retaining_path_targets();
     444             :   int length = targets->length();
     445             :   MaybeObject object_to_check = HeapObjectReference::Weak(object);
     446           0 :   for (int i = 0; i < length; i++) {
     447           0 :     MaybeObject target = targets->Get(i);
     448             :     DCHECK(target->IsWeakOrCleared());
     449           0 :     if (target == object_to_check) {
     450             :       DCHECK(retaining_path_target_option_.count(i));
     451           0 :       *option = retaining_path_target_option_[i];
     452           0 :       return true;
     453             :     }
     454             :   }
     455           0 :   return false;
     456             : }
     457             : 
     458           0 : void Heap::PrintRetainingPath(HeapObject target, RetainingPathOption option) {
     459           0 :   PrintF("\n\n\n");
     460           0 :   PrintF("#################################################\n");
     461           0 :   PrintF("Retaining path for %p:\n", reinterpret_cast<void*>(target->ptr()));
     462           0 :   HeapObject object = target;
     463             :   std::vector<std::pair<HeapObject, bool>> retaining_path;
     464             :   Root root = Root::kUnknown;
     465             :   bool ephemeron = false;
     466             :   while (true) {
     467           0 :     retaining_path.push_back(std::make_pair(object, ephemeron));
     468           0 :     if (option == RetainingPathOption::kTrackEphemeronPath &&
     469             :         ephemeron_retainer_.count(object)) {
     470           0 :       object = ephemeron_retainer_[object];
     471             :       ephemeron = true;
     472           0 :     } else if (retainer_.count(object)) {
     473           0 :       object = retainer_[object];
     474             :       ephemeron = false;
     475             :     } else {
     476           0 :       if (retaining_root_.count(object)) {
     477           0 :         root = retaining_root_[object];
     478             :       }
     479             :       break;
     480             :     }
     481             :   }
     482           0 :   int distance = static_cast<int>(retaining_path.size());
     483           0 :   for (auto node : retaining_path) {
     484           0 :     HeapObject object = node.first;
     485           0 :     bool ephemeron = node.second;
     486           0 :     PrintF("\n");
     487           0 :     PrintF("^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n");
     488             :     PrintF("Distance from root %d%s: ", distance,
     489           0 :            ephemeron ? " (ephemeron)" : "");
     490           0 :     object->ShortPrint();
     491           0 :     PrintF("\n");
     492             : #ifdef OBJECT_PRINT
     493             :     object->Print();
     494             :     PrintF("\n");
     495             : #endif
     496           0 :     --distance;
     497             :   }
     498           0 :   PrintF("\n");
     499           0 :   PrintF("^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n");
     500           0 :   PrintF("Root: %s\n", RootVisitor::RootName(root));
     501           0 :   PrintF("-------------------------------------------------\n");
     502           0 : }
     503             : 
     504           0 : void Heap::AddRetainer(HeapObject retainer, HeapObject object) {
     505           0 :   if (retainer_.count(object)) return;
     506           0 :   retainer_[object] = retainer;
     507           0 :   RetainingPathOption option = RetainingPathOption::kDefault;
     508           0 :   if (IsRetainingPathTarget(object, &option)) {
     509             :     // Check if the retaining path was already printed in
     510             :     // AddEphemeronRetainer().
     511           0 :     if (ephemeron_retainer_.count(object) == 0 ||
     512           0 :         option == RetainingPathOption::kDefault) {
     513           0 :       PrintRetainingPath(object, option);
     514             :     }
     515             :   }
     516             : }
     517             : 
     518           0 : void Heap::AddEphemeronRetainer(HeapObject retainer, HeapObject object) {
     519           0 :   if (ephemeron_retainer_.count(object)) return;
     520           0 :   ephemeron_retainer_[object] = retainer;
     521           0 :   RetainingPathOption option = RetainingPathOption::kDefault;
     522           0 :   if (IsRetainingPathTarget(object, &option) &&
     523           0 :       option == RetainingPathOption::kTrackEphemeronPath) {
     524             :     // Check if the retaining path was already printed in AddRetainer().
     525           0 :     if (retainer_.count(object) == 0) {
     526           0 :       PrintRetainingPath(object, option);
     527             :     }
     528             :   }
     529             : }
     530             : 
     531           0 : void Heap::AddRetainingRoot(Root root, HeapObject object) {
     532           0 :   if (retaining_root_.count(object)) return;
     533           0 :   retaining_root_[object] = root;
     534           0 :   RetainingPathOption option = RetainingPathOption::kDefault;
     535           0 :   if (IsRetainingPathTarget(object, &option)) {
     536           0 :     PrintRetainingPath(object, option);
     537             :   }
     538             : }
     539             : 
     540           0 : void Heap::IncrementDeferredCount(v8::Isolate::UseCounterFeature feature) {
     541           0 :   deferred_counters_[feature]++;
     542           0 : }
     543             : 
     544       26727 : bool Heap::UncommitFromSpace() { return new_space_->UncommitFromSpace(); }
     545             : 
     546      107086 : void Heap::GarbageCollectionPrologue() {
     547      428344 :   TRACE_GC(tracer(), GCTracer::Scope::HEAP_PROLOGUE);
     548             :   {
     549             :     AllowHeapAllocation for_the_first_part_of_prologue;
     550      107086 :     gc_count_++;
     551             : 
     552             : #ifdef VERIFY_HEAP
     553             :     if (FLAG_verify_heap) {
     554             :       Verify();
     555             :     }
     556             : #endif
     557             :   }
     558             : 
     559             :   // Reset GC statistics.
     560      107086 :   promoted_objects_size_ = 0;
     561      107086 :   previous_semi_space_copied_object_size_ = semi_space_copied_object_size_;
     562      107086 :   semi_space_copied_object_size_ = 0;
     563      107086 :   nodes_died_in_new_space_ = 0;
     564      107086 :   nodes_copied_in_new_space_ = 0;
     565      107086 :   nodes_promoted_ = 0;
     566             : 
     567      107086 :   UpdateMaximumCommitted();
     568             : 
     569             : #ifdef DEBUG
     570             :   DCHECK(!AllowHeapAllocation::IsAllowed() && gc_state_ == NOT_IN_GC);
     571             : 
     572             :   if (FLAG_gc_verbose) Print();
     573             : #endif  // DEBUG
     574             : 
     575      214172 :   if (new_space_->IsAtMaximumCapacity()) {
     576        3018 :     maximum_size_scavenges_++;
     577             :   } else {
     578      104068 :     maximum_size_scavenges_ = 0;
     579             :   }
     580      107086 :   CheckNewSpaceExpansionCriteria();
     581             :   UpdateNewSpaceAllocationCounter();
     582      107086 :   if (FLAG_track_retaining_path) {
     583             :     retainer_.clear();
     584             :     ephemeron_retainer_.clear();
     585             :     retaining_root_.clear();
     586      107086 :   }
     587      107086 : }
     588             : 
     589      215018 : size_t Heap::SizeOfObjects() {
     590             :   size_t total = 0;
     591             : 
     592     6412347 :   for (SpaceIterator it(this); it.has_next();) {
     593     5699864 :     total += it.next()->SizeOfObjects();
     594             :   }
     595      215018 :   return total;
     596             : }
     597             : 
     598             : 
     599          40 : const char* Heap::GetSpaceName(int idx) {
     600          40 :   switch (idx) {
     601             :     case NEW_SPACE:
     602             :       return "new_space";
     603             :     case OLD_SPACE:
     604           5 :       return "old_space";
     605             :     case MAP_SPACE:
     606           5 :       return "map_space";
     607             :     case CODE_SPACE:
     608           5 :       return "code_space";
     609             :     case LO_SPACE:
     610           5 :       return "large_object_space";
     611             :     case NEW_LO_SPACE:
     612           5 :       return "new_large_object_space";
     613             :     case CODE_LO_SPACE:
     614           5 :       return "code_large_object_space";
     615             :     case RO_SPACE:
     616           5 :       return "read_only_space";
     617             :     default:
     618           0 :       UNREACHABLE();
     619             :   }
     620             :   return nullptr;
     621             : }
     622             : 
     623      123179 : void Heap::MergeAllocationSitePretenuringFeedback(
     624             :     const PretenuringFeedbackMap& local_pretenuring_feedback) {
     625      123179 :   AllocationSite site;
     626      329317 :   for (auto& site_and_count : local_pretenuring_feedback) {
     627       82959 :     site = site_and_count.first;
     628             :     MapWord map_word = site_and_count.first->map_word();
     629       82959 :     if (map_word.IsForwardingAddress()) {
     630         492 :       site = AllocationSite::cast(map_word.ToForwardingAddress());
     631             :     }
     632             : 
      633             :     // We have not validated the allocation site yet, since we have not
      634             :     // dereferenced the site while collecting the information.
     635             :     // This is an inlined check of AllocationMemento::IsValid.
     636      165918 :     if (!site->IsAllocationSite() || site->IsZombie()) continue;
     637             : 
     638       82901 :     const int value = static_cast<int>(site_and_count.second);
     639             :     DCHECK_LT(0, value);
     640       82901 :     if (site->IncrementMementoFoundCount(value)) {
     641             :       // For sites in the global map the count is accessed through the site.
     642        2930 :       global_pretenuring_feedback_.insert(std::make_pair(site, 0));
     643             :     }
     644             :   }
     645      123179 : }
     646             : 
     647       31943 : void Heap::AddAllocationObserversToAllSpaces(
     648      255544 :     AllocationObserver* observer, AllocationObserver* new_space_observer) {
     649             :   DCHECK(observer && new_space_observer);
     650             : 
     651      287487 :   for (SpaceIterator it(this); it.has_next();) {
     652             :     Space* space = it.next();
     653      255544 :     if (space == new_space()) {
     654       31943 :       space->AddAllocationObserver(new_space_observer);
     655             :     } else {
     656      223601 :       space->AddAllocationObserver(observer);
     657             :     }
     658             :   }
     659       31943 : }
     660             : 
     661          59 : void Heap::RemoveAllocationObserversFromAllSpaces(
     662         472 :     AllocationObserver* observer, AllocationObserver* new_space_observer) {
     663             :   DCHECK(observer && new_space_observer);
     664             : 
     665         531 :   for (SpaceIterator it(this); it.has_next();) {
     666             :     Space* space = it.next();
     667         472 :     if (space == new_space()) {
     668          59 :       space->RemoveAllocationObserver(new_space_observer);
     669             :     } else {
     670         413 :       space->RemoveAllocationObserver(observer);
     671             :     }
     672             :   }
     673          59 : }
     674             : 
     675             : class Heap::SkipStoreBufferScope {
     676             :  public:
     677             :   explicit SkipStoreBufferScope(StoreBuffer* store_buffer)
     678             :       : store_buffer_(store_buffer) {
     679      107086 :     store_buffer_->MoveAllEntriesToRememberedSet();
     680      107086 :     store_buffer_->SetMode(StoreBuffer::IN_GC);
     681             :   }
     682             : 
     683             :   ~SkipStoreBufferScope() {
     684             :     DCHECK(store_buffer_->Empty());
     685      107086 :     store_buffer_->SetMode(StoreBuffer::NOT_IN_GC);
     686             :   }
     687             : 
     688             :  private:
     689             :   StoreBuffer* store_buffer_;
     690             : };
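
SkipStoreBufferScope is an RAII guard: the constructor drains the store buffer
into the remembered set and switches the buffer to IN_GC mode, and the
destructor restores NOT_IN_GC mode on every exit path. A hypothetical use (the
surrounding function is illustrative, not from heap.cc):

    void Heap::SomeCollectionPhase() {
      SkipStoreBufferScope skip_scope(store_buffer());
      // ... GC work during which writes must not go through the buffer ...
    }  // NOT_IN_GC mode is restored here, even on early return.
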
     691             : 
     692             : namespace {
     693        1355 : inline bool MakePretenureDecision(
     694             :     AllocationSite site, AllocationSite::PretenureDecision current_decision,
     695             :     double ratio, bool maximum_size_scavenge) {
     696             :   // Here we just allow state transitions from undecided or maybe tenure
     697             :   // to don't tenure, maybe tenure, or tenure.
     698        2710 :   if ((current_decision == AllocationSite::kUndecided ||
     699        1355 :        current_decision == AllocationSite::kMaybeTenure)) {
     700        1077 :     if (ratio >= AllocationSite::kPretenureRatio) {
     701             :       // We just transition into tenure state when the semi-space was at
     702             :       // maximum capacity.
      703             :       // We only transition into the tenure state when the semi-space was
      704             :       // at maximum capacity.
     705             :         site->set_pretenure_decision(AllocationSite::kTenure);
     706             :         // Currently we just need to deopt when we make a state transition to
     707             :         // tenure.
     708          48 :         return true;
     709             :       }
     710             :       site->set_pretenure_decision(AllocationSite::kMaybeTenure);
     711             :     } else {
     712             :       site->set_pretenure_decision(AllocationSite::kDontTenure);
     713             :     }
     714             :   }
     715             :   return false;
     716             : }
     717             : 
     718        1355 : inline bool DigestPretenuringFeedback(Isolate* isolate, AllocationSite site,
     719             :                                       bool maximum_size_scavenge) {
     720             :   bool deopt = false;
     721             :   int create_count = site->memento_create_count();
     722             :   int found_count = site->memento_found_count();
     723             :   bool minimum_mementos_created =
     724        1355 :       create_count >= AllocationSite::kPretenureMinimumCreated;
     725           0 :   double ratio = minimum_mementos_created || FLAG_trace_pretenuring_statistics
     726        1355 :                      ? static_cast<double>(found_count) / create_count
     727        2710 :                      : 0.0;
     728             :   AllocationSite::PretenureDecision current_decision =
     729             :       site->pretenure_decision();
     730             : 
     731        1355 :   if (minimum_mementos_created) {
     732             :     deopt = MakePretenureDecision(site, current_decision, ratio,
     733        1355 :                                   maximum_size_scavenge);
     734             :   }
     735             : 
     736        1355 :   if (FLAG_trace_pretenuring_statistics) {
     737             :     PrintIsolate(isolate,
     738             :                  "pretenuring: AllocationSite(%p): (created, found, ratio) "
     739             :                  "(%d, %d, %f) %s => %s\n",
     740             :                  reinterpret_cast<void*>(site.ptr()), create_count, found_count,
     741             :                  ratio, site->PretenureDecisionName(current_decision),
     742           0 :                  site->PretenureDecisionName(site->pretenure_decision()));
     743             :   }
     744             : 
     745             :   // Clear feedback calculation fields until the next gc.
     746             :   site->set_memento_found_count(0);
     747             :   site->set_memento_create_count(0);
     748        1355 :   return deopt;
     749             : }
     750             : }  // namespace
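
A worked example of the decision logic above (the concrete values of
AllocationSite::kPretenureMinimumCreated and kPretenureRatio live elsewhere
and are only assumed here): suppose a site recorded memento_create_count ==
100 and memento_found_count == 90, and that 100 meets
kPretenureMinimumCreated. Then

    ratio = 90.0 / 100 = 0.9

and if 0.9 >= kPretenureRatio, an undecided site becomes kMaybeTenure, or
moves straight to kTenure (requesting deoptimization of dependent code) when
the last scavenge ran with the semi-space at maximum capacity.
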
     751             : 
     752           0 : void Heap::RemoveAllocationSitePretenuringFeedback(AllocationSite site) {
     753             :   global_pretenuring_feedback_.erase(site);
     754           0 : }
     755             : 
     756           0 : bool Heap::DeoptMaybeTenuredAllocationSites() {
     757      214172 :   return new_space_->IsAtMaximumCapacity() && maximum_size_scavenges_ == 0;
     758             : }
     759             : 
     760      214172 : void Heap::ProcessPretenuringFeedback() {
     761      107086 :   bool trigger_deoptimization = false;
     762      107086 :   if (FLAG_allocation_site_pretenuring) {
     763             :     int tenure_decisions = 0;
     764             :     int dont_tenure_decisions = 0;
     765             :     int allocation_mementos_found = 0;
     766      107086 :     int allocation_sites = 0;
     767             :     int active_allocation_sites = 0;
     768             : 
     769      107086 :     AllocationSite site;
     770             : 
     771             :     // Step 1: Digest feedback for recorded allocation sites.
     772             :     bool maximum_size_scavenge = MaximumSizeScavenge();
     773      215527 :     for (auto& site_and_count : global_pretenuring_feedback_) {
     774        1355 :       allocation_sites++;
     775        1355 :       site = site_and_count.first;
      776             :       // The count is always accessed through the site.
     777             :       DCHECK_EQ(0, site_and_count.second);
     778             :       int found_count = site->memento_found_count();
     779             :       // An entry in the storage does not imply that the count is > 0 because
     780             :       // allocation sites might have been reset due to too many objects dying
     781             :       // in old space.
     782        1355 :       if (found_count > 0) {
     783             :         DCHECK(site->IsAllocationSite());
     784        1355 :         active_allocation_sites++;
     785        1355 :         allocation_mementos_found += found_count;
     786        1355 :         if (DigestPretenuringFeedback(isolate_, site, maximum_size_scavenge)) {
     787          48 :           trigger_deoptimization = true;
     788             :         }
     789        1355 :         if (site->GetPretenureMode() == TENURED) {
     790          55 :           tenure_decisions++;
     791             :         } else {
     792        1300 :           dont_tenure_decisions++;
     793             :         }
     794             :       }
     795             :     }
     796             : 
     797             :     // Step 2: Deopt maybe tenured allocation sites if necessary.
     798             :     bool deopt_maybe_tenured = DeoptMaybeTenuredAllocationSites();
     799      107086 :     if (deopt_maybe_tenured) {
     800             :       ForeachAllocationSite(
     801             :           allocation_sites_list(),
     802             :           [&allocation_sites, &trigger_deoptimization](AllocationSite site) {
     803             :             DCHECK(site->IsAllocationSite());
     804         300 :             allocation_sites++;
     805         300 :             if (site->IsMaybeTenure()) {
     806             :               site->set_deopt_dependent_code(true);
     807          20 :               trigger_deoptimization = true;
     808             :             }
     809         256 :           });
     810             :     }
     811             : 
     812      107086 :     if (trigger_deoptimization) {
     813          35 :       isolate_->stack_guard()->RequestDeoptMarkedAllocationSites();
     814             :     }
     815             : 
     816      107086 :     if (FLAG_trace_pretenuring_statistics &&
     817           0 :         (allocation_mementos_found > 0 || tenure_decisions > 0 ||
     818             :          dont_tenure_decisions > 0)) {
     819             :       PrintIsolate(isolate(),
     820             :                    "pretenuring: deopt_maybe_tenured=%d visited_sites=%d "
     821             :                    "active_sites=%d "
     822             :                    "mementos=%d tenured=%d not_tenured=%d\n",
     823             :                    deopt_maybe_tenured ? 1 : 0, allocation_sites,
     824             :                    active_allocation_sites, allocation_mementos_found,
     825           0 :                    tenure_decisions, dont_tenure_decisions);
     826             :     }
     827             : 
     828             :     global_pretenuring_feedback_.clear();
     829             :     global_pretenuring_feedback_.reserve(kInitialFeedbackCapacity);
     830             :   }
     831      107086 : }
     832             : 
     833      324342 : void Heap::InvalidateCodeDeoptimizationData(Code code) {
     834             :   MemoryChunk* chunk = MemoryChunk::FromHeapObject(code);
     835      324342 :   CodePageMemoryModificationScope modification_scope(chunk);
     836      324342 :   code->set_deoptimization_data(ReadOnlyRoots(this).empty_fixed_array());
     837      324342 : }
     838             : 
     839          34 : void Heap::DeoptMarkedAllocationSites() {
     840             :   // TODO(hpayer): If iterating over the allocation sites list becomes a
     841             :   // performance issue, use a cache data structure in heap instead.
     842             : 
     843         133 :   ForeachAllocationSite(allocation_sites_list(), [this](AllocationSite site) {
     844         133 :     if (site->deopt_dependent_code()) {
     845             :       site->dependent_code()->MarkCodeForDeoptimization(
     846          66 :           isolate_, DependentCode::kAllocationSiteTenuringChangedGroup);
     847             :       site->set_deopt_dependent_code(false);
     848             :     }
     849         201 :   });
     850             : 
     851          34 :   Deoptimizer::DeoptimizeMarkedCode(isolate_);
     852          34 : }
     853             : 
     854             : 
     855     3226480 : void Heap::GarbageCollectionEpilogue() {
     856      428344 :   TRACE_GC(tracer(), GCTracer::Scope::HEAP_EPILOGUE);
     857      107086 :   if (Heap::ShouldZapGarbage() || FLAG_clear_free_memory) {
     858           0 :     ZapFromSpace();
     859             :   }
     860             : 
     861             : #ifdef VERIFY_HEAP
     862             :   if (FLAG_verify_heap) {
     863             :     Verify();
     864             :   }
     865             : #endif
     866             : 
     867             :   AllowHeapAllocation for_the_rest_of_the_epilogue;
     868             : 
     869             : #ifdef DEBUG
     870             :   if (FLAG_print_global_handles) isolate_->global_handles()->Print();
     871             :   if (FLAG_print_handles) PrintHandles();
     872             :   if (FLAG_gc_verbose) Print();
     873             :   if (FLAG_code_stats) ReportCodeStatistics("After GC");
     874             :   if (FLAG_check_handle_count) CheckHandleCount();
     875             : #endif
     876             : 
     877      107086 :   UpdateMaximumCommitted();
     878             : 
     879             :   isolate_->counters()->alive_after_last_gc()->Set(
     880      214172 :       static_cast<int>(SizeOfObjects()));
     881             : 
     882             :   isolate_->counters()->string_table_capacity()->Set(
     883      214172 :       string_table()->Capacity());
     884             :   isolate_->counters()->number_of_symbols()->Set(
     885      214172 :       string_table()->NumberOfElements());
     886             : 
     887      107086 :   if (CommittedMemory() > 0) {
     888             :     isolate_->counters()->external_fragmentation_total()->AddSample(
     889      214172 :         static_cast<int>(100 - (SizeOfObjects() * 100.0) / CommittedMemory()));
     890             : 
     891             :     isolate_->counters()->heap_sample_total_committed()->AddSample(
     892      214172 :         static_cast<int>(CommittedMemory() / KB));
     893             :     isolate_->counters()->heap_sample_total_used()->AddSample(
     894      214172 :         static_cast<int>(SizeOfObjects() / KB));
     895             :     isolate_->counters()->heap_sample_map_space_committed()->AddSample(
     896      214172 :         static_cast<int>(map_space()->CommittedMemory() / KB));
     897             :     isolate_->counters()->heap_sample_code_space_committed()->AddSample(
     898      214172 :         static_cast<int>(code_space()->CommittedMemory() / KB));
     899             : 
     900             :     isolate_->counters()->heap_sample_maximum_committed()->AddSample(
     901      214172 :         static_cast<int>(MaximumCommittedMemory() / KB));
     902             :   }
     903             : 
     904             : #define UPDATE_COUNTERS_FOR_SPACE(space)                \
     905             :   isolate_->counters()->space##_bytes_available()->Set( \
     906             :       static_cast<int>(space()->Available()));          \
     907             :   isolate_->counters()->space##_bytes_committed()->Set( \
     908             :       static_cast<int>(space()->CommittedMemory()));    \
     909             :   isolate_->counters()->space##_bytes_used()->Set(      \
     910             :       static_cast<int>(space()->SizeOfObjects()));
     911             : #define UPDATE_FRAGMENTATION_FOR_SPACE(space)                          \
     912             :   if (space()->CommittedMemory() > 0) {                                \
     913             :     isolate_->counters()->external_fragmentation_##space()->AddSample( \
     914             :         static_cast<int>(100 -                                         \
     915             :                          (space()->SizeOfObjects() * 100.0) /          \
     916             :                              space()->CommittedMemory()));             \
     917             :   }
     918             : #define UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(space) \
     919             :   UPDATE_COUNTERS_FOR_SPACE(space)                         \
     920             :   UPDATE_FRAGMENTATION_FOR_SPACE(space)
     921             : 
     922      642516 :   UPDATE_COUNTERS_FOR_SPACE(new_space)
     923     1070860 :   UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(old_space)
     924     1070860 :   UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(code_space)
     925     1070860 :   UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(map_space)
     926      770452 :   UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(lo_space)
     927             : #undef UPDATE_COUNTERS_FOR_SPACE
     928             : #undef UPDATE_FRAGMENTATION_FOR_SPACE
     929             : #undef UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE
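
  // For reference, UPDATE_COUNTERS_FOR_SPACE(new_space) above expands to three
  // counter updates of the form:
  //
  //   isolate_->counters()->new_space_bytes_available()->Set(
  //       static_cast<int>(new_space()->Available()));
  //   isolate_->counters()->new_space_bytes_committed()->Set(
  //       static_cast<int>(new_space()->CommittedMemory()));
  //   isolate_->counters()->new_space_bytes_used()->Set(
  //       static_cast<int>(new_space()->SizeOfObjects()));
  //
  // The *_AND_FRAGMENTATION_* variant additionally samples
  // 100 - SizeOfObjects() * 100.0 / CommittedMemory() for the space.
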
     930             : 
     931             : #ifdef DEBUG
     932             :   ReportStatisticsAfterGC();
     933             : #endif  // DEBUG
     934             : 
     935      107086 :   last_gc_time_ = MonotonicallyIncreasingTimeInMs();
     936             : 
     937             :   {
     938      428344 :     TRACE_GC(tracer(), GCTracer::Scope::HEAP_EPILOGUE_REDUCE_NEW_SPACE);
     939      214172 :     ReduceNewSpaceSize();
     940             :   }
     941             : 
     942      107086 :   if (FLAG_harmony_weak_refs) {
     943             :     // TODO(marja): (spec): The exact condition on when to schedule the cleanup
     944             :     // task is unclear. This version schedules the cleanup task for a factory
     945             :     // whenever the GC has discovered new dirty WeakCells for it (at that point
     946             :     // it might have leftover dirty WeakCells since an earlier invocation of the
     947             :     // cleanup function didn't iterate through them). See
     948             :     // https://github.com/tc39/proposal-weakrefs/issues/34
     949             :     HandleScope handle_scope(isolate());
     950         585 :     while (
     951        1170 :         !isolate()->heap()->dirty_js_weak_factories()->IsUndefined(isolate())) {
     952             :       // Enqueue one microtask per JSWeakFactory.
     953             :       Handle<JSWeakFactory> weak_factory(
     954             :           JSWeakFactory::cast(isolate()->heap()->dirty_js_weak_factories()),
     955         198 :           isolate());
     956         198 :       isolate()->heap()->set_dirty_js_weak_factories(weak_factory->next());
     957         396 :       weak_factory->set_next(ReadOnlyRoots(isolate()).undefined_value());
     958         396 :       Handle<NativeContext> context(weak_factory->native_context(), isolate());
     959             :       // GC has no native context, but we use the creation context of the
      960             :       // JSWeakFactory for the EnqueueTask operation. This is consistent with
      961             :       // the Promise implementation, assuming the JSWeakFactory creation context is the
     962             :       // "caller's context" in promise functions. An alternative would be to use
     963             :       // the native context of the cleanup function. This difference shouldn't
     964             :       // be observable from JavaScript, since we enter the native context of the
     965             :       // cleanup function before calling it. TODO(marja): Revisit when the spec
     966             :       // clarifies this. See also
     967             :       // https://github.com/tc39/proposal-weakrefs/issues/38 .
     968             :       Handle<WeakFactoryCleanupJobTask> task =
     969         198 :           isolate()->factory()->NewWeakFactoryCleanupJobTask(weak_factory);
     970         396 :       context->microtask_queue()->EnqueueMicrotask(*task);
     971             :     }
     972      107086 :   }
     973      107086 : }
     974             : 
     975             : class GCCallbacksScope {
     976             :  public:
     977             :   explicit GCCallbacksScope(Heap* heap) : heap_(heap) {
     978      262208 :     heap_->gc_callbacks_depth_++;
     979             :   }
     980      262208 :   ~GCCallbacksScope() { heap_->gc_callbacks_depth_--; }
     981             : 
     982      107086 :   bool CheckReenter() { return heap_->gc_callbacks_depth_ == 1; }
     983             : 
     984             :  private:
     985             :   Heap* heap_;
     986             : };
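
GCCallbacksScope is a counting guard: the constructor bumps
gc_callbacks_depth_, the destructor restores it, and CheckReenter() is true
only in the outermost scope, so a GC triggered from inside a callback does not
re-invoke the callbacks. The same idiom in isolation, with illustrative names:

// Increment a depth counter on entry, decrement on exit, and report
// "outermost" only at depth one (cf. CheckReenter()).
struct CallbackGuard {
  explicit CallbackGuard(int* depth) : depth_(depth) { ++*depth_; }
  ~CallbackGuard() { --*depth_; }
  bool IsOutermost() const { return *depth_ == 1; }

 private:
  int* depth_;
};

// Nested calls see depth > 1, so the callbacks fire only once per outer GC.
void MaybeRunCallbacks(int* depth, void (*callbacks)()) {
  CallbackGuard guard(depth);
  if (guard.IsOutermost()) callbacks();
}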
     987             : 
     988             : 
     989       37000 : void Heap::HandleGCRequest() {
     990       18500 :   if (FLAG_stress_scavenge > 0 && stress_scavenge_observer_->HasRequestedGC()) {
     991             :     CollectAllGarbage(NEW_SPACE, GarbageCollectionReason::kTesting);
     992           0 :     stress_scavenge_observer_->RequestedGCDone();
     993       18500 :   } else if (HighMemoryPressure()) {
     994             :     incremental_marking()->reset_request_type();
     995           5 :     CheckMemoryPressure();
     996       18495 :   } else if (incremental_marking()->request_type() ==
     997             :              IncrementalMarking::COMPLETE_MARKING) {
     998             :     incremental_marking()->reset_request_type();
     999             :     CollectAllGarbage(current_gc_flags_,
    1000             :                       GarbageCollectionReason::kFinalizeMarkingViaStackGuard,
    1001        6901 :                       current_gc_callback_flags_);
    1002       11594 :   } else if (incremental_marking()->request_type() ==
    1003       11559 :                  IncrementalMarking::FINALIZATION &&
    1004       23153 :              incremental_marking()->IsMarking() &&
    1005       11559 :              !incremental_marking()->finalize_marking_completed()) {
    1006             :     incremental_marking()->reset_request_type();
    1007             :     FinalizeIncrementalMarkingIncrementally(
    1008       11558 :         GarbageCollectionReason::kFinalizeMarkingViaStackGuard);
    1009             :   }
    1010       18500 : }
    1011             : 
    1012             : 
    1013           0 : void Heap::ScheduleIdleScavengeIfNeeded(int bytes_allocated) {
    1014             :   DCHECK(FLAG_idle_time_scavenge);
    1015             :   DCHECK_NOT_NULL(scavenge_job_);
    1016       32432 :   scavenge_job_->ScheduleIdleTaskIfNeeded(this, bytes_allocated);
    1017           0 : }
    1018             : 
    1019      221671 : TimedHistogram* Heap::GCTypePriorityTimer(GarbageCollector collector) {
    1020      107086 :   if (IsYoungGenerationCollector(collector)) {
    1021      107086 :     if (isolate_->IsIsolateInBackground()) {
    1022           0 :       return isolate_->counters()->gc_scavenger_background();
    1023             :     }
    1024       23594 :     return isolate_->counters()->gc_scavenger_foreground();
    1025             :   } else {
    1026       83492 :     if (!incremental_marking()->IsStopped()) {
    1027       31093 :       if (ShouldReduceMemory()) {
    1028        5748 :         if (isolate_->IsIsolateInBackground()) {
    1029           0 :           return isolate_->counters()->gc_finalize_reduce_memory_background();
    1030             :         }
    1031        2874 :         return isolate_->counters()->gc_finalize_reduce_memory_foreground();
    1032             :       } else {
    1033       56438 :         if (isolate_->IsIsolateInBackground()) {
    1034           0 :           return isolate_->counters()->gc_finalize_background();
    1035             :         }
    1036       28219 :         return isolate_->counters()->gc_finalize_foreground();
    1037             :       }
    1038             :     } else {
    1039      104798 :       if (isolate_->IsIsolateInBackground()) {
    1040           0 :         return isolate_->counters()->gc_compactor_background();
    1041             :       }
    1042       52399 :       return isolate_->counters()->gc_compactor_foreground();
    1043             :     }
    1044             :   }
    1045             : }
    1046             : 
    1047      221671 : TimedHistogram* Heap::GCTypeTimer(GarbageCollector collector) {
    1048      107086 :   if (IsYoungGenerationCollector(collector)) {
    1049       47188 :     return isolate_->counters()->gc_scavenger();
    1050             :   } else {
    1051       83492 :     if (!incremental_marking()->IsStopped()) {
    1052       31093 :       if (ShouldReduceMemory()) {
    1053        5748 :         return isolate_->counters()->gc_finalize_reduce_memory();
    1054             :       } else {
    1055       56438 :         return isolate_->counters()->gc_finalize();
    1056             :       }
    1057             :     } else {
    1058      104798 :       return isolate_->counters()->gc_compactor();
    1059             :     }
    1060             :   }
    1061             : }
    1062             : 
    1063        4267 : void Heap::CollectAllGarbage(int flags, GarbageCollectionReason gc_reason,
    1064             :                              const v8::GCCallbackFlags gc_callback_flags) {
    1065             :   // Since we are ignoring the return value, the exact choice of space does
    1066             :   // not matter, so long as we do not specify NEW_SPACE, which would not
    1067             :   // cause a full GC.
    1068             :   set_current_gc_flags(flags);
    1069       79315 :   CollectGarbage(OLD_SPACE, gc_reason, gc_callback_flags);
    1070             :   set_current_gc_flags(kNoGCFlags);
    1071        4267 : }
    1072             : 
    1073             : namespace {
    1074             : 
    1075             : intptr_t CompareWords(int size, HeapObject a, HeapObject b) {
    1076           0 :   int slots = size / kTaggedSize;
    1077             :   DCHECK_EQ(a->Size(), size);
    1078             :   DCHECK_EQ(b->Size(), size);
    1079           0 :   Tagged_t* slot_a = reinterpret_cast<Tagged_t*>(a->address());
    1080           0 :   Tagged_t* slot_b = reinterpret_cast<Tagged_t*>(b->address());
    1081           0 :   for (int i = 0; i < slots; i++) {
    1082           0 :     if (*slot_a != *slot_b) {
    1083           0 :       return *slot_a - *slot_b;
    1084             :     }
    1085           0 :     slot_a++;
    1086           0 :     slot_b++;
    1087             :   }
    1088             :   return 0;
    1089             : }
    1090             : 
    1091           0 : void ReportDuplicates(int size, std::vector<HeapObject>& objects) {
    1092           0 :   if (objects.size() == 0) return;
    1093             : 
    1094           0 :   sort(objects.begin(), objects.end(), [size](HeapObject a, HeapObject b) {
    1095           0 :     intptr_t c = CompareWords(size, a, b);
    1096           0 :     if (c != 0) return c < 0;
    1097           0 :     return a < b;
    1098           0 :   });
    1099             : 
    1100             :   std::vector<std::pair<int, HeapObject>> duplicates;
    1101           0 :   HeapObject current = objects[0];
    1102             :   int count = 1;
    1103           0 :   for (size_t i = 1; i < objects.size(); i++) {
    1104           0 :     if (CompareWords(size, current, objects[i]) == 0) {
    1105           0 :       count++;
    1106             :     } else {
    1107           0 :       if (count > 1) {
    1108           0 :         duplicates.push_back(std::make_pair(count - 1, current));
    1109             :       }
    1110             :       count = 1;
    1111           0 :       current = objects[i];
    1112             :     }
    1113             :   }
    1114           0 :   if (count > 1) {
    1115           0 :     duplicates.push_back(std::make_pair(count - 1, current));
    1116             :   }
    1117             : 
    1118           0 :   int threshold = FLAG_trace_duplicate_threshold_kb * KB;
    1119             : 
    1120           0 :   sort(duplicates.begin(), duplicates.end());
    1121           0 :   for (auto it = duplicates.rbegin(); it != duplicates.rend(); ++it) {
    1122           0 :     int duplicate_bytes = it->first * size;
    1123           0 :     if (duplicate_bytes < threshold) break;
    1124             :     PrintF("%d duplicates of size %d each (%dKB)\n", it->first, size,
    1125           0 :            duplicate_bytes / KB);
    1126           0 :     PrintF("Sample object: ");
    1127           0 :     it->second->Print();
    1128           0 :     PrintF("============================\n");
    1129             :   }
    1130             : }
    1131             : }  // anonymous namespace
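
ReportDuplicates above is the classic sort-then-scan idiom: order the objects
so byte-identical ones become adjacent, count each run, then sort the runs so
the biggest offenders are reported first. The same idiom over plain integers,
as an illustrative standalone sketch (in V8 the report is gated by the
--trace-duplicate-threshold-kb flag, as the surrounding code shows):

#include <algorithm>
#include <cstdio>
#include <utility>
#include <vector>

void ReportRuns(std::vector<int> items) {
  if (items.empty()) return;
  std::sort(items.begin(), items.end());  // equal values become adjacent
  std::vector<std::pair<int, int>> runs;  // (extra copies, value)
  int count = 1;
  for (size_t i = 1; i < items.size(); i++) {
    if (items[i] == items[i - 1]) {
      count++;
    } else {
      if (count > 1) runs.emplace_back(count - 1, items[i - 1]);
      count = 1;
    }
  }
  if (count > 1) runs.emplace_back(count - 1, items.back());
  std::sort(runs.begin(), runs.end());
  for (auto it = runs.rbegin(); it != runs.rend(); ++it) {  // largest first
    std::printf("%d duplicates of value %d\n", it->first, it->second);
  }
}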
    1132             : 
    1133        1367 : void Heap::CollectAllAvailableGarbage(GarbageCollectionReason gc_reason) {
    1134             :   // Since we are ignoring the return value, the exact choice of space does
    1135             :   // not matter, so long as we do not specify NEW_SPACE, which would not
    1136             :   // cause a full GC.
    1137             :   // Major GC would invoke weak handle callbacks on weakly reachable
    1138             :   // handles, but won't collect weakly reachable objects until next
    1139             :   // major GC.  Therefore if we collect aggressively and weak handle callback
    1140             :   // major GC.  Therefore if we collect aggressively and a weak handle callback
    1141             :   // garbage.
    1142             :   // Note: as weak callbacks can execute arbitrary code, we cannot
    1143             :   // hope that eventually there will be no weak callback invocations.
    1144             :   // Therefore stop recollecting after several attempts.
    1145        1367 :   if (gc_reason == GarbageCollectionReason::kLastResort) {
    1146          21 :     InvokeNearHeapLimitCallback();
    1147             :   }
    1148             :   RuntimeCallTimerScope runtime_timer(
    1149        1367 :       isolate(), RuntimeCallCounterId::kGC_Custom_AllAvailableGarbage);
    1150             : 
    1151             :   // The optimizing compiler may be unnecessarily holding on to memory.
    1152        1367 :   isolate()->AbortConcurrentOptimization(BlockingBehavior::kDontBlock);
    1153        1367 :   isolate()->ClearSerializerData();
    1154             :   set_current_gc_flags(kReduceMemoryFootprintMask);
    1155        1367 :   isolate_->compilation_cache()->Clear();
    1156             :   const int kMaxNumberOfAttempts = 7;
    1157             :   const int kMinNumberOfAttempts = 2;
    1158             :   const v8::GCCallbackFlags callback_flags =
    1159             :       gc_reason == GarbageCollectionReason::kLowMemoryNotification
    1160             :           ? v8::kGCCallbackFlagForced
    1161        1367 :           : v8::kGCCallbackFlagCollectAllAvailableGarbage;
    1162        2775 :   for (int attempt = 0; attempt < kMaxNumberOfAttempts; attempt++) {
    1163        2775 :     if (!CollectGarbage(OLD_SPACE, gc_reason, callback_flags) &&
    1164             :         attempt + 1 >= kMinNumberOfAttempts) {
    1165             :       break;
    1166             :     }
    1167             :   }
    1168             : 
    1169             :   set_current_gc_flags(kNoGCFlags);
    1170        1367 :   new_space_->Shrink();
    1171             :   UncommitFromSpace();
    1172        1367 :   EagerlyFreeExternalMemory();
    1173             : 
    1174        1367 :   if (FLAG_trace_duplicate_threshold_kb) {
    1175             :     std::map<int, std::vector<HeapObject>> objects_by_size;
    1176             :     PagedSpaces spaces(this);
    1177           0 :     for (PagedSpace* space = spaces.next(); space != nullptr;
    1178             :          space = spaces.next()) {
    1179           0 :       HeapObjectIterator it(space);
    1180           0 :       for (HeapObject obj = it.Next(); !obj.is_null(); obj = it.Next()) {
    1181           0 :         objects_by_size[obj->Size()].push_back(obj);
    1182             :       }
    1183             :     }
    1184             :     {
    1185           0 :       LargeObjectIterator it(lo_space());
    1186           0 :       for (HeapObject obj = it.Next(); !obj.is_null(); obj = it.Next()) {
    1187           0 :         objects_by_size[obj->Size()].push_back(obj);
    1188             :       }
    1189             :     }
    1190           0 :     for (auto it = objects_by_size.rbegin(); it != objects_by_size.rend();
    1191             :          ++it) {
    1192           0 :       ReportDuplicates(it->first, it->second);
    1193             :     }
    1194             :   }
    1195        1367 : }
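
The attempt loop above is a bounded fixpoint: run at least kMinNumberOfAttempts
collections (the first pass may invoke weak callbacks that free more memory),
stop early once a collection reports nothing further to free, and cap the total
because weak callbacks can keep creating garbage indefinitely. The control flow
in isolation; collect_garbage is an invented stand-in for CollectGarbage:

#include <functional>

void CollectAllAvailable(const std::function<bool()>& collect_garbage) {
  const int kMaxNumberOfAttempts = 7;
  const int kMinNumberOfAttempts = 2;
  for (int attempt = 0; attempt < kMaxNumberOfAttempts; attempt++) {
    // collect_garbage() returns true if the next GC is likely to free more.
    if (!collect_garbage() && attempt + 1 >= kMinNumberOfAttempts) break;
  }
}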
    1196             : 
    1197       34471 : void Heap::PreciseCollectAllGarbage(int flags,
    1198             :                                     GarbageCollectionReason gc_reason,
    1199       34471 :                                     const GCCallbackFlags gc_callback_flags) {
    1200       34471 :   if (!incremental_marking()->IsStopped()) {
    1201             :     FinalizeIncrementalMarkingAtomically(gc_reason);
    1202             :   }
    1203             :   CollectAllGarbage(flags, gc_reason, gc_callback_flags);
    1204       34471 : }
    1205             : 
    1206     3597649 : void Heap::ReportExternalMemoryPressure() {
    1207             :   const GCCallbackFlags kGCCallbackFlagsForExternalMemory =
    1208             :       static_cast<GCCallbackFlags>(
    1209             :           kGCCallbackFlagSynchronousPhantomCallbackProcessing |
    1210             :           kGCCallbackFlagCollectAllExternalMemory);
    1211     2401072 :   if (isolate()->isolate_data()->external_memory_ >
    1212     2401072 :       (isolate()->isolate_data()->external_memory_at_last_mark_compact_ +
    1213             :        external_memory_hard_limit())) {
    1214             :     CollectAllGarbage(
    1215             :         kReduceMemoryFootprintMask,
    1216             :         GarbageCollectionReason::kExternalMemoryPressure,
    1217             :         static_cast<GCCallbackFlags>(kGCCallbackFlagCollectAllAvailableGarbage |
    1218             :                                      kGCCallbackFlagsForExternalMemory));
    1219     1200536 :     return;
    1220             :   }
    1221     1199435 :   if (incremental_marking()->IsStopped()) {
    1222        1757 :     if (incremental_marking()->CanBeActivated()) {
    1223             :       StartIncrementalMarking(GCFlagsForIncrementalMarking(),
    1224             :                               GarbageCollectionReason::kExternalMemoryPressure,
    1225             :                               kGCCallbackFlagsForExternalMemory);
    1226             :     } else {
    1227             :       CollectAllGarbage(i::Heap::kNoGCFlags,
    1228             :                         GarbageCollectionReason::kExternalMemoryPressure,
    1229             :                         kGCCallbackFlagsForExternalMemory);
    1230             :     }
    1231             :   } else {
    1232             :     // Incremental marking is turned on and has already been started.
    1233             :     const double kMinStepSize = 5;
    1234             :     const double kMaxStepSize = 10;
    1235             :     const double ms_step = Min(
    1236             :         kMaxStepSize,
    1237             :         Max(kMinStepSize,
    1238     1197678 :             static_cast<double>(isolate()->isolate_data()->external_memory_) /
    1239             :                 isolate()->isolate_data()->external_memory_limit_ *
    1240     1197678 :                 kMinStepSize));
    1241     1197678 :     const double deadline = MonotonicallyIncreasingTimeInMs() + ms_step;
    1242             :     // Extend the gc callback flags with external memory flags.
    1243             :     current_gc_callback_flags_ = static_cast<GCCallbackFlags>(
    1244     1197678 :         current_gc_callback_flags_ | kGCCallbackFlagsForExternalMemory);
    1245             :     incremental_marking()->AdvanceIncrementalMarking(
    1246     1197678 :         deadline, IncrementalMarking::GC_VIA_STACK_GUARD, StepOrigin::kV8);
    1247             :   }
    1248             : }
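
The marking step above clamps a linear ramp: the time budget scales the 5 ms
minimum by the ratio of external memory to its limit and is capped at 10 ms,
so it saturates once external memory reaches twice the limit. Factored out as
a sketch (the helper name is invented):

#include <algorithm>

double ExternalPressureStepMs(double external_memory, double external_limit) {
  const double kMinStepSize = 5;
  const double kMaxStepSize = 10;
  return std::min(kMaxStepSize,
                  std::max(kMinStepSize,
                           external_memory / external_limit * kMinStepSize));
}

For example, at 1.5x the limit the step is 7.5 ms; at or above 2x it is 10 ms.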
    1249             : 
    1250      107086 : void Heap::EnsureFillerObjectAtTop() {
    1251             :   // There may be an allocation memento behind objects in new space. Upon
    1252             :   // evacuation of a non-full new space (or if we are on the last page) there
    1253             :   // may be uninitialized memory behind top. We fill the remainder of the page
    1254             :   // with a filler.
    1255      107086 :   Address to_top = new_space_->top();
    1256      107086 :   Page* page = Page::FromAddress(to_top - kTaggedSize);
    1257      107086 :   if (page->Contains(to_top)) {
    1258      105347 :     int remaining_in_page = static_cast<int>(page->area_end() - to_top);
    1259      105347 :     CreateFillerObjectAt(to_top, remaining_in_page, ClearRecordedSlots::kNo);
    1260             :   }
    1261      107086 : }
    1262             : 
    1263      107086 : bool Heap::CollectGarbage(AllocationSpace space,
    1264             :                           GarbageCollectionReason gc_reason,
    1265      451938 :                           const v8::GCCallbackFlags gc_callback_flags) {
    1266      107086 :   const char* collector_reason = nullptr;
    1267      107086 :   GarbageCollector collector = SelectGarbageCollector(space, &collector_reason);
    1268      107086 :   is_current_gc_forced_ = gc_callback_flags & v8::kGCCallbackFlagForced;
    1269             : 
    1270      107086 :   if (!CanExpandOldGeneration(new_space()->Capacity())) {
    1271          65 :     InvokeNearHeapLimitCallback();
    1272             :   }
    1273             : 
    1274             :   // Ensure that all pending phantom callbacks are invoked.
    1275      107086 :   isolate()->global_handles()->InvokeSecondPassPhantomCallbacks();
    1276             : 
    1277             :   // The VM is in the GC state until exiting this function.
    1278             :   VMState<GC> state(isolate());
    1279             : 
    1280             : #ifdef V8_ENABLE_ALLOCATION_TIMEOUT
    1281             :   // Reset the allocation timeout, but make sure to allow at least a few
    1282             :   // allocations after a collection. The reason for this is that we have a lot
    1283             :   // of allocation sequences and we assume that a garbage collection will allow
    1284             :   // the subsequent allocation attempts to go through.
    1285             :   if (FLAG_random_gc_interval > 0 || FLAG_gc_interval >= 0) {
    1286             :     allocation_timeout_ = Max(6, NextAllocationTimeout(allocation_timeout_));
    1287             :   }
    1288             : #endif
    1289             : 
    1290      107086 :   EnsureFillerObjectAtTop();
    1291             : 
    1292      130680 :   if (IsYoungGenerationCollector(collector) &&
    1293             :       !incremental_marking()->IsStopped()) {
    1294         727 :     if (FLAG_trace_incremental_marking) {
    1295             :       isolate()->PrintWithTimestamp(
    1296           0 :           "[IncrementalMarking] Scavenge during marking.\n");
    1297             :     }
    1298             :   }
    1299             : 
    1300             :   bool next_gc_likely_to_collect_more = false;
    1301             :   size_t committed_memory_before = 0;
    1302             : 
    1303      107086 :   if (collector == MARK_COMPACTOR) {
    1304       83492 :     committed_memory_before = CommittedOldGenerationMemory();
    1305             :   }
    1306             : 
    1307             :   {
    1308      214172 :     tracer()->Start(collector, gc_reason, collector_reason);
    1309             :     DCHECK(AllowHeapAllocation::IsAllowed());
    1310             :     DisallowHeapAllocation no_allocation_during_gc;
    1311      107086 :     GarbageCollectionPrologue();
    1312             : 
    1313             :     {
    1314      107086 :       TimedHistogram* gc_type_timer = GCTypeTimer(collector);
    1315      214172 :       TimedHistogramScope histogram_timer_scope(gc_type_timer, isolate_);
    1316      321258 :       TRACE_EVENT0("v8", gc_type_timer->name());
    1317             : 
    1318      107086 :       TimedHistogram* gc_type_priority_timer = GCTypePriorityTimer(collector);
    1319             :       OptionalTimedHistogramScopeMode mode =
    1320      107086 :           isolate_->IsMemorySavingsModeActive()
    1321             :               ? OptionalTimedHistogramScopeMode::DONT_TAKE_TIME
    1322      107086 :               : OptionalTimedHistogramScopeMode::TAKE_TIME;
    1323             :       OptionalTimedHistogramScope histogram_timer_priority_scope(
    1324      107086 :           gc_type_priority_timer, isolate_, mode);
    1325             : 
    1326             :       next_gc_likely_to_collect_more =
    1327      107086 :           PerformGarbageCollection(collector, gc_callback_flags);
    1328      107086 :       if (collector == MARK_COMPACTOR || collector == SCAVENGER) {
    1329      107086 :         tracer()->RecordGCPhasesHistograms(gc_type_timer);
    1330             :       }
    1331             :     }
    1332             : 
    1333             :     // Clear is_current_gc_forced now that the current GC is complete. Do this
    1334             :     // before GarbageCollectionEpilogue() since that could trigger another
    1335             :     // unforced GC.
    1336      107086 :     is_current_gc_forced_ = false;
    1337             : 
    1338      107086 :     GarbageCollectionEpilogue();
    1339      107086 :     if (collector == MARK_COMPACTOR && FLAG_track_detached_contexts) {
    1340       83492 :       isolate()->CheckDetachedContextsAfterGC();
    1341             :     }
    1342             : 
    1343      107086 :     if (collector == MARK_COMPACTOR) {
    1344       83492 :       size_t committed_memory_after = CommittedOldGenerationMemory();
    1345       83492 :       size_t used_memory_after = OldGenerationSizeOfObjects();
    1346             :       MemoryReducer::Event event;
    1347       83492 :       event.type = MemoryReducer::kMarkCompact;
    1348       83492 :       event.time_ms = MonotonicallyIncreasingTimeInMs();
    1349             :       // Trigger one more GC if
    1350             :       // - this GC decreased committed memory,
    1351             :       // - there is high fragmentation,
    1352             :       // - there are live detached contexts.
    1353             :       event.next_gc_likely_to_collect_more =
    1354      166308 :           (committed_memory_before > committed_memory_after + MB) ||
    1355      166308 :           HasHighFragmentation(used_memory_after, committed_memory_after) ||
    1356      166308 :           (detached_contexts()->length() > 0);
    1357       83492 :       event.committed_memory = committed_memory_after;
    1358       83492 :       if (deserialization_complete_) {
    1359       83492 :         memory_reducer_->NotifyMarkCompact(event);
    1360             :       }
    1361       83539 :       if (initial_max_old_generation_size_ < max_old_generation_size_ &&
    1362          47 :           used_memory_after < initial_max_old_generation_size_threshold_) {
    1363           4 :         max_old_generation_size_ = initial_max_old_generation_size_;
    1364             :       }
    1365             :     }
    1366             : 
    1367      107086 :     tracer()->Stop(collector);
    1368             :   }
    1369             : 
    1370      190578 :   if (collector == MARK_COMPACTOR &&
    1371       83492 :       (gc_callback_flags & (kGCCallbackFlagForced |
    1372             :                             kGCCallbackFlagCollectAllAvailableGarbage)) != 0) {
    1373       36604 :     isolate()->CountUsage(v8::Isolate::kForcedGC);
    1374             :   }
    1375             : 
    1376             :   // Start incremental marking for the next cycle. We do this only for the
    1377             :   // scavenger to avoid a loop where mark-compact causes another mark-compact.
    1378      107086 :   if (IsYoungGenerationCollector(collector)) {
    1379             :     StartIncrementalMarkingIfAllocationLimitIsReached(
    1380             :         GCFlagsForIncrementalMarking(),
    1381       23594 :         kGCCallbackScheduleIdleGarbageCollection);
    1382             :   }
    1383             : 
    1384      107086 :   return next_gc_likely_to_collect_more;
    1385             : }
    1386             : 
    1387             : 
    1388        1322 : int Heap::NotifyContextDisposed(bool dependant_context) {
    1389         656 :   if (!dependant_context) {
    1390          10 :     tracer()->ResetSurvivalEvents();
    1391          10 :     old_generation_size_configured_ = false;
    1392          10 :     old_generation_allocation_limit_ = initial_old_generation_size_;
    1393             :     MemoryReducer::Event event;
    1394          10 :     event.type = MemoryReducer::kPossibleGarbage;
    1395          10 :     event.time_ms = MonotonicallyIncreasingTimeInMs();
    1396          10 :     memory_reducer_->NotifyPossibleGarbage(event);
    1397             :   }
    1398         656 :   isolate()->AbortConcurrentOptimization(BlockingBehavior::kDontBlock);
    1399             : 
    1400        1312 :   number_of_disposed_maps_ = retained_maps()->length();
    1401        1312 :   tracer()->AddContextDisposalTime(MonotonicallyIncreasingTimeInMs());
    1402         656 :   return ++contexts_disposed_;
    1403             : }
    1404             : 
    1405         964 : void Heap::StartIncrementalMarking(int gc_flags,
    1406             :                                    GarbageCollectionReason gc_reason,
    1407       31869 :                                    GCCallbackFlags gc_callback_flags) {
    1408             :   DCHECK(incremental_marking()->IsStopped());
    1409             :   set_current_gc_flags(gc_flags);
    1410       31869 :   current_gc_callback_flags_ = gc_callback_flags;
    1411       31869 :   incremental_marking()->Start(gc_reason);
    1412         964 : }
    1413             : 
    1414     1699683 : void Heap::StartIncrementalMarkingIfAllocationLimitIsReached(
    1415     1701384 :     int gc_flags, const GCCallbackFlags gc_callback_flags) {
    1416     1699683 :   if (incremental_marking()->IsStopped()) {
    1417     1398024 :     IncrementalMarkingLimit reached_limit = IncrementalMarkingLimitReached();
    1418     1398024 :     if (reached_limit == IncrementalMarkingLimit::kSoftLimit) {
    1419        1701 :       incremental_marking()->incremental_marking_job()->ScheduleTask(this);
    1420     1396323 :     } else if (reached_limit == IncrementalMarkingLimit::kHardLimit) {
    1421             :       StartIncrementalMarking(gc_flags,
    1422             :                               GarbageCollectionReason::kAllocationLimit,
    1423             :                               gc_callback_flags);
    1424             :     }
    1425             :   }
    1426     1699683 : }
    1427             : 
    1428          10 : void Heap::StartIdleIncrementalMarking(
    1429             :     GarbageCollectionReason gc_reason,
    1430             :     const GCCallbackFlags gc_callback_flags) {
    1431          10 :   gc_idle_time_handler_->ResetNoProgressCounter();
    1432             :   StartIncrementalMarking(kReduceMemoryFootprintMask, gc_reason,
    1433             :                           gc_callback_flags);
    1434          10 : }
    1435             : 
    1436        1345 : void Heap::MoveElements(FixedArray array, int dst_index, int src_index, int len,
    1437        1339 :                         WriteBarrierMode mode) {
    1438        1345 :   if (len == 0) return;
    1439             : 
    1440             :   DCHECK(array->map() != ReadOnlyRoots(this).fixed_cow_array_map());
    1441             :   ObjectSlot dst = array->RawFieldOfElementAt(dst_index);
    1442             :   ObjectSlot src = array->RawFieldOfElementAt(src_index);
    1443        2684 :   if (FLAG_concurrent_marking && incremental_marking()->IsMarking()) {
    1444         147 :     if (dst < src) {
    1445         295 :       for (int i = 0; i < len; i++) {
    1446             :         dst.Relaxed_Store(src.Relaxed_Load());
    1447             :         ++dst;
    1448             :         ++src;
    1449             :       }
    1450             :     } else {
    1451             :       // Copy backwards.
    1452         119 :       dst += len - 1;
    1453             :       src += len - 1;
    1454         329 :       for (int i = 0; i < len; i++) {
    1455             :         dst.Relaxed_Store(src.Relaxed_Load());
    1456             :         --dst;
    1457             :         --src;
    1458             :       }
    1459             :     }
    1460             :   } else {
    1461        1198 :     MemMove(dst.ToVoidPtr(), src.ToVoidPtr(), len * kTaggedSize);
    1462             :   }
    1463        1345 :   if (mode == SKIP_WRITE_BARRIER) return;
    1464         614 :   FIXED_ARRAY_ELEMENTS_WRITE_BARRIER(this, array, dst_index, len);
    1465             : }
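
The dst < src branch above is memmove's overlap rule applied one tagged slot
at a time: copy forward when the destination precedes the source, backward
otherwise, so no source slot is overwritten before it is read. (The per-slot
relaxed atomics keep the concurrent marker from observing a torn slot.) The
direction logic in isolation, as a plain non-atomic sketch:

#include <cstdint>

void MoveSlots(intptr_t* dst, intptr_t* src, int len) {
  if (dst < src) {
    for (int i = 0; i < len; i++) dst[i] = src[i];       // forward copy
  } else {
    for (int i = len - 1; i >= 0; i--) dst[i] = src[i];  // backward copy
  }
}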
    1466             : 
    1467             : #ifdef VERIFY_HEAP
    1468             : // Helper class for verifying the string table.
    1469             : class StringTableVerifier : public ObjectVisitor {
    1470             :  public:
    1471             :   explicit StringTableVerifier(Isolate* isolate) : isolate_(isolate) {}
    1472             : 
    1473             :   void VisitPointers(HeapObject host, ObjectSlot start,
    1474             :                      ObjectSlot end) override {
    1475             :     // Visit all HeapObject pointers in [start, end).
    1476             :     for (ObjectSlot p = start; p < end; ++p) {
    1477             :       DCHECK(!HasWeakHeapObjectTag(*p));
    1478             :       if ((*p)->IsHeapObject()) {
    1479             :         HeapObject object = HeapObject::cast(*p);
    1480             :         // Check that the string is actually internalized.
    1481             :         CHECK(object->IsTheHole(isolate_) || object->IsUndefined(isolate_) ||
    1482             :               object->IsInternalizedString());
    1483             :       }
    1484             :     }
    1485             :   }
    1486             :   void VisitPointers(HeapObject host, MaybeObjectSlot start,
    1487             :                      MaybeObjectSlot end) override {
    1488             :     UNREACHABLE();
    1489             :   }
    1490             : 
    1491             :   void VisitCodeTarget(Code host, RelocInfo* rinfo) override { UNREACHABLE(); }
    1492             : 
    1493             :   void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) override {
    1494             :     UNREACHABLE();
    1495             :   }
    1496             : 
    1497             :  private:
    1498             :   Isolate* isolate_;
    1499             : };
    1500             : 
    1501             : static void VerifyStringTable(Isolate* isolate) {
    1502             :   StringTableVerifier verifier(isolate);
    1503             :   isolate->heap()->string_table()->IterateElements(&verifier);
    1504             : }
    1505             : #endif  // VERIFY_HEAP
    1506             : 
    1507    22649261 : bool Heap::ReserveSpace(Reservation* reservations, std::vector<Address>* maps) {
    1508             :   bool gc_performed = true;
    1509             :   int counter = 0;
    1510             :   static const int kThreshold = 20;
    1511      652994 :   while (gc_performed && counter++ < kThreshold) {
    1512             :     gc_performed = false;
    1513     1305987 :     for (int space = FIRST_SPACE;
    1514             :          space < SerializerDeserializer::kNumberOfSpaces; space++) {
    1515     1305993 :       Reservation* reservation = &reservations[space];
    1516             :       DCHECK_LE(1, reservation->size());
    1517     1305994 :       if (reservation->at(0).size == 0) {
    1518             :         DCHECK_EQ(1, reservation->size());
    1519             :         continue;
    1520             :       }
    1521             :       bool perform_gc = false;
    1522      435366 :       if (space == MAP_SPACE) {
    1523             :         // We allocate each map individually to avoid fragmentation.
    1524             :         maps->clear();
    1525             :         DCHECK_LE(reservation->size(), 2);
    1526             :         int reserved_size = 0;
    1527      463866 :         for (const Chunk& c : *reservation) reserved_size += c.size;
    1528             :         DCHECK_EQ(0, reserved_size % Map::kSize);
    1529      154622 :         int num_maps = reserved_size / Map::kSize;
    1530    45172012 :         for (int i = 0; i < num_maps; i++) {
    1531             :           // The deserializer will update the skip list.
    1532             :           AllocationResult allocation = map_space()->AllocateRawUnaligned(
    1533    22431385 :               Map::kSize, PagedSpace::IGNORE_SKIP_LIST);
    1534    22431394 :           HeapObject free_space;
    1535    22431394 :           if (allocation.To(&free_space)) {
    1536             :             // Mark with a free list node, in case we have a GC before
    1537             :             // deserializing.
    1538    22431384 :             Address free_space_address = free_space->address();
    1539             :             CreateFillerObjectAt(free_space_address, Map::kSize,
    1540    22431384 :                                  ClearRecordedSlots::kNo);
    1541    22431378 :             maps->push_back(free_space_address);
    1542             :           } else {
    1543             :             perform_gc = true;
    1544           0 :             break;
    1545             :           }
    1546             :         }
    1547      280744 :       } else if (space == LO_SPACE) {
    1548             :         // Just check that we can allocate during deserialization.
    1549             :         DCHECK_LE(reservation->size(), 2);
    1550             :         int reserved_size = 0;
    1551         105 :         for (const Chunk& c : *reservation) reserved_size += c.size;
    1552          35 :         perform_gc = !CanExpandOldGeneration(reserved_size);
    1553             :       } else {
    1554     2676550 :         for (auto& chunk : *reservation) {
    1555             :           AllocationResult allocation;
    1556     2395847 :           int size = chunk.size;
    1557             :           DCHECK_LE(static_cast<size_t>(size),
    1558             :                     MemoryChunkLayout::AllocatableMemoryInMemoryChunk(
    1559             :                         static_cast<AllocationSpace>(space)));
    1560     2395847 :           if (space == NEW_SPACE) {
    1561         214 :             allocation = new_space()->AllocateRawUnaligned(size);
    1562             :           } else {
    1563             :             // The deserializer will update the skip list.
    1564             :             allocation = paged_space(space)->AllocateRawUnaligned(
    1565     2395634 :                 size, PagedSpace::IGNORE_SKIP_LIST);
    1566             :           }
    1567     2395847 :           HeapObject free_space;
    1568     2395847 :           if (allocation.To(&free_space)) {
    1569             :             // Mark with a free list node, in case we have a GC before
    1570             :             // deserializing.
    1571             :             Address free_space_address = free_space->address();
    1572             :             CreateFillerObjectAt(free_space_address, size,
    1573     2395840 :                                  ClearRecordedSlots::kNo);
    1574             :             DCHECK_GT(SerializerDeserializer::kNumberOfPreallocatedSpaces,
    1575             :                       space);
    1576     2395841 :             chunk.start = free_space_address;
    1577     2395841 :             chunk.end = free_space_address + size;
    1578             :           } else {
    1579             :             perform_gc = true;
    1580           5 :             break;
    1581             :           }
    1582             :         }
    1583             :       }
    1584      435364 :       if (perform_gc) {
    1585             :         // We cannot perform a GC with an uninitialized isolate. This check
    1586             :         // fails for example if the max old space size is chosen unwisely,
    1587             :         // so that we cannot allocate space to deserialize the initial heap.
    1588           5 :         if (!deserialization_complete_) {
    1589             :           V8::FatalProcessOutOfMemory(
    1590           0 :               isolate(), "insufficient memory to create an Isolate");
    1591             :         }
    1592           5 :         if (space == NEW_SPACE) {
    1593           0 :           CollectGarbage(NEW_SPACE, GarbageCollectionReason::kDeserializer);
    1594             :         } else {
    1595           5 :           if (counter > 1) {
    1596             :             CollectAllGarbage(kReduceMemoryFootprintMask,
    1597             :                               GarbageCollectionReason::kDeserializer);
    1598             :           } else {
    1599             :             CollectAllGarbage(kNoGCFlags,
    1600             :                               GarbageCollectionReason::kDeserializer);
    1601             :           }
    1602             :         }
    1603             :         gc_performed = true;
    1604             :         break;  // Abort for-loop over spaces and retry.
    1605             :       }
    1606             :     }
    1607             :   }
    1608             : 
    1609      217663 :   return !gc_performed;
    1610             : }
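
Reduced to its skeleton, ReserveSpace is an allocate-or-collect retry loop:
attempt every reservation, and on any failure run a GC (a more aggressive one
after the first retry) and start over, giving up after a fixed number of
rounds. An illustrative sketch with invented callback parameters:

#include <vector>

bool ReserveAll(const std::vector<int>& chunk_sizes,
                bool (*try_allocate)(int size),
                void (*collect)(bool aggressive)) {
  static const int kThreshold = 20;
  int counter = 0;
  bool gc_performed = true;
  while (gc_performed && counter++ < kThreshold) {
    gc_performed = false;
    for (int size : chunk_sizes) {
      if (!try_allocate(size)) {
        collect(/*aggressive=*/counter > 1);
        gc_performed = true;
        break;  // abort this round and retry from the first reservation
      }
    }
  }
  return !gc_performed;  // true iff every chunk was eventually reserved
}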
    1611             : 
    1612             : 
    1613      107086 : void Heap::EnsureFromSpaceIsCommitted() {
    1614      321258 :   if (new_space_->CommitFromSpaceIfNeeded()) return;
    1615             : 
    1616             :   // Committing memory to from space failed.
    1617             :   // Memory is exhausted and we will die.
    1618           0 :   FatalProcessOutOfMemory("Committing semi space failed.");
    1619             : }
    1620             : 
    1621             : 
    1622      200623 : void Heap::UpdateSurvivalStatistics(int start_new_space_size) {
    1623      214172 :   if (start_new_space_size == 0) return;
    1624             : 
    1625       93537 :   promotion_ratio_ = (static_cast<double>(promoted_objects_size_) /
    1626       93537 :                       static_cast<double>(start_new_space_size) * 100);
    1627             : 
    1628       93537 :   if (previous_semi_space_copied_object_size_ > 0) {
    1629             :     promotion_rate_ =
    1630       63991 :         (static_cast<double>(promoted_objects_size_) /
    1631       63991 :          static_cast<double>(previous_semi_space_copied_object_size_) * 100);
    1632             :   } else {
    1633       29546 :     promotion_rate_ = 0;
    1634             :   }
    1635             : 
    1636             :   semi_space_copied_rate_ =
    1637       93537 :       (static_cast<double>(semi_space_copied_object_size_) /
    1638       93537 :        static_cast<double>(start_new_space_size) * 100);
    1639             : 
    1640       93537 :   double survival_rate = promotion_ratio_ + semi_space_copied_rate_;
    1641       93537 :   tracer()->AddSurvivalRatio(survival_rate);
    1642             : }
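
A worked reading of these ratios, with invented numbers: if new space held
8 MB when the GC started, 2 MB was promoted to the old generation, and 1 MB
was copied within the semispaces, then

  promotion_ratio_        = 2 / 8 * 100 = 25.0
  semi_space_copied_rate_ = 1 / 8 * 100 = 12.5
  survival_rate           = 25.0 + 12.5 = 37.5

i.e. 37.5% of the new-space bytes survived the scavenge in some form.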
    1643             : 
    1644      107086 : bool Heap::PerformGarbageCollection(
    1645     1428700 :     GarbageCollector collector, const v8::GCCallbackFlags gc_callback_flags) {
    1646             :   int freed_global_handles = 0;
    1647             : 
    1648      107086 :   if (!IsYoungGenerationCollector(collector)) {
    1649      488242 :     PROFILE(isolate_, CodeMovingGCEvent());
    1650             :   }
    1651             : 
    1652             : #ifdef VERIFY_HEAP
    1653             :   if (FLAG_verify_heap) {
    1654             :     VerifyStringTable(this->isolate());
    1655             :   }
    1656             : #endif
    1657             : 
    1658             :   GCType gc_type =
    1659      107086 :       collector == MARK_COMPACTOR ? kGCTypeMarkSweepCompact : kGCTypeScavenge;
    1660             : 
    1661             :   {
    1662             :     GCCallbacksScope scope(this);
    1663             :     // Temporarily override any embedder stack state as callbacks may create their
    1664             :     // own state on the stack and recursively trigger GC.
    1665             :     EmbedderStackStateScope embedder_scope(
    1666             :         local_embedder_heap_tracer(),
    1667             :         EmbedderHeapTracer::EmbedderStackState::kUnknown);
    1668      107086 :     if (scope.CheckReenter()) {
    1669             :       AllowHeapAllocation allow_allocation;
    1670      428216 :       TRACE_GC(tracer(), GCTracer::Scope::HEAP_EXTERNAL_PROLOGUE);
    1671      214108 :       VMState<EXTERNAL> state(isolate_);
    1672      107054 :       HandleScope handle_scope(isolate_);
    1673      214108 :       CallGCPrologueCallbacks(gc_type, kNoGCCallbackFlags);
    1674             :     }
    1675             :   }
    1676             : 
    1677      107086 :   EnsureFromSpaceIsCommitted();
    1678             : 
    1679      107086 :   size_t start_new_space_size = Heap::new_space()->Size();
    1680             : 
    1681             :   {
    1682      107086 :     Heap::SkipStoreBufferScope skip_store_buffer_scope(store_buffer_);
    1683             : 
    1684      107086 :     switch (collector) {
    1685             :       case MARK_COMPACTOR:
    1686             :         UpdateOldGenerationAllocationCounter();
    1687             :         // Perform mark-sweep with optional compaction.
    1688       83492 :         MarkCompact();
    1689       83492 :         old_generation_size_configured_ = true;
    1690             :         // This should be updated before PostGarbageCollectionProcessing, which
    1691             :         // can cause another GC. Take into account the objects promoted during
    1692             :         // GC.
    1693             :         old_generation_allocation_counter_at_last_gc_ +=
    1694       83492 :             static_cast<size_t>(promoted_objects_size_);
    1695       83492 :         old_generation_size_at_last_gc_ = OldGenerationSizeOfObjects();
    1696       83492 :         break;
    1697             :       case MINOR_MARK_COMPACTOR:
    1698           0 :         MinorMarkCompact();
    1699           0 :         break;
    1700             :       case SCAVENGER:
    1701       23594 :         if ((fast_promotion_mode_ &&
    1702           0 :              CanExpandOldGeneration(new_space()->Size()))) {
    1703             :           tracer()->NotifyYoungGenerationHandling(
    1704           0 :               YoungGenerationHandling::kFastPromotionDuringScavenge);
    1705           0 :           EvacuateYoungGeneration();
    1706             :         } else {
    1707             :           tracer()->NotifyYoungGenerationHandling(
    1708       23594 :               YoungGenerationHandling::kRegularScavenge);
    1709             : 
    1710       23594 :           Scavenge();
    1711             :         }
    1712             :         break;
    1713             :     }
    1714             : 
    1715      107086 :     ProcessPretenuringFeedback();
    1716             :   }
    1717             : 
    1718      107086 :   UpdateSurvivalStatistics(static_cast<int>(start_new_space_size));
    1719      107086 :   ConfigureInitialOldGenerationSize();
    1720             : 
    1721      107086 :   if (collector != MARK_COMPACTOR) {
    1722             :     // Objects that died in the new space might have been accounted
    1723             :     // as bytes marked ahead of schedule by the incremental marker.
    1724             :     incremental_marking()->UpdateMarkedBytesAfterScavenge(
    1725       47188 :         start_new_space_size - SurvivedNewSpaceObjectSize());
    1726             :   }
    1727             : 
    1728      107086 :   if (!fast_promotion_mode_ || collector == MARK_COMPACTOR) {
    1729      107086 :     ComputeFastPromotionMode();
    1730             :   }
    1731             : 
    1732      214172 :   isolate_->counters()->objs_since_last_young()->Set(0);
    1733             : 
    1734             :   {
    1735      428344 :     TRACE_GC(tracer(), GCTracer::Scope::HEAP_EXTERNAL_WEAK_GLOBAL_HANDLES);
    1736             :     // First round weak callbacks are not supposed to allocate and trigger
    1737             :     // nested GCs.
    1738             :     freed_global_handles =
    1739      321258 :         isolate_->global_handles()->InvokeFirstPassWeakCallbacks();
    1740             :   }
    1741             : 
    1742      107086 :   if (collector == MARK_COMPACTOR) {
    1743      333968 :     TRACE_GC(tracer(), GCTracer::Scope::HEAP_EMBEDDER_TRACING_EPILOGUE);
    1744             :     // TraceEpilogue may trigger operations that invalidate global handles. It
    1745             :     // has to be called *after* all other operations that potentially touch and
    1746             :     // reset global handles. It is also still part of the main garbage
    1747             :     // collection pause and thus needs to be called *before* any operation that
    1748             :     // can potentially trigger recursive garbage collection.
    1749      166984 :     local_embedder_heap_tracer()->TraceEpilogue();
    1750             :   }
    1751             : 
    1752             :   {
    1753      428344 :     TRACE_GC(tracer(), GCTracer::Scope::HEAP_EXTERNAL_WEAK_GLOBAL_HANDLES);
    1754      107086 :     gc_post_processing_depth_++;
    1755             :     {
    1756             :       AllowHeapAllocation allow_allocation;
    1757             :       freed_global_handles +=
    1758             :           isolate_->global_handles()->PostGarbageCollectionProcessing(
    1759      214172 :               collector, gc_callback_flags);
    1760             :     }
    1761      214172 :     gc_post_processing_depth_--;
    1762             :   }
    1763             : 
    1764      214172 :   isolate_->eternal_handles()->PostGarbageCollectionProcessing();
    1765             : 
    1766             :   // Update relocatables.
    1767      107086 :   Relocatable::PostGarbageCollectionProcessing(isolate_);
    1768             : 
    1769      107086 :   double gc_speed = tracer()->CombinedMarkCompactSpeedInBytesPerMillisecond();
    1770             :   double mutator_speed =
    1771      107086 :       tracer()->CurrentOldGenerationAllocationThroughputInBytesPerMillisecond();
    1772      107086 :   size_t old_gen_size = OldGenerationSizeOfObjects();
    1773      107086 :   if (collector == MARK_COMPACTOR) {
    1774             :     // Register the amount of external allocated memory.
    1775             :     isolate()->isolate_data()->external_memory_at_last_mark_compact_ =
    1776       83492 :         isolate()->isolate_data()->external_memory_;
    1777             :     isolate()->isolate_data()->external_memory_limit_ =
    1778       83492 :         isolate()->isolate_data()->external_memory_ +
    1779       83492 :         kExternalAllocationSoftLimit;
    1780             : 
    1781             :     double max_factor =
    1782      166984 :         heap_controller()->MaxGrowingFactor(max_old_generation_size_);
    1783             :     size_t new_limit = heap_controller()->CalculateAllocationLimit(
    1784             :         old_gen_size, max_old_generation_size_, max_factor, gc_speed,
    1785      250476 :         mutator_speed, new_space()->Capacity(), CurrentHeapGrowingMode());
    1786       83492 :     old_generation_allocation_limit_ = new_limit;
    1787             : 
    1788             :     CheckIneffectiveMarkCompact(
    1789       83492 :         old_gen_size, tracer()->AverageMarkCompactMutatorUtilization());
    1790       23594 :   } else if (HasLowYoungGenerationAllocationRate() &&
    1791             :              old_generation_size_configured_) {
    1792             :     double max_factor =
    1793         228 :         heap_controller()->MaxGrowingFactor(max_old_generation_size_);
    1794             :     size_t new_limit = heap_controller()->CalculateAllocationLimit(
    1795             :         old_gen_size, max_old_generation_size_, max_factor, gc_speed,
    1796         342 :         mutator_speed, new_space()->Capacity(), CurrentHeapGrowingMode());
    1797         114 :     if (new_limit < old_generation_allocation_limit_) {
    1798           0 :       old_generation_allocation_limit_ = new_limit;
    1799             :     }
    1800             :   }
    1801             : 
    1802             :   {
    1803             :     GCCallbacksScope scope(this);
    1804      107086 :     if (scope.CheckReenter()) {
    1805             :       AllowHeapAllocation allow_allocation;
    1806      428216 :       TRACE_GC(tracer(), GCTracer::Scope::HEAP_EXTERNAL_EPILOGUE);
    1807      214108 :       VMState<EXTERNAL> state(isolate_);
    1808      107054 :       HandleScope handle_scope(isolate_);
    1809      214108 :       CallGCEpilogueCallbacks(gc_type, gc_callback_flags);
    1810             :     }
    1811             :   }
    1812             : 
    1813             : #ifdef VERIFY_HEAP
    1814             :   if (FLAG_verify_heap) {
    1815             :     VerifyStringTable(this->isolate());
    1816             :   }
    1817             : #endif
    1818             : 
    1819      107086 :   return freed_global_handles > 0;
    1820             : }
    1821             : 
    1822             : 
    1823      141320 : void Heap::CallGCPrologueCallbacks(GCType gc_type, GCCallbackFlags flags) {
    1824             :   RuntimeCallTimerScope runtime_timer(
    1825      141320 :       isolate(), RuntimeCallCounterId::kGCPrologueCallback);
    1826      282707 :   for (const GCCallbackTuple& info : gc_prologue_callbacks_) {
    1827          67 :     if (gc_type & info.gc_type) {
    1828             :       v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(this->isolate());
    1829          67 :       info.callback(isolate, gc_type, flags, info.data);
    1830             :     }
    1831             :   }
    1832      141320 : }
    1833             : 
    1834      141320 : void Heap::CallGCEpilogueCallbacks(GCType gc_type, GCCallbackFlags flags) {
    1835             :   RuntimeCallTimerScope runtime_timer(
    1836      141320 :       isolate(), RuntimeCallCounterId::kGCEpilogueCallback);
    1837      424025 :   for (const GCCallbackTuple& info : gc_epilogue_callbacks_) {
    1838      141385 :     if (gc_type & info.gc_type) {
    1839             :       v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(this->isolate());
    1840       83545 :       info.callback(isolate, gc_type, flags, info.data);
    1841             :     }
    1842             :   }
    1843      141320 : }
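
Embedders feed these invocation loops through the public API. A hedged sketch
of registering a callback (the overloads shown match the v8.h of this era;
verify against the header you build with):

#include <v8.h>

// Runs inside the GC pause; it must not allocate on the V8 heap or re-enter
// JavaScript.
static void OnGC(v8::Isolate* isolate, v8::GCType type,
                 v8::GCCallbackFlags flags, void* data) {
  // e.g. record pause bookkeeping in embedder-side telemetry.
}

void InstallGCHooks(v8::Isolate* isolate) {
  isolate->AddGCPrologueCallback(OnGC, nullptr, v8::kGCTypeAll);
  isolate->AddGCEpilogueCallback(OnGC, nullptr, v8::kGCTypeAll);
}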
    1844             : 
    1845             : 
    1846      250476 : void Heap::MarkCompact() {
    1847       83492 :   PauseAllocationObserversScope pause_observers(this);
    1848             : 
    1849             :   SetGCState(MARK_COMPACT);
    1850             : 
    1851      166984 :   LOG(isolate_, ResourceEvent("markcompact", "begin"));
    1852             : 
    1853             :   uint64_t size_of_objects_before_gc = SizeOfObjects();
    1854             : 
    1855      166984 :   CodeSpaceMemoryModificationScope code_modifcation(this);
    1856             : 
    1857       83492 :   mark_compact_collector()->Prepare();
    1858             : 
    1859       83492 :   ms_count_++;
    1860             : 
    1861       83492 :   MarkCompactPrologue();
    1862             : 
    1863       83492 :   mark_compact_collector()->CollectGarbage();
    1864             : 
    1865      166984 :   LOG(isolate_, ResourceEvent("markcompact", "end"));
    1866             : 
    1867       83492 :   MarkCompactEpilogue();
    1868             : 
    1869       83492 :   if (FLAG_allocation_site_pretenuring) {
    1870       83492 :     EvaluateOldSpaceLocalPretenuring(size_of_objects_before_gc);
    1871       83492 :   }
    1872       83492 : }
    1873             : 
    1874           0 : void Heap::MinorMarkCompact() {
    1875             : #ifdef ENABLE_MINOR_MC
    1876             :   DCHECK(FLAG_minor_mc);
    1877             : 
    1878           0 :   PauseAllocationObserversScope pause_observers(this);
    1879             :   SetGCState(MINOR_MARK_COMPACT);
    1880           0 :   LOG(isolate_, ResourceEvent("MinorMarkCompact", "begin"));
    1881             : 
    1882           0 :   TRACE_GC(tracer(), GCTracer::Scope::MINOR_MC);
    1883             :   AlwaysAllocateScope always_allocate(isolate());
    1884             :   IncrementalMarking::PauseBlackAllocationScope pause_black_allocation(
    1885             :       incremental_marking());
    1886           0 :   ConcurrentMarking::PauseScope pause_scope(concurrent_marking());
    1887             : 
    1888           0 :   minor_mark_compact_collector()->CollectGarbage();
    1889             : 
    1890           0 :   LOG(isolate_, ResourceEvent("MinorMarkCompact", "end"));
    1891           0 :   SetGCState(NOT_IN_GC);
    1892             : #else
    1893             :   UNREACHABLE();
    1894             : #endif  // ENABLE_MINOR_MC
    1895           0 : }
    1896             : 
    1897      166984 : void Heap::MarkCompactEpilogue() {
    1898      333968 :   TRACE_GC(tracer(), GCTracer::Scope::MC_EPILOGUE);
    1899             :   SetGCState(NOT_IN_GC);
    1900             : 
    1901      166984 :   isolate_->counters()->objs_since_last_full()->Set(0);
    1902             : 
    1903       83492 :   incremental_marking()->Epilogue();
    1904             : 
    1905       83492 :   DCHECK(incremental_marking()->IsStopped());
    1906       83492 : }
    1907             : 
    1908             : 
    1909       83492 : void Heap::MarkCompactPrologue() {
    1910      333968 :   TRACE_GC(tracer(), GCTracer::Scope::MC_PROLOGUE);
    1911      166984 :   isolate_->descriptor_lookup_cache()->Clear();
    1912       83492 :   RegExpResultsCache::Clear(string_split_cache());
    1913       83492 :   RegExpResultsCache::Clear(regexp_multiple_cache());
    1914             : 
    1915      166984 :   isolate_->compilation_cache()->MarkCompactPrologue();
    1916             : 
    1917      166984 :   FlushNumberStringCache();
    1918       83492 : }
    1919             : 
    1920             : 
    1921      107086 : void Heap::CheckNewSpaceExpansionCriteria() {
    1922      107086 :   if (FLAG_experimental_new_space_growth_heuristic) {
    1923           0 :     if (new_space_->TotalCapacity() < new_space_->MaximumCapacity() &&
    1924           0 :         survived_last_scavenge_ * 100 / new_space_->TotalCapacity() >= 10) {
    1925             :       // Grow the size of new space if there is room to grow, and more than 10%
    1926             :       // have survived the last scavenge.
    1927           0 :       new_space_->Grow();
    1928           0 :       survived_since_last_expansion_ = 0;
    1929             :     }
    1930      318240 :   } else if (new_space_->TotalCapacity() < new_space_->MaximumCapacity() &&
    1931      104068 :              survived_since_last_expansion_ > new_space_->TotalCapacity()) {
    1932             :     // Grow the size of new space if there is room to grow, and enough data
    1933             :     // has survived scavenge since the last expansion.
    1934        1848 :     new_space_->Grow();
    1935        1848 :     survived_since_last_expansion_ = 0;
    1936             :   }
    1937      107086 : }
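
The default criterion above grows new space only while there is headroom and
a full current capacity's worth of bytes has survived scavenges since the
last expansion. The predicate in isolation (illustrative sketch):

#include <cstddef>

bool ShouldGrowNewSpace(size_t total_capacity, size_t maximum_capacity,
                        size_t survived_since_last_expansion) {
  return total_capacity < maximum_capacity &&
         survived_since_last_expansion > total_capacity;
}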
    1938             : 
    1939           0 : void Heap::EvacuateYoungGeneration() {
    1940           0 :   TRACE_GC(tracer(), GCTracer::Scope::SCAVENGER_FAST_PROMOTE);
    1941           0 :   base::MutexGuard guard(relocation_mutex());
    1942           0 :   ConcurrentMarking::PauseScope pause_scope(concurrent_marking());
    1943             :   if (!FLAG_concurrent_marking) {
    1944             :     DCHECK(fast_promotion_mode_);
    1945             :     DCHECK(CanExpandOldGeneration(new_space()->Size()));
    1946             :   }
    1947             : 
    1948           0 :   mark_compact_collector()->sweeper()->EnsureIterabilityCompleted();
    1949             : 
    1950             :   SetGCState(SCAVENGE);
    1951           0 :   LOG(isolate_, ResourceEvent("scavenge", "begin"));
    1952             : 
    1953             :   // Move pages from new->old generation.
    1954             :   PageRange range(new_space()->first_allocatable_address(), new_space()->top());
    1955           0 :   for (auto it = range.begin(); it != range.end();) {
    1956             :     Page* p = (*++it)->prev_page();
    1957           0 :     new_space()->from_space().RemovePage(p);
    1958           0 :     Page::ConvertNewToOld(p);
    1959           0 :     if (incremental_marking()->IsMarking())
    1960           0 :       mark_compact_collector()->RecordLiveSlotsOnPage(p);
    1961             :   }
    1962             : 
    1963             :   // Reset new space.
    1964           0 :   if (!new_space()->Rebalance()) {
    1965           0 :     FatalProcessOutOfMemory("NewSpace::Rebalance");
    1966             :   }
    1967           0 :   new_space()->ResetLinearAllocationArea();
    1968             :   new_space()->set_age_mark(new_space()->top());
    1969             : 
    1970             :   // Fix up special trackers.
    1971           0 :   external_string_table_.PromoteAllNewSpaceStrings();
    1972             :   // GlobalHandles are updated in PostGarbageCollectionProcessing.
    1973             : 
    1974           0 :   IncrementYoungSurvivorsCounter(new_space()->Size());
    1975           0 :   IncrementPromotedObjectsSize(new_space()->Size());
    1976             :   IncrementSemiSpaceCopiedObjectSize(0);
    1977             : 
    1978           0 :   LOG(isolate_, ResourceEvent("scavenge", "end"));
    1979           0 :   SetGCState(NOT_IN_GC);
    1980           0 : }
    1981             : 
    1982      165158 : void Heap::Scavenge() {
    1983       94376 :   TRACE_GC(tracer(), GCTracer::Scope::SCAVENGER_SCAVENGE);
    1984       23594 :   base::MutexGuard guard(relocation_mutex());
    1985       47188 :   ConcurrentMarking::PauseScope pause_scope(concurrent_marking());
    1986             :   // There are soft limits in the allocation code, designed to trigger a mark-
    1987             :   // sweep collection by failing allocations. There is no sense in trying to
    1988             :   // trigger one during scavenge: scavenge allocations should always succeed.
    1989             :   AlwaysAllocateScope scope(isolate());
    1990             : 
    1991             :   // Bump-pointer allocations done during scavenge are not real allocations.
    1992             :   // Pause the inline allocation steps.
    1993       47188 :   PauseAllocationObserversScope pause_observers(this);
    1994             :   IncrementalMarking::PauseBlackAllocationScope pause_black_allocation(
    1995             :       incremental_marking());
    1996             : 
    1997             : 
    1998       23594 :   mark_compact_collector()->sweeper()->EnsureIterabilityCompleted();
    1999             : 
    2000             :   SetGCState(SCAVENGE);
    2001             : 
    2002             :   // Flip the semispaces. After flipping, to-space is empty and from-space
    2003             :   // holds the live objects.
    2004       23594 :   new_space()->Flip();
    2005       23594 :   new_space()->ResetLinearAllocationArea();
    2006             : 
    2007             :   // We also flip the young generation large object space. All large objects
    2008             :   // will be in the from space.
    2009       23594 :   new_lo_space()->Flip();
    2010             : 
    2011             :   // Implements Cheney's copying algorithm
    2012       47188 :   LOG(isolate_, ResourceEvent("scavenge", "begin"));
    2013             : 
    2014       23594 :   scavenger_collector_->CollectGarbage();
    2015             : 
    2016       47188 :   LOG(isolate_, ResourceEvent("scavenge", "end"));
    2017             : 
    2018       23594 :   SetGCState(NOT_IN_GC);
    2019       23594 : }
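Scavenge() above drives a Cheney-style semispace copy: flip, then a breadth-first scan in which the scan pointer chases the allocation pointer. For reference, a self-contained sketch of Cheney's algorithm over a toy two-field object model (Obj, CheneyCollect, and the forwarding field are all illustrative, not V8 code):

    #include <cstring>

    // Toy object: `forward` is null until copied, then points into to-space.
    struct Obj {
      Obj* forward;
      Obj* fields[2];  // outgoing edges, may be null
    };

    // Copies the graph reachable from *root into to_space; returns the new
    // allocation top. to_space must be large enough for all live objects.
    static Obj* CheneyCollect(Obj** root, Obj* to_space) {
      Obj* top = to_space;
      auto copy = [&](Obj* o) -> Obj* {
        if (o == nullptr) return nullptr;
        if (o->forward != nullptr) return o->forward;  // already evacuated
        std::memcpy(top, o, sizeof(Obj));
        o->forward = top;
        return top++;
      };
      *root = copy(*root);
      for (Obj* scan = to_space; scan < top; ++scan) {  // breadth-first scan
        scan->fields[0] = copy(scan->fields[0]);
        scan->fields[1] = copy(scan->fields[1]);
      }
      return top;
    }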
    2020             : 
    2021      107086 : void Heap::ComputeFastPromotionMode() {
    2022             :   const size_t survived_in_new_space =
    2023      214172 :       survived_last_scavenge_ * 100 / new_space_->Capacity();
    2024             :   fast_promotion_mode_ =
    2025      214172 :       !FLAG_optimize_for_size && FLAG_fast_promotion_new_space &&
    2026      107086 :       !ShouldReduceMemory() && new_space_->IsAtMaximumCapacity() &&
    2027      107086 :       survived_in_new_space >= kMinPromotedPercentForFastPromotionMode;
    2028      107086 :   if (FLAG_trace_gc_verbose && !FLAG_trace_gc_ignore_scavenger) {
    2029             :     PrintIsolate(
    2030             :         isolate(), "Fast promotion mode: %s survival rate: %" PRIuS "%%\n",
    2031           0 :         fast_promotion_mode_ ? "true" : "false", survived_in_new_space);
    2032             :   }
    2033      107086 : }
    2034             : 
    2035     2339880 : void Heap::UnprotectAndRegisterMemoryChunk(MemoryChunk* chunk) {
    2036     2339880 :   if (unprotected_memory_chunks_registry_enabled_) {
    2037     2099700 :     base::MutexGuard guard(&unprotected_memory_chunks_mutex_);
    2038     2099697 :     if (unprotected_memory_chunks_.insert(chunk).second) {
    2039     2093998 :       chunk->SetReadAndWritable();
    2040             :     }
    2041             :   }
    2042     2339881 : }
    2043             : 
    2044     2200383 : void Heap::UnprotectAndRegisterMemoryChunk(HeapObject object) {
    2045     2240525 :   UnprotectAndRegisterMemoryChunk(MemoryChunk::FromHeapObject(object));
    2046     2200386 : }
    2047             : 
    2048      134359 : void Heap::UnregisterUnprotectedMemoryChunk(MemoryChunk* chunk) {
    2049             :   unprotected_memory_chunks_.erase(chunk);
    2050      134359 : }
    2051             : 
    2052     4142530 : void Heap::ProtectUnprotectedMemoryChunks() {
    2053             :   DCHECK(unprotected_memory_chunks_registry_enabled_);
    2054     6191060 :   for (auto chunk = unprotected_memory_chunks_.begin();
    2055             :        chunk != unprotected_memory_chunks_.end(); chunk++) {
    2056     4188000 :     CHECK(memory_allocator()->IsMemoryChunkExecutable(*chunk));
    2057     2094000 :     (*chunk)->SetDefaultCodePermissions();
    2058             :   }
    2059             :   unprotected_memory_chunks_.clear();
    2060     2048530 : }
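The functions above implement a deferred W^X flip: a chunk is made writable at most once while registered (the std::set deduplicates), and all registered chunks are flipped back to their default code permissions in one pass. The bookkeeping pattern in isolation (names and the callback are illustrative):

    #include <functional>
    #include <set>

    struct ChunkPermissionRegistry {
      std::set<void*> unprotected;

      // Returns true (and records the chunk) only on first registration,
      // mirroring the insert(...).second check above.
      bool Register(void* chunk) { return unprotected.insert(chunk).second; }

      void ProtectAll(const std::function<void(void*)>& protect) {
        for (void* chunk : unprotected) protect(chunk);
        unprotected.clear();
      }
    };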
    2061             : 
    2062           0 : bool Heap::ExternalStringTable::Contains(String string) {
    2063           0 :   for (size_t i = 0; i < new_space_strings_.size(); ++i) {
    2064           0 :     if (new_space_strings_[i] == string) return true;
    2065             :   }
    2066           0 :   for (size_t i = 0; i < old_space_strings_.size(); ++i) {
    2067           0 :     if (old_space_strings_[i] == string) return true;
    2068             :   }
    2069             :   return false;
    2070             : }
    2071             : 
    2072         127 : String Heap::UpdateNewSpaceReferenceInExternalStringTableEntry(
    2073             :     Heap* heap, FullObjectSlot p) {
    2074             :   MapWord first_word = HeapObject::cast(*p)->map_word();
    2075             : 
    2076         127 :   if (!first_word.IsForwardingAddress()) {
    2077             :     // Unreachable external string can be finalized.
    2078         124 :     String string = String::cast(*p);
    2079         124 :     if (!string->IsExternalString()) {
    2080             :       // Original external string has been internalized.
    2081             :       DCHECK(string->IsThinString());
    2082           5 :       return String();
    2083             :     }
    2084         119 :     heap->FinalizeExternalString(string);
    2085         119 :     return String();
    2086             :   }
    2087             : 
    2088             :   // String is still reachable.
    2089           3 :   String new_string = String::cast(first_word.ToForwardingAddress());
    2090           3 :   if (new_string->IsThinString()) {
    2091             :     // Filter ThinStrings out of the external string table.
    2092           0 :     return String();
    2093           3 :   } else if (new_string->IsExternalString()) {
    2094             :     MemoryChunk::MoveExternalBackingStoreBytes(
    2095             :         ExternalBackingStoreType::kExternalString,
    2096             :         Page::FromAddress((*p).ptr()), Page::FromHeapObject(new_string),
    2097           6 :         ExternalString::cast(new_string)->ExternalPayloadSize());
    2098           3 :     return new_string;
    2099             :   }
    2100             : 
    2101             :   // Internalization can replace external strings with non-external strings.
    2102           0 :   return new_string->IsExternalString() ? new_string : String();
    2103             : }
    2104             : 
    2105           0 : void Heap::ExternalStringTable::VerifyNewSpace() {
    2106             : #ifdef DEBUG
    2107             :   std::set<String> visited_map;
    2108             :   std::map<MemoryChunk*, size_t> size_map;
    2109             :   ExternalBackingStoreType type = ExternalBackingStoreType::kExternalString;
    2110             :   for (size_t i = 0; i < new_space_strings_.size(); ++i) {
    2111             :     String obj = String::cast(new_space_strings_[i]);
    2112             :     MemoryChunk* mc = MemoryChunk::FromHeapObject(obj);
    2113             :     DCHECK(mc->InNewSpace());
    2114             :     DCHECK(heap_->InNewSpace(obj));
    2115             :     DCHECK(!obj->IsTheHole(heap_->isolate()));
    2116             :     DCHECK(obj->IsExternalString());
    2117             :     // Note: we can have repeated elements in the table.
    2118             :     DCHECK_EQ(0, visited_map.count(obj));
    2119             :     visited_map.insert(obj);
    2120             :     size_map[mc] += ExternalString::cast(obj)->ExternalPayloadSize();
    2121             :   }
    2122             :   for (std::map<MemoryChunk*, size_t>::iterator it = size_map.begin();
    2123             :        it != size_map.end(); it++)
    2124             :     DCHECK_EQ(it->first->ExternalBackingStoreBytes(type), it->second);
    2125             : #endif
    2126           0 : }
    2127             : 
    2128           0 : void Heap::ExternalStringTable::Verify() {
    2129             : #ifdef DEBUG
    2130             :   std::set<String> visited_map;
    2131             :   std::map<MemoryChunk*, size_t> size_map;
    2132             :   ExternalBackingStoreType type = ExternalBackingStoreType::kExternalString;
    2133             :   VerifyNewSpace();
    2134             :   for (size_t i = 0; i < old_space_strings_.size(); ++i) {
    2135             :     String obj = String::cast(old_space_strings_[i]);
    2136             :     MemoryChunk* mc = MemoryChunk::FromHeapObject(obj);
    2137             :     DCHECK(!mc->InNewSpace());
    2138             :     DCHECK(!heap_->InNewSpace(obj));
    2139             :     DCHECK(!obj->IsTheHole(heap_->isolate()));
    2140             :     DCHECK(obj->IsExternalString());
    2141             :     // Note: we can have repeated elements in the table.
    2142             :     DCHECK_EQ(0, visited_map.count(obj));
    2143             :     visited_map.insert(obj);
    2144             :     size_map[mc] += ExternalString::cast(obj)->ExternalPayloadSize();
    2145             :   }
    2146             :   for (std::map<MemoryChunk*, size_t>::iterator it = size_map.begin();
    2147             :        it != size_map.end(); it++)
    2148             :     DCHECK_EQ(it->first->ExternalBackingStoreBytes(type), it->second);
    2149             : #endif
    2150           0 : }
    2151             : 
    2152      107086 : void Heap::ExternalStringTable::UpdateNewSpaceReferences(
    2153             :     Heap::ExternalStringTableUpdaterCallback updater_func) {
    2154      214172 :   if (new_space_strings_.empty()) return;
    2155             : 
    2156             :   FullObjectSlot start(&new_space_strings_[0]);
    2157             :   FullObjectSlot end(&new_space_strings_[new_space_strings_.size()]);
    2158             :   FullObjectSlot last = start;
    2159             : 
    2160         188 :   for (FullObjectSlot p = start; p < end; ++p) {
    2161         148 :     String target = updater_func(heap_, p);
    2162             : 
    2163         272 :     if (target.is_null()) continue;
    2164             : 
    2165             :     DCHECK(target->IsExternalString());
    2166             : 
    2167          24 :     if (InNewSpace(target)) {
    2168             :       // String is still in new space. Update the table entry.
    2169             :       last.store(target);
    2170             :       ++last;
    2171             :     } else {
    2172             :       // String got promoted. Move it to the old string list.
    2173           0 :       old_space_strings_.push_back(target);
    2174             :     }
    2175             :   }
    2176             : 
    2177             :   DCHECK(last <= end);
    2178          20 :   new_space_strings_.resize(last - start);
    2179             : #ifdef VERIFY_HEAP
    2180             :   if (FLAG_verify_heap) {
    2181             :     VerifyNewSpace();
    2182             :   }
    2183             : #endif
    2184             : }
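The `last` slot above is the standard stable in-place filter: surviving entries are compacted toward the front and the vector is shrunk to the kept prefix, with promoted strings diverted to the old list instead. The same pattern in isolation (CompactInPlace is an illustrative name):

    #include <cstddef>
    #include <utility>
    #include <vector>

    // Keep only elements accepted by `pred`, preserving order, in place.
    template <typename T, typename Pred>
    void CompactInPlace(std::vector<T>& v, Pred pred) {
      size_t last = 0;
      for (size_t i = 0; i < v.size(); ++i) {
        if (pred(v[i])) v[last++] = std::move(v[i]);
      }
      v.resize(last);
    }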
    2185             : 
    2186           0 : void Heap::ExternalStringTable::PromoteAllNewSpaceStrings() {
    2187           0 :   old_space_strings_.reserve(old_space_strings_.size() +
    2188           0 :                              new_space_strings_.size());
    2189             :   std::move(std::begin(new_space_strings_), std::end(new_space_strings_),
    2190             :             std::back_inserter(old_space_strings_));
    2191             :   new_space_strings_.clear();
    2192           0 : }
    2193             : 
    2194       85187 : void Heap::ExternalStringTable::IterateNewSpaceStrings(RootVisitor* v) {
    2195       85187 :   if (!new_space_strings_.empty()) {
    2196             :     v->VisitRootPointers(
    2197             :         Root::kExternalStringsTable, nullptr,
    2198             :         FullObjectSlot(&new_space_strings_[0]),
    2199          70 :         FullObjectSlot(&new_space_strings_[new_space_strings_.size()]));
    2200             :   }
    2201       85187 : }
    2202             : 
    2203       85187 : void Heap::ExternalStringTable::IterateAll(RootVisitor* v) {
    2204       85187 :   IterateNewSpaceStrings(v);
    2205       85187 :   if (!old_space_strings_.empty()) {
    2206             :     v->VisitRootPointers(
    2207             :         Root::kExternalStringsTable, nullptr,
    2208             :         FullObjectSlot(old_space_strings_.data()),
    2209      255396 :         FullObjectSlot(old_space_strings_.data() + old_space_strings_.size()));
    2210             :   }
    2211       85187 : }
    2212             : 
    2213       23594 : void Heap::UpdateNewSpaceReferencesInExternalStringTable(
    2214             :     ExternalStringTableUpdaterCallback updater_func) {
    2215       23594 :   external_string_table_.UpdateNewSpaceReferences(updater_func);
    2216       23594 : }
    2217             : 
    2218       83492 : void Heap::ExternalStringTable::UpdateReferences(
    2219             :     Heap::ExternalStringTableUpdaterCallback updater_func) {
    2220      166984 :   if (old_space_strings_.size() > 0) {
    2221             :     FullObjectSlot start(old_space_strings_.data());
    2222       83382 :     FullObjectSlot end(old_space_strings_.data() + old_space_strings_.size());
    2223      286229 :     for (FullObjectSlot p = start; p < end; ++p)
    2224      238930 :       p.store(updater_func(heap_, p));
    2225             :   }
    2226             : 
    2227       83492 :   UpdateNewSpaceReferences(updater_func);
    2228       83492 : }
    2229             : 
    2230       83492 : void Heap::UpdateReferencesInExternalStringTable(
    2231             :     ExternalStringTableUpdaterCallback updater_func) {
    2232       83492 :   external_string_table_.UpdateReferences(updater_func);
    2233       83492 : }
    2234             : 
    2235             : 
    2236       83492 : void Heap::ProcessAllWeakReferences(WeakObjectRetainer* retainer) {
    2237             :   ProcessNativeContexts(retainer);
    2238             :   ProcessAllocationSites(retainer);
    2239       83492 : }
    2240             : 
    2241             : 
    2242       23594 : void Heap::ProcessYoungWeakReferences(WeakObjectRetainer* retainer) {
    2243             :   ProcessNativeContexts(retainer);
    2244       23594 : }
    2245             : 
    2246             : 
    2247           0 : void Heap::ProcessNativeContexts(WeakObjectRetainer* retainer) {
    2248      107086 :   Object head = VisitWeakList<Context>(this, native_contexts_list(), retainer);
    2249             :   // Update the head of the list of contexts.
    2250             :   set_native_contexts_list(head);
    2251           0 : }
    2252             : 
    2253             : 
    2254           0 : void Heap::ProcessAllocationSites(WeakObjectRetainer* retainer) {
    2255             :   Object allocation_site_obj =
    2256       83492 :       VisitWeakList<AllocationSite>(this, allocation_sites_list(), retainer);
    2257             :   set_allocation_sites_list(allocation_site_obj);
    2258           0 : }
    2259             : 
    2260       83492 : void Heap::ProcessWeakListRoots(WeakObjectRetainer* retainer) {
    2261       83492 :   set_native_contexts_list(retainer->RetainAs(native_contexts_list()));
    2262       83492 :   set_allocation_sites_list(retainer->RetainAs(allocation_sites_list()));
    2263       83492 : }
    2264             : 
    2265         316 : void Heap::ForeachAllocationSite(
    2266             :     Object list, const std::function<void(AllocationSite)>& visitor) {
    2267             :   DisallowHeapAllocation disallow_heap_allocation;
    2268         316 :   Object current = list;
    2269        2003 :   while (current->IsAllocationSite()) {
    2270        1371 :     AllocationSite site = AllocationSite::cast(current);
    2271        1371 :     visitor(site);
    2272        1371 :     Object current_nested = site->nested_site();
    2273        2774 :     while (current_nested->IsAllocationSite()) {
    2274          32 :       AllocationSite nested_site = AllocationSite::cast(current_nested);
    2275          32 :       visitor(nested_site);
    2276          32 :       current_nested = nested_site->nested_site();
    2277             :     }
    2278        1371 :     current = site->weak_next();
    2279             :   }
    2280         316 : }
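ForeachAllocationSite walks a singly linked weak list in which each node may carry its own nested sub-list, visiting outer and nested sites alike. The shape of that traversal over a toy node type (Site and ForEachSite are illustrative, not V8's AllocationSite):

    #include <functional>

    struct Site {
      Site* next = nullptr;    // outer weak list
      Site* nested = nullptr;  // per-site sub-list, linked via `nested`
    };

    static void ForEachSite(Site* list,
                            const std::function<void(Site*)>& visit) {
      for (Site* s = list; s != nullptr; s = s->next) {
        visit(s);
        for (Site* n = s->nested; n != nullptr; n = n->nested) visit(n);
      }
    }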
    2281             : 
    2282         154 : void Heap::ResetAllAllocationSitesDependentCode(PretenureFlag flag) {
    2283             :   DisallowHeapAllocation no_allocation_scope;
    2284         154 :   bool marked = false;
    2285             : 
    2286             :   ForeachAllocationSite(allocation_sites_list(),
    2287         970 :                         [&marked, flag, this](AllocationSite site) {
    2288         970 :                           if (site->GetPretenureMode() == flag) {
    2289           0 :                             site->ResetPretenureDecision();
    2290             :                             site->set_deopt_dependent_code(true);
    2291           0 :                             marked = true;
    2292           0 :                             RemoveAllocationSitePretenuringFeedback(site);
    2293         970 :                             return;
    2294             :                           }
    2295         308 :                         });
    2296         154 :   if (marked) isolate_->stack_guard()->RequestDeoptMarkedAllocationSites();
    2297         154 : }
    2298             : 
    2299             : 
    2300       83492 : void Heap::EvaluateOldSpaceLocalPretenuring(
    2301             :     uint64_t size_of_objects_before_gc) {
    2302             :   uint64_t size_of_objects_after_gc = SizeOfObjects();
    2303             :   double old_generation_survival_rate =
    2304       83492 :       (static_cast<double>(size_of_objects_after_gc) * 100) /
    2305       83492 :       static_cast<double>(size_of_objects_before_gc);
    2306             : 
    2307       83492 :   if (old_generation_survival_rate < kOldSurvivalRateLowThreshold) {
    2308             :     // Too many objects died in the old generation, pretenuring of wrong
    2309             :     // allocation sites may be the cause for that. We have to deopt all
    2310             :     // dependent code registered in the allocation sites to re-evaluate
    2311             :     // our pretenuring decisions.
    2312         154 :     ResetAllAllocationSitesDependentCode(TENURED);
    2313         154 :     if (FLAG_trace_pretenuring) {
    2314             :       PrintF(
    2315             :           "Deopt all allocation sites dependent code due to low survival "
    2316             :           "rate in the old generation %f\n",
    2317           0 :           old_generation_survival_rate);
    2318             :     }
    2319             :   }
    2320       83492 : }
    2321             : 
    2322             : 
    2323           5 : void Heap::VisitExternalResources(v8::ExternalResourceVisitor* visitor) {
    2324             :   DisallowHeapAllocation no_allocation;
    2325             :   // All external strings are listed in the external string table.
    2326             : 
    2327           0 :   class ExternalStringTableVisitorAdapter : public RootVisitor {
    2328             :    public:
    2329             :     explicit ExternalStringTableVisitorAdapter(
    2330             :         Isolate* isolate, v8::ExternalResourceVisitor* visitor)
    2331           5 :         : isolate_(isolate), visitor_(visitor) {}
    2332           5 :     void VisitRootPointers(Root root, const char* description,
    2333             :                            FullObjectSlot start, FullObjectSlot end) override {
    2334          35 :       for (FullObjectSlot p = start; p < end; ++p) {
    2335             :         DCHECK((*p)->IsExternalString());
    2336             :         visitor_->VisitExternalString(
    2337          50 :             Utils::ToLocal(Handle<String>(String::cast(*p), isolate_)));
    2338             :       }
    2339           5 :     }
    2340             : 
    2341             :    private:
    2342             :     Isolate* isolate_;
    2343             :     v8::ExternalResourceVisitor* visitor_;
    2344             :   } external_string_table_visitor(isolate(), visitor);
    2345             : 
    2346           5 :   external_string_table_.IterateAll(&external_string_table_visitor);
    2347           5 : }
    2348             : 
    2349             : STATIC_ASSERT((FixedDoubleArray::kHeaderSize & kDoubleAlignmentMask) ==
    2350             :               0);  // NOLINT
    2351             : STATIC_ASSERT((FixedTypedArrayBase::kDataOffset & kDoubleAlignmentMask) ==
    2352             :               0);  // NOLINT
    2353             : #ifdef V8_HOST_ARCH_32_BIT
    2354             : STATIC_ASSERT((HeapNumber::kValueOffset & kDoubleAlignmentMask) !=
    2355             :               0);  // NOLINT
    2356             : #endif
    2357             : 
    2358             : 
    2359          15 : int Heap::GetMaximumFillToAlign(AllocationAlignment alignment) {
    2360          15 :   switch (alignment) {
    2361             :     case kWordAligned:
    2362             :       return 0;
    2363             :     case kDoubleAligned:
    2364             :     case kDoubleUnaligned:
    2365             :       return kDoubleSize - kTaggedSize;
    2366             :     default:
    2367           0 :       UNREACHABLE();
    2368             :   }
    2369             :   return 0;
    2370             : }
    2371             : 
    2372             : 
    2373    89848730 : int Heap::GetFillToAlign(Address address, AllocationAlignment alignment) {
    2374    89848730 :   if (alignment == kDoubleAligned && (address & kDoubleAlignmentMask) != 0)
    2375             :     return kTaggedSize;
    2376             :   if (alignment == kDoubleUnaligned && (address & kDoubleAlignmentMask) == 0)
    2377             :     return kDoubleSize - kTaggedSize;  // No fill if double is always aligned.
    2378             :   return 0;
    2379             : }
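GetFillToAlign returns the padding needed so that a kDoubleAligned allocation starts on an 8-byte boundary (and a kDoubleUnaligned one deliberately does not). The aligned case in isolation, assuming 8-byte doubles and 4-byte tagged slots as on a 32-bit heap (all constants illustrative):

    #include <cstdint>

    constexpr uintptr_t kDouble = 8;
    constexpr uintptr_t kTagged = 4;
    constexpr uintptr_t kMask = kDouble - 1;

    // Padding before an allocation at `address` so a double-aligned object
    // starts on an 8-byte boundary: one tagged word or nothing.
    static int FillToDoubleAlign(uintptr_t address) {
      return (address & kMask) != 0 ? static_cast<int>(kTagged) : 0;
    }
    // FillToDoubleAlign(0x1000) == 0; FillToDoubleAlign(0x1004) == 4.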
    2380             : 
    2381           0 : HeapObject Heap::PrecedeWithFiller(HeapObject object, int filler_size) {
    2382           0 :   CreateFillerObjectAt(object->address(), filler_size, ClearRecordedSlots::kNo);
    2383           0 :   return HeapObject::FromAddress(object->address() + filler_size);
    2384             : }
    2385             : 
    2386           0 : HeapObject Heap::AlignWithFiller(HeapObject object, int object_size,
    2387             :                                  int allocation_size,
    2388             :                                  AllocationAlignment alignment) {
    2389           0 :   int filler_size = allocation_size - object_size;
    2390             :   DCHECK_LT(0, filler_size);
    2391             :   int pre_filler = GetFillToAlign(object->address(), alignment);
    2392           0 :   if (pre_filler) {
    2393           0 :     object = PrecedeWithFiller(object, pre_filler);
    2394           0 :     filler_size -= pre_filler;
    2395             :   }
    2396           0 :   if (filler_size) {
    2397             :     CreateFillerObjectAt(object->address() + object_size, filler_size,
    2398           0 :                          ClearRecordedSlots::kNo);
    2399             :   }
    2400           0 :   return object;
    2401             : }
    2402             : 
    2403      453109 : void Heap::RegisterNewArrayBuffer(JSArrayBuffer buffer) {
    2404      453109 :   ArrayBufferTracker::RegisterNew(this, buffer);
    2405      453127 : }
    2406             : 
    2407        5595 : void Heap::UnregisterArrayBuffer(JSArrayBuffer buffer) {
    2408        5595 :   ArrayBufferTracker::Unregister(this, buffer);
    2409        5595 : }
    2410             : 
    2411      162241 : void Heap::ConfigureInitialOldGenerationSize() {
    2412      125491 :   if (!old_generation_size_configured_ && tracer()->SurvivalEventsRecorded()) {
    2413             :     old_generation_allocation_limit_ =
    2414       18375 :         Max(OldGenerationSizeOfObjects() +
    2415             :                 heap_controller()->MinimumAllocationLimitGrowingStep(
    2416       36750 :                     CurrentHeapGrowingMode()),
    2417             :             static_cast<size_t>(
    2418       36750 :                 static_cast<double>(old_generation_allocation_limit_) *
    2419       73500 :                 (tracer()->AverageSurvivalRatio() / 100)));
    2420             :   }
    2421      107086 : }
    2422             : 
    2423       83492 : void Heap::FlushNumberStringCache() {
    2424             :   // Flush the number to string cache.
    2425      166984 :   int len = number_string_cache()->length();
    2426   357567524 :   for (int i = 0; i < len; i++) {
    2427   357484032 :     number_string_cache()->set_undefined(i);
    2428             :   }
    2429       83492 : }
    2430             : 
    2431    91259181 : HeapObject Heap::CreateFillerObjectAt(Address addr, int size,
    2432             :                                       ClearRecordedSlots clear_slots_mode,
    2433             :                                       ClearFreedMemoryMode clear_memory_mode) {
    2434    91259181 :   if (size == 0) return HeapObject();
    2435    90054775 :   HeapObject filler = HeapObject::FromAddress(addr);
    2436    90054775 :   if (size == kTaggedSize) {
    2437             :     filler->set_map_after_allocation(
    2438             :         Map::unchecked_cast(isolate()->root(RootIndex::kOnePointerFillerMap)),
    2439     3315847 :         SKIP_WRITE_BARRIER);
    2440    86738928 :   } else if (size == 2 * kTaggedSize) {
    2441             :     filler->set_map_after_allocation(
    2442             :         Map::unchecked_cast(isolate()->root(RootIndex::kTwoPointerFillerMap)),
    2443     3742234 :         SKIP_WRITE_BARRIER);
    2444     3742235 :     if (clear_memory_mode == ClearFreedMemoryMode::kClearFreedMemory) {
    2445       14825 :       Memory<Tagged_t>(addr + kTaggedSize) =
    2446       14825 :           static_cast<Tagged_t>(kClearedFreeMemoryValue);
    2447             :     }
    2448             :   } else {
    2449             :     DCHECK_GT(size, 2 * kTaggedSize);
    2450             :     filler->set_map_after_allocation(
    2451             :         Map::unchecked_cast(isolate()->root(RootIndex::kFreeSpaceMap)),
    2452    82996694 :         SKIP_WRITE_BARRIER);
    2453             :     FreeSpace::cast(filler)->relaxed_write_size(size);
    2454    82987626 :     if (clear_memory_mode == ClearFreedMemoryMode::kClearFreedMemory) {
    2455             :       MemsetTagged(ObjectSlot(addr) + 2, Object(kClearedFreeMemoryValue),
    2456      220525 :                    (size / kTaggedSize) - 2);
    2457             :     }
    2458             :   }
    2459    90037792 :   if (clear_slots_mode == ClearRecordedSlots::kYes) {
    2460     1955743 :     ClearRecordedSlotRange(addr, addr + size);
    2461             :   }
    2462             : 
    2463             :   // At this point, we may be deserializing the heap from a snapshot, in which
    2464             :   // case the maps have not been created yet and the map slots are nullptr.
    2465             :   DCHECK((filler->map_slot().contains_value(kNullAddress) &&
    2466             :           !deserialization_complete_) ||
    2467             :          filler->map()->IsMap());
    2468    90037792 :   return filler;
    2469             : }
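CreateFillerObjectAt picks one of three filler shapes by size: a single tagged word carries only a one-pointer filler map, two words carry a two-pointer filler map, and anything larger becomes a FreeSpace object whose second word records its size so heap iteration can skip it. The size classing in isolation (enum and names illustrative):

    enum class FillerKind { kOneWord, kTwoWord, kFreeSpace };

    static FillerKind ClassifyFiller(int size, int tagged_size) {
      if (size == tagged_size) return FillerKind::kOneWord;
      if (size == 2 * tagged_size) return FillerKind::kTwoWord;
      return FillerKind::kFreeSpace;  // size > 2 * tagged_size, length field
    }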
    2470             : 
    2471      181688 : bool Heap::CanMoveObjectStart(HeapObject object) {
    2472      181688 :   if (!FLAG_move_object_start) return false;
    2473             : 
    2474             :   // Sampling heap profiler may have a reference to the object.
    2475      363376 :   if (isolate()->heap_profiler()->is_sampling_allocations()) return false;
    2476             : 
    2477      181688 :   if (IsLargeObject(object)) return false;
    2478             : 
    2479             :   // We can move the object start if the page was already swept.
    2480      181674 :   return Page::FromHeapObject(object)->SweepingDone();
    2481             : }
    2482             : 
    2483       90268 : bool Heap::IsImmovable(HeapObject object) {
    2484             :   MemoryChunk* chunk = MemoryChunk::FromHeapObject(object);
    2485      170553 :   return chunk->NeverEvacuate() || IsLargeObject(object);
    2486             : }
    2487             : 
    2488       89142 : bool Heap::IsLargeObject(HeapObject object) {
    2489     1821289 :   return IsLargeMemoryChunk(MemoryChunk::FromHeapObject(object));
    2490             : }
    2491             : 
    2492     2797604 : bool Heap::IsLargeMemoryChunk(MemoryChunk* chunk) {
    2493     5595203 :   return chunk->owner()->identity() == NEW_LO_SPACE ||
    2494     8367752 :          chunk->owner()->identity() == LO_SPACE ||
    2495     5570153 :          chunk->owner()->identity() == CODE_LO_SPACE;
    2496             : }
    2497             : 
    2498           0 : bool Heap::IsInYoungGeneration(HeapObject object) {
    2499           0 :   if (MemoryChunk::FromHeapObject(object)->IsInNewLargeObjectSpace()) {
    2500           0 :     return !object->map_word().IsForwardingAddress();
    2501             :   }
    2502           0 :   return Heap::InNewSpace(object);
    2503             : }
    2504             : 
    2505             : #ifdef ENABLE_SLOW_DCHECKS
    2506             : namespace {
    2507             : 
    2508             : class LeftTrimmerVerifierRootVisitor : public RootVisitor {
    2509             :  public:
    2510             :   explicit LeftTrimmerVerifierRootVisitor(FixedArrayBase to_check)
    2511             :       : to_check_(to_check) {}
    2512             : 
    2513             :   void VisitRootPointers(Root root, const char* description,
    2514             :                          FullObjectSlot start, FullObjectSlot end) override {
    2515             :     for (FullObjectSlot p = start; p < end; ++p) {
    2516             :       DCHECK_NE(*p, to_check_);
    2517             :     }
    2518             :   }
    2519             : 
    2520             :  private:
    2521             :   FixedArrayBase to_check_;
    2522             : 
    2523             :   DISALLOW_COPY_AND_ASSIGN(LeftTrimmerVerifierRootVisitor);
    2524             : };
    2525             : }  // namespace
    2526             : #endif  // ENABLE_SLOW_DCHECKS
    2527             : 
    2528             : namespace {
    2529      149845 : bool MayContainRecordedSlots(HeapObject object) {
    2530             :   // New space objects do not have recorded slots.
    2531      299690 :   if (MemoryChunk::FromHeapObject(object)->InNewSpace()) return false;
    2532             :   // Whitelist objects that definitely do not have pointers.
    2533        1856 :   if (object->IsByteArray() || object->IsFixedDoubleArray()) return false;
    2534             :   // Conservatively return true for other objects.
    2535         928 :   return true;
    2536             : }
    2537             : }  // namespace
    2538             : 
    2539      181815 : FixedArrayBase Heap::LeftTrimFixedArray(FixedArrayBase object,
    2540      363630 :                                         int elements_to_trim) {
    2541      181815 :   if (elements_to_trim == 0) {
    2542             :     // This simplifies reasoning in the rest of the function.
    2543           0 :     return object;
    2544             :   }
    2545      181815 :   CHECK(!object.is_null());
    2546             :   DCHECK(CanMoveObjectStart(object));
    2547             :   // Add a custom visitor to the concurrent marker if a new left-trimmable
    2548             :   // type is added.
    2549             :   DCHECK(object->IsFixedArray() || object->IsFixedDoubleArray());
    2550             :   const int element_size = object->IsFixedArray() ? kTaggedSize : kDoubleSize;
    2551      181815 :   const int bytes_to_trim = elements_to_trim * element_size;
    2552             :   Map map = object->map();
    2553             : 
    2554             :   // For now this trick is only applied to fixed arrays, which may be in new
    2555             :   // space or old space. In a large object space the object's start must
    2556             :   // coincide with the chunk's start, so the trick is not applicable there.
    2557             :   DCHECK(!IsLargeObject(object));
    2558             :   DCHECK(object->map() != ReadOnlyRoots(this).fixed_cow_array_map());
    2559             : 
    2560             :   STATIC_ASSERT(FixedArrayBase::kMapOffset == 0);
    2561             :   STATIC_ASSERT(FixedArrayBase::kLengthOffset == kTaggedSize);
    2562             :   STATIC_ASSERT(FixedArrayBase::kHeaderSize == 2 * kTaggedSize);
    2563             : 
    2564             :   const int len = object->length();
    2565             :   DCHECK(elements_to_trim <= len);
    2566             : 
    2567             :   // Calculate location of new array start.
    2568             :   Address old_start = object->address();
    2569      181815 :   Address new_start = old_start + bytes_to_trim;
    2570             : 
    2571      181815 :   if (incremental_marking()->IsMarking()) {
    2572             :     incremental_marking()->NotifyLeftTrimming(
    2573         182 :         object, HeapObject::FromAddress(new_start));
    2574             :   }
    2575             : 
    2576             :   // Technically in new space this write might be omitted (except for
    2577             :   // debug mode, which iterates through the heap), but to play it safe
    2578             :   // we still do it.
    2579             :   HeapObject filler =
    2580      181815 :       CreateFillerObjectAt(old_start, bytes_to_trim, ClearRecordedSlots::kYes);
    2581             : 
    2582             :   // Initialize the header of the trimmed array. Since left trimming is only
    2583             :   // performed on pages which are not concurrently swept, creating a filler
    2584             :   // object does not require synchronization.
    2585      181815 :   RELAXED_WRITE_FIELD(object, bytes_to_trim, map);
    2586      363630 :   RELAXED_WRITE_FIELD(object, bytes_to_trim + kTaggedSize,
    2587             :                       Smi::FromInt(len - elements_to_trim));
    2588             : 
    2589             :   FixedArrayBase new_object =
    2590      181815 :       FixedArrayBase::cast(HeapObject::FromAddress(new_start));
    2591             : 
    2592             :   // Remove recorded slots for the new map and length offset.
    2593             :   ClearRecordedSlot(new_object, HeapObject::RawField(new_object, 0));
    2594             :   ClearRecordedSlot(new_object, HeapObject::RawField(
    2595             :                                     new_object, FixedArrayBase::kLengthOffset));
    2596             : 
    2597             :   // Handle invalidated old-to-old slots.
    2598      181822 :   if (incremental_marking()->IsCompacting() &&
    2599           7 :       MayContainRecordedSlots(new_object)) {
    2600             :     // If the array was right-trimmed before, then it is registered in
    2601             :     // the invalidated_slots.
    2602             :     MemoryChunk::FromHeapObject(new_object)
    2603           5 :         ->MoveObjectWithInvalidatedSlots(filler, new_object);
    2604             :     // We have to clear slots in the free space to avoid stale old-to-old
    2605             :     // slots. Note we cannot use ClearFreedMemoryMode of CreateFillerObjectAt
    2606             :     // because we need pointer-granularity writes to avoid racing with the
    2607             :     // concurrent marker.
    2608           5 :     if (filler->Size() > FreeSpace::kSize) {
    2609             :       MemsetTagged(HeapObject::RawField(filler, FreeSpace::kSize),
    2610             :                    ReadOnlyRoots(this).undefined_value(),
    2611           5 :                    (filler->Size() - FreeSpace::kSize) / kTaggedSize);
    2612             :     }
    2613             :   }
    2614             :   // Notify the heap profiler of change in object layout.
    2615      181815 :   OnMoveEvent(new_object, object, new_object->Size());
    2616             : 
    2617             : #ifdef ENABLE_SLOW_DCHECKS
    2618             :   if (FLAG_enable_slow_asserts) {
    2619             :     // Make sure the stack or other roots (e.g., Handles) don't contain pointers
    2620             :     // to the original FixedArray (which is now the filler object).
    2621             :     LeftTrimmerVerifierRootVisitor root_visitor(object);
    2622             :     ReadOnlyRoots(this).Iterate(&root_visitor);
    2623             :     IterateRoots(&root_visitor, VISIT_ALL);
    2624             :   }
    2625             : #endif  // ENABLE_SLOW_DCHECKS
    2626             : 
    2627      181815 :   return new_object;
    2628             : }
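Left trimming moves no element bytes: it plants a filler over the trimmed prefix, rewrites a fresh map and length immediately before the surviving elements, and returns the interior address as the new object start. The address arithmetic in isolation (the two-word header mirrors the STATIC_ASSERTs above; LeftTrim itself is illustrative):

    #include <cstdint>

    struct TrimResult {
      uintptr_t new_start;  // old_start + bytes_to_trim; header written here
      int new_length;
    };

    static TrimResult LeftTrim(uintptr_t old_start, int old_length,
                               int elements_to_trim, int element_size) {
      uintptr_t bytes_to_trim =
          static_cast<uintptr_t>(elements_to_trim) * element_size;
      return {old_start + bytes_to_trim, old_length - elements_to_trim};
    }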
    2629             : 
    2630     1452524 : void Heap::RightTrimFixedArray(FixedArrayBase object, int elements_to_trim) {
    2631             :   const int len = object->length();
    2632             :   DCHECK_LE(elements_to_trim, len);
    2633             :   DCHECK_GE(elements_to_trim, 0);
    2634             : 
    2635             :   int bytes_to_trim;
    2636             :   DCHECK(!object->IsFixedTypedArrayBase());
    2637     1452524 :   if (object->IsByteArray()) {
    2638       10240 :     int new_size = ByteArray::SizeFor(len - elements_to_trim);
    2639       10240 :     bytes_to_trim = ByteArray::SizeFor(len) - new_size;
    2640             :     DCHECK_GE(bytes_to_trim, 0);
    2641     1442284 :   } else if (object->IsFixedArray()) {
    2642     1420593 :     CHECK_NE(elements_to_trim, len);
    2643     1420593 :     bytes_to_trim = elements_to_trim * kTaggedSize;
    2644             :   } else {
    2645             :     DCHECK(object->IsFixedDoubleArray());
    2646       21691 :     CHECK_NE(elements_to_trim, len);
    2647       21691 :     bytes_to_trim = elements_to_trim * kDoubleSize;
    2648             :   }
    2649             : 
    2650     1452524 :   CreateFillerForArray<FixedArrayBase>(object, elements_to_trim, bytes_to_trim);
    2651     1452524 : }
    2652             : 
    2653       17650 : void Heap::RightTrimWeakFixedArray(WeakFixedArray object,
    2654             :                                    int elements_to_trim) {
    2655             :   // This function is safe to use only at the end of the mark compact
    2656             :   // collection: When marking, we record the weak slots, and shrinking
    2657             :   // invalidates them.
    2658             :   DCHECK_EQ(gc_state(), MARK_COMPACT);
    2659             :   CreateFillerForArray<WeakFixedArray>(object, elements_to_trim,
    2660       17650 :                                        elements_to_trim * kTaggedSize);
    2661       17650 : }
    2662             : 
    2663             : template <typename T>
    2664     1470174 : void Heap::CreateFillerForArray(T object, int elements_to_trim,
    2665             :                                 int bytes_to_trim) {
    2666             :   DCHECK(object->IsFixedArrayBase() || object->IsByteArray() ||
    2667             :          object->IsWeakFixedArray());
    2668             : 
    2669             :   // For now this trick is only applied to objects in new and paged space.
    2670             :   DCHECK(object->map() != ReadOnlyRoots(this).fixed_cow_array_map());
    2671             : 
    2672     1470174 :   if (bytes_to_trim == 0) {
    2673             :     DCHECK_EQ(elements_to_trim, 0);
    2674             :     // No need to create filler and update live bytes counters.
    2675     1470174 :     return;
    2676             :   }
    2677             : 
    2678             :   // Calculate location of new array end.
    2679     1470174 :   int old_size = object->Size();
    2680     1470174 :   Address old_end = object->address() + old_size;
    2681     1470174 :   Address new_end = old_end - bytes_to_trim;
    2682             : 
    2683             :   // Register the array as an object with invalidated old-to-old slots. We
    2684             :   // cannot use NotifyObjectLayoutChange as it would mark the array black,
    2685             :   // which is not safe for left-trimming because left-trimming re-pushes
    2686             :   // only grey arrays onto the marking worklist.
    2687     1472499 :   if (incremental_marking()->IsCompacting() &&
    2688        2325 :       MayContainRecordedSlots(object)) {
    2689             :     // Ensure that the object survives because the InvalidatedSlotsFilter will
    2690             :     // compute its size from its map during pointers updating phase.
    2691          60 :     incremental_marking()->WhiteToGreyAndPush(object);
    2692          60 :     MemoryChunk::FromHeapObject(object)->RegisterObjectWithInvalidatedSlots(
    2693             :         object, old_size);
    2694             :   }
    2695             : 
    2696             :   // Technically in new space this write might be omitted (except for
    2697             :   // debug mode, which iterates through the heap), but to play it safe
    2698             :   // we still do it.
    2699             :   // We do not create a filler for objects in a large object space.
    2700     1470174 :   if (!IsLargeObject(object)) {
    2701             :     HeapObject filler =
    2702     1470093 :         CreateFillerObjectAt(new_end, bytes_to_trim, ClearRecordedSlots::kYes);
    2703             :     DCHECK(!filler.is_null());
    2704             :     // Clear the mark bits of the black area that belongs now to the filler.
    2705             :     // This is an optimization. The sweeper will release black fillers anyway.
    2706     1590033 :     if (incremental_marking()->black_allocation() &&
    2707             :         incremental_marking()->marking_state()->IsBlackOrGrey(filler)) {
    2708         157 :       Page* page = Page::FromAddress(new_end);
    2709         157 :       incremental_marking()->marking_state()->bitmap(page)->ClearRange(
    2710             :           page->AddressToMarkbitIndex(new_end),
    2711         157 :           page->AddressToMarkbitIndex(new_end + bytes_to_trim));
    2712             :     }
    2713             :   }
    2714             : 
    2715             :   // Initialize the header of the trimmed array. We store the new length
    2716             :   // using a release store after creating a filler for the left-over space to
    2717             :   // avoid races with the sweeper thread.
    2718     1470174 :   object->synchronized_set_length(object->length() - elements_to_trim);
    2719             : 
    2720             :   // Notify the heap object allocation tracker of the change in object layout.
    2721             :   // The array may not move during GC, but its size still has to be adjusted.
    2722     2943155 :   for (auto& tracker : allocation_trackers_) {
    2723        5614 :     tracker->UpdateObjectSizeEvent(object->address(), object->Size());
    2724             :   }
    2725             : }
    2726             : 
    2727        7853 : void Heap::MakeHeapIterable() {
    2728        7853 :   mark_compact_collector()->EnsureSweepingCompleted();
    2729           0 : }
    2730             : 
    2731             : 
    2732             : static double ComputeMutatorUtilization(double mutator_speed, double gc_speed) {
    2733             :   const double kMinMutatorUtilization = 0.0;
    2734             :   const double kConservativeGcSpeedInBytesPerMillisecond = 200000;
    2735       23623 :   if (mutator_speed == 0) return kMinMutatorUtilization;
    2736       21469 :   if (gc_speed == 0) gc_speed = kConservativeGcSpeedInBytesPerMillisecond;
    2737             :   // Derivation:
    2738             :   // mutator_utilization = mutator_time / (mutator_time + gc_time)
    2739             :   // mutator_time = 1 / mutator_speed
    2740             :   // gc_time = 1 / gc_speed
    2741             :   // mutator_utilization = (1 / mutator_speed) /
    2742             :   //                       (1 / mutator_speed + 1 / gc_speed)
    2743             :   // mutator_utilization = gc_speed / (mutator_speed + gc_speed)
    2744       21469 :   return gc_speed / (mutator_speed + gc_speed);
    2745             : }
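Plugging illustrative numbers into the derivation above: with mutator_speed = 1000 bytes/ms and gc_speed = 200000 bytes/ms, utilization is 200000 / 201000, roughly 0.995, so the mutator gets about 99.5% of the wall clock. A tiny standalone check with those assumed values:

    #include <cstdio>

    int main() {
      const double mutator_speed = 1000;  // bytes/ms, illustrative
      const double gc_speed = 200000;     // bytes/ms, illustrative
      std::printf("utilization = %.3f\n",
                  gc_speed / (mutator_speed + gc_speed));  // prints 0.995
    }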
    2746             : 
    2747             : 
    2748       47230 : double Heap::YoungGenerationMutatorUtilization() {
    2749             :   double mutator_speed = static_cast<double>(
    2750       23615 :       tracer()->NewSpaceAllocationThroughputInBytesPerMillisecond());
    2751             :   double gc_speed =
    2752       23615 :       tracer()->ScavengeSpeedInBytesPerMillisecond(kForSurvivedObjects);
    2753             :   double result = ComputeMutatorUtilization(mutator_speed, gc_speed);
    2754       23615 :   if (FLAG_trace_mutator_utilization) {
    2755             :     isolate()->PrintWithTimestamp(
    2756             :         "Young generation mutator utilization = %.3f ("
    2757             :         "mutator_speed=%.f, gc_speed=%.f)\n",
    2758           0 :         result, mutator_speed, gc_speed);
    2759             :   }
    2760       23615 :   return result;
    2761             : }
    2762             : 
    2763             : 
    2764          16 : double Heap::OldGenerationMutatorUtilization() {
    2765             :   double mutator_speed = static_cast<double>(
    2766           8 :       tracer()->OldGenerationAllocationThroughputInBytesPerMillisecond());
    2767             :   double gc_speed = static_cast<double>(
    2768           8 :       tracer()->CombinedMarkCompactSpeedInBytesPerMillisecond());
    2769             :   double result = ComputeMutatorUtilization(mutator_speed, gc_speed);
    2770           8 :   if (FLAG_trace_mutator_utilization) {
    2771             :     isolate()->PrintWithTimestamp(
    2772             :         "Old generation mutator utilization = %.3f ("
    2773             :         "mutator_speed=%.f, gc_speed=%.f)\n",
    2774           0 :         result, mutator_speed, gc_speed);
    2775             :   }
    2776           8 :   return result;
    2777             : }
    2778             : 
    2779             : 
    2780           0 : bool Heap::HasLowYoungGenerationAllocationRate() {
    2781             :   const double high_mutator_utilization = 0.993;
    2782       23615 :   return YoungGenerationMutatorUtilization() > high_mutator_utilization;
    2783             : }
    2784             : 
    2785             : 
    2786           0 : bool Heap::HasLowOldGenerationAllocationRate() {
    2787             :   const double high_mutator_utilization = 0.993;
    2788           8 :   return OldGenerationMutatorUtilization() > high_mutator_utilization;
    2789             : }
    2790             : 
    2791             : 
    2792          21 : bool Heap::HasLowAllocationRate() {
    2793          29 :   return HasLowYoungGenerationAllocationRate() &&
    2794          21 :          HasLowOldGenerationAllocationRate();
    2795             : }
    2796             : 
    2797           0 : bool Heap::IsIneffectiveMarkCompact(size_t old_generation_size,
    2798             :                                     double mutator_utilization) {
    2799             :   const double kHighHeapPercentage = 0.8;
    2800             :   const double kLowMutatorUtilization = 0.4;
    2801       82777 :   return old_generation_size >=
    2802       82777 :              kHighHeapPercentage * max_old_generation_size_ &&
    2803           0 :          mutator_utilization < kLowMutatorUtilization;
    2804             : }
    2805             : 
    2806       83492 : void Heap::CheckIneffectiveMarkCompact(size_t old_generation_size,
    2807             :                                        double mutator_utilization) {
    2808             :   const int kMaxConsecutiveIneffectiveMarkCompacts = 4;
    2809       83492 :   if (!FLAG_detect_ineffective_gcs_near_heap_limit) return;
    2810       82777 :   if (!IsIneffectiveMarkCompact(old_generation_size, mutator_utilization)) {
    2811       82765 :     consecutive_ineffective_mark_compacts_ = 0;
    2812       82765 :     return;
    2813             :   }
    2814          12 :   ++consecutive_ineffective_mark_compacts_;
    2815          12 :   if (consecutive_ineffective_mark_compacts_ ==
    2816             :       kMaxConsecutiveIneffectiveMarkCompacts) {
    2817           0 :     if (InvokeNearHeapLimitCallback()) {
    2818             :       // The callback increased the heap limit.
    2819           0 :       consecutive_ineffective_mark_compacts_ = 0;
    2820           0 :       return;
    2821             :     }
    2822           0 :     FatalProcessOutOfMemory("Ineffective mark-compacts near heap limit");
    2823             :   }
    2824             : }
    2825             : 
    2826           0 : bool Heap::HasHighFragmentation() {
    2827           0 :   size_t used = OldGenerationSizeOfObjects();
    2828           0 :   size_t committed = CommittedOldGenerationMemory();
    2829           0 :   return HasHighFragmentation(used, committed);
    2830             : }
    2831             : 
    2832           0 : bool Heap::HasHighFragmentation(size_t used, size_t committed) {
    2833             :   const size_t kSlack = 16 * MB;
    2834             :   // Fragmentation is high if committed > 2 * used + kSlack.
    2835             :   // Rewrite the exression to avoid overflow.
    2836             :   DCHECK_GE(committed, used);
    2837       82816 :   return committed - used > used + kSlack;
    2838             : }
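The comparison above is the overflow-safe form of "committed > 2 * used + kSlack": since committed >= used is guaranteed by the DCHECK, subtracting `used` from both sides avoids computing 2 * used, which could overflow size_t on a very large heap. The same check in isolation (IsHighlyFragmented is an illustrative name):

    #include <cstddef>

    static bool IsHighlyFragmented(size_t used, size_t committed) {
      const size_t kSlack = size_t{16} * 1024 * 1024;  // 16 MB, as above
      return committed - used > used + kSlack;         // needs committed >= used
    }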
    2839             : 
    2840     1929533 : bool Heap::ShouldOptimizeForMemoryUsage() {
    2841     1929533 :   const size_t kOldGenerationSlack = max_old_generation_size_ / 8;
    2842     3859069 :   return FLAG_optimize_for_size || isolate()->IsIsolateInBackground() ||
    2843     7718129 :          isolate()->IsMemorySavingsModeActive() || HighMemoryPressure() ||
    2844     3859059 :          !CanExpandOldGeneration(kOldGenerationSlack);
    2845             : }
    2846             : 
    2847           0 : void Heap::ActivateMemoryReducerIfNeeded() {
    2848             :   // Activate the memory reducer when switching to the background if:
    2849             :   // - there has been no mark-compact since the start, and
    2850             :   // - the committed memory can potentially be reduced.
    2851             :   // 2 pages for each of the old, code, and map spaces + 1 page for new space.
    2852             :   const int kMinCommittedMemory = 7 * Page::kPageSize;
    2853           0 :   if (ms_count_ == 0 && CommittedMemory() > kMinCommittedMemory &&
    2854           0 :       isolate()->IsIsolateInBackground()) {
    2855             :     MemoryReducer::Event event;
    2856           0 :     event.type = MemoryReducer::kPossibleGarbage;
    2857           0 :     event.time_ms = MonotonicallyIncreasingTimeInMs();
    2858           0 :     memory_reducer_->NotifyPossibleGarbage(event);
    2859             :   }
    2860           0 : }
    2861             : 
    2862      213674 : void Heap::ReduceNewSpaceSize() {
    2863             :   // TODO(ulan): Unify this constant with the similar constant in
    2864             :   // GCIdleTimeHandler once the change is merged to 4.5.
    2865             :   static const size_t kLowAllocationThroughput = 1000;
    2866             :   const double allocation_throughput =
    2867      107086 :       tracer()->CurrentAllocationThroughputInBytesPerMillisecond();
    2868             : 
    2869      214172 :   if (FLAG_predictable) return;
    2870             : 
    2871      213176 :   if (ShouldReduceMemory() ||
    2872       74314 :       ((allocation_throughput != 0) &&
    2873             :        (allocation_throughput < kLowAllocationThroughput))) {
    2874       25350 :     new_space_->Shrink();
    2875             :     UncommitFromSpace();
    2876             :   }
    2877             : }
    2878             : 
    2879       37654 : void Heap::FinalizeIncrementalMarkingIfComplete(
    2880       79576 :     GarbageCollectionReason gc_reason) {
    2881      111788 :   if (incremental_marking()->IsMarking() &&
    2882       60500 :       (incremental_marking()->IsReadyToOverApproximateWeakClosure() ||
    2883        6888 :        (!incremental_marking()->finalize_marking_completed() &&
    2884        6888 :         mark_compact_collector()->marking_worklist()->IsEmpty() &&
    2885           0 :         local_embedder_heap_tracer()->ShouldFinalizeIncrementalMarking()))) {
    2886       12460 :     FinalizeIncrementalMarkingIncrementally(gc_reason);
    2887       59054 :   } else if (incremental_marking()->IsComplete() ||
    2888        9840 :              (mark_compact_collector()->marking_worklist()->IsEmpty() &&
    2889             :               local_embedder_heap_tracer()
    2890        1174 :                   ->ShouldFinalizeIncrementalMarking())) {
    2891       17702 :     CollectAllGarbage(current_gc_flags_, gc_reason, current_gc_callback_flags_);
    2892             :   }
    2893       37654 : }
    2894             : 
    2895           5 : void Heap::FinalizeIncrementalMarkingAtomically(
    2896             :     GarbageCollectionReason gc_reason) {
    2897             :   DCHECK(!incremental_marking()->IsStopped());
    2898        2699 :   CollectAllGarbage(current_gc_flags_, gc_reason, current_gc_callback_flags_);
    2899           5 : }
    2900             : 
    2901       24018 : void Heap::FinalizeIncrementalMarkingIncrementally(
    2902       96072 :     GarbageCollectionReason gc_reason) {
    2903       24018 :   if (FLAG_trace_incremental_marking) {
    2904             :     isolate()->PrintWithTimestamp(
    2905             :         "[IncrementalMarking] (%s).\n",
    2906           0 :         Heap::GarbageCollectionReasonToString(gc_reason));
    2907             :   }
    2908             : 
    2909             :   HistogramTimerScope incremental_marking_scope(
    2910       24018 :       isolate()->counters()->gc_incremental_marking_finalize());
    2911       72054 :   TRACE_EVENT0("v8", "V8.GCIncrementalMarkingFinalize");
    2912       96072 :   TRACE_GC(tracer(), GCTracer::Scope::MC_INCREMENTAL_FINALIZE);
    2913             : 
    2914             :   {
    2915             :     GCCallbacksScope scope(this);
    2916       24018 :     if (scope.CheckReenter()) {
    2917             :       AllowHeapAllocation allow_allocation;
    2918       96072 :       TRACE_GC(tracer(), GCTracer::Scope::MC_INCREMENTAL_EXTERNAL_PROLOGUE);
    2919       48036 :       VMState<EXTERNAL> state(isolate_);
    2920       24018 :       HandleScope handle_scope(isolate_);
    2921       48036 :       CallGCPrologueCallbacks(kGCTypeIncrementalMarking, kNoGCCallbackFlags);
    2922             :     }
    2923             :   }
    2924       24018 :   incremental_marking()->FinalizeIncrementally();
    2925             :   {
    2926             :     GCCallbacksScope scope(this);
    2927       24018 :     if (scope.CheckReenter()) {
    2928             :       AllowHeapAllocation allow_allocation;
    2929       96072 :       TRACE_GC(tracer(), GCTracer::Scope::MC_INCREMENTAL_EXTERNAL_EPILOGUE);
    2930       48036 :       VMState<EXTERNAL> state(isolate_);
    2931       24018 :       HandleScope handle_scope(isolate_);
    2932       48036 :       CallGCEpilogueCallbacks(kGCTypeIncrementalMarking, kNoGCCallbackFlags);
    2933             :     }
    2934             :   }
    2935       24018 : }
    2936             : 
    2937       92009 : void Heap::RegisterDeserializedObjectsForBlackAllocation(
    2938             :     Reservation* reservations, const std::vector<HeapObject>& large_objects,
    2939    17246958 :     const std::vector<Address>& maps) {
    2940             :   // TODO(ulan): pause black allocation during deserialization to avoid
    2941             :   // iterating all these objects in one go.
    2942             : 
    2943      184019 :   if (!incremental_marking()->black_allocation()) return;
    2944             : 
    2945             :   // Iterate black objects in old space, code space, map space, and large
    2946             :   // object space for side effects.
    2947             :   IncrementalMarking::MarkingState* marking_state =
    2948             :       incremental_marking()->marking_state();
    2949       50704 :   for (int i = OLD_SPACE; i < Serializer::kNumberOfSpaces; i++) {
    2950       50704 :     const Heap::Reservation& res = reservations[i];
    2951      443568 :     for (auto& chunk : res) {
    2952      342160 :       Address addr = chunk.start;
    2953    14768414 :       while (addr < chunk.end) {
    2954    14084094 :         HeapObject obj = HeapObject::FromAddress(addr);
    2955             :         // Objects can have any color because incremental marking can
    2956             :         // start in the middle of Heap::ReserveSpace().
    2957    14084032 :         if (marking_state->IsBlack(obj)) {
    2958    14083850 :           incremental_marking()->ProcessBlackAllocatedObject(obj);
    2959             :         }
    2960    14084115 :         addr += obj->Size();
    2961             :       }
    2962             :     }
    2963             :   }
    2964             : 
    2965             :   // Large object space doesn't use reservations, so it needs custom handling.
    2966       25367 :   for (HeapObject object : large_objects) {
    2967          15 :     incremental_marking()->ProcessBlackAllocatedObject(object);
    2968             :   }
    2969             : 
    2970             :   // Map space doesn't use reservations, so it needs custom handling.
    2971     3096437 :   for (Address addr : maps) {
    2972             :     incremental_marking()->ProcessBlackAllocatedObject(
    2973     3071084 :         HeapObject::FromAddress(addr));
    2974             :   }
    2975             : }
    2976             : 
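The reservation loop above uses the standard linear heap walk: objects in a chunk are laid back to back, so advancing by each object's size visits every one exactly once. A standalone model of the pattern (simplified types, not V8's):

    #include <cstdint>
    using Address = uintptr_t;
    struct FakeObject { uint32_t size; /* payload follows */ };

    // Visit each contiguous object in [start, end) by stepping over its size.
    void WalkObjects(Address start, Address end) {
      for (Address addr = start; addr < end; ) {
        const FakeObject* obj = reinterpret_cast<const FakeObject*>(addr);
        // ... process *obj here ...
        addr += obj->size;  // step to the next contiguous object
      }
    }
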
    2977    23667152 : void Heap::NotifyObjectLayoutChange(HeapObject object, int size,
    2978    26061033 :                                     const DisallowHeapAllocation&) {
    2979    23667152 :   if (incremental_marking()->IsMarking()) {
    2980     2393880 :     incremental_marking()->MarkBlackAndVisitObjectDueToLayoutChange(object);
    2981     2541394 :     if (incremental_marking()->IsCompacting() &&
    2982      147513 :         MayContainRecordedSlots(object)) {
    2983             :       MemoryChunk::FromHeapObject(object)->RegisterObjectWithInvalidatedSlots(
    2984         863 :           object, size);
    2985             :     }
    2986             :   }
    2987             : #ifdef VERIFY_HEAP
    2988             :   if (FLAG_verify_heap) {
    2989             :     DCHECK(pending_layout_change_object_.is_null());
    2990             :     pending_layout_change_object_ = object;
    2991             :   }
    2992             : #endif
    2993    23667153 : }
    2994             : 
    2995             : #ifdef VERIFY_HEAP
    2996             : // Helper class for collecting slot addresses.
    2997             : class SlotCollectingVisitor final : public ObjectVisitor {
    2998             :  public:
    2999             :   void VisitPointers(HeapObject host, ObjectSlot start,
    3000             :                      ObjectSlot end) override {
    3001             :     VisitPointers(host, MaybeObjectSlot(start), MaybeObjectSlot(end));
    3002             :   }
    3003             :   void VisitPointers(HeapObject host, MaybeObjectSlot start,
    3004             :                      MaybeObjectSlot end) final {
    3005             :     for (MaybeObjectSlot p = start; p < end; ++p) {
    3006             :       slots_.push_back(p);
    3007             :     }
    3008             :   }
    3009             : 
    3010             :   void VisitCodeTarget(Code host, RelocInfo* rinfo) final { UNREACHABLE(); }
    3011             : 
    3012             :   void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) override {
    3013             :     UNREACHABLE();
    3014             :   }
    3015             : 
    3016             :   int number_of_slots() { return static_cast<int>(slots_.size()); }
    3017             : 
    3018             :   MaybeObjectSlot slot(int i) { return slots_[i]; }
    3019             : 
    3020             :  private:
    3021             :   std::vector<MaybeObjectSlot> slots_;
    3022             : };
    3023             : 
    3024             : void Heap::VerifyObjectLayoutChange(HeapObject object, Map new_map) {
    3025             :   if (!FLAG_verify_heap) return;
    3026             : 
    3027             :   // Check that Heap::NotifyObjectLayoutChange was called for object transitions
    3028             :   // that are not safe for concurrent marking.
    3029             :   // If you see this check triggering for a freshly allocated object,
    3030             :   // use object->set_map_after_allocation() to initialize its map.
    3031             :   if (pending_layout_change_object_.is_null()) {
    3032             :     if (object->IsJSObject()) {
    3033             :       DCHECK(!object->map()->TransitionRequiresSynchronizationWithGC(new_map));
    3034             :     } else {
    3035             :       // Check that the set of slots before and after the transition match.
    3036             :       SlotCollectingVisitor old_visitor;
    3037             :       object->IterateFast(&old_visitor);
    3038             :       MapWord old_map_word = object->map_word();
    3039             :       // Temporarily set the new map to iterate new slots.
    3040             :       object->set_map_word(MapWord::FromMap(new_map));
    3041             :       SlotCollectingVisitor new_visitor;
    3042             :       object->IterateFast(&new_visitor);
    3043             :       // Restore the old map.
    3044             :       object->set_map_word(old_map_word);
    3045             :       DCHECK_EQ(new_visitor.number_of_slots(), old_visitor.number_of_slots());
    3046             :       for (int i = 0; i < new_visitor.number_of_slots(); i++) {
    3047             :         DCHECK(new_visitor.slot(i) == old_visitor.slot(i));
    3048             :       }
    3049             :     }
    3050             :   } else {
    3051             :     DCHECK_EQ(pending_layout_change_object_, object);
    3052             :     pending_layout_change_object_ = HeapObject();
    3053             :   }
    3054             : }
    3055             : #endif
    3056             : 
    3057        1380 : GCIdleTimeHeapState Heap::ComputeHeapState() {
    3058             :   GCIdleTimeHeapState heap_state;
    3059         460 :   heap_state.contexts_disposed = contexts_disposed_;
    3060             :   heap_state.contexts_disposal_rate =
    3061         460 :       tracer()->ContextDisposalRateInMilliseconds();
    3062         460 :   heap_state.size_of_objects = static_cast<size_t>(SizeOfObjects());
    3063         460 :   heap_state.incremental_marking_stopped = incremental_marking()->IsStopped();
    3064         460 :   return heap_state;
    3065             : }
    3066             : 
    3067             : 
    3068         460 : bool Heap::PerformIdleTimeAction(GCIdleTimeAction action,
    3069             :                                  GCIdleTimeHeapState heap_state,
    3070          32 :                                  double deadline_in_ms) {
    3071             :   bool result = false;
    3072         460 :   switch (action.type) {
    3073             :     case DONE:
    3074             :       result = true;
    3075         147 :       break;
    3076             :     case DO_INCREMENTAL_STEP: {
    3077             :       const double remaining_idle_time_in_ms =
    3078             :           incremental_marking()->AdvanceIncrementalMarking(
    3079             :               deadline_in_ms, IncrementalMarking::NO_GC_VIA_STACK_GUARD,
    3080          16 :               StepOrigin::kTask);
    3081          16 :       if (remaining_idle_time_in_ms > 0.0) {
    3082             :         FinalizeIncrementalMarkingIfComplete(
    3083          16 :             GarbageCollectionReason::kFinalizeMarkingViaTask);
    3084             :       }
    3085             :       result = incremental_marking()->IsStopped();
    3086          16 :       break;
    3087             :     }
    3088             :     case DO_FULL_GC: {
    3089             :       DCHECK_LT(0, contexts_disposed_);
    3090         368 :       HistogramTimerScope scope(isolate_->counters()->gc_context());
    3091         552 :       TRACE_EVENT0("v8", "V8.GCContext");
    3092             :       CollectAllGarbage(kNoGCFlags, GarbageCollectionReason::kContextDisposal);
    3093             :       break;
    3094             :     }
    3095             :     case DO_NOTHING:
    3096             :       break;
    3097             :   }
    3098             : 
    3099         460 :   return result;
    3100             : }
    3101             : 
    3102         460 : void Heap::IdleNotificationEpilogue(GCIdleTimeAction action,
    3103             :                                     GCIdleTimeHeapState heap_state,
    3104             :                                     double start_ms, double deadline_in_ms) {
    3105         460 :   double idle_time_in_ms = deadline_in_ms - start_ms;
    3106         460 :   double current_time = MonotonicallyIncreasingTimeInMs();
    3107         460 :   last_idle_notification_time_ = current_time;
    3108         460 :   double deadline_difference = deadline_in_ms - current_time;
    3109             : 
    3110         460 :   contexts_disposed_ = 0;
    3111             : 
    3112         460 :   if ((FLAG_trace_idle_notification && action.type > DO_NOTHING) ||
    3113             :       FLAG_trace_idle_notification_verbose) {
    3114             :     isolate_->PrintWithTimestamp(
    3115             :         "Idle notification: requested idle time %.2f ms, used idle time %.2f "
    3116             :         "ms, deadline usage %.2f ms [",
    3117             :         idle_time_in_ms, idle_time_in_ms - deadline_difference,
    3118           0 :         deadline_difference);
    3119           0 :     action.Print();
    3120           0 :     PrintF("]");
    3121           0 :     if (FLAG_trace_idle_notification_verbose) {
    3122           0 :       PrintF("[");
    3123           0 :       heap_state.Print();
    3124           0 :       PrintF("]");
    3125             :     }
    3126           0 :     PrintF("\n");
    3127             :   }
    3128         460 : }
    3129             : 
    3130             : 
    3131    27197824 : double Heap::MonotonicallyIncreasingTimeInMs() {
    3132    27197824 :   return V8::GetCurrentPlatform()->MonotonicallyIncreasingTime() *
    3133    27196087 :          static_cast<double>(base::Time::kMillisecondsPerSecond);
    3134             : }
    3135             : 
    3136             : 
    3137           0 : bool Heap::IdleNotification(int idle_time_in_ms) {
    3138             :   return IdleNotification(
    3139           0 :       V8::GetCurrentPlatform()->MonotonicallyIncreasingTime() +
    3140           0 :       (static_cast<double>(idle_time_in_ms) /
    3141           0 :        static_cast<double>(base::Time::kMillisecondsPerSecond)));
    3142             : }
    3143             : 
    3144             : 
    3145         920 : bool Heap::IdleNotification(double deadline_in_seconds) {
    3146         460 :   CHECK(HasBeenSetUp());
    3147             :   double deadline_in_ms =
    3148             :       deadline_in_seconds *
    3149         460 :       static_cast<double>(base::Time::kMillisecondsPerSecond);
    3150             :   HistogramTimerScope idle_notification_scope(
    3151         920 :       isolate_->counters()->gc_idle_notification());
    3152        1380 :   TRACE_EVENT0("v8", "V8.GCIdleNotification");
    3153         460 :   double start_ms = MonotonicallyIncreasingTimeInMs();
    3154         460 :   double idle_time_in_ms = deadline_in_ms - start_ms;
    3155             : 
    3156             :   tracer()->SampleAllocation(start_ms, NewSpaceAllocationCounter(),
    3157         460 :                              OldGenerationAllocationCounter());
    3158             : 
    3159         460 :   GCIdleTimeHeapState heap_state = ComputeHeapState();
    3160             : 
    3161             :   GCIdleTimeAction action =
    3162         460 :       gc_idle_time_handler_->Compute(idle_time_in_ms, heap_state);
    3163             : 
    3164         460 :   bool result = PerformIdleTimeAction(action, heap_state, deadline_in_ms);
    3165             : 
    3166         460 :   IdleNotificationEpilogue(action, heap_state, start_ms, deadline_in_ms);
    3167         460 :   return result;
    3168             : }
    3169             : 
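The two IdleNotification overloads above back the embedder-facing idle API: the int variant converts a duration in milliseconds into an absolute deadline in seconds, and the double variant does the work. A hedged usage sketch from the embedder side (assumes an initialized v8::Isolate* isolate and the v8::Platform* returned by V8::GetCurrentPlatform(); the public entry point is v8::Isolate::IdleNotificationDeadline):

    // Offer V8 up to 10 ms of idle time; the deadline is an absolute time in
    // seconds on the platform's monotonic clock. Returns true when V8 has no
    // further idle work worth scheduling.
    double deadline_s = platform->MonotonicallyIncreasingTime() + 10.0 / 1000.0;
    bool idle_work_done = isolate->IdleNotificationDeadline(deadline_s);
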
    3170             : 
    3171           0 : bool Heap::RecentIdleNotificationHappened() {
    3172           0 :   return (last_idle_notification_time_ +
    3173             :           GCIdleTimeHandler::kMaxScheduledIdleTime) >
    3174           0 :          MonotonicallyIncreasingTimeInMs();
    3175             : }
    3176             : 
    3177             : class MemoryPressureInterruptTask : public CancelableTask {
    3178             :  public:
    3179             :   explicit MemoryPressureInterruptTask(Heap* heap)
    3180          11 :       : CancelableTask(heap->isolate()), heap_(heap) {}
    3181             : 
    3182          22 :   ~MemoryPressureInterruptTask() override = default;
    3183             : 
    3184             :  private:
    3185             :   // v8::internal::CancelableTask overrides.
    3186          11 :   void RunInternal() override { heap_->CheckMemoryPressure(); }
    3187             : 
    3188             :   Heap* heap_;
    3189             :   DISALLOW_COPY_AND_ASSIGN(MemoryPressureInterruptTask);
    3190             : };
    3191             : 
    3192     2129887 : void Heap::CheckMemoryPressure() {
    3193     2129887 :   if (HighMemoryPressure()) {
    3194             :     // The optimizing compiler may be unnecessarily holding on to memory.
    3195        8753 :     isolate()->AbortConcurrentOptimization(BlockingBehavior::kDontBlock);
    3196             :   }
    3197             :   MemoryPressureLevel memory_pressure_level = memory_pressure_level_;
    3198             :   // Reset the memory pressure level to avoid recursive GCs triggered by
    3199             :   // CheckMemoryPressure from AdjustAmountOfExternalMemory called by
    3200             :   // the finalizers.
    3201             :   memory_pressure_level_ = MemoryPressureLevel::kNone;
    3202     2129887 :   if (memory_pressure_level == MemoryPressureLevel::kCritical) {
    3203        8753 :     CollectGarbageOnMemoryPressure();
    3204     2121134 :   } else if (memory_pressure_level == MemoryPressureLevel::kModerate) {
    3205           0 :     if (FLAG_incremental_marking && incremental_marking()->IsStopped()) {
    3206             :       StartIncrementalMarking(kReduceMemoryFootprintMask,
    3207             :                               GarbageCollectionReason::kMemoryPressure);
    3208             :     }
    3209             :   }
    3210     2129887 :   if (memory_reducer_) {
    3211             :     MemoryReducer::Event event;
    3212     2129887 :     event.type = MemoryReducer::kPossibleGarbage;
    3213     2129887 :     event.time_ms = MonotonicallyIncreasingTimeInMs();
    3214     2129887 :     memory_reducer_->NotifyPossibleGarbage(event);
    3215             :   }
    3216     2129887 : }
    3217             : 
    3218        8753 : void Heap::CollectGarbageOnMemoryPressure() {
    3219             :   const int kGarbageThresholdInBytes = 8 * MB;
    3220             :   const double kGarbageThresholdAsFractionOfTotalMemory = 0.1;
    3221             :   // This constant is the maximum response time in RAIL performance model.
    3222             :   const double kMaxMemoryPressurePauseMs = 100;
    3223             : 
    3224        8753 :   double start = MonotonicallyIncreasingTimeInMs();
    3225             :   CollectAllGarbage(kReduceMemoryFootprintMask,
    3226             :                     GarbageCollectionReason::kMemoryPressure,
    3227             :                     kGCCallbackFlagCollectAllAvailableGarbage);
    3228        8753 :   EagerlyFreeExternalMemory();
    3229        8753 :   double end = MonotonicallyIncreasingTimeInMs();
    3230             : 
    3231             :   // Estimate how much memory we can free.
    3232       26259 :   int64_t potential_garbage = (CommittedMemory() - SizeOfObjects()) +
    3233        8753 :                               isolate()->isolate_data()->external_memory_;
    3234             :   // If we can potentially free a large amount of memory, then start a GC
    3235             :   // right away instead of waiting for the memory reducer.
    3236       11985 :   if (potential_garbage >= kGarbageThresholdInBytes &&
    3237        3232 :       potential_garbage >=
    3238        3232 :           CommittedMemory() * kGarbageThresholdAsFractionOfTotalMemory) {
    3239             :     // If we spent less than half of the time budget, then perform a full
    3240             :     // GC. Otherwise, start incremental marking.
    3241        3232 :     if (end - start < kMaxMemoryPressurePauseMs / 2) {
    3242             :       CollectAllGarbage(kReduceMemoryFootprintMask,
    3243             :                         GarbageCollectionReason::kMemoryPressure,
    3244             :                         kGCCallbackFlagCollectAllAvailableGarbage);
    3245             :     } else {
    3246           0 :       if (FLAG_incremental_marking && incremental_marking()->IsStopped()) {
    3247             :         StartIncrementalMarking(kReduceMemoryFootprintMask,
    3248             :                                 GarbageCollectionReason::kMemoryPressure);
    3249             :       }
    3250             :     }
    3251             :   }
    3252        8753 : }
    3253             : 
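Worked numbers for the heuristic above (hypothetical values): with CommittedMemory() = 120 MB, SizeOfObjects() = 40 MB, and no external memory, potential_garbage is 80 MB, which clears both the 8 MB absolute floor and 10% of committed memory (12 MB), so a second full GC runs immediately when the first stayed under half of the 100 ms budget. A hedged restatement of the test:

    #include <cstdint>

    // Hypothetical restatement of the "enough potential garbage" check.
    bool EnoughPotentialGarbage(int64_t committed, int64_t live,
                                int64_t external) {
      const int64_t kGarbageThresholdInBytes = 8 * 1024 * 1024;  // 8 MB
      const double kGarbageThresholdAsFraction = 0.1;            // 10%
      const int64_t potential_garbage = (committed - live) + external;
      return potential_garbage >= kGarbageThresholdInBytes &&
             potential_garbage >= committed * kGarbageThresholdAsFraction;
    }
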
    3254        8763 : void Heap::MemoryPressureNotification(MemoryPressureLevel level,
    3255             :                                       bool is_isolate_locked) {
    3256             :   MemoryPressureLevel previous = memory_pressure_level_;
    3257             :   memory_pressure_level_ = level;
    3258       17526 :   if ((previous != MemoryPressureLevel::kCritical &&
    3259        8773 :        level == MemoryPressureLevel::kCritical) ||
    3260          20 :       (previous == MemoryPressureLevel::kNone &&
    3261          10 :        level == MemoryPressureLevel::kModerate)) {
    3262        8758 :     if (is_isolate_locked) {
    3263        8747 :       CheckMemoryPressure();
    3264             :     } else {
    3265             :       ExecutionAccess access(isolate());
    3266          11 :       isolate()->stack_guard()->RequestGC();
    3267          11 :       auto taskrunner = V8::GetCurrentPlatform()->GetForegroundTaskRunner(
    3268          11 :           reinterpret_cast<v8::Isolate*>(isolate()));
    3269          11 :       taskrunner->PostTask(
    3270          44 :           base::make_unique<MemoryPressureInterruptTask>(this));
    3271             :     }
    3272             :   }
    3273        8763 : }
    3274             : 
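MemoryPressureNotification above is the internal half of a public API; embedders report pressure through v8::Isolate, and off-thread callers end up on the interrupt-task path shown earlier. A minimal usage sketch (assumes #include <v8.h> and an initialized v8::Isolate* isolate):

    // Signal critical memory pressure; this drives the GC path above either
    // synchronously or via a posted MemoryPressureInterruptTask.
    isolate->MemoryPressureNotification(v8::MemoryPressureLevel::kCritical);
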
    3275       21916 : void Heap::EagerlyFreeExternalMemory() {
    3276       42496 :   for (Page* page : *old_space()) {
    3277       32376 :     if (!page->SweepingDone()) {
    3278             :       base::MutexGuard guard(page->mutex());
    3279        5652 :       if (!page->SweepingDone()) {
    3280             :         ArrayBufferTracker::FreeDead(
    3281        1676 :             page, mark_compact_collector()->non_atomic_marking_state());
    3282             :       }
    3283             :     }
    3284             :   }
    3285       10120 :   memory_allocator()->unmapper()->EnsureUnmappingCompleted();
    3286       10120 : }
    3287             : 
    3288        3404 : void Heap::AddNearHeapLimitCallback(v8::NearHeapLimitCallback callback,
    3289             :                                     void* data) {
    3290             :   const size_t kMaxCallbacks = 100;
    3291        6808 :   CHECK_LT(near_heap_limit_callbacks_.size(), kMaxCallbacks);
    3292        3404 :   for (auto callback_data : near_heap_limit_callbacks_) {
    3293           0 :     CHECK_NE(callback_data.first, callback);
    3294             :   }
    3295        6808 :   near_heap_limit_callbacks_.push_back(std::make_pair(callback, data));
    3296        3404 : }
    3297             : 
    3298        3396 : void Heap::RemoveNearHeapLimitCallback(v8::NearHeapLimitCallback callback,
    3299             :                                        size_t heap_limit) {
    3300        6792 :   for (size_t i = 0; i < near_heap_limit_callbacks_.size(); i++) {
    3301        6792 :     if (near_heap_limit_callbacks_[i].first == callback) {
    3302        3396 :       near_heap_limit_callbacks_.erase(near_heap_limit_callbacks_.begin() + i);
    3303        3396 :       if (heap_limit) {
    3304           5 :         RestoreHeapLimit(heap_limit);
    3305             :       }
    3306        3396 :       return;
    3307             :     }
    3308             :   }
    3309           0 :   UNREACHABLE();
    3310             : }
    3311             : 
    3312           4 : void Heap::AutomaticallyRestoreInitialHeapLimit(double threshold_percent) {
    3313             :   initial_max_old_generation_size_threshold_ =
    3314           4 :       initial_max_old_generation_size_ * threshold_percent;
    3315           4 : }
    3316             : 
    3317          86 : bool Heap::InvokeNearHeapLimitCallback() {
    3318         172 :   if (near_heap_limit_callbacks_.size() > 0) {
    3319             :     HandleScope scope(isolate());
    3320             :     v8::NearHeapLimitCallback callback =
    3321          22 :         near_heap_limit_callbacks_.back().first;
    3322          22 :     void* data = near_heap_limit_callbacks_.back().second;
    3323             :     size_t heap_limit = callback(data, max_old_generation_size_,
    3324          22 :                                  initial_max_old_generation_size_);
    3325          22 :     if (heap_limit > max_old_generation_size_) {
    3326          22 :       max_old_generation_size_ = heap_limit;
    3327             :       return true;
    3328             :     }
    3329             :   }
    3330             :   return false;
    3331             : }
    3332             : 
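The callback machinery above backs the public near-heap-limit API declared in v8.h: the callback receives the current and initial old-generation limits and returns a new limit, and InvokeNearHeapLimitCallback only succeeds when the returned value is larger than the current one. A hedged embedder sketch (hypothetical callback body; assumes an initialized v8::Isolate* isolate):

    // Hypothetical embedder callback: raise the limit once by 25% to buy time
    // for diagnostics before OOM.
    size_t OnNearHeapLimit(void* data, size_t current_heap_limit,
                           size_t initial_heap_limit) {
      return current_heap_limit + current_heap_limit / 4;
    }
    // Registration:
    isolate->AddNearHeapLimitCallback(OnNearHeapLimit, nullptr);
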
    3333           0 : void Heap::CollectCodeStatistics() {
    3334           0 :   TRACE_EVENT0("v8", "Heap::CollectCodeStatistics");
    3335           0 :   CodeStatistics::ResetCodeAndMetadataStatistics(isolate());
    3336             :   // We do not look for code in new space or map space. If code
    3337             :   // somehow ends up in those spaces, we would miss it here.
    3338           0 :   CodeStatistics::CollectCodeStatistics(code_space_, isolate());
    3339           0 :   CodeStatistics::CollectCodeStatistics(old_space_, isolate());
    3340           0 :   CodeStatistics::CollectCodeStatistics(code_lo_space_, isolate());
    3341           0 : }
    3342             : 
    3343             : #ifdef DEBUG
    3344             : 
    3345             : void Heap::Print() {
    3346             :   if (!HasBeenSetUp()) return;
    3347             :   isolate()->PrintStack(stdout);
    3348             : 
    3349             :   for (SpaceIterator it(this); it.has_next();) {
    3350             :     it.next()->Print();
    3351             :   }
    3352             : }
    3353             : 
    3354             : 
    3355             : void Heap::ReportCodeStatistics(const char* title) {
    3356             :   PrintF(">>>>>> Code Stats (%s) >>>>>>\n", title);
    3357             :   CollectCodeStatistics();
    3358             :   CodeStatistics::ReportCodeStatistics(isolate());
    3359             : }
    3360             : 
    3361             : #endif  // DEBUG
    3362             : 
    3363      107071 : const char* Heap::GarbageCollectionReasonToString(
    3364             :     GarbageCollectionReason gc_reason) {
    3365      107071 :   switch (gc_reason) {
    3366             :     case GarbageCollectionReason::kAllocationFailure:
    3367             :       return "allocation failure";
    3368             :     case GarbageCollectionReason::kAllocationLimit:
    3369           0 :       return "allocation limit";
    3370             :     case GarbageCollectionReason::kContextDisposal:
    3371         184 :       return "context disposal";
    3372             :     case GarbageCollectionReason::kCountersExtension:
    3373           0 :       return "counters extension";
    3374             :     case GarbageCollectionReason::kDebugger:
    3375       14332 :       return "debugger";
    3376             :     case GarbageCollectionReason::kDeserializer:
    3377           5 :       return "deserialize";
    3378             :     case GarbageCollectionReason::kExternalMemoryPressure:
    3379        1101 :       return "external memory pressure";
    3380             :     case GarbageCollectionReason::kFinalizeMarkingViaStackGuard:
    3381        6901 :       return "finalize incremental marking via stack guard";
    3382             :     case GarbageCollectionReason::kFinalizeMarkingViaTask:
    3383       17702 :       return "finalize incremental marking via task";
    3384             :     case GarbageCollectionReason::kFullHashtable:
    3385           0 :       return "full hash-table";
    3386             :     case GarbageCollectionReason::kHeapProfiler:
    3387        1142 :       return "heap profiler";
    3388             :     case GarbageCollectionReason::kIdleTask:
    3389        1946 :       return "idle task";
    3390             :     case GarbageCollectionReason::kLastResort:
    3391          42 :       return "last resort";
    3392             :     case GarbageCollectionReason::kLowMemoryNotification:
    3393        1220 :       return "low memory notification";
    3394             :     case GarbageCollectionReason::kMakeHeapIterable:
    3395           0 :       return "make heap iterable";
    3396             :     case GarbageCollectionReason::kMemoryPressure:
    3397       11985 :       return "memory pressure";
    3398             :     case GarbageCollectionReason::kMemoryReducer:
    3399           0 :       return "memory reducer";
    3400             :     case GarbageCollectionReason::kRuntime:
    3401         380 :       return "runtime";
    3402             :     case GarbageCollectionReason::kSamplingProfiler:
    3403          20 :       return "sampling profiler";
    3404             :     case GarbageCollectionReason::kSnapshotCreator:
    3405         372 :       return "snapshot creator";
    3406             :     case GarbageCollectionReason::kTesting:
    3407       29591 :       return "testing";
    3408             :     case GarbageCollectionReason::kExternalFinalize:
    3409           5 :       return "external finalize";
    3410             :     case GarbageCollectionReason::kUnknown:
    3411           5 :       return "unknown";
    3412             :   }
    3413           0 :   UNREACHABLE();
    3414             : }
    3415             : 
    3416     3906696 : bool Heap::Contains(HeapObject value) {
    3417     1953348 :   if (memory_allocator()->IsOutsideAllocatedSpace(value->address())) {
    3418             :     return false;
    3419             :   }
    3420     3906696 :   return HasBeenSetUp() &&
    3421     1935543 :          (new_space_->ToSpaceContains(value) || old_space_->Contains(value) ||
    3422           0 :           code_space_->Contains(value) || map_space_->Contains(value) ||
    3423           0 :           lo_space_->Contains(value) || read_only_space_->Contains(value) ||
    3424           0 :           code_lo_space_->Contains(value) || new_lo_space_->Contains(value));
    3425             : }
    3426             : 
    3427         140 : bool Heap::InSpace(HeapObject value, AllocationSpace space) {
    3428          70 :   if (memory_allocator()->IsOutsideAllocatedSpace(value->address())) {
    3429             :     return false;
    3430             :   }
    3431          70 :   if (!HasBeenSetUp()) return false;
    3432             : 
    3433          70 :   switch (space) {
    3434             :     case NEW_SPACE:
    3435          15 :       return new_space_->ToSpaceContains(value);
    3436             :     case OLD_SPACE:
    3437          15 :       return old_space_->Contains(value);
    3438             :     case CODE_SPACE:
    3439           0 :       return code_space_->Contains(value);
    3440             :     case MAP_SPACE:
    3441           0 :       return map_space_->Contains(value);
    3442             :     case LO_SPACE:
    3443          30 :       return lo_space_->Contains(value);
    3444             :     case CODE_LO_SPACE:
    3445          10 :       return code_lo_space_->Contains(value);
    3446             :     case NEW_LO_SPACE:
    3447           0 :       return new_lo_space_->Contains(value);
    3448             :     case RO_SPACE:
    3449           0 :       return read_only_space_->Contains(value);
    3450             :   }
    3451           0 :   UNREACHABLE();
    3452             : }
    3453             : 
    3454           0 : bool Heap::InSpaceSlow(Address addr, AllocationSpace space) {
    3455           0 :   if (memory_allocator()->IsOutsideAllocatedSpace(addr)) {
    3456             :     return false;
    3457             :   }
    3458           0 :   if (!HasBeenSetUp()) return false;
    3459             : 
    3460           0 :   switch (space) {
    3461             :     case NEW_SPACE:
    3462           0 :       return new_space_->ToSpaceContainsSlow(addr);
    3463             :     case OLD_SPACE:
    3464           0 :       return old_space_->ContainsSlow(addr);
    3465             :     case CODE_SPACE:
    3466           0 :       return code_space_->ContainsSlow(addr);
    3467             :     case MAP_SPACE:
    3468           0 :       return map_space_->ContainsSlow(addr);
    3469             :     case LO_SPACE:
    3470           0 :       return lo_space_->ContainsSlow(addr);
    3471             :     case CODE_LO_SPACE:
    3472           0 :       return code_lo_space_->ContainsSlow(addr);
    3473             :     case NEW_LO_SPACE:
    3474           0 :       return new_lo_space_->ContainsSlow(addr);
    3475             :     case RO_SPACE:
    3476           0 :       return read_only_space_->ContainsSlow(addr);
    3477             :   }
    3478           0 :   UNREACHABLE();
    3479             : }
    3480             : 
    3481          40 : bool Heap::IsValidAllocationSpace(AllocationSpace space) {
    3482          40 :   switch (space) {
    3483             :     case NEW_SPACE:
    3484             :     case OLD_SPACE:
    3485             :     case CODE_SPACE:
    3486             :     case MAP_SPACE:
    3487             :     case LO_SPACE:
    3488             :     case NEW_LO_SPACE:
    3489             :     case CODE_LO_SPACE:
    3490             :     case RO_SPACE:
    3491             :       return true;
    3492             :     default:
    3493           0 :       return false;
    3494             :   }
    3495             : }
    3496             : 
    3497             : #ifdef VERIFY_HEAP
    3498             : class VerifyReadOnlyPointersVisitor : public VerifyPointersVisitor {
    3499             :  public:
    3500             :   explicit VerifyReadOnlyPointersVisitor(Heap* heap)
    3501             :       : VerifyPointersVisitor(heap) {}
    3502             : 
    3503             :  protected:
    3504             :   void VerifyPointers(HeapObject host, MaybeObjectSlot start,
    3505             :                       MaybeObjectSlot end) override {
    3506             :     if (!host.is_null()) {
    3507             :       CHECK(heap_->InReadOnlySpace(host->map()));
    3508             :     }
    3509             :     VerifyPointersVisitor::VerifyPointers(host, start, end);
    3510             : 
    3511             :     for (MaybeObjectSlot current = start; current < end; ++current) {
    3512             :       HeapObject heap_object;
    3513             :       if ((*current)->GetHeapObject(&heap_object)) {
    3514             :         CHECK(heap_->InReadOnlySpace(heap_object));
    3515             :       }
    3516             :     }
    3517             :   }
    3518             : };
    3519             : 
    3520             : void Heap::Verify() {
    3521             :   CHECK(HasBeenSetUp());
    3522             :   HandleScope scope(isolate());
    3523             : 
    3524             :   // We have to wait here for the sweeper threads to have an iterable heap.
    3525             :   mark_compact_collector()->EnsureSweepingCompleted();
    3526             : 
    3527             :   VerifyPointersVisitor visitor(this);
    3528             :   IterateRoots(&visitor, VISIT_ONLY_STRONG);
    3529             : 
    3530             :   VerifySmisVisitor smis_visitor;
    3531             :   IterateSmiRoots(&smis_visitor);
    3532             : 
    3533             :   new_space_->Verify(isolate());
    3534             : 
    3535             :   old_space_->Verify(isolate(), &visitor);
    3536             :   map_space_->Verify(isolate(), &visitor);
    3537             : 
    3538             :   VerifyPointersVisitor no_dirty_regions_visitor(this);
    3539             :   code_space_->Verify(isolate(), &no_dirty_regions_visitor);
    3540             : 
    3541             :   lo_space_->Verify(isolate());
    3542             :   code_lo_space_->Verify(isolate());
    3543             :   new_lo_space_->Verify(isolate());
    3544             : 
    3545             :   VerifyReadOnlyPointersVisitor read_only_visitor(this);
    3546             :   read_only_space_->Verify(isolate(), &read_only_visitor);
    3547             : }
    3548             : 
    3549             : class SlotVerifyingVisitor : public ObjectVisitor {
    3550             :  public:
    3551             :   SlotVerifyingVisitor(std::set<Address>* untyped,
    3552             :                        std::set<std::pair<SlotType, Address> >* typed)
    3553             :       : untyped_(untyped), typed_(typed) {}
    3554             : 
    3555             :   virtual bool ShouldHaveBeenRecorded(HeapObject host, MaybeObject target) = 0;
    3556             : 
    3557             :   void VisitPointers(HeapObject host, ObjectSlot start,
    3558             :                      ObjectSlot end) override {
    3559             : #ifdef DEBUG
    3560             :     for (ObjectSlot slot = start; slot < end; ++slot) {
    3561             :       DCHECK(!HasWeakHeapObjectTag(*slot));
    3562             :     }
    3563             : #endif  // DEBUG
    3564             :     VisitPointers(host, MaybeObjectSlot(start), MaybeObjectSlot(end));
    3565             :   }
    3566             : 
    3567             :   void VisitPointers(HeapObject host, MaybeObjectSlot start,
    3568             :                      MaybeObjectSlot end) final {
    3569             :     for (MaybeObjectSlot slot = start; slot < end; ++slot) {
    3570             :       if (ShouldHaveBeenRecorded(host, *slot)) {
    3571             :         CHECK_GT(untyped_->count(slot.address()), 0);
    3572             :       }
    3573             :     }
    3574             :   }
    3575             : 
    3576             :   void VisitCodeTarget(Code host, RelocInfo* rinfo) override {
    3577             :     Object target = Code::GetCodeFromTargetAddress(rinfo->target_address());
    3578             :     if (ShouldHaveBeenRecorded(host, MaybeObject::FromObject(target))) {
    3579             :       CHECK(
    3580             :           InTypedSet(CODE_TARGET_SLOT, rinfo->pc()) ||
    3581             :           (rinfo->IsInConstantPool() &&
    3582             :            InTypedSet(CODE_ENTRY_SLOT, rinfo->constant_pool_entry_address())));
    3583             :     }
    3584             :   }
    3585             : 
    3586             :   void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) override {
    3587             :     Object target = rinfo->target_object();
    3588             :     if (ShouldHaveBeenRecorded(host, MaybeObject::FromObject(target))) {
    3589             :       CHECK(InTypedSet(EMBEDDED_OBJECT_SLOT, rinfo->pc()) ||
    3590             :             (rinfo->IsInConstantPool() &&
    3591             :              InTypedSet(OBJECT_SLOT, rinfo->constant_pool_entry_address())));
    3592             :     }
    3593             :   }
    3594             : 
    3595             :  private:
    3596             :   bool InTypedSet(SlotType type, Address slot) {
    3597             :     return typed_->count(std::make_pair(type, slot)) > 0;
    3598             :   }
    3599             :   std::set<Address>* untyped_;
    3600             :   std::set<std::pair<SlotType, Address> >* typed_;
    3601             : };
    3602             : 
    3603             : class OldToNewSlotVerifyingVisitor : public SlotVerifyingVisitor {
    3604             :  public:
    3605             :   OldToNewSlotVerifyingVisitor(std::set<Address>* untyped,
    3606             :                                std::set<std::pair<SlotType, Address>>* typed)
    3607             :       : SlotVerifyingVisitor(untyped, typed) {}
    3608             : 
    3609             :   bool ShouldHaveBeenRecorded(HeapObject host, MaybeObject target) override {
    3610             :     DCHECK_IMPLIES(target->IsStrongOrWeak() && Heap::InNewSpace(target),
    3611             :                    Heap::InToSpace(target));
    3612             :     return target->IsStrongOrWeak() && Heap::InNewSpace(target) &&
    3613             :            !Heap::InNewSpace(host);
    3614             :   }
    3615             : };
    3616             : 
    3617             : template <RememberedSetType direction>
    3618             : void CollectSlots(MemoryChunk* chunk, Address start, Address end,
    3619             :                   std::set<Address>* untyped,
    3620             :                   std::set<std::pair<SlotType, Address> >* typed) {
    3621             :   RememberedSet<direction>::Iterate(
    3622             :       chunk,
    3623             :       [start, end, untyped](MaybeObjectSlot slot) {
    3624             :         if (start <= slot.address() && slot.address() < end) {
    3625             :           untyped->insert(slot.address());
    3626             :         }
    3627             :         return KEEP_SLOT;
    3628             :       },
    3629             :       SlotSet::PREFREE_EMPTY_BUCKETS);
    3630             :   RememberedSet<direction>::IterateTyped(
    3631             :       chunk, [=](SlotType type, Address slot) {
    3632             :         if (start <= slot && slot < end) {
    3633             :           typed->insert(std::make_pair(type, slot));
    3634             :         }
    3635             :         return KEEP_SLOT;
    3636             :       });
    3637             : }
    3638             : 
    3639             : void Heap::VerifyRememberedSetFor(HeapObject object) {
    3640             :   MemoryChunk* chunk = MemoryChunk::FromHeapObject(object);
    3641             :   DCHECK_IMPLIES(chunk->mutex() == nullptr, InReadOnlySpace(object));
    3642             :   // In RO_SPACE chunk->mutex() may be nullptr, so just ignore it.
    3643             :   base::LockGuard<base::Mutex, base::NullBehavior::kIgnoreIfNull> lock_guard(
    3644             :       chunk->mutex());
    3645             :   Address start = object->address();
    3646             :   Address end = start + object->Size();
    3647             :   std::set<Address> old_to_new;
    3648             :   std::set<std::pair<SlotType, Address> > typed_old_to_new;
    3649             :   if (!InNewSpace(object)) {
    3650             :     store_buffer()->MoveAllEntriesToRememberedSet();
    3651             :     CollectSlots<OLD_TO_NEW>(chunk, start, end, &old_to_new, &typed_old_to_new);
    3652             :     OldToNewSlotVerifyingVisitor visitor(&old_to_new, &typed_old_to_new);
    3653             :     object->IterateBody(&visitor);
    3654             :   }
    3655             :   // TODO(ulan): Add old to old slot set verification once all weak objects
    3656             :   // have their own instance types and slots are recorded for all weal fields.
    3657             :   // have their own instance types and slots are recorded for all weak fields.
    3658             : #endif
    3659             : 
    3660             : #ifdef DEBUG
    3661             : void Heap::VerifyCountersAfterSweeping() {
    3662             :   PagedSpaces spaces(this);
    3663             :   for (PagedSpace* space = spaces.next(); space != nullptr;
    3664             :        space = spaces.next()) {
    3665             :     space->VerifyCountersAfterSweeping();
    3666             :   }
    3667             : }
    3668             : 
    3669             : void Heap::VerifyCountersBeforeConcurrentSweeping() {
    3670             :   PagedSpaces spaces(this);
    3671             :   for (PagedSpace* space = spaces.next(); space != nullptr;
    3672             :        space = spaces.next()) {
    3673             :     space->VerifyCountersBeforeConcurrentSweeping();
    3674             :   }
    3675             : }
    3676             : #endif
    3677             : 
    3678           0 : void Heap::ZapFromSpace() {
    3679           0 :   if (!new_space_->IsFromSpaceCommitted()) return;
    3680           0 :   for (Page* page : PageRange(new_space_->from_space().first_page(), nullptr)) {
    3681             :     memory_allocator()->ZapBlock(page->area_start(),
    3682           0 :                                  page->HighWaterMark() - page->area_start(),
    3683           0 :                                  ZapValue());
    3684             :   }
    3685             : }
    3686             : 
    3687     2200385 : void Heap::ZapCodeObject(Address start_address, int size_in_bytes) {
    3688             : #ifdef DEBUG
    3689             :   DCHECK(IsAligned(start_address, kIntSize));
    3690             :   for (int i = 0; i < size_in_bytes / kIntSize; i++) {
    3691             :     Memory<int>(start_address + i * kIntSize) = kCodeZapValue;
    3692             :   }
    3693             : #endif
    3694     2200385 : }
    3695             : 
    3696             : // TODO(ishell): move builtin accessors out of Heap.
    3697   149629292 : Code Heap::builtin(int index) {
    3698             :   DCHECK(Builtins::IsBuiltinId(index));
    3699   299259515 :   return Code::cast(Object(isolate()->builtins_table()[index]));
    3700             : }
    3701             : 
    3702    91961855 : Address Heap::builtin_address(int index) {
    3703             :   DCHECK(Builtins::IsBuiltinId(index) || index == Builtins::builtin_count);
    3704   520544448 :   return reinterpret_cast<Address>(&isolate()->builtins_table()[index]);
    3705             : }
    3706             : 
    3707      253848 : void Heap::set_builtin(int index, Code builtin) {
    3708             :   DCHECK(Builtins::IsBuiltinId(index));
    3709             :   DCHECK(Internals::HasHeapObjectTag(builtin.ptr()));
    3710             :   // The given builtin may be completely uninitialized thus we cannot check its
    3711             :   // type here.
    3712      507696 :   isolate()->builtins_table()[index] = builtin.ptr();
    3713      253848 : }
    3714             : 
    3715      108776 : void Heap::IterateRoots(RootVisitor* v, VisitMode mode) {
    3716      108776 :   IterateStrongRoots(v, mode);
    3717      108776 :   IterateWeakRoots(v, mode);
    3718      108776 : }
    3719             : 
    3720      171854 : void Heap::IterateWeakRoots(RootVisitor* v, VisitMode mode) {
    3721      343708 :   const bool isMinorGC = mode == VISIT_ALL_IN_SCAVENGE ||
    3722      343708 :                          mode == VISIT_ALL_IN_MINOR_MC_MARK ||
    3723             :                          mode == VISIT_ALL_IN_MINOR_MC_UPDATE;
    3724             :   v->VisitRootPointer(Root::kStringTable, nullptr,
    3725      515562 :                       FullObjectSlot(&roots_table()[RootIndex::kStringTable]));
    3726      171854 :   v->Synchronize(VisitorSynchronization::kStringTable);
    3727      171854 :   if (!isMinorGC && mode != VISIT_ALL_IN_SWEEP_NEWSPACE &&
    3728             :       mode != VISIT_FOR_SERIALIZATION) {
    3729             :     // Scavenge collections have special processing for this.
    3730             :     // Do not visit for serialization, since the external string table will
    3731             :     // be populated from scratch upon deserialization.
    3732        1690 :     external_string_table_.IterateAll(v);
    3733             :   }
    3734      171854 :   v->Synchronize(VisitorSynchronization::kExternalStringsTable);
    3735      171854 : }
    3736             : 
    3737       63078 : void Heap::IterateSmiRoots(RootVisitor* v) {
    3738             :   // Acquire execution access since we are going to read stack limit values.
    3739             :   ExecutionAccess access(isolate());
    3740             :   v->VisitRootPointers(Root::kSmiRootList, nullptr,
    3741             :                        roots_table().smi_roots_begin(),
    3742      126156 :                        roots_table().smi_roots_end());
    3743       63078 :   v->Synchronize(VisitorSynchronization::kSmiRootList);
    3744       63078 : }
    3745             : 
    3746             : // We cannot avoid stale handles to left-trimmed objects, but we can only make
    3747             : // sure all handles still needed are updated. Filter out a stale pointer
    3748             : // and clear the slot to allow post processing of handles (needed because
    3749             : // the sweeper might actually free the underlying page).
    3750           0 : class FixStaleLeftTrimmedHandlesVisitor : public RootVisitor {
    3751             :  public:
    3752      307250 :   explicit FixStaleLeftTrimmedHandlesVisitor(Heap* heap) : heap_(heap) {
    3753             :     USE(heap_);
    3754             :   }
    3755             : 
    3756           0 :   void VisitRootPointer(Root root, const char* description,
    3757             :                         FullObjectSlot p) override {
    3758           0 :     FixHandle(p);
    3759           0 :   }
    3760             : 
    3761      721627 :   void VisitRootPointers(Root root, const char* description,
    3762             :                          FullObjectSlot start, FullObjectSlot end) override {
    3763    97225713 :     for (FullObjectSlot p = start; p < end; ++p) FixHandle(p);
    3764      721627 :   }
    3765             : 
    3766             :  private:
    3767    95782459 :   inline void FixHandle(FullObjectSlot p) {
    3768   204140722 :     if (!(*p)->IsHeapObject()) return;
    3769    83206657 :     HeapObject current = HeapObject::cast(*p);
    3770             :     const MapWord map_word = current->map_word();
    3771   165244086 :     if (!map_word.IsForwardingAddress() && current->IsFiller()) {
    3772             : #ifdef DEBUG
    3773             :       // We need to find a FixedArrayBase map after walking the fillers.
    3774             :       while (current->IsFiller()) {
    3775             :         Address next = current->ptr();
    3776             :         if (current->map() == ReadOnlyRoots(heap_).one_pointer_filler_map()) {
    3777             :           next += kTaggedSize;
    3778             :         } else if (current->map() ==
    3779             :                    ReadOnlyRoots(heap_).two_pointer_filler_map()) {
    3780             :           next += 2 * kTaggedSize;
    3781             :         } else {
    3782             :           next += current->Size();
    3783             :         }
    3784             :         current = HeapObject::cast(Object(next));
    3785             :       }
    3786             :       DCHECK(current->IsFixedArrayBase());
    3787             : #endif  // DEBUG
    3788             :       p.store(Smi::kZero);
    3789             :     }
    3790             :   }
    3791             : 
    3792             :   Heap* heap_;
    3793             : };
    3794             : 
    3795      307250 : void Heap::IterateStrongRoots(RootVisitor* v, VisitMode mode) {
    3796      614500 :   const bool isMinorGC = mode == VISIT_ALL_IN_SCAVENGE ||
    3797      614500 :                          mode == VISIT_ALL_IN_MINOR_MC_MARK ||
    3798             :                          mode == VISIT_ALL_IN_MINOR_MC_UPDATE;
    3799             :   v->VisitRootPointers(Root::kStrongRootList, nullptr,
    3800             :                        roots_table().strong_roots_begin(),
    3801      614500 :                        roots_table().strong_roots_end());
    3802      307250 :   v->Synchronize(VisitorSynchronization::kStrongRootList);
    3803             : 
    3804     2859672 :   isolate_->bootstrapper()->Iterate(v);
    3805      307250 :   v->Synchronize(VisitorSynchronization::kBootstrapper);
    3806      307250 :   isolate_->Iterate(v);
    3807      307250 :   v->Synchronize(VisitorSynchronization::kTop);
    3808      307250 :   Relocatable::Iterate(isolate_, v);
    3809      307250 :   v->Synchronize(VisitorSynchronization::kRelocatable);
    3810      614500 :   isolate_->debug()->Iterate(v);
    3811      307250 :   v->Synchronize(VisitorSynchronization::kDebug);
    3812             : 
    3813      614500 :   isolate_->compilation_cache()->Iterate(v);
    3814      307250 :   v->Synchronize(VisitorSynchronization::kCompilationCache);
    3815             : 
    3816             :   // Iterate over local handles in handle scopes.
    3817             :   FixStaleLeftTrimmedHandlesVisitor left_trim_visitor(this);
    3818      614500 :   isolate_->handle_scope_implementer()->Iterate(&left_trim_visitor);
    3819      614500 :   isolate_->handle_scope_implementer()->Iterate(v);
    3820      307250 :   isolate_->IterateDeferredHandles(v);
    3821      307250 :   v->Synchronize(VisitorSynchronization::kHandleScope);
    3822             : 
    3823             :   // Iterate over the builtin code objects and code stubs in the
    3824             :   // heap. Note that it is not necessary to iterate over code objects
    3825             :   // on scavenge collections.
    3826      307250 :   if (!isMinorGC) {
    3827      283656 :     IterateBuiltins(v);
    3828      283655 :     v->Synchronize(VisitorSynchronization::kBuiltins);
    3829             : 
    3830             :     // The dispatch table is set up directly from the builtins using
    3831             :     // InitializeDispatchTable so there is no need to iterate to create it.
    3832      283656 :     if (mode != VISIT_FOR_SERIALIZATION) {
    3833             :       // Currently we iterate the dispatch table to update pointers to possibly
    3834             :       // moved Code objects for bytecode handlers.
    3835             :       // TODO(v8:6666): Remove iteration once builtins are embedded (and thus
    3836             :       // immovable) in every build configuration.
    3837      441156 :       isolate_->interpreter()->IterateDispatchTable(v);
    3838      220578 :       v->Synchronize(VisitorSynchronization::kDispatchTable);
    3839             :     }
    3840             :   }
    3841             : 
    3842             :   // Iterate over global handles.
    3843      307250 :   switch (mode) {
    3844             :     case VISIT_FOR_SERIALIZATION:
    3845             :       // Global handles are not iterated by the serializer. Values referenced by
    3846             :       // global handles need to be added manually.
    3847             :       break;
    3848             :     case VISIT_ONLY_STRONG:
    3849      271590 :       isolate_->global_handles()->IterateStrongRoots(v);
    3850      135795 :       break;
    3851             :     case VISIT_ALL_IN_SCAVENGE:
    3852       47188 :       isolate_->global_handles()->IterateNewSpaceStrongAndDependentRoots(v);
    3853       23594 :       break;
    3854             :     case VISIT_ALL_IN_MINOR_MC_MARK:
    3855             :       // Global handles are processed manually by the minor MC.
    3856             :       break;
    3857             :     case VISIT_ALL_IN_MINOR_MC_UPDATE:
    3858             :       // Global handles are processed manually by the minor MC.
    3859             :       break;
    3860             :     case VISIT_ALL_IN_SWEEP_NEWSPACE:
    3861             :     case VISIT_ALL:
    3862      169566 :       isolate_->global_handles()->IterateAllRoots(v);
    3863       84783 :       break;
    3864             :   }
    3865      307250 :   v->Synchronize(VisitorSynchronization::kGlobalHandles);
    3866             : 
    3867             :   // Iterate over eternal handles. Eternal handles are not iterated by the
    3868             :   // serializer. Values referenced by eternal handles need to be added manually.
    3869      307250 :   if (mode != VISIT_FOR_SERIALIZATION) {
    3870      244172 :     if (isMinorGC) {
    3871       47188 :       isolate_->eternal_handles()->IterateNewSpaceRoots(v);
    3872             :     } else {
    3873      441156 :       isolate_->eternal_handles()->IterateAllRoots(v);
    3874             :     }
    3875             :   }
    3876      307250 :   v->Synchronize(VisitorSynchronization::kEternalHandles);
    3877             : 
    3878             :   // Iterate over pointers being held by inactive threads.
    3879      614500 :   isolate_->thread_manager()->Iterate(v);
    3880      307250 :   v->Synchronize(VisitorSynchronization::kThreadManager);
    3881             : 
    3882             :   // Iterate over other strong roots (currently only identity maps).
    3883      614850 :   for (StrongRootsList* list = strong_roots_list_; list; list = list->next) {
    3884      307600 :     v->VisitRootPointers(Root::kStrongRoots, nullptr, list->start, list->end);
    3885             :   }
    3886      307250 :   v->Synchronize(VisitorSynchronization::kStrongRoots);
    3887             : 
    3888             :   // Iterate over pending Microtasks stored in MicrotaskQueues.
    3889      307250 :   MicrotaskQueue* default_microtask_queue = isolate_->default_microtask_queue();
    3890      307250 :   if (default_microtask_queue) {
    3891      307250 :     MicrotaskQueue* microtask_queue = default_microtask_queue;
    3892      307250 :     do {
    3893      307250 :       microtask_queue->IterateMicrotasks(v);
    3894             :       microtask_queue = microtask_queue->next();
    3895             :     } while (microtask_queue != default_microtask_queue);
    3896             :   }
    3897             : 
    3898             :   // Iterate over the partial snapshot cache unless serializing or
    3899             :   // deserializing.
    3900      307250 :   if (mode != VISIT_FOR_SERIALIZATION) {
    3901      244172 :     SerializerDeserializer::Iterate(isolate_, v);
    3902      244172 :     v->Synchronize(VisitorSynchronization::kPartialSnapshotCache);
    3903             :   }
    3904      307250 : }
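
// A minimal sketch of the traversal pattern used for the microtask queues
// above: a circular singly linked list is walked by visiting the head first
// and then following next() until the walk wraps around. The Node type and
// VisitCircularList helper below are illustrative stand-ins, not V8 API.
struct Node {
  Node* next;
};

template <typename Visitor>
void VisitCircularList(Node* head, Visitor visit) {
  if (head == nullptr) return;
  Node* node = head;
  do {
    visit(node);           // the head is visited exactly once...
    node = node->next;
  } while (node != head);  // ...and the loop stops when it comes back around
}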
    3905             : 
    3906         399 : void Heap::IterateWeakGlobalHandles(RootVisitor* v) {
    3907         399 :   isolate_->global_handles()->IterateWeakRoots(v);
    3908         399 : }
    3909             : 
    3910      283824 : void Heap::IterateBuiltins(RootVisitor* v) {
    3911   428866355 :   for (int i = 0; i < Builtins::builtin_count; i++) {
    3912             :     v->VisitRootPointer(Root::kBuiltins, Builtins::name(i),
    3913   857165293 :                         FullObjectSlot(builtin_address(i)));
    3914             :   }
    3915             : #ifdef V8_EMBEDDED_BUILTINS
    3916             :   // The entry table does not need to be updated if all builtins are embedded.
    3917             :   STATIC_ASSERT(Builtins::AllBuiltinsAreIsolateIndependent());
    3918             : #else
    3919             :   // If builtins are not embedded, they may move and thus the entry table must
    3920             :   // be updated.
    3921             :   // TODO(v8:6666): Remove once builtins are embedded unconditionally.
    3922             :   Builtins::UpdateBuiltinEntryTable(isolate());
    3923             : #endif  // V8_EMBEDDED_BUILTINS
    3924      283655 : }
    3925             : 
    3926             : // TODO(1236194): Since the heap size is configurable on the command line
    3927             : // and through the API, we should gracefully handle the case where the heap
    3928             : // size is not big enough to fit all the initial objects.
    3929       62883 : void Heap::ConfigureHeap(size_t max_semi_space_size_in_kb,
    3930             :                          size_t max_old_generation_size_in_mb,
    3931             :                          size_t code_range_size_in_mb) {
    3932             :   // Overwrite default configuration.
    3933       62883 :   if (max_semi_space_size_in_kb != 0) {
    3934             :     max_semi_space_size_ =
    3935       57568 :         RoundUp<Page::kPageSize>(max_semi_space_size_in_kb * KB);
    3936             :   }
    3937       62883 :   if (max_old_generation_size_in_mb != 0) {
    3938       28788 :     max_old_generation_size_ = max_old_generation_size_in_mb * MB;
    3939             :   }
    3940             : 
    3941             :   // If max space size flags are specified, overwrite the configuration.
    3942       62883 :   if (FLAG_max_semi_space_size > 0) {
    3943         195 :     max_semi_space_size_ = static_cast<size_t>(FLAG_max_semi_space_size) * MB;
    3944             :   }
    3945       62883 :   if (FLAG_max_old_space_size > 0) {
    3946             :     max_old_generation_size_ =
    3947          39 :         static_cast<size_t>(FLAG_max_old_space_size) * MB;
    3948             :   }
    3949             : 
    3950             :   if (Page::kPageSize > MB) {
    3951             :     max_semi_space_size_ = RoundUp<Page::kPageSize>(max_semi_space_size_);
    3952             :     max_old_generation_size_ =
    3953             :         RoundUp<Page::kPageSize>(max_old_generation_size_);
    3954             :   }
    3955             : 
    3956       62883 :   if (FLAG_stress_compaction) {
    3957             :     // This will cause more frequent GCs when stressing.
    3958         105 :     max_semi_space_size_ = MB;
    3959             :   }
    3960             : 
    3961             :   // The new space size must be a power of two to support single-bit testing
    3962             :   // for containment.
    3963             :   max_semi_space_size_ = static_cast<size_t>(base::bits::RoundUpToPowerOfTwo64(
    3964       62883 :       static_cast<uint64_t>(max_semi_space_size_)));
    3965             : 
    3966       62883 :   if (max_semi_space_size_ == kMaxSemiSpaceSizeInKB * KB) {
    3967             :     // Start with at least 1*MB semi-space on machines with a lot of memory.
    3968             :     initial_semispace_size_ =
    3969      125334 :         Max(initial_semispace_size_, static_cast<size_t>(1 * MB));
    3970             :   }
    3971             : 
    3972       62883 :   if (FLAG_min_semi_space_size > 0) {
    3973             :     size_t initial_semispace_size =
    3974          35 :         static_cast<size_t>(FLAG_min_semi_space_size) * MB;
    3975          35 :     if (initial_semispace_size > max_semi_space_size_) {
    3976           5 :       initial_semispace_size_ = max_semi_space_size_;
    3977           5 :       if (FLAG_trace_gc) {
    3978             :         PrintIsolate(isolate_,
    3979             :                      "Min semi-space size cannot be more than the maximum "
    3980             :                      "semi-space size of %" PRIuS " MB\n",
    3981           0 :                      max_semi_space_size_ / MB);
    3982             :       }
    3983             :     } else {
    3984             :       initial_semispace_size_ =
    3985          30 :           RoundUp<Page::kPageSize>(initial_semispace_size);
    3986             :     }
    3987             :   }
    3988             : 
    3989      125766 :   initial_semispace_size_ = Min(initial_semispace_size_, max_semi_space_size_);
    3990             : 
    3991       62883 :   if (FLAG_semi_space_growth_factor < 2) {
    3992           0 :     FLAG_semi_space_growth_factor = 2;
    3993             :   }
    3994             : 
    3995             :   // The old generation is paged and needs at least one page for each space.
    3996             :   int paged_space_count =
    3997             :       LAST_GROWABLE_PAGED_SPACE - FIRST_GROWABLE_PAGED_SPACE + 1;
    3998             :   initial_max_old_generation_size_ = max_old_generation_size_ =
    3999             :       Max(static_cast<size_t>(paged_space_count * Page::kPageSize),
    4000      125766 :           max_old_generation_size_);
    4001             : 
    4002       62883 :   if (FLAG_initial_old_space_size > 0) {
    4003           0 :     initial_old_generation_size_ = FLAG_initial_old_space_size * MB;
    4004             :   } else {
    4005             :     initial_old_generation_size_ =
    4006       62883 :         max_old_generation_size_ / kInitalOldGenerationLimitFactor;
    4007             :   }
    4008       62883 :   old_generation_allocation_limit_ = initial_old_generation_size_;
    4009             : 
    4010             :   // We rely on being able to allocate new arrays in paged spaces.
    4011             :   DCHECK(kMaxRegularHeapObjectSize >=
    4012             :          (JSArray::kSize +
    4013             :           FixedArray::SizeFor(JSArray::kInitialMaxFastElementArray) +
    4014             :           AllocationMemento::kSize));
    4015             : 
    4016       62883 :   code_range_size_ = code_range_size_in_mb * MB;
    4017             : 
    4018       62883 :   configured_ = true;
    4019       62883 : }
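
// A standalone sketch of the semi-space sizing rules above: the requested
// size is rounded up to a whole page and then to a power of two (the latter
// is required for the single-bit containment test). kPageSize, the helper,
// and the 768 KB request are illustrative values, not V8's actual constants.
#include <cstdint>
#include <cstdio>

static uint64_t RoundUpToPowerOfTwo64(uint64_t v) {
  v--;
  v |= v >> 1;  v |= v >> 2;  v |= v >> 4;
  v |= v >> 8;  v |= v >> 16; v |= v >> 32;
  return v + 1;
}

int main() {
  const uint64_t KB = 1024, kPageSize = 512 * KB;
  uint64_t semi_space = 768 * KB;  // hypothetical --max-semi-space-size value
  semi_space = (semi_space + kPageSize - 1) / kPageSize * kPageSize;  // 1024 KB
  semi_space = RoundUpToPowerOfTwo64(semi_space);                     // 1024 KB
  std::printf("max semi-space: %llu KB\n",
              static_cast<unsigned long long>(semi_space / KB));
  return 0;
}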
    4020             : 
    4021             : 
    4022      107066 : void Heap::AddToRingBuffer(const char* string) {
    4023             :   size_t first_part =
    4024      107066 :       Min(strlen(string), kTraceRingBufferSize - ring_buffer_end_);
    4025      107066 :   memcpy(trace_ring_buffer_ + ring_buffer_end_, string, first_part);
    4026      107066 :   ring_buffer_end_ += first_part;
    4027      107066 :   if (first_part < strlen(string)) {
    4028       28186 :     ring_buffer_full_ = true;
    4029       28186 :     size_t second_part = strlen(string) - first_part;
    4030       28186 :     memcpy(trace_ring_buffer_, string + first_part, second_part);
    4031       28186 :     ring_buffer_end_ = second_part;
    4032             :   }
    4033      107066 : }
    4034             : 
    4035             : 
    4036          15 : void Heap::GetFromRingBuffer(char* buffer) {
    4037             :   size_t copied = 0;
    4038          15 :   if (ring_buffer_full_) {
    4039           0 :     copied = kTraceRingBufferSize - ring_buffer_end_;
    4040           0 :     memcpy(buffer, trace_ring_buffer_ + ring_buffer_end_, copied);
    4041             :   }
    4042          15 :   memcpy(buffer + copied, trace_ring_buffer_, ring_buffer_end_);
    4043          15 : }
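
// A toy model of the two ring-buffer functions above, assuming an 8-byte
// buffer: writes wrap to the front once the buffer fills, and reads start at
// the oldest byte so the output comes out in chronological order. All names
// and sizes here are stand-ins for the kTraceRingBufferSize machinery.
#include <algorithm>
#include <cstddef>
#include <cstdio>
#include <cstring>

constexpr size_t kRingSize = 8;
char ring[kRingSize];
size_t ring_end = 0;
bool ring_full = false;

void Add(const char* s) {
  size_t first = std::min(std::strlen(s), kRingSize - ring_end);
  std::memcpy(ring + ring_end, s, first);
  ring_end += first;
  if (first < std::strlen(s)) {  // wrap: spill the remainder to the front
    ring_full = true;
    size_t second = std::strlen(s) - first;
    std::memcpy(ring, s + first, second);
    ring_end = second;
  }
}

void Get(char* out) {  // oldest bytes first, mirroring GetFromRingBuffer
  size_t copied = 0;
  if (ring_full) {
    copied = kRingSize - ring_end;
    std::memcpy(out, ring + ring_end, copied);
  }
  std::memcpy(out + copied, ring, ring_end);
  out[copied + ring_end] = '\0';
}

int main() {
  Add("abcdef");
  Add("ghij");               // wraps: the buffer now holds the last 8 bytes
  char out[kRingSize + 1];
  Get(out);
  std::printf("%s\n", out);  // prints "cdefghij"
  return 0;
}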
    4044             : 
    4045       34093 : void Heap::ConfigureHeapDefault() { ConfigureHeap(0, 0, 0); }
    4046             : 
    4047          75 : void Heap::RecordStats(HeapStats* stats, bool take_snapshot) {
    4048          15 :   *stats->start_marker = HeapStats::kStartMarker;
    4049          15 :   *stats->end_marker = HeapStats::kEndMarker;
    4050          15 :   *stats->ro_space_size = read_only_space_->Size();
    4051          30 :   *stats->ro_space_capacity = read_only_space_->Capacity();
    4052          15 :   *stats->new_space_size = new_space_->Size();
    4053          30 :   *stats->new_space_capacity = new_space_->Capacity();
    4054          15 :   *stats->old_space_size = old_space_->SizeOfObjects();
    4055          30 :   *stats->old_space_capacity = old_space_->Capacity();
    4056          15 :   *stats->code_space_size = code_space_->SizeOfObjects();
    4057          30 :   *stats->code_space_capacity = code_space_->Capacity();
    4058          15 :   *stats->map_space_size = map_space_->SizeOfObjects();
    4059          30 :   *stats->map_space_capacity = map_space_->Capacity();
    4060          15 :   *stats->lo_space_size = lo_space_->Size();
    4061          15 :   *stats->code_lo_space_size = code_lo_space_->Size();
    4062          45 :   isolate_->global_handles()->RecordStats(stats);
    4063          30 :   *stats->memory_allocator_size = memory_allocator()->Size();
    4064             :   *stats->memory_allocator_capacity =
    4065          30 :       memory_allocator()->Size() + memory_allocator()->Available();
    4066          15 :   *stats->os_error = base::OS::GetLastError();
    4067          30 :   *stats->malloced_memory = isolate_->allocator()->GetCurrentMemoryUsage();
    4068          30 :   *stats->malloced_peak_memory = isolate_->allocator()->GetMaxMemoryUsage();
    4069          15 :   if (take_snapshot) {
    4070           0 :     HeapIterator iterator(this);
    4071           0 :     for (HeapObject obj = iterator.next(); !obj.is_null();
    4072             :          obj = iterator.next()) {
    4073             :       InstanceType type = obj->map()->instance_type();
    4074             :       DCHECK(0 <= type && type <= LAST_TYPE);
    4075           0 :       stats->objects_per_type[type]++;
    4076           0 :       stats->size_per_type[type] += obj->Size();
    4077           0 :     }
    4078             :   }
    4079          15 :   if (stats->last_few_messages != nullptr)
    4080          15 :     GetFromRingBuffer(stats->last_few_messages);
    4081          15 :   if (stats->js_stacktrace != nullptr) {
    4082             :     FixedStringAllocator fixed(stats->js_stacktrace, kStacktraceBufferSize - 1);
    4083             :     StringStream accumulator(&fixed, StringStream::kPrintObjectConcise);
    4084          15 :     if (gc_state() == Heap::NOT_IN_GC) {
    4085          15 :       isolate()->PrintStack(&accumulator, Isolate::kPrintStackVerbose);
    4086             :     } else {
    4087           0 :       accumulator.Add("Cannot get stack trace in GC.");
    4088             :     }
    4089             :   }
    4090          15 : }
    4091             : 
    4092     1950178 : size_t Heap::OldGenerationSizeOfObjects() {
    4093             :   PagedSpaces spaces(this, PagedSpaces::SpacesSpecifier::kAllPagedSpaces);
    4094             :   size_t total = 0;
    4095     9750891 :   for (PagedSpace* space = spaces.next(); space != nullptr;
    4096             :        space = spaces.next()) {
    4097     7800713 :     total += space->SizeOfObjects();
    4098             :   }
    4099     1950178 :   return total + lo_space_->SizeOfObjects();
    4100             : }
    4101             : 
    4102         170 : uint64_t Heap::PromotedExternalMemorySize() {
    4103             :   IsolateData* isolate_data = isolate()->isolate_data();
    4104      487242 :   if (isolate_data->external_memory_ <=
    4105             :       isolate_data->external_memory_at_last_mark_compact_) {
    4106             :     return 0;
    4107             :   }
    4108             :   return static_cast<uint64_t>(
    4109       27241 :       isolate_data->external_memory_ -
    4110       27241 :       isolate_data->external_memory_at_last_mark_compact_);
    4111             : }
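
// The clamped difference above, in isolation: promoted external memory is the
// growth of external_memory_ since the last mark-compact, clamped at zero so
// that shrinkage never produces a huge unsigned value. Byte counts are made up.
#include <cstdint>
#include <cstdio>

int main() {
  int64_t external_memory = 48 * 1024;
  int64_t at_last_mark_compact = 64 * 1024;  // memory has shrunk since then
  uint64_t promoted =
      external_memory <= at_last_mark_compact
          ? 0
          : static_cast<uint64_t>(external_memory - at_last_mark_compact);
  std::printf("%llu\n", static_cast<unsigned long long>(promoted));  // prints 0
  return 0;
}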
    4112             : 
    4113        4092 : bool Heap::ShouldOptimizeForLoadTime() {
    4114           0 :   return isolate()->rail_mode() == PERFORMANCE_LOAD &&
    4115        4092 :          !AllocationLimitOvershotByLargeMargin() &&
    4116           0 :          MonotonicallyIncreasingTimeInMs() <
    4117        4092 :              isolate()->LoadStartTimeMs() + kMaxLoadTimeMs;
    4118             : }
    4119             : 
    4120             : // This predicate is called when an old generation space cannot allocate from
    4121             : // the free list and is about to add a new page. Returning false will cause a
    4122             : // major GC. It happens when the old generation allocation limit is reached and
    4123             : // - either we need to optimize for memory usage,
    4124             : // - or incremental marking is not in progress and we cannot start it.
    4125      503380 : bool Heap::ShouldExpandOldGenerationOnSlowAllocation() {
    4126      501276 :   if (always_allocate() || OldGenerationSpaceAvailable() > 0) return true;
    4127             :   // We reached the old generation allocation limit.
    4128             : 
    4129        2137 :   if (ShouldOptimizeForMemoryUsage()) return false;
    4130             : 
    4131        2104 :   if (ShouldOptimizeForLoadTime()) return true;
    4132             : 
    4133        2104 :   if (incremental_marking()->NeedsFinalization()) {
    4134        1593 :     return !AllocationLimitOvershotByLargeMargin();
    4135             :   }
    4136             : 
    4137         540 :   if (incremental_marking()->IsStopped() &&
    4138          29 :       IncrementalMarkingLimitReached() == IncrementalMarkingLimit::kNoLimit) {
    4139             :     // We cannot start incremental marking.
    4140             :     return false;
    4141             :   }
    4142         483 :   return true;
    4143             : }
    4144             : 
    4145      186963 : Heap::HeapGrowingMode Heap::CurrentHeapGrowingMode() {
    4146      101981 :   if (ShouldReduceMemory() || FLAG_stress_compaction) {
    4147             :     return Heap::HeapGrowingMode::kMinimal;
    4148             :   }
    4149             : 
    4150       85030 :   if (ShouldOptimizeForMemoryUsage()) {
    4151             :     return Heap::HeapGrowingMode::kConservative;
    4152             :   }
    4153             : 
    4154      169964 :   if (memory_reducer()->ShouldGrowHeapSlowly()) {
    4155             :     return Heap::HeapGrowingMode::kSlow;
    4156             :   }
    4157             : 
    4158       84982 :   return Heap::HeapGrowingMode::kDefault;
    4159             : }
    4160             : 
    4161             : // This function returns either kNoLimit, kSoftLimit, or kHardLimit.
    4162             : // kNoLimit means that incremental marking is either disabled or it is too
    4163             : // early to start it.
    4164             : // kSoftLimit means that incremental marking should be started soon.
    4165             : // kHardLimit means that incremental marking should be started immediately.
    4166     1399132 : Heap::IncrementalMarkingLimit Heap::IncrementalMarkingLimitReached() {
    4167             :   // Code using an AlwaysAllocateScope assumes that the GC state does not
    4168             :   // change; that implies that no marking steps may be performed.
    4169     2280351 :   if (!incremental_marking()->CanBeActivated() || always_allocate()) {
    4170             :     // Incremental marking is disabled or it is too early to start.
    4171             :     return IncrementalMarkingLimit::kNoLimit;
    4172             :   }
    4173      880293 :   if (FLAG_stress_incremental_marking) {
    4174             :     return IncrementalMarkingLimit::kHardLimit;
    4175             :   }
    4176      850881 :   if (OldGenerationSizeOfObjects() <=
    4177             :       IncrementalMarking::kActivationThreshold) {
    4178             :     // The heap is too small; it is too early to start incremental marking.
    4179             :     return IncrementalMarkingLimit::kNoLimit;
    4180             :   }
    4181       46382 :   if ((FLAG_stress_compaction && (gc_count_ & 1) != 0) ||
    4182             :       HighMemoryPressure()) {
    4183             :     // If there is high memory pressure or stress testing is enabled, then
    4184             :     // start marking immediately.
    4185             :     return IncrementalMarkingLimit::kHardLimit;
    4186             :   }
    4187             : 
    4188       23175 :   if (FLAG_stress_marking > 0) {
    4189             :     double gained_since_last_gc =
    4190           0 :         PromotedSinceLastGC() +
    4191           0 :         (isolate()->isolate_data()->external_memory_ -
    4192           0 :          isolate()->isolate_data()->external_memory_at_last_mark_compact_);
    4193             :     double size_before_gc =
    4194           0 :         OldGenerationObjectsAndPromotedExternalMemorySize() -
    4195           0 :         gained_since_last_gc;
    4196           0 :     double bytes_to_limit = old_generation_allocation_limit_ - size_before_gc;
    4197           0 :     if (bytes_to_limit > 0) {
    4198           0 :       double current_percent = (gained_since_last_gc / bytes_to_limit) * 100.0;
    4199             : 
    4200           0 :       if (FLAG_trace_stress_marking) {
    4201             :         isolate()->PrintWithTimestamp(
    4202             :             "[IncrementalMarking] %.2lf%% of the memory limit reached\n",
    4203           0 :             current_percent);
    4204             :       }
    4205             : 
    4206           0 :       if (FLAG_fuzzer_gc_analysis) {
    4207             :         // Skips values >=100% since they already trigger marking.
    4208           0 :         if (current_percent < 100.0) {
    4209             :           max_marking_limit_reached_ =
    4210           0 :               std::max(max_marking_limit_reached_, current_percent);
    4211             :         }
    4212           0 :       } else if (static_cast<int>(current_percent) >=
    4213             :                  stress_marking_percentage_) {
    4214           0 :         stress_marking_percentage_ = NextStressMarkingLimit();
    4215           0 :         return IncrementalMarkingLimit::kHardLimit;
    4216             :       }
    4217             :     }
    4218             :   }
    4219             : 
    4220       23175 :   size_t old_generation_space_available = OldGenerationSpaceAvailable();
    4221             : 
    4222       46350 :   if (old_generation_space_available > new_space_->Capacity()) {
    4223             :     return IncrementalMarkingLimit::kNoLimit;
    4224             :   }
    4225        2022 :   if (ShouldOptimizeForMemoryUsage()) {
    4226             :     return IncrementalMarkingLimit::kHardLimit;
    4227             :   }
    4228        1988 :   if (ShouldOptimizeForLoadTime()) {
    4229             :     return IncrementalMarkingLimit::kNoLimit;
    4230             :   }
    4231        1988 :   if (old_generation_space_available == 0) {
    4232             :     return IncrementalMarkingLimit::kHardLimit;
    4233             :   }
    4234        1767 :   return IncrementalMarkingLimit::kSoftLimit;
    4235             : }
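
// A worked example of the stress-marking percentage computed above, with
// made-up numbers: 2 MB were promoted (or added as external memory) since the
// last GC, and the old generation started the cycle 8 MB below its limit.
#include <cstdio>

int main() {
  const double MB = 1024.0 * 1024.0;
  double gained_since_last_gc = 2 * MB;
  double bytes_to_limit = 8 * MB;  // allocation limit minus size_before_gc
  double current_percent = (gained_since_last_gc / bytes_to_limit) * 100.0;
  // Prints 25.00: a quarter of the remaining headroom has been consumed, so
  // marking would start once this crosses stress_marking_percentage_.
  std::printf("%.2lf%% of the memory limit reached\n", current_percent);
  return 0;
}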
    4236             : 
    4237        8148 : void Heap::EnableInlineAllocation() {
    4238        8148 :   if (!inline_allocation_disabled_) return;
    4239        8138 :   inline_allocation_disabled_ = false;
    4240             : 
    4241             :   // Update inline allocation limit for new space.
    4242        8138 :   new_space()->UpdateInlineAllocationLimit(0);
    4243             : }
    4244             : 
    4245             : 
    4246       16326 : void Heap::DisableInlineAllocation() {
    4247        8163 :   if (inline_allocation_disabled_) return;
    4248        8163 :   inline_allocation_disabled_ = true;
    4249             : 
    4250             :   // Update inline allocation limit for new space.
    4251        8163 :   new_space()->UpdateInlineAllocationLimit(0);
    4252             : 
    4253             :   // Update inline allocation limit for old spaces.
    4254             :   PagedSpaces spaces(this);
    4255        8163 :   CodeSpaceMemoryModificationScope modification_scope(this);
    4256       32652 :   for (PagedSpace* space = spaces.next(); space != nullptr;
    4257             :        space = spaces.next()) {
    4258       24489 :     space->FreeLinearAllocationArea();
    4259        8163 :   }
    4260             : }
    4261             : 
    4262       45139 : HeapObject Heap::EnsureImmovableCode(HeapObject heap_object, int object_size) {
    4263             :   // Code objects that should stay at a fixed address are allocated either
    4264             :   // in the first page of code space or in large object space; during
    4265             :   // snapshot creation, the containing page is instead marked as immovable.
    4266             :   DCHECK(!heap_object.is_null());
    4267             :   DCHECK(code_space_->Contains(heap_object));
    4268             :   DCHECK_GE(object_size, 0);
    4269       45139 :   if (!Heap::IsImmovable(heap_object)) {
    4270       80290 :     if (isolate()->serializer_enabled() ||
    4271       40142 :         code_space_->first_page()->Contains(heap_object->address())) {
    4272             :       MemoryChunk::FromHeapObject(heap_object)->MarkNeverEvacuate();
    4273             :     } else {
    4274             :       // Discard the first code allocation, which was on a page where it could
    4275             :       // be moved.
    4276             :       CreateFillerObjectAt(heap_object->address(), object_size,
    4277       40142 :                            ClearRecordedSlots::kNo);
    4278       40142 :       heap_object = AllocateRawCodeInLargeObjectSpace(object_size);
    4279             :       UnprotectAndRegisterMemoryChunk(heap_object);
    4280             :       ZapCodeObject(heap_object->address(), object_size);
    4281       40142 :       OnAllocationEvent(heap_object, object_size);
    4282             :     }
    4283             :   }
    4284       45139 :   return heap_object;
    4285             : }
    4286             : 
    4287   316946822 : HeapObject Heap::AllocateRawWithLightRetry(int size, AllocationSpace space,
    4288             :                                            AllocationAlignment alignment) {
    4289             :   // Two GCs before panicking. In new space, a retry almost always succeeds.
    4290   316946822 :   AllocationResult alloc = AllocateRaw(size, space, alignment);
    4291   316946759 :   if (alloc.To(&result)) {
    4292             :     DCHECK(result != ReadOnlyRoots(this).exception());
    4293   316926508 :     return result;
    4294             :   }
    4295             :   // Two GCs before panicking. In newspace will almost always succeed.
    4296          93 :   for (int i = 0; i < 2; i++) {
    4297             :     CollectGarbage(alloc.RetrySpace(),
    4298       20156 :                    GarbageCollectionReason::kAllocationFailure);
    4299       20156 :     alloc = AllocateRaw(size, space, alignment);
    4300       20156 :     if (alloc.To(&result)) {
    4301             :       DCHECK(result != ReadOnlyRoots(this).exception());
    4302       20063 :       return result;
    4303             :     }
    4304             :   }
    4305          21 :   return HeapObject();
    4306             : }
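
// The retry shape above, reduced to its essentials: one fast-path attempt,
// at most two GC-and-retry rounds, then a null result that the caller (see
// AllocateRawWithRetryOrFail below) escalates to a last-resort GC and,
// failing that, a fatal OOM. try_alloc and collect_garbage are hypothetical
// hooks, not V8 API.
#include <functional>
#include <optional>

template <typename T>
std::optional<T> AllocateWithLightRetry(
    const std::function<std::optional<T>()>& try_alloc,
    const std::function<void()>& collect_garbage) {
  if (auto result = try_alloc()) return result;  // fast path: no GC needed
  for (int i = 0; i < 2; i++) {                  // two GCs before giving up
    collect_garbage();
    if (auto result = try_alloc()) return result;
  }
  return std::nullopt;  // the caller decides how to escalate
}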
    4307             : 
    4308   315066155 : HeapObject Heap::AllocateRawWithRetryOrFail(int size, AllocationSpace space,
    4309             :                                             AllocationAlignment alignment) {
    4310             :   AllocationResult alloc;
    4311   315066155 :   HeapObject result = AllocateRawWithLightRetry(size, space, alignment);
    4312   315065824 :   if (!result.is_null()) return result;
    4313             : 
    4314          21 :   isolate()->counters()->gc_last_resort_from_handles()->Increment();
    4315          21 :   CollectAllAvailableGarbage(GarbageCollectionReason::kLastResort);
    4316             :   {
    4317             :     AlwaysAllocateScope scope(isolate());
    4318          21 :     alloc = AllocateRaw(size, space, alignment);
    4319             :   }
    4320          21 :   if (alloc.To(&result)) {
    4321             :     DCHECK(result != ReadOnlyRoots(this).exception());
    4322          21 :     return result;
    4323             :   }
    4324             :   // TODO(1181417): Fix this.
    4325           0 :   FatalProcessOutOfMemory("CALL_AND_RETRY_LAST");
    4326             :   return HeapObject();
    4327             : }
    4328             : 
    4329             : // TODO(jkummerow): Refactor this. AllocateRaw should take an "immovability"
    4330             : // parameter and just do what's necessary.
    4331       40144 : HeapObject Heap::AllocateRawCodeInLargeObjectSpace(int size) {
    4332       40142 :   AllocationResult alloc = code_lo_space()->AllocateRaw(size);
    4333       40142 :   HeapObject result;
    4334       40142 :   if (alloc.To(&result)) {
    4335             :     DCHECK(result != ReadOnlyRoots(this).exception());
    4336       40140 :     return result;
    4337             :   }
    4338             :   // Two GCs before panicking.
    4339           0 :   for (int i = 0; i < 2; i++) {
    4340             :     CollectGarbage(alloc.RetrySpace(),
    4341           2 :                    GarbageCollectionReason::kAllocationFailure);
    4342           2 :     alloc = code_lo_space()->AllocateRaw(size);
    4343           2 :     if (alloc.To(&result)) {
    4344             :       DCHECK(result != ReadOnlyRoots(this).exception());
    4345           2 :       return result;
    4346             :     }
    4347             :   }
    4348           0 :   isolate()->counters()->gc_last_resort_from_handles()->Increment();
    4349           0 :   CollectAllAvailableGarbage(GarbageCollectionReason::kLastResort);
    4350             :   {
    4351             :     AlwaysAllocateScope scope(isolate());
    4352           0 :     alloc = code_lo_space()->AllocateRaw(size);
    4353             :   }
    4354           0 :   if (alloc.To(&result)) {
    4355             :     DCHECK(result != ReadOnlyRoots(this).exception());
    4356           0 :     return result;
    4357             :   }
    4358             :   // TODO(1181417): Fix this.
    4359           0 :   FatalProcessOutOfMemory("CALL_AND_RETRY_LAST");
    4360             :   return HeapObject();
    4361             : }
    4362             : 
    4363      314415 : void Heap::SetUp() {
    4364             : #ifdef V8_ENABLE_ALLOCATION_TIMEOUT
    4365             :   allocation_timeout_ = NextAllocationTimeout();
    4366             : #endif
    4367             : 
    4368             :   // Initialize heap spaces and initial maps and objects.
    4369             :   //
    4370             :   // If the heap is not yet configured (e.g. through the API), configure it.
    4371             :   // Configuration is based on the flags new-space-size (really the semispace
    4372             :   // size) and old-space-size if set, or the initial values of semispace_size_
    4373             :   // and old_generation_size_ otherwise.
    4374       62883 :   if (!configured_) ConfigureHeapDefault();
    4375             : 
    4376             :   mmap_region_base_ =
    4377       62882 :       reinterpret_cast<uintptr_t>(v8::internal::GetRandomMmapAddr()) &
    4378       62883 :       ~kMmapRegionMask;
    4379             : 
    4380             :   // Set up memory allocator.
    4381             :   memory_allocator_ =
    4382      188649 :       new MemoryAllocator(isolate_, MaxReserved(), code_range_size_);
    4383             : 
    4384       62883 :   store_buffer_ = new StoreBuffer(this);
    4385             : 
    4386      125766 :   heap_controller_ = new HeapController(this);
    4387             : 
    4388       62883 :   mark_compact_collector_ = new MarkCompactCollector(this);
    4389             : 
    4390       62882 :   scavenger_collector_ = new ScavengerCollector(this);
    4391             : 
    4392             :   incremental_marking_ =
    4393       62883 :       new IncrementalMarking(this, mark_compact_collector_->marking_worklist(),
    4394       62883 :                              mark_compact_collector_->weak_objects());
    4395             : 
    4396       62882 :   if (FLAG_concurrent_marking || FLAG_parallel_marking) {
    4397             :     MarkCompactCollector::MarkingWorklist* marking_worklist =
    4398       62776 :         mark_compact_collector_->marking_worklist();
    4399             :     concurrent_marking_ = new ConcurrentMarking(
    4400             :         this, marking_worklist->shared(), marking_worklist->on_hold(),
    4401       62776 :         mark_compact_collector_->weak_objects(), marking_worklist->embedder());
    4402             :   } else {
    4403             :     concurrent_marking_ =
    4404         106 :         new ConcurrentMarking(this, nullptr, nullptr, nullptr, nullptr);
    4405             :   }
    4406             : 
    4407      503064 :   for (int i = FIRST_SPACE; i <= LAST_SPACE; i++) {
    4408      503064 :     space_[i] = nullptr;
    4409             :   }
    4410             : 
    4411       62883 :   space_[RO_SPACE] = read_only_space_ = new ReadOnlySpace(this);
    4412             :   space_[NEW_SPACE] = new_space_ =
    4413       62883 :       new NewSpace(this, memory_allocator_->data_page_allocator(),
    4414       62883 :                    initial_semispace_size_, max_semi_space_size_);
    4415       62883 :   space_[OLD_SPACE] = old_space_ = new OldSpace(this);
    4416       62883 :   space_[CODE_SPACE] = code_space_ = new CodeSpace(this);
    4417       62883 :   space_[MAP_SPACE] = map_space_ = new MapSpace(this);
    4418       62883 :   space_[LO_SPACE] = lo_space_ = new LargeObjectSpace(this);
    4419       62883 :   space_[NEW_LO_SPACE] = new_lo_space_ = new NewLargeObjectSpace(this);
    4420       62883 :   space_[CODE_LO_SPACE] = code_lo_space_ = new CodeLargeObjectSpace(this);
    4421             : 
    4422     4779108 :   for (int i = 0; i < static_cast<int>(v8::Isolate::kUseCounterFeatureCount);
    4423             :        i++) {
    4424     4716225 :     deferred_counters_[i] = 0;
    4425             :   }
    4426             : 
    4427       62883 :   tracer_ = new GCTracer(this);
    4428             : #ifdef ENABLE_MINOR_MC
    4429       62883 :   minor_mark_compact_collector_ = new MinorMarkCompactCollector(this);
    4430             : #else
    4431             :   minor_mark_compact_collector_ = nullptr;
    4432             : #endif  // ENABLE_MINOR_MC
    4433      125766 :   array_buffer_collector_ = new ArrayBufferCollector(this);
    4434      125766 :   gc_idle_time_handler_ = new GCIdleTimeHandler();
    4435       62883 :   memory_reducer_ = new MemoryReducer(this);
    4436       62883 :   if (V8_UNLIKELY(FLAG_gc_stats)) {
    4437           0 :     live_object_stats_ = new ObjectStats(this);
    4438           0 :     dead_object_stats_ = new ObjectStats(this);
    4439             :   }
    4440      125766 :   local_embedder_heap_tracer_ = new LocalEmbedderHeapTracer(isolate());
    4441             : 
    4442      125766 :   LOG(isolate_, IntPtrTEvent("heap-capacity", Capacity()));
    4443      125766 :   LOG(isolate_, IntPtrTEvent("heap-available", Available()));
    4444             : 
    4445       62883 :   store_buffer()->SetUp();
    4446             : 
    4447       62883 :   mark_compact_collector()->SetUp();
    4448             : #ifdef ENABLE_MINOR_MC
    4449       62883 :   if (minor_mark_compact_collector() != nullptr) {
    4450       62883 :     minor_mark_compact_collector()->SetUp();
    4451             :   }
    4452             : #endif  // ENABLE_MINOR_MC
    4453             : 
    4454       62883 :   if (FLAG_idle_time_scavenge) {
    4455      125766 :     scavenge_job_ = new ScavengeJob();
    4456             :     idle_scavenge_observer_ = new IdleScavengeObserver(
    4457      125766 :         *this, ScavengeJob::kBytesAllocatedBeforeNextIdleTask);
    4458       62883 :     new_space()->AddAllocationObserver(idle_scavenge_observer_);
    4459             :   }
    4460             : 
    4461             :   SetGetExternallyAllocatedMemoryInBytesCallback(
    4462             :       DefaultGetExternallyAllocatedMemoryInBytesCallback);
    4463             : 
    4464       62883 :   if (FLAG_stress_marking > 0) {
    4465           0 :     stress_marking_percentage_ = NextStressMarkingLimit();
    4466           0 :     stress_marking_observer_ = new StressMarkingObserver(*this);
    4467             :     AddAllocationObserversToAllSpaces(stress_marking_observer_,
    4468           0 :                                       stress_marking_observer_);
    4469             :   }
    4470       62883 :   if (FLAG_stress_scavenge > 0) {
    4471           0 :     stress_scavenge_observer_ = new StressScavengeObserver(*this);
    4472           0 :     new_space()->AddAllocationObserver(stress_scavenge_observer_);
    4473             :   }
    4474             : 
    4475       62883 :   write_protect_code_memory_ = FLAG_write_protect_code_memory;
    4476       62883 : }
    4477             : 
    4478       62823 : void Heap::InitializeHashSeed() {
    4479             :   DCHECK(!deserialization_complete_);
    4480             :   uint64_t new_hash_seed;
    4481       62823 :   if (FLAG_hash_seed == 0) {
    4482       62750 :     int64_t rnd = isolate()->random_number_generator()->NextInt64();
    4483       62750 :     new_hash_seed = static_cast<uint64_t>(rnd);
    4484             :   } else {
    4485          73 :     new_hash_seed = static_cast<uint64_t>(FLAG_hash_seed);
    4486             :   }
    4487             :   ReadOnlyRoots(this).hash_seed()->copy_in(
    4488             :       0, reinterpret_cast<byte*>(&new_hash_seed), kInt64Size);
    4489       62823 : }
    4490             : 
    4491     7300003 : void Heap::SetStackLimits() {
    4492             :   DCHECK_NOT_NULL(isolate_);
    4493             :   DCHECK(isolate_ == isolate());
    4494             :   // On 64-bit machines, pointers are generally out of range of Smis. We write
    4495             :   // something that looks like an out-of-range Smi to the GC.
    4496             : 
    4497             :   // Set up the special root array entries containing the stack limits.
    4498             :   // These are actually addresses, but the tag makes the GC ignore them.
    4499             :   roots_table()[RootIndex::kStackLimit] =
    4500    14600022 :       (isolate_->stack_guard()->jslimit() & ~kSmiTagMask) | kSmiTag;
    4501             :   roots_table()[RootIndex::kRealStackLimit] =
    4502     7300011 :       (isolate_->stack_guard()->real_jslimit() & ~kSmiTagMask) | kSmiTag;
    4503     7300011 : }
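
// A worked example of the disguise above: clearing the low tag bit and or-ing
// in kSmiTag makes a stack-limit address look like an out-of-range Smi, so
// the GC treats the root slot as a tagged immediate and skips it. The
// constants mirror a 64-bit build (kSmiTag == 0, kSmiTagMask == 1); the
// address itself is made up.
#include <cstdint>
#include <cstdio>

int main() {
  const uintptr_t kSmiTag = 0, kSmiTagMask = 1;
  uintptr_t jslimit = 0x7ffe12345679;  // hypothetical stack limit address
  uintptr_t disguised = (jslimit & ~kSmiTagMask) | kSmiTag;
  std::printf("%#llx -> %#llx (low bit cleared, reads as a Smi)\n",
              static_cast<unsigned long long>(jslimit),
              static_cast<unsigned long long>(disguised));
  return 0;
}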
    4504             : 
    4505         251 : void Heap::ClearStackLimits() {
    4506         251 :   roots_table()[RootIndex::kStackLimit] = kNullAddress;
    4507         251 :   roots_table()[RootIndex::kRealStackLimit] = kNullAddress;
    4508         251 : }
    4509             : 
    4510           0 : int Heap::NextAllocationTimeout(int current_timeout) {
    4511           0 :   if (FLAG_random_gc_interval > 0) {
    4512             :     // If the current timeout hasn't reached 0, the GC was caused by something
    4513             :     // other than the --stress-atomic-gc flag, so we don't update the timeout.
    4514           0 :     if (current_timeout <= 0) {
    4515           0 :       return isolate()->fuzzer_rng()->NextInt(FLAG_random_gc_interval + 1);
    4516             :     } else {
    4517             :       return current_timeout;
    4518             :     }
    4519             :   }
    4520           0 :   return FLAG_gc_interval;
    4521             : }
    4522             : 
    4523           0 : void Heap::PrintAllocationsHash() {
    4524           0 :   uint32_t hash = StringHasher::GetHashCore(raw_allocations_hash_);
    4525           0 :   PrintF("\n### Allocations = %u, hash = 0x%08x\n", allocations_count(), hash);
    4526           0 : }
    4527             : 
    4528           0 : void Heap::PrintMaxMarkingLimitReached() {
    4529             :   PrintF("\n### Maximum marking limit reached = %.02lf\n",
    4530           0 :          max_marking_limit_reached_);
    4531           0 : }
    4532             : 
    4533           0 : void Heap::PrintMaxNewSpaceSizeReached() {
    4534             :   PrintF("\n### Maximum new space size reached = %.02lf\n",
    4535           0 :          stress_scavenge_observer_->MaxNewSpaceSizeReached());
    4536           0 : }
    4537             : 
    4538           0 : int Heap::NextStressMarkingLimit() {
    4539           0 :   return isolate()->fuzzer_rng()->NextInt(FLAG_stress_marking + 1);
    4540             : }
    4541             : 
    4542      125766 : void Heap::NotifyDeserializationComplete() {
    4543             :   PagedSpaces spaces(this);
    4544      251532 :   for (PagedSpace* s = spaces.next(); s != nullptr; s = spaces.next()) {
    4545      377298 :     if (isolate()->snapshot_available()) s->ShrinkImmortalImmovablePages();
    4546             : #ifdef DEBUG
    4547             :     // All pages right after bootstrapping must be marked as never-evacuate.
    4548             :     for (Page* p : *s) {
    4549             :       DCHECK(p->NeverEvacuate());
    4550             :     }
    4551             : #endif  // DEBUG
    4552             :   }
    4553             : 
    4554       62883 :   read_only_space()->MarkAsReadOnly();
    4555       62883 :   deserialization_complete_ = true;
    4556       62883 : }
    4557             : 
    4558          70 : void Heap::SetEmbedderHeapTracer(EmbedderHeapTracer* tracer) {
    4559             :   DCHECK_EQ(gc_state_, HeapState::NOT_IN_GC);
    4560          70 :   local_embedder_heap_tracer()->SetRemoteTracer(tracer);
    4561          70 : }
    4562             : 
    4563           0 : EmbedderHeapTracer* Heap::GetEmbedderHeapTracer() const {
    4564           0 :   return local_embedder_heap_tracer()->remote_tracer();
    4565             : }
    4566             : 
    4567          15 : void Heap::RegisterExternallyReferencedObject(Address* location) {
    4568             :   // The embedder is not aware of whether numbers are materialized as heap
    4569             :   // objects or just passed around as Smis.
    4570           5 :   Object object(*location);
    4571           5 :   if (!object->IsHeapObject()) return;
    4572             :   HeapObject heap_object = HeapObject::cast(object);
    4573             :   DCHECK(Contains(heap_object));
    4574          10 :   if (FLAG_incremental_marking_wrappers && incremental_marking()->IsMarking()) {
    4575           0 :     incremental_marking()->WhiteToGreyAndPush(heap_object);
    4576             :   } else {
    4577             :     DCHECK(mark_compact_collector()->in_use());
    4578             :     mark_compact_collector()->MarkExternallyReferencedObject(heap_object);
    4579             :   }
    4580             : }
    4581             : 
    4582      125730 : void Heap::StartTearDown() { SetGCState(TEAR_DOWN); }
    4583             : 
    4584      251472 : void Heap::TearDown() {
    4585             :   DCHECK_EQ(gc_state_, TEAR_DOWN);
    4586             : #ifdef VERIFY_HEAP
    4587             :   if (FLAG_verify_heap) {
    4588             :     Verify();
    4589             :   }
    4590             : #endif
    4591             : 
    4592       62868 :   UpdateMaximumCommitted();
    4593             : 
    4594       62868 :   if (FLAG_verify_predictable || FLAG_fuzzer_gc_analysis) {
    4595           0 :     PrintAllocationsHash();
    4596             :   }
    4597             : 
    4598       62868 :   if (FLAG_fuzzer_gc_analysis) {
    4599           0 :     if (FLAG_stress_marking > 0) {
    4600             :       PrintMaxMarkingLimitReached();
    4601             :     }
    4602           0 :     if (FLAG_stress_scavenge > 0) {
    4603           0 :       PrintMaxNewSpaceSizeReached();
    4604             :     }
    4605             :   }
    4606             : 
    4607       62868 :   if (FLAG_idle_time_scavenge) {
    4608       62868 :     new_space()->RemoveAllocationObserver(idle_scavenge_observer_);
    4609       62867 :     delete idle_scavenge_observer_;
    4610       62867 :     idle_scavenge_observer_ = nullptr;
    4611       62867 :     delete scavenge_job_;
    4612       62867 :     scavenge_job_ = nullptr;
    4613             :   }
    4614             : 
    4615       62867 :   if (FLAG_stress_marking > 0) {
    4616             :     RemoveAllocationObserversFromAllSpaces(stress_marking_observer_,
    4617           0 :                                            stress_marking_observer_);
    4618           0 :     delete stress_marking_observer_;
    4619           0 :     stress_marking_observer_ = nullptr;
    4620             :   }
    4621       62867 :   if (FLAG_stress_scavenge > 0) {
    4622           0 :     new_space()->RemoveAllocationObserver(stress_scavenge_observer_);
    4623           0 :     delete stress_scavenge_observer_;
    4624           0 :     stress_scavenge_observer_ = nullptr;
    4625             :   }
    4626             : 
    4627       62867 :   if (heap_controller_ != nullptr) {
    4628       62867 :     delete heap_controller_;
    4629       62867 :     heap_controller_ = nullptr;
    4630             :   }
    4631             : 
    4632       62867 :   if (mark_compact_collector_ != nullptr) {
    4633       62867 :     mark_compact_collector_->TearDown();
    4634       62868 :     delete mark_compact_collector_;
    4635       62868 :     mark_compact_collector_ = nullptr;
    4636             :   }
    4637             : 
    4638             : #ifdef ENABLE_MINOR_MC
    4639       62868 :   if (minor_mark_compact_collector_ != nullptr) {
    4640       62868 :     minor_mark_compact_collector_->TearDown();
    4641       62868 :     delete minor_mark_compact_collector_;
    4642       62868 :     minor_mark_compact_collector_ = nullptr;
    4643             :   }
    4644             : #endif  // ENABLE_MINOR_MC
    4645             : 
    4646       62868 :   if (scavenger_collector_ != nullptr) {
    4647       62868 :     delete scavenger_collector_;
    4648       62868 :     scavenger_collector_ = nullptr;
    4649             :   }
    4650             : 
    4651       62868 :   if (array_buffer_collector_ != nullptr) {
    4652       62868 :     delete array_buffer_collector_;
    4653       62868 :     array_buffer_collector_ = nullptr;
    4654             :   }
    4655             : 
    4656      125736 :   delete incremental_marking_;
    4657       62868 :   incremental_marking_ = nullptr;
    4658             : 
    4659       62868 :   delete concurrent_marking_;
    4660       62868 :   concurrent_marking_ = nullptr;
    4661             : 
    4662       62868 :   delete gc_idle_time_handler_;
    4663       62868 :   gc_idle_time_handler_ = nullptr;
    4664             : 
    4665       62868 :   if (memory_reducer_ != nullptr) {
    4666       62868 :     memory_reducer_->TearDown();
    4667      125734 :     delete memory_reducer_;
    4668       62868 :     memory_reducer_ = nullptr;
    4669             :   }
    4670             : 
    4671       62868 :   if (live_object_stats_ != nullptr) {
    4672           0 :     delete live_object_stats_;
    4673           0 :     live_object_stats_ = nullptr;
    4674             :   }
    4675             : 
    4676       62868 :   if (dead_object_stats_ != nullptr) {
    4677           0 :     delete dead_object_stats_;
    4678           0 :     dead_object_stats_ = nullptr;
    4679             :   }
    4680             : 
    4681      125735 :   delete local_embedder_heap_tracer_;
    4682       62869 :   local_embedder_heap_tracer_ = nullptr;
    4683             : 
    4684       62869 :   isolate_->global_handles()->TearDown();
    4685             : 
    4686       62867 :   external_string_table_.TearDown();
    4687             : 
    4688             :   // Tear down all ArrayBuffers before tearing down the heap, since their
    4689             :   // byte_length may be a HeapNumber, which is required for freeing the
    4690             :   // backing store.
    4691       62868 :   ArrayBufferTracker::TearDown(this);
    4692             : 
    4693      125736 :   delete tracer_;
    4694       62868 :   tracer_ = nullptr;
    4695             : 
    4696      565812 :   for (int i = FIRST_SPACE; i <= LAST_SPACE; i++) {
    4697      502944 :     delete space_[i];
    4698      502944 :     space_[i] = nullptr;
    4699             :   }
    4700             : 
    4701       62868 :   store_buffer()->TearDown();
    4702             : 
    4703       62868 :   memory_allocator()->TearDown();
    4704             : 
    4705             :   StrongRootsList* next = nullptr;
    4706      125736 :   for (StrongRootsList* list = strong_roots_list_; list; list = next) {
    4707           0 :     next = list->next;
    4708           0 :     delete list;
    4709             :   }
    4710       62868 :   strong_roots_list_ = nullptr;
    4711             : 
    4712      125736 :   delete store_buffer_;
    4713       62868 :   store_buffer_ = nullptr;
    4714             : 
    4715       62868 :   delete memory_allocator_;
    4716       62868 :   memory_allocator_ = nullptr;
    4717       62868 : }
    4718             : 
    4719          40 : void Heap::AddGCPrologueCallback(v8::Isolate::GCCallbackWithData callback,
    4720             :                                  GCType gc_type, void* data) {
    4721             :   DCHECK_NOT_NULL(callback);
    4722             :   DCHECK(gc_prologue_callbacks_.end() ==
    4723             :          std::find(gc_prologue_callbacks_.begin(), gc_prologue_callbacks_.end(),
    4724             :                    GCCallbackTuple(callback, gc_type, data)));
    4725          40 :   gc_prologue_callbacks_.emplace_back(callback, gc_type, data);
    4726          40 : }
    4727             : 
    4728          35 : void Heap::RemoveGCPrologueCallback(v8::Isolate::GCCallbackWithData callback,
    4729             :                                     void* data) {
    4730             :   DCHECK_NOT_NULL(callback);
    4731          70 :   for (size_t i = 0; i < gc_prologue_callbacks_.size(); i++) {
    4732         105 :     if (gc_prologue_callbacks_[i].callback == callback &&
    4733          35 :         gc_prologue_callbacks_[i].data == data) {
    4734             :       gc_prologue_callbacks_[i] = gc_prologue_callbacks_.back();
    4735             :       gc_prologue_callbacks_.pop_back();
    4736          35 :       return;
    4737             :     }
    4738             :   }
    4739           0 :   UNREACHABLE();
    4740             : }
    4741             : 
    4742       71108 : void Heap::AddGCEpilogueCallback(v8::Isolate::GCCallbackWithData callback,
    4743             :                                  GCType gc_type, void* data) {
    4744             :   DCHECK_NOT_NULL(callback);
    4745             :   DCHECK(gc_epilogue_callbacks_.end() ==
    4746             :          std::find(gc_epilogue_callbacks_.begin(), gc_epilogue_callbacks_.end(),
    4747             :                    GCCallbackTuple(callback, gc_type, data)));
    4748       71108 :   gc_epilogue_callbacks_.emplace_back(callback, gc_type, data);
    4749       71108 : }
    4750             : 
    4751        8225 : void Heap::RemoveGCEpilogueCallback(v8::Isolate::GCCallbackWithData callback,
    4752             :                                     void* data) {
    4753             :   DCHECK_NOT_NULL(callback);
    4754       32900 :   for (size_t i = 0; i < gc_epilogue_callbacks_.size(); i++) {
    4755       41125 :     if (gc_epilogue_callbacks_[i].callback == callback &&
    4756        8225 :         gc_epilogue_callbacks_[i].data == data) {
    4757             :       gc_epilogue_callbacks_[i] = gc_epilogue_callbacks_.back();
    4758             :       gc_epilogue_callbacks_.pop_back();
    4759        8225 :       return;
    4760             :     }
    4761             :   }
    4762           0 :   UNREACHABLE();
    4763             : }
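
// The two Remove* callbacks above use the classic swap-with-last erase:
// overwrite the matching slot with the back element and pop, an O(1) removal
// that does not preserve order. A generic sketch of the same idea:
#include <cstddef>
#include <vector>

template <typename T>
bool UnorderedErase(std::vector<T>& v, const T& value) {
  for (std::size_t i = 0; i < v.size(); i++) {
    if (v[i] == value) {
      v[i] = v.back();  // move the last element into the hole
      v.pop_back();     // drop the now-duplicated tail slot
      return true;
    }
  }
  return false;  // the heap code reaches UNREACHABLE() in this case
}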
    4764             : 
    4765             : namespace {
    4766         372 : Handle<WeakArrayList> CompactWeakArrayList(Heap* heap,
    4767             :                                            Handle<WeakArrayList> array,
    4768             :                                            PretenureFlag pretenure) {
    4769         372 :   if (array->length() == 0) {
    4770           0 :     return array;
    4771             :   }
    4772         372 :   int new_length = array->CountLiveWeakReferences();
    4773         372 :   if (new_length == array->length()) {
    4774         267 :     return array;
    4775             :   }
    4776             : 
    4777             :   Handle<WeakArrayList> new_array = WeakArrayList::EnsureSpace(
    4778             :       heap->isolate(),
    4779             :       handle(ReadOnlyRoots(heap).empty_weak_array_list(), heap->isolate()),
    4780         105 :       new_length, pretenure);
    4781             :   // Allocation might have caused GC and turned some of the elements into
    4782             :   // cleared weak heap objects. Count the number of live references again and
    4783             :   // fill in the new array.
    4784             :   int copy_to = 0;
    4785       19480 :   for (int i = 0; i < array->length(); i++) {
    4786        9635 :     MaybeObject element = array->Get(i);
    4787        9955 :     if (element->IsCleared()) continue;
    4788       18630 :     new_array->Set(copy_to++, element);
    4789             :   }
    4790             :   new_array->set_length(copy_to);
    4791         105 :   return new_array;
    4792             : }
    4793             : 
    4794             : }  // anonymous namespace
    4795             : 
    4796         186 : void Heap::CompactWeakArrayLists(PretenureFlag pretenure) {
    4797             :   // Find known PrototypeUsers and compact them.
    4798             :   std::vector<Handle<PrototypeInfo>> prototype_infos;
    4799             :   {
    4800         186 :     HeapIterator iterator(this);
    4801     3059034 :     for (HeapObject o = iterator.next(); !o.is_null(); o = iterator.next()) {
    4802     1529331 :       if (o->IsPrototypeInfo()) {
    4803       12175 :         PrototypeInfo prototype_info = PrototypeInfo::cast(o);
    4804       24350 :         if (prototype_info->prototype_users()->IsWeakArrayList()) {
    4805          25 :           prototype_infos.emplace_back(handle(prototype_info, isolate()));
    4806             :         }
    4807             :       }
    4808         186 :     }
    4809             :   }
    4810         397 :   for (auto& prototype_info : prototype_infos) {
    4811             :     Handle<WeakArrayList> array(
    4812          50 :         WeakArrayList::cast(prototype_info->prototype_users()), isolate());
    4813             :     DCHECK_IMPLIES(pretenure == TENURED,
    4814             :                    InOldSpace(*array) ||
    4815             :                        *array == ReadOnlyRoots(this).empty_weak_array_list());
    4816             :     WeakArrayList new_array = PrototypeUsers::Compact(
    4817          25 :         array, this, JSObject::PrototypeRegistryCompactionCallback, pretenure);
    4818          25 :     prototype_info->set_prototype_users(new_array);
    4819             :   }
    4820             : 
    4821             :   // Find known WeakArrayLists and compact them.
    4822         186 :   Handle<WeakArrayList> scripts(script_list(), isolate());
    4823             :   DCHECK_IMPLIES(pretenure == TENURED, InOldSpace(*scripts));
    4824         186 :   scripts = CompactWeakArrayList(this, scripts, pretenure);
    4825         186 :   set_script_list(*scripts);
    4826             : 
    4827             :   Handle<WeakArrayList> no_script_list(noscript_shared_function_infos(),
    4828         186 :                                        isolate());
    4829             :   DCHECK_IMPLIES(pretenure == TENURED, InOldSpace(*no_script_list));
    4830         186 :   no_script_list = CompactWeakArrayList(this, no_script_list, pretenure);
    4831         186 :   set_noscript_shared_function_infos(*no_script_list);
    4832         186 : }
    4833             : 
    4834      139595 : void Heap::AddRetainedMap(Handle<Map> map) {
    4835      139595 :   if (map->is_in_retained_map_list()) {
    4836      139595 :     return;
    4837             :   }
    4838       42481 :   Handle<WeakArrayList> array(retained_maps(), isolate());
    4839       42481 :   if (array->IsFull()) {
    4840       12071 :     CompactRetainedMaps(*array);
    4841             :   }
    4842             :   array =
    4843       42481 :       WeakArrayList::AddToEnd(isolate(), array, MaybeObjectHandle::Weak(map));
    4844             :   array = WeakArrayList::AddToEnd(
    4845             :       isolate(), array,
    4846       84962 :       MaybeObjectHandle(Smi::FromInt(FLAG_retain_maps_for_n_gc), isolate()));
    4847       84962 :   if (*array != retained_maps()) {
    4848       16192 :     set_retained_maps(*array);
    4849             :   }
    4850             :   map->set_is_in_retained_map_list(true);
    4851             : }
    4852             : 
    4853       12071 : void Heap::CompactRetainedMaps(WeakArrayList retained_maps) {
    4854             :   DCHECK_EQ(retained_maps, this->retained_maps());
    4855             :   int length = retained_maps->length();
    4856             :   int new_length = 0;
    4857             :   int new_number_of_disposed_maps = 0;
    4858             :   // This loop compacts the array by removing cleared weak cells.
    4859       49114 :   for (int i = 0; i < length; i += 2) {
    4860       37043 :     MaybeObject maybe_object = retained_maps->Get(i);
    4861       37043 :     if (maybe_object->IsCleared()) {
    4862        2896 :       continue;
    4863             :     }
    4864             : 
    4865             :     DCHECK(maybe_object->IsWeak());
    4866             : 
    4867       34147 :     MaybeObject age = retained_maps->Get(i + 1);
    4868             :     DCHECK(age->IsSmi());
    4869       34147 :     if (i != new_length) {
    4870        1231 :       retained_maps->Set(new_length, maybe_object);
    4871        1231 :       retained_maps->Set(new_length + 1, age);
    4872             :     }
    4873       34147 :     if (i < number_of_disposed_maps_) {
    4874          78 :       new_number_of_disposed_maps += 2;
    4875             :     }
    4876       34147 :     new_length += 2;
    4877             :   }
    4878       12071 :   number_of_disposed_maps_ = new_number_of_disposed_maps;
    4879             :   HeapObject undefined = ReadOnlyRoots(this).undefined_value();
    4880       17863 :   for (int i = new_length; i < length; i++) {
    4881        5792 :     retained_maps->Set(i, HeapObjectReference::Strong(undefined));
    4882             :   }
    4883       12071 :   if (new_length != length) retained_maps->set_length(new_length);
    4884       12071 : }
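
CompactRetainedMaps walks the array in strides of two because retained maps are stored as (weak map, age) pairs; surviving pairs are shifted down together, and the freed tail is overwritten with strong references to undefined so no stale weak slots remain. A hedged sketch of the stride-2 compaction over a flat buffer (the layout below is illustrative, not V8 API):

#include <cstddef>
#include <cstdint>
#include <vector>

// Flat stride-2 layout mirroring retained_maps(): even slots hold a weak
// pointer (0 models a cleared reference), odd slots hold the entry's age.
void CompactRetainedPairs(std::vector<intptr_t>& slots) {
  size_t new_length = 0;
  for (size_t i = 0; i + 1 < slots.size(); i += 2) {
    if (slots[i] == 0) continue;   // cleared weak reference: drop the pair
    if (i != new_length) {         // shift the surviving pair down
      slots[new_length] = slots[i];
      slots[new_length + 1] = slots[i + 1];
    }
    new_length += 2;
  }
  slots.resize(new_length);  // V8 pads the tail with undefined instead
}
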
    4885             : 
    4886           0 : void Heap::FatalProcessOutOfMemory(const char* location) {
    4887           0 :   v8::internal::V8::FatalProcessOutOfMemory(isolate(), location, true);
    4888             : }
    4889             : 
    4890             : #ifdef DEBUG
    4891             : 
    4892             : class PrintHandleVisitor : public RootVisitor {
    4893             :  public:
    4894             :   void VisitRootPointers(Root root, const char* description,
    4895             :                          FullObjectSlot start, FullObjectSlot end) override {
    4896             :     for (FullObjectSlot p = start; p < end; ++p)
    4897             :       PrintF("  handle %p to %p\n", p.ToVoidPtr(),
    4898             :              reinterpret_cast<void*>((*p).ptr()));
    4899             :   }
    4900             : };
    4901             : 
    4902             : 
    4903             : void Heap::PrintHandles() {
    4904             :   PrintF("Handles:\n");
    4905             :   PrintHandleVisitor v;
    4906             :   isolate_->handle_scope_implementer()->Iterate(&v);
    4907             : }
    4908             : 
    4909             : #endif
    4910             : 
    4911             : class CheckHandleCountVisitor : public RootVisitor {
    4912             :  public:
    4913           0 :   CheckHandleCountVisitor() : handle_count_(0) {}
    4914           0 :   ~CheckHandleCountVisitor() override {
    4915           0 :     CHECK_GT(HandleScope::kCheckHandleThreshold, handle_count_);
    4916           0 :   }
    4917           0 :   void VisitRootPointers(Root root, const char* description,
    4918             :                          FullObjectSlot start, FullObjectSlot end) override {
    4919           0 :     handle_count_ += end - start;
    4920           0 :   }
    4921             : 
    4922             :  private:
    4923             :   ptrdiff_t handle_count_;
    4924             : };
    4925             : 
    4926             : 
    4927           0 : void Heap::CheckHandleCount() {
    4928             :   CheckHandleCountVisitor v;
    4929           0 :   isolate_->handle_scope_implementer()->Iterate(&v);
    4930           0 : }
    4931             : 
    4932       62995 : Address* Heap::store_buffer_top_address() {
    4933       62995 :   return store_buffer()->top_address();
    4934             : }
    4935             : 
    4936             : // static
    4937         112 : intptr_t Heap::store_buffer_mask_constant() {
    4938         112 :   return StoreBuffer::kStoreBufferMask;
    4939             : }
    4940             : 
    4941             : // static
    4942       62994 : Address Heap::store_buffer_overflow_function_address() {
    4943       62994 :   return FUNCTION_ADDR(StoreBuffer::StoreBufferOverflow);
    4944             : }
    4945             : 
    4946        6178 : void Heap::ClearRecordedSlot(HeapObject object, ObjectSlot slot) {
    4947             :   Page* page = Page::FromAddress(slot.address());
    4948      737882 :   if (!page->InNewSpace()) {
    4949             :     DCHECK_EQ(page->owner()->identity(), OLD_SPACE);
    4950             :     store_buffer()->DeleteEntry(slot.address());
    4951             :   }
    4952        5311 : }
    4953             : 
    4954             : #ifdef DEBUG
    4955             : void Heap::VerifyClearedSlot(HeapObject object, ObjectSlot slot) {
    4956             :   if (InNewSpace(object)) return;
    4957             :   Page* page = Page::FromAddress(slot.address());
    4958             :   DCHECK_EQ(page->owner()->identity(), OLD_SPACE);
    4959             :   store_buffer()->MoveAllEntriesToRememberedSet();
    4960             :   CHECK(!RememberedSet<OLD_TO_NEW>::Contains(page, slot.address()));
    4961             :   // Old-to-old slots are filtered using invalidated slots.

    4962             :   CHECK_IMPLIES(RememberedSet<OLD_TO_OLD>::Contains(page, slot.address()),
    4963             :                 page->RegisteredObjectWithInvalidatedSlots(object));
    4964             : }
    4965             : #endif
    4966             : 
    4967      349255 : void Heap::ClearRecordedSlotRange(Address start, Address end) {
    4968             :   Page* page = Page::FromAddress(start);
    4969     4542112 :   if (!page->InNewSpace()) {
    4970             :     DCHECK_EQ(page->owner()->identity(), OLD_SPACE);
    4971             :     store_buffer()->DeleteEntry(start, end);
    4972             :   }
    4973      315313 : }
    4974             : 
    4975    28747249 : PagedSpace* PagedSpaces::next() {
    4976    28747249 :   switch (counter_++) {
    4977             :     case RO_SPACE:
    4978             :       // skip NEW_SPACE
    4979     5454519 :       counter_++;
    4980    22924091 :       return heap_->read_only_space();
    4981             :     case OLD_SPACE:
    4982    11646380 :       return heap_->old_space();
    4983             :     case CODE_SPACE:
    4984    11646382 :       return heap_->code_space();
    4985             :     case MAP_SPACE:
    4986    11646382 :       return heap_->map_space();
    4987             :     default:
    4988             :       return nullptr;
    4989             :   }
    4990             : }
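
PagedSpaces::next() turns the space enum into a cursor: each call returns the space for the current counter value, and the RO_SPACE case bumps the counter an extra step so NEW_SPACE (which is not a paged space) is never visited. A minimal analogue with an assumed enum ordering (illustrative values, not V8's actual AllocationSpace constants):

// Assumed ordering, for illustration only.
enum SpaceId { RO_SPACE, NEW_SPACE, OLD_SPACE, CODE_SPACE, MAP_SPACE, NUM_SPACES };

class PagedSpaceIdIterator {
 public:
  // Returns the next paged space id, or -1 once all spaces are exhausted.
  int next() {
    int current = counter_++;
    if (current == RO_SPACE) counter_++;  // skip NEW_SPACE, as above
    return current < NUM_SPACES ? current : -1;
  }

 private:
  int counter_ = RO_SPACE;
};
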
    4991             : 
    4992      247369 : SpaceIterator::SpaceIterator(Heap* heap)
    4993      255222 :     : heap_(heap), current_space_(FIRST_SPACE - 1) {}
    4994             : 
    4995             : SpaceIterator::~SpaceIterator() = default;
    4996             : 
    4997     2226321 : bool SpaceIterator::has_next() {
    4998             :   // Iterate until no more spaces.
    4999     2289145 :   return current_space_ != LAST_SPACE;
    5000             : }
    5001             : 
    5002     1978952 : Space* SpaceIterator::next() {
    5003             :   DCHECK(has_next());
    5004     9981032 :   return heap_->space(++current_space_);
    5005             : }
    5006             : 
    5007             : 
    5008        1291 : class HeapObjectsFilter {
    5009             :  public:
    5010        1291 :   virtual ~HeapObjectsFilter() = default;
    5011             :   virtual bool SkipObject(HeapObject object) = 0;
    5012             : };
    5013             : 
    5014             : 
    5015             : class UnreachableObjectsFilter : public HeapObjectsFilter {
    5016             :  public:
    5017        3873 :   explicit UnreachableObjectsFilter(Heap* heap) : heap_(heap) {
    5018        1291 :     MarkReachableObjects();
    5019        1291 :   }
    5020             : 
    5021        2582 :   ~UnreachableObjectsFilter() override {
    5022       12429 :     for (auto it : reachable_) {
    5023       19694 :       delete it.second;
    5024             :       it.second = nullptr;
    5025             :     }
    5026        2582 :   }
    5027             : 
    5028    10885593 :   bool SkipObject(HeapObject object) override {
    5029    10885593 :     if (object->IsFiller()) return true;
    5030    10885593 :     MemoryChunk* chunk = MemoryChunk::FromHeapObject(object);
    5031    10885593 :     if (reachable_.count(chunk) == 0) return true;
    5032    21771002 :     return reachable_[chunk]->count(object) == 0;
    5033             :   }
    5034             : 
    5035             :  private:
    5036    55933970 :   bool MarkAsReachable(HeapObject object) {
    5037    55933970 :     MemoryChunk* chunk = MemoryChunk::FromHeapObject(object);
    5038    55933970 :     if (reachable_.count(chunk) == 0) {
    5039       19694 :       reachable_[chunk] = new std::unordered_set<HeapObject, Object::Hasher>();
    5040             :     }
    5041   111867940 :     if (reachable_[chunk]->count(object)) return false;
    5042    10385917 :     reachable_[chunk]->insert(object);
    5043    10385917 :     return true;
    5044             :   }
    5045             : 
    5046        1291 :   class MarkingVisitor : public ObjectVisitor, public RootVisitor {
    5047             :    public:
    5048             :     explicit MarkingVisitor(UnreachableObjectsFilter* filter)
    5049        1291 :         : filter_(filter) {}
    5050             : 
    5051    23284301 :     void VisitPointers(HeapObject host, ObjectSlot start,
    5052             :                        ObjectSlot end) override {
    5053    23284301 :       MarkPointers(MaybeObjectSlot(start), MaybeObjectSlot(end));
    5054    23284301 :     }
    5055             : 
    5056     1217131 :     void VisitPointers(HeapObject host, MaybeObjectSlot start,
    5057             :                        MaybeObjectSlot end) final {
    5058     1217131 :       MarkPointers(start, end);
    5059     1217131 :     }
    5060             : 
    5061           0 :     void VisitCodeTarget(Code host, RelocInfo* rinfo) final {
    5062           0 :       Code target = Code::GetCodeFromTargetAddress(rinfo->target_address());
    5063             :       MarkHeapObject(target);
    5064           0 :     }
    5065       37402 :     void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) final {
    5066             :       MarkHeapObject(rinfo->target_object());
    5067       37402 :     }
    5068             : 
    5069     3961665 :     void VisitRootPointers(Root root, const char* description,
    5070             :                            FullObjectSlot start, FullObjectSlot end) override {
    5071             :       MarkPointersImpl(start, end);
    5072     3961665 :     }
    5073             : 
    5074        1291 :     void TransitiveClosure() {
    5075    10388499 :       while (!marking_stack_.empty()) {
    5076    10385917 :         HeapObject obj = marking_stack_.back();
    5077             :         marking_stack_.pop_back();
    5078    10385917 :         obj->Iterate(this);
    5079             :       }
    5080        1291 :     }
    5081             : 
    5082             :    private:
    5083    24501432 :     void MarkPointers(MaybeObjectSlot start, MaybeObjectSlot end) {
    5084             :       MarkPointersImpl(start, end);
    5085    24501432 :     }
    5086             : 
    5087             :     template <typename TSlot>
    5088             :     V8_INLINE void MarkPointersImpl(TSlot start, TSlot end) {
    5089             :       // Treat weak references as strong.
    5090    92804354 :       for (TSlot p = start; p < end; ++p) {
    5091    64341257 :         typename TSlot::TObject object = *p;
    5092    64341257 :         HeapObject heap_object;
    5093    64341257 :         if (object.GetHeapObject(&heap_object)) {
    5094             :           MarkHeapObject(heap_object);
    5095             :         }
    5096             :       }
    5097             :     }
    5098             : 
    5099             :     V8_INLINE void MarkHeapObject(HeapObject heap_object) {
    5100    55933970 :       if (filter_->MarkAsReachable(heap_object)) {
    5101    10385917 :         marking_stack_.push_back(heap_object);
    5102             :       }
    5103             :     }
    5104             : 
    5105             :     UnreachableObjectsFilter* filter_;
    5106             :     std::vector<HeapObject> marking_stack_;
    5107             :   };
    5108             : 
    5109             :   friend class MarkingVisitor;
    5110             : 
    5111        1291 :   void MarkReachableObjects() {
    5112             :     MarkingVisitor visitor(this);
    5113        1291 :     heap_->IterateRoots(&visitor, VISIT_ALL);
    5114        1291 :     visitor.TransitiveClosure();
    5115        1291 :   }
    5116             : 
    5117             :   Heap* heap_;
    5118             :   DisallowHeapAllocation no_allocation_;
    5119             :   std::unordered_map<MemoryChunk*,
    5120             :                      std::unordered_set<HeapObject, Object::Hasher>*>
    5121             :       reachable_;
    5122             : };
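
UnreachableObjectsFilter computes reachability the textbook way: every root is marked, and the MarkingVisitor drains an explicit worklist rather than recursing, so arbitrarily deep object graphs cannot overflow the C++ stack. A self-contained sketch of the same worklist-driven transitive closure over a generic pointer graph (illustrative types, not V8 API):

#include <unordered_set>
#include <vector>

struct Node {
  std::vector<Node*> edges;
};

// Returns the set of nodes reachable from the roots, using an explicit stack
// exactly like MarkingVisitor::TransitiveClosure() above.
std::unordered_set<const Node*> Reachable(const std::vector<Node*>& roots) {
  std::unordered_set<const Node*> reachable;
  std::vector<const Node*> worklist;
  auto mark = [&](const Node* n) {
    // insert() reports whether the node was newly marked; only new nodes are
    // pushed, mirroring MarkAsReachable() returning true once per object.
    if (reachable.insert(n).second) worklist.push_back(n);
  };
  for (const Node* root : roots) mark(root);
  while (!worklist.empty()) {
    const Node* n = worklist.back();
    worklist.pop_back();
    for (const Node* succ : n->edges) mark(succ);
  }
  return reachable;
}
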
    5123             : 
    5124        7853 : HeapIterator::HeapIterator(Heap* heap,
    5125             :                            HeapIterator::HeapObjectsFiltering filtering)
    5126             :     : heap_(heap),
    5127             :       filtering_(filtering),
    5128             :       filter_(nullptr),
    5129             :       space_iterator_(nullptr),
    5130        7853 :       object_iterator_(nullptr) {
    5131             :   heap_->MakeHeapIterable();
    5132        7853 :   heap_->heap_iterator_start();
    5133             :   // Start the iteration.
    5134       15706 :   space_iterator_ = new SpaceIterator(heap_);
    5135        7853 :   switch (filtering_) {
    5136             :     case kFilterUnreachable:
    5137        1291 :       filter_ = new UnreachableObjectsFilter(heap_);
    5138        1291 :       break;
    5139             :     default:
    5140             :       break;
    5141             :   }
    5142       23559 :   object_iterator_ = space_iterator_->next()->GetObjectIterator();
    5143        7853 : }
    5144             : 
    5145             : 
    5146        7853 : HeapIterator::~HeapIterator() {
    5147        7853 :   heap_->heap_iterator_end();
    5148             : #ifdef DEBUG
    5149             :   // Assert that in filtering mode we have iterated through all
    5150             :   // objects. Otherwise, the heap will be left in an inconsistent state.
    5151             :   if (filtering_ != kNoFiltering) {
    5152             :     DCHECK_NULL(object_iterator_);
    5153             :   }
    5154             : #endif
    5155        7853 :   delete space_iterator_;
    5156        7853 :   delete filter_;
    5157        7853 : }
    5158             : 
    5159    88648020 : HeapObject HeapIterator::next() {
    5160    88648020 :   if (filter_ == nullptr) return NextObject();
    5161             : 
    5162    10387208 :   HeapObject obj = NextObject();
    5163    21274092 :   while (!obj.is_null() && (filter_->SkipObject(obj))) obj = NextObject();
    5164    10387208 :   return obj;
    5165             : }
    5166             : 
    5167    89147224 : HeapObject HeapIterator::NextObject() {
    5168             :   // No iterator means we are done.
    5169    89147224 :   if (object_iterator_.get() == nullptr) return HeapObject();
    5170             : 
    5171    89147224 :   HeapObject obj = object_iterator_.get()->Next();
    5172    89148081 :   if (!obj.is_null()) {
    5173             :     // If the current iterator has more objects we are fine.
    5174    89110002 :     return obj;
    5175             :   } else {
    5176             :     // Go through the spaces looking for one that has objects.
    5177      125648 :     while (space_iterator_->has_next()) {
    5178      109942 :       object_iterator_ = space_iterator_->next()->GetObjectIterator();
    5179       54971 :       obj = object_iterator_.get()->Next();
    5180       54971 :       if (!obj.is_null()) {
    5181       30226 :         return obj;
    5182             :       }
    5183             :     }
    5184             :   }
    5185             :   // Done with the last space.
    5186             :   object_iterator_.reset(nullptr);
    5187        7853 :   return HeapObject();
    5188             : }
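
NextObject() is a two-level iterator: it drains the current space's object iterator and, on exhaustion, advances the space iterator until it finds a non-empty space or runs out. The same pattern over nested standard containers might look like this (illustrative, not V8 API):

#include <cstddef>
#include <vector>

// Flattening iterator over a vector of "spaces", each a vector of values.
// Returns nullptr when every space is exhausted, the way NextObject()
// returns a null HeapObject.
class FlattenedIterator {
 public:
  explicit FlattenedIterator(const std::vector<std::vector<int>>& spaces)
      : spaces_(spaces) {}

  const int* next() {
    while (space_ < spaces_.size()) {
      if (index_ < spaces_[space_].size()) return &spaces_[space_][index_++];
      space_++;       // current space exhausted: move to the next one
      index_ = 0;
    }
    return nullptr;   // done with the last space
  }

 private:
  const std::vector<std::vector<int>>& spaces_;
  size_t space_ = 0;
  size_t index_ = 0;
};
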
    5189             : 
    5190      107066 : void Heap::UpdateTotalGCTime(double duration) {
    5191      107066 :   if (FLAG_trace_gc_verbose) {
    5192           0 :     total_gc_time_ms_ += duration;
    5193             :   }
    5194      107066 : }
    5195             : 
    5196       83492 : void Heap::ExternalStringTable::CleanUpNewSpaceStrings() {
    5197             :   int last = 0;
    5198       83492 :   Isolate* isolate = heap_->isolate();
    5199      167484 :   for (size_t i = 0; i < new_space_strings_.size(); ++i) {
    5200       84013 :     Object o = new_space_strings_[i];
    5201         250 :     if (o->IsTheHole(isolate)) {
    5202         229 :       continue;
    5203             :     }
    5204             :     // The real external string is already in one of these vectors and was or
    5205             :     // will be processed. Re-processing it will add a duplicate to the vector.
    5206          22 :     if (o->IsThinString()) continue;
    5207             :     DCHECK(o->IsExternalString());
    5208          21 :     if (InNewSpace(o)) {
    5209          42 :       new_space_strings_[last++] = o;
    5210             :     } else {
    5211           0 :       old_space_strings_.push_back(o);
    5212             :     }
    5213             :   }
    5214       83492 :   new_space_strings_.resize(last);
    5215       83492 : }
    5216             : 
    5217       83492 : void Heap::ExternalStringTable::CleanUpAll() {
    5218       83492 :   CleanUpNewSpaceStrings();
    5219             :   int last = 0;
    5220       83492 :   Isolate* isolate = heap_->isolate();
    5221      408688 :   for (size_t i = 0; i < old_space_strings_.size(); ++i) {
    5222      444661 :     Object o = old_space_strings_[i];
    5223      120852 :     if (o->IsTheHole(isolate)) {
    5224        1387 :       continue;
    5225             :     }
    5226             :     // The real external string is already in one of these vectors and was or
    5227             :     // will be processed. Re-processing it will add a duplicate to the vector.
    5228      119465 :     if (o->IsThinString()) continue;
    5229             :     DCHECK(o->IsExternalString());
    5230             :     DCHECK(!InNewSpace(o));
    5231      238930 :     old_space_strings_[last++] = o;
    5232             :   }
    5233       83492 :   old_space_strings_.resize(last);
    5234             : #ifdef VERIFY_HEAP
    5235             :   if (FLAG_verify_heap) {
    5236             :     Verify();
    5237             :   }
    5238             : #endif
    5239       83492 : }
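
Both cleanup passes above use the same in-place compaction idiom: a read index visits every slot, a write index (`last`) receives only the survivors, and a final resize() drops the tail. A generic sketch of that idiom, with a predicate standing in for the hole and thin-string checks (illustrative, not V8 API):

#include <cstddef>
#include <utility>
#include <vector>

// Keep only entries for which alive() holds, preserving order, then shrink.
// Mirrors the read-index/write-index loop in CleanUpAll().
template <typename T, typename Pred>
void CompactInPlace(std::vector<T>& v, Pred alive) {
  size_t last = 0;
  for (size_t i = 0; i < v.size(); ++i) {
    if (!alive(v[i])) continue;  // hole or thin string: skip
    v[last++] = std::move(v[i]);
  }
  v.resize(last);
}

Compared with std::remove_if, the explicit loop makes it easy to route rejected entries elsewhere, as CleanUpNewSpaceStrings() does when it pushes promoted strings onto old_space_strings_.
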
    5240             : 
    5241       62867 : void Heap::ExternalStringTable::TearDown() {
    5242      125960 :   for (size_t i = 0; i < new_space_strings_.size(); ++i) {
    5243       63093 :     Object o = new_space_strings_[i];
    5244             :     // Don't finalize thin strings.
    5245         121 :     if (o->IsThinString()) continue;
    5246         105 :     heap_->FinalizeExternalString(ExternalString::cast(o));
    5247             :   }
    5248             :   new_space_strings_.clear();
    5249      298710 :   for (size_t i = 0; i < old_space_strings_.size(); ++i) {
    5250      235842 :     Object o = old_space_strings_[i];
    5251             :     // Don't finalize thin strings.
    5252       86482 :     if (o->IsThinString()) continue;
    5253       86481 :     heap_->FinalizeExternalString(ExternalString::cast(o));
    5254             :   }
    5255             :   old_space_strings_.clear();
    5256       62868 : }
    5257             : 
    5258             : 
    5259      728899 : void Heap::RememberUnmappedPage(Address page, bool compacted) {
    5260             :   // Tag the page pointer to make it findable in the dump file.
    5261      728899 :   if (compacted) {
    5262       10342 :     page ^= 0xC1EAD & (Page::kPageSize - 1);  // Cleared.
    5263             :   } else {
    5264      718557 :     page ^= 0x1D1ED & (Page::kPageSize - 1);  // I died.
    5265             :   }
    5266      791781 :   remembered_unmapped_pages_[remembered_unmapped_pages_index_] = page;
    5267      791781 :   remembered_unmapped_pages_index_++;
    5268      791781 :   remembered_unmapped_pages_index_ %= kRememberedUnmappedPages;
    5269      728899 : }
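
RememberUnmappedPage keeps a fixed-size ring of recently unmapped page addresses, XOR-tagging the low bits so the values are recognizable in a crash dump. A sketch of the ring-buffer bookkeeping; the ring size and page size below are assumptions for illustration (V8 uses kRememberedUnmappedPages and Page::kPageSize):

#include <cstdint>

constexpr int kRingSize = 16;                               // assumed
constexpr uintptr_t kAssumedPageSize = uintptr_t{1} << 19;  // 512 KiB, assumed

class UnmappedPageRing {
 public:
  void Remember(uintptr_t page, bool compacted) {
    // XOR a recognizable tag into the low bits so the address stands out in
    // a dump: 0xC1EAD reads as "cleared", 0x1D1ED as "I died".
    page ^= (compacted ? 0xC1EADu : 0x1D1EDu) & (kAssumedPageSize - 1);
    entries_[index_++] = page;
    index_ %= kRingSize;  // wrap around: the oldest entry gets overwritten
  }

 private:
  uintptr_t entries_[kRingSize] = {};
  int index_ = 0;
};
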
    5270             : 
    5271     3388910 : void Heap::RegisterStrongRoots(FullObjectSlot start, FullObjectSlot end) {
    5272     3388910 :   StrongRootsList* list = new StrongRootsList();
    5273     3388929 :   list->next = strong_roots_list_;
    5274     3388929 :   list->start = start;
    5275     3388929 :   list->end = end;
    5276     3388929 :   strong_roots_list_ = list;
    5277     3388929 : }
    5278             : 
    5279     3388905 : void Heap::UnregisterStrongRoots(FullObjectSlot start) {
    5280             :   StrongRootsList* prev = nullptr;
    5281     3388905 :   StrongRootsList* list = strong_roots_list_;
    5282    13618707 :   while (list != nullptr) {
    5283     6840888 :     StrongRootsList* next = list->next;
    5284     6840888 :     if (list->start == start) {
    5285     3388904 :       if (prev) {
    5286         775 :         prev->next = next;
    5287             :       } else {
    5288     3388129 :         strong_roots_list_ = next;
    5289             :       }
    5290     3388904 :       delete list;
    5291             :     } else {
    5292             :       prev = list;
    5293             :     }
    5294             :     list = next;
    5295             :   }
    5296     3388914 : }
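
Strong roots live on an intrusive singly-linked list: registration pushes at the head, and unregistration walks the list with a trailing prev pointer so a match can be spliced out whether it is the head or an interior node. A standalone sketch of the same splice (illustrative types, not V8 API):

struct RootsRange {
  const void* start;
  RootsRange* next;
};

// Remove every node whose start matches, fixing up either the list head or
// the predecessor's next pointer, exactly like UnregisterStrongRoots().
void Unregister(RootsRange** head, const void* start) {
  RootsRange* prev = nullptr;
  for (RootsRange* node = *head; node != nullptr;) {
    RootsRange* next = node->next;
    if (node->start == start) {
      if (prev) {
        prev->next = next;
      } else {
        *head = next;
      }
      delete node;
    } else {
      prev = node;
    }
    node = next;
  }
}
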
    5297             : 
    5298          56 : void Heap::SetBuiltinsConstantsTable(FixedArray cache) {
    5299          56 :   set_builtins_constants_table(cache);
    5300          56 : }
    5301             : 
    5302          56 : void Heap::SetInterpreterEntryTrampolineForProfiling(Code code) {
    5303             :   DCHECK_EQ(Builtins::kInterpreterEntryTrampoline, code->builtin_index());
    5304          56 :   set_interpreter_entry_trampoline_for_profiling(code);
    5305          56 : }
    5306             : 
    5307         198 : void Heap::AddDirtyJSWeakFactory(
    5308             :     JSWeakFactory weak_factory,
    5309             :     std::function<void(HeapObject object, ObjectSlot slot, Object target)>
    5310             :         gc_notify_updated_slot) {
    5311             :   DCHECK(dirty_js_weak_factories()->IsUndefined(isolate()) ||
    5312             :          dirty_js_weak_factories()->IsJSWeakFactory());
    5313             :   DCHECK(weak_factory->next()->IsUndefined(isolate()));
    5314             :   DCHECK(!weak_factory->scheduled_for_cleanup());
    5315         198 :   weak_factory->set_scheduled_for_cleanup(true);
    5316         198 :   weak_factory->set_next(dirty_js_weak_factories());
    5317             :   gc_notify_updated_slot(weak_factory,
    5318             :                          weak_factory.RawField(JSWeakFactory::kNextOffset),
    5319         396 :                          dirty_js_weak_factories());
    5320         198 :   set_dirty_js_weak_factories(weak_factory);
    5321             :   // Roots are rescanned after objects are moved, so no need to record a slot
    5322             :   // for the root pointing to the first JSWeakFactory.
    5323         198 : }
    5324             : 
    5325         190 : void Heap::AddKeepDuringJobTarget(Handle<JSReceiver> target) {
    5326             :   DCHECK(FLAG_harmony_weak_refs);
    5327             :   DCHECK(weak_refs_keep_during_job()->IsUndefined() ||
    5328             :          weak_refs_keep_during_job()->IsOrderedHashSet());
    5329             :   Handle<OrderedHashSet> table;
    5330         380 :   if (weak_refs_keep_during_job()->IsUndefined(isolate())) {
    5331          82 :     table = isolate()->factory()->NewOrderedHashSet();
    5332             :   } else {
    5333             :     table =
    5334         216 :         handle(OrderedHashSet::cast(weak_refs_keep_during_job()), isolate());
    5335             :   }
    5336         190 :   table = OrderedHashSet::Add(isolate(), table, target);
    5337         190 :   set_weak_refs_keep_during_job(*table);
    5338         190 : }
    5339             : 
    5340      727988 : void Heap::ClearKeepDuringJobSet() {
    5341      727986 :   set_weak_refs_keep_during_job(ReadOnlyRoots(isolate()).undefined_value());
    5342      727985 : }
    5343             : 
    5344           0 : size_t Heap::NumberOfTrackedHeapObjectTypes() {
    5345           0 :   return ObjectStats::OBJECT_STATS_COUNT;
    5346             : }
    5347             : 
    5348             : 
    5349           0 : size_t Heap::ObjectCountAtLastGC(size_t index) {
    5350           0 :   if (live_object_stats_ == nullptr || index >= ObjectStats::OBJECT_STATS_COUNT)
    5351             :     return 0;
    5352           0 :   return live_object_stats_->object_count_last_gc(index);
    5353             : }
    5354             : 
    5355             : 
    5356           0 : size_t Heap::ObjectSizeAtLastGC(size_t index) {
    5357           0 :   if (live_object_stats_ == nullptr || index >= ObjectStats::OBJECT_STATS_COUNT)
    5358             :     return 0;
    5359           0 :   return live_object_stats_->object_size_last_gc(index);
    5360             : }
    5361             : 
    5362             : 
    5363           0 : bool Heap::GetObjectTypeName(size_t index, const char** object_type,
    5364             :                              const char** object_sub_type) {
    5365           0 :   if (index >= ObjectStats::OBJECT_STATS_COUNT) return false;
    5366             : 
    5367           0 :   switch (static_cast<int>(index)) {
    5368             : #define COMPARE_AND_RETURN_NAME(name) \
    5369             :   case name:                          \
    5370             :     *object_type = #name;             \
    5371             :     *object_sub_type = "";            \
    5372             :     return true;
    5373           0 :     INSTANCE_TYPE_LIST(COMPARE_AND_RETURN_NAME)
    5374             : #undef COMPARE_AND_RETURN_NAME
    5375             : 
    5376             : #define COMPARE_AND_RETURN_NAME(name)                       \
    5377             :   case ObjectStats::FIRST_VIRTUAL_TYPE + ObjectStats::name: \
    5378             :     *object_type = #name;                                   \
    5379             :     *object_sub_type = "";                                  \
    5380             :     return true;
    5381           0 :     VIRTUAL_INSTANCE_TYPE_LIST(COMPARE_AND_RETURN_NAME)
    5382             : #undef COMPARE_AND_RETURN_NAME
    5383             :   }
    5384             :   return false;
    5385             : }
    5386             : 
    5387         246 : size_t Heap::NumberOfNativeContexts() {
    5388             :   int result = 0;
    5389         246 :   Object context = native_contexts_list();
    5390        1591 :   while (!context->IsUndefined(isolate())) {
    5391        1099 :     ++result;
    5392        1099 :     Context native_context = Context::cast(context);
    5393        1099 :     context = native_context->next_context_link();
    5394             :   }
    5395         246 :   return result;
    5396             : }
    5397             : 
    5398         246 : size_t Heap::NumberOfDetachedContexts() {
    5399             :   // The detached_contexts() array has two entries per detached context.
    5400         492 :   return detached_contexts()->length() / 2;
    5401             : }
    5402             : 
    5403         156 : const char* AllocationSpaceName(AllocationSpace space) {
    5404         156 :   switch (space) {
    5405             :     case NEW_SPACE:
    5406             :       return "NEW_SPACE";
    5407             :     case OLD_SPACE:
    5408           1 :       return "OLD_SPACE";
    5409             :     case CODE_SPACE:
    5410           0 :       return "CODE_SPACE";
    5411             :     case MAP_SPACE:
    5412           2 :       return "MAP_SPACE";
    5413             :     case LO_SPACE:
    5414           0 :       return "LO_SPACE";
    5415             :     case NEW_LO_SPACE:
    5416           0 :       return "NEW_LO_SPACE";
    5417             :     case RO_SPACE:
    5418         153 :       return "RO_SPACE";
    5419             :     default:
    5420           0 :       UNREACHABLE();
    5421             :   }
    5422             :   return nullptr;
    5423             : }
    5424             : 
    5425           0 : void VerifyPointersVisitor::VisitPointers(HeapObject host, ObjectSlot start,
    5426             :                                           ObjectSlot end) {
    5427           0 :   VerifyPointers(host, MaybeObjectSlot(start), MaybeObjectSlot(end));
    5428           0 : }
    5429             : 
    5430           0 : void VerifyPointersVisitor::VisitPointers(HeapObject host,
    5431             :                                           MaybeObjectSlot start,
    5432             :                                           MaybeObjectSlot end) {
    5433           0 :   VerifyPointers(host, start, end);
    5434           0 : }
    5435             : 
    5436           0 : void VerifyPointersVisitor::VisitRootPointers(Root root,
    5437             :                                               const char* description,
    5438             :                                               FullObjectSlot start,
    5439             :                                               FullObjectSlot end) {
    5440             :   VerifyPointersImpl(start, end);
    5441           0 : }
    5442             : 
    5443             : void VerifyPointersVisitor::VerifyHeapObjectImpl(HeapObject heap_object) {
    5444           0 :   CHECK(heap_->Contains(heap_object));
    5445           0 :   CHECK(heap_object->map()->IsMap());
    5446             : }
    5447             : 
    5448             : template <typename TSlot>
    5449             : void VerifyPointersVisitor::VerifyPointersImpl(TSlot start, TSlot end) {
    5450           0 :   for (TSlot slot = start; slot < end; ++slot) {
    5451           0 :     typename TSlot::TObject object = *slot;
    5452           0 :     HeapObject heap_object;
    5453           0 :     if (object.GetHeapObject(&heap_object)) {
    5454             :       VerifyHeapObjectImpl(heap_object);
    5455             :     } else {
    5456           0 :       CHECK(object->IsSmi() || object->IsCleared());
    5457             :     }
    5458             :   }
    5459             : }
    5460             : 
    5461           0 : void VerifyPointersVisitor::VerifyPointers(HeapObject host,
    5462             :                                            MaybeObjectSlot start,
    5463             :                                            MaybeObjectSlot end) {
    5464             :   VerifyPointersImpl(start, end);
    5465           0 : }
    5466             : 
    5467           0 : void VerifyPointersVisitor::VisitCodeTarget(Code host, RelocInfo* rinfo) {
    5468           0 :   Code target = Code::GetCodeFromTargetAddress(rinfo->target_address());
    5469             :   VerifyHeapObjectImpl(target);
    5470           0 : }
    5471             : 
    5472           0 : void VerifyPointersVisitor::VisitEmbeddedPointer(Code host, RelocInfo* rinfo) {
    5473             :   VerifyHeapObjectImpl(rinfo->target_object());
    5474           0 : }
    5475             : 
    5476           0 : void VerifySmisVisitor::VisitRootPointers(Root root, const char* description,
    5477             :                                           FullObjectSlot start,
    5478             :                                           FullObjectSlot end) {
    5479           0 :   for (FullObjectSlot current = start; current < end; ++current) {
    5480           0 :     CHECK((*current)->IsSmi());
    5481             :   }
    5482           0 : }
    5483             : 
    5484           0 : bool Heap::AllowedToBeMigrated(HeapObject obj, AllocationSpace dst) {
    5485             :   // Object migration is governed by the following rules:
    5486             :   //
    5487             :   // 1) Objects in new-space can be migrated to the old space
    5488             :   //    that matches their target space, or they stay in new-space.
    5489             :   // 2) Objects in old-space stay in the same space when migrating.
    5490             :   // 3) Fillers (two or more words) can migrate due to left-trimming of
    5491             :   //    fixed arrays in new-space or old space.
    5492             :   // 4) Fillers (one word) can never migrate; they are skipped by
    5493             :   //    incremental marking explicitly to prevent an invalid pattern.
    5494             :   //
    5495             :   // Since this function is used for debugging only, we do not place
    5496             :   // asserts here, but check everything explicitly.
    5497           0 :   if (obj->map() == ReadOnlyRoots(this).one_pointer_filler_map()) return false;
    5498             :   InstanceType type = obj->map()->instance_type();
    5499             :   MemoryChunk* chunk = MemoryChunk::FromHeapObject(obj);
    5500           0 :   AllocationSpace src = chunk->owner()->identity();
    5501           0 :   switch (src) {
    5502             :     case NEW_SPACE:
    5503           0 :       return dst == NEW_SPACE || dst == OLD_SPACE;
    5504             :     case OLD_SPACE:
    5505           0 :       return dst == OLD_SPACE;
    5506             :     case CODE_SPACE:
    5507           0 :       return dst == CODE_SPACE && type == CODE_TYPE;
    5508             :     case MAP_SPACE:
    5509             :     case LO_SPACE:
    5510             :     case CODE_LO_SPACE:
    5511             :     case NEW_LO_SPACE:
    5512             :     case RO_SPACE:
    5513             :       return false;
    5514             :   }
    5515           0 :   UNREACHABLE();
    5516             : }
    5517             : 
    5518           0 : void Heap::CreateObjectStats() {
    5519           0 :   if (V8_LIKELY(FLAG_gc_stats == 0)) return;
    5520           0 :   if (!live_object_stats_) {
    5521           0 :     live_object_stats_ = new ObjectStats(this);
    5522             :   }
    5523           0 :   if (!dead_object_stats_) {
    5524           0 :     dead_object_stats_ = new ObjectStats(this);
    5525             :   }
    5526             : }
    5527             : 
    5528    22639591 : void AllocationObserver::AllocationStep(int bytes_allocated,
    5529             :                                         Address soon_object, size_t size) {
    5530             :   DCHECK_GE(bytes_allocated, 0);
    5531    22639591 :   bytes_to_next_step_ -= bytes_allocated;
    5532    22639591 :   if (bytes_to_next_step_ <= 0) {
    5533      202012 :     Step(static_cast<int>(step_size_ - bytes_to_next_step_), soon_object, size);
    5534      202012 :     step_size_ = GetNextStepSize();
    5535      202012 :     bytes_to_next_step_ = step_size_;
    5536             :   }
    5537             :   DCHECK_GE(bytes_to_next_step_, 0);
    5538    22639591 : }
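
AllocationStep implements sampled callbacks: each observer carries a byte budget, every allocation draws it down, and once the budget crosses zero the observer fires with the number of bytes actually allocated since the last step (which can overshoot the step size) before the budget is refilled. A sketch of that accounting with a fixed step size (V8 additionally refreshes the size via GetNextStepSize(); the class below is illustrative, not V8 API):

#include <cstdio>

class SampledObserver {
 public:
  explicit SampledObserver(int step_size)
      : step_size_(step_size), bytes_to_next_step_(step_size) {}

  void AllocationStep(int bytes_allocated) {
    bytes_to_next_step_ -= bytes_allocated;
    if (bytes_to_next_step_ <= 0) {
      // If the budget went negative, the step covered more than step_size_
      // bytes; the overshoot is included in the reported amount.
      Step(step_size_ - bytes_to_next_step_);
      bytes_to_next_step_ = step_size_;  // refill for the next sample
    }
  }

 private:
  void Step(int bytes_since_last_step) {
    std::printf("step after %d bytes\n", bytes_since_last_step);
  }

  int step_size_;
  int bytes_to_next_step_;
};
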
    5539             : 
    5540             : namespace {
    5541             : 
    5542     2434698 : Map GcSafeMapOfCodeSpaceObject(HeapObject object) {
    5543             :   MapWord map_word = object->map_word();
    5544             :   return map_word.IsForwardingAddress() ? map_word.ToForwardingAddress()->map()
    5545     4869396 :                                         : map_word.ToMap();
    5546             : }
    5547             : 
    5548     2434697 : int GcSafeSizeOfCodeSpaceObject(HeapObject object) {
    5549     2434697 :   return object->SizeFromMap(GcSafeMapOfCodeSpaceObject(object));
    5550             : }
    5551             : 
    5552             : Code GcSafeCastToCode(Heap* heap, HeapObject object, Address inner_pointer) {
    5553             :   Code code = Code::unchecked_cast(object);
    5554             :   DCHECK(!code.is_null());
    5555             :   DCHECK(heap->GcSafeCodeContains(code, inner_pointer));
    5556             :   return code;
    5557             : }
    5558             : 
    5559             : }  // namespace
    5560             : 
    5561           0 : bool Heap::GcSafeCodeContains(Code code, Address addr) {
    5562           0 :   Map map = GcSafeMapOfCodeSpaceObject(code);
    5563             :   DCHECK(map == ReadOnlyRoots(this).code_map());
    5564           0 :   if (InstructionStream::TryLookupCode(isolate(), addr) == code) return true;
    5565             :   Address start = code->address();
    5566           0 :   Address end = code->address() + code->SizeFromMap(map);
    5567           0 :   return start <= addr && addr < end;
    5568             : }
    5569             : 
    5570     2847820 : Code Heap::GcSafeFindCodeForInnerPointer(Address inner_pointer) {
    5571     1283420 :   Code code = InstructionStream::TryLookupCode(isolate(), inner_pointer);
    5572     1283418 :   if (!code.is_null()) return code;
    5573             : 
    5574             :   // Check if the inner pointer points into a large object chunk.
    5575      521470 :   LargePage* large_page = code_lo_space()->FindPage(inner_pointer);
    5576      521470 :   if (large_page != nullptr) {
    5577             :     return GcSafeCastToCode(this, large_page->GetObject(), inner_pointer);
    5578             :   }
    5579             : 
    5580             :   DCHECK(code_space()->Contains(inner_pointer));
    5581             : 
    5582             :   // Iterate through the page until we reach the end or find an object starting
    5583             :   // after the inner pointer.
    5584             :   Page* page = Page::FromAddress(inner_pointer);
    5585             :   DCHECK_EQ(page->owner(), code_space());
    5586      521465 :   mark_compact_collector()->sweeper()->EnsurePageIsIterable(page);
    5587             : 
    5588      521465 :   Address addr = page->skip_list()->StartFor(inner_pointer);
    5589             :   Address top = code_space()->top();
    5590             :   Address limit = code_space()->limit();
    5591             : 
    5592             :   while (true) {
    5593     2443352 :     if (addr == top && addr != limit) {
    5594             :       addr = limit;
    5595             :       continue;
    5596             :     }
    5597             : 
    5598             :     HeapObject obj = HeapObject::FromAddress(addr);
    5599     2434697 :     int obj_size = GcSafeSizeOfCodeSpaceObject(obj);
    5600     2434697 :     Address next_addr = addr + obj_size;
    5601     2434697 :     if (next_addr > inner_pointer) {
    5602             :       return GcSafeCastToCode(this, obj, inner_pointer);
    5603             :     }
    5604             :     addr = next_addr;
    5605             :   }
    5606             : }
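
When the instruction-stream and large-page lookups miss, finding the Code object containing an inner pointer degenerates to a linear walk: start at a skip-list hint, compute each object's size, and stop at the first object whose extent reaches past the pointer. A sketch over a sorted list of (start, size) extents (illustrative, not V8 API):

#include <cstddef>
#include <cstdint>
#include <vector>

struct Extent {
  uintptr_t start;
  size_t size;
};

// Objects are contiguous and sorted by start address, like a code page, and
// hint_index is assumed to start at or before the enclosing object (the role
// of SkipList::StartFor above). Returns nullptr if the pointer lies past the
// last object.
const Extent* FindEnclosing(const std::vector<Extent>& objects,
                            size_t hint_index, uintptr_t inner_pointer) {
  for (size_t i = hint_index; i < objects.size(); ++i) {
    uintptr_t next_addr = objects[i].start + objects[i].size;
    if (next_addr > inner_pointer) return &objects[i];  // first covering object
  }
  return nullptr;
}
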
    5607             : 
    5608          96 : void Heap::WriteBarrierForCodeSlow(Code code) {
    5609         197 :   for (RelocIterator it(code, RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT));
    5610           5 :        !it.done(); it.next()) {
    5611          10 :     GenerationalBarrierForCode(code, it.rinfo(), it.rinfo()->target_object());
    5612          10 :     MarkingBarrierForCode(code, it.rinfo(), it.rinfo()->target_object());
    5613             :   }
    5614          96 : }
    5615             : 
    5616   106743130 : void Heap::GenerationalBarrierSlow(HeapObject object, Address slot,
    5617             :                                    HeapObject value) {
    5618   106743130 :   Heap* heap = Heap::FromWritableHeapObject(object);
    5619             :   heap->store_buffer()->InsertEntry(slot);
    5620   106743077 : }
    5621             : 
    5622        1002 : void Heap::GenerationalBarrierForElementsSlow(Heap* heap, FixedArray array,
    5623             :                                               int offset, int length) {
    5624     1217976 :   for (int i = 0; i < length; i++) {
    5625     2433948 :     if (!InNewSpace(array->get(offset + i))) continue;
    5626             :     heap->store_buffer()->InsertEntry(
    5627             :         array->RawFieldOfElementAt(offset + i).address());
    5628             :   }
    5629        1002 : }
    5630             : 
    5631      870196 : void Heap::GenerationalBarrierForCodeSlow(Code host, RelocInfo* rinfo,
    5632             :                                           HeapObject object) {
    5633             :   DCHECK(InNewSpace(object));
    5634             :   Page* source_page = Page::FromHeapObject(host);
    5635             :   RelocInfo::Mode rmode = rinfo->rmode();
    5636             :   Address addr = rinfo->pc();
    5637             :   SlotType slot_type = SlotTypeForRelocInfoMode(rmode);
    5638      435098 :   if (rinfo->IsInConstantPool()) {
    5639             :     addr = rinfo->constant_pool_entry_address();
    5640             :     if (RelocInfo::IsCodeTargetMode(rmode)) {
    5641             :       slot_type = CODE_ENTRY_SLOT;
    5642             :     } else {
    5643             :       DCHECK(RelocInfo::IsEmbeddedObject(rmode));
    5644             :       slot_type = OBJECT_SLOT;
    5645             :     }
    5646             :   }
    5647      870196 :   uintptr_t offset = addr - source_page->address();
    5648             :   DCHECK_LT(offset, static_cast<uintptr_t>(TypedSlotSet::kMaxOffset));
    5649             :   RememberedSet<OLD_TO_NEW>::InsertTyped(source_page, slot_type,
    5650      435098 :                                          static_cast<uint32_t>(offset));
    5651      435098 : }
    5652             : 
    5653   275081034 : void Heap::MarkingBarrierSlow(HeapObject object, Address slot,
    5654             :                               HeapObject value) {
    5655   275081482 :   Heap* heap = Heap::FromWritableHeapObject(object);
    5656             :   heap->incremental_marking()->RecordWriteSlow(object, HeapObjectSlot(slot),
    5657   275081482 :                                                value);
    5658   275080952 : }
    5659             : 
    5660        1230 : void Heap::MarkingBarrierForElementsSlow(Heap* heap, HeapObject object) {
    5661             :   IncrementalMarking::MarkingState* marking_state =
    5662             :       heap->incremental_marking()->marking_state();
    5663         615 :   if (!marking_state->IsBlack(object)) {
    5664             :     marking_state->WhiteToGrey(object);
    5665             :     marking_state->GreyToBlack(object);
    5666             :   }
    5667         615 :   heap->incremental_marking()->RevisitObject(object);
    5668         615 : }
    5669             : 
    5670      254942 : void Heap::MarkingBarrierForCodeSlow(Code host, RelocInfo* rinfo,
    5671             :                                      HeapObject object) {
    5672      254943 :   Heap* heap = Heap::FromWritableHeapObject(host);
    5673             :   DCHECK(heap->incremental_marking()->IsMarking());
    5674      254943 :   heap->incremental_marking()->RecordWriteIntoCode(host, rinfo, object);
    5675      254942 : }
    5676             : 
    5677    21205078 : void Heap::MarkingBarrierForDescriptorArraySlow(Heap* heap, HeapObject host,
    5678             :                                                 HeapObject raw_descriptor_array,
    5679             :                                                 int number_of_own_descriptors) {
    5680             :   DCHECK(heap->incremental_marking()->IsMarking());
    5681             :   DescriptorArray descriptor_array =
    5682             :       DescriptorArray::cast(raw_descriptor_array);
    5683             :   int16_t raw_marked = descriptor_array->raw_number_of_marked_descriptors();
    5684     8864011 :   if (NumberOfMarkedDescriptors::decode(heap->mark_compact_collector()->epoch(),
    5685    17728022 :                                         raw_marked) <
    5686             :       number_of_own_descriptors) {
    5687             :     heap->incremental_marking()->VisitDescriptors(host, descriptor_array,
    5688     3477055 :                                                   number_of_own_descriptors);
    5689             :   }
    5690     8864012 : }
    5691             : 
    5692           0 : bool Heap::PageFlagsAreConsistent(HeapObject object) {
    5693           0 :   Heap* heap = Heap::FromWritableHeapObject(object);
    5694           0 :   MemoryChunk* chunk = MemoryChunk::FromHeapObject(object);
    5695             :   heap_internals::MemoryChunk* slim_chunk =
    5696             :       heap_internals::MemoryChunk::FromHeapObject(object);
    5697             : 
    5698             :   const bool generation_consistency =
    5699           0 :       chunk->owner()->identity() != NEW_SPACE ||
    5700           0 :       (chunk->InNewSpace() && slim_chunk->InNewSpace());
    5701             :   const bool marking_consistency =
    5702           0 :       !heap->incremental_marking()->IsMarking() ||
    5703           0 :       (chunk->IsFlagSet(MemoryChunk::INCREMENTAL_MARKING) &&
    5704             :        slim_chunk->IsMarking());
    5705             : 
    5706           0 :   return generation_consistency && marking_consistency;
    5707             : }
    5708             : 
    5709             : static_assert(MemoryChunk::Flag::INCREMENTAL_MARKING ==
    5710             :                   heap_internals::MemoryChunk::kMarkingBit,
    5711             :               "Incremental marking flag inconsistent");
    5712             : static_assert(MemoryChunk::Flag::IN_FROM_SPACE ==
    5713             :                   heap_internals::MemoryChunk::kFromSpaceBit,
    5714             :               "From space flag inconsistent");
    5715             : static_assert(MemoryChunk::Flag::IN_TO_SPACE ==
    5716             :                   heap_internals::MemoryChunk::kToSpaceBit,
    5717             :               "To space flag inconsistent");
    5718             : static_assert(MemoryChunk::kFlagsOffset ==
    5719             :                   heap_internals::MemoryChunk::kFlagsOffset,
    5720             :               "Flag offset inconsistent");
    5721             : static_assert(MemoryChunk::kHeapOffset ==
    5722             :                   heap_internals::MemoryChunk::kHeapOffset,
    5723             :               "Heap offset inconsistent");
    5724             : 
    5725           5 : void Heap::SetEmbedderStackStateForNextFinalizaton(
    5726           5 :     EmbedderHeapTracer::EmbedderStackState stack_state) {
    5727             :   local_embedder_heap_tracer()->SetEmbedderStackStateForNextFinalization(
    5728           5 :       stack_state);
    5729           5 : }
    5730             : 
    5731             : #ifdef DEBUG
    5732             : void Heap::IncrementObjectCounters() {
    5733             :   isolate_->counters()->objs_since_last_full()->Increment();
    5734             :   isolate_->counters()->objs_since_last_young()->Increment();
    5735             : }
    5736             : #endif  // DEBUG
    5737             : 
    5738             : }  // namespace internal
    5739      183867 : }  // namespace v8

Generated by: LCOV version 1.10