/src/hermes/lib/VM/GCBase.cpp
Line  | Count  | Source  | 
1  |  | /*  | 
2  |  |  * Copyright (c) Meta Platforms, Inc. and affiliates.  | 
3  |  |  *  | 
4  |  |  * This source code is licensed under the MIT license found in the  | 
5  |  |  * LICENSE file in the root directory of this source tree.  | 
6  |  |  */  | 
7  |  |  | 
8  |  | #define DEBUG_TYPE "gc"  | 
9  |  | #include "hermes/VM/GC.h"  | 
10  |  |  | 
11  |  | #include "hermes/Platform/Logging.h"  | 
12  |  | #include "hermes/Public/JSOutOfMemoryError.h"  | 
13  |  | #include "hermes/Support/ErrorHandling.h"  | 
14  |  | #include "hermes/Support/OSCompat.h"  | 
15  |  | #include "hermes/VM/CellKind.h"  | 
16  |  | #include "hermes/VM/JSWeakMapImpl.h"  | 
17  |  | #include "hermes/VM/RootAndSlotAcceptorDefault.h"  | 
18  |  | #include "hermes/VM/Runtime.h"  | 
19  |  | #include "hermes/VM/SmallHermesValue-inline.h"  | 
20  |  | #include "hermes/VM/VTable.h"  | 
21  |  |  | 
22  |  | #include "llvh/Support/Debug.h"  | 
23  |  | #include "llvh/Support/FileSystem.h"  | 
24  |  | #include "llvh/Support/Format.h"  | 
25  |  | #include "llvh/Support/raw_os_ostream.h"  | 
26  |  | #include "llvh/Support/raw_ostream.h"  | 
27  |  |  | 
28  |  | #include <inttypes.h>  | 
29  |  | #include <clocale>  | 
30  |  | #include <stdexcept>  | 
31  |  | #include <system_error>  | 
32  |  | #pragma GCC diagnostic push  | 
33  |  |  | 
34  |  | #ifdef HERMES_COMPILER_SUPPORTS_WSHORTEN_64_TO_32  | 
35  |  | #pragma GCC diagnostic ignored "-Wshorten-64-to-32"  | 
36  |  | #endif  | 
37  |  | using llvh::format;  | 
38  |  |  | 
39  |  | namespace hermes { | 
40  |  | namespace vm { | 
41  |  |  | 
42  |  | const char GCBase::kNaturalCauseForAnalytics[] = "natural";  | 
43  |  | const char GCBase::kHandleSanCauseForAnalytics[] = "handle-san";  | 
44  |  |  | 
45  |  | GCBase::GCBase(  | 
46  |  |     GCCallbacks &gcCallbacks,  | 
47  |  |     PointerBase &pointerBase,  | 
48  |  |     const GCConfig &gcConfig,  | 
49  |  |     std::shared_ptr<CrashManager> crashMgr,  | 
50  |  |     HeapKind kind)  | 
51  | 160  |     : gcCallbacks_(gcCallbacks),  | 
52  | 160  |       pointerBase_(pointerBase),  | 
53  | 160  |       crashMgr_(crashMgr),  | 
54  | 160  |       heapKind_(kind),  | 
55  | 160  |       analyticsCallback_(gcConfig.getAnalyticsCallback()),  | 
56  | 160  |       recordGcStats_(gcConfig.getShouldRecordStats()),  | 
57  |  |       // Start off not in GC.  | 
58  | 160  |       inGC_(false),  | 
59  | 160  |       name_(gcConfig.getName()),  | 
60  | 160  |       weakSlots_(gcConfig.getOccupancyTarget(), 0.5 /* sizingWeight */),  | 
61  | 160  |       weakMapEntrySlots_(gcConfig.getOccupancyTarget(), 0.5 /* sizingWeight */),  | 
62  |  | #ifdef HERMES_MEMORY_INSTRUMENTATION  | 
63  | 160  |       allocationLocationTracker_(this),  | 
64  | 160  |       samplingAllocationTracker_(this),  | 
65  |  | #endif  | 
66  |  | #ifdef HERMESVM_SANITIZE_HANDLES  | 
67  |  |       sanitizeRate_(gcConfig.getSanitizeConfig().getSanitizeRate()),  | 
68  |  | #endif  | 
69  | 160  |       tripwireCallback_(gcConfig.getTripwireConfig().getCallback()),  | 
70  | 160  |       tripwireLimit_(gcConfig.getTripwireConfig().getLimit()) { | 
71  | 640  |   for (unsigned i = 0; i < (unsigned)XorPtrKeyID::_NumKeys; ++i) { | 
72  | 480  |     pointerEncryptionKey_[i] = std::random_device()();  | 
73  | 480  |     if constexpr (sizeof(uintptr_t) >= 8) { | 
74  |  |       // std::random_device() yields an unsigned int, so combine two.  | 
75  | 480  |       pointerEncryptionKey_[i] =  | 
76  | 480  |           (pointerEncryptionKey_[i] << 32) | std::random_device()();  | 
77  | 480  |     }  | 
78  | 480  |   }  | 
79  | 160  |   buildMetadataTable();  | 
80  |  | #ifdef HERMESVM_PLATFORM_LOGGING  | 
81  |  |   hermesLog(  | 
82  |  |       "HermesGC",  | 
83  |  |       "Initialisation (Init: %dMB, Max: %dMB, Tripwire: %dMB)",  | 
84  |  |       gcConfig.getInitHeapSize() >> 20,  | 
85  |  |       gcConfig.getMaxHeapSize() >> 20,  | 
86  |  |       gcConfig.getTripwireConfig().getLimit() >> 20);  | 
87  |  | #endif // HERMESVM_PLATFORM_LOGGING  | 
88  |  | #ifdef HERMESVM_SANITIZE_HANDLES  | 
89  |  |   const std::minstd_rand::result_type seed =  | 
90  |  |       gcConfig.getSanitizeConfig().getRandomSeed() >= 0  | 
91  |  |       ? gcConfig.getSanitizeConfig().getRandomSeed()  | 
92  |  |       : std::random_device()();  | 
93  |  |   if (sanitizeRate_ > 0.0 && sanitizeRate_ < 1.0) { | 
94  |  |     llvh::errs()  | 
95  |  |         << "Warning: you are using handle sanitization with random sampling.\n"  | 
96  |  |         << "Sanitize Rate: ";  | 
97  |  |     llvh::write_double(llvh::errs(), sanitizeRate_, llvh::FloatStyle::Percent);  | 
98  |  |     llvh::errs() << "\n"  | 
99  |  |                  << "Sanitize Rate Seed: " << seed << "\n"  | 
100  |  |                  << "Re-run with -gc-sanitize-handles-random-seed=" << seed  | 
101  |  |                  << " for deterministic crashes.\n";  | 
102  |  |   }  | 
103  |  |   randomEngine_.seed(seed);  | 
104  |  | #endif  | 
105  | 160  | }  | 
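// A minimal sketch (illustrative only, not Hermes's XorPtr implementation) of
// what the per-key random masks generated above enable: XOR with the key is an
// involution, so a single function both encodes and decodes, and the raw
// pointer value never sits verbatim in the encrypted field.
namespace {
[[maybe_unused]] inline uintptr_t xorPtrSketch(uintptr_t ptr, uintptr_t key) {
  return ptr ^ key; // applying the same key a second time restores ptr
}
} // namespace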
106  |  |  | 
107  |  | GCBase::GCCycle::GCCycle(GCBase &gc, std::string extraInfo)  | 
108  | 113  |     : gc_(gc), extraInfo_(std::move(extraInfo)), previousInGC_(gc_.inGC_) { | 
109  | 113  |   if (!previousInGC_) { | 
110  | 113  |     gc_.getCallbacks().onGCEvent(GCEventKind::CollectionStart, extraInfo_);  | 
111  | 113  |     gc_.inGC_ = true;  | 
112  | 113  |   }  | 
113  | 113  | }  | 
114  |  |  | 
115  | 113  | GCBase::GCCycle::~GCCycle() { | 
116  | 113  |   if (!previousInGC_) { | 
117  | 113  |     gc_.inGC_ = false;  | 
118  | 113  |     gc_.getCallbacks().onGCEvent(GCEventKind::CollectionEnd, extraInfo_);  | 
119  | 113  |   }  | 
120  | 113  | }  | 
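// GCCycle is an RAII guard with re-entrancy support: it snapshots the previous
// inGC_ value, and only the outermost cycle toggles the flag and fires the
// CollectionStart/CollectionEnd events. A nested cycle constructed while a
// collection is already marked in progress is a no-op in both the constructor
// and the destructor.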
121  |  |  | 
122  | 274  | void GCBase::runtimeWillExecute() { | 
123  | 274  |   if (recordGcStats_ && !execStartTimeRecorded_) { | 
124  | 0  |     execStartTime_ = std::chrono::steady_clock::now();  | 
125  | 0  |     execStartCPUTime_ = oscompat::thread_cpu_time();  | 
126  | 0  |     oscompat::num_context_switches(  | 
127  | 0  |         startNumVoluntaryContextSwitches_, startNumInvoluntaryContextSwitches_);  | 
128  | 0  |     execStartTimeRecorded_ = true;  | 
129  | 0  |   }  | 
130  | 274  | }  | 
131  |  |  | 
132  |  | #ifdef HERMES_MEMORY_INSTRUMENTATION  | 
133  | 0  | std::error_code GCBase::createSnapshotToFile(const std::string &fileName) { | 
134  | 0  |   std::error_code code;  | 
135  | 0  |   llvh::raw_fd_ostream os(fileName, code, llvh::sys::fs::FileAccess::FA_Write);  | 
136  | 0  |   if (code) { | 
137  | 0  |     return code;  | 
138  | 0  |   }  | 
139  | 0  |   createSnapshot(os, true);  | 
140  | 0  |   return std::error_code{}; | 
141  | 0  | }  | 
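// Hypothetical call site for the helper above (the file name is illustrative):
//
//   std::error_code ec = gc.createSnapshotToFile("heap.heapsnapshot");
//   if (ec)
//     llvh::errs() << "snapshot failed: " << ec.message() << "\n";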
142  |  |  | 
143  |  | namespace { | 
144  |  |  | 
145  |  | constexpr HeapSnapshot::NodeID objectIDForRootSection(  | 
146  | 0  |     RootAcceptor::Section section) { | 
147  |  |   // Root section enum values start at zero, while in IDTracker the  | 
148  |  |   // reserved IDs for root sections begin one past the reserved GCRoots ID,  | 
149  |  |   // so GCRoots + 1 + section converts a section into its reserved ID.  | 
150  | 0  |   return GCBase::IDTracker::reserved(  | 
151  | 0  |       static_cast<GCBase::IDTracker::ReservedObjectID>(  | 
152  | 0  |           static_cast<HeapSnapshot::NodeID>(  | 
153  | 0  |               GCBase::IDTracker::ReservedObjectID::GCRoots) +  | 
154  | 0  |           1 + static_cast<HeapSnapshot::NodeID>(section)));  | 
155  | 0  | }  | 
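// Worked example of the conversion above: if GCRoots has reserved ID R, the
// section with enum value 0 maps to reserved ID R + 1, value 1 to R + 2, and
// so on; the root sections occupy a contiguous block of reserved IDs
// immediately after GCRoots.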
156  |  |  | 
157  |  | // Abstract base class for all snapshot acceptors.  | 
158  |  | struct SnapshotAcceptor : public RootAndSlotAcceptorWithNamesDefault { | 
159  |  |   using RootAndSlotAcceptorWithNamesDefault::accept;  | 
160  |  |  | 
161  |  |   SnapshotAcceptor(PointerBase &base, HeapSnapshot &snap)  | 
162  | 0  |       : RootAndSlotAcceptorWithNamesDefault(base), snap_(snap) {} | 
163  |  |  | 
164  | 0  |   void acceptHV(HermesValue &hv, const char *name) override { | 
165  | 0  |     if (hv.isPointer()) { | 
166  | 0  |       GCCell *ptr = static_cast<GCCell *>(hv.getPointer());  | 
167  | 0  |       accept(ptr, name);  | 
168  | 0  |     }  | 
169  | 0  |   }  | 
170  | 0  |   void acceptSHV(SmallHermesValue &hv, const char *name) override { | 
171  | 0  |     if (hv.isPointer()) { | 
172  | 0  |       GCCell *ptr = static_cast<GCCell *>(hv.getPointer(pointerBase_));  | 
173  | 0  |       accept(ptr, name);  | 
174  | 0  |     }  | 
175  | 0  |   }  | 
176  |  |  | 
177  |  |  protected:  | 
178  |  |   HeapSnapshot &snap_;  | 
179  |  | };  | 
180  |  |  | 
181  |  | struct PrimitiveNodeAcceptor : public SnapshotAcceptor { | 
182  |  |   using SnapshotAcceptor::accept;  | 
183  |  |  | 
184  |  |   PrimitiveNodeAcceptor(  | 
185  |  |       PointerBase &base,  | 
186  |  |       HeapSnapshot &snap,  | 
187  |  |       GCBase::IDTracker &tracker)  | 
188  | 0  |       : SnapshotAcceptor(base, snap), tracker_(tracker) {} | 
189  |  |  | 
190  |  |   // Do nothing for any value except a number.  | 
191  | 0  |   void accept(GCCell *&ptr, const char *name) override {} | 
192  |  |  | 
193  | 0  |   void acceptHV(HermesValue &hv, const char *) override { | 
194  | 0  |     if (tracker_.isTrackingNumberIDs() && hv.isNumber()) { | 
195  | 0  |       seenNumbers_.insert(hv.getNumber());  | 
196  | 0  |     }  | 
197  | 0  |   }  | 
198  |  |  | 
199  | 0  |   void acceptSHV(SmallHermesValue &hv, const char *) override { | 
200  | 0  |     if (tracker_.isTrackingNumberIDs() && hv.isNumber()) { | 
201  | 0  |       seenNumbers_.insert(hv.getNumber(pointerBase_));  | 
202  | 0  |     }  | 
203  | 0  |   }  | 
204  |  |  | 
205  | 0  |   void writeAllNodes() { | 
206  |  |     // Always write out the nodes for singletons.  | 
207  | 0  |     snap_.beginNode();  | 
208  | 0  |     snap_.endNode(  | 
209  | 0  |         HeapSnapshot::NodeType::Object,  | 
210  | 0  |         "undefined",  | 
211  | 0  |         GCBase::IDTracker::reserved(  | 
212  | 0  |             GCBase::IDTracker::ReservedObjectID::Undefined),  | 
213  | 0  |         0,  | 
214  | 0  |         0);  | 
215  | 0  |     snap_.beginNode();  | 
216  | 0  |     snap_.endNode(  | 
217  | 0  |         HeapSnapshot::NodeType::Object,  | 
218  | 0  |         "null",  | 
219  | 0  |         GCBase::IDTracker::reserved(GCBase::IDTracker::ReservedObjectID::Null),  | 
220  | 0  |         0,  | 
221  | 0  |         0);  | 
222  | 0  |     snap_.beginNode();  | 
223  | 0  |     snap_.endNode(  | 
224  | 0  |         HeapSnapshot::NodeType::Object,  | 
225  | 0  |         "true",  | 
226  | 0  |         GCBase::IDTracker::reserved(GCBase::IDTracker::ReservedObjectID::True),  | 
227  | 0  |         0,  | 
228  | 0  |         0);  | 
229  | 0  |     snap_.beginNode();  | 
230  | 0  |     snap_.endNode(  | 
231  | 0  |         HeapSnapshot::NodeType::Object,  | 
232  | 0  |         "false",  | 
233  | 0  |         GCBase::IDTracker::reserved(GCBase::IDTracker::ReservedObjectID::False),  | 
234  | 0  |         0,  | 
235  | 0  |         0);  | 
236  | 0  |     if (tracker_.isTrackingNumberIDs()) { | 
237  | 0  |       for (double num : seenNumbers_) { | 
238  |  |         // A number never has any edges, so just make a node for it.  | 
239  | 0  |         snap_.beginNode();  | 
240  |  |         // Convert the number value to a string, according to the JS conversion  | 
241  |  |         // routines.  | 
242  | 0  |         char buf[hermes::NUMBER_TO_STRING_BUF_SIZE];  | 
243  | 0  |         size_t len = hermes::numberToString(num, buf, sizeof(buf));  | 
244  | 0  |         snap_.endNode(  | 
245  | 0  |             HeapSnapshot::NodeType::Number,  | 
246  | 0  |             llvh::StringRef{buf, len}, | 
247  | 0  |             tracker_.getNumberID(num),  | 
248  |  |             // Numbers are zero-sized in the heap because they're stored inline.  | 
249  | 0  |             0,  | 
250  | 0  |             0);  | 
251  | 0  |       }  | 
252  | 0  |     } else { | 
253  | 0  |       snap_.beginNode();  | 
254  | 0  |       snap_.endNode(  | 
255  | 0  |           HeapSnapshot::NodeType::Object,  | 
256  | 0  |           "number",  | 
257  | 0  |           GCBase::IDTracker::reserved(  | 
258  | 0  |               GCBase::IDTracker::ReservedObjectID::Number),  | 
259  | 0  |           0,  | 
260  | 0  |           0);  | 
261  | 0  |     }  | 
262  | 0  |   }  | 
263  |  |  | 
264  |  |  private:  | 
265  |  |   GCBase::IDTracker &tracker_;  | 
266  |  |   // Track all numbers that are seen in a heap pass, and only emit one node for  | 
267  |  |   // each of them.  | 
268  |  |   llvh::DenseSet<double, GCBase::IDTracker::DoubleComparator> seenNumbers_;  | 
269  |  | };  | 
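// Usage sketch, mirroring how createSnapshotImpl below drives this acceptor:
// run it over every cell first to collect the distinct doubles, then flush
// them as nodes in one batch.
//
//   PrimitiveNodeAcceptor acceptor(base, snap, tracker);
//   SlotVisitorWithNames<PrimitiveNodeAcceptor> visitor{acceptor};
//   gc.forAllObjs([&](GCCell *cell) { markCellWithNames(visitor, cell); });
//   acceptor.writeAllNodes(); // one Number node per distinct double seen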
270  |  |  | 
271  |  | struct EdgeAddingAcceptor : public SnapshotAcceptor { | 
272  |  |   using SnapshotAcceptor::accept;  | 
273  |  |  | 
274  |  |   EdgeAddingAcceptor(GCBase &gc, HeapSnapshot &snap)  | 
275  | 0  |       : SnapshotAcceptor(gc.getPointerBase(), snap), gc_(gc) {} | 
276  |  |  | 
277  | 0  |   void accept(GCCell *&ptr, const char *name) override { | 
278  | 0  |     if (!ptr) { | 
279  | 0  |       return;  | 
280  | 0  |     }  | 
281  | 0  |     snap_.addNamedEdge(  | 
282  | 0  |         HeapSnapshot::EdgeType::Internal,  | 
283  | 0  |         llvh::StringRef::withNullAsEmpty(name),  | 
284  | 0  |         gc_.getObjectID(ptr));  | 
285  | 0  |   }  | 
286  |  |  | 
287  | 0  |   void acceptHV(HermesValue &hv, const char *name) override { | 
288  | 0  |     if (auto id = gc_.getSnapshotID(hv)) { | 
289  | 0  |       snap_.addNamedEdge(  | 
290  | 0  |           HeapSnapshot::EdgeType::Internal,  | 
291  | 0  |           llvh::StringRef::withNullAsEmpty(name),  | 
292  | 0  |           id.getValue());  | 
293  | 0  |     }  | 
294  | 0  |   }  | 
295  |  |  | 
296  | 0  |   void acceptSHV(SmallHermesValue &shv, const char *name) override { | 
297  | 0  |     HermesValue hv = shv.toHV(pointerBase_);  | 
298  | 0  |     acceptHV(hv, name);  | 
299  | 0  |   }  | 
300  |  |  | 
301  | 0  |   void acceptSym(SymbolID sym, const char *name) override { | 
302  | 0  |     if (sym.isInvalid()) { | 
303  | 0  |       return;  | 
304  | 0  |     }  | 
305  | 0  |     snap_.addNamedEdge(  | 
306  | 0  |         HeapSnapshot::EdgeType::Internal,  | 
307  | 0  |         llvh::StringRef::withNullAsEmpty(name),  | 
308  | 0  |         gc_.getObjectID(sym));  | 
309  | 0  |   }  | 
310  |  |  | 
311  |  |  private:  | 
312  |  |   GCBase &gc_;  | 
313  |  | };  | 
314  |  |  | 
315  |  | struct SnapshotRootSectionAcceptor : public SnapshotAcceptor,  | 
316  |  |                                      public WeakAcceptorDefault { | 
317  |  |   using SnapshotAcceptor::accept;  | 
318  |  |   using WeakRootAcceptor::acceptWeak;  | 
319  |  |  | 
320  |  |   SnapshotRootSectionAcceptor(PointerBase &base, HeapSnapshot &snap)  | 
321  | 0  |       : SnapshotAcceptor(base, snap), WeakAcceptorDefault(base) {} | 
322  |  |  | 
323  | 0  |   void accept(GCCell *&, const char *) override { | 
324  |  |     // While adding edges to root sections, there's no need to do anything for  | 
325  |  |     // pointers.  | 
326  | 0  |   }  | 
327  |  |  | 
328  | 0  |   void acceptWeak(GCCell *&ptr) override { | 
329  |  |     // Same goes for weak pointers.  | 
330  | 0  |   }  | 
331  |  |  | 
332  | 0  |   void beginRootSection(Section section) override { | 
333  |  |     // Make an element edge from the super root to each root section.  | 
334  | 0  |     snap_.addIndexedEdge(  | 
335  | 0  |         HeapSnapshot::EdgeType::Element,  | 
336  | 0  |         rootSectionNum_++,  | 
337  | 0  |         objectIDForRootSection(section));  | 
338  | 0  |   }  | 
339  |  |  | 
340  | 0  |   void endRootSection() override { | 
341  |  |     // Do nothing for the end of the root section.  | 
342  | 0  |   }  | 
343  |  |  | 
344  |  |  private:  | 
345  |  |   // v8's roots start numbering at 1.  | 
346  |  |   int rootSectionNum_{1}; | 
347  |  | };  | 
348  |  |  | 
349  |  | struct SnapshotRootAcceptor : public SnapshotAcceptor,  | 
350  |  |                               public WeakAcceptorDefault { | 
351  |  |   using SnapshotAcceptor::accept;  | 
352  |  |   using WeakRootAcceptor::acceptWeak;  | 
353  |  |  | 
354  |  |   SnapshotRootAcceptor(  | 
355  |  |       GCBase &gc,  | 
356  |  |       HeapSnapshot &snap,  | 
357  |  |       GCBase::SavedNumRootEdges &numRootEdges)  | 
358  | 0  |       : SnapshotAcceptor(gc.getPointerBase(), snap),  | 
359  | 0  |         WeakAcceptorDefault(gc.getPointerBase()),  | 
360  | 0  |         gc_(gc),  | 
361  | 0  |         numRootEdges_(numRootEdges) {} | 
362  |  |  | 
363  | 0  |   void accept(GCCell *&ptr, const char *name) override { | 
364  | 0  |     pointerAccept(ptr, name, false);  | 
365  | 0  |   }  | 
366  |  |  | 
367  | 0  |   void acceptWeak(GCCell *&ptr) override { | 
368  | 0  |     pointerAccept(ptr, nullptr, true);  | 
369  | 0  |   }  | 
370  |  |  | 
371  | 0  |   void acceptSym(SymbolID sym, const char *name) override { | 
372  | 0  |     if (sym.isInvalid()) { | 
373  | 0  |       return;  | 
374  | 0  |     }  | 
375  | 0  |     auto nameRef = llvh::StringRef::withNullAsEmpty(name);  | 
376  | 0  |     const auto id = gc_.getObjectID(sym);  | 
377  | 0  |     if (!nameRef.empty()) { | 
378  | 0  |       snap_.addNamedEdge(HeapSnapshot::EdgeType::Internal, nameRef, id);  | 
379  | 0  |     } else { | 
380  |  |       // Unnamed edges get indices.  | 
381  | 0  |       snap_.addIndexedEdge(HeapSnapshot::EdgeType::Element, nextEdge_++, id);  | 
382  | 0  |     }  | 
383  | 0  |   }  | 
384  |  |  | 
385  |  |   void provideSnapshot(  | 
386  | 0  |       const std::function<void(HeapSnapshot &)> &func) override { | 
387  | 0  |     func(snap_);  | 
388  | 0  |   }  | 
389  |  |  | 
390  | 0  |   void beginRootSection(Section section) override { | 
391  | 0  |     assert(  | 
392  | 0  |         currentSection_ == Section::InvalidSection &&  | 
393  | 0  |         "beginRootSection called while previous section is open");  | 
394  | 0  |     snap_.beginNode();  | 
395  | 0  |     currentSection_ = section;  | 
396  | 0  |   }  | 
397  |  |  | 
398  | 0  |   void endRootSection() override { | 
399  |  |     // A root section creates a synthetic node with that name and makes edges  | 
400  |  |     // come from that root.  | 
401  | 0  |     static const char *rootNames[] = { | 
402  |  | // The parentheses around the name are adopted from V8's roots.  | 
403  | 0  | #define ROOT_SECTION(name) "(" #name ")", | 
404  | 0  | #include "hermes/VM/RootSections.def"  | 
405  | 0  |     };  | 
406  |  |  | 
407  |  |     // If we haven't visited this section before, save its current edge count.  | 
408  | 0  |     auto sectionIdx = static_cast<unsigned>(currentSection_);  | 
409  | 0  |     if (!numRootEdges_[sectionIdx].hasValue()) { | 
410  | 0  |       numRootEdges_[sectionIdx] = snap_.getCurEdgeCount();  | 
411  | 0  |     } else { | 
412  |  |       // Compare the edge count of this scan with that of the first scan. If  | 
413  |  |       // some roots have since been dropped, add dummy edges so that every  | 
414  |  |       // scan within a single call of createSnapshot() produces the same edge  | 
415  |  |       // count as the first scan.  | 
416  | 0  |       auto savedEdgeCount = numRootEdges_[sectionIdx].getValue();  | 
417  | 0  |       assert(  | 
418  | 0  |           savedEdgeCount >= snap_.getCurEdgeCount() &&  | 
419  | 0  |           "Unexpected new edges added");  | 
420  | 0  |       const auto id = GCBase::IDTracker::reserved(  | 
421  | 0  |           GCBase::IDTracker::ReservedObjectID::Undefined);  | 
422  | 0  |       for (auto i = snap_.getCurEdgeCount(); i < savedEdgeCount; ++i) { | 
423  | 0  |         snap_.addIndexedEdge(HeapSnapshot::EdgeType::Element, nextEdge_++, id);  | 
424  | 0  |       }  | 
425  | 0  |     }  | 
426  |  |  | 
427  | 0  |     snap_.endNode(  | 
428  | 0  |         HeapSnapshot::NodeType::Synthetic,  | 
429  | 0  |         rootNames[static_cast<unsigned>(currentSection_)],  | 
430  | 0  |         objectIDForRootSection(currentSection_),  | 
431  |  |         // The heap visualizer doesn't like it when these synthetic nodes have a  | 
432  |  |         // size (it describes them as living in the heap).  | 
433  | 0  |         0,  | 
434  | 0  |         0);  | 
435  | 0  |     currentSection_ = Section::InvalidSection;  | 
436  |  |     // Reset the edge counter, so each root section's unnamed edges start at  | 
437  |  |     // zero.  | 
438  | 0  |     nextEdge_ = 0;  | 
439  | 0  |   }  | 
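  // Worked example of the padding above: if the first scan of a section
  // recorded 5 edges and a later scan produces only 3 (two roots were dropped
  // in between), two Element edges to the reserved Undefined node are
  // appended, so both passes of createSnapshotImpl emit identical node/edge
  // counts.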
440  |  |  | 
441  |  |  private:  | 
442  |  |   GCBase &gc_;  | 
443  |  |   llvh::DenseSet<HeapSnapshot::NodeID> seenIDs_;  | 
444  |  |   /// For unnamed edges, use indices instead.  | 
445  |  |   unsigned nextEdge_{0}; | 
446  |  |   Section currentSection_{Section::InvalidSection}; | 
447  |  |   /// Number of edges for each root section.  | 
448  |  |   GCBase::SavedNumRootEdges &numRootEdges_;  | 
449  |  |  | 
450  | 0  |   void pointerAccept(GCCell *ptr, const char *name, bool weak) { | 
451  | 0  |     assert(  | 
452  | 0  |         currentSection_ != Section::InvalidSection &&  | 
453  | 0  |         "accept called outside of begin/end root section pair");  | 
454  | 0  |     if (!ptr) { | 
455  | 0  |       return;  | 
456  | 0  |     }  | 
457  |  |  | 
458  | 0  |     const auto id = gc_.getObjectID(ptr);  | 
459  | 0  |     if (!seenIDs_.insert(id).second) { | 
460  |  |       // Already seen this node, don't add another edge.  | 
461  | 0  |       return;  | 
462  | 0  |     }  | 
463  | 0  |     auto nameRef = llvh::StringRef::withNullAsEmpty(name);  | 
464  | 0  |     if (!nameRef.empty()) { | 
465  | 0  |       snap_.addNamedEdge(  | 
466  | 0  |           weak ? HeapSnapshot::EdgeType::Weak  | 
467  | 0  |                : HeapSnapshot::EdgeType::Internal,  | 
468  | 0  |           nameRef,  | 
469  | 0  |           id);  | 
470  | 0  |     } else if (weak) { | 
471  | 0  |       std::string numericName = std::to_string(nextEdge_++);  | 
472  | 0  |       snap_.addNamedEdge(HeapSnapshot::EdgeType::Weak, numericName.c_str(), id);  | 
473  | 0  |     } else { | 
474  |  |       // Unnamed edges get indices.  | 
475  | 0  |       snap_.addIndexedEdge(HeapSnapshot::EdgeType::Element, nextEdge_++, id);  | 
476  | 0  |     }  | 
477  | 0  |   }  | 
478  |  | };  | 
479  |  |  | 
480  |  | } // namespace  | 
481  |  |  | 
482  |  | void GCBase::createSnapshotImpl(  | 
483  |  |     GC &gc,  | 
484  |  |     HeapSnapshot &snap,  | 
485  | 0  |     SavedNumRootEdges &numRootEdges) { | 
486  | 0  |   const auto rootScan = [&gc, &snap, &numRootEdges, this]() { | 
487  | 0  |     { | 
488  |  |       // Make the super root node and add edges to each root section.  | 
489  | 0  |       SnapshotRootSectionAcceptor rootSectionAcceptor(getPointerBase(), snap);  | 
490  |  |       // The super root has a single element pointing to the "(GC roots)"  | 
491  |  |       // synthetic node. v8 also has some "shortcut" edges to things like the  | 
492  |  |       // global object, but those don't seem necessary for correctness.  | 
493  | 0  |       snap.beginNode();  | 
494  | 0  |       snap.addIndexedEdge(  | 
495  | 0  |           HeapSnapshot::EdgeType::Element,  | 
496  | 0  |           1,  | 
497  | 0  |           IDTracker::reserved(IDTracker::ReservedObjectID::GCRoots));  | 
498  | 0  |       snap.endNode(  | 
499  | 0  |           HeapSnapshot::NodeType::Synthetic,  | 
500  | 0  |           "",  | 
501  | 0  |           IDTracker::reserved(IDTracker::ReservedObjectID::SuperRoot),  | 
502  | 0  |           0,  | 
503  | 0  |           0);  | 
504  | 0  |       snapshotAddGCNativeNodes(snap);  | 
505  | 0  |       snap.beginNode();  | 
506  | 0  |       markRoots(rootSectionAcceptor, true);  | 
507  | 0  |       markWeakRoots(rootSectionAcceptor, /*markLongLived*/ true);  | 
508  | 0  |       snapshotAddGCNativeEdges(snap);  | 
509  | 0  |       snap.endNode(  | 
510  | 0  |           HeapSnapshot::NodeType::Synthetic,  | 
511  | 0  |           "(GC roots)",  | 
512  | 0  |           static_cast<HeapSnapshot::NodeID>(  | 
513  | 0  |               IDTracker::reserved(IDTracker::ReservedObjectID::GCRoots)),  | 
514  | 0  |           0,  | 
515  | 0  |           0);  | 
516  | 0  |     }  | 
517  | 0  |     { | 
518  |  |       // Make a node for each root section and add edges into the actual heap.  | 
519  |  |       // Within a root section, there might be duplicates. The root acceptor  | 
520  |  |       // filters out duplicate edges because there cannot be duplicate edges to  | 
521  |  |       // nodes reachable from the super root.  | 
522  | 0  |       SnapshotRootAcceptor rootAcceptor(gc, snap, numRootEdges);  | 
523  | 0  |       markRoots(rootAcceptor, true);  | 
524  | 0  |       markWeakRoots(rootAcceptor, /*markLongLived*/ true);  | 
525  | 0  |     }  | 
526  | 0  |     gcCallbacks_.visitIdentifiers([&snap, this](  | 
527  | 0  |                                       SymbolID sym,  | 
528  | 0  |                                       const StringPrimitive *str) { | 
529  | 0  |       snap.beginNode();  | 
530  | 0  |       if (str) { | 
531  | 0  |         snap.addNamedEdge(  | 
532  | 0  |             HeapSnapshot::EdgeType::Internal, "description", getObjectID(str));  | 
533  | 0  |       }  | 
534  | 0  |       snap.endNode(  | 
535  | 0  |           HeapSnapshot::NodeType::Symbol,  | 
536  | 0  |           convertSymbolToUTF8(sym),  | 
537  | 0  |           idTracker_.getObjectID(sym),  | 
538  | 0  |           sizeof(SymbolID),  | 
539  | 0  |           0);  | 
540  | 0  |     });  | 
541  | 0  |   };  | 
542  |  |  | 
543  | 0  |   snap.beginSection(HeapSnapshot::Section::Nodes);  | 
544  | 0  |   rootScan();  | 
545  |  |   // Add all primitive values as nodes if they weren't added before.  | 
546  |  |   // This must be done as a step before adding any edges to these nodes.  | 
547  |  |   // In particular, custom edge adders might try to add edges to primitives that  | 
548  |  |   // haven't been recorded yet.  | 
549  |  |   // The acceptor is recording some state between objects, so define it outside  | 
550  |  |   // the loop.  | 
551  | 0  |   PrimitiveNodeAcceptor primitiveAcceptor(  | 
552  | 0  |       getPointerBase(), snap, getIDTracker());  | 
553  | 0  |   SlotVisitorWithNames<PrimitiveNodeAcceptor> primitiveVisitor{ | 
554  | 0  |       primitiveAcceptor};  | 
555  |  |   // Add a node for each object in the heap.  | 
556  | 0  |   const auto snapshotForObject =  | 
557  | 0  |       [&snap, &primitiveVisitor, &gc, this](GCCell *cell) { | 
558  | 0  |         auto &allocationLocationTracker = getAllocationLocationTracker();  | 
559  |  |         // First add primitive nodes.  | 
560  | 0  |         markCellWithNames(primitiveVisitor, cell);  | 
561  | 0  |         EdgeAddingAcceptor acceptor(gc, snap);  | 
562  | 0  |         SlotVisitorWithNames<EdgeAddingAcceptor> visitor(acceptor);  | 
563  |  |         // Allow nodes to add extra nodes not in the JS heap.  | 
564  | 0  |         cell->getVT()->snapshotMetaData.addNodes(cell, gc, snap);  | 
565  | 0  |         snap.beginNode();  | 
566  |  |         // Add all internal edges first.  | 
567  | 0  |         markCellWithNames(visitor, cell);  | 
568  |  |         // Allow nodes to add custom edges not represented by metadata.  | 
569  | 0  |         cell->getVT()->snapshotMetaData.addEdges(cell, gc, snap);  | 
570  | 0  |         auto stackTracesTreeNode =  | 
571  | 0  |             allocationLocationTracker.getStackTracesTreeNodeForAlloc(  | 
572  | 0  |                 gc.getObjectID(cell));  | 
573  | 0  |         snap.endNode(  | 
574  | 0  |             cell->getVT()->snapshotMetaData.nodeType(),  | 
575  | 0  |             cell->getVT()->snapshotMetaData.nameForNode(cell, gc),  | 
576  | 0  |             gc.getObjectID(cell),  | 
577  | 0  |             cell->getAllocatedSize(),  | 
578  | 0  |             stackTracesTreeNode ? stackTracesTreeNode->id : 0);  | 
579  | 0  |       };  | 
580  | 0  |   gc.forAllObjs(snapshotForObject);  | 
581  |  |   // Scan all WeakMapEntrySlot so that PrimitiveNodeAcceptor won't miss  | 
582  |  |   // primitives stored as WeakMap values.  | 
583  | 0  |   weakMapEntrySlots_.forEach([&primitiveAcceptor](WeakMapEntrySlot &slot) { | 
584  | 0  |     primitiveAcceptor.accept(slot.mappedValue);  | 
585  | 0  |   });  | 
586  |  |   // Write the singleton number nodes into the snapshot.  | 
587  | 0  |   primitiveAcceptor.writeAllNodes();  | 
588  | 0  |   snap.endSection(HeapSnapshot::Section::Nodes);  | 
589  |  |  | 
590  | 0  |   snap.beginSection(HeapSnapshot::Section::Edges);  | 
591  | 0  |   rootScan();  | 
592  |  |   // No need to run the primitive scan again, as it only adds nodes, not edges.  | 
593  |  |   // Add edges between objects in the heap.  | 
594  | 0  |   gc.forAllObjs(snapshotForObject);  | 
595  | 0  |   snap.endSection(HeapSnapshot::Section::Edges);  | 
596  |  |  | 
597  | 0  |   snap.emitAllocationTraceInfo();  | 
598  |  |  | 
599  | 0  |   snap.beginSection(HeapSnapshot::Section::Samples);  | 
600  | 0  |   getAllocationLocationTracker().addSamplesToSnapshot(snap);  | 
601  | 0  |   snap.endSection(HeapSnapshot::Section::Samples);  | 
602  |  |  | 
603  | 0  |   snap.beginSection(HeapSnapshot::Section::Locations);  | 
604  | 0  |   forAllObjs([&snap, &gc](GCCell *cell) { | 
605  | 0  |     cell->getVT()->snapshotMetaData.addLocations(cell, gc, snap);  | 
606  | 0  |   });  | 
607  | 0  |   snap.endSection(HeapSnapshot::Section::Locations);  | 
608  | 0  | }  | 
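// The section order above (Nodes, then Edges, then Samples and Locations)
// follows the Chrome/V8 heap snapshot JSON layout. The Nodes and Edges passes
// deliberately repeat the exact same traversal (rootScan plus forAllObjs):
// edges are serialized as indices into the node array, so both passes must
// visit roots and objects in an identical order.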
609  |  |  | 
610  |  | void GCBase::createSnapshot(  | 
611  |  |     GC &gc,  | 
612  |  |     llvh::raw_ostream &os,  | 
613  | 0  |     bool captureNumericValue) { | 
614  | 0  |   if (!captureNumericValue) { | 
615  | 0  |     idTracker_.stopTrackingNumberIDs();  | 
616  | 0  |   }  | 
617  |  |   // Chrome 125 requires correct node count and edge count in the "snapshot"  | 
618  |  |   // field, which is at the beginning of the heap snapshot. We do two passes to  | 
619  |  |   // populate the correct node/edge count. First, we create a dummy HeapSnapshot  | 
620  |  |   // instance with a no-op JSON emitter, and invoke createSnapshotImpl() with  | 
621  |  |   // it. From that instance we can get the node count and edge count, and use  | 
622  |  |   // them to create a HeapSnapshot instance in the second pass.  | 
623  | 0  |   JSONEmitter dummyJSON{llvh::nulls()}; | 
624  | 0  |   HeapSnapshot dummySnap{dummyJSON, 0, 0, 0, gcCallbacks_.getStackTracesTree()}; | 
625  |  |   // Array for saving the number of edges for each root section. We set the  | 
626  |  |   // value the first time we visit a root section, and make sure the same number  | 
627  |  |   // of edges is added in a single call of this function.  | 
628  | 0  |   SavedNumRootEdges numRootEdges;  | 
629  | 0  |   createSnapshotImpl(gc, dummySnap, numRootEdges);  | 
630  |  |  | 
631  |  |   // Second pass, write out the real snapshot with the correct node_count and  | 
632  |  |   // edge_count.  | 
633  | 0  |   JSONEmitter json{os}; | 
634  | 0  |   HeapSnapshot snap{ | 
635  | 0  |       json,  | 
636  | 0  |       dummySnap.getNodeCount(),  | 
637  | 0  |       dummySnap.getEdgeCount(),  | 
638  | 0  |       dummySnap.getTraceFunctionCount(),  | 
639  | 0  |       gcCallbacks_.getStackTracesTree()};  | 
640  | 0  |   createSnapshotImpl(gc, snap, numRootEdges);  | 
641  |  |   // Check if the node/edge counts of the two passes are equal.  | 
642  | 0  |   assert(  | 
643  | 0  |       dummySnap.getNodeCount() == snap.getNodeCount() &&  | 
644  | 0  |       "Node count of two passes of createSnapshotImpl are not equal");  | 
645  | 0  |   assert(  | 
646  | 0  |       dummySnap.getEdgeCount() == snap.getEdgeCount() &&  | 
647  | 0  |       "Edge count of two passes of createSnapshotImpl are not equal");  | 
648  | 0  |   idTracker_.startTrackingNumberIDs();  | 
649  | 0  | }  | 
650  |  |  | 
651  | 0  | void GCBase::snapshotAddGCNativeNodes(HeapSnapshot &snap) { | 
652  | 0  |   snap.beginNode();  | 
653  | 0  |   snap.endNode(  | 
654  | 0  |       HeapSnapshot::NodeType::Native,  | 
655  | 0  |       "hermes::ManagedChunkedList<WeakRefSlot>",  | 
656  | 0  |       IDTracker::reserved(IDTracker::ReservedObjectID::WeakRefSlotStorage),  | 
657  | 0  |       weakSlots_.capacity() * sizeof(decltype(weakSlots_)::value_type),  | 
658  | 0  |       0);  | 
659  | 0  | }  | 
660  |  |  | 
661  | 0  | void GCBase::snapshotAddGCNativeEdges(HeapSnapshot &snap) { | 
662  | 0  |   snap.addNamedEdge(  | 
663  | 0  |       HeapSnapshot::EdgeType::Internal,  | 
664  | 0  |       "weakRefSlots",  | 
665  | 0  |       IDTracker::reserved(IDTracker::ReservedObjectID::WeakRefSlotStorage));  | 
666  | 0  | }  | 
667  |  |  | 
668  |  | void GCBase::enableHeapProfiler(  | 
669  |  |     std::function<void(  | 
670  |  |         uint64_t,  | 
671  |  |         std::chrono::microseconds,  | 
672  |  |         std::vector<GCBase::AllocationLocationTracker::HeapStatsUpdate>)>  | 
673  | 0  |         fragmentCallback) { | 
674  | 0  |   getAllocationLocationTracker().enable(std::move(fragmentCallback));  | 
675  | 0  | }  | 
676  |  |  | 
677  | 0  | void GCBase::disableHeapProfiler() { | 
678  | 0  |   getAllocationLocationTracker().disable();  | 
679  | 0  | }  | 
680  |  |  | 
681  | 0  | void GCBase::enableSamplingHeapProfiler(size_t samplingInterval, int64_t seed) { | 
682  | 0  |   getSamplingAllocationTracker().enable(samplingInterval, seed);  | 
683  | 0  | }  | 
684  |  |  | 
685  | 0  | void GCBase::disableSamplingHeapProfiler(llvh::raw_ostream &os) { | 
686  | 0  |   getSamplingAllocationTracker().disable(os);  | 
687  | 0  | }  | 
688  |  | #endif // HERMES_MEMORY_INSTRUMENTATION  | 
689  |  |  | 
690  | 0  | void GCBase::checkTripwire(size_t dataSize) { | 
691  | 0  |   if (LLVM_LIKELY(!tripwireCallback_) ||  | 
692  | 0  |       LLVM_LIKELY(dataSize < tripwireLimit_) || tripwireCalled_) { | 
693  | 0  |     return;  | 
694  | 0  |   }  | 
695  |  |  | 
696  | 0  | #ifdef HERMES_MEMORY_INSTRUMENTATION  | 
697  | 0  |   class Ctx : public GCTripwireContext { | 
698  | 0  |    public:  | 
699  | 0  |     Ctx(GCBase *gc) : gc_(gc) {} | 
700  |  |  | 
701  | 0  |     std::error_code createSnapshotToFile(const std::string &path) override { | 
702  | 0  |       return gc_->createSnapshotToFile(path);  | 
703  | 0  |     }  | 
704  |  |  | 
705  | 0  |     std::error_code createSnapshot(std::ostream &os, bool captureNumericValue)  | 
706  | 0  |         override { | 
707  | 0  |       llvh::raw_os_ostream ros(os);  | 
708  | 0  |       gc_->createSnapshot(ros, captureNumericValue);  | 
709  | 0  |       return std::error_code{}; | 
710  | 0  |     }  | 
711  |  |  | 
712  | 0  |    private:  | 
713  | 0  |     GCBase *gc_;  | 
714  | 0  |   } ctx(this);  | 
715  |  | #else // !defined(HERMES_MEMORY_INSTRUMENTATION)  | 
716  |  |   class Ctx : public GCTripwireContext { | 
717  |  |    public:  | 
718  |  |     std::error_code createSnapshotToFile(const std::string &path) override { | 
719  |  |       return std::error_code(ENOSYS, std::system_category());  | 
720  |  |     }  | 
721  |  |  | 
722  |  |     std::error_code createSnapshot(std::ostream &os, bool captureNumericValue)  | 
723  |  |         override { | 
724  |  |       return std::error_code(ENOSYS, std::system_category());  | 
725  |  |     }  | 
726  |  |   } ctx;  | 
727  |  | #endif // !defined(HERMES_MEMORY_INSTRUMENTATION)  | 
728  |  |  | 
729  | 0  |   tripwireCalled_ = true;  | 
730  | 0  |   tripwireCallback_(ctx);  | 
731  | 0  | }  | 
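// Hypothetical configuration sketch for the tripwire above; the builder
// method names here are assumptions, not verified Hermes API. The callback
// fires at most once per GC instance (tripwireCalled_), the first time
// checkTripwire() sees a dataSize at or above the limit, and receives a
// GCTripwireContext that can write a snapshot:
//
//   GCConfig config = GCConfig::Builder()
//       .withTripwireConfig(GCTripwireConfig::Builder()
//                               .withLimit(512 << 20) // 512 MiB
//                               .withCallback([](GCTripwireContext &ctx) {
//                                 ctx.createSnapshotToFile("trip.heapsnapshot");
//                               })
//                               .build())
//       .build();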
732  |  |  | 
733  | 0  | void GCBase::printAllCollectedStats(llvh::raw_ostream &os) { | 
734  | 0  |   if (!recordGcStats_)  | 
735  | 0  |     return;  | 
736  |  |  | 
737  | 0  |   dump(os);  | 
738  | 0  |   os << "GC stats:\n";  | 
739  | 0  |   JSONEmitter json{os, /*pretty*/ true}; | 
740  | 0  |   json.openDict();  | 
741  | 0  |   printStats(json);  | 
742  | 0  |   json.closeDict();  | 
743  | 0  |   os << "\n";  | 
744  | 0  | }  | 
745  |  |  | 
746  | 0  | void GCBase::getHeapInfo(HeapInfo &info) { | 
747  | 0  |   info.numCollections = cumStats_.numCollections;  | 
748  | 0  | }  | 
749  |  |  | 
750  | 0  | void GCBase::getHeapInfoWithMallocSize(HeapInfo &info) { | 
751  |  |   // Assign to overwrite anything previously in the heap info.  | 
752  | 0  |   info.mallocSizeEstimate =  | 
753  | 0  |       weakSlots_.capacity() * sizeof(decltype(weakSlots_)::value_type);  | 
754  | 0  | }  | 
755  |  |  | 
756  |  | #ifndef NDEBUG  | 
757  | 0  | void GCBase::getDebugHeapInfo(DebugHeapInfo &info) { | 
758  | 0  |   recordNumAllocatedObjects();  | 
759  | 0  |   info.numAllocatedObjects = numAllocatedObjects_;  | 
760  | 0  |   info.numReachableObjects = numReachableObjects_;  | 
761  | 0  |   info.numCollectedObjects = numCollectedObjects_;  | 
762  | 0  |   info.numFinalizedObjects = numFinalizedObjects_;  | 
763  | 0  |   info.numMarkedSymbols = numMarkedSymbols_;  | 
764  | 0  |   info.numHiddenClasses = numHiddenClasses_;  | 
765  | 0  |   info.numLeafHiddenClasses = numLeafHiddenClasses_;  | 
766  | 0  | }  | 
767  |  |  | 
768  | 0  | size_t GCBase::countUsedWeakRefs() const { | 
769  | 0  |   return weakSlots_.sizeForTests();  | 
770  | 0  | }  | 
771  |  | #endif  | 
772  |  |  | 
773  |  | #ifndef NDEBUG  | 
774  | 0  | void GCBase::DebugHeapInfo::assertInvariants() const { | 
775  |  |   // The number of allocated objects at any time is at least the number  | 
776  |  |   // found reachable in the last collection.  | 
777  | 0  |   assert(numAllocatedObjects >= numReachableObjects);  | 
778  |  |   // The number of objects finalized in the last collection is at most the  | 
779  |  |   // number of objects collected.  | 
780  | 0  |   assert(numCollectedObjects >= numFinalizedObjects);  | 
781  | 0  | }  | 
782  |  | #endif  | 
783  |  |  | 
784  | 0  | void GCBase::dump(llvh::raw_ostream &, bool) { /* nop */ } | 
785  |  |  | 
786  | 0  | void GCBase::printStats(JSONEmitter &json) { | 
787  | 0  |   json.emitKeyValue("type", "hermes"); | 
788  | 0  |   json.emitKeyValue("version", 0); | 
789  | 0  |   gcCallbacks_.printRuntimeGCStats(json);  | 
790  |  |  | 
791  | 0  |   std::chrono::duration<double> elapsedTime =  | 
792  | 0  |       std::chrono::steady_clock::now() - execStartTime_;  | 
793  | 0  |   auto elapsedCPUSeconds =  | 
794  | 0  |       std::chrono::duration_cast<std::chrono::duration<double>>(  | 
795  | 0  |           oscompat::thread_cpu_time())  | 
796  | 0  |           .count() -  | 
797  | 0  |       std::chrono::duration_cast<std::chrono::duration<double>>(  | 
798  | 0  |           execStartCPUTime_)  | 
799  | 0  |           .count();  | 
800  |  |  | 
801  | 0  |   HeapInfo info;  | 
802  | 0  |   getHeapInfoWithMallocSize(info);  | 
803  | 0  |   getHeapInfo(info);  | 
804  | 0  | #ifndef NDEBUG  | 
805  | 0  |   DebugHeapInfo debugInfo;  | 
806  | 0  |   getDebugHeapInfo(debugInfo);  | 
807  | 0  | #endif  | 
808  |  |  | 
809  | 0  |   json.emitKey("heapInfo"); | 
810  | 0  |   json.openDict();  | 
811  | 0  | #ifndef NDEBUG  | 
812  | 0  |   json.emitKeyValue("Num allocated cells", debugInfo.numAllocatedObjects); | 
813  | 0  |   json.emitKeyValue("Num reachable cells", debugInfo.numReachableObjects); | 
814  | 0  |   json.emitKeyValue("Num collected cells", debugInfo.numCollectedObjects); | 
815  | 0  |   json.emitKeyValue("Num finalized cells", debugInfo.numFinalizedObjects); | 
816  | 0  |   json.emitKeyValue("Num marked symbols", debugInfo.numMarkedSymbols); | 
817  | 0  |   json.emitKeyValue("Num hidden classes", debugInfo.numHiddenClasses); | 
818  | 0  |   json.emitKeyValue("Num leaf classes", debugInfo.numLeafHiddenClasses); | 
819  | 0  |   json.emitKeyValue("Num weak references", ((GC *)this)->countUsedWeakRefs()); | 
820  | 0  | #endif  | 
821  | 0  |   json.emitKeyValue("Peak RSS", oscompat::peak_rss()); | 
822  | 0  |   json.emitKeyValue("Current RSS", oscompat::current_rss()); | 
823  | 0  |   json.emitKeyValue("Current Dirty", oscompat::current_private_dirty()); | 
824  | 0  |   json.emitKeyValue("Heap size", info.heapSize); | 
825  | 0  |   json.emitKeyValue("Allocated bytes", info.allocatedBytes); | 
826  | 0  |   json.emitKeyValue("Num collections", info.numCollections); | 
827  | 0  |   json.emitKeyValue("Malloc size", info.mallocSizeEstimate); | 
828  | 0  |   json.closeDict();  | 
829  |  |  | 
830  | 0  |   long vol = -1;  | 
831  | 0  |   long invol = -1;  | 
832  | 0  |   if (oscompat::num_context_switches(vol, invol)) { | 
833  | 0  |     vol -= startNumVoluntaryContextSwitches_;  | 
834  | 0  |     invol -= startNumInvoluntaryContextSwitches_;  | 
835  | 0  |   }  | 
836  |  |  | 
837  | 0  |   json.emitKey("general"); | 
838  | 0  |   json.openDict();  | 
839  | 0  |   json.emitKeyValue("numCollections", cumStats_.numCollections); | 
840  | 0  |   json.emitKeyValue("totalTime", elapsedTime.count()); | 
841  | 0  |   json.emitKeyValue("totalCPUTime", elapsedCPUSeconds); | 
842  | 0  |   json.emitKeyValue("totalGCTime", formatSecs(cumStats_.gcWallTime.sum()).secs); | 
843  | 0  |   json.emitKeyValue("volCtxSwitch", vol); | 
844  | 0  |   json.emitKeyValue("involCtxSwitch", invol); | 
845  | 0  |   json.emitKeyValue(  | 
846  | 0  |       "avgGCPause", formatSecs(cumStats_.gcWallTime.average()).secs);  | 
847  | 0  |   json.emitKeyValue("maxGCPause", formatSecs(cumStats_.gcWallTime.max()).secs); | 
848  | 0  |   json.emitKeyValue(  | 
849  | 0  |       "totalGCCPUTime", formatSecs(cumStats_.gcCPUTime.sum()).secs);  | 
850  | 0  |   json.emitKeyValue(  | 
851  | 0  |       "avgGCCPUPause", formatSecs(cumStats_.gcCPUTime.average()).secs);  | 
852  | 0  |   json.emitKeyValue(  | 
853  | 0  |       "maxGCCPUPause", formatSecs(cumStats_.gcCPUTime.max()).secs);  | 
854  | 0  |   json.emitKeyValue("finalHeapSize", formatSize(cumStats_.finalHeapSize).bytes); | 
855  | 0  |   json.emitKeyValue(  | 
856  | 0  |       "peakAllocatedBytes", formatSize(getPeakAllocatedBytes()).bytes);  | 
857  | 0  |   json.emitKeyValue("peakLiveAfterGC", formatSize(getPeakLiveAfterGC()).bytes); | 
858  | 0  |   json.emitKeyValue(  | 
859  | 0  |       "totalAllocatedBytes", formatSize(info.totalAllocatedBytes).bytes);  | 
860  | 0  |   json.closeDict();  | 
861  |  |  | 
862  | 0  |   json.emitKey("collections"); | 
863  | 0  |   json.openArray();  | 
864  | 0  |   for (const auto &event : analyticsEvents_) { | 
865  | 0  |     json.openDict();  | 
866  | 0  |     json.emitKeyValue("runtimeDescription", event.runtimeDescription); | 
867  | 0  |     json.emitKeyValue("gcKind", event.gcKind); | 
868  | 0  |     json.emitKeyValue("collectionType", event.collectionType); | 
869  | 0  |     json.emitKeyValue("cause", event.cause); | 
870  | 0  |     json.emitKeyValue("duration", event.duration.count()); | 
871  | 0  |     json.emitKeyValue("cpuDuration", event.cpuDuration.count()); | 
872  | 0  |     json.emitKeyValue("preAllocated", event.allocated.before); | 
873  | 0  |     json.emitKeyValue("postAllocated", event.allocated.after); | 
874  | 0  |     json.emitKeyValue("preSize", event.size.before); | 
875  | 0  |     json.emitKeyValue("postSize", event.size.after); | 
876  | 0  |     json.emitKeyValue("preExternal", event.external.before); | 
877  | 0  |     json.emitKeyValue("postExternal", event.external.after); | 
878  | 0  |     json.emitKeyValue("survivalRatio", event.survivalRatio); | 
879  | 0  |     json.emitKey("tags"); | 
880  | 0  |     json.openArray();  | 
881  | 0  |     for (const auto &tag : event.tags) { | 
882  | 0  |       json.emitValue(tag);  | 
883  | 0  |     }  | 
884  | 0  |     json.closeArray();  | 
885  | 0  |     json.closeDict();  | 
886  | 0  |   }  | 
887  | 0  |   json.closeArray();  | 
888  | 0  | }  | 
889  |  |  | 
890  |  | void GCBase::recordGCStats(  | 
891  |  |     const GCAnalyticsEvent &event,  | 
892  |  |     CumulativeHeapStats *stats,  | 
893  | 226  |     bool onMutator) { | 
894  |  |   // Hades OG collections do not block the mutator, and so do not contribute to  | 
895  |  |   // the max pause time or the total execution time.  | 
896  | 226  |   if (onMutator)  | 
897  | 226  |     stats->gcWallTime.record(  | 
898  | 226  |         std::chrono::duration<double>(event.duration).count());  | 
899  | 226  |   stats->gcCPUTime.record(  | 
900  | 226  |       std::chrono::duration<double>(event.cpuDuration).count());  | 
901  | 226  |   stats->finalHeapSize = event.size.after;  | 
902  | 226  |   stats->usedBefore.record(event.allocated.before);  | 
903  | 226  |   stats->usedAfter.record(event.allocated.after);  | 
904  | 226  |   stats->numCollections++;  | 
905  | 226  | }  | 
906  |  |  | 
907  | 113  | void GCBase::recordGCStats(const GCAnalyticsEvent &event, bool onMutator) { | 
908  | 113  |   if (analyticsCallback_) { | 
909  | 0  |     analyticsCallback_(event);  | 
910  | 0  |   }  | 
911  | 113  |   if (recordGcStats_) { | 
912  | 0  |     analyticsEvents_.push_back(event);  | 
913  | 0  |   }  | 
914  | 113  |   recordGCStats(event, &cumStats_, onMutator);  | 
915  | 113  | }  | 
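// The one-argument overload above fans a single GCAnalyticsEvent out to three
// sinks: the external analytics callback (if installed), the retained
// analyticsEvents_ list consumed by printStats, and the cumulative counters
// in cumStats_ via the three-argument overload.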
916  |  |  | 
917  | 0  | void GCBase::oom(std::error_code reason) { | 
918  | 0  |   hasOOMed_ = true;  | 
919  | 0  |   char detailBuffer[400];  | 
920  | 0  |   oomDetail(detailBuffer, reason);  | 
921  |  | #ifdef HERMESVM_EXCEPTION_ON_OOM  | 
922  |  |   // No need to run finalizeAll, the exception will propagate and eventually run  | 
923  |  |   // ~Runtime.  | 
924  |  |   throw JSOutOfMemoryError(  | 
925  |  |       std::string(detailBuffer) + "\ncall stack:\n" +  | 
926  |  |       gcCallbacks_.getCallStackNoAlloc());  | 
927  |  | #else  | 
928  | 0  |   hermesLog("HermesGC", "OOM: %s.", detailBuffer); | 
929  |  |   // Record the OOM custom data with the crash manager.  | 
930  | 0  |   crashMgr_->setCustomData("HermesGCOOMDetailBasic", detailBuffer); | 
931  | 0  |   hermes_fatal("OOM", reason); | 
932  | 0  | #endif  | 
933  | 0  | }  | 
934  |  |  | 
935  |  | void GCBase::oomDetail(  | 
936  |  |     llvh::MutableArrayRef<char> detailBuffer,  | 
937  | 0  |     std::error_code reason) { | 
938  | 0  |   HeapInfo heapInfo;  | 
939  | 0  |   getHeapInfo(heapInfo);  | 
940  | 0  |   snprintf(  | 
941  | 0  |       detailBuffer.data(),  | 
942  | 0  |       detailBuffer.size(),  | 
943  | 0  |       "[%.20s] reason = %.150s (%d from category: %.50s), numCollections = %u, heapSize = %" PRIu64  | 
944  | 0  |       ", allocated = %" PRIu64 ", va = %" PRIu64 ", external = %" PRIu64,  | 
945  | 0  |       name_.c_str(),  | 
946  | 0  |       reason.message().c_str(),  | 
947  | 0  |       reason.value(),  | 
948  | 0  |       reason.category().name(),  | 
949  | 0  |       heapInfo.numCollections,  | 
950  | 0  |       heapInfo.heapSize,  | 
951  | 0  |       heapInfo.allocatedBytes,  | 
952  | 0  |       heapInfo.va,  | 
953  | 0  |       heapInfo.externalBytes);  | 
954  | 0  | }  | 
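// The precision limits above (%.20s, %.150s, %.50s) keep the formatted detail
// well inside the caller's fixed 400-byte buffer, and snprintf truncates
// rather than overflowing in any case, so detailBuffer is always
// NUL-terminated.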
955  |  |  | 
956  |  | #ifdef HERMESVM_SANITIZE_HANDLES  | 
957  |  | bool GCBase::shouldSanitizeHandles() { | 
958  |  |   static std::uniform_real_distribution<> dist(0.0, 1.0);  | 
959  |  |   return dist(randomEngine_) < sanitizeRate_;  | 
960  |  | }  | 
961  |  | #endif  | 
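// The check above is a standard Bernoulli trial: a uniform draw from [0, 1)
// falls below sanitizeRate_ with exactly that probability, so a rate of 0.01
// sanitizes roughly 1% of opportunities, and seeding randomEngine_ (see the
// constructor) makes a failing sequence reproducible.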
962  |  |  | 
963  |  | #ifdef HERMESVM_GC_RUNTIME  | 
964  |  |  | 
965  |  | #define GCBASE_BARRIER_1(name, type1)                     \  | 
966  |  |   void GCBase::name(type1 arg1) {                         \ | 
967  |  |     runtimeGCDispatch([&](auto *gc) { gc->name(arg1); }); \ | 
968  |  |   }  | 
969  |  |  | 
970  |  | #define GCBASE_BARRIER_2(name, type1, type2)                    \  | 
971  |  |   void GCBase::name(type1 arg1, type2 arg2) {                   \ | 
972  |  |     runtimeGCDispatch([&](auto *gc) { gc->name(arg1, arg2); }); \ | 
973  |  |   }  | 
974  |  |  | 
975  |  | GCBASE_BARRIER_2(writeBarrier, const GCHermesValue *, HermesValue);  | 
976  |  | GCBASE_BARRIER_2(writeBarrier, const GCSmallHermesValue *, SmallHermesValue);  | 
977  |  | GCBASE_BARRIER_2(writeBarrier, const GCPointerBase *, const GCCell *);  | 
978  |  | GCBASE_BARRIER_2(constructorWriteBarrier, const GCHermesValue *, HermesValue);  | 
979  |  | GCBASE_BARRIER_2(  | 
980  |  |     constructorWriteBarrier,  | 
981  |  |     const GCSmallHermesValue *,  | 
982  |  |     SmallHermesValue);  | 
983  |  | GCBASE_BARRIER_2(  | 
984  |  |     constructorWriteBarrier,  | 
985  |  |     const GCPointerBase *,  | 
986  |  |     const GCCell *);  | 
987  |  | GCBASE_BARRIER_2(writeBarrierRange, const GCHermesValue *, uint32_t);  | 
988  |  | GCBASE_BARRIER_2(writeBarrierRange, const GCSmallHermesValue *, uint32_t);  | 
989  |  | GCBASE_BARRIER_2(constructorWriteBarrierRange, const GCHermesValue *, uint32_t);  | 
990  |  | GCBASE_BARRIER_2(  | 
991  |  |     constructorWriteBarrierRange,  | 
992  |  |     const GCSmallHermesValue *,  | 
993  |  |     uint32_t);  | 
994  |  | GCBASE_BARRIER_1(snapshotWriteBarrier, const GCHermesValue *);  | 
995  |  | GCBASE_BARRIER_1(snapshotWriteBarrier, const GCSmallHermesValue *);  | 
996  |  | GCBASE_BARRIER_1(snapshotWriteBarrier, const GCPointerBase *);  | 
997  |  | GCBASE_BARRIER_1(snapshotWriteBarrier, const GCSymbolID *);  | 
998  |  | GCBASE_BARRIER_2(snapshotWriteBarrierRange, const GCHermesValue *, uint32_t);  | 
999  |  | GCBASE_BARRIER_2(  | 
1000  |  |     snapshotWriteBarrierRange,  | 
1001  |  |     const GCSmallHermesValue *,  | 
1002  |  |     uint32_t);  | 
1003  |  | GCBASE_BARRIER_1(weakRefReadBarrier, HermesValue);  | 
1004  |  | GCBASE_BARRIER_1(weakRefReadBarrier, GCCell *);  | 
1005  |  |  | 
1006  |  | #undef GCBASE_BARRIER_1  | 
1007  |  | #undef GCBASE_BARRIER_2  | 
1008  |  | #endif  | 
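// Worked expansion of the dispatch macros: GCBASE_BARRIER_1(weakRefReadBarrier,
// GCCell *) above produces exactly
//
//   void GCBase::weakRefReadBarrier(GCCell *arg1) {
//     runtimeGCDispatch([&](auto *gc) { gc->weakRefReadBarrier(arg1); });
//   }
//
// i.e. each barrier forwards its arguments unchanged to the concrete GC
// selected at runtime.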
1009  |  |  | 
1010  | 95.9k  | WeakRefSlot *GCBase::allocWeakSlot(CompressedPointer ptr) { | 
1011  | 95.9k  |   return &weakSlots_.add(ptr);  | 
1012  | 95.9k  | }  | 
1013  |  |  | 
1014  |  | WeakMapEntrySlot *GCBase::allocWeakMapEntrySlot(  | 
1015  |  |     JSObject *key,  | 
1016  |  |     HermesValue value,  | 
1017  | 0  |     JSWeakMapImplBase *owner) { | 
1018  | 0  |   return &weakMapEntrySlots_.add(  | 
1019  | 0  |       CompressedPointer::encode(key, getPointerBase()),  | 
1020  | 0  |       value,  | 
1021  | 0  |       CompressedPointer::encode(owner, getPointerBase()));  | 
1022  | 0  | }  | 
1023  |  |  | 
1024  | 0  | HeapSnapshot::NodeID GCBase::getObjectID(const GCCell *cell) { | 
1025  | 0  |   assert(cell && "Called getObjectID on a null pointer");  | 
1026  | 0  |   return getObjectID(CompressedPointer::encodeNonNull(  | 
1027  | 0  |       const_cast<GCCell *>(cell), pointerBase_));  | 
1028  | 0  | }  | 
1029  |  |  | 
1030  | 0  | HeapSnapshot::NodeID GCBase::getObjectIDMustExist(const GCCell *cell) { | 
1031  | 0  |   assert(cell && "Called getObjectID on a null pointer");  | 
1032  | 0  |   return idTracker_.getObjectIDMustExist(CompressedPointer::encodeNonNull(  | 
1033  | 0  |       const_cast<GCCell *>(cell), pointerBase_));  | 
1034  | 0  | }  | 
1035  |  |  | 
1036  | 0  | HeapSnapshot::NodeID GCBase::getObjectID(CompressedPointer cell) { | 
1037  | 0  |   assert(cell && "Called getObjectID on a null pointer");  | 
1038  | 0  |   return idTracker_.getObjectID(cell);  | 
1039  | 0  | }  | 
1040  |  |  | 
1041  | 0  | HeapSnapshot::NodeID GCBase::getObjectID(SymbolID sym) { | 
1042  | 0  |   return idTracker_.getObjectID(sym);  | 
1043  | 0  | }  | 
1044  |  |  | 
1045  | 0  | HeapSnapshot::NodeID GCBase::getNativeID(const void *mem) { | 
1046  | 0  |   assert(mem && "Called getNativeID on a null pointer");  | 
1047  | 0  |   return idTracker_.getNativeID(mem);  | 
1048  | 0  | }  | 
1049  |  |  | 
1050  | 0  | bool GCBase::hasObjectID(const GCCell *cell) { | 
1051  | 0  |   assert(cell && "Called hasObjectID on a null pointer");  | 
1052  | 0  |   return idTracker_.hasObjectID(CompressedPointer::encodeNonNull(  | 
1053  | 0  |       const_cast<GCCell *>(cell), pointerBase_));  | 
1054  | 0  | }  | 
1055  |  |  | 
1056  | 8.69M  | void GCBase::newAlloc(const GCCell *ptr, uint32_t sz) { | 
1057  | 8.69M  | #ifdef HERMES_MEMORY_INSTRUMENTATION  | 
1058  | 8.69M  |   allocationLocationTracker_.newAlloc(ptr, sz);  | 
1059  | 8.69M  |   samplingAllocationTracker_.newAlloc(ptr, sz);  | 
1060  | 8.69M  | #endif  | 
1061  | 8.69M  | }  | 
1062  |  |  | 
1063  |  | void GCBase::moveObject(  | 
1064  |  |     const GCCell *oldPtr,  | 
1065  |  |     uint32_t oldSize,  | 
1066  |  |     const GCCell *newPtr,  | 
1067  | 0  |     uint32_t newSize) { | 
1068  | 0  |   idTracker_.moveObject(  | 
1069  | 0  |       CompressedPointer::encodeNonNull(  | 
1070  | 0  |           const_cast<GCCell *>(oldPtr), pointerBase_),  | 
1071  | 0  |       CompressedPointer::encodeNonNull(  | 
1072  | 0  |           const_cast<GCCell *>(newPtr), pointerBase_));  | 
1073  | 0  | #ifdef HERMES_MEMORY_INSTRUMENTATION  | 
1074  |  |   // Use newPtr here because the idTracker_ just moved it.  | 
1075  | 0  |   allocationLocationTracker_.updateSize(newPtr, oldSize, newSize);  | 
1076  | 0  |   samplingAllocationTracker_.updateSize(newPtr, oldSize, newSize);  | 
1077  | 0  | #endif  | 
1078  | 0  | }  | 
1079  |  |  | 
1080  | 0  | void GCBase::untrackObject(const GCCell *cell, uint32_t sz) { | 
1081  | 0  |   assert(cell && "Called untrackObject on a null pointer");  | 
1082  |  |   // The allocation tracker needs to use the ID, so this needs to come  | 
1083  |  |   // before untrackObject.  | 
1084  | 0  | #ifdef HERMES_MEMORY_INSTRUMENTATION  | 
1085  | 0  |   getAllocationLocationTracker().freeAlloc(cell, sz);  | 
1086  | 0  |   getSamplingAllocationTracker().freeAlloc(cell, sz);  | 
1087  | 0  | #endif  | 
1088  | 0  |   idTracker_.untrackObject(CompressedPointer::encodeNonNull(  | 
1089  | 0  |       const_cast<GCCell *>(cell), pointerBase_));  | 
1090  | 0  | }  | 
1091  |  |  | 
1092  |  | #ifndef NDEBUG  | 
1093  | 8.69M  | uint64_t GCBase::nextObjectID() { | 
1094  | 8.69M  |   return debugAllocationCounter_++;  | 
1095  | 8.69M  | }  | 
1096  |  | #endif  | 
1097  |  |  | 
1098  | 0  | const GCExecTrace &GCBase::getGCExecTrace() const { | 
1099  | 0  |   return execTrace_;  | 
1100  | 0  | }  | 
1101  |  |  | 
1102  |  | llvh::raw_ostream &operator<<(  | 
1103  |  |     llvh::raw_ostream &os,  | 
1104  | 0  |     const DurationFormatObj &dfo) { | 
1105  | 0  |   if (dfo.secs >= 1.0) { | 
1106  | 0  |     os << format("%5.3f", dfo.secs) << " s"; | 
1107  | 0  |   } else if (dfo.secs >= 0.001) { | 
1108  | 0  |     os << format("%5.3f", dfo.secs * 1000.0) << " ms"; | 
1109  | 0  |   } else { | 
1110  | 0  |     os << format("%5.3f", dfo.secs * 1000000.0) << " us"; | 
1111  | 0  |   }  | 
1112  | 0  |   return os;  | 
1113  | 0  | }  | 
1114  |  |  | 
1115  | 0  | llvh::raw_ostream &operator<<(llvh::raw_ostream &os, const SizeFormatObj &sfo) { | 
1116  | 0  |   double dblsize = static_cast<double>(sfo.bytes);  | 
1117  | 0  |   if (sfo.bytes >= (1024 * 1024 * 1024)) { | 
1118  | 0  |     double gbs = dblsize / (1024.0 * 1024.0 * 1024.0);  | 
1119  | 0  |     os << format("%0.3f GiB", gbs); | 
1120  | 0  |   } else if (sfo.bytes >= (1024 * 1024)) { | 
1121  | 0  |     double mbs = dblsize / (1024.0 * 1024.0);  | 
1122  | 0  |     os << format("%0.3f MiB", mbs); | 
1123  | 0  |   } else if (sfo.bytes >= 1024) { | 
1124  | 0  |     double kbs = dblsize / 1024.0;  | 
1125  | 0  |     os << format("%0.3f KiB", kbs); | 
1126  | 0  |   } else { | 
1127  | 0  |     os << sfo.bytes << " B";  | 
1128  | 0  |   }  | 
1129  | 0  |   return os;  | 
1130  | 0  | }  | 
1131  |  |  | 
1132  | 160  | GCBase::GCCallbacks::~GCCallbacks() {} | 
1133  |  |  | 
1134  | 160  | GCBase::IDTracker::IDTracker() { | 
1135  | 160  |   assert(lastID_ % 2 == 1 && "First JS object ID isn't odd");  | 
1136  | 160  | }  | 
1137  |  |  | 
1138  |  | void GCBase::IDTracker::moveObject(  | 
1139  |  |     CompressedPointer oldLocation,  | 
1140  | 0  |     CompressedPointer newLocation) { | 
1141  | 0  |   if (oldLocation == newLocation) { | 
1142  |  |     // Don't need to do anything if the object isn't moving anywhere. This can  | 
1143  |  |     // happen in old generations where it is compacted to the same location.  | 
1144  | 0  |     return;  | 
1145  | 0  |   }  | 
1146  | 0  |   std::lock_guard<Mutex> lk{mtx_}; | 
1147  | 0  |   auto old = objectIDMap_.find(oldLocation.getRaw());  | 
1148  | 0  |   if (old == objectIDMap_.end()) { | 
1149  |  |     // Avoid making new keys for objects that don't need to be tracked.  | 
1150  | 0  |     return;  | 
1151  | 0  |   }  | 
1152  | 0  |   const auto oldID = old->second;  | 
1153  | 0  |   assert(  | 
1154  | 0  |       objectIDMap_.count(newLocation.getRaw()) == 0 &&  | 
1155  | 0  |       "Moving to a location that is already tracked");  | 
1156  |  |   // Have to erase first, because any other access can invalidate the iterator.  | 
1157  | 0  |   objectIDMap_.erase(old);  | 
1158  | 0  |   objectIDMap_[newLocation.getRaw()] = oldID;  | 
1159  |  |   // Update the reverse map entry if it exists.  | 
1160  | 0  |   auto reverseMappingIt = idObjectMap_.find(oldID);  | 
1161  | 0  |   if (reverseMappingIt != idObjectMap_.end()) { | 
1162  | 0  |     assert(  | 
1163  | 0  |         reverseMappingIt->second == oldLocation.getRaw() &&  | 
1164  | 0  |         "The reverse mapping should have the old address");  | 
1165  | 0  |     reverseMappingIt->second = newLocation.getRaw();  | 
1166  | 0  |   }  | 
1167  | 0  | }  | 
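// [Editorial example] The erase-before-insert order above matters because
// llvh::DenseMap can rehash on insertion, invalidating live iterators. A
// standalone sketch of the same move-a-key pattern (the key/value types here
// are placeholders, not the real map types):
namespace {
[[maybe_unused]] void moveKeySketch(
    llvh::DenseMap<uint32_t, uint64_t> &map,
    uint32_t oldKey,
    uint32_t newKey) {
  auto it = map.find(oldKey);
  if (it == map.end())
    return;
  const uint64_t value = it->second;
  // Erase through the iterator *before* inserting: operator[] may grow the
  // table and leave `it` dangling.
  map.erase(it);
  map[newKey] = value;
}
} // namespace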
1168  |  |  | 
1169  |  | llvh::SmallVector<HeapSnapshot::NodeID, 1> &  | 
1170  | 0  | GCBase::IDTracker::getExtraNativeIDs(HeapSnapshot::NodeID node) { | 
1171  | 0  |   std::lock_guard<Mutex> lk{mtx_}; | 
1172  |  |   // The operator[] will default construct the vector to be empty if it doesn't  | 
1173  |  |   // exist.  | 
1174  | 0  |   return extraNativeIDs_[node];  | 
1175  | 0  | }  | 
1176  |  |  | 
1177  | 0  | HeapSnapshot::NodeID GCBase::IDTracker::getNumberID(double num) { | 
1178  | 0  |   std::lock_guard<Mutex> lk{mtx_}; | 
1179  | 0  |   if (isTrackingNumberIDs_) { | 
1180  | 0  |     auto &numberRef = numberIDMap_[num];  | 
1181  |  |     // If the entry didn't exist, the value was initialized to 0.  | 
1182  | 0  |     if (numberRef != 0) { | 
1183  | 0  |       return numberRef;  | 
1184  | 0  |     }  | 
1185  |  |     // Else, it is a number that hasn't been seen before.  | 
1186  | 0  |     return numberRef = nextNumberID();  | 
1187  | 0  |   } else { | 
1188  | 0  |     return GCBase::IDTracker::reserved(  | 
1189  | 0  |         GCBase::IDTracker::ReservedObjectID::Number);  | 
1190  | 0  |   }  | 
1191  | 0  | }  | 
1192  |  |  | 
1193  |  | llvh::Optional<CompressedPointer> GCBase::IDTracker::getObjectForID(  | 
1194  | 0  |     HeapSnapshot::NodeID id) { | 
1195  | 0  |   std::lock_guard<Mutex> lk{mtx_}; | 
1196  | 0  |   auto it = idObjectMap_.find(id);  | 
1197  | 0  |   if (it != idObjectMap_.end()) { | 
1198  | 0  |     return CompressedPointer::fromRaw(it->second);  | 
1199  | 0  |   }  | 
1200  |  |   // Do an O(N) search through the map, then cache the result.  | 
1201  |  |   // This trades time for memory, since this is a rare operation.  | 
1202  | 0  |   for (const auto &p : objectIDMap_) { | 
1203  | 0  |     if (p.second == id) { | 
1204  |  |       // Cache the result so repeated lookups are fast.  | 
1205  |  |       // This cache is unlikely to grow that large, unless someone hovers over  | 
1206  |  |       // every single object in a snapshot in Chrome.  | 
1207  | 0  |       auto itAndDidInsert = idObjectMap_.try_emplace(p.second, p.first);  | 
1208  | 0  |       assert(itAndDidInsert.second);  | 
1209  | 0  |       return CompressedPointer::fromRaw(itAndDidInsert.first->second);  | 
1210  | 0  |     }  | 
1211  | 0  |   }  | 
1212  |  |   // ID not found in the map, wasn't an object to begin with.  | 
1213  | 0  |   return llvh::None;  | 
1214  | 0  | }  | 
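// [Editorial example] getObjectForID implements a lazily-built reverse index:
// the forward map is authoritative, and reverse entries are memoized only for
// IDs that are actually queried. A generic sketch of the pattern (names and
// types are illustrative):
namespace {
[[maybe_unused]] llvh::Optional<uint32_t> reverseLookupSketch(
    const llvh::DenseMap<uint32_t, uint64_t> &forward,
    llvh::DenseMap<uint64_t, uint32_t> &reverseCache,
    uint64_t id) {
  auto it = reverseCache.find(id);
  if (it != reverseCache.end())
    return it->second;
  for (const auto &kv : forward) {
    if (kv.second == id) {
      // Memoize the hit so repeated queries for this ID become O(1).
      return reverseCache.try_emplace(id, kv.first).first->second;
    }
  }
  return llvh::None;
}
} // namespace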
1215  |  |  | 
1216  | 160  | bool GCBase::IDTracker::hasNativeIDs() { | 
1217  | 160  |   std::lock_guard<Mutex> lk{mtx_}; | 
1218  | 160  |   return !nativeIDMap_.empty();  | 
1219  | 160  | }  | 
1220  |  |  | 
1221  | 226  | bool GCBase::IDTracker::hasTrackedObjectIDs() { | 
1222  | 226  |   std::lock_guard<Mutex> lk{mtx_}; | 
1223  | 226  |   return !objectIDMap_.empty();  | 
1224  | 226  | }  | 
1225  |  |  | 
1226  | 0  | bool GCBase::IDTracker::isTrackingNumberIDs() { | 
1227  | 0  |   std::lock_guard<Mutex> lk{mtx_}; | 
1228  | 0  |   return isTrackingNumberIDs_;  | 
1229  | 0  | }  | 
1230  |  |  | 
1231  | 0  | void GCBase::IDTracker::startTrackingNumberIDs() { | 
1232  | 0  |   std::lock_guard<Mutex> lk{mtx_}; | 
1233  | 0  |   isTrackingNumberIDs_ = true;  | 
1234  | 0  | }  | 
1235  |  |  | 
1236  | 0  | void GCBase::IDTracker::stopTrackingNumberIDs() { | 
1237  | 0  |   std::lock_guard<Mutex> lk{mtx_}; | 
1238  | 0  |   isTrackingNumberIDs_ = false;  | 
1239  | 0  | }  | 
1240  |  |  | 
1241  | 0  | HeapSnapshot::NodeID GCBase::IDTracker::getObjectID(CompressedPointer cell) { | 
1242  | 0  |   std::lock_guard<Mutex> lk{mtx_}; | 
1243  | 0  |   auto iter = objectIDMap_.find(cell.getRaw());  | 
1244  | 0  |   if (iter != objectIDMap_.end()) { | 
1245  | 0  |     return iter->second;  | 
1246  | 0  |   }  | 
1247  |  |   // Else, assume it is an object that needs to be tracked and give it a new ID.  | 
1248  | 0  |   const auto objID = nextObjectID();  | 
1249  | 0  |   objectIDMap_[cell.getRaw()] = objID;  | 
1250  | 0  |   return objID;  | 
1251  | 0  | }  | 
1252  |  |  | 
1253  | 0  | bool GCBase::IDTracker::hasObjectID(CompressedPointer cell) { | 
1254  | 0  |   std::lock_guard<Mutex> lk{mtx_}; | 
1255  | 0  |   return objectIDMap_.count(cell.getRaw());  | 
1256  | 0  | }  | 
1257  |  |  | 
1258  |  | HeapSnapshot::NodeID GCBase::IDTracker::getObjectIDMustExist(  | 
1259  | 0  |     CompressedPointer cell) { | 
1260  | 0  |   std::lock_guard<Mutex> lk{mtx_}; | 
1261  | 0  |   auto iter = objectIDMap_.find(cell.getRaw());  | 
1262  | 0  |   assert(iter != objectIDMap_.end() && "cell must already have an ID");  | 
1263  | 0  |   return iter->second;  | 
1264  | 0  | }  | 
1265  |  |  | 
1266  | 0  | HeapSnapshot::NodeID GCBase::IDTracker::getObjectID(SymbolID sym) { | 
1267  | 0  |   std::lock_guard<Mutex> lk{mtx_}; | 
1268  | 0  |   auto iter = symbolIDMap_.find(sym.unsafeGetIndex());  | 
1269  | 0  |   if (iter != symbolIDMap_.end()) { | 
1270  | 0  |     return iter->second;  | 
1271  | 0  |   }  | 
1272  |  |   // Else, assume it is a symbol that needs to be tracked and give it a new ID.  | 
1273  | 0  |   const auto symID = nextObjectID();  | 
1274  | 0  |   symbolIDMap_[sym.unsafeGetIndex()] = symID;  | 
1275  | 0  |   return symID;  | 
1276  | 0  | }  | 
1277  |  |  | 
1278  | 0  | HeapSnapshot::NodeID GCBase::IDTracker::getNativeID(const void *mem) { | 
1279  | 0  |   std::lock_guard<Mutex> lk{mtx_}; | 
1280  | 0  |   auto iter = nativeIDMap_.find(mem);  | 
1281  | 0  |   if (iter != nativeIDMap_.end()) { | 
1282  | 0  |     return iter->second;  | 
1283  | 0  |   }  | 
1284  |  |   // Else, assume it is a piece of native memory that needs to be tracked and  | 
1285  |  |   // give it a new ID.  | 
1286  | 0  |   const auto objID = nextNativeID();  | 
1287  | 0  |   nativeIDMap_[mem] = objID;  | 
1288  | 0  |   return objID;  | 
1289  | 0  | }  | 
1290  |  |  | 
1291  | 0  | void GCBase::IDTracker::untrackObject(CompressedPointer cell) { | 
1292  | 0  |   std::lock_guard<Mutex> lk{mtx_}; | 
1293  |  |   // It's OK if the cell was never tracked: operator[] default-constructs a zero  | 
1294  |  |   // ID, erase removes the entry either way, and zero isn't in extraNativeIDs_.  | 
1295  | 0  |   const auto id = objectIDMap_[cell.getRaw()];  | 
1296  | 0  |   objectIDMap_.erase(cell.getRaw());  | 
1297  | 0  |   extraNativeIDs_.erase(id);  | 
1298  |  |   // Erase the reverse mapping entry if it exists.  | 
1299  | 0  |   idObjectMap_.erase(id);  | 
1300  | 0  | }  | 
1301  |  |  | 
1302  | 10.1k  | void GCBase::IDTracker::untrackNative(const void *mem) { | 
1303  | 10.1k  |   std::lock_guard<Mutex> lk{mtx_}; | 
1304  | 10.1k  |   nativeIDMap_.erase(mem);  | 
1305  | 10.1k  | }  | 
1306  |  |  | 
1307  | 0  | void GCBase::IDTracker::untrackSymbol(uint32_t symIdx) { | 
1308  | 0  |   std::lock_guard<Mutex> lk{mtx_}; | 
1309  | 0  |   symbolIDMap_.erase(symIdx);  | 
1310  | 0  | }  | 
1311  |  |  | 
1312  | 0  | HeapSnapshot::NodeID GCBase::IDTracker::lastID() const { | 
1313  | 0  |   return lastID_;  | 
1314  | 0  | }  | 
1315  |  |  | 
1316  | 0  | HeapSnapshot::NodeID GCBase::IDTracker::nextObjectID() { | 
1317  |  |   // IDs must be unique for the features that rely on them, so check for overflow.  | 
1318  | 0  |   if (LLVM_UNLIKELY(  | 
1319  | 0  |           lastID_ >=  | 
1320  | 0  |           std::numeric_limits<HeapSnapshot::NodeID>::max() - kIDStep)) { | 
1321  | 0  |     hermes_fatal("Ran out of object IDs"); | 
1322  | 0  |   }  | 
1323  | 0  |   return lastID_ += kIDStep;  | 
1324  | 0  | }  | 
1325  |  |  | 
1326  | 0  | HeapSnapshot::NodeID GCBase::IDTracker::nextNativeID() { | 
1327  |  |   // Calling nextObjectID effectively allocates two new IDs, one even  | 
1328  |  |   // and one odd, returning the latter. For native objects, we want the former.  | 
1329  | 0  |   HeapSnapshot::NodeID id = nextObjectID();  | 
1330  | 0  |   assert(id > 0 && "nextObjectID should check for overflow");  | 
1331  | 0  |   return id - 1;  | 
1332  | 0  | }  | 
1333  |  |  | 
1334  | 0  | HeapSnapshot::NodeID GCBase::IDTracker::nextNumberID() { | 
1335  |  |   // Numbers will all be considered JS memory, not native memory.  | 
1336  | 0  |   return nextObjectID();  | 
1337  | 0  | }  | 
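// [Editorial example] Taken together, the three allocators above hand out IDs
// in even/odd pairs: assuming kIDStep == 2 (implied by "allocates two new IDs"
// above) and the odd initial lastID_ asserted in the constructor, JS objects
// and numbers always get odd IDs while native allocations get the even ID just
// below them, so snapshot consumers can tell the two apart by parity. A
// standalone sketch:
namespace {
class IDAllocatorSketch {
  uint64_t lastID_ = 1; // Odd seed, mirroring the constructor's assert.

 public:
  uint64_t nextObjectID() {
    return lastID_ += 2; // 3, 5, 7, ...: always odd.
  }
  uint64_t nextNativeID() {
    return nextObjectID() - 1; // 2, 4, 6, ...: the even partner.
  }
};
} // namespace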
1338  |  |  | 
1339  |  | #ifdef HERMES_MEMORY_INSTRUMENTATION  | 
1340  |  |  | 
1341  |  | GCBase::AllocationLocationTracker::AllocationLocationTracker(GCBase *gc)  | 
1342  | 160  |     : gc_(gc) {} | 
1343  |  |  | 
1344  | 226  | bool GCBase::AllocationLocationTracker::isEnabled() const { | 
1345  | 226  |   return enabled_;  | 
1346  | 226  | }  | 
1347  |  |  | 
1348  |  | StackTracesTreeNode *  | 
1349  |  | GCBase::AllocationLocationTracker::getStackTracesTreeNodeForAlloc(  | 
1350  | 0  |     HeapSnapshot::NodeID id) const { | 
1351  | 0  |   auto mapIt = stackMap_.find(id);  | 
1352  | 0  |   return mapIt == stackMap_.end() ? nullptr : mapIt->second;  | 
1353  | 0  | }  | 
1354  |  |  | 
1355  |  | void GCBase::AllocationLocationTracker::enable(  | 
1356  |  |     std::function<  | 
1357  |  |         void(uint64_t, std::chrono::microseconds, std::vector<HeapStatsUpdate>)>  | 
1358  | 0  |         callback) { | 
1359  | 0  |   assert(!enabled_ && "Shouldn't enable twice");  | 
1360  | 0  |   enabled_ = true;  | 
1361  | 0  |   std::lock_guard<Mutex> lk{mtx_}; | 
1362  |  |   // For correct visualization of the allocation timeline, it's necessary that  | 
1363  |  |   // objects in the heap snapshot that existed before sampling was enabled have  | 
1364  |  |   // numerically lower IDs than those allocated during sampling. We ensure this  | 
1365  |  |   // by assigning IDs to everything here.  | 
1366  | 0  |   uint64_t numObjects = 0;  | 
1367  | 0  |   uint64_t numBytes = 0;  | 
1368  | 0  |   gc_->forAllObjs([&numObjects, &numBytes, this](GCCell *cell) { | 
1369  | 0  |     numObjects++;  | 
1370  | 0  |     numBytes += cell->getAllocatedSize();  | 
1371  | 0  |     gc_->getObjectID(cell);  | 
1372  | 0  |   });  | 
1373  | 0  |   fragmentCallback_ = std::move(callback);  | 
1374  | 0  |   startTime_ = std::chrono::steady_clock::now();  | 
1375  | 0  |   fragments_.clear();  | 
1376  |  |   // The first fragment has all objects that were live before the profiler was  | 
1377  |  |   // enabled.  | 
1378  |  |   // The ID and timestamp will be filled out via flushCallback.  | 
1379  | 0  |   fragments_.emplace_back(Fragment{ | 
1380  | 0  |       IDTracker::kInvalidNode,  | 
1381  | 0  |       std::chrono::microseconds(),  | 
1382  | 0  |       numObjects,  | 
1383  | 0  |       numBytes,  | 
1384  |  |       // Say the fragment is touched here so it is written out  | 
1385  |  |       // automatically by flushCallback.  | 
1386  | 0  |       true});  | 
1387  |  |   // Immediately flush the first fragment.  | 
1388  | 0  |   flushCallback();  | 
1389  | 0  | }  | 
1390  |  |  | 
1391  | 0  | void GCBase::AllocationLocationTracker::disable() { | 
1392  | 0  |   std::lock_guard<Mutex> lk{mtx_}; | 
1393  | 0  |   flushCallback();  | 
1394  | 0  |   enabled_ = false;  | 
1395  | 0  |   fragmentCallback_ = nullptr;  | 
1396  | 0  | }  | 
1397  |  |  | 
1398  |  | void GCBase::AllocationLocationTracker::newAlloc(  | 
1399  |  |     const GCCell *ptr,  | 
1400  | 8.69M  |     uint32_t sz) { | 
1401  |  |   // Note that we always fetch the current IP, even when allocation tracking is  | 
1402  |  |   // disabled, so that this path is exercised (and can be asserted on) across  | 
1403  |  |   // many tests. It is not prohibitively slow, though it is slower than the  | 
1404  |  |   // non-virtual version in Runtime.  | 
1405  | 8.69M  |   const auto *ip = gc_->gcCallbacks_.getCurrentIPSlow();  | 
1406  | 8.69M  |   if (!enabled_) { | 
1407  | 8.69M  |     return;  | 
1408  | 8.69M  |   }  | 
1409  | 0  |   std::lock_guard<Mutex> lk{mtx_}; | 
1410  |  |   // This is stateful and causes the object to have an ID assigned.  | 
1411  | 0  |   const auto id = gc_->getObjectID(ptr);  | 
1412  | 0  |   HERMES_SLOW_ASSERT(  | 
1413  | 0  |       &findFragmentForID(id) == &fragments_.back() &&  | 
1414  | 0  |       "Should only ever be allocating into the newest fragment");  | 
1415  | 0  |   Fragment &lastFrag = fragments_.back();  | 
1416  | 0  |   assert(  | 
1417  | 0  |       lastFrag.lastSeenObjectID_ == IDTracker::kInvalidNode &&  | 
1418  | 0  |       "Last fragment should not have an ID assigned yet");  | 
1419  | 0  |   lastFrag.numObjects_++;  | 
1420  | 0  |   lastFrag.numBytes_ += sz;  | 
1421  | 0  |   lastFrag.touchedSinceLastFlush_ = true;  | 
1422  | 0  |   if (lastFrag.numBytes_ >= kFlushThreshold) { | 
1423  | 0  |     flushCallback();  | 
1424  | 0  |   }  | 
1425  | 0  |   if (auto node = gc_->gcCallbacks_.getCurrentStackTracesTreeNode(ip)) { | 
1426  | 0  |     auto itAndDidInsert = stackMap_.try_emplace(id, node);  | 
1427  | 0  |     assert(itAndDidInsert.second && "Failed to create a new node");  | 
1428  | 0  |     (void)itAndDidInsert;  | 
1429  | 0  |   }  | 
1430  | 0  | }  | 
1431  |  |  | 
1432  |  | void GCBase::AllocationLocationTracker::updateSize(  | 
1433  |  |     const GCCell *ptr,  | 
1434  |  |     uint32_t oldSize,  | 
1435  | 0  |     uint32_t newSize) { | 
1436  | 0  |   int32_t delta = static_cast<int32_t>(newSize) - static_cast<int32_t>(oldSize);  | 
1437  | 0  |   if (!delta || !enabled_) { | 
1438  |  |     // Nothing to update.  | 
1439  | 0  |     return;  | 
1440  | 0  |   }  | 
1441  | 0  |   std::lock_guard<Mutex> lk{mtx_}; | 
1442  | 0  |   const auto id = gc_->getObjectIDMustExist(ptr);  | 
1443  | 0  |   Fragment &frag = findFragmentForID(id);  | 
1444  | 0  |   frag.numBytes_ += delta;  | 
1445  | 0  |   frag.touchedSinceLastFlush_ = true;  | 
1446  | 0  | }  | 
1447  |  |  | 
1448  |  | void GCBase::AllocationLocationTracker::freeAlloc(  | 
1449  |  |     const GCCell *ptr,  | 
1450  | 0  |     uint32_t sz) { | 
1451  | 0  |   if (!enabled_) { | 
1452  |  |     // Fragments won't exist if the heap profiler isn't enabled.  | 
1453  | 0  |     return;  | 
1454  | 0  |   }  | 
1455  |  |   // Hold a lock during freeAlloc because concurrent Hades might be creating an  | 
1456  |  |   // alloc (newAlloc) at the same time.  | 
1457  | 0  |   std::lock_guard<Mutex> lk{mtx_}; | 
1458  |  |   // The ID must exist here since the memory profiler guarantees everything has  | 
1459  |  |   // an ID (it does a heap pass at the beginning to assign them all).  | 
1460  | 0  |   const auto id = gc_->getObjectIDMustExist(ptr);  | 
1461  | 0  |   stackMap_.erase(id);  | 
1462  | 0  |   Fragment &frag = findFragmentForID(id);  | 
1463  | 0  |   assert(  | 
1464  | 0  |       frag.numObjects_ >= 1 && "Num objects decremented too much for fragment");  | 
1465  | 0  |   frag.numObjects_--;  | 
1466  | 0  |   assert(frag.numBytes_ >= sz && "Num bytes decremented too much for fragment");  | 
1467  | 0  |   frag.numBytes_ -= sz;  | 
1468  | 0  |   frag.touchedSinceLastFlush_ = true;  | 
1469  | 0  | }  | 
1470  |  |  | 
1471  |  | GCBase::AllocationLocationTracker::Fragment &  | 
1472  | 0  | GCBase::AllocationLocationTracker::findFragmentForID(HeapSnapshot::NodeID id) { | 
1473  | 0  |   assert(fragments_.size() >= 1 && "Must have at least one fragment available");  | 
1474  | 0  |   for (auto it = fragments_.begin(); it != fragments_.end() - 1; ++it) { | 
1475  | 0  |     if (it->lastSeenObjectID_ >= id) { | 
1476  | 0  |       return *it;  | 
1477  | 0  |     }  | 
1478  | 0  |   }  | 
1479  |  |   // Since no previous fragments matched, it must be the last fragment.  | 
1480  | 0  |   return fragments_.back();  | 
1481  | 0  | }  | 
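// [Editorial note] The linear scan above relies on an invariant maintained by
// flushCallback: fragments_ is sorted by ascending lastSeenObjectID_, and only
// the trailing fragment is still open (kInvalidNode). Under that assumption,
// the same lookup over the closed prefix could be expressed with
// std::lower_bound. Illustrative only: FragmentSketch stands in for the real
// Fragment type, and <algorithm>/<vector> are assumed available.
namespace {
struct FragmentSketch {
  uint64_t lastSeenObjectID;
};
[[maybe_unused]] FragmentSketch &findFragmentSketch(
    std::vector<FragmentSketch> &fragments, // Last element is the open one.
    uint64_t id) {
  auto it = std::lower_bound(
      fragments.begin(),
      fragments.end() - 1,
      id,
      [](const FragmentSketch &f, uint64_t v) {
        return f.lastSeenObjectID < v;
      });
  // lower_bound yields the first closed fragment whose last-seen ID is >= id;
  // if none qualifies, it lands on end() - 1, i.e. the open fragment.
  return *it;
}
} // namespace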
1482  |  |  | 
1483  | 0  | void GCBase::AllocationLocationTracker::flushCallback() { | 
1484  | 0  |   Fragment &lastFrag = fragments_.back();  | 
1485  | 0  |   const auto lastID = gc_->getIDTracker().lastID();  | 
1486  | 0  |   const auto duration = std::chrono::duration_cast<std::chrono::microseconds>(  | 
1487  | 0  |       std::chrono::steady_clock::now() - startTime_);  | 
1488  | 0  |   assert(  | 
1489  | 0  |       lastFrag.lastSeenObjectID_ == IDTracker::kInvalidNode &&  | 
1490  | 0  |       "Last fragment should not have an ID assigned yet");  | 
1491  |  |   // In case a flush happens without any allocations occurring, don't add a new  | 
1492  |  |   // fragment.  | 
1493  | 0  |   if (lastFrag.touchedSinceLastFlush_) { | 
1494  | 0  |     lastFrag.lastSeenObjectID_ = lastID;  | 
1495  | 0  |     lastFrag.timestamp_ = duration;  | 
1496  |  |     // Place an empty fragment at the end, for any new allocs.  | 
1497  | 0  |     fragments_.emplace_back(Fragment{ | 
1498  | 0  |         IDTracker::kInvalidNode, std::chrono::microseconds(), 0, 0, false});  | 
1499  | 0  |   }  | 
1500  | 0  |   if (fragmentCallback_) { | 
1501  | 0  |     std::vector<HeapStatsUpdate> updatedFragments;  | 
1502  |  |     // Don't include the last fragment, which is newly created (or has no  | 
1503  |  |     // objects in it).  | 
1504  | 0  |     for (size_t i = 0; i < fragments_.size() - 1; ++i) { | 
1505  | 0  |       auto &fragment = fragments_[i];  | 
1506  | 0  |       if (fragment.touchedSinceLastFlush_) { | 
1507  | 0  |         updatedFragments.emplace_back(  | 
1508  | 0  |             i, fragment.numObjects_, fragment.numBytes_);  | 
1509  | 0  |         fragment.touchedSinceLastFlush_ = false;  | 
1510  | 0  |       }  | 
1511  | 0  |     }  | 
1512  | 0  |     fragmentCallback_(lastID, duration, std::move(updatedFragments));  | 
1513  | 0  |   }  | 
1514  | 0  | }  | 
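// [Editorial example] Putting enable(), newAlloc(), and flushCallback()
// together: allocations are partitioned into fragments that are closed off at
// each flush, and only fragments touched since the previous flush are
// reported. A hypothetical caller wiring up the callback might look like the
// sketch below. The lambda matches the std::function parameter of enable()
// above; that HeapStatsUpdate is nested in GCBase is an assumption based on
// its unqualified use in this file, and how the inspector actually hooks in
// is outside this file.
namespace {
[[maybe_unused]] void enableTrackingSketch(GCBase &gc) {
  gc.getAllocationLocationTracker().enable(
      [](uint64_t lastSeenObjectID,
         std::chrono::microseconds timestamp,
         std::vector<GCBase::HeapStatsUpdate> updates) {
        // Each update carries (fragment index, object count, byte count) for
        // a fragment whose stats changed since the last flush, as built in
        // flushCallback above.
        (void)lastSeenObjectID;
        (void)timestamp;
        (void)updates;
      });
}
} // namespace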
1515  |  |  | 
1516  |  | void GCBase::AllocationLocationTracker::addSamplesToSnapshot(  | 
1517  | 0  |     HeapSnapshot &snap) { | 
1518  | 0  |   std::lock_guard<Mutex> lk{mtx_}; | 
1519  | 0  |   if (enabled_) { | 
1520  | 0  |     flushCallback();  | 
1521  | 0  |   }  | 
1522  |  |   // There might not be fragments if tracking has never been enabled. If there  | 
1523  |  |   // are, the last one is always invalid.  | 
1524  | 0  |   assert(  | 
1525  | 0  |       (fragments_.empty() ||  | 
1526  | 0  |        fragments_.back().lastSeenObjectID_ == IDTracker::kInvalidNode) &&  | 
1527  | 0  |       "Last fragment should not have an ID assigned yet");  | 
1528  |  |   // Loop over the fragments if we have any, and always skip the last one.  | 
1529  | 0  |   for (size_t i = 0, e = fragments_.size(); i + 1 < e; ++i) { | 
1530  | 0  |     const auto &fragment = fragments_[i];  | 
1531  | 0  |     snap.addSample(fragment.timestamp_, fragment.lastSeenObjectID_);  | 
1532  | 0  |   }  | 
1533  | 0  | }  | 
1534  |  |  | 
1535  |  | void GCBase::SamplingAllocationLocationTracker::enable(  | 
1536  |  |     size_t samplingInterval,  | 
1537  | 0  |     int64_t seed) { | 
1538  | 0  |   if (seed < 0) { | 
1539  | 0  |     seed = std::random_device()();  | 
1540  | 0  |   }  | 
1541  | 0  |   randomEngine_.seed(seed);  | 
1542  | 0  |   dist_ = llvh::make_unique<std::poisson_distribution<>>(samplingInterval);  | 
1543  | 0  |   limit_ = nextSample();  | 
1544  | 0  | }  | 
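// [Editorial example] This is byte-interval sampling: nextSample() draws, from
// a Poisson distribution with mean samplingInterval, the number of bytes to
// let pass before the next sampled allocation, so on average one allocation is
// recorded per samplingInterval bytes regardless of individual allocation
// sizes. A standalone sketch of the decision loop used by newAlloc() below
// (the engine type is illustrative; the real randomEngine_ is declared in a
// header):
namespace {
class ByteSamplerSketch {
  std::mt19937_64 engine_{std::random_device{}()};
  std::poisson_distribution<size_t> dist_;
  size_t limit_;

 public:
  explicit ByteSamplerSketch(size_t meanIntervalBytes)
      : dist_(meanIntervalBytes), limit_(dist_(engine_)) {}

  // Returns true when this allocation should be sampled.
  bool onAlloc(size_t sz) {
    if (sz <= limit_) {
      limit_ -= sz; // Not yet: consume the byte budget and keep going.
      return false;
    }
    limit_ = dist_(engine_); // Sampled: draw a fresh byte budget.
    return true;
  }
};
} // namespace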
1545  |  |  | 
1546  | 0  | void GCBase::SamplingAllocationLocationTracker::disable(llvh::raw_ostream &os) { | 
1547  | 0  |   JSONEmitter json{os}; | 
1548  | 0  |   ChromeSamplingMemoryProfile profile{json}; | 
1549  | 0  |   std::lock_guard<Mutex> lk{mtx_}; | 
1550  |  |   // Track a map of size -> count for each stack tree node.  | 
1551  | 0  |   llvh::DenseMap<StackTracesTreeNode *, llvh::DenseMap<size_t, size_t>>  | 
1552  | 0  |       sizesToCounts;  | 
1553  |  |   // Do a pre-pass to compute sizesToCounts.  | 
1554  | 0  |   for (const auto &s : samples_) { | 
1555  | 0  |     const Sample &sample = s.second;  | 
1556  | 0  |     sizesToCounts[sample.node][sample.size]++;  | 
1557  | 0  |   }  | 
1558  |  |  | 
1559  |  |   // The tree of stack frames has to be emitted before the samples; Chrome  | 
1560  |  |   // requires the tree to come first.  | 
1561  | 0  |   profile.emitTree(gc_->gcCallbacks_.getStackTracesTree(), sizesToCounts);  | 
1562  | 0  |   profile.beginSamples();  | 
1563  | 0  |   for (const auto &s : samples_) { | 
1564  | 0  |     const Sample &sample = s.second;  | 
1565  | 0  |     profile.emitSample(sample.size, sample.node, sample.id);  | 
1566  | 0  |   }  | 
1567  | 0  |   profile.endSamples();  | 
1568  | 0  |   dist_.reset();  | 
1569  | 0  |   samples_.clear();  | 
1570  | 0  |   limit_ = 0;  | 
1571  | 0  | }  | 
1572  |  |  | 
1573  |  | void GCBase::SamplingAllocationLocationTracker::newAlloc(  | 
1574  |  |     const GCCell *ptr,  | 
1575  | 8.69M  |     uint32_t sz) { | 
1576  |  |   // If the sampling profiler isn't enabled, don't check anything else.  | 
1577  | 8.69M  |   if (!isEnabled()) { | 
1578  | 8.69M  |     return;  | 
1579  | 8.69M  |   }  | 
1580  | 0  |   if (sz <= limit_) { | 
1581  |  |     // Exit if it's not time for a sample yet.  | 
1582  | 0  |     limit_ -= sz;  | 
1583  | 0  |     return;  | 
1584  | 0  |   }  | 
1585  | 0  |   const auto *ip = gc_->gcCallbacks_.getCurrentIPSlow();  | 
1586  |  |   // This is stateful and causes the object to have an ID assigned.  | 
1587  | 0  |   const auto id = gc_->getObjectID(ptr);  | 
1588  | 0  |   if (StackTracesTreeNode *node =  | 
1589  | 0  |           gc_->gcCallbacks_.getCurrentStackTracesTreeNode(ip)) { | 
1590  |  |     // Hold a lock while modifying samples_.  | 
1591  | 0  |     std::lock_guard<Mutex> lk{mtx_}; | 
1592  | 0  |     auto sampleItAndDidInsert =  | 
1593  | 0  |         samples_.try_emplace(id, Sample{sz, node, nextSampleID_++}); | 
1594  | 0  |     assert(sampleItAndDidInsert.second && "Failed to create a sample");  | 
1595  | 0  |     (void)sampleItAndDidInsert;  | 
1596  | 0  |   }  | 
1597  |  |   // Reset the limit.  | 
1598  | 0  |   limit_ = nextSample();  | 
1599  | 0  | }  | 
1600  |  |  | 
1601  |  | void GCBase::SamplingAllocationLocationTracker::freeAlloc(  | 
1602  |  |     const GCCell *ptr,  | 
1603  | 0  |     uint32_t sz) { | 
1604  |  |   // If the sampling profiler isn't enabled, don't check anything else.  | 
1605  | 0  |   if (!isEnabled()) { | 
1606  | 0  |     return;  | 
1607  | 0  |   }  | 
1608  | 0  |   if (!gc_->hasObjectID(ptr)) { | 
1609  |  |     // This object's lifetime isn't being tracked.  | 
1610  | 0  |     return;  | 
1611  | 0  |   }  | 
1612  | 0  |   const auto id = gc_->getObjectIDMustExist(ptr);  | 
1613  |  |   // Hold a lock while modifying samples_.  | 
1614  | 0  |   std::lock_guard<Mutex> lk{mtx_}; | 
1615  | 0  |   samples_.erase(id);  | 
1616  | 0  | }  | 
1617  |  |  | 
1618  |  | void GCBase::SamplingAllocationLocationTracker::updateSize(  | 
1619  |  |     const GCCell *ptr,  | 
1620  |  |     uint32_t oldSize,  | 
1621  | 0  |     uint32_t newSize) { | 
1622  | 0  |   int32_t delta = static_cast<int32_t>(newSize) - static_cast<int32_t>(oldSize);  | 
1623  | 0  |   if (!delta || !isEnabled() || !gc_->hasObjectID(ptr)) { | 
1624  |  |     // Nothing to update.  | 
1625  | 0  |     return;  | 
1626  | 0  |   }  | 
1627  | 0  |   const auto id = gc_->getObjectIDMustExist(ptr);  | 
1628  |  |   // Hold a lock while modifying samples_.  | 
1629  | 0  |   std::lock_guard<Mutex> lk{mtx_}; | 
1630  | 0  |   const auto it = samples_.find(id);  | 
1631  | 0  |   if (it == samples_.end()) { | 
1632  | 0  |     return;  | 
1633  | 0  |   }  | 
1634  | 0  |   Sample &sample = it->second;  | 
1635  |  |   // Update the size stored in the sample.  | 
1636  | 0  |   sample.size = newSize;  | 
1637  | 0  | }  | 
1638  |  |  | 
1639  | 0  | size_t GCBase::SamplingAllocationLocationTracker::nextSample() { | 
1640  | 0  |   return (*dist_)(randomEngine_);  | 
1641  | 0  | }  | 
1642  |  | #endif // HERMES_MEMORY_INSTRUMENTATION  | 
1643  |  |  | 
1644  | 0  | llvh::Optional<HeapSnapshot::NodeID> GCBase::getSnapshotID(HermesValue val) { | 
1645  | 0  |   if (val.isPointer()) { | 
1646  |  |     // Make nullptr HermesValue look like a JS null.  | 
1647  |  |     // This should be rare, but is occasionally used by some parts of the VM.  | 
1648  | 0  |     return val.getPointer()  | 
1649  | 0  |         ? getObjectID(static_cast<GCCell *>(val.getPointer()))  | 
1650  | 0  |         : IDTracker::reserved(IDTracker::ReservedObjectID::Null);  | 
1651  | 0  |   } else if (val.isNumber()) { | 
1652  | 0  |     return idTracker_.getNumberID(val.getNumber());  | 
1653  | 0  |   } else if (val.isSymbol() && val.getSymbol().isValid()) { | 
1654  | 0  |     return idTracker_.getObjectID(val.getSymbol());  | 
1655  | 0  |   } else if (val.isUndefined()) { | 
1656  | 0  |     return IDTracker::reserved(IDTracker::ReservedObjectID::Undefined);  | 
1657  | 0  |   } else if (val.isNull()) { | 
1658  | 0  |     return static_cast<HeapSnapshot::NodeID>(  | 
1659  | 0  |         IDTracker::reserved(IDTracker::ReservedObjectID::Null));  | 
1660  | 0  |   } else if (val.isBool()) { | 
1661  | 0  |     return static_cast<HeapSnapshot::NodeID>(  | 
1662  | 0  |         val.getBool()  | 
1663  | 0  |             ? IDTracker::reserved(IDTracker::ReservedObjectID::True)  | 
1664  | 0  |             : IDTracker::reserved(IDTracker::ReservedObjectID::False));  | 
1665  | 0  |   } else { | 
1666  | 0  |     return llvh::None;  | 
1667  | 0  |   }  | 
1668  | 0  | }  | 
1669  |  |  | 
1670  | 0  | void *GCBase::getObjectForID(HeapSnapshot::NodeID id) { | 
1671  | 0  |   if (llvh::Optional<CompressedPointer> ptr = idTracker_.getObjectForID(id)) { | 
1672  | 0  |     return ptr->get(pointerBase_);  | 
1673  | 0  |   }  | 
1674  | 0  |   return nullptr;  | 
1675  | 0  | }  | 
1676  |  |  | 
1677  | 0  | void GCBase::sizeDiagnosticCensus(size_t allocatedBytes) { | 
1678  | 0  |   struct DiagnosticStat { | 
1679  | 0  |     uint64_t count{0}; | 
1680  | 0  |     uint64_t size{0}; | 
1681  | 0  |     std::map<std::string, DiagnosticStat> breakdown;  | 
1682  |  |  | 
1683  | 0  |     static constexpr double getPercent(double numer, double denom) { | 
1684  | 0  |       return denom != 0 ? 100 * numer / denom : 0.0;  | 
1685  | 0  |     }  | 
1686  | 0  |     void printBreakdown(size_t depth) const { | 
1687  | 0  |       if (breakdown.empty())  | 
1688  | 0  |         return;  | 
1689  |  |  | 
1690  | 0  |       static const char *fmtBase =  | 
1691  | 0  |           "%-25s : %'10" PRIu64 " [%'10" PRIu64 " B | %4.1f%%]";  | 
1692  | 0  |       const std::string fmtStr = std::string(depth, '\t') + fmtBase;  | 
1693  | 0  |       size_t totalSize = 0;  | 
1694  | 0  |       size_t totalCount = 0;  | 
1695  | 0  |       for (const auto &stat : breakdown) { | 
1696  | 0  |         hermesLog(  | 
1697  | 0  |             "HermesGC",  | 
1698  | 0  |             fmtStr.c_str(),  | 
1699  | 0  |             stat.first.c_str(),  | 
1700  | 0  |             stat.second.count,  | 
1701  | 0  |             stat.second.size,  | 
1702  | 0  |             getPercent(stat.second.size, size));  | 
1703  | 0  |         stat.second.printBreakdown(depth + 1);  | 
1704  | 0  |         totalSize += stat.second.size;  | 
1705  | 0  |         totalCount += stat.second.count;  | 
1706  | 0  |       }  | 
1707  | 0  |       if (size_t other = size - totalSize)  | 
1708  | 0  |         hermesLog(  | 
1709  | 0  |             "HermesGC",  | 
1710  | 0  |             fmtStr.c_str(),  | 
1711  | 0  |             "Other",  | 
1712  | 0  |             count - totalCount,  | 
1713  | 0  |             other,  | 
1714  | 0  |             getPercent(other, size));  | 
1715  | 0  |     }  | 
1716  | 0  |   };  | 
1717  |  |  | 
1718  | 0  |   struct HeapSizeDiagnostic { | 
1719  | 0  |     uint64_t numCell = 0;  | 
1720  | 0  |     uint64_t numVariableSizedObject = 0;  | 
1721  | 0  |     DiagnosticStat stats;  | 
1722  |  |  | 
1723  | 0  |     void rootsDiagnosticFrame() const { | 
1724  |  |       // Switch to the user's locale so the %' flag prints thousands separators.  | 
1725  | 0  |       char *currentLocale = std::setlocale(LC_NUMERIC, nullptr);  | 
1726  | 0  |       std::setlocale(LC_NUMERIC, "");  | 
1727  | 0  |       hermesLog("HermesGC", "Root size: %'7" PRIu64 " B", stats.size); | 
1728  | 0  |       stats.printBreakdown(1);  | 
1729  | 0  |       std::setlocale(LC_NUMERIC, currentLocale);  | 
1730  | 0  |     }  | 
1731  |  |  | 
1732  | 0  |     void sizeDiagnosticFrame() const { | 
1733  |  |       // Switch to the user's locale so the %' flag prints thousands separators.  | 
1734  | 0  |       char *currentLocale = std::setlocale(LC_NUMERIC, nullptr);  | 
1735  | 0  |       std::setlocale(LC_NUMERIC, "");  | 
1736  |  |  | 
1737  | 0  |       hermesLog("HermesGC", "Heap size: %'7" PRIu64 " B", stats.size); | 
1738  | 0  |       hermesLog("HermesGC", "\tTotal cells: %'7" PRIu64, numCell); | 
1739  | 0  |       hermesLog(  | 
1740  | 0  |           "HermesGC",  | 
1741  | 0  |           "\tNum variable size cells: %'7" PRIu64,  | 
1742  | 0  |           numVariableSizedObject);  | 
1743  |  | 
  | 
1744  | 0  |       stats.printBreakdown(1);  | 
1745  |  | 
  | 
1746  | 0  |       std::setlocale(LC_NUMERIC, currentLocale);  | 
1747  | 0  |     }  | 
1748  | 0  |   };  | 
1749  |  |  | 
1750  | 0  |   struct HeapSizeDiagnosticAcceptor final : public RootAndSlotAcceptor { | 
1751  |  |     // Can't be static in a local class.  | 
1752  | 0  |     const int64_t HINT8_MIN = -(1 << 7);  | 
1753  | 0  |     const int64_t HINT8_MAX = (1 << 7) - 1;  | 
1754  | 0  |     const int64_t HINT16_MIN = -(1 << 15);  | 
1755  | 0  |     const int64_t HINT16_MAX = (1 << 15) - 1;  | 
1756  | 0  |     const int64_t HINT24_MIN = -(1 << 23);  | 
1757  | 0  |     const int64_t HINT24_MAX = (1 << 23) - 1;  | 
1758  | 0  |     const int64_t HINT32_MIN = -(1LL << 31);  | 
1759  | 0  |     const int64_t HINT32_MAX = (1LL << 31) - 1;  | 
1760  |  |  | 
1761  | 0  |     HeapSizeDiagnostic diagnostic;  | 
1762  | 0  |     PointerBase &pointerBase_;  | 
1763  |  |  | 
1764  | 0  |     HeapSizeDiagnosticAcceptor(PointerBase &pb) : pointerBase_{pb} {} | 
1765  |  |  | 
1766  | 0  |     using SlotAcceptor::accept;  | 
1767  |  |  | 
1768  | 0  |     void accept(GCCell *&ptr) override { | 
1769  | 0  |       diagnostic.stats.breakdown["Pointer"].count++;  | 
1770  | 0  |       diagnostic.stats.breakdown["Pointer"].size += sizeof(GCCell *);  | 
1771  | 0  |     }  | 
1772  |  |  | 
1773  | 0  |     void accept(GCPointerBase &ptr) override { | 
1774  | 0  |       diagnostic.stats.breakdown["GCPointer"].count++;  | 
1775  | 0  |       diagnostic.stats.breakdown["GCPointer"].size += sizeof(GCPointerBase);  | 
1776  | 0  |     }  | 
1777  |  |  | 
1778  | 0  |     void accept(PinnedHermesValue &hv) override { | 
1779  | 0  |       acceptNullable(hv);  | 
1780  | 0  |     }  | 
1781  | 0  |     void acceptNullable(PinnedHermesValue &hv) override { | 
1782  | 0  |       acceptHV(  | 
1783  | 0  |           hv,  | 
1784  | 0  |           diagnostic.stats.breakdown["HermesValue"],  | 
1785  | 0  |           sizeof(PinnedHermesValue));  | 
1786  | 0  |     }  | 
1787  | 0  |     void accept(GCHermesValue &hv) override { | 
1788  | 0  |       acceptHV(  | 
1789  | 0  |           hv, diagnostic.stats.breakdown["HermesValue"], sizeof(GCHermesValue));  | 
1790  | 0  |     }  | 
1791  | 0  |     void accept(GCSmallHermesValue &shv) override { | 
1792  | 0  |       acceptHV(  | 
1793  | 0  |           shv.toHV(pointerBase_),  | 
1794  | 0  |           diagnostic.stats.breakdown["SmallHermesValue"],  | 
1795  | 0  |           sizeof(GCSmallHermesValue));  | 
1796  | 0  |     }  | 
1797  | 0  |     void acceptHV(  | 
1798  | 0  |         const HermesValue &hv,  | 
1799  | 0  |         DiagnosticStat &diag,  | 
1800  | 0  |         const size_t hvBytes) { | 
1801  | 0  |       diag.count++;  | 
1802  | 0  |       diag.size += hvBytes;  | 
1803  | 0  |       llvh::StringRef hvType;  | 
1804  | 0  |       if (hv.isBool()) { | 
1805  | 0  |         hvType = "Bool";  | 
1806  | 0  |       } else if (hv.isNumber()) { | 
1807  | 0  |         hvType = "Number";  | 
1808  | 0  |         double val = hv.getNumber();  | 
1809  | 0  |         double intpart;  | 
1810  | 0  |         llvh::StringRef numType = "Doubles";  | 
1811  | 0  |         if (std::modf(val, &intpart) == 0.0) { | 
1812  | 0  |           if (val >= static_cast<double>(HINT8_MIN) &&  | 
1813  | 0  |               val <= static_cast<double>(HINT8_MAX)) { | 
1814  | 0  |             numType = "Int8";  | 
1815  | 0  |           } else if (  | 
1816  | 0  |               val >= static_cast<double>(HINT16_MIN) &&  | 
1817  | 0  |               val <= static_cast<double>(HINT16_MAX)) { | 
1818  | 0  |             numType = "Int16";  | 
1819  | 0  |           } else if (  | 
1820  | 0  |               val >= static_cast<double>(HINT24_MIN) &&  | 
1821  | 0  |               val <= static_cast<double>(HINT24_MAX)) { | 
1822  | 0  |             numType = "Int24";  | 
1823  | 0  |           } else if (  | 
1824  | 0  |               val >= static_cast<double>(HINT32_MIN) &&  | 
1825  | 0  |               val <= static_cast<double>(HINT32_MAX)) { | 
1826  | 0  |             numType = "Int32";  | 
1827  | 0  |           }  | 
1828  | 0  |         }  | 
1829  | 0  |         diag.breakdown["Number"].breakdown[numType].count++;  | 
1830  | 0  |         diag.breakdown["Number"].breakdown[numType].size += hvBytes;  | 
1831  | 0  |       } else if (hv.isString()) { | 
1832  | 0  |         hvType = "StringPointer";  | 
1833  | 0  |       } else if (hv.isSymbol()) { | 
1834  | 0  |         hvType = "Symbol";  | 
1835  | 0  |       } else if (hv.isObject()) { | 
1836  | 0  |         hvType = "ObjectPointer";  | 
1837  | 0  |       } else if (hv.isNull()) { | 
1838  | 0  |         hvType = "Null";  | 
1839  | 0  |       } else if (hv.isUndefined()) { | 
1840  | 0  |         hvType = "Undefined";  | 
1841  | 0  |       } else if (hv.isEmpty()) { | 
1842  | 0  |         hvType = "Empty";  | 
1843  | 0  |       } else if (hv.isNativeValue()) { | 
1844  | 0  |         hvType = "NativeValue";  | 
1845  | 0  |       } else { | 
1846  | 0  |         assert(false && "Should be no other hermes values");  | 
1847  | 0  |       }  | 
1848  | 0  |       diag.breakdown[hvType].count++;  | 
1849  | 0  |       diag.breakdown[hvType].size += hvBytes;  | 
1850  | 0  |     }  | 
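    // [Editorial worked example] The modf() check above treats a double as
    // integral when its fractional part is exactly 0.0, then buckets it by
    // the narrowest signed width that holds it:
    //   100.0   -> frac 0.0, -128 <= 100 <= 127  -> "Int8"
    //   40000.0 -> frac 0.0, fits in 24 bits     -> "Int24"
    //   2.5     -> frac 0.5, not integral        -> "Doubles"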
1851  |  |  | 
1852  | 0  |     void accept(const RootSymbolID &sym) override { | 
1853  | 0  |       acceptSym(sym);  | 
1854  | 0  |     }  | 
1855  | 0  |     void accept(const GCSymbolID &sym) override { | 
1856  | 0  |       acceptSym(sym);  | 
1857  | 0  |     }  | 
1858  | 0  |     void acceptSym(SymbolID sym) { | 
1859  | 0  |       diagnostic.stats.breakdown["Symbol"].count++;  | 
1860  | 0  |       diagnostic.stats.breakdown["Symbol"].size += sizeof(SymbolID);  | 
1861  | 0  |     }  | 
1862  | 0  |   };  | 
1863  |  |  | 
1864  | 0  |   hermesLog("HermesGC", "%s:", "Roots"); | 
1865  | 0  |   HeapSizeDiagnosticAcceptor rootAcceptor{getPointerBase()}; | 
1866  | 0  |   DroppingAcceptor<HeapSizeDiagnosticAcceptor> namedRootAcceptor{rootAcceptor}; | 
1867  | 0  |   markRoots(namedRootAcceptor, /* markLongLived */ true);  | 
1868  |  |   // For roots, compute the overall size and counts from the breakdown.  | 
1869  | 0  |   for (const auto &substat : rootAcceptor.diagnostic.stats.breakdown) { | 
1870  | 0  |     rootAcceptor.diagnostic.stats.count += substat.second.count;  | 
1871  | 0  |     rootAcceptor.diagnostic.stats.size += substat.second.size;  | 
1872  | 0  |   }  | 
1873  | 0  |   rootAcceptor.diagnostic.rootsDiagnosticFrame();  | 
1874  |  |  | 
1875  | 0  |   hermesLog("HermesGC", "%s:", "Heap contents"); | 
1876  | 0  |   HeapSizeDiagnosticAcceptor acceptor{getPointerBase()}; | 
1877  | 0  |   forAllObjs([&acceptor, this](GCCell *cell) { | 
1878  | 0  |     markCell(cell, acceptor);  | 
1879  | 0  |     acceptor.diagnostic.numCell++;  | 
1880  | 0  |     if (cell->isVariableSize()) { | 
1881  | 0  |       acceptor.diagnostic.numVariableSizedObject++;  | 
1882  |  |       // Ideally this would use sizeof(VariableSizeRuntimeCell), but that can  | 
1883  |  |       // include padding. To be conservative, count the GCCell header plus the  | 
1884  |  |       // uint32_t size field that variable-sized cells add.  | 
1885  | 0  |       acceptor.diagnostic.stats.breakdown["Cell headers"].size +=  | 
1886  | 0  |           (sizeof(GCCell) + sizeof(uint32_t));  | 
1887  | 0  |     } else { | 
1888  | 0  |       acceptor.diagnostic.stats.breakdown["Cell headers"].size +=  | 
1889  | 0  |           sizeof(GCCell);  | 
1890  | 0  |     }  | 
1891  |  |  | 
1892  |  |     // We include ExternalStringPrimitives because we're including external  | 
1893  |  |     // memory in the overall heap size. We do not include  | 
1894  |  |     // BufferedStringPrimitives because they just store a pointer to an  | 
1895  |  |     // ExternalStringPrimitive (which is already tracked).  | 
1896  | 0  |     auto *strprim = dyn_vmcast<StringPrimitive>(cell);  | 
1897  | 0  |     if (strprim && !isBufferedStringPrimitive(cell)) { | 
1898  | 0  |       auto &stat = strprim->isASCII()  | 
1899  | 0  |           ? acceptor.diagnostic.stats.breakdown["StringPrimitive (ASCII)"]  | 
1900  | 0  |           : acceptor.diagnostic.stats.breakdown["StringPrimitive (UTF-16)"];  | 
1901  | 0  |       stat.count++;  | 
1902  | 0  |       const size_t len = strprim->getStringLength();  | 
1903  |  |       // If the string is UTF-16, the length counts 16-bit code units, so the  | 
1904  |  |       // string occupies two bytes per unit.  | 
1905  | 0  |       const size_t sz = strprim->isASCII() ? len : len * 2;  | 
1906  | 0  |       stat.size += sz;  | 
1907  | 0  |       if (len < 8) { | 
1908  | 0  |         auto &subStat =  | 
1909  | 0  |             stat.breakdown  | 
1910  | 0  |                 ["StringPrimitive (size " + std::to_string(len) + ")"];  | 
1911  | 0  |         subStat.count++;  | 
1912  | 0  |         subStat.size += sz;  | 
1913  | 0  |       }  | 
1914  | 0  |     }  | 
1915  | 0  |   });  | 
1916  |  |  | 
1917  | 0  |   assert(  | 
1918  | 0  |       acceptor.diagnostic.stats.size == 0 &&  | 
1919  | 0  |       acceptor.diagnostic.stats.count == 0 &&  | 
1920  | 0  |       "Should not be setting overall stats during heap scan.");  | 
1921  | 0  |   for (const auto &substat : acceptor.diagnostic.stats.breakdown)  | 
1922  | 0  |     acceptor.diagnostic.stats.count += substat.second.count;  | 
1923  | 0  |   acceptor.diagnostic.stats.size = allocatedBytes;  | 
1924  | 0  |   acceptor.diagnostic.sizeDiagnosticFrame();  | 
1925  | 0  | }  | 
1926  |  |  | 
1927  |  | } // namespace vm  | 
1928  |  | } // namespace hermes  | 
1929  |  |  | 
1930  |  | #undef DEBUG_TYPE  |