/src/mozilla-central/xpcom/base/nsCycleCollector.cpp
Line | Count | Source |
1 | | /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ |
2 | | /* vim: set ts=8 sts=2 et sw=2 tw=80: */ |
3 | | /* This Source Code Form is subject to the terms of the Mozilla Public |
4 | | * License, v. 2.0. If a copy of the MPL was not distributed with this |
5 | | * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ |
6 | | |
7 | | // |
8 | | // This file implements a garbage-cycle collector based on the paper |
9 | | // |
10 | | // Concurrent Cycle Collection in Reference Counted Systems |
11 | | // Bacon & Rajan (2001), ECOOP 2001 / Springer LNCS vol 2072 |
12 | | // |
13 | | // We are not using the concurrent or acyclic cases of that paper; so |
14 | | // the green, red and orange colors are not used. |
15 | | // |
16 | | // The collector is based on tracking pointers of four colors: |
17 | | // |
18 | | // Black nodes are definitely live. If we ever determine a node is |
19 | | // black, it's OK to forget about it and drop it from our records. |
20 | | // |
21 | | // White nodes are definitely garbage cycles. Once we finish with our |
22 | | // scanning, we unlink all the white nodes and expect that by |
23 | | // unlinking them they will self-destruct (since a garbage cycle is |
24 | | // only keeping itself alive with internal links, by definition). |
25 | | // |
26 | | // Snow-white is an addition to the original algorithm. A snow-white |
27 | | // object has a reference count of zero and is just waiting for deletion. |
28 | | // |
29 | | // Grey nodes are being scanned. Nodes that turn grey will turn |
30 | | // either black if we determine that they're live, or white if we |
31 | | // determine that they're a garbage cycle. After the main collection |
32 | | // algorithm there should be no grey nodes. |
33 | | // |
34 | | // Purple nodes are *candidates* for being scanned. They are nodes we |
35 | | // haven't begun scanning yet because they're not old enough, or we're |
36 | | // still partway through the algorithm. |
37 | | // |
38 | | // XPCOM objects participating in garbage-cycle collection are obliged |
39 | | // to inform us when they ought to turn purple; that is, when their |
40 | | // refcount transitions from N+1 -> N, for nonzero N. Furthermore we |
41 | | // require that *after* an XPCOM object has informed us of turning |
42 | | // purple, it will tell us when it either transitions back to being |
43 | | // black (incremented refcount) or is ultimately deleted. |
44 | | |
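 | | // As a rough sketch of the scan (hypothetical helper names; the real |
 | | // logic lives in ScanRoots and ScanBlackNodes below), using the fields |
 | | // of PtrInfo defined later in this file: |
 | | // |
 | | //   for each node n in the graph:          // every node starts grey |
 | | //     if (n.mRefCount > n.mInternalRefs)   // refs from outside the graph |
 | | //       blacken n and everything reachable from n; |
 | | //   for each node n still grey: |
 | | //     n.mColor = white;                    // garbage cycle; unlink it |
 | | // |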
45 | | // Incremental cycle collection |
46 | | // |
47 | | // Beyond the simple state machine required to implement incremental |
48 | | // collection, the CC needs to be able to compensate for things the browser |
49 | | // is doing during the collection. There are two kinds of problems. For each |
50 | | // of these, there are two cases to deal with: purple-buffered C++ objects |
51 | | // and JS objects. |
52 | | |
53 | | // The first problem is that an object in the CC's graph can become garbage. |
54 | | // This is bad because the CC touches the objects in its graph at every |
55 | | // stage of its operation. |
56 | | // |
57 | | // All cycle collected C++ objects that die during a cycle collection |
58 | | // will end up actually getting deleted by the SnowWhiteKiller. Before |
59 | | // the SWK deletes an object, it checks if an ICC is running, and if so, |
60 | | // if the object is in the graph. If it is, the CC clears the entry's |
61 | | // mPointer and mParticipant so the graph no longer points to the raw |
62 | | // object. Because objects can die any time the CC returns to the |
63 | | // mutator, any time the CC accesses a PtrInfo it must perform a null |
64 | | // check on mParticipant to ensure the object has not gone away. |
65 | | // |
66 | | // JS objects don't always run finalizers, so the CC can't remove them from |
67 | | // the graph when they die. Fortunately, JS objects can only die during a GC, |
68 | | // so if a GC is begun during an ICC, the browser synchronously finishes off |
69 | | // the ICC, which clears the entire CC graph. If the GC and CC are scheduled |
70 | | // properly, this should be rare. |
71 | | // |
72 | | // The second problem is that objects in the graph can be changed, say by |
73 | | // being addrefed or released, or by having a field updated, after the object |
74 | | // has been added to the graph. The danger is that the ICC can miss a newly |
75 | | // created reference to an object, and end up unlinking an object that is |
76 | | // actually alive. |
77 | | // |
78 | | // The basic idea of the solution, from "An on-the-fly Reference Counting |
79 | | // Garbage Collector for Java" by Levanoni and Petrank, is to notice if an |
80 | | // object has had an additional reference to it created during the collection, |
81 | | // and, if so, not to collect it during the current collection. This avoids |
82 | | // having to rerun the scan as in Bacon & Rajan 2001. |
83 | | // |
84 | | // For cycle collected C++ objects, we modify AddRef to place the object in |
85 | | // the purple buffer, in addition to Release. Then, in the CC, once graph |
86 | | // building has completed, we treat any objects in the purple buffer as |
87 | | // being alive. Because they are in the purple buffer, they will be suspected |
88 | | // in the next CC, so there's no danger of leaks. This is imprecise, because |
89 | | // we will treat as live an object that has been Released but not AddRefed |
90 | | // during graph building, but that's probably rare enough that the additional |
91 | | // bookkeeping overhead is not worthwhile. |
92 | | // |
93 | | // For JS objects, the cycle collector is only looking at gray objects. If a |
94 | | // gray object is touched during ICC, it will be made black by UnmarkGray. |
95 | | // Thus, if a JS object has become black during the ICC, we treat it as live. |
96 | | // Merged JS zones have to be handled specially: we scan all zone globals. |
97 | | // If any are black, we treat the zone as being black. |
98 | | |
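 | | // Schematically, the compensation step for C++ objects runs after graph |
 | | // building (hypothetical shape; see ScanIncrementalRoots below): |
 | | // |
 | | //   for each entry e in the purple buffer: |
 | | //     if (PtrInfo* pi = mGraph.FindNode(e.mObject)) |
 | | //       mark pi (and what it keeps alive) black;  // AddRef'd or Released |
 | | //                                                 // during this ICC |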
99 | | |
100 | | // Safety |
101 | | // |
102 | | // An XPCOM object is either scan-safe or scan-unsafe, purple-safe or |
103 | | // purple-unsafe. |
104 | | // |
105 | | // An nsISupports object is scan-safe if: |
106 | | // |
107 | | // - It can be QI'ed to |nsXPCOMCycleCollectionParticipant|, though |
108 | | // this operation loses ISupports identity (like nsIClassInfo). |
109 | | // - Additionally, the operation |traverse| on the resulting |
110 | | // nsXPCOMCycleCollectionParticipant does not cause *any* refcount |
111 | | // adjustment to occur (no AddRef / Release calls). |
112 | | // |
113 | | // A non-nsISupports ("native") object is scan-safe by explicitly |
114 | | // providing its nsCycleCollectionParticipant. |
115 | | // |
116 | | // An object is purple-safe if it satisfies the following properties: |
117 | | // |
118 | | // - The object is scan-safe. |
119 | | // |
120 | | // When we receive a pointer |ptr| via |
121 | | // |nsCycleCollector::suspect(ptr)|, we assume it is purple-safe. We |
122 | | // can check the scan-safety, but have no way to ensure the |
123 | | // purple-safety; objects must obey, or else the entire system falls |
124 | | // apart. Don't involve an object in this scheme if you can't |
125 | | // guarantee its purple-safety. The easiest way to ensure that an |
126 | | // object is purple-safe is to use nsCycleCollectingAutoRefCnt. |
127 | | // |
128 | | // When we have a scannable set of purple nodes ready, we begin |
129 | | // our walks. During the walks, the nodes we |traverse| should only |
130 | | // feed us more scan-safe nodes, and should not adjust the refcounts |
131 | | // of those nodes. |
132 | | // |
133 | | // We do not |AddRef| or |Release| any objects during scanning. We |
134 | | // rely on the purple-safety of the roots that call |suspect| to |
135 | | // hold, such that we will clear the pointer from the purple buffer |
136 | | // entry to the object before it is destroyed. The pointers that are |
137 | | // merely scan-safe we hold only for the duration of scanning, and |
138 | | // there should be no objects released from the scan-safe set during |
139 | | // the scan. |
140 | | // |
141 | | // We *do* call |Root| and |Unroot| on every white object, on |
142 | | // either side of the calls to |Unlink|. This keeps the set of white |
143 | | // objects alive during the unlinking. |
144 | | // |
145 | | |
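 | | // For reference, a class typically opts into all of the above with the |
 | | // standard participant macros; a minimal sketch (MyObject and mChild are |
 | | // illustrative names): |
 | | // |
 | | //   class MyObject final : public nsISupports { |
 | | //     NS_DECL_CYCLE_COLLECTING_ISUPPORTS   // nsCycleCollectingAutoRefCnt, |
 | | //                                          // so suspecting is purple-safe |
 | | //     NS_DECL_CYCLE_COLLECTION_CLASS(MyObject) |
 | | //    private: |
 | | //     nsCOMPtr<nsISupports> mChild;        // traversed and unlinked edge |
 | | //   }; |
 | | //   NS_IMPL_CYCLE_COLLECTION(MyObject, mChild) |
 | |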
146 | | #if !defined(__MINGW32__) |
147 | | #ifdef WIN32 |
148 | | #include <crtdbg.h> |
149 | | #include <errno.h> |
150 | | #endif |
151 | | #endif |
152 | | |
153 | | #include "base/process_util.h" |
154 | | |
155 | | #include "mozilla/ArrayUtils.h" |
156 | | #include "mozilla/AutoRestore.h" |
157 | | #include "mozilla/CycleCollectedJSContext.h" |
158 | | #include "mozilla/CycleCollectedJSRuntime.h" |
159 | | #include "mozilla/DebugOnly.h" |
160 | | #include "mozilla/HashFunctions.h" |
161 | | #include "mozilla/HashTable.h" |
162 | | #include "mozilla/HoldDropJSObjects.h" |
163 | | /* This must occur *after* base/process_util.h to avoid typedef conflicts. */ |
164 | | #include "mozilla/LinkedList.h" |
165 | | #include "mozilla/MemoryReporting.h" |
166 | | #include "mozilla/Move.h" |
167 | | #include "mozilla/MruCache.h" |
168 | | #include "mozilla/SegmentedVector.h" |
169 | | |
170 | | #include "nsCycleCollectionParticipant.h" |
171 | | #include "nsCycleCollectionNoteRootCallback.h" |
172 | | #include "nsDeque.h" |
173 | | #include "nsExceptionHandler.h" |
174 | | #include "nsCycleCollector.h" |
175 | | #include "nsThreadUtils.h" |
176 | | #include "nsXULAppAPI.h" |
177 | | #include "prenv.h" |
178 | | #include "nsPrintfCString.h" |
179 | | #include "nsTArray.h" |
180 | | #include "nsIConsoleService.h" |
181 | | #include "mozilla/Attributes.h" |
182 | | #include "nsICycleCollectorListener.h" |
183 | | #include "nsISerialEventTarget.h" |
184 | | #include "nsIMemoryReporter.h" |
185 | | #include "nsIFile.h" |
186 | | #include "nsDumpUtils.h" |
187 | | #include "xpcpublic.h" |
188 | | #include "GeckoProfiler.h" |
189 | | #include <stdint.h> |
190 | | #include <stdio.h> |
191 | | |
192 | | #include "mozilla/AutoGlobalTimelineMarker.h" |
193 | | #include "mozilla/Likely.h" |
194 | | #include "mozilla/PoisonIOInterposer.h" |
195 | | #include "mozilla/Telemetry.h" |
196 | | #include "mozilla/ThreadLocal.h" |
197 | | |
198 | | using namespace mozilla; |
199 | | |
200 | | struct NurseryPurpleBufferEntry |
201 | | { |
202 | | void* mPtr; |
203 | | nsCycleCollectionParticipant* mParticipant; |
204 | | nsCycleCollectingAutoRefCnt* mRefCnt; |
205 | | }; |
206 | | |
207 | 0 | #define NURSERY_PURPLE_BUFFER_SIZE 2048 |
208 | | bool gNurseryPurpleBufferEnabled = true; |
209 | | NurseryPurpleBufferEntry gNurseryPurpleBufferEntry[NURSERY_PURPLE_BUFFER_SIZE]; |
210 | | uint32_t gNurseryPurpleBufferEntryCount = 0; |
211 | | |
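 | | // Declared here, defined further down in this file: drains the staged |
 | | // entries above into the main nsPurpleBuffer (via |
 | | // nsCycleCollector::SuspectNurseryEntries()) and resets the entry count. |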
212 | | void ClearNurseryPurpleBuffer(); |
213 | | |
214 | | void SuspectUsingNurseryPurpleBuffer(void* aPtr, |
215 | | nsCycleCollectionParticipant* aCp, |
216 | | nsCycleCollectingAutoRefCnt* aRefCnt) |
217 | 0 | { |
218 | 0 | MOZ_ASSERT(NS_IsMainThread(), "Wrong thread!"); |
219 | 0 | MOZ_ASSERT(gNurseryPurpleBufferEnabled); |
220 | 0 | if (gNurseryPurpleBufferEntryCount == NURSERY_PURPLE_BUFFER_SIZE) { |
221 | 0 | ClearNurseryPurpleBuffer(); |
222 | 0 | } |
223 | 0 |
224 | 0 | gNurseryPurpleBufferEntry[gNurseryPurpleBufferEntryCount] = |
225 | 0 | { aPtr, aCp, aRefCnt }; |
226 | 0 | ++gNurseryPurpleBufferEntryCount; |
227 | 0 | } |
228 | | |
229 | | //#define COLLECT_TIME_DEBUG |
230 | | |
231 | | // Enable assertions that are useful for diagnosing errors in graph construction. |
232 | | //#define DEBUG_CC_GRAPH |
233 | | |
234 | 0 | #define DEFAULT_SHUTDOWN_COLLECTIONS 5 |
235 | | |
236 | | // One to do the freeing, then another to detect there is no more work to do. |
237 | | #define NORMAL_SHUTDOWN_COLLECTIONS 2 |
238 | | |
239 | | // Cycle collector environment variables |
240 | | // |
241 | | // MOZ_CC_LOG_ALL: If defined, always log cycle collector heaps. |
242 | | // |
243 | | // MOZ_CC_LOG_SHUTDOWN: If defined, log cycle collector heaps at shutdown. |
244 | | // |
245 | | // MOZ_CC_LOG_THREAD: If set to "main", only automatically log main thread |
246 | | // CCs. If set to "worker", only automatically log worker CCs. If set to "all", |
247 | | // log either. The default value is "all". This must be used with either |
248 | | // MOZ_CC_LOG_ALL or MOZ_CC_LOG_SHUTDOWN for it to do anything. |
249 | | // |
250 | | // MOZ_CC_LOG_PROCESS: If set to "main", only automatically log main process |
251 | | // CCs. If set to "content", only automatically log tab CCs. If set to |
252 | | // "plugins", only automatically log plugin CCs. If set to "all", log |
253 | | // everything. The default value is "all". This must be used with either |
254 | | // MOZ_CC_LOG_ALL or MOZ_CC_LOG_SHUTDOWN for it to do anything. |
255 | | // |
256 | | // MOZ_CC_ALL_TRACES: If set to "all", any cycle collector |
257 | | // logging done will be WantAllTraces, which disables |
258 | | // various cycle collector optimizations to give a fuller picture of |
259 | | // the heap. If set to "shutdown", only shutdown logging will be WantAllTraces. |
260 | | // The default is none. |
261 | | // |
262 | | // MOZ_CC_RUN_DURING_SHUTDOWN: In non-DEBUG builds, if this is set, |
263 | | // run cycle collections at shutdown. |
264 | | // |
265 | | // MOZ_CC_LOG_DIRECTORY: The directory in which logs are placed (such as |
266 | | // logs from MOZ_CC_LOG_ALL and MOZ_CC_LOG_SHUTDOWN, or other uses |
267 | | // of nsICycleCollectorListener) |
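 | | // |
 | | // For example, to log only main-process, main-thread CCs at shutdown into |
 | | // /tmp/cc-logs, one might launch with (illustrative): |
 | | // |
 | | //   MOZ_CC_LOG_SHUTDOWN=1 MOZ_CC_LOG_THREAD=main MOZ_CC_LOG_PROCESS=main \ |
 | | //   MOZ_CC_LOG_DIRECTORY=/tmp/cc-logs firefox |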
268 | | |
269 | | // Various parameters of this collector can be tuned using environment |
270 | | // variables. |
271 | | |
272 | | struct nsCycleCollectorParams |
273 | | { |
274 | | bool mLogAll; |
275 | | bool mLogShutdown; |
276 | | bool mAllTracesAll; |
277 | | bool mAllTracesShutdown; |
278 | | bool mLogThisThread; |
279 | | |
280 | | nsCycleCollectorParams() : |
281 | | mLogAll(PR_GetEnv("MOZ_CC_LOG_ALL") != nullptr), |
282 | | mLogShutdown(PR_GetEnv("MOZ_CC_LOG_SHUTDOWN") != nullptr), |
283 | | mAllTracesAll(false), |
284 | | mAllTracesShutdown(false) |
285 | 3 | { |
286 | 3 | const char* logThreadEnv = PR_GetEnv("MOZ_CC_LOG_THREAD"); |
287 | 3 | bool threadLogging = true; |
288 | 3 | if (logThreadEnv && !!strcmp(logThreadEnv, "all")) { |
289 | 0 | if (NS_IsMainThread()) { |
290 | 0 | threadLogging = !strcmp(logThreadEnv, "main"); |
291 | 0 | } else { |
292 | 0 | threadLogging = !strcmp(logThreadEnv, "worker"); |
293 | 0 | } |
294 | 0 | } |
295 | 3 | |
296 | 3 | const char* logProcessEnv = PR_GetEnv("MOZ_CC_LOG_PROCESS"); |
297 | 3 | bool processLogging = true; |
298 | 3 | if (logProcessEnv && !!strcmp(logProcessEnv, "all")) { |
299 | 0 | switch (XRE_GetProcessType()) { |
300 | 0 | case GeckoProcessType_Default: |
301 | 0 | processLogging = !strcmp(logProcessEnv, "main"); |
302 | 0 | break; |
303 | 0 | case GeckoProcessType_Plugin: |
304 | 0 | processLogging = !strcmp(logProcessEnv, "plugins"); |
305 | 0 | break; |
306 | 0 | case GeckoProcessType_Content: |
307 | 0 | processLogging = !strcmp(logProcessEnv, "content"); |
308 | 0 | break; |
309 | 0 | default: |
310 | 0 | processLogging = false; |
311 | 0 | break; |
312 | 3 | } |
313 | 3 | } |
314 | 3 | mLogThisThread = threadLogging && processLogging; |
315 | 3 | |
316 | 3 | const char* allTracesEnv = PR_GetEnv("MOZ_CC_ALL_TRACES"); |
317 | 3 | if (allTracesEnv) { |
318 | 0 | if (!strcmp(allTracesEnv, "all")) { |
319 | 0 | mAllTracesAll = true; |
320 | 0 | } else if (!strcmp(allTracesEnv, "shutdown")) { |
321 | 0 | mAllTracesShutdown = true; |
322 | 0 | } |
323 | 0 | } |
324 | 3 | } |
325 | | |
326 | | bool LogThisCC(bool aIsShutdown) |
327 | 0 | { |
328 | 0 | return (mLogAll || (aIsShutdown && mLogShutdown)) && mLogThisThread; |
329 | 0 | } |
330 | | |
331 | | bool AllTracesThisCC(bool aIsShutdown) |
332 | 0 | { |
333 | 0 | return mAllTracesAll || (aIsShutdown && mAllTracesShutdown); |
334 | 0 | } |
335 | | }; |
336 | | |
337 | | #ifdef COLLECT_TIME_DEBUG |
338 | | class TimeLog |
339 | | { |
340 | | public: |
341 | | TimeLog() : mLastCheckpoint(TimeStamp::Now()) |
342 | | { |
343 | | } |
344 | | |
345 | | void |
346 | | Checkpoint(const char* aEvent) |
347 | | { |
348 | | TimeStamp now = TimeStamp::Now(); |
349 | | double dur = (now - mLastCheckpoint).ToMilliseconds(); |
350 | | if (dur >= 0.5) { |
351 | | printf("cc: %s took %.1fms\n", aEvent, dur); |
352 | | } |
353 | | mLastCheckpoint = now; |
354 | | } |
355 | | |
356 | | private: |
357 | | TimeStamp mLastCheckpoint; |
358 | | }; |
359 | | #else |
360 | | class TimeLog |
361 | | { |
362 | | public: |
363 | | TimeLog() |
364 | 0 | { |
365 | 0 | } |
366 | | void Checkpoint(const char* aEvent) |
367 | 0 | { |
368 | 0 | } |
369 | | }; |
370 | | #endif |
371 | | |
372 | | |
373 | | //////////////////////////////////////////////////////////////////////// |
374 | | // Base types |
375 | | //////////////////////////////////////////////////////////////////////// |
376 | | |
377 | | class PtrInfo; |
378 | | |
379 | | class EdgePool |
380 | | { |
381 | | public: |
382 | | // EdgePool allocates arrays of void*, primarily to hold PtrInfo*. |
383 | | // However, at the end of a block, the last two pointers are a null |
384 | | // and then a void** pointing to the next block. This allows |
385 | | // EdgePool::Iterators to be a single word but still capable of crossing |
386 | | // block boundaries. |
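 | | // |
 | | // Schematically, each EdgeBlock is laid out as: |
 | | // |
 | | //   [ PtrInfo* | PtrInfo* | ... | nullptr | EdgeBlock* next ] |
 | | //                                  ^ sentinel ^ link to next block |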
387 | | |
388 | | EdgePool() |
389 | 3 | { |
390 | 3 | mSentinelAndBlocks[0].block = nullptr; |
391 | 3 | mSentinelAndBlocks[1].block = nullptr; |
392 | 3 | } |
393 | | |
394 | | ~EdgePool() |
395 | 0 | { |
396 | 0 | MOZ_ASSERT(!mSentinelAndBlocks[0].block && |
397 | 0 | !mSentinelAndBlocks[1].block, |
398 | 0 | "Didn't call Clear()?"); |
399 | 0 | } |
400 | | |
401 | | void Clear() |
402 | 0 | { |
403 | 0 | EdgeBlock* b = EdgeBlocks(); |
404 | 0 | while (b) { |
405 | 0 | EdgeBlock* next = b->Next(); |
406 | 0 | delete b; |
407 | 0 | b = next; |
408 | 0 | } |
409 | 0 |
410 | 0 | mSentinelAndBlocks[0].block = nullptr; |
411 | 0 | mSentinelAndBlocks[1].block = nullptr; |
412 | 0 | } |
413 | | |
414 | | #ifdef DEBUG |
415 | | bool IsEmpty() |
416 | | { |
417 | | return !mSentinelAndBlocks[0].block && |
418 | | !mSentinelAndBlocks[1].block; |
419 | | } |
420 | | #endif |
421 | | |
422 | | private: |
423 | | struct EdgeBlock; |
424 | | union PtrInfoOrBlock |
425 | | { |
426 | | // Use a union to avoid reinterpret_cast and the ensuing |
427 | | // potential aliasing bugs. |
428 | | PtrInfo* ptrInfo; |
429 | | EdgeBlock* block; |
430 | | }; |
431 | | struct EdgeBlock |
432 | | { |
433 | | enum { EdgeBlockSize = 16 * 1024 }; |
434 | | |
435 | | PtrInfoOrBlock mPointers[EdgeBlockSize]; |
436 | | EdgeBlock() |
437 | 0 | { |
438 | 0 | mPointers[EdgeBlockSize - 2].block = nullptr; // sentinel |
439 | 0 | mPointers[EdgeBlockSize - 1].block = nullptr; // next block pointer |
440 | 0 | } |
441 | | EdgeBlock*& Next() |
442 | 0 | { |
443 | 0 | return mPointers[EdgeBlockSize - 1].block; |
444 | 0 | } |
445 | | PtrInfoOrBlock* Start() |
446 | 0 | { |
447 | 0 | return &mPointers[0]; |
448 | 0 | } |
449 | | PtrInfoOrBlock* End() |
450 | 0 | { |
451 | 0 | return &mPointers[EdgeBlockSize - 2]; |
452 | 0 | } |
453 | | }; |
454 | | |
455 | | // Store the null sentinel so that we can have valid iterators |
456 | | // before adding any edges and without adding any blocks. |
457 | | PtrInfoOrBlock mSentinelAndBlocks[2]; |
458 | | |
459 | | EdgeBlock*& EdgeBlocks() |
460 | 0 | { |
461 | 0 | return mSentinelAndBlocks[1].block; |
462 | 0 | } |
463 | | EdgeBlock* EdgeBlocks() const |
464 | 0 | { |
465 | 0 | return mSentinelAndBlocks[1].block; |
466 | 0 | } |
467 | | |
468 | | public: |
469 | | class Iterator |
470 | | { |
471 | | public: |
472 | 0 | Iterator() : mPointer(nullptr) {} |
473 | 0 | explicit Iterator(PtrInfoOrBlock* aPointer) : mPointer(aPointer) {} |
474 | 0 | Iterator(const Iterator& aOther) : mPointer(aOther.mPointer) {} |
475 | | |
476 | | Iterator& operator++() |
477 | 0 | { |
478 | 0 | if (!mPointer->ptrInfo) { |
479 | 0 | // Null pointer is a sentinel for link to the next block. |
480 | 0 | mPointer = (mPointer + 1)->block->mPointers; |
481 | 0 | } |
482 | 0 | ++mPointer; |
483 | 0 | return *this; |
484 | 0 | } |
485 | | |
486 | | PtrInfo* operator*() const |
487 | 0 | { |
488 | 0 | if (!mPointer->ptrInfo) { |
489 | 0 | // Null pointer is a sentinel for link to the next block. |
490 | 0 | return (mPointer + 1)->block->mPointers->ptrInfo; |
491 | 0 | } |
492 | 0 | return mPointer->ptrInfo; |
493 | 0 | } |
494 | | bool operator==(const Iterator& aOther) const |
495 | 0 | { |
496 | 0 | return mPointer == aOther.mPointer; |
497 | 0 | } |
498 | | bool operator!=(const Iterator& aOther) const |
499 | 0 | { |
500 | 0 | return mPointer != aOther.mPointer; |
501 | 0 | } |
502 | | |
503 | | #ifdef DEBUG_CC_GRAPH |
504 | | bool Initialized() const |
505 | | { |
506 | | return mPointer != nullptr; |
507 | | } |
508 | | #endif |
509 | | |
510 | | private: |
511 | | PtrInfoOrBlock* mPointer; |
512 | | }; |
513 | | |
514 | | class Builder; |
515 | | friend class Builder; |
516 | | class Builder |
517 | | { |
518 | | public: |
519 | | explicit Builder(EdgePool& aPool) |
520 | | : mCurrent(&aPool.mSentinelAndBlocks[0]) |
521 | | , mBlockEnd(&aPool.mSentinelAndBlocks[0]) |
522 | | , mNextBlockPtr(&aPool.EdgeBlocks()) |
523 | 0 | { |
524 | 0 | } |
525 | | |
526 | | Iterator Mark() |
527 | 0 | { |
528 | 0 | return Iterator(mCurrent); |
529 | 0 | } |
530 | | |
531 | | void Add(PtrInfo* aEdge) |
532 | 0 | { |
533 | 0 | if (mCurrent == mBlockEnd) { |
534 | 0 | EdgeBlock* b = new EdgeBlock(); |
535 | 0 | *mNextBlockPtr = b; |
536 | 0 | mCurrent = b->Start(); |
537 | 0 | mBlockEnd = b->End(); |
538 | 0 | mNextBlockPtr = &b->Next(); |
539 | 0 | } |
540 | 0 | (mCurrent++)->ptrInfo = aEdge; |
541 | 0 | } |
542 | | private: |
543 | | // mBlockEnd points to space for null sentinel |
544 | | PtrInfoOrBlock* mCurrent; |
545 | | PtrInfoOrBlock* mBlockEnd; |
546 | | EdgeBlock** mNextBlockPtr; |
547 | | }; |
548 | | |
549 | | size_t SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const |
550 | 0 | { |
551 | 0 | size_t n = 0; |
552 | 0 | EdgeBlock* b = EdgeBlocks(); |
553 | 0 | while (b) { |
554 | 0 | n += aMallocSizeOf(b); |
555 | 0 | b = b->Next(); |
556 | 0 | } |
557 | 0 | return n; |
558 | 0 | } |
559 | | }; |
560 | | |
561 | | #ifdef DEBUG_CC_GRAPH |
562 | | #define CC_GRAPH_ASSERT(b) MOZ_ASSERT(b) |
563 | | #else |
564 | | #define CC_GRAPH_ASSERT(b) |
565 | | #endif |
566 | | |
567 | | #define CC_TELEMETRY(_name, _value) \ |
568 | 0 | do { \ |
569 | 0 | if (NS_IsMainThread()) { \ |
570 | 0 | Telemetry::Accumulate(Telemetry::CYCLE_COLLECTOR##_name, _value); \ |
571 | 0 | } else { \ |
572 | 0 | Telemetry::Accumulate(Telemetry::CYCLE_COLLECTOR_WORKER##_name, _value); \ |
573 | 0 | } \ |
574 | 0 | } while(0) |
575 | | |
576 | | enum NodeColor { black, white, grey }; |
577 | | |
578 | | // This structure should be kept as small as possible; we may expect |
579 | | // hundreds of thousands of them to be allocated and touched |
580 | | // repeatedly during each cycle collection. |
581 | | |
582 | | class PtrInfo final |
583 | | { |
584 | | public: |
585 | | void* mPointer; |
586 | | nsCycleCollectionParticipant* mParticipant; |
587 | | uint32_t mColor : 2; |
588 | | uint32_t mInternalRefs : 30; |
589 | | uint32_t mRefCount; |
590 | | |
591 | | private: |
592 | | EdgePool::Iterator mFirstChild; |
593 | | |
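 | | // mRefCount uses sentinel values: 0 marks a gray JS object, UINT32_MAX a |
 | | // black JS object, and kInitialRefCount a node not yet traversed (see |
 | | // IsGrayJS, IsBlackJS and WasTraversed below). |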
594 | | static const uint32_t kInitialRefCount = UINT32_MAX - 1; |
595 | | |
596 | | public: |
597 | | |
598 | | PtrInfo(void* aPointer, nsCycleCollectionParticipant* aParticipant) |
599 | | : mPointer(aPointer), |
600 | | mParticipant(aParticipant), |
601 | | mColor(grey), |
602 | | mInternalRefs(0), |
603 | | mRefCount(kInitialRefCount), |
604 | | mFirstChild() |
605 | 0 | { |
606 | 0 | MOZ_ASSERT(aParticipant); |
607 | 0 |
|
608 | 0 | // We initialize mRefCount to a large non-zero value so |
609 | 0 | // that it doesn't look like a JS object to the cycle collector |
610 | 0 | // in the case where the object dies before being traversed. |
611 | 0 | MOZ_ASSERT(!IsGrayJS() && !IsBlackJS()); |
612 | 0 | } |
613 | | |
614 | | // Allow NodePool::NodeBlock's constructor to compile. |
615 | | PtrInfo() |
616 | | : mPointer{ nullptr } |
617 | | , mParticipant{ nullptr } |
618 | | , mColor{ 0 } |
619 | | , mInternalRefs{ 0 } |
620 | | , mRefCount{ 0 } |
621 | 0 | { |
622 | 0 | MOZ_ASSERT_UNREACHABLE("should never be called"); |
623 | 0 | } |
624 | | |
625 | | bool IsGrayJS() const |
626 | 0 | { |
627 | 0 | return mRefCount == 0; |
628 | 0 | } |
629 | | |
630 | | bool IsBlackJS() const |
631 | 0 | { |
632 | 0 | return mRefCount == UINT32_MAX; |
633 | 0 | } |
634 | | |
635 | | bool WasTraversed() const |
636 | 0 | { |
637 | 0 | return mRefCount != kInitialRefCount; |
638 | 0 | } |
639 | | |
640 | | EdgePool::Iterator FirstChild() const |
641 | 0 | { |
642 | 0 | CC_GRAPH_ASSERT(mFirstChild.Initialized()); |
643 | 0 | return mFirstChild; |
644 | 0 | } |
645 | | |
646 | | // this PtrInfo must be part of a NodePool |
647 | | EdgePool::Iterator LastChild() const |
648 | 0 | { |
649 | 0 | CC_GRAPH_ASSERT((this + 1)->mFirstChild.Initialized()); |
650 | 0 | return (this + 1)->mFirstChild; |
651 | 0 | } |
652 | | |
653 | | void SetFirstChild(EdgePool::Iterator aFirstChild) |
654 | 0 | { |
655 | 0 | CC_GRAPH_ASSERT(aFirstChild.Initialized()); |
656 | 0 | mFirstChild = aFirstChild; |
657 | 0 | } |
658 | | |
659 | | // this PtrInfo must be part of a NodePool |
660 | | void SetLastChild(EdgePool::Iterator aLastChild) |
661 | 0 | { |
662 | 0 | CC_GRAPH_ASSERT(aLastChild.Initialized()); |
663 | 0 | (this + 1)->mFirstChild = aLastChild; |
664 | 0 | } |
665 | | |
666 | | void AnnotatedReleaseAssert(bool aCondition, const char* aMessage); |
667 | | }; |
668 | | |
669 | | void |
670 | | PtrInfo::AnnotatedReleaseAssert(bool aCondition, const char* aMessage) |
671 | 0 | { |
672 | 0 | if (aCondition) { |
673 | 0 | return; |
674 | 0 | } |
675 | 0 | |
676 | 0 | const char* piName = "Unknown"; |
677 | 0 | if (mParticipant) { |
678 | 0 | piName = mParticipant->ClassName(); |
679 | 0 | } |
680 | 0 | nsPrintfCString msg("%s, for class %s", aMessage, piName); |
681 | 0 | CrashReporter::AnnotateCrashReport(CrashReporter::Annotation::CycleCollector, |
682 | 0 | msg); |
683 | 0 |
684 | 0 | MOZ_CRASH(); |
685 | 0 | } |
686 | | |
687 | | /** |
688 | | * A structure designed to be used like a linked list of PtrInfo, except |
689 | | * it allocates many PtrInfos at a time. |
690 | | */ |
691 | | class NodePool |
692 | | { |
693 | | private: |
694 | | // The -2 allows us to use |NodeBlockSize + 1| for |mEntries|, and fit |
695 | | // |mNext|, all without causing slop. |
696 | | enum { NodeBlockSize = 4 * 1024 - 2 }; |
697 | | |
698 | | struct NodeBlock |
699 | | { |
700 | | // We create and destroy NodeBlock using malloc/free rather than new |
701 | | // and delete, to avoid calling its constructor and destructor. |
702 | | NodeBlock() |
703 | | : mNext{ nullptr } |
704 | 0 | { |
705 | 0 | MOZ_ASSERT_UNREACHABLE("should never be called"); |
706 | 0 |
707 | 0 | // Ensure NodeBlock is the right size (see the comment on NodeBlockSize |
708 | 0 | // above). |
709 | 0 | static_assert( |
710 | 0 | sizeof(NodeBlock) == 81904 || // 32-bit; equals 19.996 x 4 KiB pages |
711 | 0 | sizeof(NodeBlock) == 131048, // 64-bit; equals 31.994 x 4 KiB pages |
712 | 0 | "ill-sized NodeBlock" |
713 | 0 | ); |
714 | 0 | } |
715 | | ~NodeBlock() |
716 | 0 | { |
717 | 0 | MOZ_ASSERT_UNREACHABLE("should never be called"); |
718 | 0 | } |
719 | | |
720 | | NodeBlock* mNext; |
721 | | PtrInfo mEntries[NodeBlockSize + 1]; // +1 to store last child of last node |
722 | | }; |
723 | | |
724 | | public: |
725 | | NodePool() |
726 | | : mBlocks(nullptr) |
727 | | , mLast(nullptr) |
728 | 3 | { |
729 | 3 | } |
730 | | |
731 | | ~NodePool() |
732 | 0 | { |
733 | 0 | MOZ_ASSERT(!mBlocks, "Didn't call Clear()?"); |
734 | 0 | } |
735 | | |
736 | | void Clear() |
737 | 0 | { |
738 | 0 | NodeBlock* b = mBlocks; |
739 | 0 | while (b) { |
740 | 0 | NodeBlock* n = b->mNext; |
741 | 0 | free(b); |
742 | 0 | b = n; |
743 | 0 | } |
744 | 0 |
745 | 0 | mBlocks = nullptr; |
746 | 0 | mLast = nullptr; |
747 | 0 | } |
748 | | |
749 | | #ifdef DEBUG |
750 | | bool IsEmpty() |
751 | | { |
752 | | return !mBlocks && !mLast; |
753 | | } |
754 | | #endif |
755 | | |
756 | | class Builder; |
757 | | friend class Builder; |
758 | | class Builder |
759 | | { |
760 | | public: |
761 | | explicit Builder(NodePool& aPool) |
762 | | : mNextBlock(&aPool.mBlocks) |
763 | | , mNext(aPool.mLast) |
764 | | , mBlockEnd(nullptr) |
765 | 0 | { |
766 | 0 | MOZ_ASSERT(!aPool.mBlocks && !aPool.mLast, "pool not empty"); |
767 | 0 | } |
768 | | PtrInfo* Add(void* aPointer, nsCycleCollectionParticipant* aParticipant) |
769 | 0 | { |
770 | 0 | if (mNext == mBlockEnd) { |
771 | 0 | NodeBlock* block = static_cast<NodeBlock*>(malloc(sizeof(NodeBlock))); |
772 | 0 | if (!block) { |
773 | 0 | return nullptr; |
774 | 0 | } |
775 | 0 | |
776 | 0 | *mNextBlock = block; |
777 | 0 | mNext = block->mEntries; |
778 | 0 | mBlockEnd = block->mEntries + NodeBlockSize; |
779 | 0 | block->mNext = nullptr; |
780 | 0 | mNextBlock = &block->mNext; |
781 | 0 | } |
782 | 0 | return new (mozilla::KnownNotNull, mNext++) PtrInfo(aPointer, aParticipant); |
783 | 0 | } |
784 | | private: |
785 | | NodeBlock** mNextBlock; |
786 | | PtrInfo*& mNext; |
787 | | PtrInfo* mBlockEnd; |
788 | | }; |
789 | | |
790 | | class Enumerator; |
791 | | friend class Enumerator; |
792 | | class Enumerator |
793 | | { |
794 | | public: |
795 | | explicit Enumerator(NodePool& aPool) |
796 | | : mFirstBlock(aPool.mBlocks) |
797 | | , mCurBlock(nullptr) |
798 | | , mNext(nullptr) |
799 | | , mBlockEnd(nullptr) |
800 | | , mLast(aPool.mLast) |
801 | 0 | { |
802 | 0 | } |
803 | | |
804 | | bool IsDone() const |
805 | 0 | { |
806 | 0 | return mNext == mLast; |
807 | 0 | } |
808 | | |
809 | | bool AtBlockEnd() const |
810 | 0 | { |
811 | 0 | return mNext == mBlockEnd; |
812 | 0 | } |
813 | | |
814 | | PtrInfo* GetNext() |
815 | 0 | { |
816 | 0 | MOZ_ASSERT(!IsDone(), "calling GetNext when done"); |
817 | 0 | if (mNext == mBlockEnd) { |
818 | 0 | NodeBlock* nextBlock = mCurBlock ? mCurBlock->mNext : mFirstBlock; |
819 | 0 | mNext = nextBlock->mEntries; |
820 | 0 | mBlockEnd = mNext + NodeBlockSize; |
821 | 0 | mCurBlock = nextBlock; |
822 | 0 | } |
823 | 0 | return mNext++; |
824 | 0 | } |
825 | | private: |
826 | | // mFirstBlock is a reference to allow an Enumerator to be constructed |
827 | | // for an empty graph. |
828 | | NodeBlock*& mFirstBlock; |
829 | | NodeBlock* mCurBlock; |
830 | | // mNext is the next value we want to return, unless mNext == mBlockEnd |
831 | | // NB: mLast is a reference to allow enumerating while building! |
832 | | PtrInfo* mNext; |
833 | | PtrInfo* mBlockEnd; |
834 | | PtrInfo*& mLast; |
835 | | }; |
836 | | |
837 | | size_t SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const |
838 | 0 | { |
839 | 0 | // We don't measure the things pointed to by mEntries[] because those |
840 | 0 | // pointers are non-owning. |
841 | 0 | size_t n = 0; |
842 | 0 | NodeBlock* b = mBlocks; |
843 | 0 | while (b) { |
844 | 0 | n += aMallocSizeOf(b); |
845 | 0 | b = b->mNext; |
846 | 0 | } |
847 | 0 | return n; |
848 | 0 | } |
849 | | |
850 | | private: |
851 | | NodeBlock* mBlocks; |
852 | | PtrInfo* mLast; |
853 | | }; |
854 | | |
855 | | struct PtrToNodeHashPolicy |
856 | | { |
857 | | using Key = PtrInfo*; |
858 | | using Lookup = void*; |
859 | | |
860 | | static js::HashNumber hash(const Lookup& aLookup) |
861 | 0 | { |
862 | 0 | return mozilla::HashGeneric(aLookup); |
863 | 0 | } |
864 | | |
865 | | static bool match(const Key& aKey, const Lookup& aLookup) |
866 | 0 | { |
867 | 0 | return aKey->mPointer == aLookup; |
868 | 0 | } |
869 | | }; |
870 | | |
871 | | |
872 | | struct WeakMapping |
873 | | { |
874 | | // map and key will be null if the corresponding objects are GC marked |
875 | | PtrInfo* mMap; |
876 | | PtrInfo* mKey; |
877 | | PtrInfo* mKeyDelegate; |
878 | | PtrInfo* mVal; |
879 | | }; |
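 | | // The invariant maintained while scanning (see ScanWeakMaps below) is that |
 | | // if mMap, mKey and mKeyDelegate are all live, then mVal must be marked |
 | | // live as well; this is iterated until no more colors change. |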
880 | | |
881 | | class CCGraphBuilder; |
882 | | |
883 | | struct CCGraph |
884 | | { |
885 | | NodePool mNodes; |
886 | | EdgePool mEdges; |
887 | | nsTArray<WeakMapping> mWeakMaps; |
888 | | uint32_t mRootCount; |
889 | | |
890 | | private: |
891 | | friend CCGraphBuilder; |
892 | | |
893 | | mozilla::HashSet<PtrInfo*, PtrToNodeHashPolicy> mPtrInfoMap; |
894 | | |
895 | | bool mOutOfMemory; |
896 | | |
897 | | static const uint32_t kInitialMapLength = 16384; |
898 | | |
899 | | public: |
900 | | CCGraph() |
901 | | : mRootCount(0) |
902 | | , mPtrInfoMap(kInitialMapLength) |
903 | | , mOutOfMemory(false) |
904 | 3 | { |
905 | 3 | } |
906 | | |
907 | 0 | ~CCGraph() {} |
908 | | |
909 | | void Init() |
910 | 0 | { |
911 | 0 | MOZ_ASSERT(IsEmpty(), "Failed to call CCGraph::Clear"); |
912 | 0 | } |
913 | | |
914 | | void Clear() |
915 | 0 | { |
916 | 0 | mNodes.Clear(); |
917 | 0 | mEdges.Clear(); |
918 | 0 | mWeakMaps.Clear(); |
919 | 0 | mRootCount = 0; |
920 | 0 | mPtrInfoMap.clearAndCompact(); |
921 | 0 | mOutOfMemory = false; |
922 | 0 | } |
923 | | |
924 | | #ifdef DEBUG |
925 | | bool IsEmpty() |
926 | | { |
927 | | return mNodes.IsEmpty() && mEdges.IsEmpty() && |
928 | | mWeakMaps.IsEmpty() && mRootCount == 0 && |
929 | | mPtrInfoMap.empty(); |
930 | | } |
931 | | #endif |
932 | | |
933 | | PtrInfo* FindNode(void* aPtr); |
934 | | void RemoveObjectFromMap(void* aObject); |
935 | | |
936 | | uint32_t MapCount() const |
937 | 0 | { |
938 | 0 | return mPtrInfoMap.count(); |
939 | 0 | } |
940 | | |
941 | | size_t SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const |
942 | 0 | { |
943 | 0 | size_t n = 0; |
944 | 0 |
945 | 0 | n += mNodes.SizeOfExcludingThis(aMallocSizeOf); |
946 | 0 | n += mEdges.SizeOfExcludingThis(aMallocSizeOf); |
947 | 0 |
948 | 0 | // We don't measure what the WeakMappings point to, because the |
949 | 0 | // pointers are non-owning. |
950 | 0 | n += mWeakMaps.ShallowSizeOfExcludingThis(aMallocSizeOf); |
951 | 0 |
952 | 0 | n += mPtrInfoMap.shallowSizeOfExcludingThis(aMallocSizeOf); |
953 | 0 |
954 | 0 | return n; |
955 | 0 | } |
956 | | }; |
957 | | |
958 | | PtrInfo* |
959 | | CCGraph::FindNode(void* aPtr) |
960 | 0 | { |
961 | 0 | auto p = mPtrInfoMap.lookup(aPtr); |
962 | 0 | return p ? *p : nullptr; |
963 | 0 | } |
964 | | |
965 | | void |
966 | | CCGraph::RemoveObjectFromMap(void* aObj) |
967 | 0 | { |
968 | 0 | auto p = mPtrInfoMap.lookup(aObj); |
969 | 0 | if (p) { |
970 | 0 | PtrInfo* pinfo = *p; |
971 | 0 | pinfo->mPointer = nullptr; |
972 | 0 | pinfo->mParticipant = nullptr; |
973 | 0 | mPtrInfoMap.remove(p); |
974 | 0 | } |
975 | 0 | } |
976 | | |
977 | | |
978 | | static nsISupports* |
979 | | CanonicalizeXPCOMParticipant(nsISupports* aIn) |
980 | 0 | { |
981 | 0 | nsISupports* out = nullptr; |
982 | 0 | aIn->QueryInterface(NS_GET_IID(nsCycleCollectionISupports), |
983 | 0 | reinterpret_cast<void**>(&out)); |
984 | 0 | return out; |
985 | 0 | } |
986 | | |
987 | | struct nsPurpleBufferEntry |
988 | | { |
989 | | nsPurpleBufferEntry(void* aObject, nsCycleCollectingAutoRefCnt* aRefCnt, |
990 | | nsCycleCollectionParticipant* aParticipant) |
991 | | : mObject(aObject) |
992 | | , mRefCnt(aRefCnt) |
993 | | , mParticipant(aParticipant) |
994 | 3.24M | { |
995 | 3.24M | } |
996 | | |
997 | | nsPurpleBufferEntry(nsPurpleBufferEntry&& aOther) |
998 | | : mObject(nullptr) |
999 | | , mRefCnt(nullptr) |
1000 | | , mParticipant(nullptr) |
1001 | 3.24M | { |
1002 | 3.24M | Swap(aOther); |
1003 | 3.24M | } |
1004 | | |
1005 | | void Swap(nsPurpleBufferEntry& aOther) |
1006 | 3.24M | { |
1007 | 3.24M | std::swap(mObject, aOther.mObject); |
1008 | 3.24M | std::swap(mRefCnt, aOther.mRefCnt); |
1009 | 3.24M | std::swap(mParticipant, aOther.mParticipant); |
1010 | 3.24M | } |
1011 | | |
1012 | | void Clear() |
1013 | 0 | { |
1014 | 0 | mRefCnt->RemoveFromPurpleBuffer(); |
1015 | 0 | mRefCnt = nullptr; |
1016 | 0 | mObject = nullptr; |
1017 | 0 | mParticipant = nullptr; |
1018 | 0 | } |
1019 | | |
1020 | | ~nsPurpleBufferEntry() |
1021 | 3.24M | { |
1022 | 3.24M | if (mRefCnt) { |
1023 | 0 | mRefCnt->RemoveFromPurpleBuffer(); |
1024 | 0 | } |
1025 | 3.24M | } |
1026 | | |
1027 | | void* mObject; |
1028 | | nsCycleCollectingAutoRefCnt* mRefCnt; |
1029 | | nsCycleCollectionParticipant* mParticipant; // nullptr for nsISupports |
1030 | | }; |
1031 | | |
1032 | | class nsCycleCollector; |
1033 | | |
1034 | | struct nsPurpleBuffer |
1035 | | { |
1036 | | private: |
1037 | | uint32_t mCount; |
1038 | | |
1039 | | // Try to match the size of a jemalloc bucket, to minimize slop bytes. |
1040 | | // - On 32-bit platforms sizeof(nsPurpleBufferEntry) is 12, so mEntries' |
1041 | | // Segment is 16,372 bytes. |
1042 | | // - On 64-bit platforms sizeof(nsPurpleBufferEntry) is 24, so mEntries' |
1043 | | // Segment is 32,760 bytes. |
1044 | | static const uint32_t kEntriesPerSegment = 1365; |
1045 | | static const size_t kSegmentSize = |
1046 | | sizeof(nsPurpleBufferEntry) * kEntriesPerSegment; |
1047 | | typedef |
1048 | | SegmentedVector<nsPurpleBufferEntry, kSegmentSize, InfallibleAllocPolicy> |
1049 | | PurpleBufferVector; |
1050 | | PurpleBufferVector mEntries; |
1051 | | public: |
1052 | | nsPurpleBuffer() |
1053 | | : mCount(0) |
1054 | 3 | { |
1055 | 3 | static_assert( |
1056 | 3 | sizeof(PurpleBufferVector::Segment) == 16372 || // 32-bit |
1057 | 3 | sizeof(PurpleBufferVector::Segment) == 32760 || // 64-bit |
1058 | 3 | sizeof(PurpleBufferVector::Segment) == 32744, // 64-bit Windows |
1059 | 3 | "ill-sized nsPurpleBuffer::mEntries"); |
1060 | 3 | } |
1061 | | |
1062 | | ~nsPurpleBuffer() |
1063 | 0 | { |
1064 | 0 | } |
1065 | | |
1066 | | // This method compacts mEntries: entries emptied by the visitor (or |
 | | // already empty) are back-filled by swapping in live entries from the |
 | | // tail, and the emptied tail is popped off at the end. |
1067 | | template<class PurpleVisitor> |
1068 | | void VisitEntries(PurpleVisitor& aVisitor) |
1069 | 0 | { |
1070 | 0 | Maybe<AutoRestore<bool>> ar; |
1071 | 0 | if (NS_IsMainThread()) { |
1072 | 0 | ar.emplace(gNurseryPurpleBufferEnabled); |
1073 | 0 | gNurseryPurpleBufferEnabled = false; |
1074 | 0 | ClearNurseryPurpleBuffer(); |
1075 | 0 | } |
1076 | 0 |
1077 | 0 | if (mEntries.IsEmpty()) { |
1078 | 0 | return; |
1079 | 0 | } |
1080 | 0 | |
1081 | 0 | uint32_t oldLength = mEntries.Length(); |
1082 | 0 | uint32_t keptLength = 0; |
1083 | 0 | auto revIter = mEntries.IterFromLast(); |
1084 | 0 | auto iter = mEntries.Iter(); |
1085 | 0 | // After iteration this points to the first empty entry. |
1086 | 0 | auto firstEmptyIter = mEntries.Iter(); |
1087 | 0 | auto iterFromLastEntry = mEntries.IterFromLast(); |
1088 | 0 | for (; !iter.Done(); iter.Next()) { |
1089 | 0 | nsPurpleBufferEntry& e = iter.Get(); |
1090 | 0 | if (e.mObject) { |
1091 | 0 | if (!aVisitor.Visit(*this, &e)) { |
1092 | 0 | return; |
1093 | 0 | } |
1094 | 0 | } |
1095 | 0 | |
1096 | 0 | // Visit call above may have cleared the entry, or the entry was empty |
1097 | 0 | // already. |
1098 | 0 | if (!e.mObject) { |
1099 | 0 | // Try to find a non-empty entry from the end of the vector. |
1100 | 0 | for (; !revIter.Done(); revIter.Prev()) { |
1101 | 0 | nsPurpleBufferEntry& otherEntry = revIter.Get(); |
1102 | 0 | if (&e == &otherEntry) { |
1103 | 0 | break; |
1104 | 0 | } |
1105 | 0 | if (otherEntry.mObject) { |
1106 | 0 | if (!aVisitor.Visit(*this, &otherEntry)) { |
1107 | 0 | return; |
1108 | 0 | } |
1109 | 0 | // Visit may have cleared otherEntry. |
1110 | 0 | if (otherEntry.mObject) { |
1111 | 0 | e.Swap(otherEntry); |
1112 | 0 | revIter.Prev(); // We've swapped this now empty entry. |
1113 | 0 | break; |
1114 | 0 | } |
1115 | 0 | } |
1116 | 0 | } |
1117 | 0 | } |
1118 | 0 |
1119 | 0 | // Entry is non-empty even after the Visit call, ensure it is kept |
1120 | 0 | // in mEntries. |
1121 | 0 | if (e.mObject) { |
1122 | 0 | firstEmptyIter.Next(); |
1123 | 0 | ++keptLength; |
1124 | 0 | } |
1125 | 0 |
1126 | 0 | if (&e == &revIter.Get()) { |
1127 | 0 | break; |
1128 | 0 | } |
1129 | 0 | } |
1130 | 0 |
1131 | 0 | // There were some empty entries. |
1132 | 0 | if (oldLength != keptLength) { |
1133 | 0 |
1134 | 0 | // While visiting entries, some new ones were possibly added. This can |
1135 | 0 | // happen during CanSkip. Move all such new entries to be after other |
1136 | 0 | // entries. Note, we don't call Visit on newly added entries! |
1137 | 0 | if (&iterFromLastEntry.Get() != &mEntries.GetLast()) { |
1138 | 0 | iterFromLastEntry.Next(); // Now pointing to the first added entry. |
1139 | 0 | auto& iterForNewEntries = iterFromLastEntry; |
1140 | 0 | while (!iterForNewEntries.Done()) { |
1141 | 0 | MOZ_ASSERT(!firstEmptyIter.Done()); |
1142 | 0 | MOZ_ASSERT(!firstEmptyIter.Get().mObject); |
1143 | 0 | firstEmptyIter.Get().Swap(iterForNewEntries.Get()); |
1144 | 0 | firstEmptyIter.Next(); |
1145 | 0 | iterForNewEntries.Next(); |
1146 | 0 | } |
1147 | 0 | } |
1148 | 0 |
1149 | 0 | mEntries.PopLastN(oldLength - keptLength); |
1150 | 0 | } |
1151 | 0 | } |
1152 | | |
1153 | | void FreeBlocks() |
1154 | 0 | { |
1155 | 0 | mCount = 0; |
1156 | 0 | mEntries.Clear(); |
1157 | 0 | } |
1158 | | |
1159 | | void SelectPointers(CCGraphBuilder& aBuilder); |
1160 | | |
1161 | | // RemoveSkippable removes entries from the purple buffer synchronously |
1162 | | // (1) if aAsyncSnowWhiteFreeing is false and nsPurpleBufferEntry::mRefCnt is 0 or |
1163 | | // (2) if the object's nsXPCOMCycleCollectionParticipant::CanSkip() returns true or |
1164 | | // (3) if nsPurpleBufferEntry::mRefCnt->IsPurple() is false. |
1165 | | // (4) If aRemoveChildlessNodes is true, then any nodes in the purple buffer |
1166 | | // that will have no children in the cycle collector graph will also be |
1167 | | // removed. CanSkip() may be run on these children. |
1168 | | void RemoveSkippable(nsCycleCollector* aCollector, |
1169 | | js::SliceBudget& aBudget, |
1170 | | bool aRemoveChildlessNodes, |
1171 | | bool aAsyncSnowWhiteFreeing, |
1172 | | CC_ForgetSkippableCallback aCb); |
1173 | | |
1174 | | MOZ_ALWAYS_INLINE void Put(void* aObject, nsCycleCollectionParticipant* aCp, |
1175 | | nsCycleCollectingAutoRefCnt* aRefCnt) |
1176 | 3.24M | { |
1177 | 3.24M | nsPurpleBufferEntry entry(aObject, aRefCnt, aCp); |
1178 | 3.24M | Unused << mEntries.Append(std::move(entry)); |
1179 | 3.24M | MOZ_ASSERT(!entry.mRefCnt, "Move didn't work!"); |
1180 | 3.24M | ++mCount; |
1181 | 3.24M | } |
1182 | | |
1183 | | void Remove(nsPurpleBufferEntry* aEntry) |
1184 | 0 | { |
1185 | 0 | MOZ_ASSERT(mCount != 0, "must have entries"); |
1186 | 0 | --mCount; |
1187 | 0 | aEntry->Clear(); |
1188 | 0 | } |
1189 | | |
1190 | | uint32_t Count() const |
1191 | 119 | { |
1192 | 119 | return mCount; |
1193 | 119 | } |
1194 | | |
1195 | | size_t SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const |
1196 | 0 | { |
1197 | 0 | return mEntries.SizeOfExcludingThis(aMallocSizeOf); |
1198 | 0 | } |
1199 | | }; |
1200 | | |
1201 | | static bool |
1202 | | AddPurpleRoot(CCGraphBuilder& aBuilder, void* aRoot, |
1203 | | nsCycleCollectionParticipant* aParti); |
1204 | | |
1205 | | struct SelectPointersVisitor |
1206 | | { |
1207 | | explicit SelectPointersVisitor(CCGraphBuilder& aBuilder) |
1208 | | : mBuilder(aBuilder) |
1209 | 0 | { |
1210 | 0 | } |
1211 | | |
1212 | | bool |
1213 | | Visit(nsPurpleBuffer& aBuffer, nsPurpleBufferEntry* aEntry) |
1214 | 0 | { |
1215 | 0 | MOZ_ASSERT(aEntry->mObject, "Null object in purple buffer"); |
1216 | 0 | MOZ_ASSERT(aEntry->mRefCnt->get() != 0, |
1217 | 0 | "SelectPointersVisitor: snow-white object in the purple buffer"); |
1218 | 0 | if (!aEntry->mRefCnt->IsPurple() || |
1219 | 0 | AddPurpleRoot(mBuilder, aEntry->mObject, aEntry->mParticipant)) { |
1220 | 0 | aBuffer.Remove(aEntry); |
1221 | 0 | } |
1222 | 0 | return true; |
1223 | 0 | } |
1224 | | |
1225 | | private: |
1226 | | CCGraphBuilder& mBuilder; |
1227 | | }; |
1228 | | |
1229 | | void |
1230 | | nsPurpleBuffer::SelectPointers(CCGraphBuilder& aBuilder) |
1231 | 0 | { |
1232 | 0 | SelectPointersVisitor visitor(aBuilder); |
1233 | 0 | VisitEntries(visitor); |
1234 | 0 |
1235 | 0 | MOZ_ASSERT(mCount == 0, "AddPurpleRoot failed"); |
1236 | 0 | if (mCount == 0) { |
1237 | 0 | FreeBlocks(); |
1238 | 0 | } |
1239 | 0 | } |
1240 | | |
1241 | | enum ccPhase |
1242 | | { |
1243 | | IdlePhase, |
1244 | | GraphBuildingPhase, |
1245 | | ScanAndCollectWhitePhase, |
1246 | | CleanupPhase |
1247 | | }; |
1248 | | |
1249 | | enum ccType |
1250 | | { |
1251 | | SliceCC, /* If a CC is in progress, continue it. Otherwise, start a new one. */ |
1252 | | ManualCC, /* Explicitly triggered. */ |
1253 | | ShutdownCC /* Shutdown CC, used for finding leaks. */ |
1254 | | }; |
1255 | | |
1256 | | //////////////////////////////////////////////////////////////////////// |
1257 | | // Top level structure for the cycle collector. |
1258 | | //////////////////////////////////////////////////////////////////////// |
1259 | | |
1260 | | using js::SliceBudget; |
1261 | | |
1262 | | class JSPurpleBuffer; |
1263 | | |
1264 | | class nsCycleCollector : public nsIMemoryReporter |
1265 | | { |
1266 | | public: |
1267 | | NS_DECL_ISUPPORTS |
1268 | | NS_DECL_NSIMEMORYREPORTER |
1269 | | |
1270 | | private: |
1271 | | bool mActivelyCollecting; |
1272 | | bool mFreeingSnowWhite; |
1273 | | // mScanInProgress should be false when we're collecting white objects. |
1274 | | bool mScanInProgress; |
1275 | | CycleCollectorResults mResults; |
1276 | | TimeStamp mCollectionStart; |
1277 | | |
1278 | | CycleCollectedJSRuntime* mCCJSRuntime; |
1279 | | |
1280 | | ccPhase mIncrementalPhase; |
1281 | | CCGraph mGraph; |
1282 | | nsAutoPtr<CCGraphBuilder> mBuilder; |
1283 | | RefPtr<nsCycleCollectorLogger> mLogger; |
1284 | | |
1285 | | #ifdef DEBUG |
1286 | | nsISerialEventTarget* mEventTarget; |
1287 | | #endif |
1288 | | |
1289 | | nsCycleCollectorParams mParams; |
1290 | | |
1291 | | uint32_t mWhiteNodeCount; |
1292 | | |
1293 | | CC_BeforeUnlinkCallback mBeforeUnlinkCB; |
1294 | | CC_ForgetSkippableCallback mForgetSkippableCB; |
1295 | | |
1296 | | nsPurpleBuffer mPurpleBuf; |
1297 | | |
1298 | | uint32_t mUnmergedNeeded; |
1299 | | uint32_t mMergedInARow; |
1300 | | |
1301 | | RefPtr<JSPurpleBuffer> mJSPurpleBuffer; |
1302 | | |
1303 | | private: |
1304 | | virtual ~nsCycleCollector(); |
1305 | | |
1306 | | public: |
1307 | | nsCycleCollector(); |
1308 | | |
1309 | | void SetCCJSRuntime(CycleCollectedJSRuntime* aCCRuntime); |
1310 | | void ClearCCJSRuntime(); |
1311 | | |
1312 | | void SetBeforeUnlinkCallback(CC_BeforeUnlinkCallback aBeforeUnlinkCB) |
1313 | 3 | { |
1314 | 3 | CheckThreadSafety(); |
1315 | 3 | mBeforeUnlinkCB = aBeforeUnlinkCB; |
1316 | 3 | } |
1317 | | |
1318 | | void SetForgetSkippableCallback(CC_ForgetSkippableCallback aForgetSkippableCB) |
1319 | 3 | { |
1320 | 3 | CheckThreadSafety(); |
1321 | 3 | mForgetSkippableCB = aForgetSkippableCB; |
1322 | 3 | } |
1323 | | |
1324 | | void Suspect(void* aPtr, nsCycleCollectionParticipant* aCp, |
1325 | | nsCycleCollectingAutoRefCnt* aRefCnt); |
1326 | | void SuspectNurseryEntries(); |
1327 | | uint32_t SuspectedCount(); |
1328 | | void ForgetSkippable(js::SliceBudget& aBudget, bool aRemoveChildlessNodes, |
1329 | | bool aAsyncSnowWhiteFreeing); |
1330 | | bool FreeSnowWhite(bool aUntilNoSWInPurpleBuffer); |
1331 | | bool FreeSnowWhiteWithBudget(js::SliceBudget& aBudget); |
1332 | | |
1333 | | // This method assumes its argument is already canonicalized. |
1334 | | void RemoveObjectFromGraph(void* aPtr); |
1335 | | |
1336 | | void PrepareForGarbageCollection(); |
1337 | | void FinishAnyCurrentCollection(); |
1338 | | |
1339 | | bool Collect(ccType aCCType, |
1340 | | SliceBudget& aBudget, |
1341 | | nsICycleCollectorListener* aManualListener, |
1342 | | bool aPreferShorterSlices = false); |
1343 | | void Shutdown(bool aDoCollect); |
1344 | | |
1345 | 18 | bool IsIdle() const { return mIncrementalPhase == IdlePhase; } |
1346 | | |
1347 | | void SizeOfIncludingThis(mozilla::MallocSizeOf aMallocSizeOf, |
1348 | | size_t* aObjectSize, |
1349 | | size_t* aGraphSize, |
1350 | | size_t* aPurpleBufferSize) const; |
1351 | | |
1352 | | JSPurpleBuffer* GetJSPurpleBuffer(); |
1353 | | |
1354 | 0 | CycleCollectedJSRuntime* Runtime() { return mCCJSRuntime; } |
1355 | | |
1356 | | private: |
1357 | | void CheckThreadSafety(); |
1358 | | void ShutdownCollect(); |
1359 | | |
1360 | | void FixGrayBits(bool aForceGC, TimeLog& aTimeLog); |
1361 | | bool IsIncrementalGCInProgress(); |
1362 | | void FinishAnyIncrementalGCInProgress(); |
1363 | | bool ShouldMergeZones(ccType aCCType); |
1364 | | |
1365 | | void BeginCollection(ccType aCCType, nsICycleCollectorListener* aManualListener); |
1366 | | void MarkRoots(SliceBudget& aBudget); |
1367 | | void ScanRoots(bool aFullySynchGraphBuild); |
1368 | | void ScanIncrementalRoots(); |
1369 | | void ScanWhiteNodes(bool aFullySynchGraphBuild); |
1370 | | void ScanBlackNodes(); |
1371 | | void ScanWeakMaps(); |
1372 | | |
1373 | | // returns whether anything was collected |
1374 | | bool CollectWhite(); |
1375 | | |
1376 | | void CleanupAfterCollection(); |
1377 | | }; |
1378 | | |
1379 | | NS_IMPL_ISUPPORTS(nsCycleCollector, nsIMemoryReporter) |
1380 | | |
1381 | | /** |
1382 | | * GraphWalker is templatized over a Visitor class that must provide |
1383 | | * the following methods: |
1384 | | * |
1385 | | * bool ShouldVisitNode(PtrInfo const *pi); |
1386 | | * void VisitNode(PtrInfo *pi); |
 | | * void Failed(); // called by CheckedPush when a queue allocation fails |
1387 | | */ |
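 | | // For example (illustrative only), a visitor that blackens every node |
 | | // reachable from a start node could look like: |
 | | // |
 | | //   struct BlackenVisitor { |
 | | //     bool ShouldVisitNode(PtrInfo const* aPi) { return aPi->mColor != black; } |
 | | //     void VisitNode(PtrInfo* aPi) { aPi->mColor = black; } |
 | | //     void Failed() { /* record that the walk ran out of memory */ } |
 | | //   }; |
 | | //   GraphWalker<BlackenVisitor>(BlackenVisitor()).Walk(startNode); |
 | |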
1388 | | template<class Visitor> |
1389 | | class GraphWalker |
1390 | | { |
1391 | | private: |
1392 | | Visitor mVisitor; |
1393 | | |
1394 | | void DoWalk(nsDeque& aQueue); |
1395 | | |
1396 | | void CheckedPush(nsDeque& aQueue, PtrInfo* aPi) |
1397 | 0 | { |
1398 | 0 | if (!aPi) { |
1399 | 0 | MOZ_CRASH(); |
1400 | 0 | } |
1401 | 0 | if (!aQueue.Push(aPi, fallible)) { |
1402 | 0 | mVisitor.Failed(); |
1403 | 0 | } |
1404 | 0 | } |
1405 | | |
1406 | | public: |
1407 | | void Walk(PtrInfo* aPi); |
1408 | | void WalkFromRoots(CCGraph& aGraph); |
1409 | | // Copy-constructing the visitor should be cheap, and involves less |
1410 | | // indirection than using a reference. |
1411 | | explicit GraphWalker(const Visitor aVisitor) : mVisitor(aVisitor) |
1412 | 0 | { |
1413 | 0 | } |
1414 | | }; |
1415 | | |
1416 | | |
1417 | | //////////////////////////////////////////////////////////////////////// |
1418 | | // The static collector struct |
1419 | | //////////////////////////////////////////////////////////////////////// |
1420 | | |
1421 | | struct CollectorData |
1422 | | { |
1423 | | RefPtr<nsCycleCollector> mCollector; |
1424 | | CycleCollectedJSContext* mContext; |
1425 | | }; |
1426 | | |
1427 | | static MOZ_THREAD_LOCAL(CollectorData*) sCollectorData; |
1428 | | |
1429 | | //////////////////////////////////////////////////////////////////////// |
1430 | | // Utility functions |
1431 | | //////////////////////////////////////////////////////////////////////// |
1432 | | |
1433 | | static inline void |
1434 | | ToParticipant(nsISupports* aPtr, nsXPCOMCycleCollectionParticipant** aCp) |
1435 | 0 | { |
1436 | 0 | // We use QI to move from an nsISupports to an |
1437 | 0 | // nsXPCOMCycleCollectionParticipant, which is a per-class singleton helper |
1438 | 0 | // object that implements traversal and unlinking logic for the nsISupports |
1439 | 0 | // in question. |
1440 | 0 | *aCp = nullptr; |
1441 | 0 | CallQueryInterface(aPtr, aCp); |
1442 | 0 | } |
1443 | | |
1444 | | static void |
1445 | | ToParticipant(void* aParti, nsCycleCollectionParticipant** aCp) |
1446 | 0 | { |
1447 | 0 | // If the participant is null, this is an nsISupports participant, |
1448 | 0 | // so we must QI to get the real participant. |
1449 | 0 |
1450 | 0 | if (!*aCp) { |
1451 | 0 | nsISupports* nsparti = static_cast<nsISupports*>(aParti); |
1452 | 0 | MOZ_ASSERT(CanonicalizeXPCOMParticipant(nsparti) == nsparti); |
1453 | 0 | nsXPCOMCycleCollectionParticipant* xcp; |
1454 | 0 | ToParticipant(nsparti, &xcp); |
1455 | 0 | *aCp = xcp; |
1456 | 0 | } |
1457 | 0 | } |
1458 | | |
1459 | | template<class Visitor> |
1460 | | MOZ_NEVER_INLINE void |
1461 | | GraphWalker<Visitor>::Walk(PtrInfo* aPi) |
1462 | 0 | { |
1463 | 0 | nsDeque queue; |
1464 | 0 | CheckedPush(queue, aPi); |
1465 | 0 | DoWalk(queue); |
1466 | 0 | } |
1467 | | |
1468 | | template<class Visitor> |
1469 | | MOZ_NEVER_INLINE void |
1470 | | GraphWalker<Visitor>::WalkFromRoots(CCGraph& aGraph) |
1471 | | { |
1472 | | nsDeque queue; |
1473 | | NodePool::Enumerator etor(aGraph.mNodes); |
1474 | | for (uint32_t i = 0; i < aGraph.mRootCount; ++i) { |
1475 | | CheckedPush(queue, etor.GetNext()); |
1476 | | } |
1477 | | DoWalk(queue); |
1478 | | } |
1479 | | |
1480 | | template<class Visitor> |
1481 | | MOZ_NEVER_INLINE void |
1482 | | GraphWalker<Visitor>::DoWalk(nsDeque& aQueue) |
1483 | 0 | { |
1484 | 0 | // Use aQueue to match the breadth-first traversal used when we |
1485 | 0 | // built the graph, for hopefully-better locality. |
1486 | 0 | while (aQueue.GetSize() > 0) { |
1487 | 0 | PtrInfo* pi = static_cast<PtrInfo*>(aQueue.PopFront()); |
1488 | 0 |
1489 | 0 | if (pi->WasTraversed() && mVisitor.ShouldVisitNode(pi)) { |
1490 | 0 | mVisitor.VisitNode(pi); |
1491 | 0 | for (EdgePool::Iterator child = pi->FirstChild(), |
1492 | 0 | child_end = pi->LastChild(); |
1493 | 0 | child != child_end; ++child) { |
1494 | 0 | CheckedPush(aQueue, *child); |
1495 | 0 | } |
1496 | 0 | } |
1497 | 0 | } |
1498 | 0 | } |
1499 | | |
1500 | | struct CCGraphDescriber : public LinkedListElement<CCGraphDescriber> |
1501 | | { |
1502 | | CCGraphDescriber() |
1503 | | : mAddress("0x"), mCnt(0), mType(eUnknown) |
1504 | 0 | { |
1505 | 0 | } |
1506 | | |
1507 | | enum Type |
1508 | | { |
1509 | | eRefCountedObject, |
1510 | | eGCedObject, |
1511 | | eGCMarkedObject, |
1512 | | eEdge, |
1513 | | eRoot, |
1514 | | eGarbage, |
1515 | | eUnknown |
1516 | | }; |
1517 | | |
1518 | | nsCString mAddress; |
1519 | | nsCString mName; |
1520 | | nsCString mCompartmentOrToAddress; |
1521 | | uint32_t mCnt; |
1522 | | Type mType; |
1523 | | }; |
1524 | | |
1525 | | class LogStringMessageAsync : public CancelableRunnable |
1526 | | { |
1527 | | public: |
1528 | | explicit LogStringMessageAsync(const nsAString& aMsg) |
1529 | | : mozilla::CancelableRunnable("LogStringMessageAsync") |
1530 | | , mMsg(aMsg) |
1531 | 0 | {} |
1532 | | |
1533 | | NS_IMETHOD Run() override |
1534 | 0 | { |
1535 | 0 | nsCOMPtr<nsIConsoleService> cs = |
1536 | 0 | do_GetService(NS_CONSOLESERVICE_CONTRACTID); |
1537 | 0 | if (cs) { |
1538 | 0 | cs->LogStringMessage(mMsg.get()); |
1539 | 0 | } |
1540 | 0 | return NS_OK; |
1541 | 0 | } |
1542 | | |
1543 | | private: |
1544 | | nsString mMsg; |
1545 | | }; |
1546 | | |
1547 | | class nsCycleCollectorLogSinkToFile final : public nsICycleCollectorLogSink |
1548 | | { |
1549 | | public: |
1550 | | NS_DECL_ISUPPORTS |
1551 | | |
1552 | | nsCycleCollectorLogSinkToFile() : |
1553 | | mProcessIdentifier(base::GetCurrentProcId()), |
1554 | | mGCLog("gc-edges"), mCCLog("cc-edges") |
1555 | 0 | { |
1556 | 0 | } |
1557 | | |
1558 | | NS_IMETHOD GetFilenameIdentifier(nsAString& aIdentifier) override |
1559 | 0 | { |
1560 | 0 | aIdentifier = mFilenameIdentifier; |
1561 | 0 | return NS_OK; |
1562 | 0 | } |
1563 | | |
1564 | | NS_IMETHOD SetFilenameIdentifier(const nsAString& aIdentifier) override |
1565 | 0 | { |
1566 | 0 | mFilenameIdentifier = aIdentifier; |
1567 | 0 | return NS_OK; |
1568 | 0 | } |
1569 | | |
1570 | | NS_IMETHOD GetProcessIdentifier(int32_t* aIdentifier) override |
1571 | 0 | { |
1572 | 0 | *aIdentifier = mProcessIdentifier; |
1573 | 0 | return NS_OK; |
1574 | 0 | } |
1575 | | |
1576 | | NS_IMETHOD SetProcessIdentifier(int32_t aIdentifier) override |
1577 | 0 | { |
1578 | 0 | mProcessIdentifier = aIdentifier; |
1579 | 0 | return NS_OK; |
1580 | 0 | } |
1581 | | |
1582 | | NS_IMETHOD GetGcLog(nsIFile** aPath) override |
1583 | 0 | { |
1584 | 0 | NS_IF_ADDREF(*aPath = mGCLog.mFile); |
1585 | 0 | return NS_OK; |
1586 | 0 | } |
1587 | | |
1588 | | NS_IMETHOD GetCcLog(nsIFile** aPath) override |
1589 | 0 | { |
1590 | 0 | NS_IF_ADDREF(*aPath = mCCLog.mFile); |
1591 | 0 | return NS_OK; |
1592 | 0 | } |
1593 | | |
1594 | | NS_IMETHOD Open(FILE** aGCLog, FILE** aCCLog) override |
1595 | 0 | { |
1596 | 0 | nsresult rv; |
1597 | 0 |
1598 | 0 | if (mGCLog.mStream || mCCLog.mStream) { |
1599 | 0 | return NS_ERROR_UNEXPECTED; |
1600 | 0 | } |
1601 | 0 | |
1602 | 0 | rv = OpenLog(&mGCLog); |
1603 | 0 | NS_ENSURE_SUCCESS(rv, rv); |
1604 | 0 | *aGCLog = mGCLog.mStream; |
1605 | 0 |
1606 | 0 | rv = OpenLog(&mCCLog); |
1607 | 0 | NS_ENSURE_SUCCESS(rv, rv); |
1608 | 0 | *aCCLog = mCCLog.mStream; |
1609 | 0 |
1610 | 0 | return NS_OK; |
1611 | 0 | } |
1612 | | |
1613 | | NS_IMETHOD CloseGCLog() override |
1614 | 0 | { |
1615 | 0 | if (!mGCLog.mStream) { |
1616 | 0 | return NS_ERROR_UNEXPECTED; |
1617 | 0 | } |
1618 | 0 | CloseLog(&mGCLog, NS_LITERAL_STRING("Garbage")); |
1619 | 0 | return NS_OK; |
1620 | 0 | } |
1621 | | |
1622 | | NS_IMETHOD CloseCCLog() override |
1623 | 0 | { |
1624 | 0 | if (!mCCLog.mStream) { |
1625 | 0 | return NS_ERROR_UNEXPECTED; |
1626 | 0 | } |
1627 | 0 | CloseLog(&mCCLog, NS_LITERAL_STRING("Cycle")); |
1628 | 0 | return NS_OK; |
1629 | 0 | } |
1630 | | |
1631 | | private: |
1632 | | ~nsCycleCollectorLogSinkToFile() |
1633 | 0 | { |
1634 | 0 | if (mGCLog.mStream) { |
1635 | 0 | MozillaUnRegisterDebugFILE(mGCLog.mStream); |
1636 | 0 | fclose(mGCLog.mStream); |
1637 | 0 | } |
1638 | 0 | if (mCCLog.mStream) { |
1639 | 0 | MozillaUnRegisterDebugFILE(mCCLog.mStream); |
1640 | 0 | fclose(mCCLog.mStream); |
1641 | 0 | } |
1642 | 0 | } |
1643 | | |
1644 | | struct FileInfo |
1645 | | { |
1646 | | const char* const mPrefix; |
1647 | | nsCOMPtr<nsIFile> mFile; |
1648 | | FILE* mStream; |
1649 | | |
1650 | 0 | explicit FileInfo(const char* aPrefix) : mPrefix(aPrefix), mStream(nullptr) { } |
1651 | | }; |
1652 | | |
1653 | | /** |
1654 | | * Create a new file named something like aPrefix.$PID.$IDENTIFIER.log in |
1655 | | * $MOZ_CC_LOG_DIRECTORY or in the system's temp directory. No existing |
1656 | | * file will be overwritten; if aPrefix.$PID.$IDENTIFIER.log exists, we'll |
1657 | | * try a file named something like aPrefix.$PID.$IDENTIFIER-1.log, and so |
1658 | | * on. |
1659 | | */ |
1660 | | already_AddRefed<nsIFile> CreateTempFile(const char* aPrefix) |
1661 | 0 | { |
1662 | 0 | nsPrintfCString filename("%s.%d%s%s.log", |
1663 | 0 | aPrefix, |
1664 | 0 | mProcessIdentifier, |
1665 | 0 | mFilenameIdentifier.IsEmpty() ? "" : ".", |
1666 | 0 | NS_ConvertUTF16toUTF8(mFilenameIdentifier).get()); |
1667 | 0 |
1668 | 0 | // Get the log directory either from $MOZ_CC_LOG_DIRECTORY or from |
1669 | 0 | // the fallback directories in OpenTempFile. We don't use an nsCOMPtr |
1670 | 0 | // here because OpenTempFile uses an in/out param and getter_AddRefs |
1671 | 0 | // wouldn't work. |
1672 | 0 | nsIFile* logFile = nullptr; |
1673 | 0 | if (char* env = PR_GetEnv("MOZ_CC_LOG_DIRECTORY")) { |
1674 | 0 | NS_NewNativeLocalFile(nsCString(env), /* followLinks = */ true, |
1675 | 0 | &logFile); |
1676 | 0 | } |
1677 | 0 |
1678 | 0 | // On Android or B2G, this function will open a file named |
1679 | 0 | // aFilename under a memory-reporting-specific folder |
1680 | 0 | // (/data/local/tmp/memory-reports). Otherwise, it will open a |
1681 | 0 | // file named aFilename under "NS_OS_TEMP_DIR". |
1682 | 0 | nsresult rv = nsDumpUtils::OpenTempFile(filename, &logFile, |
1683 | 0 | NS_LITERAL_CSTRING("memory-reports")); |
1684 | 0 | if (NS_FAILED(rv)) { |
1685 | 0 | NS_IF_RELEASE(logFile); |
1686 | 0 | return nullptr; |
1687 | 0 | } |
1688 | 0 |
1689 | 0 | return dont_AddRef(logFile); |
1690 | 0 | } |
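// Editor's note (worked example; the concrete values are hypothetical): with
// aPrefix = "incomplete-cc-edges", mProcessIdentifier = 1234 and filename
// identifier "shutdown", the nsPrintfCString above yields
// "incomplete-cc-edges.1234.shutdown.log"; with an empty identifier the
// optional "." is dropped, giving "incomplete-cc-edges.1234.log".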
1691 | | |
1692 | | nsresult OpenLog(FileInfo* aLog) |
1693 | 0 | { |
1694 | 0 | // Initially create the log in a file starting with "incomplete-". |
1695 | 0 | // We'll move the file and strip off the "incomplete-" once the dump |
1696 | 0 | // completes. (We do this because we don't want scripts which poll |
1697 | 0 | // the filesystem looking for GC/CC dumps to grab a file before we're |
1698 | 0 | // finished writing to it.) |
1699 | 0 | nsAutoCString incomplete; |
1700 | 0 | incomplete += "incomplete-"; |
1701 | 0 | incomplete += aLog->mPrefix; |
1702 | 0 | MOZ_ASSERT(!aLog->mFile); |
1703 | 0 | aLog->mFile = CreateTempFile(incomplete.get()); |
1704 | 0 | if (NS_WARN_IF(!aLog->mFile)) { |
1705 | 0 | return NS_ERROR_UNEXPECTED; |
1706 | 0 | } |
1707 | 0 | |
1708 | 0 | MOZ_ASSERT(!aLog->mStream); |
1709 | 0 | nsresult rv = aLog->mFile->OpenANSIFileDesc("w", &aLog->mStream); |
1710 | 0 | if (NS_WARN_IF(NS_FAILED(rv))) { |
1711 | 0 | return NS_ERROR_UNEXPECTED; |
1712 | 0 | } |
1713 | 0 | MozillaRegisterDebugFILE(aLog->mStream); |
1714 | 0 | return NS_OK; |
1715 | 0 | } |
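// Editor's note (lifecycle sketch; file names hypothetical): together with
// CloseLog below, OpenLog implements a write-then-rename protocol:
//
//   OpenLog()  creates and writes  incomplete-cc-edges.1234.log
//   CloseLog() renames it to       cc-edges.1234.log
//
// so a script polling the log directory never observes a half-written dump
// under the final name.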
1716 | | |
1717 | | nsresult CloseLog(FileInfo* aLog, const nsAString& aCollectorKind) |
1718 | 0 | { |
1719 | 0 | MOZ_ASSERT(aLog->mStream); |
1720 | 0 | MOZ_ASSERT(aLog->mFile); |
1721 | 0 |
1722 | 0 | MozillaUnRegisterDebugFILE(aLog->mStream); |
1723 | 0 | fclose(aLog->mStream); |
1724 | 0 | aLog->mStream = nullptr; |
1725 | 0 |
1726 | 0 | // Strip off "incomplete-". |
1727 | 0 | nsCOMPtr<nsIFile> logFileFinalDestination = |
1728 | 0 | CreateTempFile(aLog->mPrefix); |
1729 | 0 | if (NS_WARN_IF(!logFileFinalDestination)) { |
1730 | 0 | return NS_ERROR_UNEXPECTED; |
1731 | 0 | } |
1732 | 0 | |
1733 | 0 | nsAutoString logFileFinalDestinationName; |
1734 | 0 | logFileFinalDestination->GetLeafName(logFileFinalDestinationName); |
1735 | 0 | if (NS_WARN_IF(logFileFinalDestinationName.IsEmpty())) { |
1736 | 0 | return NS_ERROR_UNEXPECTED; |
1737 | 0 | } |
1738 | 0 | |
1739 | 0 | aLog->mFile->MoveTo(/* directory */ nullptr, logFileFinalDestinationName); |
1740 | 0 |
1741 | 0 | // Save the file path. |
1742 | 0 | aLog->mFile = logFileFinalDestination; |
1743 | 0 |
1744 | 0 | // Log to the error console. |
1745 | 0 | nsAutoString logPath; |
1746 | 0 | logFileFinalDestination->GetPath(logPath); |
1747 | 0 | nsAutoString msg = aCollectorKind + |
1748 | 0 | NS_LITERAL_STRING(" Collector log dumped to ") + logPath; |
1749 | 0 |
1750 | 0 | // We don't want any JS to run between ScanRoots and CollectWhite calls, |
1751 | 0 | // and since ScanRoots calls this method, better to log the message |
1752 | 0 | // asynchronously. |
1753 | 0 | RefPtr<LogStringMessageAsync> log = new LogStringMessageAsync(msg); |
1754 | 0 | NS_DispatchToCurrentThread(log); |
1755 | 0 | return NS_OK; |
1756 | 0 | } |
1757 | | |
1758 | | int32_t mProcessIdentifier; |
1759 | | nsString mFilenameIdentifier; |
1760 | | FileInfo mGCLog; |
1761 | | FileInfo mCCLog; |
1762 | | }; |
1763 | | |
1764 | | NS_IMPL_ISUPPORTS(nsCycleCollectorLogSinkToFile, nsICycleCollectorLogSink) |
1765 | | |
1766 | | |
1767 | | class nsCycleCollectorLogger final : public nsICycleCollectorListener |
1768 | | { |
1769 | | ~nsCycleCollectorLogger() |
1770 | 0 | { |
1771 | 0 | ClearDescribers(); |
1772 | 0 | } |
1773 | | |
1774 | | public: |
1775 | | nsCycleCollectorLogger() |
1776 | | : mLogSink(nsCycleCollector_createLogSink()) |
1777 | | , mWantAllTraces(false) |
1778 | | , mDisableLog(false) |
1779 | | , mWantAfterProcessing(false) |
1780 | | , mCCLog(nullptr) |
1781 | 0 | { |
1782 | 0 | } |
1783 | | |
1784 | | NS_DECL_ISUPPORTS |
1785 | | |
1786 | | void SetAllTraces() |
1787 | 0 | { |
1788 | 0 | mWantAllTraces = true; |
1789 | 0 | } |
1790 | | |
1791 | | bool IsAllTraces() |
1792 | 0 | { |
1793 | 0 | return mWantAllTraces; |
1794 | 0 | } |
1795 | | |
1796 | | NS_IMETHOD AllTraces(nsICycleCollectorListener** aListener) override |
1797 | 0 | { |
1798 | 0 | SetAllTraces(); |
1799 | 0 | NS_ADDREF(*aListener = this); |
1800 | 0 | return NS_OK; |
1801 | 0 | } |
1802 | | |
1803 | | NS_IMETHOD GetWantAllTraces(bool* aAllTraces) override |
1804 | 0 | { |
1805 | 0 | *aAllTraces = mWantAllTraces; |
1806 | 0 | return NS_OK; |
1807 | 0 | } |
1808 | | |
1809 | | NS_IMETHOD GetDisableLog(bool* aDisableLog) override |
1810 | 0 | { |
1811 | 0 | *aDisableLog = mDisableLog; |
1812 | 0 | return NS_OK; |
1813 | 0 | } |
1814 | | |
1815 | | NS_IMETHOD SetDisableLog(bool aDisableLog) override |
1816 | 0 | { |
1817 | 0 | mDisableLog = aDisableLog; |
1818 | 0 | return NS_OK; |
1819 | 0 | } |
1820 | | |
1821 | | NS_IMETHOD GetWantAfterProcessing(bool* aWantAfterProcessing) override |
1822 | 0 | { |
1823 | 0 | *aWantAfterProcessing = mWantAfterProcessing; |
1824 | 0 | return NS_OK; |
1825 | 0 | } |
1826 | | |
1827 | | NS_IMETHOD SetWantAfterProcessing(bool aWantAfterProcessing) override |
1828 | 0 | { |
1829 | 0 | mWantAfterProcessing = aWantAfterProcessing; |
1830 | 0 | return NS_OK; |
1831 | 0 | } |
1832 | | |
1833 | | NS_IMETHOD GetLogSink(nsICycleCollectorLogSink** aLogSink) override |
1834 | 0 | { |
1835 | 0 | NS_ADDREF(*aLogSink = mLogSink); |
1836 | 0 | return NS_OK; |
1837 | 0 | } |
1838 | | |
1839 | | NS_IMETHOD SetLogSink(nsICycleCollectorLogSink* aLogSink) override |
1840 | 0 | { |
1841 | 0 | if (!aLogSink) { |
1842 | 0 | return NS_ERROR_INVALID_ARG; |
1843 | 0 | } |
1844 | 0 | mLogSink = aLogSink; |
1845 | 0 | return NS_OK; |
1846 | 0 | } |
1847 | | |
1848 | | nsresult Begin() |
1849 | 0 | { |
1850 | 0 | nsresult rv; |
1851 | 0 |
1852 | 0 | mCurrentAddress.AssignLiteral("0x"); |
1853 | 0 | ClearDescribers(); |
1854 | 0 | if (mDisableLog) { |
1855 | 0 | return NS_OK; |
1856 | 0 | } |
1857 | 0 | |
1858 | 0 | FILE* gcLog; |
1859 | 0 | rv = mLogSink->Open(&gcLog, &mCCLog); |
1860 | 0 | NS_ENSURE_SUCCESS(rv, rv); |
1861 | 0 | // Dump the JS heap. |
1862 | 0 | CollectorData* data = sCollectorData.get(); |
1863 | 0 | if (data && data->mContext) { |
1864 | 0 | data->mContext->Runtime()->DumpJSHeap(gcLog); |
1865 | 0 | } |
1866 | 0 | rv = mLogSink->CloseGCLog(); |
1867 | 0 | NS_ENSURE_SUCCESS(rv, rv); |
1868 | 0 |
1869 | 0 | fprintf(mCCLog, "# WantAllTraces=%s\n", mWantAllTraces ? "true" : "false"); |
1870 | 0 | return NS_OK; |
1871 | 0 | } |
1872 | | void NoteRefCountedObject(uint64_t aAddress, uint32_t aRefCount, |
1873 | | const char* aObjectDescription) |
1874 | 0 | { |
1875 | 0 | if (!mDisableLog) { |
1876 | 0 | fprintf(mCCLog, "%p [rc=%u] %s\n", (void*)aAddress, aRefCount, |
1877 | 0 | aObjectDescription); |
1878 | 0 | } |
1879 | 0 | if (mWantAfterProcessing) { |
1880 | 0 | CCGraphDescriber* d = new CCGraphDescriber(); |
1881 | 0 | mDescribers.insertBack(d); |
1882 | 0 | mCurrentAddress.AssignLiteral("0x"); |
1883 | 0 | mCurrentAddress.AppendInt(aAddress, 16); |
1884 | 0 | d->mType = CCGraphDescriber::eRefCountedObject; |
1885 | 0 | d->mAddress = mCurrentAddress; |
1886 | 0 | d->mCnt = aRefCount; |
1887 | 0 | d->mName.Append(aObjectDescription); |
1888 | 0 | } |
1889 | 0 | } |
1890 | | void NoteGCedObject(uint64_t aAddress, bool aMarked, |
1891 | | const char* aObjectDescription, |
1892 | | uint64_t aCompartmentAddress) |
1893 | 0 | { |
1894 | 0 | if (!mDisableLog) { |
1895 | 0 | fprintf(mCCLog, "%p [gc%s] %s\n", (void*)aAddress, |
1896 | 0 | aMarked ? ".marked" : "", aObjectDescription); |
1897 | 0 | } |
1898 | 0 | if (mWantAfterProcessing) { |
1899 | 0 | CCGraphDescriber* d = new CCGraphDescriber(); |
1900 | 0 | mDescribers.insertBack(d); |
1901 | 0 | mCurrentAddress.AssignLiteral("0x"); |
1902 | 0 | mCurrentAddress.AppendInt(aAddress, 16); |
1903 | 0 | d->mType = aMarked ? CCGraphDescriber::eGCMarkedObject : |
1904 | 0 | CCGraphDescriber::eGCedObject; |
1905 | 0 | d->mAddress = mCurrentAddress; |
1906 | 0 | d->mName.Append(aObjectDescription); |
1907 | 0 | if (aCompartmentAddress) { |
1908 | 0 | d->mCompartmentOrToAddress.AssignLiteral("0x"); |
1909 | 0 | d->mCompartmentOrToAddress.AppendInt(aCompartmentAddress, 16); |
1910 | 0 | } else { |
1911 | 0 | d->mCompartmentOrToAddress.SetIsVoid(true); |
1912 | 0 | } |
1913 | 0 | } |
1914 | 0 | } |
1915 | | void NoteEdge(uint64_t aToAddress, const char* aEdgeName) |
1916 | 0 | { |
1917 | 0 | if (!mDisableLog) { |
1918 | 0 | fprintf(mCCLog, "> %p %s\n", (void*)aToAddress, aEdgeName); |
1919 | 0 | } |
1920 | 0 | if (mWantAfterProcessing) { |
1921 | 0 | CCGraphDescriber* d = new CCGraphDescriber(); |
1922 | 0 | mDescribers.insertBack(d); |
1923 | 0 | d->mType = CCGraphDescriber::eEdge; |
1924 | 0 | d->mAddress = mCurrentAddress; |
1925 | 0 | d->mCompartmentOrToAddress.AssignLiteral("0x"); |
1926 | 0 | d->mCompartmentOrToAddress.AppendInt(aToAddress, 16); |
1927 | 0 | d->mName.Append(aEdgeName); |
1928 | 0 | } |
1929 | 0 | } |
1930 | | void NoteWeakMapEntry(uint64_t aMap, uint64_t aKey, |
1931 | | uint64_t aKeyDelegate, uint64_t aValue) |
1932 | 0 | { |
1933 | 0 | if (!mDisableLog) { |
1934 | 0 | fprintf(mCCLog, "WeakMapEntry map=%p key=%p keyDelegate=%p value=%p\n", |
1935 | 0 | (void*)aMap, (void*)aKey, (void*)aKeyDelegate, (void*)aValue); |
1936 | 0 | } |
1937 | 0 | // We don't support after-processing for weak map entries. |
1938 | 0 | } |
1939 | | void NoteIncrementalRoot(uint64_t aAddress) |
1940 | 0 | { |
1941 | 0 | if (!mDisableLog) { |
1942 | 0 | fprintf(mCCLog, "IncrementalRoot %p\n", (void*)aAddress); |
1943 | 0 | } |
1944 | 0 | // We don't support after-processing for incremental roots. |
1945 | 0 | } |
1946 | | void BeginResults() |
1947 | 0 | { |
1948 | 0 | if (!mDisableLog) { |
1949 | 0 | fputs("==========\n", mCCLog); |
1950 | 0 | } |
1951 | 0 | } |
1952 | | void DescribeRoot(uint64_t aAddress, uint32_t aKnownEdges) |
1953 | 0 | { |
1954 | 0 | if (!mDisableLog) { |
1955 | 0 | fprintf(mCCLog, "%p [known=%u]\n", (void*)aAddress, aKnownEdges); |
1956 | 0 | } |
1957 | 0 | if (mWantAfterProcessing) { |
1958 | 0 | CCGraphDescriber* d = new CCGraphDescriber(); |
1959 | 0 | mDescribers.insertBack(d); |
1960 | 0 | d->mType = CCGraphDescriber::eRoot; |
1961 | 0 | d->mAddress.AppendInt(aAddress, 16); |
1962 | 0 | d->mCnt = aKnownEdges; |
1963 | 0 | } |
1964 | 0 | } |
1965 | | void DescribeGarbage(uint64_t aAddress) |
1966 | 0 | { |
1967 | 0 | if (!mDisableLog) { |
1968 | 0 | fprintf(mCCLog, "%p [garbage]\n", (void*)aAddress); |
1969 | 0 | } |
1970 | 0 | if (mWantAfterProcessing) { |
1971 | 0 | CCGraphDescriber* d = new CCGraphDescriber(); |
1972 | 0 | mDescribers.insertBack(d); |
1973 | 0 | d->mType = CCGraphDescriber::eGarbage; |
1974 | 0 | d->mAddress.AppendInt(aAddress, 16); |
1975 | 0 | } |
1976 | 0 | } |
1977 | | void End() |
1978 | 0 | { |
1979 | 0 | if (!mDisableLog) { |
1980 | 0 | mCCLog = nullptr; |
1981 | 0 | Unused << NS_WARN_IF(NS_FAILED(mLogSink->CloseCCLog())); |
1982 | 0 | } |
1983 | 0 | } |
1984 | | NS_IMETHOD ProcessNext(nsICycleCollectorHandler* aHandler, |
1985 | | bool* aCanContinue) override |
1986 | 0 | { |
1987 | 0 | if (NS_WARN_IF(!aHandler) || NS_WARN_IF(!mWantAfterProcessing)) { |
1988 | 0 | return NS_ERROR_UNEXPECTED; |
1989 | 0 | } |
1990 | 0 | CCGraphDescriber* d = mDescribers.popFirst(); |
1991 | 0 | if (d) { |
1992 | 0 | switch (d->mType) { |
1993 | 0 | case CCGraphDescriber::eRefCountedObject: |
1994 | 0 | aHandler->NoteRefCountedObject(d->mAddress, |
1995 | 0 | d->mCnt, |
1996 | 0 | d->mName); |
1997 | 0 | break; |
1998 | 0 | case CCGraphDescriber::eGCedObject: |
1999 | 0 | case CCGraphDescriber::eGCMarkedObject: |
2000 | 0 | aHandler->NoteGCedObject(d->mAddress, |
2001 | 0 | d->mType == |
2002 | 0 | CCGraphDescriber::eGCMarkedObject, |
2003 | 0 | d->mName, |
2004 | 0 | d->mCompartmentOrToAddress); |
2005 | 0 | break; |
2006 | 0 | case CCGraphDescriber::eEdge: |
2007 | 0 | aHandler->NoteEdge(d->mAddress, |
2008 | 0 | d->mCompartmentOrToAddress, |
2009 | 0 | d->mName); |
2010 | 0 | break; |
2011 | 0 | case CCGraphDescriber::eRoot: |
2012 | 0 | aHandler->DescribeRoot(d->mAddress, |
2013 | 0 | d->mCnt); |
2014 | 0 | break; |
2015 | 0 | case CCGraphDescriber::eGarbage: |
2016 | 0 | aHandler->DescribeGarbage(d->mAddress); |
2017 | 0 | break; |
2018 | 0 | case CCGraphDescriber::eUnknown: |
2019 | 0 | MOZ_ASSERT_UNREACHABLE("CCGraphDescriber::eUnknown"); |
2020 | 0 | break; |
2021 | 0 | } |
2022 | 0 | delete d; |
2023 | 0 | } |
2024 | 0 | if (!(*aCanContinue = !mDescribers.isEmpty())) { |
2025 | 0 | mCurrentAddress.AssignLiteral("0x"); |
2026 | 0 | } |
2027 | 0 | return NS_OK; |
2028 | 0 | } |
2029 | | NS_IMETHOD AsLogger(nsCycleCollectorLogger** aRetVal) override |
2030 | 0 | { |
2031 | 0 | RefPtr<nsCycleCollectorLogger> rval = this; |
2032 | 0 | rval.forget(aRetVal); |
2033 | 0 | return NS_OK; |
2034 | 0 | } |
2035 | | private: |
2036 | | void ClearDescribers() |
2037 | 0 | { |
2038 | 0 | CCGraphDescriber* d; |
2039 | 0 | while ((d = mDescribers.popFirst())) { |
2040 | 0 | delete d; |
2041 | 0 | } |
2042 | 0 | } |
2043 | | |
2044 | | nsCOMPtr<nsICycleCollectorLogSink> mLogSink; |
2045 | | bool mWantAllTraces; |
2046 | | bool mDisableLog; |
2047 | | bool mWantAfterProcessing; |
2048 | | nsCString mCurrentAddress; |
2049 | | mozilla::LinkedList<CCGraphDescriber> mDescribers; |
2050 | | FILE* mCCLog; |
2051 | | }; |
2052 | | |
2053 | | NS_IMPL_ISUPPORTS(nsCycleCollectorLogger, nsICycleCollectorListener) |
2054 | | |
2055 | | already_AddRefed<nsICycleCollectorListener> |
2056 | | nsCycleCollector_createLogger() |
2057 | 0 | { |
2058 | 0 | nsCOMPtr<nsICycleCollectorListener> logger = new nsCycleCollectorLogger(); |
2059 | 0 | return logger.forget(); |
2060 | 0 | } |
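// Editor's sketch of a consumer (the `handler` object, an assumed
// nsICycleCollectorHandler implementation, is hypothetical): after a
// collection run with SetWantAfterProcessing(true), the buffered
// CCGraphDescribers are drained one at a time:
//
//   bool canContinue = true;
//   while (canContinue) {
//     logger->ProcessNext(handler, &canContinue);
//   }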
2061 | | |
2062 | | static bool |
2063 | | GCThingIsGrayCCThing(JS::GCCellPtr thing) |
2064 | 0 | { |
2065 | 0 | return AddToCCKind(thing.kind()) && |
2066 | 0 | JS::GCThingIsMarkedGray(thing); |
2067 | 0 | } |
2068 | | |
2069 | | static bool |
2070 | | ValueIsGrayCCThing(const JS::Value& value) |
2071 | 0 | { |
2072 | 0 | return AddToCCKind(value.traceKind()) && |
2073 | 0 | JS::GCThingIsMarkedGray(value.toGCCellPtr()); |
2074 | 0 | } |
2075 | | |
2076 | | //////////////////////////////////////////////////////////////////////// |
2077 | | // Bacon & Rajan's |MarkRoots| routine. |
2078 | | //////////////////////////////////////////////////////////////////////// |
2079 | | |
2080 | | class CCGraphBuilder final : public nsCycleCollectionTraversalCallback, |
2081 | | public nsCycleCollectionNoteRootCallback |
2082 | | { |
2083 | | private: |
2084 | | CCGraph& mGraph; |
2085 | | CycleCollectorResults& mResults; |
2086 | | NodePool::Builder mNodeBuilder; |
2087 | | EdgePool::Builder mEdgeBuilder; |
2088 | | MOZ_INIT_OUTSIDE_CTOR PtrInfo* mCurrPi; |
2089 | | nsCycleCollectionParticipant* mJSParticipant; |
2090 | | nsCycleCollectionParticipant* mJSZoneParticipant; |
2091 | | nsCString mNextEdgeName; |
2092 | | RefPtr<nsCycleCollectorLogger> mLogger; |
2093 | | bool mMergeZones; |
2094 | | nsAutoPtr<NodePool::Enumerator> mCurrNode; |
2095 | | uint32_t mNoteChildCount; |
2096 | | |
2097 | | struct PtrInfoCache : public MruCache<void*, PtrInfo*, PtrInfoCache, 491> |
2098 | | { |
2099 | 0 | static HashNumber Hash(const void* aKey) { return HashGeneric(aKey); } |
2100 | | static bool Match(const void* aKey, const PtrInfo* aVal) |
2101 | 0 | { |
2102 | 0 | return aVal->mPointer == aKey; |
2103 | 0 | } |
2104 | | }; |
2105 | | |
2106 | | PtrInfoCache mGraphCache; |
2107 | | |
2108 | | public: |
2109 | | CCGraphBuilder(CCGraph& aGraph, |
2110 | | CycleCollectorResults& aResults, |
2111 | | CycleCollectedJSRuntime* aCCRuntime, |
2112 | | nsCycleCollectorLogger* aLogger, |
2113 | | bool aMergeZones); |
2114 | | virtual ~CCGraphBuilder(); |
2115 | | |
2116 | | bool WantAllTraces() const |
2117 | 0 | { |
2118 | 0 | return nsCycleCollectionNoteRootCallback::WantAllTraces(); |
2119 | 0 | } |
2120 | | |
2121 | | bool AddPurpleRoot(void* aRoot, nsCycleCollectionParticipant* aParti); |
2122 | | |
2123 | | // This is called when all roots have been added to the graph, to prepare for BuildGraph(). |
2124 | | void DoneAddingRoots(); |
2125 | | |
2126 | | // Do some work traversing nodes in the graph. Returns true if this graph building is finished. |
2127 | | bool BuildGraph(SliceBudget& aBudget); |
2128 | | |
2129 | | void RemoveCachedEntry(void* aPtr) |
2130 | 0 | { |
2131 | 0 | mGraphCache.Remove(aPtr); |
2132 | 0 | } |
2133 | | private: |
2134 | | PtrInfo* AddNode(void* aPtr, nsCycleCollectionParticipant* aParticipant); |
2135 | | PtrInfo* AddWeakMapNode(JS::GCCellPtr aThing); |
2136 | | PtrInfo* AddWeakMapNode(JSObject* aObject); |
2137 | | |
2138 | | void SetFirstChild() |
2139 | 0 | { |
2140 | 0 | mCurrPi->SetFirstChild(mEdgeBuilder.Mark()); |
2141 | 0 | } |
2142 | | |
2143 | | void SetLastChild() |
2144 | 0 | { |
2145 | 0 | mCurrPi->SetLastChild(mEdgeBuilder.Mark()); |
2146 | 0 | } |
2147 | | |
2148 | | public: |
2149 | | // nsCycleCollectionNoteRootCallback methods. |
2150 | | NS_IMETHOD_(void) NoteXPCOMRoot(nsISupports* aRoot, |
2151 | | nsCycleCollectionParticipant* aParticipant) |
2152 | | override; |
2153 | | NS_IMETHOD_(void) NoteJSRoot(JSObject* aRoot) override; |
2154 | | NS_IMETHOD_(void) NoteNativeRoot(void* aRoot, |
2155 | | nsCycleCollectionParticipant* aParticipant) |
2156 | | override; |
2157 | | NS_IMETHOD_(void) NoteWeakMapping(JSObject* aMap, JS::GCCellPtr aKey, |
2158 | | JSObject* aKdelegate, JS::GCCellPtr aVal) |
2159 | | override; |
2160 | | |
2161 | | // nsCycleCollectionTraversalCallback methods. |
2162 | | NS_IMETHOD_(void) DescribeRefCountedNode(nsrefcnt aRefCount, |
2163 | | const char* aObjName) override; |
2164 | | NS_IMETHOD_(void) DescribeGCedNode(bool aIsMarked, const char* aObjName, |
2165 | | uint64_t aCompartmentAddress) override; |
2166 | | |
2167 | | NS_IMETHOD_(void) NoteXPCOMChild(nsISupports* aChild) override; |
2168 | | NS_IMETHOD_(void) NoteJSChild(const JS::GCCellPtr& aThing) override; |
2169 | | NS_IMETHOD_(void) NoteNativeChild(void* aChild, |
2170 | | nsCycleCollectionParticipant* aParticipant) |
2171 | | override; |
2172 | | NS_IMETHOD_(void) NoteNextEdgeName(const char* aName) override; |
2173 | | |
2174 | | private: |
2175 | | void NoteJSChild(JS::GCCellPtr aChild); |
2176 | | |
2177 | | NS_IMETHOD_(void) NoteRoot(void* aRoot, |
2178 | | nsCycleCollectionParticipant* aParticipant) |
2179 | 0 | { |
2180 | 0 | MOZ_ASSERT(aRoot); |
2181 | 0 | MOZ_ASSERT(aParticipant); |
2182 | 0 |
2183 | 0 | if (!aParticipant->CanSkipInCC(aRoot) || MOZ_UNLIKELY(WantAllTraces())) { |
2184 | 0 | AddNode(aRoot, aParticipant); |
2185 | 0 | } |
2186 | 0 | } |
2187 | | |
2188 | | NS_IMETHOD_(void) NoteChild(void* aChild, nsCycleCollectionParticipant* aCp, |
2189 | | nsCString& aEdgeName) |
2190 | 0 | { |
2191 | 0 | PtrInfo* childPi = AddNode(aChild, aCp); |
2192 | 0 | if (!childPi) { |
2193 | 0 | return; |
2194 | 0 | } |
2195 | 0 | mEdgeBuilder.Add(childPi); |
2196 | 0 | if (mLogger) { |
2197 | 0 | mLogger->NoteEdge((uint64_t)aChild, aEdgeName.get()); |
2198 | 0 | } |
2199 | 0 | ++childPi->mInternalRefs; |
2200 | 0 | } |
2201 | | |
2202 | | JS::Zone* MergeZone(JS::GCCellPtr aGcthing) |
2203 | 0 | { |
2204 | 0 | if (!mMergeZones) { |
2205 | 0 | return nullptr; |
2206 | 0 | } |
2207 | 0 | JS::Zone* zone = JS::GetTenuredGCThingZone(aGcthing); |
2208 | 0 | if (js::IsSystemZone(zone)) { |
2209 | 0 | return nullptr; |
2210 | 0 | } |
2211 | 0 | return zone; |
2212 | 0 | } |
2213 | | }; |
2214 | | |
2215 | | CCGraphBuilder::CCGraphBuilder(CCGraph& aGraph, |
2216 | | CycleCollectorResults& aResults, |
2217 | | CycleCollectedJSRuntime* aCCRuntime, |
2218 | | nsCycleCollectorLogger* aLogger, |
2219 | | bool aMergeZones) |
2220 | | : mGraph(aGraph) |
2221 | | , mResults(aResults) |
2222 | | , mNodeBuilder(aGraph.mNodes) |
2223 | | , mEdgeBuilder(aGraph.mEdges) |
2224 | | , mJSParticipant(nullptr) |
2225 | | , mJSZoneParticipant(nullptr) |
2226 | | , mLogger(aLogger) |
2227 | | , mMergeZones(aMergeZones) |
2228 | | , mNoteChildCount(0) |
2229 | 0 | { |
2230 | 0 | // 4096 is an allocation bucket size. |
2231 | 0 | static_assert(sizeof(CCGraphBuilder) <= 4096, |
2232 | 0 | "Don't create too large CCGraphBuilder objects"); |
2233 | 0 |
2234 | 0 | if (aCCRuntime) { |
2235 | 0 | mJSParticipant = aCCRuntime->GCThingParticipant(); |
2236 | 0 | mJSZoneParticipant = aCCRuntime->ZoneParticipant(); |
2237 | 0 | } |
2238 | 0 |
2239 | 0 | if (mLogger) { |
2240 | 0 | mFlags |= nsCycleCollectionTraversalCallback::WANT_DEBUG_INFO; |
2241 | 0 | if (mLogger->IsAllTraces()) { |
2242 | 0 | mFlags |= nsCycleCollectionTraversalCallback::WANT_ALL_TRACES; |
2243 | 0 | mWantAllTraces = true; // for nsCycleCollectionNoteRootCallback |
2244 | 0 | } |
2245 | 0 | } |
2246 | 0 |
2247 | 0 | mMergeZones = mMergeZones && MOZ_LIKELY(!WantAllTraces()); |
2248 | 0 |
2249 | 0 | MOZ_ASSERT(nsCycleCollectionNoteRootCallback::WantAllTraces() == |
2250 | 0 | nsCycleCollectionTraversalCallback::WantAllTraces()); |
2251 | 0 | } |
2252 | | |
2253 | | CCGraphBuilder::~CCGraphBuilder() |
2254 | 0 | { |
2255 | 0 | } |
2256 | | |
2257 | | PtrInfo* |
2258 | | CCGraphBuilder::AddNode(void* aPtr, nsCycleCollectionParticipant* aParticipant) |
2259 | 0 | { |
2260 | 0 | if (mGraph.mOutOfMemory) { |
2261 | 0 | return nullptr; |
2262 | 0 | } |
2263 | 0 | |
2264 | 0 | PtrInfoCache::Entry cached = mGraphCache.Lookup(aPtr); |
2265 | 0 | if (cached) { |
2266 | 0 | MOZ_ASSERT(cached.Data()->mParticipant == aParticipant, |
2267 | 0 | "nsCycleCollectionParticipant shouldn't change!"); |
2268 | 0 | return cached.Data(); |
2269 | 0 | } |
2270 | 0 |
2271 | 0 | PtrInfo* result; |
2272 | 0 | auto p = mGraph.mPtrInfoMap.lookupForAdd(aPtr); |
2273 | 0 | if (!p) { |
2274 | 0 | // New entry |
2275 | 0 | result = mNodeBuilder.Add(aPtr, aParticipant); |
2276 | 0 | if (!result) { |
2277 | 0 | return nullptr; |
2278 | 0 | } |
2279 | 0 | |
2280 | 0 | if (!mGraph.mPtrInfoMap.add(p, result)) { |
2281 | 0 | // `result` leaks here, but we can't free it because it's |
2282 | 0 | // pool-allocated within NodePool. |
2283 | 0 | mGraph.mOutOfMemory = true; |
2284 | 0 | MOZ_ASSERT(false, "OOM while building cycle collector graph"); |
2285 | 0 | return nullptr; |
2286 | 0 | } |
2287 | 0 |
2288 | 0 | } else { |
2289 | 0 | result = *p; |
2290 | 0 | MOZ_ASSERT(result->mParticipant == aParticipant, |
2291 | 0 | "nsCycleCollectionParticipant shouldn't change!"); |
2292 | 0 | } |
2293 | 0 |
2294 | 0 | cached.Set(result); |
2295 | 0 |
2296 | 0 | return result; |
2297 | 0 | } |
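// Editor's note on the two-level lookup above: mGraphCache is a small
// direct-mapped MRU cache (491 slots) in front of the full mPtrInfoMap hash
// table, so repeated lookups of hot pointers skip the hash-table probe:
//
//   cache hit            -> return the cached PtrInfo
//   cache miss, map hit  -> reuse the existing PtrInfo and refill the cache
//   cache miss, map miss -> allocate a PtrInfo from mNodeBuilder, add it to
//                           the map, then refill the cache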
2298 | | |
2299 | | bool |
2300 | | CCGraphBuilder::AddPurpleRoot(void* aRoot, nsCycleCollectionParticipant* aParti) |
2301 | 0 | { |
2302 | 0 | ToParticipant(aRoot, &aParti); |
2303 | 0 |
2304 | 0 | if (WantAllTraces() || !aParti->CanSkipInCC(aRoot)) { |
2305 | 0 | PtrInfo* pinfo = AddNode(aRoot, aParti); |
2306 | 0 | if (!pinfo) { |
2307 | 0 | return false; |
2308 | 0 | } |
2309 | 0 | } |
2310 | 0 | |
2311 | 0 | return true; |
2312 | 0 | } |
2313 | | |
2314 | | void |
2315 | | CCGraphBuilder::DoneAddingRoots() |
2316 | 0 | { |
2317 | 0 | // We've finished adding roots, and everything in the graph is a root. |
2318 | 0 | mGraph.mRootCount = mGraph.MapCount(); |
2319 | 0 |
2320 | 0 | mCurrNode = new NodePool::Enumerator(mGraph.mNodes); |
2321 | 0 | } |
2322 | | |
2323 | | MOZ_NEVER_INLINE bool |
2324 | | CCGraphBuilder::BuildGraph(SliceBudget& aBudget) |
2325 | 0 | { |
2326 | 0 | const intptr_t kNumNodesBetweenTimeChecks = 1000; |
2327 | 0 | const intptr_t kStep = SliceBudget::CounterReset / kNumNodesBetweenTimeChecks; |
2328 | 0 |
2329 | 0 | MOZ_ASSERT(mCurrNode); |
2330 | 0 |
2331 | 0 | while (!aBudget.isOverBudget() && !mCurrNode->IsDone()) { |
2332 | 0 | mNoteChildCount = 0; |
2333 | 0 |
2334 | 0 | PtrInfo* pi = mCurrNode->GetNext(); |
2335 | 0 | if (!pi) { |
2336 | 0 | MOZ_CRASH(); |
2337 | 0 | } |
2338 | 0 |
2339 | 0 | mCurrPi = pi; |
2340 | 0 |
2341 | 0 | // We need to call SetFirstChild() even on deleted nodes, to set their |
2342 | 0 | // firstChild() that may be read by a prior non-deleted neighbor. |
2343 | 0 | SetFirstChild(); |
2344 | 0 |
2345 | 0 | if (pi->mParticipant) { |
2346 | 0 | nsresult rv = pi->mParticipant->TraverseNativeAndJS(pi->mPointer, *this); |
2347 | 0 | MOZ_RELEASE_ASSERT(!NS_FAILED(rv), "Cycle collector Traverse method failed"); |
2348 | 0 | } |
2349 | 0 |
2350 | 0 | if (mCurrNode->AtBlockEnd()) { |
2351 | 0 | SetLastChild(); |
2352 | 0 | } |
2353 | 0 |
2354 | 0 | aBudget.step(kStep * (mNoteChildCount + 1)); |
2355 | 0 | } |
2356 | 0 |
2357 | 0 | if (!mCurrNode->IsDone()) { |
2358 | 0 | return false; |
2359 | 0 | } |
2360 | 0 | |
2361 | 0 | if (mGraph.mRootCount > 0) { |
2362 | 0 | SetLastChild(); |
2363 | 0 | } |
2364 | 0 |
2365 | 0 | mCurrNode = nullptr; |
2366 | 0 |
2367 | 0 | return true; |
2368 | 0 | } |
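// Editor's note on the budget arithmetic (based on the constant names; the
// exact SliceBudget internals are assumed): the budget only consults the
// clock when its internal counter crosses CounterReset. With
// kStep = CounterReset / 1000, a node with no noted children advances the
// counter by 1/1000 of that interval, so the deadline check runs roughly
// once per 1000 cheap nodes, and a node that noted N children is charged as
// (N + 1) nodes' worth of work.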
2369 | | |
2370 | | NS_IMETHODIMP_(void) |
2371 | | CCGraphBuilder::NoteXPCOMRoot(nsISupports* aRoot, |
2372 | | nsCycleCollectionParticipant* aParticipant) |
2373 | 0 | { |
2374 | 0 | MOZ_ASSERT(aRoot == CanonicalizeXPCOMParticipant(aRoot)); |
2375 | 0 |
2376 | | #ifdef DEBUG |
2377 | | nsXPCOMCycleCollectionParticipant* cp; |
2378 | | ToParticipant(aRoot, &cp); |
2379 | | MOZ_ASSERT(aParticipant == cp); |
2380 | | #endif |
2381 | |
2382 | 0 | NoteRoot(aRoot, aParticipant); |
2383 | 0 | } |
2384 | | |
2385 | | NS_IMETHODIMP_(void) |
2386 | | CCGraphBuilder::NoteJSRoot(JSObject* aRoot) |
2387 | 0 | { |
2388 | 0 | if (JS::Zone* zone = MergeZone(JS::GCCellPtr(aRoot))) { |
2389 | 0 | NoteRoot(zone, mJSZoneParticipant); |
2390 | 0 | } else { |
2391 | 0 | NoteRoot(aRoot, mJSParticipant); |
2392 | 0 | } |
2393 | 0 | } |
2394 | | |
2395 | | NS_IMETHODIMP_(void) |
2396 | | CCGraphBuilder::NoteNativeRoot(void* aRoot, |
2397 | | nsCycleCollectionParticipant* aParticipant) |
2398 | 0 | { |
2399 | 0 | NoteRoot(aRoot, aParticipant); |
2400 | 0 | } |
2401 | | |
2402 | | NS_IMETHODIMP_(void) |
2403 | | CCGraphBuilder::DescribeRefCountedNode(nsrefcnt aRefCount, const char* aObjName) |
2404 | 0 | { |
2405 | 0 | mCurrPi->AnnotatedReleaseAssert(aRefCount != 0, |
2406 | 0 | "CCed refcounted object has zero refcount"); |
2407 | 0 | mCurrPi->AnnotatedReleaseAssert(aRefCount != UINT32_MAX, |
2408 | 0 | "CCed refcounted object has overflowing refcount"); |
2409 | 0 |
2410 | 0 | mResults.mVisitedRefCounted++; |
2411 | 0 |
2412 | 0 | if (mLogger) { |
2413 | 0 | mLogger->NoteRefCountedObject((uint64_t)mCurrPi->mPointer, aRefCount, |
2414 | 0 | aObjName); |
2415 | 0 | } |
2416 | 0 |
2417 | 0 | mCurrPi->mRefCount = aRefCount; |
2418 | 0 | } |
2419 | | |
2420 | | NS_IMETHODIMP_(void) |
2421 | | CCGraphBuilder::DescribeGCedNode(bool aIsMarked, const char* aObjName, |
2422 | | uint64_t aCompartmentAddress) |
2423 | 0 | { |
2424 | 0 | uint32_t refCount = aIsMarked ? UINT32_MAX : 0; |
2425 | 0 | mResults.mVisitedGCed++; |
2426 | 0 |
2427 | 0 | if (mLogger) { |
2428 | 0 | mLogger->NoteGCedObject((uint64_t)mCurrPi->mPointer, aIsMarked, |
2429 | 0 | aObjName, aCompartmentAddress); |
2430 | 0 | } |
2431 | 0 |
2432 | 0 | mCurrPi->mRefCount = refCount; |
2433 | 0 | } |
2434 | | |
2435 | | NS_IMETHODIMP_(void) |
2436 | | CCGraphBuilder::NoteXPCOMChild(nsISupports* aChild) |
2437 | 0 | { |
2438 | 0 | nsCString edgeName; |
2439 | 0 | if (WantDebugInfo()) { |
2440 | 0 | edgeName.Assign(mNextEdgeName); |
2441 | 0 | mNextEdgeName.Truncate(); |
2442 | 0 | } |
2443 | 0 | if (!aChild || !(aChild = CanonicalizeXPCOMParticipant(aChild))) { |
2444 | 0 | return; |
2445 | 0 | } |
2446 | 0 | |
2447 | 0 | ++mNoteChildCount; |
2448 | 0 |
2449 | 0 | nsXPCOMCycleCollectionParticipant* cp; |
2450 | 0 | ToParticipant(aChild, &cp); |
2451 | 0 | if (cp && (!cp->CanSkipThis(aChild) || WantAllTraces())) { |
2452 | 0 | NoteChild(aChild, cp, edgeName); |
2453 | 0 | } |
2454 | 0 | } |
2455 | | |
2456 | | NS_IMETHODIMP_(void) |
2457 | | CCGraphBuilder::NoteNativeChild(void* aChild, |
2458 | | nsCycleCollectionParticipant* aParticipant) |
2459 | 0 | { |
2460 | 0 | nsCString edgeName; |
2461 | 0 | if (WantDebugInfo()) { |
2462 | 0 | edgeName.Assign(mNextEdgeName); |
2463 | 0 | mNextEdgeName.Truncate(); |
2464 | 0 | } |
2465 | 0 | if (!aChild) { |
2466 | 0 | return; |
2467 | 0 | } |
2468 | 0 | |
2469 | 0 | ++mNoteChildCount; |
2470 | 0 |
2471 | 0 | MOZ_ASSERT(aParticipant, "Need a nsCycleCollectionParticipant!"); |
2472 | 0 | if (!aParticipant->CanSkipThis(aChild) || WantAllTraces()) { |
2473 | 0 | NoteChild(aChild, aParticipant, edgeName); |
2474 | 0 | } |
2475 | 0 | } |
2476 | | |
2477 | | NS_IMETHODIMP_(void) |
2478 | | CCGraphBuilder::NoteJSChild(const JS::GCCellPtr& aChild) |
2479 | 0 | { |
2480 | 0 | if (!aChild) { |
2481 | 0 | return; |
2482 | 0 | } |
2483 | 0 | |
2484 | 0 | ++mNoteChildCount; |
2485 | 0 |
2486 | 0 | nsCString edgeName; |
2487 | 0 | if (MOZ_UNLIKELY(WantDebugInfo())) { |
2488 | 0 | edgeName.Assign(mNextEdgeName); |
2489 | 0 | mNextEdgeName.Truncate(); |
2490 | 0 | } |
2491 | 0 |
2492 | 0 | if (GCThingIsGrayCCThing(aChild) || MOZ_UNLIKELY(WantAllTraces())) { |
2493 | 0 | if (JS::Zone* zone = MergeZone(aChild)) { |
2494 | 0 | NoteChild(zone, mJSZoneParticipant, edgeName); |
2495 | 0 | } else { |
2496 | 0 | NoteChild(aChild.asCell(), mJSParticipant, edgeName); |
2497 | 0 | } |
2498 | 0 | } |
2499 | 0 | } |
2500 | | |
2501 | | NS_IMETHODIMP_(void) |
2502 | | CCGraphBuilder::NoteNextEdgeName(const char* aName) |
2503 | 0 | { |
2504 | 0 | if (WantDebugInfo()) { |
2505 | 0 | mNextEdgeName = aName; |
2506 | 0 | } |
2507 | 0 | } |
2508 | | |
2509 | | PtrInfo* |
2510 | | CCGraphBuilder::AddWeakMapNode(JS::GCCellPtr aNode) |
2511 | 0 | { |
2512 | 0 | MOZ_ASSERT(aNode, "Weak map node should be non-null."); |
2513 | 0 |
2514 | 0 | if (!GCThingIsGrayCCThing(aNode) && !WantAllTraces()) { |
2515 | 0 | return nullptr; |
2516 | 0 | } |
2517 | 0 | |
2518 | 0 | if (JS::Zone* zone = MergeZone(aNode)) { |
2519 | 0 | return AddNode(zone, mJSZoneParticipant); |
2520 | 0 | } |
2521 | 0 | return AddNode(aNode.asCell(), mJSParticipant); |
2522 | 0 | } |
2523 | | |
2524 | | PtrInfo* |
2525 | | CCGraphBuilder::AddWeakMapNode(JSObject* aObject) |
2526 | 0 | { |
2527 | 0 | return AddWeakMapNode(JS::GCCellPtr(aObject)); |
2528 | 0 | } |
2529 | | |
2530 | | NS_IMETHODIMP_(void) |
2531 | | CCGraphBuilder::NoteWeakMapping(JSObject* aMap, JS::GCCellPtr aKey, |
2532 | | JSObject* aKdelegate, JS::GCCellPtr aVal) |
2533 | 0 | { |
2534 | 0 | // Don't try to optimize away the entry here, as we've already attempted to |
2535 | 0 | // do that in TraceWeakMapping in nsXPConnect. |
2536 | 0 | WeakMapping* mapping = mGraph.mWeakMaps.AppendElement(); |
2537 | 0 | mapping->mMap = aMap ? AddWeakMapNode(aMap) : nullptr; |
2538 | 0 | mapping->mKey = aKey ? AddWeakMapNode(aKey) : nullptr; |
2539 | 0 | mapping->mKeyDelegate = aKdelegate ? AddWeakMapNode(aKdelegate) : mapping->mKey; |
2540 | 0 | mapping->mVal = aVal ? AddWeakMapNode(aVal) : nullptr; |
2541 | 0 |
2542 | 0 | if (mLogger) { |
2543 | 0 | mLogger->NoteWeakMapEntry((uint64_t)aMap, aKey ? aKey.unsafeAsInteger() : 0, |
2544 | 0 | (uint64_t)aKdelegate, |
2545 | 0 | aVal ? aVal.unsafeAsInteger() : 0); |
2546 | 0 | } |
2547 | 0 | } |
2548 | | |
2549 | | static bool |
2550 | | AddPurpleRoot(CCGraphBuilder& aBuilder, void* aRoot, |
2551 | | nsCycleCollectionParticipant* aParti) |
2552 | 0 | { |
2553 | 0 | return aBuilder.AddPurpleRoot(aRoot, aParti); |
2554 | 0 | } |
2555 | | |
2556 | | // MayHaveChild() will be false after a Traverse if the object does |
2557 | | // not have any children the CC will visit. |
2558 | | class ChildFinder : public nsCycleCollectionTraversalCallback |
2559 | | { |
2560 | | public: |
2561 | | ChildFinder() : mMayHaveChild(false) |
2562 | 0 | { |
2563 | 0 | } |
2564 | | |
2565 | | // The logic of the Note*Child functions must mirror that of their |
2566 | | // respective functions in CCGraphBuilder. |
2567 | | NS_IMETHOD_(void) NoteXPCOMChild(nsISupports* aChild) override; |
2568 | | NS_IMETHOD_(void) NoteNativeChild(void* aChild, |
2569 | | nsCycleCollectionParticipant* aHelper) |
2570 | | override; |
2571 | | NS_IMETHOD_(void) NoteJSChild(const JS::GCCellPtr& aThing) override; |
2572 | | |
2573 | | NS_IMETHOD_(void) DescribeRefCountedNode(nsrefcnt aRefcount, |
2574 | | const char* aObjname) override |
2575 | 0 | { |
2576 | 0 | } |
2577 | | NS_IMETHOD_(void) DescribeGCedNode(bool aIsMarked, |
2578 | | const char* aObjname, |
2579 | | uint64_t aCompartmentAddress) override |
2580 | 0 | { |
2581 | 0 | } |
2582 | | NS_IMETHOD_(void) NoteNextEdgeName(const char* aName) override |
2583 | 0 | { |
2584 | 0 | } |
2585 | | bool MayHaveChild() |
2586 | 0 | { |
2587 | 0 | return mMayHaveChild; |
2588 | 0 | } |
2589 | | private: |
2590 | | bool mMayHaveChild; |
2591 | | }; |
2592 | | |
2593 | | NS_IMETHODIMP_(void) |
2594 | | ChildFinder::NoteXPCOMChild(nsISupports* aChild) |
2595 | 0 | { |
2596 | 0 | if (!aChild || !(aChild = CanonicalizeXPCOMParticipant(aChild))) { |
2597 | 0 | return; |
2598 | 0 | } |
2599 | 0 | nsXPCOMCycleCollectionParticipant* cp; |
2600 | 0 | ToParticipant(aChild, &cp); |
2601 | 0 | if (cp && !cp->CanSkip(aChild, true)) { |
2602 | 0 | mMayHaveChild = true; |
2603 | 0 | } |
2604 | 0 | } |
2605 | | |
2606 | | NS_IMETHODIMP_(void) |
2607 | | ChildFinder::NoteNativeChild(void* aChild, |
2608 | | nsCycleCollectionParticipant* aHelper) |
2609 | 0 | { |
2610 | 0 | if (!aChild) { |
2611 | 0 | return; |
2612 | 0 | } |
2613 | 0 | MOZ_ASSERT(aHelper, "Native child must have a participant"); |
2614 | 0 | if (!aHelper->CanSkip(aChild, true)) { |
2615 | 0 | mMayHaveChild = true; |
2616 | 0 | } |
2617 | 0 | } |
2618 | | |
2619 | | NS_IMETHODIMP_(void) |
2620 | | ChildFinder::NoteJSChild(const JS::GCCellPtr& aChild) |
2621 | 0 | { |
2622 | 0 | if (aChild && JS::GCThingIsMarkedGray(aChild)) { |
2623 | 0 | mMayHaveChild = true; |
2624 | 0 | } |
2625 | 0 | } |
2626 | | |
2627 | | static bool |
2628 | | MayHaveChild(void* aObj, nsCycleCollectionParticipant* aCp) |
2629 | 0 | { |
2630 | 0 | ChildFinder cf; |
2631 | 0 | aCp->TraverseNativeAndJS(aObj, cf); |
2632 | 0 | return cf.MayHaveChild(); |
2633 | 0 | } |
2634 | | |
2635 | | // JSPurpleBuffer keeps references to GCThings which might affect the |
2636 | | // next cycle collection. It is owned only by itself: during unlink its |
2637 | | // self-reference is broken and the object ends up deleting itself. |
2638 | | // If a GC happens before the CC, the references to GCThings and the |
2639 | | // self-reference are removed. |
2640 | | class JSPurpleBuffer |
2641 | | { |
2642 | | ~JSPurpleBuffer() |
2643 | 0 | { |
2644 | 0 | MOZ_ASSERT(mValues.IsEmpty()); |
2645 | 0 | MOZ_ASSERT(mObjects.IsEmpty()); |
2646 | 0 | } |
2647 | | |
2648 | | public: |
2649 | | explicit JSPurpleBuffer(RefPtr<JSPurpleBuffer>& aReferenceToThis) |
2650 | | : mReferenceToThis(aReferenceToThis) |
2651 | | , mValues(kSegmentSize) |
2652 | | , mObjects(kSegmentSize) |
2653 | 0 | { |
2654 | 0 | mReferenceToThis = this; |
2655 | 0 | mozilla::HoldJSObjects(this); |
2656 | 0 | } |
2657 | | |
2658 | | void Destroy() |
2659 | 0 | { |
2660 | 0 | RefPtr<JSPurpleBuffer> referenceToThis; |
2661 | 0 | mReferenceToThis.swap(referenceToThis); |
2662 | 0 | mValues.Clear(); |
2663 | 0 | mObjects.Clear(); |
2664 | 0 | mozilla::DropJSObjects(this); |
2665 | 0 | } |
2666 | | |
2667 | | NS_INLINE_DECL_CYCLE_COLLECTING_NATIVE_REFCOUNTING(JSPurpleBuffer) |
2668 | | NS_DECL_CYCLE_COLLECTION_SCRIPT_HOLDER_NATIVE_CLASS(JSPurpleBuffer) |
2669 | | |
2670 | | RefPtr<JSPurpleBuffer>& mReferenceToThis; |
2671 | | |
2672 | | // These are raw pointers instead of Heap<T> because we only need Heap<T> for |
2673 | | // pointers which may point into the nursery. The purple buffer never contains |
2674 | | // pointers to the nursery because nursery gcthings can never be gray and only |
2675 | | // gray things can be inserted into the purple buffer. |
2676 | | static const size_t kSegmentSize = 512; |
2677 | | SegmentedVector<JS::Value, kSegmentSize, InfallibleAllocPolicy> mValues; |
2678 | | SegmentedVector<JSObject*, kSegmentSize, InfallibleAllocPolicy> mObjects; |
2679 | | }; |
2680 | | |
2681 | | NS_IMPL_CYCLE_COLLECTION_CLASS(JSPurpleBuffer) |
2682 | | |
2683 | 0 | NS_IMPL_CYCLE_COLLECTION_UNLINK_BEGIN(JSPurpleBuffer) |
2684 | 0 | tmp->Destroy(); |
2685 | 0 | NS_IMPL_CYCLE_COLLECTION_UNLINK_END |
2686 | | |
2687 | 0 | NS_IMPL_CYCLE_COLLECTION_TRAVERSE_BEGIN(JSPurpleBuffer) |
2688 | 0 | CycleCollectionNoteChild(cb, tmp, "self"); |
2689 | 0 | NS_IMPL_CYCLE_COLLECTION_TRAVERSE_END |
2690 | | |
2691 | | #define NS_TRACE_SEGMENTED_ARRAY(_field, _type) \ |
2692 | 0 | { \ |
2693 | 0 | for (auto iter = tmp->_field.Iter(); !iter.Done(); iter.Next()) { \ |
2694 | 0 | js::gc::CallTraceCallbackOnNonHeap<_type, TraceCallbacks>( \ |
2695 | 0 | &iter.Get(), aCallbacks, #_field, aClosure); \ |
2696 | 0 | } \ |
2697 | 0 | } |
2698 | | |
2699 | 0 | NS_IMPL_CYCLE_COLLECTION_TRACE_BEGIN(JSPurpleBuffer) |
2700 | 0 | NS_TRACE_SEGMENTED_ARRAY(mValues, JS::Value) |
2701 | 0 | NS_TRACE_SEGMENTED_ARRAY(mObjects, JSObject*) |
2702 | 0 | NS_IMPL_CYCLE_COLLECTION_TRACE_END |
2703 | | |
2704 | | NS_IMPL_CYCLE_COLLECTION_ROOT_NATIVE(JSPurpleBuffer, AddRef) |
2705 | | NS_IMPL_CYCLE_COLLECTION_UNROOT_NATIVE(JSPurpleBuffer, Release) |
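// Editor's sketch of the self-ownership described above: the buffer is
// created through a RefPtr field that its constructor then points at itself,
// e.g. (field name hypothetical):
//
//   RefPtr<JSPurpleBuffer> mJSPurpleBuffer;   // owner's member, starts null
//   new JSPurpleBuffer(mJSPurpleBuffer);      // ctor sets the member to this
//
// The only strong reference therefore lives inside the object itself;
// Destroy() swaps it into a stack-local RefPtr whose destructor releases the
// final reference.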
2706 | | |
2707 | | class SnowWhiteKiller : public TraceCallbacks |
2708 | | { |
2709 | | struct SnowWhiteObject |
2710 | | { |
2711 | | void* mPointer; |
2712 | | nsCycleCollectionParticipant* mParticipant; |
2713 | | nsCycleCollectingAutoRefCnt* mRefCnt; |
2714 | | }; |
2715 | | |
2716 | | // Segments are 4 KiB on 32-bit and 8 KiB on 64-bit. |
2717 | | static const size_t kSegmentSize = sizeof(void*) * 1024; |
2718 | | typedef SegmentedVector<SnowWhiteObject, kSegmentSize, InfallibleAllocPolicy> |
2719 | | ObjectsVector; |
2720 | | |
2721 | | public: |
2722 | | SnowWhiteKiller(nsCycleCollector* aCollector, js::SliceBudget* aBudget) |
2723 | | : mCollector(aCollector) |
2724 | | , mObjects(kSegmentSize) |
2725 | | , mBudget(aBudget) |
2726 | | , mSawSnowWhiteObjects(false) |
2727 | 0 | { |
2728 | 0 | MOZ_ASSERT(mCollector, "Calling SnowWhiteKiller after nsCC went away"); |
2729 | 0 | } |
2730 | | |
2731 | | explicit SnowWhiteKiller(nsCycleCollector* aCollector) |
2732 | | : SnowWhiteKiller(aCollector, nullptr) |
2733 | 0 | { |
2734 | 0 | } |
2735 | | |
2736 | | ~SnowWhiteKiller() |
2737 | 0 | { |
2738 | 0 | for (auto iter = mObjects.Iter(); !iter.Done(); iter.Next()) { |
2739 | 0 | SnowWhiteObject& o = iter.Get(); |
2740 | 0 | MaybeKillObject(o); |
2741 | 0 | } |
2742 | 0 | } |
2743 | | |
2744 | | void |
2745 | | MaybeKillObject(SnowWhiteObject& aObject) |
2746 | 0 | { |
2747 | 0 | if (!aObject.mRefCnt->get() && !aObject.mRefCnt->IsInPurpleBuffer()) { |
2748 | 0 | mCollector->RemoveObjectFromGraph(aObject.mPointer); |
2749 | 0 | aObject.mRefCnt->stabilizeForDeletion(); |
2750 | 0 | { |
2751 | 0 | JS::AutoEnterCycleCollection autocc(mCollector->Runtime()->Runtime()); |
2752 | 0 | aObject.mParticipant->Trace(aObject.mPointer, *this, nullptr); |
2753 | 0 | } |
2754 | 0 | aObject.mParticipant->DeleteCycleCollectable(aObject.mPointer); |
2755 | 0 | } |
2756 | 0 | } |
2757 | | |
2758 | | bool |
2759 | | Visit(nsPurpleBuffer& aBuffer, nsPurpleBufferEntry* aEntry) |
2760 | 0 | { |
2761 | 0 | // Ignore any slice budget we have when recording/replaying, as it behaves |
2762 | 0 | // non-deterministically. |
2763 | 0 | if (mBudget && !recordreplay::IsRecordingOrReplaying()) { |
2764 | 0 | if (mBudget->isOverBudget()) { |
2765 | 0 | return false; |
2766 | 0 | } |
2767 | 0 | mBudget->step(); |
2768 | 0 | } |
2769 | 0 |
2770 | 0 | MOZ_ASSERT(aEntry->mObject, "Null object in purple buffer"); |
2771 | 0 | if (!aEntry->mRefCnt->get()) { |
2772 | 0 | mSawSnowWhiteObjects = true; |
2773 | 0 | void* o = aEntry->mObject; |
2774 | 0 | nsCycleCollectionParticipant* cp = aEntry->mParticipant; |
2775 | 0 | ToParticipant(o, &cp); |
2776 | 0 | SnowWhiteObject swo = { o, cp, aEntry->mRefCnt }; |
2777 | 0 | if (!mBudget) { |
2778 | 0 | mObjects.InfallibleAppend(swo); |
2779 | 0 | } |
2780 | 0 | aBuffer.Remove(aEntry); |
2781 | 0 | if (mBudget) { |
2782 | 0 | MaybeKillObject(swo); |
2783 | 0 | } |
2784 | 0 | } |
2785 | 0 | return true; |
2786 | 0 | } |
2787 | | |
2788 | | bool HasSnowWhiteObjects() const |
2789 | 0 | { |
2790 | 0 | return !mObjects.IsEmpty(); |
2791 | 0 | } |
2792 | | |
2793 | | bool SawSnowWhiteObjects() const |
2794 | 0 | { |
2795 | 0 | return mSawSnowWhiteObjects; |
2796 | 0 | } |
2797 | | |
2798 | | virtual void Trace(JS::Heap<JS::Value>* aValue, const char* aName, |
2799 | | void* aClosure) const override |
2800 | 0 | { |
2801 | 0 | const JS::Value& val = aValue->unbarrieredGet(); |
2802 | 0 | if (val.isGCThing() && ValueIsGrayCCThing(val)) { |
2803 | 0 | MOZ_ASSERT(!js::gc::IsInsideNursery(val.toGCThing())); |
2804 | 0 | mCollector->GetJSPurpleBuffer()->mValues.InfallibleAppend(val); |
2805 | 0 | } |
2806 | 0 | } |
2807 | | |
2808 | | virtual void Trace(JS::Heap<jsid>* aId, const char* aName, |
2809 | | void* aClosure) const override |
2810 | 0 | { |
2811 | 0 | } |
2812 | | |
2813 | | void AppendJSObjectToPurpleBuffer(JSObject* obj) const |
2814 | 0 | { |
2815 | 0 | if (obj && JS::ObjectIsMarkedGray(obj)) { |
2816 | 0 | MOZ_ASSERT(JS::ObjectIsTenured(obj)); |
2817 | 0 | mCollector->GetJSPurpleBuffer()->mObjects.InfallibleAppend(obj); |
2818 | 0 | } |
2819 | 0 | } |
2820 | | |
2821 | | virtual void Trace(JS::Heap<JSObject*>* aObject, const char* aName, |
2822 | | void* aClosure) const override |
2823 | 0 | { |
2824 | 0 | AppendJSObjectToPurpleBuffer(aObject->unbarrieredGet()); |
2825 | 0 | } |
2826 | | |
2827 | | virtual void Trace(JSObject** aObject, const char* aName, |
2828 | | void* aClosure) const override |
2829 | 0 | { |
2830 | 0 | AppendJSObjectToPurpleBuffer(*aObject); |
2831 | 0 | } |
2832 | | |
2833 | | virtual void Trace(JS::TenuredHeap<JSObject*>* aObject, const char* aName, |
2834 | | void* aClosure) const override |
2835 | 0 | { |
2836 | 0 | AppendJSObjectToPurpleBuffer(aObject->unbarrieredGetPtr()); |
2837 | 0 | } |
2838 | | |
2839 | | virtual void Trace(JS::Heap<JSString*>* aString, const char* aName, |
2840 | | void* aClosure) const override |
2841 | 0 | { |
2842 | 0 | } |
2843 | | |
2844 | | virtual void Trace(JS::Heap<JSScript*>* aScript, const char* aName, |
2845 | | void* aClosure) const override |
2846 | 0 | { |
2847 | 0 | } |
2848 | | |
2849 | | virtual void Trace(JS::Heap<JSFunction*>* aFunction, const char* aName, |
2850 | | void* aClosure) const override |
2851 | 0 | { |
2852 | 0 | } |
2853 | | |
2854 | | private: |
2855 | | RefPtr<nsCycleCollector> mCollector; |
2856 | | ObjectsVector mObjects; |
2857 | | js::SliceBudget* mBudget; |
2858 | | bool mSawSnowWhiteObjects; |
2859 | | }; |
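// Editor's note (usage sketch): SnowWhiteKiller is applied to the purple
// buffer in two modes, both visible in the FreeSnowWhite* methods below:
//
//   SnowWhiteKiller visitor(collector);          // no budget: objects are
//   purpleBuffer.VisitEntries(visitor);          // gathered into mObjects
//                                                // and killed in the dtor
//
//   SnowWhiteKiller visitor(collector, &budget); // budgeted: each object is
//   purpleBuffer.VisitEntries(visitor);          // killed immediately in
//                                                // Visit()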
2860 | | |
2861 | | class RemoveSkippableVisitor : public SnowWhiteKiller |
2862 | | { |
2863 | | public: |
2864 | | RemoveSkippableVisitor(nsCycleCollector* aCollector, |
2865 | | js::SliceBudget& aBudget, |
2866 | | bool aRemoveChildlessNodes, |
2867 | | bool aAsyncSnowWhiteFreeing, |
2868 | | CC_ForgetSkippableCallback aCb) |
2869 | | : SnowWhiteKiller(aCollector) |
2870 | | , mBudget(aBudget) |
2871 | | , mRemoveChildlessNodes(aRemoveChildlessNodes) |
2872 | | , mAsyncSnowWhiteFreeing(aAsyncSnowWhiteFreeing) |
2873 | | , mDispatchedDeferredDeletion(false) |
2874 | | , mCallback(aCb) |
2875 | 0 | { |
2876 | 0 | } |
2877 | | |
2878 | | ~RemoveSkippableVisitor() |
2879 | 0 | { |
2880 | 0 | // Note, we must call the callback before SnowWhiteKiller calls |
2881 | 0 | // DeleteCycleCollectable! |
2882 | 0 | if (mCallback) { |
2883 | 0 | mCallback(); |
2884 | 0 | } |
2885 | 0 | if (HasSnowWhiteObjects()) { |
2886 | 0 | // Effectively a continuation. |
2887 | 0 | nsCycleCollector_dispatchDeferredDeletion(true); |
2888 | 0 | } |
2889 | 0 | } |
2890 | | |
2891 | | bool |
2892 | | Visit(nsPurpleBuffer& aBuffer, nsPurpleBufferEntry* aEntry) |
2893 | 0 | { |
2894 | 0 | if (mBudget.isOverBudget()) { |
2895 | 0 | return false; |
2896 | 0 | } |
2897 | 0 | |
2898 | 0 | // CanSkip calls can be a bit slow, so increase the likelihood that |
2899 | 0 | // isOverBudget actually checks whether we're over the time budget. |
2900 | 0 | mBudget.step(5); |
2901 | 0 | MOZ_ASSERT(aEntry->mObject, "null mObject in purple buffer"); |
2902 | 0 | if (!aEntry->mRefCnt->get()) { |
2903 | 0 | if (!mAsyncSnowWhiteFreeing) { |
2904 | 0 | SnowWhiteKiller::Visit(aBuffer, aEntry); |
2905 | 0 | } else if (!mDispatchedDeferredDeletion) { |
2906 | 0 | mDispatchedDeferredDeletion = true; |
2907 | 0 | nsCycleCollector_dispatchDeferredDeletion(false); |
2908 | 0 | } |
2909 | 0 | return true; |
2910 | 0 | } |
2911 | 0 | void* o = aEntry->mObject; |
2912 | 0 | nsCycleCollectionParticipant* cp = aEntry->mParticipant; |
2913 | 0 | ToParticipant(o, &cp); |
2914 | 0 | if (aEntry->mRefCnt->IsPurple() && !cp->CanSkip(o, false) && |
2915 | 0 | (!mRemoveChildlessNodes || MayHaveChild(o, cp))) { |
2916 | 0 | return true; |
2917 | 0 | } |
2918 | 0 | aBuffer.Remove(aEntry); |
2919 | 0 | return true; |
2920 | 0 | } |
2921 | | |
2922 | | private: |
2923 | | js::SliceBudget& mBudget; |
2924 | | bool mRemoveChildlessNodes; |
2925 | | bool mAsyncSnowWhiteFreeing; |
2926 | | bool mDispatchedDeferredDeletion; |
2927 | | CC_ForgetSkippableCallback mCallback; |
2928 | | }; |
2929 | | |
2930 | | void |
2931 | | nsPurpleBuffer::RemoveSkippable(nsCycleCollector* aCollector, |
2932 | | js::SliceBudget& aBudget, |
2933 | | bool aRemoveChildlessNodes, |
2934 | | bool aAsyncSnowWhiteFreeing, |
2935 | | CC_ForgetSkippableCallback aCb) |
2936 | 0 | { |
2937 | 0 | RemoveSkippableVisitor visitor(aCollector, aBudget, aRemoveChildlessNodes, |
2938 | 0 | aAsyncSnowWhiteFreeing, aCb); |
2939 | 0 | VisitEntries(visitor); |
2940 | 0 | } |
2941 | | |
2942 | | bool |
2943 | | nsCycleCollector::FreeSnowWhite(bool aUntilNoSWInPurpleBuffer) |
2944 | 0 | { |
2945 | 0 | CheckThreadSafety(); |
2946 | 0 |
2947 | 0 | if (mFreeingSnowWhite) { |
2948 | 0 | return false; |
2949 | 0 | } |
2950 | 0 | |
2951 | 0 | AutoRestore<bool> ar(mFreeingSnowWhite); |
2952 | 0 | mFreeingSnowWhite = true; |
2953 | 0 |
2954 | 0 | bool hadSnowWhiteObjects = false; |
2955 | 0 | do { |
2956 | 0 | SnowWhiteKiller visitor(this); |
2957 | 0 | mPurpleBuf.VisitEntries(visitor); |
2958 | 0 | hadSnowWhiteObjects = hadSnowWhiteObjects || |
2959 | 0 | visitor.HasSnowWhiteObjects(); |
2960 | 0 | if (!visitor.HasSnowWhiteObjects()) { |
2961 | 0 | break; |
2962 | 0 | } |
2963 | 0 | } while (aUntilNoSWInPurpleBuffer); |
2964 | 0 | return hadSnowWhiteObjects; |
2965 | 0 | } |
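// Editor's note: the do/while above loops because DeleteCycleCollectable()
// on one snow-white object can release references it held, dropping other
// objects' refcounts to zero and adding fresh snow-white entries to the
// purple buffer. With aUntilNoSWInPurpleBuffer the sweep repeats until a
// pass finds nothing left to free.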
2966 | | |
2967 | | bool |
2968 | | nsCycleCollector::FreeSnowWhiteWithBudget(js::SliceBudget& aBudget) |
2969 | 0 | { |
2970 | 0 | CheckThreadSafety(); |
2971 | 0 |
2972 | 0 | if (mFreeingSnowWhite) { |
2973 | 0 | return false; |
2974 | 0 | } |
2975 | 0 | |
2976 | 0 | AutoRestore<bool> ar(mFreeingSnowWhite); |
2977 | 0 | mFreeingSnowWhite = true; |
2978 | 0 |
2979 | 0 | SnowWhiteKiller visitor(this, &aBudget); |
2980 | 0 | mPurpleBuf.VisitEntries(visitor); |
2981 | 0 | return visitor.SawSnowWhiteObjects(); |
2982 | 0 | } |
2983 | | |
2984 | | void |
2985 | | nsCycleCollector::ForgetSkippable(js::SliceBudget& aBudget, |
2986 | | bool aRemoveChildlessNodes, |
2987 | | bool aAsyncSnowWhiteFreeing) |
2988 | 0 | { |
2989 | 0 | CheckThreadSafety(); |
2990 | 0 |
2991 | 0 | mozilla::Maybe<mozilla::AutoGlobalTimelineMarker> marker; |
2992 | 0 | if (NS_IsMainThread()) { |
2993 | 0 | marker.emplace("nsCycleCollector::ForgetSkippable", MarkerStackRequest::NO_STACK); |
2994 | 0 | } |
2995 | 0 |
2996 | 0 | // If we remove things from the purple buffer during graph building, we may |
2997 | 0 | // lose track of an object that was mutated during graph building. |
2998 | 0 | MOZ_ASSERT(IsIdle()); |
2999 | 0 |
3000 | 0 | // The cycle collector does not collect anything when recording/replaying. |
3001 | 0 | if (recordreplay::IsRecordingOrReplaying()) { |
3002 | 0 | return; |
3003 | 0 | } |
3004 | 0 | |
3005 | 0 | if (mCCJSRuntime) { |
3006 | 0 | mCCJSRuntime->PrepareForForgetSkippable(); |
3007 | 0 | } |
3008 | 0 | MOZ_ASSERT(!mScanInProgress, |
3009 | 0 | "Don't forget skippable or free snow-white while scan is in progress."); |
3010 | 0 | mPurpleBuf.RemoveSkippable(this, aBudget, aRemoveChildlessNodes, |
3011 | 0 | aAsyncSnowWhiteFreeing, mForgetSkippableCB); |
3012 | 0 | } |
3013 | | |
3014 | | MOZ_NEVER_INLINE void |
3015 | | nsCycleCollector::MarkRoots(SliceBudget& aBudget) |
3016 | 0 | { |
3017 | 0 | JS::AutoAssertNoGC nogc; |
3018 | 0 | TimeLog timeLog; |
3019 | 0 | AutoRestore<bool> ar(mScanInProgress); |
3020 | 0 | MOZ_RELEASE_ASSERT(!mScanInProgress); |
3021 | 0 | mScanInProgress = true; |
3022 | 0 | MOZ_ASSERT(mIncrementalPhase == GraphBuildingPhase); |
3023 | 0 |
3024 | 0 | JS::AutoEnterCycleCollection autocc(Runtime()->Runtime()); |
3025 | 0 | bool doneBuilding = mBuilder->BuildGraph(aBudget); |
3026 | 0 |
3027 | 0 | if (!doneBuilding) { |
3028 | 0 | timeLog.Checkpoint("MarkRoots()"); |
3029 | 0 | return; |
3030 | 0 | } |
3031 | 0 | |
3032 | 0 | mBuilder = nullptr; |
3033 | 0 | mIncrementalPhase = ScanAndCollectWhitePhase; |
3034 | 0 | timeLog.Checkpoint("MarkRoots()"); |
3035 | 0 | } |
3036 | | |
3037 | | |
3038 | | //////////////////////////////////////////////////////////////////////// |
3039 | | // Bacon & Rajan's |ScanRoots| routine. |
3040 | | //////////////////////////////////////////////////////////////////////// |
3041 | | |
3042 | | |
3043 | | struct ScanBlackVisitor |
3044 | | { |
3045 | | ScanBlackVisitor(uint32_t& aWhiteNodeCount, bool& aFailed) |
3046 | | : mWhiteNodeCount(aWhiteNodeCount), mFailed(aFailed) |
3047 | 0 | { |
3048 | 0 | } |
3049 | | |
3050 | | bool ShouldVisitNode(PtrInfo const* aPi) |
3051 | 0 | { |
3052 | 0 | return aPi->mColor != black; |
3053 | 0 | } |
3054 | | |
3055 | | MOZ_NEVER_INLINE void VisitNode(PtrInfo* aPi) |
3056 | 0 | { |
3057 | 0 | if (aPi->mColor == white) { |
3058 | 0 | --mWhiteNodeCount; |
3059 | 0 | } |
3060 | 0 | aPi->mColor = black; |
3061 | 0 | } |
3062 | | |
3063 | | void Failed() |
3064 | 0 | { |
3065 | 0 | mFailed = true; |
3066 | 0 | } |
3067 | | |
3068 | | private: |
3069 | | uint32_t& mWhiteNodeCount; |
3070 | | bool& mFailed; |
3071 | | }; |
3072 | | |
3073 | | static void |
3074 | | FloodBlackNode(uint32_t& aWhiteNodeCount, bool& aFailed, PtrInfo* aPi) |
3075 | 0 | { |
3076 | 0 | GraphWalker<ScanBlackVisitor>(ScanBlackVisitor(aWhiteNodeCount, |
3077 | 0 | aFailed)).Walk(aPi); |
3078 | 0 | MOZ_ASSERT(aPi->mColor == black || !aPi->WasTraversed(), |
3079 | 0 | "FloodBlackNode should make aPi black"); |
3080 | 0 | } |
3081 | | |
3082 | | // Iterate over the WeakMaps. If we mark anything while iterating |
3083 | | // over the WeakMaps, we must iterate over all of the WeakMaps again. |
3084 | | void |
3085 | | nsCycleCollector::ScanWeakMaps() |
3086 | 0 | { |
3087 | 0 | bool anyChanged; |
3088 | 0 | bool failed = false; |
3089 | 0 | do { |
3090 | 0 | anyChanged = false; |
3091 | 0 | for (uint32_t i = 0; i < mGraph.mWeakMaps.Length(); i++) { |
3092 | 0 | WeakMapping* wm = &mGraph.mWeakMaps[i]; |
3093 | 0 |
3094 | 0 | // If any of these are null, the original object was marked black. |
3095 | 0 | uint32_t mColor = wm->mMap ? wm->mMap->mColor : black; |
3096 | 0 | uint32_t kColor = wm->mKey ? wm->mKey->mColor : black; |
3097 | 0 | uint32_t kdColor = wm->mKeyDelegate ? wm->mKeyDelegate->mColor : black; |
3098 | 0 | uint32_t vColor = wm->mVal ? wm->mVal->mColor : black; |
3099 | 0 |
3100 | 0 | MOZ_ASSERT(mColor != grey, "Uncolored weak map"); |
3101 | 0 | MOZ_ASSERT(kColor != grey, "Uncolored weak map key"); |
3102 | 0 | MOZ_ASSERT(kdColor != grey, "Uncolored weak map key delegate"); |
3103 | 0 | MOZ_ASSERT(vColor != grey, "Uncolored weak map value"); |
3104 | 0 |
3105 | 0 | if (mColor == black && kColor != black && kdColor == black) { |
3106 | 0 | FloodBlackNode(mWhiteNodeCount, failed, wm->mKey); |
3107 | 0 | anyChanged = true; |
3108 | 0 | } |
3109 | 0 |
3110 | 0 | if (mColor == black && kColor == black && vColor != black) { |
3111 | 0 | FloodBlackNode(mWhiteNodeCount, failed, wm->mVal); |
3112 | 0 | anyChanged = true; |
3113 | 0 | } |
3114 | 0 | } |
3115 | 0 | } while (anyChanged); |
3116 | 0 |
3117 | 0 | if (failed) { |
3118 | 0 | MOZ_ASSERT(false, "Ran out of memory in ScanWeakMaps"); |
3119 | 0 | CC_TELEMETRY(_OOM, true); |
3120 | 0 | } |
3121 | 0 | } |
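// Editor's note on the marking rule above: a weak map entry keeps its value
// alive only when both the map and the key are live, where the key counts
// as live if its delegate is black. That is exactly the two flooding cases:
//
//   map black && delegate black && key not black -> flood black from key
//   map black && key black && value not black    -> flood black from value
//
// Because flooding can satisfy the condition of an entry examined earlier,
// the loop reruns until a complete pass over mWeakMaps changes nothing.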
3122 | | |
3123 | | // Flood black from any objects in the purple buffer that are in the CC graph. |
3124 | | class PurpleScanBlackVisitor |
3125 | | { |
3126 | | public: |
3127 | | PurpleScanBlackVisitor(CCGraph& aGraph, nsCycleCollectorLogger* aLogger, |
3128 | | uint32_t& aCount, bool& aFailed) |
3129 | | : mGraph(aGraph), mLogger(aLogger), mCount(aCount), mFailed(aFailed) |
3130 | 0 | { |
3131 | 0 | } |
3132 | | |
3133 | | bool |
3134 | | Visit(nsPurpleBuffer& aBuffer, nsPurpleBufferEntry* aEntry) |
3135 | 0 | { |
3136 | 0 | MOZ_ASSERT(aEntry->mObject, |
3137 | 0 | "Entries with null mObject shouldn't be in the purple buffer."); |
3138 | 0 | MOZ_ASSERT(aEntry->mRefCnt->get() != 0, |
3139 | 0 | "Snow-white objects shouldn't be in the purple buffer."); |
3140 | 0 |
3141 | 0 | void* obj = aEntry->mObject; |
3142 | 0 |
3143 | 0 | MOZ_ASSERT(aEntry->mParticipant || CanonicalizeXPCOMParticipant(static_cast<nsISupports*>(obj)) == obj, |
3144 | 0 | "Suspect nsISupports pointer must be canonical"); |
3145 | 0 |
3146 | 0 | PtrInfo* pi = mGraph.FindNode(obj); |
3147 | 0 | if (!pi) { |
3148 | 0 | return true; |
3149 | 0 | } |
3150 | 0 | MOZ_ASSERT(pi->mParticipant, "No dead objects should be in the purple buffer."); |
3151 | 0 | if (MOZ_UNLIKELY(mLogger)) { |
3152 | 0 | mLogger->NoteIncrementalRoot((uint64_t)pi->mPointer); |
3153 | 0 | } |
3154 | 0 | if (pi->mColor == black) { |
3155 | 0 | return true; |
3156 | 0 | } |
3157 | 0 | FloodBlackNode(mCount, mFailed, pi); |
3158 | 0 | return true; |
3159 | 0 | } |
3160 | | |
3161 | | private: |
3162 | | CCGraph& mGraph; |
3163 | | RefPtr<nsCycleCollectorLogger> mLogger; |
3164 | | uint32_t& mCount; |
3165 | | bool& mFailed; |
3166 | | }; |
3167 | | |
3168 | | // Objects that have been stored somewhere since the start of incremental
3169 | | // graph building must be treated as live for this cycle collection,
3170 | | // because we may not have accurate information about who holds references
3171 | | void |
3172 | | nsCycleCollector::ScanIncrementalRoots() |
3173 | 0 | { |
3174 | 0 | TimeLog timeLog; |
3175 | 0 |
3176 | 0 | // Reference counted objects: |
3177 | 0 | // We cleared the purple buffer at the start of the current ICC, so if a |
3178 | 0 | // refcounted object is purple, it may have been AddRef'd during the current |
3179 | 0 | // ICC. (It may also have only been released.) If that is the case, we cannot |
3180 | 0 | // be sure that the set of things pointing to the object in the CC graph |
3181 | 0 | // is accurate. Therefore, for safety, we treat any purple objects as being |
3182 | 0 | // live during the current CC. We don't remove anything from the purple |
3183 | 0 | // buffer here, so these objects will be suspected and freed in the next CC |
3184 | 0 | // if they are garbage. |
3185 | 0 | bool failed = false; |
3186 | 0 | PurpleScanBlackVisitor purpleScanBlackVisitor(mGraph, mLogger, |
3187 | 0 | mWhiteNodeCount, failed); |
3188 | 0 | mPurpleBuf.VisitEntries(purpleScanBlackVisitor); |
3189 | 0 | timeLog.Checkpoint("ScanIncrementalRoots::fix purple"); |
3190 | 0 |
3191 | 0 | bool hasJSRuntime = !!mCCJSRuntime; |
3192 | 0 | nsCycleCollectionParticipant* jsParticipant = |
3193 | 0 | hasJSRuntime ? mCCJSRuntime->GCThingParticipant() : nullptr; |
3194 | 0 | nsCycleCollectionParticipant* zoneParticipant = |
3195 | 0 | hasJSRuntime ? mCCJSRuntime->ZoneParticipant() : nullptr; |
3196 | 0 | bool hasLogger = !!mLogger; |
3197 | 0 |
3198 | 0 | NodePool::Enumerator etor(mGraph.mNodes);
3199 | 0 | while (!etor.IsDone()) {
3200 | 0 | PtrInfo* pi = etor.GetNext();
3201 | 0 |
3202 | 0 | // As an optimization, if an object has already been determined to be live, |
3203 | 0 | // don't consider it further. We can't do this if there is a listener, |
3204 | 0 | // because the listener wants to know the complete set of incremental roots. |
3205 | 0 | if (pi->mColor == black && MOZ_LIKELY(!hasLogger)) { |
3206 | 0 | continue; |
3207 | 0 | } |
3208 | 0 | |
3209 | 0 | // Garbage collected objects: |
3210 | 0 | // If a GCed object was added to the graph with a refcount of zero, and is |
3211 | 0 | // now marked black by the GC, it was probably gray before and was exposed |
3212 | 0 | // to active JS, which may have stored it somewhere, so it needs to be
3213 | 0 | // treated as live. |
3214 | 0 | if (pi->IsGrayJS() && MOZ_LIKELY(hasJSRuntime)) { |
3215 | 0 | // If the object is still marked gray by the GC, nothing could have gotten |
3216 | 0 | // hold of it, so it isn't an incremental root. |
3217 | 0 | if (pi->mParticipant == jsParticipant) { |
3218 | 0 | JS::GCCellPtr ptr(pi->mPointer, JS::GCThingTraceKind(pi->mPointer)); |
3219 | 0 | if (GCThingIsGrayCCThing(ptr)) { |
3220 | 0 | continue; |
3221 | 0 | } |
3222 | 0 | } else if (pi->mParticipant == zoneParticipant) { |
3223 | 0 | JS::Zone* zone = static_cast<JS::Zone*>(pi->mPointer); |
3224 | 0 | if (js::ZoneGlobalsAreAllGray(zone)) { |
3225 | 0 | continue; |
3226 | 0 | } |
3227 | 0 | } else { |
3228 | 0 | MOZ_ASSERT(false, "Non-JS thing with 0 refcount? Treating as live."); |
3229 | 0 | } |
3230 | 0 | } else if (!pi->mParticipant && pi->WasTraversed()) { |
3231 | 0 | // Dead traversed refcounted objects: |
3232 | 0 | // If the object was traversed, it must have been alive at the start of |
3233 | 0 | // the CC, and thus had a positive refcount. It is dead now, so its |
3234 | 0 | // refcount must have decreased at some point during the CC. Therefore, |
3235 | 0 | // it would be in the purple buffer if it wasn't dead, so treat it as an |
3236 | 0 | // incremental root. |
3237 | 0 | // |
3238 | 0 | // This should not cause leaks: as the object died, it should have
3239 | 0 | // released everything it held, and those released objects end up in the
3240 | 0 | // purple buffer and will be considered in the next CC.
3241 | 0 | } else { |
3242 | 0 | continue; |
3243 | 0 | } |
3244 | 0 | |
3245 | 0 | // At this point, pi must be an incremental root. |
3246 | 0 | |
3247 | 0 | // If there's a listener, tell it about this root. We don't bother with the |
3248 | 0 | // optimization of skipping the Walk() if pi is black: it will just return |
3249 | 0 | // without doing anything and there's no need to make this case faster. |
3250 | 0 | if (MOZ_UNLIKELY(hasLogger) && pi->mPointer) { |
3251 | 0 | // Dead objects aren't logged. See bug 1031370. |
3252 | 0 | mLogger->NoteIncrementalRoot((uint64_t)pi->mPointer); |
3253 | 0 | } |
3254 | 0 |
3255 | 0 | FloodBlackNode(mWhiteNodeCount, failed, pi);
3256 | 0 | }
3257 | 0 |
3258 | 0 | timeLog.Checkpoint("ScanIncrementalRoots::fix nodes");
3259 | 0 |
3260 | 0 | if (failed) { |
3261 | 0 | NS_ASSERTION(false, "Ran out of memory in ScanIncrementalRoots"); |
3262 | 0 | CC_TELEMETRY(_OOM, true); |
3263 | 0 | } |
3264 | 0 | } |
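 | |
 | | // [Editor's note] A condensed restatement of the per-node decision above,
 | | // with hypothetical flags standing in for the real GC queries
 | | // (GCThingIsGrayCCThing, ZoneGlobalsAreAllGray, WasTraversed):
 | | #if 0 // illustrative sketch only; not part of this file
 | | struct SketchRootInfo {
 | |   bool isGrayJS;       // added to the graph as a gray GC thing
 | |   bool stillGrayInGC;  // the GC still sees it as gray
 | |   bool hasParticipant; // mParticipant != nullptr (not yet dead)
 | |   bool wasTraversed;
 | | };
 | |
 | | static bool IsIncrementalRootSketch(const SketchRootInfo& aInfo)
 | | {
 | |   if (aInfo.isGrayJS) {
 | |     // Marked black by the GC since graph building: something must have
 | |     // taken hold of it, so treat it as live. Still gray: nothing could.
 | |     return !aInfo.stillGrayInGC;
 | |   }
 | |   // A refcounted node that was traversed but has died since must have
 | |   // had its refcount drop mid-CC, so treat it as a root too.
 | |   return !aInfo.hasParticipant && aInfo.wasTraversed;
 | | }
 | | #endif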
3265 | | |
3266 | | // Mark nodes white and make sure their refcounts are ok. |
3267 | | // No nodes are marked black during this pass to ensure that refcount |
3268 | | // checking is run on all nodes not marked black by ScanIncrementalRoots. |
3269 | | void |
3270 | | nsCycleCollector::ScanWhiteNodes(bool aFullySynchGraphBuild) |
3271 | 0 | { |
3272 | 0 | NodePool::Enumerator nodeEnum(mGraph.mNodes); |
3273 | 0 | while (!nodeEnum.IsDone()) { |
3274 | 0 | PtrInfo* pi = nodeEnum.GetNext(); |
3275 | 0 | if (pi->mColor == black) { |
3276 | 0 | // Incremental roots can be in a nonsensical state, so don't |
3277 | 0 | // check them. This will miss checking nodes that are merely |
3278 | 0 | // reachable from incremental roots. |
3279 | 0 | MOZ_ASSERT(!aFullySynchGraphBuild, |
3280 | 0 | "In a synch CC, no nodes should be marked black early on."); |
3281 | 0 | continue; |
3282 | 0 | } |
3283 | 0 | MOZ_ASSERT(pi->mColor == grey); |
3284 | 0 |
3285 | 0 | if (!pi->WasTraversed()) { |
3286 | 0 | // This node was deleted before it was traversed, so there's no reason |
3287 | 0 | // to look at it. |
3288 | 0 | MOZ_ASSERT(!pi->mParticipant, "Live nodes should all have been traversed"); |
3289 | 0 | continue; |
3290 | 0 | } |
3291 | 0 |
3292 | 0 | if (pi->mInternalRefs == pi->mRefCount || pi->IsGrayJS()) { |
3293 | 0 | pi->mColor = white; |
3294 | 0 | ++mWhiteNodeCount; |
3295 | 0 | continue; |
3296 | 0 | } |
3297 | 0 | |
3298 | 0 | pi->AnnotatedReleaseAssert(pi->mInternalRefs <= pi->mRefCount, |
3299 | 0 | "More references to an object than its refcount"); |
3300 | 0 |
3301 | 0 | // This node will get marked black in the next pass. |
3302 | 0 | } |
3303 | 0 | } |
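 | |
 | | // [Editor's note] The heart of the pass above is a single comparison:
 | | // when every reference to a node is accounted for by graph-internal
 | | // edges, nothing external owns it, so it is tentatively garbage. Worked
 | | // example with a hypothetical struct (the IsGrayJS() case is ignored):
 | | #if 0 // illustrative sketch only; not part of this file
 | | struct SketchRefCounts { uint32_t mInternalRefs; uint32_t mRefCount; };
 | |
 | | static bool WouldMarkWhiteSketch(const SketchRefCounts& aPi)
 | | {
 | |   // {2, 2}: both refs come from inside the graph -> white (pure cycle).
 | |   // {2, 3}: one ref is unaccounted for -> stays grey here and is
 | |   //         flooded black by ScanBlackNodes in the next pass.
 | |   return aPi.mInternalRefs == aPi.mRefCount;
 | | }
 | | #endif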
3304 | | |
3305 | | // Any remaining grey nodes that haven't already been deleted must be alive, |
3306 | | // so mark them and their children black. Any nodes that are black must have |
3307 | | // already had their children marked black, so there's no need to look at them |
3308 | | // again. This pass may turn some white nodes to black. |
3309 | | void |
3310 | | nsCycleCollector::ScanBlackNodes() |
3311 | 0 | { |
3312 | 0 | bool failed = false; |
3313 | 0 | NodePool::Enumerator nodeEnum(mGraph.mNodes); |
3314 | 0 | while (!nodeEnum.IsDone()) { |
3315 | 0 | PtrInfo* pi = nodeEnum.GetNext(); |
3316 | 0 | if (pi->mColor == grey && pi->WasTraversed()) { |
3317 | 0 | FloodBlackNode(mWhiteNodeCount, failed, pi); |
3318 | 0 | } |
3319 | 0 | } |
3320 | 0 |
3321 | 0 | if (failed) { |
3322 | 0 | NS_ASSERTION(false, "Ran out of memory in ScanBlackNodes"); |
3323 | 0 | CC_TELEMETRY(_OOM, true); |
3324 | 0 | } |
3325 | 0 | } |
3326 | | |
3327 | | void |
3328 | | nsCycleCollector::ScanRoots(bool aFullySynchGraphBuild) |
3329 | 0 | { |
3330 | 0 | JS::AutoAssertNoGC nogc; |
3331 | 0 | AutoRestore<bool> ar(mScanInProgress); |
3332 | 0 | MOZ_RELEASE_ASSERT(!mScanInProgress); |
3333 | 0 | mScanInProgress = true; |
3334 | 0 | mWhiteNodeCount = 0; |
3335 | 0 | MOZ_ASSERT(mIncrementalPhase == ScanAndCollectWhitePhase); |
3336 | 0 |
3337 | 0 | JS::AutoEnterCycleCollection autocc(Runtime()->Runtime());
3338 | 0 |
3339 | 0 | if (!aFullySynchGraphBuild) { |
3340 | 0 | ScanIncrementalRoots(); |
3341 | 0 | } |
3342 | 0 |
3343 | 0 | TimeLog timeLog; |
3344 | 0 | ScanWhiteNodes(aFullySynchGraphBuild); |
3345 | 0 | timeLog.Checkpoint("ScanRoots::ScanWhiteNodes"); |
3346 | 0 |
3347 | 0 | ScanBlackNodes();
3348 | 0 | timeLog.Checkpoint("ScanRoots::ScanBlackNodes");
3349 | 0 |
3350 | 0 | // Scanning weak maps must be done last. |
3351 | 0 | ScanWeakMaps(); |
3352 | 0 | timeLog.Checkpoint("ScanRoots::ScanWeakMaps"); |
3353 | 0 |
3354 | 0 | if (mLogger) {
3355 | 0 | mLogger->BeginResults();
3356 | 0 |
3357 | 0 | NodePool::Enumerator etor(mGraph.mNodes); |
3358 | 0 | while (!etor.IsDone()) { |
3359 | 0 | PtrInfo* pi = etor.GetNext(); |
3360 | 0 | if (!pi->WasTraversed()) { |
3361 | 0 | continue; |
3362 | 0 | } |
3363 | 0 | switch (pi->mColor) { |
3364 | 0 | case black: |
3365 | 0 | if (!pi->IsGrayJS() && !pi->IsBlackJS() && |
3366 | 0 | pi->mInternalRefs != pi->mRefCount) { |
3367 | 0 | mLogger->DescribeRoot((uint64_t)pi->mPointer, |
3368 | 0 | pi->mInternalRefs); |
3369 | 0 | } |
3370 | 0 | break; |
3371 | 0 | case white: |
3372 | 0 | mLogger->DescribeGarbage((uint64_t)pi->mPointer); |
3373 | 0 | break; |
3374 | 0 | case grey: |
3375 | 0 | MOZ_ASSERT(false, "All traversed objects should be black or white"); |
3376 | 0 | break; |
3377 | 0 | } |
3378 | 0 | } |
3379 | 0 |
3380 | 0 | mLogger->End(); |
3381 | 0 | mLogger = nullptr; |
3382 | 0 | timeLog.Checkpoint("ScanRoots::listener"); |
3383 | 0 | } |
3384 | 0 | } |
3385 | | |
3386 | | |
3387 | | //////////////////////////////////////////////////////////////////////// |
3388 | | // Bacon & Rajan's |CollectWhite| routine, somewhat modified. |
3389 | | //////////////////////////////////////////////////////////////////////// |
3390 | | |
3391 | | bool |
3392 | | nsCycleCollector::CollectWhite() |
3393 | 0 | { |
3394 | 0 | // Explanation of "somewhat modified": we have no way to collect the |
3395 | 0 | // set of whites "all at once"; we have to ask each of them to drop
3396 | 0 | // their outgoing links and assume this will cause the garbage cycle |
3397 | 0 | // to *mostly* self-destruct (except for the reference we continue |
3398 | 0 | // to hold). |
3399 | 0 | // |
3400 | 0 | // To do this "safely" we must make sure that the white nodes we're |
3401 | 0 | // operating on are stable for the duration of our operation. So we |
3402 | 0 | // make 3 sets of calls to language runtimes: |
3403 | 0 | // |
3404 | 0 | // - Root(whites), which should pin the whites in memory. |
3405 | 0 | // - Unlink(whites), which drops outgoing links on each white. |
3406 | 0 | // - Unroot(whites), which returns the whites to normal GC. |
3407 | 0 |
3408 | 0 | // Segments are 4 KiB on 32-bit and 8 KiB on 64-bit. |
3409 | 0 | static const size_t kSegmentSize = sizeof(void*) * 1024; |
3410 | 0 | SegmentedVector<PtrInfo*, kSegmentSize, InfallibleAllocPolicy> |
3411 | 0 | whiteNodes(kSegmentSize); |
3412 | 0 | TimeLog timeLog; |
3413 | 0 |
3414 | 0 | MOZ_ASSERT(mIncrementalPhase == ScanAndCollectWhitePhase);
3415 | 0 |
3416 | 0 | uint32_t numWhiteNodes = 0; |
3417 | 0 | uint32_t numWhiteGCed = 0; |
3418 | 0 | uint32_t numWhiteJSZones = 0; |
3419 | 0 |
3420 | 0 | { |
3421 | 0 | JS::AutoAssertNoGC nogc; |
3422 | 0 | bool hasJSRuntime = !!mCCJSRuntime; |
3423 | 0 | nsCycleCollectionParticipant* zoneParticipant = |
3424 | 0 | hasJSRuntime ? mCCJSRuntime->ZoneParticipant() : nullptr; |
3425 | 0 |
3426 | 0 | NodePool::Enumerator etor(mGraph.mNodes); |
3427 | 0 | while (!etor.IsDone()) { |
3428 | 0 | PtrInfo* pinfo = etor.GetNext(); |
3429 | 0 | if (pinfo->mColor == white && pinfo->mParticipant) { |
3430 | 0 | if (pinfo->IsGrayJS()) { |
3431 | 0 | MOZ_ASSERT(mCCJSRuntime); |
3432 | 0 | ++numWhiteGCed; |
3433 | 0 | JS::Zone* zone; |
3434 | 0 | if (MOZ_UNLIKELY(pinfo->mParticipant == zoneParticipant)) { |
3435 | 0 | ++numWhiteJSZones; |
3436 | 0 | zone = static_cast<JS::Zone*>(pinfo->mPointer); |
3437 | 0 | } else { |
3438 | 0 | JS::GCCellPtr ptr(pinfo->mPointer, JS::GCThingTraceKind(pinfo->mPointer)); |
3439 | 0 | zone = JS::GetTenuredGCThingZone(ptr); |
3440 | 0 | } |
3441 | 0 | mCCJSRuntime->AddZoneWaitingForGC(zone); |
3442 | 0 | } else { |
3443 | 0 | whiteNodes.InfallibleAppend(pinfo); |
3444 | 0 | pinfo->mParticipant->Root(pinfo->mPointer); |
3445 | 0 | ++numWhiteNodes; |
3446 | 0 | } |
3447 | 0 | } |
3448 | 0 | } |
3449 | 0 | } |
3450 | 0 |
3451 | 0 | mResults.mFreedRefCounted += numWhiteNodes; |
3452 | 0 | mResults.mFreedGCed += numWhiteGCed; |
3453 | 0 | mResults.mFreedJSZones += numWhiteJSZones; |
3454 | 0 |
3455 | 0 | timeLog.Checkpoint("CollectWhite::Root");
3456 | 0 |
3457 | 0 | if (mBeforeUnlinkCB) { |
3458 | 0 | mBeforeUnlinkCB(); |
3459 | 0 | timeLog.Checkpoint("CollectWhite::BeforeUnlinkCB"); |
3460 | 0 | } |
3461 | 0 |
3462 | 0 | // Unlink() can trigger a GC, so do not touch any JS or anything |
3463 | 0 | // else not in whiteNodes after here. |
3464 | 0 |
3465 | 0 | for (auto iter = whiteNodes.Iter(); !iter.Done(); iter.Next()) { |
3466 | 0 | PtrInfo* pinfo = iter.Get(); |
3467 | 0 | MOZ_ASSERT(pinfo->mParticipant, |
3468 | 0 | "Unlink shouldn't see objects removed from graph."); |
3469 | 0 | pinfo->mParticipant->Unlink(pinfo->mPointer); |
3470 | | #ifdef DEBUG |
3471 | | if (mCCJSRuntime) { |
3472 | | mCCJSRuntime->AssertNoObjectsToTrace(pinfo->mPointer); |
3473 | | } |
3474 | | #endif |
3475 | | } |
3476 | 0 | timeLog.Checkpoint("CollectWhite::Unlink"); |
3477 | 0 |
3478 | 0 | JS::AutoAssertNoGC nogc; |
3479 | 0 | for (auto iter = whiteNodes.Iter(); !iter.Done(); iter.Next()) { |
3480 | 0 | PtrInfo* pinfo = iter.Get(); |
3481 | 0 | MOZ_ASSERT(pinfo->mParticipant, |
3482 | 0 | "Unroot shouldn't see objects removed from graph."); |
3483 | 0 | pinfo->mParticipant->Unroot(pinfo->mPointer); |
3484 | 0 | } |
3485 | 0 | timeLog.Checkpoint("CollectWhite::Unroot"); |
3486 | 0 |
3487 | 0 | nsCycleCollector_dispatchDeferredDeletion(false, true);
3488 | 0 | timeLog.Checkpoint("CollectWhite::dispatchDeferredDeletion");
3489 | 0 |
3490 | 0 | mIncrementalPhase = CleanupPhase;
3491 | 0 |
3492 | 0 | return numWhiteNodes > 0 || numWhiteGCed > 0 || numWhiteJSZones > 0; |
3493 | 0 | } |
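 | |
 | | // [Editor's note] The Root/Unlink/Unroot protocol above in miniature.
 | | // Each pass runs to completion over all whites before the next starts,
 | | // so no white node is freed while another is still being unlinked. The
 | | // SketchParticipant type is a hypothetical stand-in for
 | | // nsCycleCollectionParticipant (assumes <vector> and <utility>):
 | | #if 0 // illustrative sketch only; not part of this file
 | | struct SketchParticipant {
 | |   void Root(void* aObj);   // pin: take an extra reference
 | |   void Unlink(void* aObj); // cut outgoing edges; the cycle collapses
 | |   void Unroot(void* aObj); // drop the pin; the object may now die
 | | };
 | |
 | | static void CollectWhiteSketch(
 | |     std::vector<std::pair<SketchParticipant*, void*>>& aWhites)
 | | {
 | |   for (auto& w : aWhites) w.first->Root(w.second);   // stabilize all
 | |   for (auto& w : aWhites) w.first->Unlink(w.second); // then cut edges
 | |   for (auto& w : aWhites) w.first->Unroot(w.second); // then let go
 | | }
 | | #endif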
3494 | | |
3495 | | |
3496 | | //////////////////////// |
3497 | | // Memory reporting |
3498 | | //////////////////////// |
3499 | | |
3500 | | MOZ_DEFINE_MALLOC_SIZE_OF(CycleCollectorMallocSizeOf) |
3501 | | |
3502 | | NS_IMETHODIMP |
3503 | | nsCycleCollector::CollectReports(nsIHandleReportCallback* aHandleReport, |
3504 | | nsISupports* aData, bool aAnonymize) |
3505 | 0 | { |
3506 | 0 | size_t objectSize, graphSize, purpleBufferSize; |
3507 | 0 | SizeOfIncludingThis(CycleCollectorMallocSizeOf, |
3508 | 0 | &objectSize, &graphSize, |
3509 | 0 | &purpleBufferSize); |
3510 | 0 |
3511 | 0 | if (objectSize > 0) { |
3512 | 0 | MOZ_COLLECT_REPORT( |
3513 | 0 | "explicit/cycle-collector/collector-object", KIND_HEAP, UNITS_BYTES, |
3514 | 0 | objectSize, |
3515 | 0 | "Memory used for the cycle collector object itself."); |
3516 | 0 | } |
3517 | 0 |
3518 | 0 | if (graphSize > 0) { |
3519 | 0 | MOZ_COLLECT_REPORT( |
3520 | 0 | "explicit/cycle-collector/graph", KIND_HEAP, UNITS_BYTES, |
3521 | 0 | graphSize, |
3522 | 0 | "Memory used for the cycle collector's graph. This should be zero when " |
3523 | 0 | "the collector is idle."); |
3524 | 0 | } |
3525 | 0 |
3526 | 0 | if (purpleBufferSize > 0) { |
3527 | 0 | MOZ_COLLECT_REPORT( |
3528 | 0 | "explicit/cycle-collector/purple-buffer", KIND_HEAP, UNITS_BYTES, |
3529 | 0 | purpleBufferSize, |
3530 | 0 | "Memory used for the cycle collector's purple buffer."); |
3531 | 0 | } |
3532 | 0 |
3533 | 0 | return NS_OK; |
3534 | 0 | }
3535 | | |
3536 | | |
3537 | | //////////////////////////////////////////////////////////////////////// |
3538 | | // Collector implementation |
3539 | | //////////////////////////////////////////////////////////////////////// |
3540 | | |
3541 | | nsCycleCollector::nsCycleCollector() : |
3542 | | mActivelyCollecting(false), |
3543 | | mFreeingSnowWhite(false), |
3544 | | mScanInProgress(false), |
3545 | | mCCJSRuntime(nullptr), |
3546 | | mIncrementalPhase(IdlePhase), |
3547 | | #ifdef DEBUG |
3548 | | mEventTarget(GetCurrentThreadSerialEventTarget()), |
3549 | | #endif |
3550 | | mWhiteNodeCount(0), |
3551 | | mBeforeUnlinkCB(nullptr), |
3552 | | mForgetSkippableCB(nullptr), |
3553 | | mUnmergedNeeded(0), |
3554 | | mMergedInARow(0) |
3555 | 3 | { |
3556 | 3 | } |
3557 | | |
3558 | | nsCycleCollector::~nsCycleCollector() |
3559 | 0 | { |
3560 | 0 | MOZ_ASSERT(!mJSPurpleBuffer, "Didn't call JSPurpleBuffer::Destroy?"); |
3561 | 0 |
3562 | 0 | UnregisterWeakMemoryReporter(this); |
3563 | 0 | } |
3564 | | |
3565 | | void |
3566 | | nsCycleCollector::SetCCJSRuntime(CycleCollectedJSRuntime* aCCRuntime) |
3567 | 3 | { |
3568 | 3 | MOZ_RELEASE_ASSERT(!mCCJSRuntime, "Multiple registrations of CycleCollectedJSRuntime in cycle collector"); |
3569 | 3 | mCCJSRuntime = aCCRuntime; |
3570 | 3 | |
3571 | 3 | if (!NS_IsMainThread()) { |
3572 | 0 | return; |
3573 | 0 | } |
3574 | 3 | |
3575 | 3 | // We can't register as a reporter in nsCycleCollector() because that runs |
3576 | 3 | // before the memory reporter manager is initialized. So we do it here |
3577 | 3 | // instead. |
3578 | 3 | RegisterWeakMemoryReporter(this); |
3579 | 3 | } |
3580 | | |
3581 | | void |
3582 | | nsCycleCollector::ClearCCJSRuntime() |
3583 | 0 | { |
3584 | 0 | MOZ_RELEASE_ASSERT(mCCJSRuntime, "Clearing CycleCollectedJSRuntime in cycle collector before a runtime was registered"); |
3585 | 0 | mCCJSRuntime = nullptr; |
3586 | 0 | } |
3587 | | |
3588 | | #ifdef DEBUG |
3589 | | static bool |
3590 | | HasParticipant(void* aPtr, nsCycleCollectionParticipant* aParti) |
3591 | | { |
3592 | | if (aParti) { |
3593 | | return true; |
3594 | | } |
3595 | | |
3596 | | nsXPCOMCycleCollectionParticipant* xcp; |
3597 | | ToParticipant(static_cast<nsISupports*>(aPtr), &xcp); |
3598 | | return xcp != nullptr; |
3599 | | } |
3600 | | #endif |
3601 | | |
3602 | | MOZ_ALWAYS_INLINE void |
3603 | | nsCycleCollector::Suspect(void* aPtr, nsCycleCollectionParticipant* aParti, |
3604 | | nsCycleCollectingAutoRefCnt* aRefCnt) |
3605 | 3.24M | { |
3606 | 3.24M | CheckThreadSafety(); |
3607 | 3.24M | |
3608 | 3.24M | // Don't call AddRef or Release of a CCed object in a Traverse() method. |
3609 | 3.24M | MOZ_ASSERT(!mScanInProgress, "Attempted to call Suspect() while a scan was in progress"); |
3610 | 3.24M | |
3611 | 3.24M | if (MOZ_UNLIKELY(mScanInProgress)) { |
3612 | 0 | return; |
3613 | 0 | } |
3614 | 3.24M | |
3615 | 3.24M | MOZ_ASSERT(aPtr, "Don't suspect null pointers"); |
3616 | 3.24M | |
3617 | 3.24M | MOZ_ASSERT(HasParticipant(aPtr, aParti), |
3618 | 3.24M | "Suspected nsISupports pointer must QI to nsXPCOMCycleCollectionParticipant"); |
3619 | 3.24M | |
3620 | 3.24M | MOZ_ASSERT(aParti || CanonicalizeXPCOMParticipant(static_cast<nsISupports*>(aPtr)) == aPtr, |
3621 | 3.24M | "Suspect nsISupports pointer must be canonical"); |
3622 | 3.24M | |
3623 | 3.24M | mPurpleBuf.Put(aPtr, aParti, aRefCnt); |
3624 | 3.24M | } |
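 | |
 | | // [Editor's note] Where Suspect() sits in an object's lifetime: Release()
 | | // on a cycle-collected object does not free it on an N+1 -> N transition
 | | // for nonzero N; it records the object as a potential cycle root instead.
 | | // Hypothetical sketch (SketchSuspect and SketchDeferDelete stand in for
 | | // the purple buffer and the snow-white machinery):
 | | #if 0 // illustrative sketch only; not part of this file
 | | static void SketchSuspect(void* aObj);     // candidate cycle root
 | | static void SketchDeferDelete(void* aObj); // snow-white: refcount hit 0
 | |
 | | static uint32_t SketchRelease(uint32_t& aRefCnt, void* aObj)
 | | {
 | |   uint32_t count = --aRefCnt;
 | |   if (count == 0) {
 | |     SketchDeferDelete(aObj); // no owners left; just awaiting deletion
 | |   } else {
 | |     SketchSuspect(aObj);     // might be alive only through a cycle
 | |   }
 | |   return count;
 | | }
 | | #endif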
3625 | | |
3626 | | void |
3627 | | nsCycleCollector::SuspectNurseryEntries() |
3628 | 0 | { |
3629 | 0 | MOZ_ASSERT(NS_IsMainThread(), "Wrong thread!"); |
3630 | 0 | while (gNurseryPurpleBufferEntryCount) { |
3631 | 0 | NurseryPurpleBufferEntry& entry = |
3632 | 0 | gNurseryPurpleBufferEntry[--gNurseryPurpleBufferEntryCount]; |
3633 | 0 | mPurpleBuf.Put(entry.mPtr, entry.mParticipant, entry.mRefCnt); |
3634 | 0 | } |
3635 | 0 | } |
3636 | | |
3637 | | void |
3638 | | nsCycleCollector::CheckThreadSafety() |
3639 | 3.24M | { |
3640 | | #ifdef DEBUG |
3641 | | MOZ_ASSERT(mEventTarget->IsOnCurrentThread()); |
3642 | | #endif |
3643 | | } |
3644 | | |
3645 | | // The cycle collector uses the mark bitmap to discover what JS objects |
3646 | | // were reachable only from XPConnect roots that might participate in |
3647 | | // cycles. We ask the JS context whether we need to force a GC before |
3648 | | // this CC. It returns true on startup (before the mark bits have been set), |
3649 | | // and also when UnmarkGray has run out of stack. We also force GCs at
3650 | | // shutdown to collect cycles involving both DOM and JS.
3651 | | void |
3652 | | nsCycleCollector::FixGrayBits(bool aForceGC, TimeLog& aTimeLog) |
3653 | 0 | { |
3654 | 0 | CheckThreadSafety(); |
3655 | 0 |
|
3656 | 0 | if (!mCCJSRuntime) { |
3657 | 0 | return; |
3658 | 0 | } |
3659 | 0 | |
3660 | 0 | if (!aForceGC) { |
3661 | 0 | mCCJSRuntime->FixWeakMappingGrayBits(); |
3662 | 0 | aTimeLog.Checkpoint("FixWeakMappingGrayBits"); |
3663 | 0 |
3664 | 0 | bool needGC = !mCCJSRuntime->AreGCGrayBitsValid(); |
3665 | 0 | // Only do a telemetry ping for non-shutdown CCs. |
3666 | 0 | CC_TELEMETRY(_NEED_GC, needGC); |
3667 | 0 | if (!needGC) { |
3668 | 0 | return; |
3669 | 0 | } |
3670 | 0 | mResults.mForcedGC = true; |
3671 | 0 | } |
3672 | 0 |
|
3673 | 0 | uint32_t count = 0; |
3674 | 0 | do { |
3675 | 0 | mCCJSRuntime->GarbageCollect(aForceGC ? JS::gcreason::SHUTDOWN_CC : |
3676 | 0 | JS::gcreason::CC_FORCED); |
3677 | 0 |
3678 | 0 | mCCJSRuntime->FixWeakMappingGrayBits();
3679 | 0 |
3680 | 0 | // It's possible that FixWeakMappingGrayBits will hit OOM when unmarking |
3681 | 0 | // gray and we will have to go round again. The second time there should not |
3682 | 0 | // be any weak mappings to fix up, so the loop body should run at most twice.
3683 | 0 | MOZ_RELEASE_ASSERT(count < 2); |
3684 | 0 | count++; |
3685 | 0 | } while (!mCCJSRuntime->AreGCGrayBitsValid()); |
3686 | 0 |
3687 | 0 | aTimeLog.Checkpoint("FixGrayBits"); |
3688 | 0 | } |
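 | |
 | | // [Editor's note] The retry loop above in miniature: GC, repair the
 | | // weak-map gray bits, and re-check; the release assert (as in the code
 | | // above) encodes the invariant that one repair round suffices, so the
 | | // body runs at most twice. Sketch over a hypothetical runtime interface:
 | | #if 0 // illustrative sketch only; not part of this file
 | | struct SketchRuntime {
 | |   void GarbageCollect();
 | |   void FixWeakMappingGrayBits();
 | |   bool AreGCGrayBitsValid();
 | | };
 | |
 | | static void FixGrayBitsSketch(SketchRuntime& aRt)
 | | {
 | |   uint32_t count = 0;
 | |   do {
 | |     aRt.GarbageCollect();
 | |     aRt.FixWeakMappingGrayBits();
 | |     MOZ_RELEASE_ASSERT(count < 2); // needing a third pass would be a bug
 | |     count++;
 | |   } while (!aRt.AreGCGrayBitsValid());
 | | }
 | | #endif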
3689 | | |
3690 | | bool |
3691 | | nsCycleCollector::IsIncrementalGCInProgress() |
3692 | 0 | { |
3693 | 0 | return mCCJSRuntime && JS::IsIncrementalGCInProgress(mCCJSRuntime->Runtime()); |
3694 | 0 | } |
3695 | | |
3696 | | void |
3697 | | nsCycleCollector::FinishAnyIncrementalGCInProgress() |
3698 | 0 | { |
3699 | 0 | if (IsIncrementalGCInProgress()) { |
3700 | 0 | NS_WARNING("Finishing incremental GC in progress during CC"); |
3701 | 0 | JSContext* cx = CycleCollectedJSContext::Get()->Context(); |
3702 | 0 | JS::PrepareForIncrementalGC(cx); |
3703 | 0 | JS::FinishIncrementalGC(cx, JS::gcreason::CC_FORCED); |
3704 | 0 | } |
3705 | 0 | } |
3706 | | |
3707 | | void |
3708 | | nsCycleCollector::CleanupAfterCollection() |
3709 | 0 | { |
3710 | 0 | TimeLog timeLog; |
3711 | 0 | MOZ_ASSERT(mIncrementalPhase == CleanupPhase); |
3712 | 0 | MOZ_RELEASE_ASSERT(!mScanInProgress); |
3713 | 0 | mGraph.Clear(); |
3714 | 0 | timeLog.Checkpoint("CleanupAfterCollection::mGraph.Clear()"); |
3715 | 0 |
3716 | 0 | uint32_t interval = |
3717 | 0 | (uint32_t)((TimeStamp::Now() - mCollectionStart).ToMilliseconds()); |
3718 | | #ifdef COLLECT_TIME_DEBUG |
3719 | | printf("cc: total cycle collector time was %ums in %u slices\n", interval, |
3720 | | mResults.mNumSlices); |
3721 | | printf("cc: visited %u ref counted and %u GCed objects, freed %u ref counted and %u GCed objects",
3722 | | mResults.mVisitedRefCounted, mResults.mVisitedGCed,
3723 | | mResults.mFreedRefCounted, mResults.mFreedGCed);
3724 | | uint32_t numVisited = mResults.mVisitedRefCounted + mResults.mVisitedGCed;
3725 | | if (numVisited > 1000) {
3726 | | uint32_t numFreed = mResults.mFreedRefCounted + mResults.mFreedGCed;
3727 | | printf(" (%u%%)", 100 * numFreed / numVisited);
3728 | | } |
3729 | | printf(".\ncc: \n"); |
3730 | | #endif |
3731 | |
3732 | 0 | CC_TELEMETRY( , interval); |
3733 | 0 | CC_TELEMETRY(_VISITED_REF_COUNTED, mResults.mVisitedRefCounted); |
3734 | 0 | CC_TELEMETRY(_VISITED_GCED, mResults.mVisitedGCed); |
3735 | 0 | CC_TELEMETRY(_COLLECTED, mWhiteNodeCount); |
3736 | 0 | timeLog.Checkpoint("CleanupAfterCollection::telemetry"); |
3737 | 0 |
3738 | 0 | if (mCCJSRuntime) { |
3739 | 0 | mCCJSRuntime->FinalizeDeferredThings(mResults.mAnyManual |
3740 | 0 | ? CycleCollectedJSContext::FinalizeNow |
3741 | 0 | : CycleCollectedJSContext::FinalizeIncrementally); |
3742 | 0 | mCCJSRuntime->EndCycleCollectionCallback(mResults); |
3743 | 0 | timeLog.Checkpoint("CleanupAfterCollection::EndCycleCollectionCallback()"); |
3744 | 0 | } |
3745 | 0 | mIncrementalPhase = IdlePhase; |
3746 | 0 | } |
3747 | | |
3748 | | void |
3749 | | nsCycleCollector::ShutdownCollect() |
3750 | 0 | { |
3751 | 0 | FinishAnyIncrementalGCInProgress(); |
3752 | 0 | JS::ShutdownAsyncTasks(CycleCollectedJSContext::Get()->Context()); |
3753 | 0 |
3754 | 0 | SliceBudget unlimitedBudget = SliceBudget::unlimited(); |
3755 | 0 | uint32_t i; |
3756 | 0 | for (i = 0; i < DEFAULT_SHUTDOWN_COLLECTIONS; ++i) { |
3757 | 0 | if (!Collect(ShutdownCC, unlimitedBudget, nullptr)) { |
3758 | 0 | break; |
3759 | 0 | } |
3760 | 0 | } |
3761 | 0 | NS_WARNING_ASSERTION(i < NORMAL_SHUTDOWN_COLLECTIONS, "Extra shutdown CC"); |
3762 | 0 | } |
3763 | | |
3764 | | static void |
3765 | | PrintPhase(const char* aPhase) |
3766 | 0 | { |
3767 | | #ifdef DEBUG_PHASES |
3768 | | printf("cc: begin %s on %s\n", aPhase, |
3769 | | NS_IsMainThread() ? "mainthread" : "worker"); |
3770 | | #endif |
3771 | | } |
3772 | | |
3773 | | bool |
3774 | | nsCycleCollector::Collect(ccType aCCType, |
3775 | | SliceBudget& aBudget, |
3776 | | nsICycleCollectorListener* aManualListener, |
3777 | | bool aPreferShorterSlices) |
3778 | 0 | { |
3779 | 0 | CheckThreadSafety(); |
3780 | 0 |
|
3781 | 0 | // This can legitimately happen in a few cases. See bug 383651. |
3782 | 0 | // When recording/replaying we do not collect cycles. |
3783 | 0 | if (mActivelyCollecting || mFreeingSnowWhite || recordreplay::IsRecordingOrReplaying()) { |
3784 | 0 | return false; |
3785 | 0 | } |
3786 | 0 | mActivelyCollecting = true; |
3787 | 0 |
3788 | 0 | MOZ_ASSERT(!IsIncrementalGCInProgress()); |
3789 | 0 |
3790 | 0 | mozilla::Maybe<mozilla::AutoGlobalTimelineMarker> marker; |
3791 | 0 | if (NS_IsMainThread()) { |
3792 | 0 | marker.emplace("nsCycleCollector::Collect", MarkerStackRequest::NO_STACK); |
3793 | 0 | } |
3794 | 0 |
3795 | 0 | bool startedIdle = IsIdle();
3796 | 0 | bool collectedAny = false;
3797 | 0 |
3798 | 0 | // If the CC started idle, it will call BeginCollection, which |
3799 | 0 | // will do FreeSnowWhite, so it doesn't need to be done here. |
3800 | 0 | if (!startedIdle) { |
3801 | 0 | TimeLog timeLog; |
3802 | 0 | FreeSnowWhite(true); |
3803 | 0 | timeLog.Checkpoint("Collect::FreeSnowWhite"); |
3804 | 0 | } |
3805 | 0 |
3806 | 0 | if (aCCType != SliceCC) { |
3807 | 0 | mResults.mAnyManual = true; |
3808 | 0 | } |
3809 | 0 |
3810 | 0 | ++mResults.mNumSlices;
3811 | 0 |
3812 | 0 | bool continueSlice = aBudget.isUnlimited() || !aPreferShorterSlices; |
3813 | 0 | do { |
3814 | 0 | switch (mIncrementalPhase) { |
3815 | 0 | case IdlePhase: |
3816 | 0 | PrintPhase("BeginCollection"); |
3817 | 0 | BeginCollection(aCCType, aManualListener); |
3818 | 0 | break; |
3819 | 0 | case GraphBuildingPhase: |
3820 | 0 | PrintPhase("MarkRoots"); |
3821 | 0 | MarkRoots(aBudget); |
3822 | 0 |
3823 | 0 | // Only continue this slice if we're running synchronously or the |
3824 | 0 | // next phase will probably be short, to reduce the max pause for this |
3825 | 0 | // collection. |
3826 | 0 | // (There's no need to check if we've finished graph building, because |
3827 | 0 | // if we haven't, we've already exceeded our budget, and will finish |
3828 | 0 | // this slice anyways.) |
3829 | 0 | continueSlice = aBudget.isUnlimited() || |
3830 | 0 | (mResults.mNumSlices < 3 && !aPreferShorterSlices); |
3831 | 0 | break; |
3832 | 0 | case ScanAndCollectWhitePhase: |
3833 | 0 | // We do ScanRoots and CollectWhite in a single slice to ensure |
3834 | 0 | // that we won't unlink a live object if a weak reference is |
3835 | 0 | // promoted to a strong reference after ScanRoots has finished. |
3836 | 0 | // See bug 926533. |
3837 | 0 | PrintPhase("ScanRoots"); |
3838 | 0 | ScanRoots(startedIdle); |
3839 | 0 | PrintPhase("CollectWhite"); |
3840 | 0 | collectedAny = CollectWhite(); |
3841 | 0 | break; |
3842 | 0 | case CleanupPhase: |
3843 | 0 | PrintPhase("CleanupAfterCollection"); |
3844 | 0 | CleanupAfterCollection(); |
3845 | 0 | continueSlice = false; |
3846 | 0 | break; |
3847 | 0 | } |
3848 | 0 | if (continueSlice) { |
3849 | 0 | // Force SliceBudget::isOverBudget to check the time. |
3850 | 0 | aBudget.step(SliceBudget::CounterReset); |
3851 | 0 | continueSlice = !aBudget.isOverBudget(); |
3852 | 0 | } |
3853 | 0 | } while (continueSlice); |
3854 | 0 |
3855 | 0 | // Clear mActivelyCollecting here to ensure that a recursive call to |
3856 | 0 | // Collect() does something. |
3857 | 0 | mActivelyCollecting = false; |
3858 | 0 |
3859 | 0 | if (aCCType != SliceCC && !startedIdle) { |
3860 | 0 | // We were in the middle of an incremental CC (using its own listener). |
3861 | 0 | // Somebody has forced a CC, so after having finished out the current CC, |
3862 | 0 | // run the CC again using the new listener. |
3863 | 0 | MOZ_ASSERT(IsIdle()); |
3864 | 0 | if (Collect(aCCType, aBudget, aManualListener)) { |
3865 | 0 | collectedAny = true; |
3866 | 0 | } |
3867 | 0 | } |
3868 | 0 |
3869 | 0 | MOZ_ASSERT_IF(aCCType != SliceCC, IsIdle());
3870 | 0 |
3871 | 0 | return collectedAny; |
3872 | 0 | } |
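 | |
 | | // [Editor's note] The slice loop above, reduced to its state machine:
 | | // Idle -> GraphBuilding -> ScanAndCollectWhite -> Cleanup -> Idle, with
 | | // the budget deciding how many states one slice may cross. A condensed
 | | // sketch (the budget is simplified to a step counter):
 | | #if 0 // illustrative sketch only; not part of this file
 | | enum SketchPhase {
 | |   sketchIdle, sketchGraphBuilding, sketchScanAndCollectWhite, sketchCleanup
 | | };
 | |
 | | static void CollectSliceSketch(SketchPhase& aPhase, int aBudgetedSteps)
 | | {
 | |   while (aBudgetedSteps-- > 0) {
 | |     switch (aPhase) {
 | |       case sketchIdle:                 // BeginCollection(): set up graph
 | |         aPhase = sketchGraphBuilding;
 | |         break;
 | |       case sketchGraphBuilding:        // MarkRoots(); the real code can
 | |         aPhase = sketchScanAndCollectWhite; // stay here across many slices
 | |         break;
 | |       case sketchScanAndCollectWhite:  // ScanRoots() + CollectWhite() are
 | |         aPhase = sketchCleanup;        // always one slice (bug 926533)
 | |         break;
 | |       case sketchCleanup:              // CleanupAfterCollection()
 | |         aPhase = sketchIdle;
 | |         return;                        // this collection is finished
 | |     }
 | |   }
 | |   // Budget exhausted: aPhase stays parked and the next slice resumes here.
 | | }
 | | #endif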
3873 | | |
3874 | | // Any JS objects we have in the graph could die when we GC, but we |
3875 | | // don't want to abandon the current CC, because the graph contains |
3876 | | // information about purple roots. So we synchronously finish off |
3877 | | // the current CC. |
3878 | | void |
3879 | | nsCycleCollector::PrepareForGarbageCollection() |
3880 | 18 | { |
3881 | 18 | if (IsIdle()) { |
3882 | 18 | MOZ_ASSERT(mGraph.IsEmpty(), "Non-empty graph when idle"); |
3883 | 18 | MOZ_ASSERT(!mBuilder, "Non-null builder when idle"); |
3884 | 18 | if (mJSPurpleBuffer) { |
3885 | 0 | mJSPurpleBuffer->Destroy(); |
3886 | 0 | } |
3887 | 18 | return; |
3888 | 18 | } |
3889 | 0 | |
3890 | 0 | FinishAnyCurrentCollection(); |
3891 | 0 | } |
3892 | | |
3893 | | void |
3894 | | nsCycleCollector::FinishAnyCurrentCollection() |
3895 | 0 | { |
3896 | 0 | if (IsIdle()) { |
3897 | 0 | return; |
3898 | 0 | } |
3899 | 0 | |
3900 | 0 | SliceBudget unlimitedBudget = SliceBudget::unlimited(); |
3901 | 0 | PrintPhase("FinishAnyCurrentCollection"); |
3902 | 0 | // Use SliceCC because we only want to finish the CC in progress. |
3903 | 0 | Collect(SliceCC, unlimitedBudget, nullptr); |
3904 | 0 |
3905 | 0 | // It is only okay for Collect() to have failed to finish the |
3906 | 0 | // current CC if we're reentering the CC at some point past |
3907 | 0 | // graph building. We need to be past the point where the CC will |
3908 | 0 | // look at JS objects so that it is safe to GC. |
3909 | 0 | MOZ_ASSERT(IsIdle() || |
3910 | 0 | (mActivelyCollecting && mIncrementalPhase != GraphBuildingPhase), |
3911 | 0 | "Reentered CC during graph building"); |
3912 | 0 | } |
3913 | | |
3914 | | // Don't merge too many times in a row, and do at least a minimum |
3915 | | // number of unmerged CCs in a row. |
3916 | | static const uint32_t kMinConsecutiveUnmerged = 3; |
3917 | | static const uint32_t kMaxConsecutiveMerged = 3; |
3918 | | |
3919 | | bool |
3920 | | nsCycleCollector::ShouldMergeZones(ccType aCCType) |
3921 | 0 | { |
3922 | 0 | if (!mCCJSRuntime) { |
3923 | 0 | return false; |
3924 | 0 | } |
3925 | 0 | |
3926 | 0 | MOZ_ASSERT(mUnmergedNeeded <= kMinConsecutiveUnmerged); |
3927 | 0 | MOZ_ASSERT(mMergedInARow <= kMaxConsecutiveMerged); |
3928 | 0 |
3929 | 0 | if (mMergedInARow == kMaxConsecutiveMerged) { |
3930 | 0 | MOZ_ASSERT(mUnmergedNeeded == 0); |
3931 | 0 | mUnmergedNeeded = kMinConsecutiveUnmerged; |
3932 | 0 | } |
3933 | 0 |
3934 | 0 | if (mUnmergedNeeded > 0) { |
3935 | 0 | mUnmergedNeeded--; |
3936 | 0 | mMergedInARow = 0; |
3937 | 0 | return false; |
3938 | 0 | } |
3939 | 0 | |
3940 | 0 | if (aCCType == SliceCC && mCCJSRuntime->UsefulToMergeZones()) { |
3941 | 0 | mMergedInARow++; |
3942 | 0 | return true; |
3943 | 0 | } else { |
3944 | 0 | mMergedInARow = 0; |
3945 | 0 | return false; |
3946 | 0 | } |
3947 | 0 | } |
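 | |
 | | // [Editor's note] The two counters implement a simple duty cycle: at most
 | | // kMaxConsecutiveMerged merged CCs in a row, then at least
 | | // kMinConsecutiveUnmerged unmerged ones before merging is allowed again.
 | | // Equivalent stand-alone form (both constants inlined as 3):
 | | #if 0 // illustrative sketch only; not part of this file
 | | static bool ShouldMergeSketch(uint32_t& aMergedInARow,
 | |                               uint32_t& aUnmergedNeeded,
 | |                               bool aMergeLooksUseful)
 | | {
 | |   if (aMergedInARow == 3) { // hit kMaxConsecutiveMerged
 | |     aUnmergedNeeded = 3;    // force kMinConsecutiveUnmerged unmerged CCs
 | |   }
 | |   if (aUnmergedNeeded > 0) {
 | |     --aUnmergedNeeded;
 | |     aMergedInARow = 0;
 | |     return false;
 | |   }
 | |   if (aMergeLooksUseful) {  // e.g. UsefulToMergeZones() on a SliceCC
 | |     ++aMergedInARow;
 | |     return true;
 | |   }
 | |   aMergedInARow = 0;
 | |   return false;
 | | }
 | | #endif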
3948 | | |
3949 | | void |
3950 | | nsCycleCollector::BeginCollection(ccType aCCType, |
3951 | | nsICycleCollectorListener* aManualListener) |
3952 | 0 | { |
3953 | 0 | TimeLog timeLog; |
3954 | 0 | MOZ_ASSERT(IsIdle()); |
3955 | 0 | MOZ_RELEASE_ASSERT(!mScanInProgress); |
3956 | 0 |
3957 | 0 | mCollectionStart = TimeStamp::Now();
3958 | 0 |
3959 | 0 | if (mCCJSRuntime) { |
3960 | 0 | mCCJSRuntime->BeginCycleCollectionCallback(); |
3961 | 0 | timeLog.Checkpoint("BeginCycleCollectionCallback()"); |
3962 | 0 | } |
3963 | 0 |
3964 | 0 | bool isShutdown = (aCCType == ShutdownCC);
3965 | 0 |
3966 | 0 | // Set up the listener for this CC. |
3967 | 0 | MOZ_ASSERT_IF(isShutdown, !aManualListener); |
3968 | 0 | MOZ_ASSERT(!mLogger, "Forgot to clear a previous listener?"); |
3969 | 0 |
3970 | 0 | if (aManualListener) { |
3971 | 0 | aManualListener->AsLogger(getter_AddRefs(mLogger)); |
3972 | 0 | } |
3973 | 0 |
3974 | 0 | aManualListener = nullptr; |
3975 | 0 | if (!mLogger && mParams.LogThisCC(isShutdown)) { |
3976 | 0 | mLogger = new nsCycleCollectorLogger(); |
3977 | 0 | if (mParams.AllTracesThisCC(isShutdown)) { |
3978 | 0 | mLogger->SetAllTraces(); |
3979 | 0 | } |
3980 | 0 | } |
3981 | 0 |
3982 | 0 | // On a WantAllTraces CC, force a synchronous global GC to prevent |
3983 | 0 | // hijinks from ForgetSkippable and compartmental GCs. |
3984 | 0 | bool forceGC = isShutdown || (mLogger && mLogger->IsAllTraces()); |
3985 | 0 |
3986 | 0 | // BeginCycleCollectionCallback() might have started an IGC, and we need |
3987 | 0 | // to finish it before we run FixGrayBits. |
3988 | 0 | FinishAnyIncrementalGCInProgress(); |
3989 | 0 | timeLog.Checkpoint("Pre-FixGrayBits finish IGC"); |
3990 | 0 |
3991 | 0 | FixGrayBits(forceGC, timeLog); |
3992 | 0 | if (mCCJSRuntime) { |
3993 | 0 | mCCJSRuntime->CheckGrayBits(); |
3994 | 0 | } |
3995 | 0 |
3996 | 0 | FreeSnowWhite(true);
3997 | 0 | timeLog.Checkpoint("BeginCollection FreeSnowWhite");
3998 | 0 |
3999 | 0 | if (mLogger && NS_FAILED(mLogger->Begin())) { |
4000 | 0 | mLogger = nullptr; |
4001 | 0 | } |
4002 | 0 |
4003 | 0 | // FreeSnowWhite could potentially have started an IGC, which we need |
4004 | 0 | // to finish before we look at any JS roots. |
4005 | 0 | FinishAnyIncrementalGCInProgress(); |
4006 | 0 | timeLog.Checkpoint("Post-FreeSnowWhite finish IGC"); |
4007 | 0 |
4008 | 0 | // Set up the data structures for building the graph. |
4009 | 0 | JS::AutoAssertNoGC nogc; |
4010 | 0 | JS::AutoEnterCycleCollection autocc(mCCJSRuntime->Runtime()); |
4011 | 0 | mGraph.Init(); |
4012 | 0 | mResults.Init(); |
4013 | 0 | mResults.mAnyManual = (aCCType != SliceCC); |
4014 | 0 | bool mergeZones = ShouldMergeZones(aCCType); |
4015 | 0 | mResults.mMergedZones = mergeZones; |
4016 | 0 |
4017 | 0 | MOZ_ASSERT(!mBuilder, "Forgot to clear mBuilder"); |
4018 | 0 | mBuilder = new CCGraphBuilder(mGraph, mResults, mCCJSRuntime, mLogger, |
4019 | 0 | mergeZones); |
4020 | 0 | timeLog.Checkpoint("BeginCollection prepare graph builder"); |
4021 | 0 |
4022 | 0 | if (mCCJSRuntime) { |
4023 | 0 | mCCJSRuntime->TraverseRoots(*mBuilder); |
4024 | 0 | timeLog.Checkpoint("mCCJSRuntime->TraverseRoots()");
4025 | 0 | } |
4026 | 0 |
4027 | 0 | AutoRestore<bool> ar(mScanInProgress); |
4028 | 0 | MOZ_RELEASE_ASSERT(!mScanInProgress); |
4029 | 0 | mScanInProgress = true; |
4030 | 0 | mPurpleBuf.SelectPointers(*mBuilder); |
4031 | 0 | timeLog.Checkpoint("SelectPointers()"); |
4032 | 0 |
4033 | 0 | mBuilder->DoneAddingRoots(); |
4034 | 0 | mIncrementalPhase = GraphBuildingPhase; |
4035 | 0 | } |
4036 | | |
4037 | | uint32_t |
4038 | | nsCycleCollector::SuspectedCount() |
4039 | 119 | { |
4040 | 119 | CheckThreadSafety(); |
4041 | 119 | if (NS_IsMainThread()) { |
4042 | 119 | return gNurseryPurpleBufferEntryCount + mPurpleBuf.Count(); |
4043 | 119 | } |
4044 | 0 | |
4045 | 0 | return mPurpleBuf.Count(); |
4046 | 0 | } |
4047 | | |
4048 | | void |
4049 | | nsCycleCollector::Shutdown(bool aDoCollect) |
4050 | 0 | { |
4051 | 0 | CheckThreadSafety(); |
4052 | 0 |
4053 | 0 | if (NS_IsMainThread()) { |
4054 | 0 | gNurseryPurpleBufferEnabled = false; |
4055 | 0 | } |
4056 | 0 |
4057 | 0 | // Always delete snow white objects. |
4058 | 0 | FreeSnowWhite(true); |
4059 | 0 |
4060 | 0 | if (aDoCollect) { |
4061 | 0 | ShutdownCollect(); |
4062 | 0 | } |
4063 | 0 |
4064 | 0 | if (mJSPurpleBuffer) { |
4065 | 0 | mJSPurpleBuffer->Destroy(); |
4066 | 0 | } |
4067 | 0 | } |
4068 | | |
4069 | | void |
4070 | | nsCycleCollector::RemoveObjectFromGraph(void* aObj) |
4071 | 0 | { |
4072 | 0 | if (IsIdle()) { |
4073 | 0 | return; |
4074 | 0 | } |
4075 | 0 | |
4076 | 0 | mGraph.RemoveObjectFromMap(aObj); |
4077 | 0 | if (mBuilder) { |
4078 | 0 | mBuilder->RemoveCachedEntry(aObj); |
4079 | 0 | } |
4080 | 0 | } |
4081 | | |
4082 | | void |
4083 | | nsCycleCollector::SizeOfIncludingThis(mozilla::MallocSizeOf aMallocSizeOf, |
4084 | | size_t* aObjectSize, |
4085 | | size_t* aGraphSize, |
4086 | | size_t* aPurpleBufferSize) const |
4087 | 0 | { |
4088 | 0 | *aObjectSize = aMallocSizeOf(this); |
4089 | 0 |
4090 | 0 | *aGraphSize = mGraph.SizeOfExcludingThis(aMallocSizeOf);
4091 | 0 |
4092 | 0 | *aPurpleBufferSize = mPurpleBuf.SizeOfExcludingThis(aMallocSizeOf);
4093 | 0 |
4094 | 0 | // These fields are deliberately not measured: |
4095 | 0 | // - mCCJSRuntime: because it's non-owning and measured by JS reporters. |
4096 | 0 | // - mParams: because it only contains scalars. |
4097 | 0 | } |
4098 | | |
4099 | | JSPurpleBuffer* |
4100 | | nsCycleCollector::GetJSPurpleBuffer() |
4101 | 0 | { |
4102 | 0 | if (!mJSPurpleBuffer) { |
4103 | 0 | // The Release call here confuses the GC analysis. |
4104 | 0 | JS::AutoSuppressGCAnalysis nogc; |
4105 | 0 | // JSPurpleBuffer keeps itself alive, but we need to create it in such a
4106 | 0 | // way that it ends up in the normal purple buffer. That happens when the
4107 | 0 | // RefPtr below goes out of scope and calls Release.
4108 | 0 | RefPtr<JSPurpleBuffer> pb = new JSPurpleBuffer(mJSPurpleBuffer); |
4109 | 0 | } |
4110 | 0 | return mJSPurpleBuffer; |
4111 | 0 | } |
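 | |
 | | // [Editor's note] The scoped RefPtr above is deliberate: JSPurpleBuffer
 | | // keeps itself alive, and the Release() that runs when pb goes out of
 | | // scope is an N+1 -> N transition to a nonzero count, which is exactly
 | | // what suspects the buffer into the normal purple buffer. Shape of the
 | | // idiom:
 | | #if 0 // illustrative sketch only; not part of this file
 | | {
 | |   RefPtr<JSPurpleBuffer> pb = new JSPurpleBuffer(mJSPurpleBuffer);
 | |   // pb is an extra, temporary reference on top of the buffer's
 | |   // self-reference.
 | | } // ~RefPtr -> Release(): refcount stays nonzero, so Suspect() fires
 | | #endif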
4112 | | |
4113 | | //////////////////////////////////////////////////////////////////////// |
4114 | | // Module public API (exported in nsCycleCollector.h) |
4115 | | // Just functions that redirect into the singleton, once it's built. |
4116 | | //////////////////////////////////////////////////////////////////////// |
4117 | | |
4118 | | void |
4119 | | nsCycleCollector_registerJSContext(CycleCollectedJSContext* aCx) |
4120 | 3 | { |
4121 | 3 | CollectorData* data = sCollectorData.get(); |
4122 | 3 | |
4123 | 3 | // We should have started the cycle collector by now. |
4124 | 3 | MOZ_ASSERT(data); |
4125 | 3 | MOZ_ASSERT(data->mCollector); |
4126 | 3 | // But we shouldn't already have a context. |
4127 | 3 | MOZ_ASSERT(!data->mContext); |
4128 | 3 | |
4129 | 3 | data->mContext = aCx; |
4130 | 3 | data->mCollector->SetCCJSRuntime(aCx->Runtime()); |
4131 | 3 | } |
4132 | | |
4133 | | void |
4134 | | nsCycleCollector_forgetJSContext() |
4135 | 0 | { |
4136 | 0 | CollectorData* data = sCollectorData.get(); |
4137 | 0 |
4138 | 0 | // We should have started the cycle collector by now. |
4139 | 0 | MOZ_ASSERT(data); |
4140 | 0 | // And we shouldn't have already forgotten our context. |
4141 | 0 | MOZ_ASSERT(data->mContext); |
4142 | 0 |
4143 | 0 | // But it may have shutdown already. |
4144 | 0 | if (data->mCollector) { |
4145 | 0 | data->mCollector->ClearCCJSRuntime(); |
4146 | 0 | data->mContext = nullptr; |
4147 | 0 | } else { |
4148 | 0 | data->mContext = nullptr; |
4149 | 0 | delete data; |
4150 | 0 | sCollectorData.set(nullptr); |
4151 | 0 | } |
4152 | 0 | } |
4153 | | |
4154 | | /* static */ CycleCollectedJSContext* |
4155 | | CycleCollectedJSContext::Get() |
4156 | 72.4M | { |
4157 | 72.4M | CollectorData* data = sCollectorData.get(); |
4158 | 72.4M | if (data) { |
4159 | 72.4M | return data->mContext; |
4160 | 72.4M | } |
4161 | 0 | return nullptr; |
4162 | 0 | } |
4163 | | |
4164 | | MOZ_NEVER_INLINE static void |
4165 | | SuspectAfterShutdown(void* aPtr, nsCycleCollectionParticipant* aCp, |
4166 | | nsCycleCollectingAutoRefCnt* aRefCnt, |
4167 | | bool* aShouldDelete) |
4168 | 0 | { |
4169 | 0 | if (aRefCnt->get() == 0) { |
4170 | 0 | if (!aShouldDelete) { |
4171 | 0 | // The CC is shut down, so we can't be in the middle of an ICC. |
4172 | 0 | ToParticipant(aPtr, &aCp); |
4173 | 0 | aRefCnt->stabilizeForDeletion(); |
4174 | 0 | aCp->DeleteCycleCollectable(aPtr); |
4175 | 0 | } else { |
4176 | 0 | *aShouldDelete = true; |
4177 | 0 | } |
4178 | 0 | } else { |
4179 | 0 | // Make sure we'll get called again. |
4180 | 0 | aRefCnt->RemoveFromPurpleBuffer(); |
4181 | 0 | } |
4182 | 0 | } |
4183 | | |
4184 | | void |
4185 | | NS_CycleCollectorSuspect3(void* aPtr, nsCycleCollectionParticipant* aCp, |
4186 | | nsCycleCollectingAutoRefCnt* aRefCnt, |
4187 | | bool* aShouldDelete) |
4188 | 3.24M | { |
4189 | 3.24M | CollectorData* data = sCollectorData.get(); |
4190 | 3.24M | |
4191 | 3.24M | // We should have started the cycle collector by now. |
4192 | 3.24M | MOZ_ASSERT(data); |
4193 | 3.24M | |
4194 | 3.24M | if (MOZ_LIKELY(data->mCollector)) { |
4195 | 3.24M | data->mCollector->Suspect(aPtr, aCp, aRefCnt); |
4196 | 3.24M | return; |
4197 | 3.24M | } |
4198 | 0 | SuspectAfterShutdown(aPtr, aCp, aRefCnt, aShouldDelete); |
4199 | 0 | } |
4200 | | |
4201 | | void ClearNurseryPurpleBuffer() |
4202 | 0 | { |
4203 | 0 | MOZ_ASSERT(NS_IsMainThread(), "Wrong thread!"); |
4204 | 0 | CollectorData* data = sCollectorData.get(); |
4205 | 0 | MOZ_ASSERT(data); |
4206 | 0 | MOZ_ASSERT(data->mCollector); |
4207 | 0 | data->mCollector->SuspectNurseryEntries(); |
4208 | 0 | } |
4209 | | |
4210 | | void |
4211 | | NS_CycleCollectorSuspectUsingNursery(void* aPtr, |
4212 | | nsCycleCollectionParticipant* aCp, |
4213 | | nsCycleCollectingAutoRefCnt* aRefCnt, |
4214 | | bool* aShouldDelete) |
4215 | 0 | { |
4216 | 0 | MOZ_ASSERT(NS_IsMainThread(), "Wrong thread!"); |
4217 | 0 | if (!gNurseryPurpleBufferEnabled) { |
4218 | 0 | NS_CycleCollectorSuspect3(aPtr, aCp, aRefCnt, aShouldDelete); |
4219 | 0 | return; |
4220 | 0 | } |
4221 | 0 | |
4222 | 0 | SuspectUsingNurseryPurpleBuffer(aPtr, aCp, aRefCnt); |
4223 | 0 | } |
4224 | | |
4225 | | uint32_t |
4226 | | nsCycleCollector_suspectedCount() |
4227 | 119 | { |
4228 | 119 | CollectorData* data = sCollectorData.get(); |
4229 | 119 | |
4230 | 119 | // We should have started the cycle collector by now. |
4231 | 119 | MOZ_ASSERT(data); |
4232 | 119 | |
4233 | 119 | // When recording/replaying we do not collect cycles. Return zero here so |
4234 | 119 | // that callers behave consistently between recording and replaying. |
4235 | 119 | if (!data->mCollector || recordreplay::IsRecordingOrReplaying()) { |
4236 | 0 | return 0; |
4237 | 0 | } |
4238 | 119 | |
4239 | 119 | return data->mCollector->SuspectedCount(); |
4240 | 119 | } |
4241 | | |
4242 | | bool |
4243 | | nsCycleCollector_init() |
4244 | 3 | { |
4245 | | #ifdef DEBUG |
4246 | | static bool sInitialized; |
4247 | | |
4248 | | MOZ_ASSERT(NS_IsMainThread(), "Wrong thread!"); |
4249 | | MOZ_ASSERT(!sInitialized, "Called twice!?"); |
4250 | | sInitialized = true; |
4251 | | #endif |
4252 | | |
4253 | 3 | return sCollectorData.init(); |
4254 | 3 | } |
4255 | | |
4256 | | static nsCycleCollector* gMainThreadCollector; |
4257 | | |
4258 | | void |
4259 | | nsCycleCollector_startup() |
4260 | 3 | { |
4261 | 3 | if (sCollectorData.get()) { |
4262 | 0 | MOZ_CRASH(); |
4263 | 0 | } |
4264 | 3 | |
4265 | 3 | CollectorData* data = new CollectorData; |
4266 | 3 | data->mCollector = new nsCycleCollector(); |
4267 | 3 | data->mContext = nullptr; |
4268 | 3 | |
4269 | 3 | sCollectorData.set(data); |
4270 | 3 | |
4271 | 3 | if (NS_IsMainThread()) { |
4272 | 3 | MOZ_ASSERT(!gMainThreadCollector); |
4273 | 3 | gMainThreadCollector = data->mCollector; |
4274 | 3 | } |
4275 | 3 | } |
4276 | | |
4277 | | void |
4278 | | nsCycleCollector_registerNonPrimaryContext(CycleCollectedJSContext* aCx) |
4279 | 0 | { |
4280 | 0 | if (sCollectorData.get()) { |
4281 | 0 | MOZ_CRASH(); |
4282 | 0 | } |
4283 | 0 |
4284 | 0 | MOZ_ASSERT(gMainThreadCollector);
4285 | 0 |
4286 | 0 | CollectorData* data = new CollectorData;
4287 | 0 |
4288 | 0 | data->mCollector = gMainThreadCollector; |
4289 | 0 | data->mContext = aCx; |
4290 | 0 |
4291 | 0 | sCollectorData.set(data); |
4292 | 0 | } |
4293 | | |
4294 | | void |
4295 | | nsCycleCollector_forgetNonPrimaryContext() |
4296 | 0 | { |
4297 | 0 | CollectorData* data = sCollectorData.get(); |
4298 | 0 |
4299 | 0 | // We should have started the cycle collector by now. |
4300 | 0 | MOZ_ASSERT(data); |
4301 | 0 | // And we shouldn't have already forgotten our context. |
4302 | 0 | MOZ_ASSERT(data->mContext); |
4303 | 0 | // We should not have shut down the cycle collector yet. |
4304 | 0 | MOZ_ASSERT(data->mCollector); |
4305 | 0 |
4306 | 0 | delete data; |
4307 | 0 | sCollectorData.set(nullptr); |
4308 | 0 | } |
4309 | | |
4310 | | void |
4311 | | nsCycleCollector_setBeforeUnlinkCallback(CC_BeforeUnlinkCallback aCB) |
4312 | 3 | { |
4313 | 3 | CollectorData* data = sCollectorData.get(); |
4314 | 3 | |
4315 | 3 | // We should have started the cycle collector by now. |
4316 | 3 | MOZ_ASSERT(data); |
4317 | 3 | MOZ_ASSERT(data->mCollector); |
4318 | 3 | |
4319 | 3 | data->mCollector->SetBeforeUnlinkCallback(aCB); |
4320 | 3 | } |
4321 | | |
4322 | | void |
4323 | | nsCycleCollector_setForgetSkippableCallback(CC_ForgetSkippableCallback aCB) |
4324 | 3 | { |
4325 | 3 | CollectorData* data = sCollectorData.get(); |
4326 | 3 | |
4327 | 3 | // We should have started the cycle collector by now. |
4328 | 3 | MOZ_ASSERT(data); |
4329 | 3 | MOZ_ASSERT(data->mCollector); |
4330 | 3 | |
4331 | 3 | data->mCollector->SetForgetSkippableCallback(aCB); |
4332 | 3 | } |
4333 | | |
4334 | | void |
4335 | | nsCycleCollector_forgetSkippable(js::SliceBudget& aBudget, |
4336 | | bool aRemoveChildlessNodes, |
4337 | | bool aAsyncSnowWhiteFreeing) |
4338 | 0 | { |
4339 | 0 | CollectorData* data = sCollectorData.get(); |
4340 | 0 |
4341 | 0 | // We should have started the cycle collector by now. |
4342 | 0 | MOZ_ASSERT(data); |
4343 | 0 | MOZ_ASSERT(data->mCollector); |
4344 | 0 |
4345 | 0 | AUTO_PROFILER_LABEL("nsCycleCollector_forgetSkippable", GCCC);
4346 | 0 |
4347 | 0 | TimeLog timeLog; |
4348 | 0 | data->mCollector->ForgetSkippable(aBudget, |
4349 | 0 | aRemoveChildlessNodes, |
4350 | 0 | aAsyncSnowWhiteFreeing); |
4351 | 0 | timeLog.Checkpoint("ForgetSkippable()"); |
4352 | 0 | } |
4353 | | |
4354 | | void |
4355 | | nsCycleCollector_dispatchDeferredDeletion(bool aContinuation, bool aPurge) |
4356 | 119 | { |
4357 | 119 | CycleCollectedJSRuntime* rt = CycleCollectedJSRuntime::Get(); |
4358 | 119 | if (rt) { |
4359 | 119 | rt->DispatchDeferredDeletion(aContinuation, aPurge); |
4360 | 119 | } |
4361 | 119 | } |
4362 | | |
4363 | | bool |
4364 | | nsCycleCollector_doDeferredDeletion() |
4365 | 0 | { |
4366 | 0 | CollectorData* data = sCollectorData.get(); |
4367 | 0 |
4368 | 0 | // We should have started the cycle collector by now. |
4369 | 0 | MOZ_ASSERT(data); |
4370 | 0 | MOZ_ASSERT(data->mCollector); |
4371 | 0 | MOZ_ASSERT(data->mContext); |
4372 | 0 |
4373 | 0 | return data->mCollector->FreeSnowWhite(false); |
4374 | 0 | } |
4375 | | |
4376 | | bool |
4377 | | nsCycleCollector_doDeferredDeletionWithBudget(js::SliceBudget& aBudget) |
4378 | 0 | { |
4379 | 0 | CollectorData* data = sCollectorData.get(); |
4380 | 0 |
4381 | 0 | // We should have started the cycle collector by now. |
4382 | 0 | MOZ_ASSERT(data); |
4383 | 0 | MOZ_ASSERT(data->mCollector); |
4384 | 0 | MOZ_ASSERT(data->mContext); |
4385 | 0 |
|
4386 | 0 | return data->mCollector->FreeSnowWhiteWithBudget(aBudget); |
4387 | 0 | } |
4388 | | |
4389 | | already_AddRefed<nsICycleCollectorLogSink> |
4390 | | nsCycleCollector_createLogSink() |
4391 | 0 | { |
4392 | 0 | nsCOMPtr<nsICycleCollectorLogSink> sink = new nsCycleCollectorLogSinkToFile(); |
4393 | 0 | return sink.forget(); |
4394 | 0 | } |
4395 | | |
4396 | | void |
4397 | | nsCycleCollector_collect(nsICycleCollectorListener* aManualListener) |
4398 | 0 | { |
4399 | 0 | CollectorData* data = sCollectorData.get(); |
4400 | 0 |
4401 | 0 | // We should have started the cycle collector by now. |
4402 | 0 | MOZ_ASSERT(data); |
4403 | 0 | MOZ_ASSERT(data->mCollector); |
4404 | 0 |
4405 | 0 | AUTO_PROFILER_LABEL("nsCycleCollector_collect", GCCC);
4406 | 0 |
4407 | 0 | SliceBudget unlimitedBudget = SliceBudget::unlimited(); |
4408 | 0 | data->mCollector->Collect(ManualCC, unlimitedBudget, aManualListener); |
4409 | 0 | } |
4410 | | |
4411 | | void |
4412 | | nsCycleCollector_collectSlice(SliceBudget& budget, |
4413 | | bool aPreferShorterSlices) |
4414 | 0 | { |
4415 | 0 | CollectorData* data = sCollectorData.get(); |
4416 | 0 |
4417 | 0 | // We should have started the cycle collector by now. |
4418 | 0 | MOZ_ASSERT(data); |
4419 | 0 | MOZ_ASSERT(data->mCollector); |
4420 | 0 |
4421 | 0 | AUTO_PROFILER_LABEL("nsCycleCollector_collectSlice", GCCC);
4422 | 0 |
4423 | 0 | data->mCollector->Collect(SliceCC, budget, nullptr, aPreferShorterSlices); |
4424 | 0 | } |
4425 | | |
4426 | | void |
4427 | | nsCycleCollector_prepareForGarbageCollection() |
4428 | 18 | { |
4429 | 18 | CollectorData* data = sCollectorData.get(); |
4430 | 18 | |
4431 | 18 | MOZ_ASSERT(data); |
4432 | 18 | |
4433 | 18 | if (!data->mCollector) { |
4434 | 0 | return; |
4435 | 0 | } |
4436 | 18 | |
4437 | 18 | data->mCollector->PrepareForGarbageCollection(); |
4438 | 18 | } |
4439 | | |
4440 | | void |
4441 | | nsCycleCollector_finishAnyCurrentCollection() |
4442 | 0 | { |
4443 | 0 | CollectorData* data = sCollectorData.get(); |
4444 | 0 |
4445 | 0 | MOZ_ASSERT(data);
4446 | 0 |
4447 | 0 | if (!data->mCollector) { |
4448 | 0 | return; |
4449 | 0 | } |
4450 | 0 | |
4451 | 0 | data->mCollector->FinishAnyCurrentCollection(); |
4452 | 0 | } |
4453 | | |
4454 | | void |
4455 | | nsCycleCollector_shutdown(bool aDoCollect) |
4456 | 0 | { |
4457 | 0 | CollectorData* data = sCollectorData.get(); |
4458 | 0 |
4459 | 0 | if (data) { |
4460 | 0 | MOZ_ASSERT(data->mCollector); |
4461 | 0 | AUTO_PROFILER_LABEL("nsCycleCollector_shutdown", OTHER); |
4462 | 0 |
4463 | 0 | if (gMainThreadCollector == data->mCollector) { |
4464 | 0 | gMainThreadCollector = nullptr; |
4465 | 0 | } |
4466 | 0 | data->mCollector->Shutdown(aDoCollect); |
4467 | 0 | data->mCollector = nullptr; |
4468 | 0 | if (data->mContext) { |
4469 | 0 | // Run any remaining tasks that may have been enqueued via |
4470 | 0 | // RunInStableState or DispatchToMicroTask during the final cycle collection. |
4471 | 0 | data->mContext->ProcessStableStateQueue(); |
4472 | 0 | data->mContext->PerformMicroTaskCheckPoint(true); |
4473 | 0 | } |
4474 | 0 | if (!data->mContext) { |
4475 | 0 | delete data; |
4476 | 0 | sCollectorData.set(nullptr); |
4477 | 0 | } |
4478 | 0 | } |
4479 | 0 | } |