Coverage Report

Created: 2025-11-24 06:11

/src/cpython/Python/gc.c
Line
Count
Source
1
//  This implements the reference cycle garbage collector.
2
//  The Python module interface to the collector is in gcmodule.c.
3
//  See InternalDocs/garbage_collector.md for more information.
4
5
#include "Python.h"
6
#include "pycore_ceval.h"         // _Py_set_eval_breaker_bit()
7
#include "pycore_dict.h"          // _PyInlineValuesSize()
8
#include "pycore_initconfig.h"    // _PyStatus_OK()
9
#include "pycore_interp.h"        // PyInterpreterState.gc
10
#include "pycore_interpframe.h"   // _PyFrame_GetLocalsArray()
11
#include "pycore_object_alloc.h"  // _PyObject_MallocWithType()
12
#include "pycore_pystate.h"       // _PyThreadState_GET()
13
#include "pycore_tuple.h"         // _PyTuple_MaybeUntrack()
14
#include "pycore_weakref.h"       // _PyWeakref_ClearRef()
15
16
#include "pydtrace.h"
17
18
19
#ifndef Py_GIL_DISABLED
20
21
typedef struct _gc_runtime_state GCState;
22
23
#ifdef Py_DEBUG
24
#  define GC_DEBUG
25
#endif
26
27
// Define this when debugging the GC
28
// #define GC_EXTRA_DEBUG
29
30
31
707M
#define GC_NEXT _PyGCHead_NEXT
32
261M
#define GC_PREV _PyGCHead_PREV
33
34
// update_refs() sets this bit for all objects in the current generation.
35
// subtract_refs() and move_unreachable() use it to distinguish whether a
36
// visited object belongs to the generation currently being collected.
37
//
38
// move_unreachable() removes this flag from reachable objects.
39
// Only unreachable objects have this flag.
40
//
41
// No objects in the interpreter have this flag after GC ends.
42
296M
#define PREV_MASK_COLLECTING   _PyGC_PREV_MASK_COLLECTING
43
44
// The second-lowest bit of _gc_next is used for the UNREACHABLE flag.
45
//
46
// This flag indicates that the object is in the unreachable list built by move_unreachable().
47
//
48
// Although this flag is used only in move_unreachable(), move_unreachable()
49
// doesn't clear it there, to skip an unnecessary extra pass over the list.
50
// move_legacy_finalizers() removes this flag instead.
51
// Between those two calls, the unreachable list is not a normal list and we
52
// cannot use most gc_list_* functions on it.
53
33.4M
#define NEXT_MASK_UNREACHABLE  2
54
55
1.58G
#define AS_GC(op) _Py_AS_GC(op)
56
506M
#define FROM_GC(gc) _Py_FROM_GC(gc)
57
58
// Automatically choose the generation that needs collecting.
59
#define GENERATION_AUTO (-1)
60
61
static inline int
62
gc_is_collecting(PyGC_Head *g)
63
164M
{
64
164M
    return (g->_gc_prev & PREV_MASK_COLLECTING) != 0;
65
164M
}
66
67
static inline void
68
gc_clear_collecting(PyGC_Head *g)
69
65.4M
{
70
65.4M
    g->_gc_prev &= ~PREV_MASK_COLLECTING;
71
65.4M
}
72
73
static inline Py_ssize_t
74
gc_get_refs(PyGC_Head *g)
75
150M
{
76
150M
    return (Py_ssize_t)(g->_gc_prev >> _PyGC_PREV_SHIFT);
77
150M
}
78
79
static inline void
80
gc_set_refs(PyGC_Head *g, Py_ssize_t refs)
81
23.5M
{
82
23.5M
    g->_gc_prev = (g->_gc_prev & ~_PyGC_PREV_MASK)
83
23.5M
        | ((uintptr_t)(refs) << _PyGC_PREV_SHIFT);
84
23.5M
}
85
86
static inline void
87
gc_reset_refs(PyGC_Head *g, Py_ssize_t refs)
88
66.5M
{
89
66.5M
    g->_gc_prev = (g->_gc_prev & _PyGC_PREV_MASK_FINALIZED)
90
66.5M
        | PREV_MASK_COLLECTING
91
66.5M
        | ((uintptr_t)(refs) << _PyGC_PREV_SHIFT);
92
66.5M
}
93
94
static inline void
95
gc_decref(PyGC_Head *g)
96
29.9M
{
97
29.9M
    _PyObject_ASSERT_WITH_MSG(FROM_GC(g),
98
29.9M
                              gc_get_refs(g) > 0,
99
29.9M
                              "refcount is too small");
100
29.9M
    g->_gc_prev -= 1 << _PyGC_PREV_SHIFT;
101
29.9M
}
102
103
static inline int
104
gc_old_space(PyGC_Head *g)
105
230M
{
106
230M
    return g->_gc_next & _PyGC_NEXT_MASK_OLD_SPACE_1;
107
230M
}
108
109
static inline int
110
other_space(int space)
111
1.32k
{
112
1.32k
    assert(space == 0 || space == 1);
113
1.32k
    return space ^ _PyGC_NEXT_MASK_OLD_SPACE_1;
114
1.32k
}
115
116
static inline void
117
gc_flip_old_space(PyGC_Head *g)
118
122M
{
119
122M
    g->_gc_next ^= _PyGC_NEXT_MASK_OLD_SPACE_1;
120
122M
}
121
122
static inline void
123
gc_set_old_space(PyGC_Head *g, int space)
124
64.1M
{
125
64.1M
    assert(space == 0 || space == _PyGC_NEXT_MASK_OLD_SPACE_1);
126
64.1M
    g->_gc_next &= ~_PyGC_NEXT_MASK_OLD_SPACE_1;
127
64.1M
    g->_gc_next |= space;
128
64.1M
}
129
130
static PyGC_Head *
131
GEN_HEAD(GCState *gcstate, int n)
132
0
{
133
0
    assert((gcstate->visited_space & (~1)) == 0);
134
0
    switch(n) {
135
0
        case 0:
136
0
            return &gcstate->young.head;
137
0
        case 1:
138
0
            return &gcstate->old[gcstate->visited_space].head;
139
0
        case 2:
140
0
            return &gcstate->old[gcstate->visited_space^1].head;
141
0
        default:
142
0
            Py_UNREACHABLE();
143
0
    }
144
0
}
145
146
static GCState *
147
get_gc_state(void)
148
16.5k
{
149
16.5k
    PyInterpreterState *interp = _PyInterpreterState_GET();
150
16.5k
    return &interp->gc;
151
16.5k
}
152
153
154
void
155
_PyGC_InitState(GCState *gcstate)
156
28
{
157
28
#define INIT_HEAD(GEN) \
158
112
    do { \
159
112
        GEN.head._gc_next = (uintptr_t)&GEN.head; \
160
112
        GEN.head._gc_prev = (uintptr_t)&GEN.head; \
161
112
    } while (0)
162
163
28
    assert(gcstate->young.count == 0);
164
28
    assert(gcstate->old[0].count == 0);
165
28
    assert(gcstate->old[1].count == 0);
166
28
    INIT_HEAD(gcstate->young);
167
28
    INIT_HEAD(gcstate->old[0]);
168
28
    INIT_HEAD(gcstate->old[1]);
169
28
    INIT_HEAD(gcstate->permanent_generation);
170
171
28
#undef INIT_HEAD
172
28
}
173
174
175
PyStatus
176
_PyGC_Init(PyInterpreterState *interp)
177
28
{
178
28
    GCState *gcstate = &interp->gc;
179
180
28
    gcstate->garbage = PyList_New(0);
181
28
    if (gcstate->garbage == NULL) {
182
0
        return _PyStatus_NO_MEMORY();
183
0
    }
184
185
28
    gcstate->callbacks = PyList_New(0);
186
28
    if (gcstate->callbacks == NULL) {
187
0
        return _PyStatus_NO_MEMORY();
188
0
    }
189
28
    gcstate->heap_size = 0;
190
191
28
    return _PyStatus_OK();
192
28
}
193
194
195
/*
196
_gc_prev values
197
---------------
198
199
Between collections, _gc_prev is used for the doubly linked list.
200
201
Lowest two bits of _gc_prev are used for flags.
202
PREV_MASK_COLLECTING is used only while collecting and cleared before GC ends
203
or _PyObject_GC_UNTRACK() is called.
204
205
During a collection, _gc_prev is temporarily used for gc_refs, and the gc list
206
is singly linked until _gc_prev is restored.
207
208
gc_refs
209
    At the start of a collection, update_refs() copies the true refcount
210
    to gc_refs, for each object in the generation being collected.
211
    subtract_refs() then adjusts gc_refs so that it equals the number of
212
    times an object is referenced directly from outside the generation
213
    being collected.
214
215
PREV_MASK_COLLECTING
216
    Objects in the generation being collected are marked PREV_MASK_COLLECTING in
217
    update_refs().
218
219
220
_gc_next values
221
---------------
222
223
_gc_next takes these values:
224
225
0
226
    The object is not tracked
227
228
!= 0
229
    Pointer to the next object in the GC list.
230
    Additionally, a low bit is temporarily used for the
231
    NEXT_MASK_UNREACHABLE flag described below.
232
233
NEXT_MASK_UNREACHABLE
234
    move_unreachable() then moves objects not reachable (whether directly or
235
    indirectly) from outside the generation into an "unreachable" set and
236
    sets this flag.
237
238
    Objects that are found to be reachable have gc_refs set to 1.
239
    If this flag is set on an object that turns out to be reachable, the
240
    object must currently be in the "unreachable" set; the flag is unset
241
    and the object is moved back to the "reachable" set.
242
243
    move_legacy_finalizers() will remove this flag from the "unreachable" set.
244
*/
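/* Illustrative sketch (not part of gc.c): how an adjusted reference count and
 * a couple of flag bits can share one uintptr_t, as the comment above
 * describes for _gc_prev.  The DEMO_* names and values are assumptions made
 * for this sketch; they are not the real _PyGC_PREV_* constants.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define DEMO_SHIFT      2                /* assumed: two low bits hold flags  */
#define DEMO_FINALIZED  ((uintptr_t)1)   /* stand-in for the "finalized" bit  */
#define DEMO_COLLECTING ((uintptr_t)2)   /* stand-in for the "collecting" bit */

/* Like gc_reset_refs(): keep "finalized", set "collecting", store refs. */
static uintptr_t demo_reset_refs(uintptr_t word, intptr_t refs)
{
    return (word & DEMO_FINALIZED) | DEMO_COLLECTING
        | ((uintptr_t)refs << DEMO_SHIFT);
}

/* Like gc_get_refs(): the count lives in the upper bits. */
static intptr_t demo_get_refs(uintptr_t word)
{
    return (intptr_t)(word >> DEMO_SHIFT);
}

int main(void)
{
    uintptr_t word = DEMO_FINALIZED;        /* pretend the object was finalized */
    word = demo_reset_refs(word, 3);
    assert(demo_get_refs(word) == 3);
    assert(word & DEMO_COLLECTING);
    assert(word & DEMO_FINALIZED);
    word -= (uintptr_t)1 << DEMO_SHIFT;     /* like gc_decref()                 */
    printf("refs now: %ld\n", (long)demo_get_refs(word));
    return 0;
}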
245
246
/*** list functions ***/
247
248
static inline void
249
gc_list_init(PyGC_Head *list)
250
234k
{
251
    // List header must not have flags.
252
    // We can assign the pointer with a simple cast.
253
234k
    list->_gc_prev = (uintptr_t)list;
254
234k
    list->_gc_next = (uintptr_t)list;
255
234k
}
256
257
static inline int
258
gc_list_is_empty(PyGC_Head *list)
259
124M
{
260
124M
    return (list->_gc_next == (uintptr_t)list);
261
124M
}
262
263
/* Append `node` to `list`. */
264
static inline void
265
gc_list_append(PyGC_Head *node, PyGC_Head *list)
266
5.51M
{
267
5.51M
    assert((list->_gc_prev & ~_PyGC_PREV_MASK) == 0);
268
5.51M
    PyGC_Head *last = (PyGC_Head *)list->_gc_prev;
269
270
    // last <-> node
271
5.51M
    _PyGCHead_SET_PREV(node, last);
272
5.51M
    _PyGCHead_SET_NEXT(last, node);
273
274
    // node <-> list
275
5.51M
    _PyGCHead_SET_NEXT(node, list);
276
5.51M
    list->_gc_prev = (uintptr_t)node;
277
5.51M
}
278
279
/* Remove `node` from the gc list it's currently in. */
280
static inline void
281
gc_list_remove(PyGC_Head *node)
282
0
{
283
0
    PyGC_Head *prev = GC_PREV(node);
284
0
    PyGC_Head *next = GC_NEXT(node);
285
286
0
    _PyGCHead_SET_NEXT(prev, next);
287
0
    _PyGCHead_SET_PREV(next, prev);
288
289
0
    node->_gc_next = 0; /* object is not currently tracked */
290
0
}
291
292
/* Move `node` from the gc list it's currently in (which is not explicitly
293
 * named here) to the end of `list`.  This is semantically the same as
294
 * gc_list_remove(node) followed by gc_list_append(node, list).
295
 */
296
static void
297
gc_list_move(PyGC_Head *node, PyGC_Head *list)
298
247M
{
299
    /* Unlink from current list. */
300
247M
    PyGC_Head *from_prev = GC_PREV(node);
301
247M
    PyGC_Head *from_next = GC_NEXT(node);
302
247M
    _PyGCHead_SET_NEXT(from_prev, from_next);
303
247M
    _PyGCHead_SET_PREV(from_next, from_prev);
304
305
    /* Relink at end of new list. */
306
    // list must not have flags, so we can skip the macros.
307
247M
    PyGC_Head *to_prev = (PyGC_Head*)list->_gc_prev;
308
247M
    _PyGCHead_SET_PREV(node, to_prev);
309
247M
    _PyGCHead_SET_NEXT(to_prev, node);
310
247M
    list->_gc_prev = (uintptr_t)node;
311
247M
    _PyGCHead_SET_NEXT(node, list);
312
247M
}
313
314
/* append list `from` onto list `to`; `from` becomes an empty list */
315
static void
316
gc_list_merge(PyGC_Head *from, PyGC_Head *to)
317
99.1k
{
318
99.1k
    assert(from != to);
319
99.1k
    if (!gc_list_is_empty(from)) {
320
50.4k
        PyGC_Head *to_tail = GC_PREV(to);
321
50.4k
        PyGC_Head *from_head = GC_NEXT(from);
322
50.4k
        PyGC_Head *from_tail = GC_PREV(from);
323
50.4k
        assert(from_head != from);
324
50.4k
        assert(from_tail != from);
325
50.4k
        assert(gc_list_is_empty(to) ||
326
50.4k
            gc_old_space(to_tail) == gc_old_space(from_tail));
327
328
50.4k
        _PyGCHead_SET_NEXT(to_tail, from_head);
329
50.4k
        _PyGCHead_SET_PREV(from_head, to_tail);
330
331
50.4k
        _PyGCHead_SET_NEXT(from_tail, to);
332
50.4k
        _PyGCHead_SET_PREV(to, from_tail);
333
50.4k
    }
334
99.1k
    gc_list_init(from);
335
99.1k
}
336
337
static Py_ssize_t
338
gc_list_size(PyGC_Head *list)
339
33.0k
{
340
33.0k
    PyGC_Head *gc;
341
33.0k
    Py_ssize_t n = 0;
342
65.5M
    for (gc = GC_NEXT(list); gc != list; gc = GC_NEXT(gc)) {
343
65.4M
        n++;
344
65.4M
    }
345
33.0k
    return n;
346
33.0k
}
347
348
/* Walk the list and mark all objects as non-collecting */
349
static inline void
350
gc_list_clear_collecting(PyGC_Head *collectable)
351
16.5k
{
352
16.5k
    PyGC_Head *gc;
353
1.66M
    for (gc = GC_NEXT(collectable); gc != collectable; gc = GC_NEXT(gc)) {
354
1.64M
        gc_clear_collecting(gc);
355
1.64M
    }
356
16.5k
}
357
358
/* Append objects in a GC list to a Python list.
359
 * Return 0 if all OK, < 0 if error (out of memory for list)
360
 */
361
static int
362
append_objects(PyObject *py_list, PyGC_Head *gc_list)
363
0
{
364
0
    PyGC_Head *gc;
365
0
    for (gc = GC_NEXT(gc_list); gc != gc_list; gc = GC_NEXT(gc)) {
366
0
        PyObject *op = FROM_GC(gc);
367
0
        if (op != py_list) {
368
0
            if (PyList_Append(py_list, op)) {
369
0
                return -1; /* exception */
370
0
            }
371
0
        }
372
0
    }
373
0
    return 0;
374
0
}
375
376
// Constants for validate_list's flags argument.
377
enum flagstates {collecting_clear_unreachable_clear,
378
                 collecting_clear_unreachable_set,
379
                 collecting_set_unreachable_clear,
380
                 collecting_set_unreachable_set};
381
382
#ifdef GC_DEBUG
383
// validate_list checks list consistency.  It also serves as documentation
384
// describing when flags are expected to be set / unset.
385
// `head` must be a doubly-linked gc list, although it's fine (expected!) if
386
// the prev and next pointers are "polluted" with flags.
387
// What's checked:
388
// - The `head` pointers are not polluted.
389
// - The objects' PREV_MASK_COLLECTING and NEXT_MASK_UNREACHABLE flags are all
390
//   set or clear, as specified by the 'flags' argument.
391
// - The prev and next pointers are mutually consistent.
392
static void
393
validate_list(PyGC_Head *head, enum flagstates flags)
394
{
395
    assert((head->_gc_prev & ~_PyGC_PREV_MASK) == 0);
396
    assert((head->_gc_next & ~_PyGC_PREV_MASK) == 0);
397
    uintptr_t prev_value = 0, next_value = 0;
398
    switch (flags) {
399
        case collecting_clear_unreachable_clear:
400
            break;
401
        case collecting_set_unreachable_clear:
402
            prev_value = PREV_MASK_COLLECTING;
403
            break;
404
        case collecting_clear_unreachable_set:
405
            next_value = NEXT_MASK_UNREACHABLE;
406
            break;
407
        case collecting_set_unreachable_set:
408
            prev_value = PREV_MASK_COLLECTING;
409
            next_value = NEXT_MASK_UNREACHABLE;
410
            break;
411
        default:
412
            assert(! "bad internal flags argument");
413
    }
414
    PyGC_Head *prev = head;
415
    PyGC_Head *gc = GC_NEXT(head);
416
    while (gc != head) {
417
        PyGC_Head *trueprev = GC_PREV(gc);
418
        PyGC_Head *truenext = GC_NEXT(gc);
419
        assert(truenext != NULL);
420
        assert(trueprev == prev);
421
        assert((gc->_gc_prev & PREV_MASK_COLLECTING) == prev_value);
422
        assert((gc->_gc_next & NEXT_MASK_UNREACHABLE) == next_value);
423
        prev = gc;
424
        gc = truenext;
425
    }
426
    assert(prev == GC_PREV(head));
427
}
428
429
#else
430
214k
#define validate_list(x, y) do{}while(0)
431
#endif
432
433
#ifdef GC_EXTRA_DEBUG
434
435
436
static void
437
gc_list_validate_space(PyGC_Head *head, int space) {
438
    PyGC_Head *gc = GC_NEXT(head);
439
    while (gc != head) {
440
        assert(gc_old_space(gc) == space);
441
        gc = GC_NEXT(gc);
442
    }
443
}
444
445
static void
446
validate_spaces(GCState *gcstate)
447
{
448
    int visited = gcstate->visited_space;
449
    int not_visited = other_space(visited);
450
    gc_list_validate_space(&gcstate->young.head, not_visited);
451
    for (int space = 0; space < 2; space++) {
452
        gc_list_validate_space(&gcstate->old[space].head, space);
453
    }
454
    gc_list_validate_space(&gcstate->permanent_generation.head, visited);
455
}
456
457
static void
458
validate_consistent_old_space(PyGC_Head *head)
459
{
460
    PyGC_Head *gc = GC_NEXT(head);
461
    if (gc == head) {
462
        return;
463
    }
464
    int old_space = gc_old_space(gc);
465
    while (gc != head) {
466
        PyGC_Head *truenext = GC_NEXT(gc);
467
        assert(truenext != NULL);
468
        assert(gc_old_space(gc) == old_space);
469
        gc = truenext;
470
    }
471
}
472
473
474
#else
475
99.9k
#define validate_spaces(g) do{}while(0)
476
82.6k
#define validate_consistent_old_space(l) do{}while(0)
477
85.3k
#define gc_list_validate_space(l, s) do{}while(0)
478
#endif
479
480
/*** end of list stuff ***/
481
482
483
/* Set all gc_refs = ob_refcnt.  After this, gc_refs is > 0 and
484
 * PREV_MASK_COLLECTING bit is set for all objects in containers.
485
 */
486
static Py_ssize_t
487
update_refs(PyGC_Head *containers)
488
33.0k
{
489
33.0k
    PyGC_Head *next;
490
33.0k
    PyGC_Head *gc = GC_NEXT(containers);
491
33.0k
    Py_ssize_t candidates = 0;
492
493
66.6M
    while (gc != containers) {
494
66.5M
        next = GC_NEXT(gc);
495
66.5M
        PyObject *op = FROM_GC(gc);
496
66.5M
        if (_Py_IsImmortal(op)) {
497
0
            assert(!_Py_IsStaticImmortal(op));
498
0
            _PyObject_GC_UNTRACK(op);
499
0
            gc = next;
500
0
            continue;
501
0
        }
502
66.5M
        gc_reset_refs(gc, Py_REFCNT(op));
503
        /* Python's cyclic gc should never see an incoming refcount
504
         * of 0:  if something decref'ed to 0, it should have been
505
         * deallocated immediately at that time.
506
         * Possible cause (if the assert triggers):  a tp_dealloc
507
         * routine left a gc-aware object tracked during its teardown
508
         * phase, and did something-- or allowed something to happen --
509
         * that called back into Python.  gc can trigger then, and may
510
         * see the still-tracked dying object.  Before this assert
511
         * was added, such mistakes went on to allow gc to try to
512
         * delete the object again.  In a debug build, that caused
513
         * a mysterious segfault, when _Py_ForgetReference tried
514
         * to remove the object from the doubly-linked list of all
515
         * objects a second time.  In a release build, an actual
516
         * double deallocation occurred, which leads to corruption
517
         * of the allocator's internal bookkeeping pointers.  That's
518
         * so serious that maybe this should be a release-build
519
         * check instead of an assert?
520
         */
521
66.5M
        _PyObject_ASSERT(op, gc_get_refs(gc) != 0);
522
66.5M
        gc = next;
523
66.5M
        candidates++;
524
66.5M
    }
525
33.0k
    return candidates;
526
33.0k
}
527
528
/* A traversal callback for subtract_refs. */
529
static int
530
visit_decref(PyObject *op, void *parent)
531
169M
{
532
169M
    OBJECT_STAT_INC(object_visits);
533
169M
    _PyObject_ASSERT(_PyObject_CAST(parent), !_PyObject_IsFreed(op));
534
535
169M
    if (_PyObject_IS_GC(op)) {
536
86.4M
        PyGC_Head *gc = AS_GC(op);
537
        /* We're only interested in gc_refs for objects in the
538
         * generation being collected, which can be recognized
539
         * because only they have positive gc_refs.
540
         */
541
86.4M
        if (gc_is_collecting(gc)) {
542
29.9M
            gc_decref(gc);
543
29.9M
        }
544
86.4M
    }
545
169M
    return 0;
546
169M
}
547
548
int
549
_PyGC_VisitStackRef(_PyStackRef *ref, visitproc visit, void *arg)
550
4.58M
{
551
    // This is a bit tricky! We want to ignore stackrefs with embedded
552
    // refcounts when computing the incoming references, but otherwise treat
553
    // them like normal.
554
4.58M
    assert(!PyStackRef_IsTaggedInt(*ref));
555
4.58M
    if (!PyStackRef_RefcountOnObject(*ref) && (visit == visit_decref)) {
556
7.28k
        return 0;
557
7.28k
    }
558
4.57M
    Py_VISIT(PyStackRef_AsPyObjectBorrow(*ref));
559
4.57M
    return 0;
560
4.57M
}
561
562
int
563
_PyGC_VisitFrameStack(_PyInterpreterFrame *frame, visitproc visit, void *arg)
564
872k
{
565
872k
    _PyStackRef *ref = _PyFrame_GetLocalsArray(frame);
566
    /* locals and stack */
567
4.89M
    for (; ref < frame->stackpointer; ref++) {
568
4.02M
        if (!PyStackRef_IsTaggedInt(*ref)) {
569
4.02M
            _Py_VISIT_STACKREF(*ref);
570
4.02M
        }
571
4.02M
    }
572
872k
    return 0;
573
872k
}
574
575
/* Subtract internal references from gc_refs.  After this, gc_refs is >= 0
576
 * for all objects in containers. The ones with gc_refs > 0 are directly
577
 * reachable from outside containers, and so can't be collected.
578
 */
579
static void
580
subtract_refs(PyGC_Head *containers)
581
33.0k
{
582
33.0k
    traverseproc traverse;
583
33.0k
    PyGC_Head *gc = GC_NEXT(containers);
584
66.6M
    for (; gc != containers; gc = GC_NEXT(gc)) {
585
66.5M
        PyObject *op = FROM_GC(gc);
586
66.5M
        traverse = Py_TYPE(op)->tp_traverse;
587
66.5M
        (void) traverse(op,
588
66.5M
                        visit_decref,
589
66.5M
                        op);
590
66.5M
    }
591
33.0k
}
592
593
/* A traversal callback for move_unreachable. */
594
static int
595
visit_reachable(PyObject *op, void *arg)
596
158M
{
597
158M
    PyGC_Head *reachable = arg;
598
158M
    OBJECT_STAT_INC(object_visits);
599
158M
    if (!_PyObject_IS_GC(op)) {
600
80.0M
        return 0;
601
80.0M
    }
602
603
78.3M
    PyGC_Head *gc = AS_GC(op);
604
78.3M
    const Py_ssize_t gc_refs = gc_get_refs(gc);
605
606
    // Ignore objects in other generations.
607
    // This also skips objects "to the left" of the current position in
608
    // move_unreachable's scan of the 'young' list - they've already been
609
    // traversed, and no longer have the PREV_MASK_COLLECTING flag.
610
78.3M
    if (! gc_is_collecting(gc)) {
611
53.7M
        return 0;
612
53.7M
    }
613
    // It would be a logic error elsewhere if the collecting flag were set on
614
    // an untracked object.
615
24.5M
    _PyObject_ASSERT(op, gc->_gc_next != 0);
616
617
24.5M
    if (gc->_gc_next & NEXT_MASK_UNREACHABLE) {
618
        /* This had gc_refs = 0 when move_unreachable got
619
         * to it, but turns out it's reachable after all.
620
         * Move it back to move_unreachable's 'young' list,
621
         * and move_unreachable will eventually get to it
622
         * again.
623
         */
624
        // Manually unlink gc from unreachable list because the list functions
625
        // don't work right in the presence of NEXT_MASK_UNREACHABLE flags.
626
5.51M
        PyGC_Head *prev = GC_PREV(gc);
627
5.51M
        PyGC_Head *next = GC_NEXT(gc);
628
5.51M
        _PyObject_ASSERT(FROM_GC(prev),
629
5.51M
                         prev->_gc_next & NEXT_MASK_UNREACHABLE);
630
5.51M
        _PyObject_ASSERT(FROM_GC(next),
631
5.51M
                         next->_gc_next & NEXT_MASK_UNREACHABLE);
632
5.51M
        prev->_gc_next = gc->_gc_next;  // copy flag bits
633
5.51M
        gc->_gc_next &= ~NEXT_MASK_UNREACHABLE;
634
5.51M
        _PyGCHead_SET_PREV(next, prev);
635
636
5.51M
        gc_list_append(gc, reachable);
637
5.51M
        gc_set_refs(gc, 1);
638
5.51M
    }
639
19.0M
    else if (gc_refs == 0) {
640
        /* This is in move_unreachable's 'young' list, but
641
         * the traversal hasn't yet gotten to it.  All
642
         * we need to do is tell move_unreachable that it's
643
         * reachable.
644
         */
645
18.0M
        gc_set_refs(gc, 1);
646
18.0M
    }
647
    /* Else there's nothing to do.
648
     * If gc_refs > 0, it must be in move_unreachable's 'young'
649
     * list, and move_unreachable will eventually get to it.
650
     */
651
1.01M
    else {
652
1.01M
        _PyObject_ASSERT_WITH_MSG(op, gc_refs > 0, "refcount is too small");
653
1.01M
    }
654
24.5M
    return 0;
655
78.3M
}
656
657
/* Move the unreachable objects from young to unreachable.  After this,
658
 * no object left in young has the PREV_MASK_COLLECTING flag, while every
659
 * object in unreachable has it.
660
 * All objects in young after this are directly or indirectly reachable
661
 * from outside the original young; and all objects in unreachable are
662
 * not.
663
 *
664
 * This function restores the _gc_prev pointers; young and unreachable are
665
 * doubly linked lists after this function returns.
666
 * But _gc_next in the unreachable list still has the NEXT_MASK_UNREACHABLE flag,
667
 * so we cannot use gc_list_* functions on unreachable until we remove the flag.
668
 */
669
static void
670
move_unreachable(PyGC_Head *young, PyGC_Head *unreachable)
671
33.0k
{
672
    // previous element in the young list, used to restore gc_prev.
673
33.0k
    PyGC_Head *prev = young;
674
33.0k
    PyGC_Head *gc = GC_NEXT(young);
675
676
    /* Invariants:  all objects "to the left" of us in young are reachable
677
     * (directly or indirectly) from outside the young list as it was at entry.
678
     *
679
     * All other objects from the original young "to the left" of us are in
680
     * unreachable now, and have NEXT_MASK_UNREACHABLE.  All objects to the
681
     * left of us in 'young' now have been scanned, and no objects here
682
     * or to the right have been scanned yet.
683
     */
684
685
33.0k
    validate_consistent_old_space(young);
686
    /* Record which old space we are in, and set NEXT_MASK_UNREACHABLE bit for convenience */
687
33.0k
    uintptr_t flags = NEXT_MASK_UNREACHABLE | (gc->_gc_next & _PyGC_NEXT_MASK_OLD_SPACE_1);
688
72.1M
    while (gc != young) {
689
72.1M
        if (gc_get_refs(gc)) {
690
            /* gc is definitely reachable from outside the
691
             * original 'young'.  Mark it as such, and traverse
692
             * its pointers to find any other objects that may
693
             * be directly reachable from it.  Note that the
694
             * call to tp_traverse may append objects to young,
695
             * so we have to wait until it returns to determine
696
             * the next object to visit.
697
             */
698
63.3M
            PyObject *op = FROM_GC(gc);
699
63.3M
            traverseproc traverse = Py_TYPE(op)->tp_traverse;
700
63.3M
            _PyObject_ASSERT_WITH_MSG(op, gc_get_refs(gc) > 0,
701
63.3M
                                      "refcount is too small");
702
            // NOTE: visit_reachable may change gc->_gc_next when
703
            // young->_gc_prev == gc.  Don't do gc = GC_NEXT(gc) before!
704
63.3M
            (void) traverse(op,
705
63.3M
                    visit_reachable,
706
63.3M
                    (void *)young);
707
            // relink gc_prev to prev element.
708
63.3M
            _PyGCHead_SET_PREV(gc, prev);
709
            // gc is not in COLLECTING state after this point.
710
63.3M
            gc_clear_collecting(gc);
711
63.3M
            prev = gc;
712
63.3M
        }
713
8.80M
        else {
714
            /* This *may* be unreachable.  To make progress,
715
             * assume it is.  gc isn't directly reachable from
716
             * any object we've already traversed, but may be
717
             * reachable from an object we haven't gotten to yet.
718
             * visit_reachable will eventually move gc back into
719
             * young if that's so, and we'll see it again.
720
             */
721
            // Move gc to unreachable.
722
            // No need to set gc->next->prev = prev because the list is singly linked here.
723
8.80M
            prev->_gc_next = gc->_gc_next;
724
725
            // We can't use gc_list_append() here because we use
726
            // NEXT_MASK_UNREACHABLE here.
727
8.80M
            PyGC_Head *last = GC_PREV(unreachable);
728
            // NOTE: Since all objects in the unreachable set have the
729
            // NEXT_MASK_UNREACHABLE flag, we set it unconditionally.
730
            // But this may pollute the unreachable list head's 'next' pointer
731
            // too. That's semantically senseless but expedient here - the
732
            // damage is repaired when this function ends.
733
8.80M
            last->_gc_next = flags | (uintptr_t)gc;
734
8.80M
            _PyGCHead_SET_PREV(gc, last);
735
8.80M
            gc->_gc_next = flags | (uintptr_t)unreachable;
736
8.80M
            unreachable->_gc_prev = (uintptr_t)gc;
737
8.80M
        }
738
72.1M
        gc = _PyGCHead_NEXT(prev);
739
72.1M
    }
740
    // young->_gc_prev must be the last element remaining in the list.
741
33.0k
    young->_gc_prev = (uintptr_t)prev;
742
33.0k
    young->_gc_next &= _PyGC_PREV_MASK;
743
    // don't let the pollution of the list head's next pointer leak
744
33.0k
    unreachable->_gc_next &= _PyGC_PREV_MASK;
745
33.0k
}
746
747
/* In theory, all tuples should be younger than the
748
* objects they refer to, as tuples are immortal.
749
* Therefore, untracking tuples in oldest-first order in the
750
* young generation before promoting them should have tracked
751
* all the tuples that can be untracked.
752
*
753
* Unfortunately, the C API allows tuples to be created
754
* and then filled in. So this won't untrack all tuples
755
* that can be untracked. It should untrack most of them
756
* and is much faster than a more complex approach that
757
* would untrack all relevant tuples.
758
*/
759
static void
760
untrack_tuples(PyGC_Head *head)
761
34.4k
{
762
34.4k
    PyGC_Head *gc = GC_NEXT(head);
763
179M
    while (gc != head) {
764
179M
        PyObject *op = FROM_GC(gc);
765
179M
        PyGC_Head *next = GC_NEXT(gc);
766
179M
        if (PyTuple_CheckExact(op)) {
767
96.2M
            _PyTuple_MaybeUntrack(op);
768
96.2M
        }
769
179M
        gc = next;
770
179M
    }
771
34.4k
}
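/* Illustrative sketch (not part of gc.c): observing tuple untracking through
 * the public C API.  Only documented calls are used (Py_Initialize,
 * Py_BuildValue, PyObject_GC_IsTracked, PyGC_Collect); whether and when a
 * particular tuple is untracked depends on when the collector scans it, so
 * the second line of output is expected, not guaranteed, to show 0 for the
 * all-atomic tuple.
 */
#include <Python.h>
#include <stdio.h>

int main(void)
{
    Py_Initialize();

    PyObject *atomic = Py_BuildValue("(ii)", 1, 2);    /* tuple of ints        */
    PyObject *nested = Py_BuildValue("([i])", 1);      /* tuple holding a list */
    if (atomic == NULL || nested == NULL) {
        PyErr_Print();
        return 1;
    }

    printf("before collect: atomic=%d nested=%d\n",
           PyObject_GC_IsTracked(atomic), PyObject_GC_IsTracked(nested));

    /* A full collection gives untrack_tuples() a chance to run. */
    PyGC_Collect();

    /* The all-atomic tuple may be untracked; the one holding a list cannot be. */
    printf("after collect:  atomic=%d nested=%d\n",
           PyObject_GC_IsTracked(atomic), PyObject_GC_IsTracked(nested));

    Py_DECREF(atomic);
    Py_DECREF(nested);
    return Py_FinalizeEx();
}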
772
773
/* Return true if object has a pre-PEP 442 finalization method. */
774
static int
775
has_legacy_finalizer(PyObject *op)
776
1.64M
{
777
1.64M
    return Py_TYPE(op)->tp_del != NULL;
778
1.64M
}
779
780
/* Move the objects in unreachable with tp_del slots into `finalizers`.
781
 *
782
 * This function also removes NEXT_MASK_UNREACHABLE flag
783
 * from _gc_next in unreachable.
784
 */
785
static void
786
move_legacy_finalizers(PyGC_Head *unreachable, PyGC_Head *finalizers)
787
16.5k
{
788
16.5k
    PyGC_Head *gc, *next;
789
16.5k
    _PyObject_ASSERT(
790
16.5k
        FROM_GC(unreachable),
791
16.5k
        (unreachable->_gc_next & NEXT_MASK_UNREACHABLE) == 0);
792
793
    /* March over unreachable.  Move objects with finalizers into
794
     * `finalizers`.
795
     */
796
1.66M
    for (gc = GC_NEXT(unreachable); gc != unreachable; gc = next) {
797
1.64M
        PyObject *op = FROM_GC(gc);
798
799
1.64M
        _PyObject_ASSERT(op, gc->_gc_next & NEXT_MASK_UNREACHABLE);
800
1.64M
        next = GC_NEXT(gc);
801
1.64M
        gc->_gc_next &= ~NEXT_MASK_UNREACHABLE;
802
803
1.64M
        if (has_legacy_finalizer(op)) {
804
0
            gc_clear_collecting(gc);
805
0
            gc_list_move(gc, finalizers);
806
0
        }
807
1.64M
    }
808
16.5k
}
809
810
static inline void
811
clear_unreachable_mask(PyGC_Head *unreachable)
812
16.5k
{
813
    /* Check that the list head does not have the unreachable bit set */
814
16.5k
    _PyObject_ASSERT(
815
16.5k
        FROM_GC(unreachable),
816
16.5k
        ((uintptr_t)unreachable & NEXT_MASK_UNREACHABLE) == 0);
817
16.5k
    _PyObject_ASSERT(
818
16.5k
        FROM_GC(unreachable),
819
16.5k
        (unreachable->_gc_next & NEXT_MASK_UNREACHABLE) == 0);
820
821
16.5k
    PyGC_Head *gc, *next;
822
1.66M
    for (gc = GC_NEXT(unreachable); gc != unreachable; gc = next) {
823
1.64M
        _PyObject_ASSERT((PyObject*)FROM_GC(gc), gc->_gc_next & NEXT_MASK_UNREACHABLE);
824
1.64M
        next = GC_NEXT(gc);
825
1.64M
        gc->_gc_next &= ~NEXT_MASK_UNREACHABLE;
826
1.64M
    }
827
16.5k
    validate_list(unreachable, collecting_set_unreachable_clear);
828
16.5k
}
829
830
/* A traversal callback for move_legacy_finalizer_reachable. */
831
static int
832
visit_move(PyObject *op, void *arg)
833
0
{
834
0
    PyGC_Head *tolist = arg;
835
0
    OBJECT_STAT_INC(object_visits);
836
0
    if (_PyObject_IS_GC(op)) {
837
0
        PyGC_Head *gc = AS_GC(op);
838
0
        if (gc_is_collecting(gc)) {
839
0
            gc_list_move(gc, tolist);
840
0
            gc_clear_collecting(gc);
841
0
        }
842
0
    }
843
0
    return 0;
844
0
}
845
846
/* Move objects that are reachable from finalizers, from the unreachable set
847
 * into finalizers set.
848
 */
849
static void
850
move_legacy_finalizer_reachable(PyGC_Head *finalizers)
851
16.5k
{
852
16.5k
    traverseproc traverse;
853
16.5k
    PyGC_Head *gc = GC_NEXT(finalizers);
854
16.5k
    for (; gc != finalizers; gc = GC_NEXT(gc)) {
855
        /* Note that the finalizers list may grow during this. */
856
0
        traverse = Py_TYPE(FROM_GC(gc))->tp_traverse;
857
0
        (void) traverse(FROM_GC(gc),
858
0
                        visit_move,
859
0
                        (void *)finalizers);
860
0
    }
861
16.5k
}
862
863
/* Handle weakref callbacks.  Note that it's possible for such weakrefs to be
864
 * outside the unreachable set -- indeed, those are precisely the weakrefs
865
 * whose callbacks must be invoked.  See gc_weakref.txt for overview & some
866
 * details.
867
 *
868
 * The clearing of weakrefs is subtle and must be done carefully, as there have been
869
 * previous bugs related to this.  First, weakrefs to the unreachable set of
870
 * objects must be cleared before we start calling `tp_clear`.  If we don't,
871
 * those weakrefs can reveal unreachable objects to Python-level code and that
872
 * is not safe.  Many objects are not usable after `tp_clear` has been called
873
 * and could even cause crashes if accessed (see bpo-38006 and gh-91636 as
874
 * example bugs).
875
 *
876
 * Weakrefs with callbacks always need to be cleared before executing the
877
 * callback.  That's because sometimes a callback will call the ref object,
878
 * to check if the reference is actually dead (KeyedRef does this, for
879
 * example).  We want to indicate that it is dead, even though it is possible
880
 * a finalizer might resurrect it.  Clearing also prevents the callback from
881
 * executing more than once.
882
 *
883
 * Since Python 2.3, all weakrefs to cyclic garbage have been cleared *before*
884
 * calling finalizers.  However, since tp_subclasses started being necessary
885
 * to invalidate caches (e.g. by PyType_Modified), that clearing has created
886
 * a bug.  If the weakref to the subclass is cleared before a finalizer is
887
 * called, the cache may not be correctly invalidated.  That can lead to
888
 * segfaults since the caches can refer to deallocated objects (GH-135552
889
 * is an example).  Now, we delay the clear of weakrefs without callbacks
890
 * until *after* finalizers have been executed.  That means weakrefs without
891
 * callbacks are still usable while finalizers are being executed.
892
 *
893
 * The weakrefs that are inside the unreachable set must also be cleared.
894
 * The reason this is required is not immediately obvious.  If the weakref
895
 * refers to an object outside of the unreachable set, that object might go
896
 * away when we start clearing objects.  Normally, the object should also be
897
 * part of the unreachable set but that's not true in the case of incomplete
898
 * or missing `tp_traverse` methods.  When that object goes away, the callback
899
 * for weakref can be executed and that could reveal unreachable objects to
900
 * Python-level code.  See bpo-38006 as an example bug.
901
 */
902
static int
903
handle_weakref_callbacks(PyGC_Head *unreachable, PyGC_Head *old)
904
16.5k
{
905
16.5k
    PyGC_Head *gc;
906
16.5k
    PyGC_Head wrcb_to_call;     /* weakrefs with callbacks to call */
907
16.5k
    PyGC_Head *next;
908
16.5k
    int num_freed = 0;
909
910
16.5k
    gc_list_init(&wrcb_to_call);
911
912
    /* Find all weakrefs with callbacks and move into `wrcb_to_call` if the
913
     * callback needs to be invoked. We make another pass over wrcb_to_call,
914
     * invoking callbacks, after this pass completes.
915
     */
916
1.66M
    for (gc = GC_NEXT(unreachable); gc != unreachable; gc = next) {
917
1.64M
        PyWeakReference **wrlist;
918
919
1.64M
        PyObject *op = FROM_GC(gc);
920
1.64M
        next = GC_NEXT(gc);
921
922
1.64M
        if (! _PyType_SUPPORTS_WEAKREFS(Py_TYPE(op))) {
923
985k
            continue;
924
985k
        }
925
926
        /* It supports weakrefs.  Does it have any?
927
         *
928
         * This is never triggered for static types so we can avoid the
929
         * (slightly) more costly _PyObject_GET_WEAKREFS_LISTPTR().
930
         */
931
661k
        wrlist = _PyObject_GET_WEAKREFS_LISTPTR_FROM_OFFSET(op);
932
933
        /* `op` may have some weakrefs.  March over the list and move the
934
         * weakrefs with callbacks that must be called into wrcb_to_call.
935
         */
936
661k
        PyWeakReference *next_wr;
937
932k
        for (PyWeakReference *wr = *wrlist; wr != NULL; wr = next_wr) {
938
            // Get the next list element first so iteration still makes progress
939
            // if we omit clearing this weakref (because _PyWeakref_ClearRef
940
            // changes the next pointer in the wrlist).
941
270k
            next_wr = wr->wr_next;
942
943
270k
            if (wr->wr_callback == NULL) {
944
                /* no callback */
945
270k
                continue;
946
270k
            }
947
948
            // Clear the weakref.  See the comments above this function for
949
            // reasons why we need to clear weakrefs that have callbacks.
950
            // Note that _PyWeakref_ClearRef clears the weakref but leaves the
951
            // callback pointer intact.  Obscure: it also changes *wrlist.
952
0
            _PyObject_ASSERT((PyObject *)wr, wr->wr_object == op);
953
0
            _PyWeakref_ClearRef(wr);
954
0
            _PyObject_ASSERT((PyObject *)wr, wr->wr_object == Py_None);
955
956
            /* Headache time.  `op` is going away, and is weakly referenced by
957
             * `wr`, which has a callback.  Should the callback be invoked?  If wr
958
             * is also trash, no:
959
             *
960
             * 1. There's no need to call it.  The object and the weakref are
961
             *    both going away, so it's legitimate to pretend the weakref is
962
             *    going away first.  The user has to ensure a weakref outlives its
963
             *    referent if they want a guarantee that the wr callback will get
964
             *    invoked.
965
             *
966
             * 2. It may be catastrophic to call it.  If the callback is also in
967
             *    cyclic trash (CT), then although the CT is unreachable from
968
             *    outside the current generation, CT may be reachable from the
969
             *    callback.  Then the callback could resurrect insane objects.
970
             *
971
             * Since the callback is never needed and may be unsafe in this
972
             * case, wr is simply left in the unreachable set.  Note that
973
             * clear_weakrefs() will ensure its callback will not trigger
974
             * inside delete_garbage().
975
             *
976
             * OTOH, if wr isn't part of CT, we should invoke the callback:  the
977
             * weakref outlived the trash.  Note that since wr isn't CT in this
978
             * case, its callback can't be CT either -- wr acted as an external
979
             * root to this generation, and therefore its callback did too.  So
980
             * nothing in CT is reachable from the callback either, so it's hard
981
             * to imagine how calling it later could create a problem for us.  wr
982
             * is moved to wrcb_to_call in this case.
983
             */
984
0
            if (gc_is_collecting(AS_GC((PyObject *)wr))) {
985
0
                continue;
986
0
            }
987
988
            /* Create a new reference so that wr can't go away
989
             * before we can process it again.
990
             */
991
0
            Py_INCREF(wr);
992
993
            /* Move wr to wrcb_to_call, for the next pass. */
994
0
            PyGC_Head *wrasgc = AS_GC((PyObject *)wr);
995
            // wrasgc is reachable, but next isn't, so they can't be the same
996
0
            _PyObject_ASSERT((PyObject *)wr, wrasgc != next);
997
0
            gc_list_move(wrasgc, &wrcb_to_call);
998
0
        }
999
661k
    }
1000
1001
    /* Invoke the callbacks we decided to honor.  It's safe to invoke them
1002
     * because they can't reference unreachable objects.
1003
     */
1004
16.5k
    int visited_space = get_gc_state()->visited_space;
1005
16.5k
    while (! gc_list_is_empty(&wrcb_to_call)) {
1006
0
        PyObject *temp;
1007
0
        PyObject *callback;
1008
1009
0
        gc = (PyGC_Head*)wrcb_to_call._gc_next;
1010
0
        PyObject *op = FROM_GC(gc);
1011
0
        _PyObject_ASSERT(op, PyWeakref_Check(op));
1012
0
        PyWeakReference *wr = (PyWeakReference *)op;
1013
0
        callback = wr->wr_callback;
1014
0
        _PyObject_ASSERT(op, callback != NULL);
1015
1016
        /* copy-paste of weakrefobject.c's handle_callback() */
1017
0
        temp = PyObject_CallOneArg(callback, (PyObject *)wr);
1018
0
        if (temp == NULL) {
1019
0
            PyErr_FormatUnraisable("Exception ignored on "
1020
0
                                   "calling weakref callback %R", callback);
1021
0
        }
1022
0
        else {
1023
0
            Py_DECREF(temp);
1024
0
        }
1025
1026
        /* Give up the reference we created in the first pass.  When
1027
         * op's refcount hits 0 (which it may or may not do right now),
1028
         * op's tp_dealloc will decref op->wr_callback too.  Note
1029
         * that the refcount probably will hit 0 now, and because this
1030
         * weakref was reachable to begin with, gc didn't already
1031
         * add it to its count of freed objects.  Example:  a reachable
1032
         * weak value dict maps some key to this reachable weakref.
1033
         * The callback removes this key->weakref mapping from the
1034
         * dict, leaving no other references to the weakref (excepting
1035
         * ours).
1036
         */
1037
0
        Py_DECREF(op);
1038
0
        if (wrcb_to_call._gc_next == (uintptr_t)gc) {
1039
            /* object is still alive -- move it */
1040
0
            gc_set_old_space(gc, visited_space);
1041
0
            gc_list_move(gc, old);
1042
0
        }
1043
0
        else {
1044
0
            ++num_freed;
1045
0
        }
1046
0
    }
1047
1048
16.5k
    return num_freed;
1049
16.5k
}
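/* Illustrative sketch (not part of gc.c): the observable behaviour documented
 * above handle_weakref_callbacks(), driven from an embedding program.  The
 * weakref is held outside the cycle, so its callback fires during collection,
 * and because the weakref is cleared first the callback already sees a dead
 * reference.  Class and variable names in the embedded script are arbitrary.
 */
#include <Python.h>

int main(void)
{
    Py_Initialize();
    int rc = PyRun_SimpleString(
        "import gc, weakref\n"
        "class Node:\n"
        "    pass\n"
        "n = Node()\n"
        "n.self = n      # create a reference cycle\n"
        "wr = weakref.ref(n, lambda r: print('callback; ref already dead:', r() is None))\n"
        "del n           # the cycle is now unreachable\n"
        "gc.collect()    # clears wr, then invokes its callback\n"
        "print('after collect, wr() is None:', wr() is None)\n");
    if (rc != 0) {
        return 1;
    }
    return Py_FinalizeEx();
}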
1050
1051
/* Clear all weakrefs to unreachable objects.  When this returns, no object in
1052
 * `unreachable` is weakly referenced anymore.  See the comments above
1053
 * handle_weakref_callbacks() for why these weakrefs need to be cleared.
1054
 */
1055
static void
1056
clear_weakrefs(PyGC_Head *unreachable)
1057
16.5k
{
1058
16.5k
    PyGC_Head *gc;
1059
16.5k
    PyGC_Head *next;
1060
1061
1.66M
    for (gc = GC_NEXT(unreachable); gc != unreachable; gc = next) {
1062
1.64M
        PyWeakReference **wrlist;
1063
1064
1.64M
        PyObject *op = FROM_GC(gc);
1065
1.64M
        next = GC_NEXT(gc);
1066
1067
1.64M
        if (PyWeakref_Check(op)) {
1068
            /* A weakref inside the unreachable set is always cleared. See
1069
             * the comments above handle_weakref_callbacks() for why these
1070
             * must be cleared.
1071
             */
1072
0
            _PyWeakref_ClearRef((PyWeakReference *)op);
1073
0
        }
1074
1075
1.64M
        if (! _PyType_SUPPORTS_WEAKREFS(Py_TYPE(op))) {
1076
985k
            continue;
1077
985k
        }
1078
1079
        /* It supports weakrefs.  Does it have any?
1080
         *
1081
         * This is never triggered for static types so we can avoid the
1082
         * (slightly) more costly _PyObject_GET_WEAKREFS_LISTPTR().
1083
         */
1084
661k
        wrlist = _PyObject_GET_WEAKREFS_LISTPTR_FROM_OFFSET(op);
1085
1086
        /* `op` may have some weakrefs.  March over the list, clear
1087
         * all the weakrefs.
1088
         */
1089
932k
        for (PyWeakReference *wr = *wrlist; wr != NULL; wr = *wrlist) {
1090
            /* _PyWeakref_ClearRef clears the weakref but leaves
1091
             * the callback pointer intact.  Obscure:  it also
1092
             * changes *wrlist.
1093
             */
1094
270k
            _PyObject_ASSERT((PyObject *)wr, wr->wr_object == op);
1095
270k
            _PyWeakref_ClearRef(wr);
1096
270k
            _PyObject_ASSERT((PyObject *)wr, wr->wr_object == Py_None);
1097
270k
        }
1098
661k
    }
1099
16.5k
}
1100
1101
static void
1102
debug_cycle(const char *msg, PyObject *op)
1103
0
{
1104
0
    PySys_FormatStderr("gc: %s <%s %p>\n",
1105
0
                       msg, Py_TYPE(op)->tp_name, op);
1106
0
}
1107
1108
/* Handle uncollectable garbage (cycles with tp_del slots, and stuff reachable
1109
 * only from such cycles).
1110
 * If _PyGC_DEBUG_SAVEALL, all objects in finalizers are appended to the module
1111
 * garbage list (a Python list), else only the objects in finalizers with
1112
 * __del__ methods are appended to garbage.  All objects in finalizers are
1113
 * merged into the old list regardless.
1114
 */
1115
static void
1116
handle_legacy_finalizers(PyThreadState *tstate,
1117
                         GCState *gcstate,
1118
                         PyGC_Head *finalizers, PyGC_Head *old)
1119
16.5k
{
1120
16.5k
    assert(!_PyErr_Occurred(tstate));
1121
16.5k
    assert(gcstate->garbage != NULL);
1122
1123
16.5k
    PyGC_Head *gc = GC_NEXT(finalizers);
1124
16.5k
    for (; gc != finalizers; gc = GC_NEXT(gc)) {
1125
0
        PyObject *op = FROM_GC(gc);
1126
1127
0
        if ((gcstate->debug & _PyGC_DEBUG_SAVEALL) || has_legacy_finalizer(op)) {
1128
0
            if (PyList_Append(gcstate->garbage, op) < 0) {
1129
0
                _PyErr_Clear(tstate);
1130
0
                break;
1131
0
            }
1132
0
        }
1133
0
    }
1134
1135
16.5k
    gc_list_merge(finalizers, old);
1136
16.5k
}
1137
1138
/* Run first-time finalizers (if any) on all the objects in collectable.
1139
 * Note that this may remove some (or even all) of the objects from the
1140
 * list, due to refcounts falling to 0.
1141
 */
1142
static void
1143
finalize_garbage(PyThreadState *tstate, PyGC_Head *collectable)
1144
16.5k
{
1145
16.5k
    destructor finalize;
1146
16.5k
    PyGC_Head seen;
1147
1148
    /* While we're going through the loop, `finalize(op)` may cause op, or
1149
     * other objects, to be reclaimed via refcounts falling to zero.  So
1150
     * there's little we can rely on about the structure of the input
1151
     * `collectable` list across iterations.  For safety, we always take the
1152
     * first object in that list and move it to a temporary `seen` list.
1153
     * If objects vanish from the `collectable` and `seen` lists we don't
1154
     * care.
1155
     */
1156
16.5k
    gc_list_init(&seen);
1157
1158
1.66M
    while (!gc_list_is_empty(collectable)) {
1159
1.64M
        PyGC_Head *gc = GC_NEXT(collectable);
1160
1.64M
        PyObject *op = FROM_GC(gc);
1161
1.64M
        gc_list_move(gc, &seen);
1162
1.64M
        if (!_PyGC_FINALIZED(op) &&
1163
1.64M
            (finalize = Py_TYPE(op)->tp_finalize) != NULL)
1164
0
        {
1165
0
            _PyGC_SET_FINALIZED(op);
1166
0
            Py_INCREF(op);
1167
0
            finalize(op);
1168
0
            assert(!_PyErr_Occurred(tstate));
1169
0
            Py_DECREF(op);
1170
0
        }
1171
1.64M
    }
1172
16.5k
    gc_list_merge(&seen, collectable);
1173
16.5k
}
1174
1175
/* Break reference cycles by clearing the containers involved.  This is
1176
 * tricky business as the lists can be changing and we don't know which
1177
 * objects may be freed.  It is possible I screwed something up here.
1178
 */
1179
static void
1180
delete_garbage(PyThreadState *tstate, GCState *gcstate,
1181
               PyGC_Head *collectable, PyGC_Head *old)
1182
16.5k
{
1183
16.5k
    assert(!_PyErr_Occurred(tstate));
1184
1185
861k
    while (!gc_list_is_empty(collectable)) {
1186
844k
        PyGC_Head *gc = GC_NEXT(collectable);
1187
844k
        PyObject *op = FROM_GC(gc);
1188
1189
844k
        _PyObject_ASSERT_WITH_MSG(op, Py_REFCNT(op) > 0,
1190
844k
                                  "refcount is too small");
1191
1192
844k
        if (gcstate->debug & _PyGC_DEBUG_SAVEALL) {
1193
0
            assert(gcstate->garbage != NULL);
1194
0
            if (PyList_Append(gcstate->garbage, op) < 0) {
1195
0
                _PyErr_Clear(tstate);
1196
0
            }
1197
0
        }
1198
844k
        else {
1199
844k
            inquiry clear;
1200
844k
            if ((clear = Py_TYPE(op)->tp_clear) != NULL) {
1201
580k
                Py_INCREF(op);
1202
580k
                (void) clear(op);
1203
580k
                if (_PyErr_Occurred(tstate)) {
1204
0
                    PyErr_FormatUnraisable("Exception ignored in tp_clear of %s",
1205
0
                                           Py_TYPE(op)->tp_name);
1206
0
                }
1207
580k
                Py_DECREF(op);
1208
580k
            }
1209
844k
        }
1210
844k
        if (GC_NEXT(collectable) == gc) {
1211
            /* object is still alive, move it, it may die later */
1212
534k
            gc_clear_collecting(gc);
1213
534k
            gc_list_move(gc, old);
1214
534k
        }
1215
844k
    }
1216
16.5k
}
1217
1218
1219
/* Deduce which objects among "base" are unreachable from outside the list
1220
   and move them to 'unreachable'. The process consists of the following steps:
1221
1222
1. Copy all reference counts to a different field (gc_prev is used to hold
1223
   this copy to save memory).
1224
2. Traverse all objects in "base" and visit all referred objects using
1225
   "tp_traverse" and for every visited object, subtract 1 to the reference
1226
   count (the one that we copied in the previous step). After this step, all
1227
   objects that can be reached directly from outside must have a strictly positive
1228
   reference count, while all unreachable objects must have a count of exactly 0.
1229
3. Identify all unreachable objects (the ones with 0 reference count) and move
1230
   them to the "unreachable" list. This step also needs to move back to "base" all
1231
   objects that were initially marked as unreachable but are transitively referenced
1232
   by the reachable objects (the ones with strictly positive reference count).
1233
1234
Contracts:
1235
1236
    * The "base" has to be a valid list with no mask set.
1237
1238
    * The "unreachable" list must be uninitialized (this function calls
1239
      gc_list_init over 'unreachable').
1240
1241
IMPORTANT: This function leaves 'unreachable' with the NEXT_MASK_UNREACHABLE
1242
flag set; the flag is deliberately left in place to skip an unnecessary iteration. Before the
1243
flag is cleared (for example, by using 'clear_unreachable_mask' function or
1244
by a call to 'move_legacy_finalizers'), the 'unreachable' list is not a normal
1245
list and we can not use most gc_list_* functions for it. */
1246
static inline Py_ssize_t
1247
33.0k
deduce_unreachable(PyGC_Head *base, PyGC_Head *unreachable) {
1248
33.0k
    validate_list(base, collecting_clear_unreachable_clear);
1249
    /* Using ob_refcnt and gc_refs, calculate which objects in the
1250
     * container set are reachable from outside the set (i.e., have a
1251
     * refcount greater than 0 when all the references within the
1252
     * set are taken into account).
1253
     */
1254
33.0k
    Py_ssize_t candidates = update_refs(base);  // gc_prev is used for gc_refs
1255
33.0k
    subtract_refs(base);
1256
1257
    /* Leave everything reachable from outside base in base, and move
1258
     * everything else (in base) to unreachable.
1259
     *
1260
     * NOTE:  This used to move the reachable objects into a reachable
1261
     * set instead.  But most things usually turn out to be reachable,
1262
     * so it's more efficient to move the unreachable things.  It "sounds slick"
1263
     * to move the unreachable objects, until you think about it - the reason it
1264
     * pays isn't actually obvious.
1265
     *
1266
     * Suppose we create objects A, B, C in that order.  They appear in the young
1267
     * generation in the same order.  If B points to A, and C to B, and C is
1268
     * reachable from outside, then the adjusted refcounts will be 0, 0, and 1
1269
     * respectively.
1270
     *
1271
     * When move_unreachable finds A, A is moved to the unreachable list.  The
1272
     * same for B when it's first encountered.  Then C is traversed, B is moved
1273
     * _back_ to the reachable list.  B is eventually traversed, and then A is
1274
     * moved back to the reachable list.
1275
     *
1276
     * So instead of not moving at all, the reachable objects B and A are moved
1277
     * twice each.  Why is this a win?  A straightforward algorithm to move the
1278
     * reachable objects instead would move A, B, and C once each.
1279
     *
1280
     * The key is that this dance leaves the objects in order C, B, A - it's
1281
     * reversed from the original order.  On all _subsequent_ scans, none of
1282
     * them will move.  Since most objects aren't in cycles, this can save an
1283
     * unbounded number of moves across an unbounded number of later collections.
1284
     * It can cost more only the first time the chain is scanned.
1285
     *
1286
     * Drawback:  move_unreachable is also used to find out what's still trash
1287
     * after finalizers may resurrect objects.  In _that_ case most unreachable
1288
     * objects will remain unreachable, so it would be more efficient to move
1289
     * the reachable objects instead.  But this is a one-time cost, probably not
1290
     * worth complicating the code to speed just a little.
1291
     */
1292
33.0k
    move_unreachable(base, unreachable);  // gc_prev is pointer again
1293
33.0k
    validate_list(base, collecting_clear_unreachable_clear);
1294
33.0k
    validate_list(unreachable, collecting_set_unreachable_set);
1295
33.0k
    return candidates;
1296
33.0k
}
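/* Illustrative sketch (not part of gc.c): a toy, self-contained simulation of
 * the three steps documented above deduce_unreachable(), on a fixed object
 * graph.  Objects A..E with edges B->A, C->B, D->E, E->D; only C also has a
 * reference from outside the set.  The adjacency table and refcounts are
 * assumptions invented for this example.
 */
#include <stdio.h>

#define N 5   /* A=0, B=1, C=2, D=3, E=4 */

int main(void)
{
    /* edges[i][j] != 0 means "object i holds a reference to object j". */
    int edges[N][N] = {
        {0, 0, 0, 0, 0},   /* A -> nothing */
        {1, 0, 0, 0, 0},   /* B -> A       */
        {0, 1, 0, 0, 0},   /* C -> B       */
        {0, 0, 0, 0, 1},   /* D -> E       */
        {0, 0, 0, 1, 0},   /* E -> D       */
    };
    int refcnt[N] = {1, 1, 1, 1, 1};   /* true refcounts; C's ref is external */

    /* Step 1: copy the true refcounts into gc_refs. */
    int gc_refs[N];
    for (int i = 0; i < N; i++) {
        gc_refs[i] = refcnt[i];
    }

    /* Step 2: subtract one for every reference coming from inside the set. */
    for (int i = 0; i < N; i++) {
        for (int j = 0; j < N; j++) {
            if (edges[i][j]) {
                gc_refs[j]--;
            }
        }
    }

    /* Step 3: objects with gc_refs > 0 are externally reachable roots;
     * everything reachable from them stays, the rest is unreachable. */
    int reachable[N];
    for (int i = 0; i < N; i++) {
        reachable[i] = (gc_refs[i] > 0);
    }
    int changed = 1;
    while (changed) {
        changed = 0;
        for (int i = 0; i < N; i++) {
            if (!reachable[i]) {
                continue;
            }
            for (int j = 0; j < N; j++) {
                if (edges[i][j] && !reachable[j]) {
                    reachable[j] = 1;
                    changed = 1;
                }
            }
        }
    }

    const char *names = "ABCDE";
    for (int i = 0; i < N; i++) {
        printf("%c: gc_refs=%d -> %s\n", names[i], gc_refs[i],
               reachable[i] ? "reachable" : "unreachable");
    }
    return 0;
}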
1297
1298
/* Handle objects that may have resurrected after a call to 'finalize_garbage', moving
1299
   them to 'old_generation' and placing the rest on 'still_unreachable'.
1300
1301
   Contracts:
1302
       * After this function 'unreachable' must not be used anymore and 'still_unreachable'
1303
         will contain the objects that did not resurrect.
1304
1305
       * The "still_unreachable" list must be uninitialized (this function calls
1306
         gc_list_init over 'still_unreachable').
1307
1308
IMPORTANT: After a call to this function, the 'still_unreachable' set will have the
1309
PREV_MASK_COLLECTING flag set, but the objects in this set are going to be removed so
1310
we can skip the expense of clearing the flag to avoid extra iteration. */
1311
static inline void
1312
handle_resurrected_objects(PyGC_Head *unreachable, PyGC_Head* still_unreachable,
1313
                           PyGC_Head *old_generation)
1314
16.5k
{
1315
    // Remove the PREV_MASK_COLLECTING from unreachable
1316
    // to prepare it for a new call to 'deduce_unreachable'
1317
16.5k
    gc_list_clear_collecting(unreachable);
1318
1319
    // After the call to deduce_unreachable, the 'still_unreachable' set will
1320
    // have the PREV_MARK_COLLECTING set, but the objects are going to be
1321
    // removed so we can skip the expense of clearing the flag.
1322
16.5k
    PyGC_Head* resurrected = unreachable;
1323
16.5k
    deduce_unreachable(resurrected, still_unreachable);
1324
16.5k
    clear_unreachable_mask(still_unreachable);
1325
1326
    // Move the resurrected objects to the old generation for future collection.
1327
16.5k
    gc_list_merge(resurrected, old_generation);
1328
16.5k
}
1329
1330
static void
1331
gc_collect_region(PyThreadState *tstate,
1332
                  PyGC_Head *from,
1333
                  PyGC_Head *to,
1334
                  struct gc_collection_stats *stats);
1335
1336
static inline Py_ssize_t
1337
gc_list_set_space(PyGC_Head *list, int space)
1338
17.8k
{
1339
17.8k
    Py_ssize_t size = 0;
1340
17.8k
    PyGC_Head *gc;
1341
63.8M
    for (gc = GC_NEXT(list); gc != list; gc = GC_NEXT(gc)) {
1342
63.8M
        gc_set_old_space(gc, space);
1343
63.8M
        size++;
1344
63.8M
    }
1345
17.8k
    return size;
1346
17.8k
}
1347
1348
/* Making progress in the incremental collector
1349
 * In order to eventually collect all cycles
1350
 * the incremental collector must progress through the old
1351
 * space faster than objects are added to the old space.
1352
 *
1353
 * Each young or incremental collection adds a number of
1354
 * objects, S (for survivors) to the old space, and
1355
 * incremental collectors scan I objects from the old space.
1356
 * I > S must be true. We also want I > S * N to be where
1357
 * N > 1. Higher values of N mean that the old space is
1358
 * scanned more rapidly.
1359
 * The default incremental threshold of 10 translates to
1360
 * N == 1.4 (1 + 4/threshold)
1361
 */
1362
1363
/* Divide by 10, so that the default incremental threshold of 10
1364
 * scans objects at 1% of the heap size */
1365
80.7k
#define SCAN_RATE_DIVISOR 10
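/* Illustrative sketch (not part of gc.c): a worked version of the scan-rate
 * formula quoted in the comment above, N == 1 + 4/threshold.  It merely
 * evaluates that stated formula for a few threshold values; the threshold
 * list is arbitrary.
 */
#include <stdio.h>

int main(void)
{
    int thresholds[] = {5, 10, 20, 40};
    for (size_t i = 0; i < sizeof(thresholds) / sizeof(thresholds[0]); i++) {
        double n = 1.0 + 4.0 / thresholds[i];
        printf("threshold=%2d -> N=%.2f (scan %.0f%% more old objects than are added)\n",
               thresholds[i], n, (n - 1.0) * 100.0);
    }
    return 0;
}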
1366
1367
static void
1368
add_stats(GCState *gcstate, int gen, struct gc_collection_stats *stats)
1369
80.7k
{
1370
80.7k
    gcstate->generation_stats[gen].duration += stats->duration;
1371
80.7k
    gcstate->generation_stats[gen].collected += stats->collected;
1372
80.7k
    gcstate->generation_stats[gen].uncollectable += stats->uncollectable;
1373
80.7k
    gcstate->generation_stats[gen].candidates += stats->candidates;
1374
80.7k
    gcstate->generation_stats[gen].collections += 1;
1375
80.7k
}
1376
1377
static void
1378
gc_collect_young(PyThreadState *tstate,
1379
                 struct gc_collection_stats *stats)
1380
0
{
1381
0
    GCState *gcstate = &tstate->interp->gc;
1382
0
    validate_spaces(gcstate);
1383
0
    PyGC_Head *young = &gcstate->young.head;
1384
0
    PyGC_Head *visited = &gcstate->old[gcstate->visited_space].head;
1385
0
    untrack_tuples(young);
1386
0
    GC_STAT_ADD(0, collections, 1);
1387
1388
0
    PyGC_Head survivors;
1389
0
    gc_list_init(&survivors);
1390
0
    gc_list_set_space(young, gcstate->visited_space);
1391
0
    gc_collect_region(tstate, young, &survivors, stats);
1392
0
    gc_list_merge(&survivors, visited);
1393
0
    validate_spaces(gcstate);
1394
0
    gcstate->young.count = 0;
1395
0
    gcstate->old[gcstate->visited_space].count++;
1396
0
    validate_spaces(gcstate);
1397
0
}
1398
1399
#ifndef NDEBUG
1400
static inline int
1401
IS_IN_VISITED(PyGC_Head *gc, int visited_space)
1402
{
1403
    assert(visited_space == 0 || other_space(visited_space) == 0);
1404
    return gc_old_space(gc) == visited_space;
1405
}
1406
#endif
1407
1408
struct container_and_flag {
1409
    PyGC_Head *container;
1410
    int visited_space;
1411
    intptr_t size;
1412
};
1413
1414
/* A traversal callback for adding to container */
1415
static int
1416
visit_add_to_container(PyObject *op, void *arg)
1417
465M
{
1418
465M
    OBJECT_STAT_INC(object_visits);
1419
465M
    struct container_and_flag *cf = (struct container_and_flag *)arg;
1420
465M
    int visited = cf->visited_space;
1421
465M
    assert(visited == get_gc_state()->visited_space);
1422
465M
    if (!_Py_IsImmortal(op) && _PyObject_IS_GC(op)) {
1423
264M
        PyGC_Head *gc = AS_GC(op);
1424
264M
        if (_PyObject_GC_IS_TRACKED(op) &&
1425
227M
            gc_old_space(gc) != visited) {
1426
121M
            gc_flip_old_space(gc);
1427
121M
            gc_list_move(gc, cf->container);
1428
121M
            cf->size++;
1429
121M
        }
1430
264M
    }
1431
465M
    return 0;
1432
465M
}
1433
1434
static intptr_t
1435
expand_region_transitively_reachable(PyGC_Head *container, PyGC_Head *gc, GCState *gcstate)
1436
318k
{
1437
318k
    struct container_and_flag arg = {
1438
318k
        .container = container,
1439
318k
        .visited_space = gcstate->visited_space,
1440
318k
        .size = 0
1441
318k
    };
1442
318k
    assert(GC_NEXT(gc) == container);
1443
1.44M
    while (gc != container) {
1444
        /* Survivors will be moved to visited space, so they should
1445
         * have been marked as visited */
1446
1.12M
        assert(IS_IN_VISITED(gc, gcstate->visited_space));
1447
1.12M
        PyObject *op = FROM_GC(gc);
1448
1.12M
        assert(_PyObject_GC_IS_TRACKED(op));
1449
1.12M
        if (_Py_IsImmortal(op)) {
1450
0
            PyGC_Head *next = GC_NEXT(gc);
1451
0
            gc_list_move(gc, &gcstate->permanent_generation.head);
1452
0
            gc = next;
1453
0
            continue;
1454
0
        }
1455
1.12M
        traverseproc traverse = Py_TYPE(op)->tp_traverse;
1456
1.12M
        (void) traverse(op,
1457
1.12M
                        visit_add_to_container,
1458
1.12M
                        &arg);
1459
1.12M
        gc = GC_NEXT(gc);
1460
1.12M
    }
1461
318k
    return arg.size;
1462
318k
}
1463
1464
/* Do bookkeeping for a completed GC cycle */
1465
static void
1466
completed_scavenge(GCState *gcstate)
1467
1.32k
{
1468
    /* We must observe two invariants:
1469
     * 1. Members of the permanent generation must be marked visited.
1470
     * 2. We cannot touch members of the permanent generation. */
1471
1.32k
    int visited;
1472
1.32k
    if (gc_list_is_empty(&gcstate->permanent_generation.head)) {
1473
        /* Permanent generation is empty so we can flip spaces bit */
1474
1.32k
        int not_visited = gcstate->visited_space;
1475
1.32k
        visited = other_space(not_visited);
1476
1.32k
        gcstate->visited_space = visited;
1477
        /* Make sure all objects have visited bit set correctly */
1478
1.32k
        gc_list_set_space(&gcstate->young.head, not_visited);
1479
1.32k
    }
1480
0
    else {
1481
         /* We must move the objects from visited to pending space. */
1482
0
        visited = gcstate->visited_space;
1483
0
        int not_visited = other_space(visited);
1484
0
        assert(gc_list_is_empty(&gcstate->old[not_visited].head));
1485
0
        gc_list_merge(&gcstate->old[visited].head, &gcstate->old[not_visited].head);
1486
0
        gc_list_set_space(&gcstate->old[not_visited].head, not_visited);
1487
0
    }
1488
1.32k
    assert(gc_list_is_empty(&gcstate->old[visited].head));
1489
1.32k
    gcstate->work_to_do = 0;
1490
1.32k
    gcstate->phase = GC_PHASE_MARK;
1491
1.32k
}
1492
1493
static intptr_t
1494
move_to_reachable(PyObject *op, PyGC_Head *reachable, int visited_space)
1495
2.09M
{
1496
2.09M
    if (op != NULL && !_Py_IsImmortal(op) && _PyObject_IS_GC(op)) {
1497
1.08M
        PyGC_Head *gc = AS_GC(op);
1498
1.08M
        if (_PyObject_GC_IS_TRACKED(op) &&
1499
1.08M
            gc_old_space(gc) != visited_space) {
1500
356k
            gc_flip_old_space(gc);
1501
356k
            gc_list_move(gc, reachable);
1502
356k
            return 1;
1503
356k
        }
1504
1.08M
    }
1505
1.73M
    return 0;
1506
2.09M
}
1507
1508
static intptr_t
1509
mark_all_reachable(PyGC_Head *reachable, PyGC_Head *visited, int visited_space)
1510
19.2k
{
1511
    // Transitively traverse all objects from reachable, until empty
1512
19.2k
    struct container_and_flag arg = {
1513
19.2k
        .container = reachable,
1514
19.2k
        .visited_space = visited_space,
1515
19.2k
        .size = 0
1516
19.2k
    };
1517
121M
    while (!gc_list_is_empty(reachable)) {
1518
121M
        PyGC_Head *gc = _PyGCHead_NEXT(reachable);
1519
121M
        assert(gc_old_space(gc) == visited_space);
1520
121M
        gc_list_move(gc, visited);
1521
121M
        PyObject *op = FROM_GC(gc);
1522
121M
        traverseproc traverse = Py_TYPE(op)->tp_traverse;
1523
121M
        (void) traverse(op,
1524
121M
                        visit_add_to_container,
1525
121M
                        &arg);
1526
121M
    }
1527
19.2k
    gc_list_validate_space(visited, visited_space);
1528
19.2k
    return arg.size;
1529
19.2k
}
1530
1531
static intptr_t
1532
mark_stacks(PyInterpreterState *interp, PyGC_Head *visited, int visited_space, bool start)
1533
17.8k
{
1534
17.8k
    PyGC_Head reachable;
1535
17.8k
    gc_list_init(&reachable);
1536
17.8k
    Py_ssize_t objects_marked = 0;
1537
    // Move all objects on stacks to reachable
1538
17.8k
    _PyRuntimeState *runtime = &_PyRuntime;
1539
17.8k
    HEAD_LOCK(runtime);
1540
17.8k
    PyThreadState* ts = PyInterpreterState_ThreadHead(interp);
1541
17.8k
    HEAD_UNLOCK(runtime);
1542
35.7k
    while (ts) {
1543
17.8k
        _PyInterpreterFrame *frame = ts->current_frame;
1544
915k
        while (frame) {
1545
912k
            if (frame->owner >= FRAME_OWNED_BY_INTERPRETER) {
1546
153k
                frame = frame->previous;
1547
153k
                continue;
1548
153k
            }
1549
758k
            _PyStackRef *locals = frame->localsplus;
1550
758k
            _PyStackRef *sp = frame->stackpointer;
1551
758k
            objects_marked += move_to_reachable(frame->f_locals, &reachable, visited_space);
1552
758k
            PyObject *func = PyStackRef_AsPyObjectBorrow(frame->f_funcobj);
1553
758k
            objects_marked += move_to_reachable(func, &reachable, visited_space);
1554
6.58M
            while (sp > locals) {
1555
5.82M
                sp--;
1556
5.82M
                if (PyStackRef_IsNullOrInt(*sp)) {
1557
2.19M
                    continue;
1558
2.19M
                }
1559
3.63M
                PyObject *op = PyStackRef_AsPyObjectBorrow(*sp);
1560
3.63M
                if (_Py_IsImmortal(op)) {
1561
742k
                    continue;
1562
742k
                }
1563
2.88M
                if (_PyObject_IS_GC(op)) {
1564
2.39M
                    PyGC_Head *gc = AS_GC(op);
1565
2.39M
                    if (_PyObject_GC_IS_TRACKED(op) &&
1566
2.38M
                        gc_old_space(gc) != visited_space) {
1567
1.23M
                        gc_flip_old_space(gc);
1568
1.23M
                        objects_marked++;
1569
1.23M
                        gc_list_move(gc, &reachable);
1570
1.23M
                    }
1571
2.39M
                }
1572
2.88M
            }
1573
758k
            if (!start && frame->visited) {
1574
                // If this frame has already been visited, then the lower frames
1575
                // will have already been visited and will not have changed
1576
14.7k
                break;
1577
14.7k
            }
1578
744k
            frame->visited = 1;
1579
744k
            frame = frame->previous;
1580
744k
        }
1581
17.8k
        HEAD_LOCK(runtime);
1582
17.8k
        ts = PyThreadState_Next(ts);
1583
17.8k
        HEAD_UNLOCK(runtime);
1584
17.8k
    }
1585
17.8k
    objects_marked += mark_all_reachable(&reachable, visited, visited_space);
1586
17.8k
    assert(gc_list_is_empty(&reachable));
1587
17.8k
    return objects_marked;
1588
17.8k
}
1589
1590
static intptr_t
1591
mark_global_roots(PyInterpreterState *interp, PyGC_Head *visited, int visited_space)
1592
1.35k
{
1593
1.35k
    PyGC_Head reachable;
1594
1.35k
    gc_list_init(&reachable);
1595
1.35k
    Py_ssize_t objects_marked = 0;
1596
1.35k
    objects_marked += move_to_reachable(interp->sysdict, &reachable, visited_space);
1597
1.35k
    objects_marked += move_to_reachable(interp->builtins, &reachable, visited_space);
1598
1.35k
    objects_marked += move_to_reachable(interp->dict, &reachable, visited_space);
1599
1.35k
    struct types_state *types = &interp->types;
1600
272k
    for (int i = 0; i < _Py_MAX_MANAGED_STATIC_BUILTIN_TYPES; i++) {
1601
271k
        objects_marked += move_to_reachable(types->builtins.initialized[i].tp_dict, &reachable, visited_space);
1602
271k
        objects_marked += move_to_reachable(types->builtins.initialized[i].tp_subclasses, &reachable, visited_space);
1603
271k
    }
1604
14.9k
    for (int i = 0; i < _Py_MAX_MANAGED_STATIC_EXT_TYPES; i++) {
1605
13.5k
        objects_marked += move_to_reachable(types->for_extensions.initialized[i].tp_dict, &reachable, visited_space);
1606
13.5k
        objects_marked += move_to_reachable(types->for_extensions.initialized[i].tp_subclasses, &reachable, visited_space);
1607
13.5k
    }
1608
1.35k
    objects_marked += mark_all_reachable(&reachable, visited, visited_space);
1609
1.35k
    assert(gc_list_is_empty(&reachable));
1610
1.35k
    return objects_marked;
1611
1.35k
}
1612
1613
static intptr_t
1614
mark_at_start(PyThreadState *tstate)
1615
1.35k
{
1616
    // TO DO -- Make this incremental
1617
1.35k
    GCState *gcstate = &tstate->interp->gc;
1618
1.35k
    PyGC_Head *visited = &gcstate->old[gcstate->visited_space].head;
1619
1.35k
    Py_ssize_t objects_marked = mark_global_roots(tstate->interp, visited, gcstate->visited_space);
1620
1.35k
    objects_marked += mark_stacks(tstate->interp, visited, gcstate->visited_space, true);
1621
1.35k
    gcstate->work_to_do -= objects_marked;
1622
1.35k
    gcstate->phase = GC_PHASE_COLLECT;
1623
1.35k
    validate_spaces(gcstate);
1624
1.35k
    return objects_marked;
1625
1.35k
}
1626
1627
static intptr_t
1628
assess_work_to_do(GCState *gcstate)
1629
80.7k
{
1630
    /* The amount of work we want to do depends on three things.
1631
     * 1. The number of new objects created
1632
     * 2. The growth in heap size since the last collection
1633
     * 3. The heap size (up to the number of new objects, to avoid quadratic effects)
1634
     *
1635
     * For a steady-state heap, the amount of work to do is three times the number
1636
     * of new objects added to the heap. This ensures that we stay ahead in the
1637
     * worst case of all new objects being garbage.
1638
     *
1639
     * This could be improved by tracking survival rates, but it is still a
1640
     * large improvement on the non-marking approach.
1641
     */
1642
80.7k
    intptr_t scale_factor = gcstate->old[0].threshold;
1643
80.7k
    if (scale_factor < 2) {
1644
0
        scale_factor = 2;
1645
0
    }
1646
80.7k
    intptr_t new_objects = gcstate->young.count;
1647
80.7k
    intptr_t max_heap_fraction = new_objects*2;
1648
80.7k
    intptr_t heap_fraction = gcstate->heap_size / SCAN_RATE_DIVISOR / scale_factor;
1649
80.7k
    if (heap_fraction > max_heap_fraction) {
1650
9.07k
        heap_fraction = max_heap_fraction;
1651
9.07k
    }
1652
80.7k
    gcstate->young.count = 0;
1653
80.7k
    return new_objects + heap_fraction;
1654
80.7k
}
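/* Worked example (editorial sketch with assumed numbers): suppose
 * young.count == 1000 new objects, heap_size == 1,000,000 and the default
 * threshold of 10.  Then:
 *     heap_fraction     = 1,000,000 / 10 / 10 = 10,000
 *     max_heap_fraction = 2 * 1000            = 2,000   (clamps heap_fraction)
 *     work to do        = 1000 + 2000         = 3,000
 * i.e. three times the number of new objects, as the comment above states. */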
1655
1656
static void
1657
gc_collect_increment(PyThreadState *tstate, struct gc_collection_stats *stats)
1658
80.7k
{
1659
80.7k
    GC_STAT_ADD(1, collections, 1);
1660
80.7k
    GCState *gcstate = &tstate->interp->gc;
1661
80.7k
    gcstate->work_to_do += assess_work_to_do(gcstate);
1662
80.7k
    if (gcstate->work_to_do < 0) {
1663
62.8k
        return;
1664
62.8k
    }
1665
17.8k
    untrack_tuples(&gcstate->young.head);
1666
17.8k
    if (gcstate->phase == GC_PHASE_MARK) {
1667
1.35k
        Py_ssize_t objects_marked = mark_at_start(tstate);
1668
1.35k
        GC_STAT_ADD(1, objects_transitively_reachable, objects_marked);
1669
1.35k
        gcstate->work_to_do -= objects_marked;
1670
1.35k
        stats->candidates += objects_marked;
1671
1.35k
        validate_spaces(gcstate);
1672
1.35k
        return;
1673
1.35k
    }
1674
16.5k
    PyGC_Head *not_visited = &gcstate->old[gcstate->visited_space^1].head;
1675
16.5k
    PyGC_Head *visited = &gcstate->old[gcstate->visited_space].head;
1676
16.5k
    PyGC_Head increment;
1677
16.5k
    gc_list_init(&increment);
1678
16.5k
    int scale_factor = gcstate->old[0].threshold;
1679
16.5k
    if (scale_factor < 2) {
1680
0
        scale_factor = 2;
1681
0
    }
1682
16.5k
    intptr_t objects_marked = mark_stacks(tstate->interp, visited, gcstate->visited_space, false);
1683
16.5k
    GC_STAT_ADD(1, objects_transitively_reachable, objects_marked);
1684
16.5k
    gcstate->work_to_do -= objects_marked;
1685
16.5k
    gc_list_set_space(&gcstate->young.head, gcstate->visited_space);
1686
16.5k
    gc_list_merge(&gcstate->young.head, &increment);
1687
16.5k
    gc_list_validate_space(&increment, gcstate->visited_space);
1688
16.5k
    Py_ssize_t increment_size = gc_list_size(&increment);
1689
335k
    while (increment_size < gcstate->work_to_do) {
1690
319k
        if (gc_list_is_empty(not_visited)) {
1691
1.29k
            break;
1692
1.29k
        }
1693
318k
        PyGC_Head *gc = _PyGCHead_NEXT(not_visited);
1694
318k
        gc_list_move(gc, &increment);
1695
318k
        increment_size++;
1696
318k
        assert(!_Py_IsImmortal(FROM_GC(gc)));
1697
318k
        gc_set_old_space(gc, gcstate->visited_space);
1698
318k
        increment_size += expand_region_transitively_reachable(&increment, gc, gcstate);
1699
318k
    }
1700
16.5k
    GC_STAT_ADD(1, objects_not_transitively_reachable, increment_size);
1701
16.5k
    validate_list(&increment, collecting_clear_unreachable_clear);
1702
16.5k
    gc_list_validate_space(&increment, gcstate->visited_space);
1703
16.5k
    PyGC_Head survivors;
1704
16.5k
    gc_list_init(&survivors);
1705
16.5k
    gc_collect_region(tstate, &increment, &survivors, stats);
1706
16.5k
    gc_list_merge(&survivors, visited);
1707
16.5k
    assert(gc_list_is_empty(&increment));
1708
16.5k
    gcstate->work_to_do -= increment_size;
1709
1710
16.5k
    if (gc_list_is_empty(not_visited)) {
1711
1.32k
        completed_scavenge(gcstate);
1712
1.32k
    }
1713
16.5k
    validate_spaces(gcstate);
1714
16.5k
}
1715
1716
static void
1717
gc_collect_full(PyThreadState *tstate,
1718
                struct gc_collection_stats *stats)
1719
0
{
1720
0
    GC_STAT_ADD(2, collections, 1);
1721
0
    GCState *gcstate = &tstate->interp->gc;
1722
0
    validate_spaces(gcstate);
1723
0
    PyGC_Head *young = &gcstate->young.head;
1724
0
    PyGC_Head *pending = &gcstate->old[gcstate->visited_space^1].head;
1725
0
    PyGC_Head *visited = &gcstate->old[gcstate->visited_space].head;
1726
0
    untrack_tuples(young);
1727
    /* merge all generations into visited */
1728
0
    gc_list_merge(young, pending);
1729
0
    gc_list_validate_space(pending, 1-gcstate->visited_space);
1730
0
    gc_list_set_space(pending, gcstate->visited_space);
1731
0
    gcstate->young.count = 0;
1732
0
    gc_list_merge(pending, visited);
1733
0
    validate_spaces(gcstate);
1734
1735
0
    gc_collect_region(tstate, visited, visited,
1736
0
                      stats);
1737
0
    validate_spaces(gcstate);
1738
0
    gcstate->young.count = 0;
1739
0
    gcstate->old[0].count = 0;
1740
0
    gcstate->old[1].count = 0;
1741
0
    completed_scavenge(gcstate);
1742
0
    _PyGC_ClearAllFreeLists(tstate->interp);
1743
0
    validate_spaces(gcstate);
1744
0
}
1745
1746
/* This is the main function. Read this to understand how the
1747
 * collection process works. */
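/* Editorial summary of the phases below (a reading aid, not new behaviour):
 * deduce_unreachable() splits 'from' into reachable and unreachable objects
 * and the reachable ones are merged into 'to'; move_legacy_finalizers()
 * sets aside objects with legacy (tp_del) finalizers;
 * handle_weakref_callbacks() runs weakref callbacks; finalize_garbage()
 * calls tp_finalize; handle_resurrected_objects() re-checks reachability;
 * clear_weakrefs() and delete_garbage() then break the remaining cycles. */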
1748
static void
1749
gc_collect_region(PyThreadState *tstate,
1750
                  PyGC_Head *from,
1751
                  PyGC_Head *to,
1752
                  struct gc_collection_stats *stats)
1753
16.5k
{
1754
16.5k
    PyGC_Head unreachable; /* non-problematic unreachable trash */
1755
16.5k
    PyGC_Head finalizers;  /* objects with, & reachable from, __del__ */
1756
16.5k
    PyGC_Head *gc; /* initialize to prevent a compiler warning */
1757
16.5k
    GCState *gcstate = &tstate->interp->gc;
1758
1759
16.5k
    assert(gcstate->garbage != NULL);
1760
16.5k
    assert(!_PyErr_Occurred(tstate));
1761
1762
16.5k
    gc_list_init(&unreachable);
1763
16.5k
    stats->candidates = deduce_unreachable(from, &unreachable);
1764
16.5k
    validate_consistent_old_space(from);
1765
16.5k
    untrack_tuples(from);
1766
1767
    /* Move reachable objects to the next generation. */
1768
16.5k
    validate_consistent_old_space(to);
1769
16.5k
    if (from != to) {
1770
16.5k
        gc_list_merge(from, to);
1771
16.5k
    }
1772
16.5k
    validate_consistent_old_space(to);
1773
1774
    /* All objects in unreachable are trash, but objects reachable from
1775
     * legacy finalizers (e.g. tp_del) can't safely be deleted.
1776
     */
1777
16.5k
    gc_list_init(&finalizers);
1778
    // NEXT_MASK_UNREACHABLE is cleared here.
1779
    // After move_legacy_finalizers(), unreachable is normal list.
1780
16.5k
    move_legacy_finalizers(&unreachable, &finalizers);
1781
    /* finalizers contains the unreachable objects with a legacy finalizer;
1782
     * unreachable objects reachable *from* those are also uncollectable,
1783
     * and we move those into the finalizers list too.
1784
     */
1785
16.5k
    move_legacy_finalizer_reachable(&finalizers);
1786
16.5k
    validate_list(&finalizers, collecting_clear_unreachable_clear);
1787
16.5k
    validate_list(&unreachable, collecting_set_unreachable_clear);
1788
    /* Print debugging information. */
1789
16.5k
    if (gcstate->debug & _PyGC_DEBUG_COLLECTABLE) {
1790
0
        for (gc = GC_NEXT(&unreachable); gc != &unreachable; gc = GC_NEXT(gc)) {
1791
0
            debug_cycle("collectable", FROM_GC(gc));
1792
0
        }
1793
0
    }
1794
1795
    /* Invoke weakref callbacks as necessary. */
1796
16.5k
    stats->collected += handle_weakref_callbacks(&unreachable, to);
1797
16.5k
    gc_list_validate_space(to, gcstate->visited_space);
1798
16.5k
    validate_list(to, collecting_clear_unreachable_clear);
1799
16.5k
    validate_list(&unreachable, collecting_set_unreachable_clear);
1800
1801
    /* Call tp_finalize on objects which have one. */
1802
16.5k
    finalize_garbage(tstate, &unreachable);
1803
    /* Handle any objects that may have resurrected after the call
1804
     * to 'finalize_garbage' and continue the collection with the
1805
     * objects that are still unreachable */
1806
16.5k
    PyGC_Head final_unreachable;
1807
16.5k
    gc_list_init(&final_unreachable);
1808
16.5k
    handle_resurrected_objects(&unreachable, &final_unreachable, to);
1809
1810
    /* Clear weakrefs to objects in the unreachable set.  See the comments
1811
     * above handle_weakref_callbacks() for details.
1812
     */
1813
16.5k
    clear_weakrefs(&final_unreachable);
1814
1815
    /* Call tp_clear on objects in the final_unreachable set.  This will cause
1816
     * the reference cycles to be broken.  It may also cause some objects
1817
     * in finalizers to be freed.
1818
     */
1819
16.5k
    stats->collected += gc_list_size(&final_unreachable);
1820
16.5k
    delete_garbage(tstate, gcstate, &final_unreachable, to);
1821
1822
    /* Collect statistics on uncollectable objects found and print
1823
     * debugging information. */
1824
16.5k
    Py_ssize_t n = 0;
1825
16.5k
    for (gc = GC_NEXT(&finalizers); gc != &finalizers; gc = GC_NEXT(gc)) {
1826
0
        n++;
1827
0
        if (gcstate->debug & _PyGC_DEBUG_UNCOLLECTABLE)
1828
0
            debug_cycle("uncollectable", FROM_GC(gc));
1829
0
    }
1830
16.5k
    stats->uncollectable = n;
1831
    /* Append instances in the uncollectable set to a Python
1832
     * reachable list of garbage.  The programmer has to deal with
1833
     * this if they insist on creating this type of structure.
1834
     */
1835
16.5k
    handle_legacy_finalizers(tstate, gcstate, &finalizers, to);
1836
16.5k
    gc_list_validate_space(to, gcstate->visited_space);
1837
16.5k
    validate_list(to, collecting_clear_unreachable_clear);
1838
16.5k
}
1839
1840
/* Invoke progress callbacks to notify clients that garbage collection
1841
 * is starting or stopping
1842
 */
1843
static void
1844
do_gc_callback(GCState *gcstate, const char *phase,
1845
                   int generation, struct gc_collection_stats *stats)
1846
161k
{
1847
161k
    assert(!PyErr_Occurred());
1848
1849
    /* The local variable cannot be rebound; check it for sanity */
1850
161k
    assert(PyList_CheckExact(gcstate->callbacks));
1851
161k
    PyObject *info = NULL;
1852
161k
    if (PyList_GET_SIZE(gcstate->callbacks) != 0) {
1853
0
        info = Py_BuildValue("{sisnsnsnsd}",
1854
0
            "generation", generation,
1855
0
            "collected", stats->collected,
1856
0
            "uncollectable", stats->uncollectable,
1857
0
            "candidates", stats->candidates,
1858
0
            "duration", stats->duration);
1859
0
        if (info == NULL) {
1860
0
            PyErr_FormatUnraisable("Exception ignored while invoking gc callbacks");
1861
0
            return;
1862
0
        }
1863
0
    }
1864
1865
161k
    PyObject *phase_obj = PyUnicode_FromString(phase);
1866
161k
    if (phase_obj == NULL) {
1867
0
        Py_XDECREF(info);
1868
0
        PyErr_FormatUnraisable("Exception ignored while invoking gc callbacks");
1869
0
        return;
1870
0
    }
1871
1872
161k
    PyObject *stack[] = {phase_obj, info};
1873
161k
    for (Py_ssize_t i=0; i<PyList_GET_SIZE(gcstate->callbacks); i++) {
1874
0
        PyObject *r, *cb = PyList_GET_ITEM(gcstate->callbacks, i);
1875
0
        Py_INCREF(cb); /* make sure cb doesn't go away */
1876
0
        r = PyObject_Vectorcall(cb, stack, 2, NULL);
1877
0
        if (r == NULL) {
1878
0
            PyErr_FormatUnraisable("Exception ignored while "
1879
0
                                   "calling GC callback %R", cb);
1880
0
        }
1881
0
        else {
1882
0
            Py_DECREF(r);
1883
0
        }
1884
0
        Py_DECREF(cb);
1885
0
    }
1886
161k
    Py_DECREF(phase_obj);
1887
161k
    Py_XDECREF(info);
1888
161k
    assert(!PyErr_Occurred());
1889
161k
}
1890
1891
static void
1892
invoke_gc_callback(GCState *gcstate, const char *phase,
1893
                   int generation, struct gc_collection_stats *stats)
1894
161k
{
1895
161k
    if (gcstate->callbacks == NULL) {
1896
0
        return;
1897
0
    }
1898
161k
    do_gc_callback(gcstate, phase, generation, stats);
1899
161k
}
1900
1901
static int
1902
referrersvisit(PyObject* obj, void *arg)
1903
0
{
1904
0
    PyObject *objs = arg;
1905
0
    Py_ssize_t i;
1906
0
    for (i = 0; i < PyTuple_GET_SIZE(objs); i++) {
1907
0
        if (PyTuple_GET_ITEM(objs, i) == obj) {
1908
0
            return 1;
1909
0
        }
1910
0
    }
1911
0
    return 0;
1912
0
}
1913
1914
static int
1915
gc_referrers_for(PyObject *objs, PyGC_Head *list, PyObject *resultlist)
1916
0
{
1917
0
    PyGC_Head *gc;
1918
0
    PyObject *obj;
1919
0
    traverseproc traverse;
1920
0
    for (gc = GC_NEXT(list); gc != list; gc = GC_NEXT(gc)) {
1921
0
        obj = FROM_GC(gc);
1922
0
        traverse = Py_TYPE(obj)->tp_traverse;
1923
0
        if (obj == objs || obj == resultlist) {
1924
0
            continue;
1925
0
        }
1926
0
        if (traverse(obj, referrersvisit, objs)) {
1927
0
            if (PyList_Append(resultlist, obj) < 0) {
1928
0
                return 0; /* error */
1929
0
            }
1930
0
        }
1931
0
    }
1932
0
    return 1; /* no error */
1933
0
}
1934
1935
PyObject *
1936
_PyGC_GetReferrers(PyInterpreterState *interp, PyObject *objs)
1937
0
{
1938
0
    PyObject *result = PyList_New(0);
1939
0
    if (!result) {
1940
0
        return NULL;
1941
0
    }
1942
1943
0
    GCState *gcstate = &interp->gc;
1944
0
    for (int i = 0; i < NUM_GENERATIONS; i++) {
1945
0
        if (!(gc_referrers_for(objs, GEN_HEAD(gcstate, i), result))) {
1946
0
            Py_DECREF(result);
1947
0
            return NULL;
1948
0
        }
1949
0
    }
1950
0
    return result;
1951
0
}
1952
1953
PyObject *
1954
_PyGC_GetObjects(PyInterpreterState *interp, int generation)
1955
0
{
1956
0
    assert(generation >= -1 && generation < NUM_GENERATIONS);
1957
0
    GCState *gcstate = &interp->gc;
1958
1959
0
    PyObject *result = PyList_New(0);
1960
    /* Generation:
1961
     * -1: Return all objects
1962
     * 0: All young objects
1963
     * 1: No objects
1964
     * 2: All old objects
1965
     */
1966
0
    if (result == NULL || generation == 1) {
1967
0
        return result;
1968
0
    }
1969
0
    if (generation <= 0) {
1970
0
        if (append_objects(result, &gcstate->young.head)) {
1971
0
            goto error;
1972
0
        }
1973
0
    }
1974
0
    if (generation != 0) {
1975
0
        if (append_objects(result, &gcstate->old[0].head)) {
1976
0
            goto error;
1977
0
        }
1978
0
        if (append_objects(result, &gcstate->old[1].head)) {
1979
0
            goto error;
1980
0
        }
1981
0
    }
1982
1983
0
    return result;
1984
0
error:
1985
0
    Py_DECREF(result);
1986
0
    return NULL;
1987
0
}
1988
1989
void
1990
_PyGC_Freeze(PyInterpreterState *interp)
1991
0
{
1992
0
    GCState *gcstate = &interp->gc;
1993
    /* The permanent_generation must be visited */
1994
0
    gc_list_set_space(&gcstate->young.head, gcstate->visited_space);
1995
0
    gc_list_merge(&gcstate->young.head, &gcstate->permanent_generation.head);
1996
0
    gcstate->young.count = 0;
1997
0
    PyGC_Head*old0 = &gcstate->old[0].head;
1998
0
    PyGC_Head*old1 = &gcstate->old[1].head;
1999
0
    if (gcstate->visited_space) {
2000
0
        gc_list_set_space(old0, 1);
2001
0
    }
2002
0
    else {
2003
0
        gc_list_set_space(old1, 0);
2004
0
    }
2005
0
    gc_list_merge(old0, &gcstate->permanent_generation.head);
2006
0
    gcstate->old[0].count = 0;
2007
0
    gc_list_merge(old1, &gcstate->permanent_generation.head);
2008
0
    gcstate->old[1].count = 0;
2009
0
    validate_spaces(gcstate);
2010
0
}
2011
2012
void
2013
_PyGC_Unfreeze(PyInterpreterState *interp)
2014
0
{
2015
0
    GCState *gcstate = &interp->gc;
2016
0
    gc_list_merge(&gcstate->permanent_generation.head,
2017
0
                  &gcstate->old[gcstate->visited_space].head);
2018
0
    validate_spaces(gcstate);
2019
0
}
2020
2021
Py_ssize_t
2022
_PyGC_GetFreezeCount(PyInterpreterState *interp)
2023
0
{
2024
0
    GCState *gcstate = &interp->gc;
2025
0
    return gc_list_size(&gcstate->permanent_generation.head);
2026
0
}
2027
2028
/* C API for controlling the state of the garbage collector */
2029
int
2030
PyGC_Enable(void)
2031
0
{
2032
0
    GCState *gcstate = get_gc_state();
2033
0
    int old_state = gcstate->enabled;
2034
0
    gcstate->enabled = 1;
2035
0
    return old_state;
2036
0
}
2037
2038
int
2039
PyGC_Disable(void)
2040
0
{
2041
0
    GCState *gcstate = get_gc_state();
2042
0
    int old_state = gcstate->enabled;
2043
0
    gcstate->enabled = 0;
2044
0
    return old_state;
2045
0
}
2046
2047
int
2048
PyGC_IsEnabled(void)
2049
0
{
2050
0
    GCState *gcstate = get_gc_state();
2051
0
    return gcstate->enabled;
2052
0
}
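/* Hedged usage sketch (editorial, not part of gc.c): temporarily disable the
 * collector from C and restore the previous state.  Assumes <Python.h> is
 * included and the GIL is held. */
static void
build_graph_without_gc(void)
{
    int was_enabled = PyGC_Disable();
    /* ... allocate a large, cycle-free object graph here ... */
    if (was_enabled) {
        PyGC_Enable();
    }
}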
2053
2054
// Show stats for objects in each generation
2055
static void
2056
show_stats_each_generations(GCState *gcstate)
2057
0
{
2058
0
    char buf[100];
2059
0
    size_t pos = 0;
2060
2061
0
    for (int i = 0; i < NUM_GENERATIONS && pos < sizeof(buf); i++) {
2062
0
        pos += PyOS_snprintf(buf+pos, sizeof(buf)-pos,
2063
0
                             " %zd",
2064
0
                             gc_list_size(GEN_HEAD(gcstate, i)));
2065
0
    }
2066
0
    PySys_FormatStderr(
2067
0
        "gc: objects in each generation:%s\n"
2068
0
        "gc: objects in permanent generation: %zd\n",
2069
0
        buf, gc_list_size(&gcstate->permanent_generation.head));
2070
0
}
2071
2072
Py_ssize_t
2073
_PyGC_Collect(PyThreadState *tstate, int generation, _PyGC_Reason reason)
2074
80.7k
{
2075
80.7k
    GCState *gcstate = &tstate->interp->gc;
2076
80.7k
    assert(tstate->current_frame == NULL || tstate->current_frame->stackpointer != NULL);
2077
2078
80.7k
    int expected = 0;
2079
80.7k
    if (!_Py_atomic_compare_exchange_int(&gcstate->collecting, &expected, 1)) {
2080
        // Don't start a garbage collection if one is already in progress.
2081
0
        return 0;
2082
0
    }
2083
80.7k
    gcstate->frame = tstate->current_frame;
2084
2085
80.7k
    struct gc_collection_stats stats = { 0 };
2086
80.7k
    if (reason != _Py_GC_REASON_SHUTDOWN) {
2087
80.7k
        invoke_gc_callback(gcstate, "start", generation, &stats);
2088
80.7k
    }
2089
80.7k
    if (gcstate->debug & _PyGC_DEBUG_STATS) {
2090
0
        PySys_WriteStderr("gc: collecting generation %d...\n", generation);
2091
0
        show_stats_each_generations(gcstate);
2092
0
    }
2093
80.7k
    if (PyDTrace_GC_START_ENABLED()) {
2094
0
        PyDTrace_GC_START(generation);
2095
0
    }
2096
80.7k
    PyTime_t start, stop;
2097
80.7k
    (void)PyTime_PerfCounterRaw(&start);
2098
80.7k
    PyObject *exc = _PyErr_GetRaisedException(tstate);
2099
80.7k
    switch(generation) {
2100
0
        case 0:
2101
0
            gc_collect_young(tstate, &stats);
2102
0
            break;
2103
80.7k
        case 1:
2104
80.7k
            gc_collect_increment(tstate, &stats);
2105
80.7k
            break;
2106
0
        case 2:
2107
0
            gc_collect_full(tstate, &stats);
2108
0
            break;
2109
0
        default:
2110
0
            Py_UNREACHABLE();
2111
80.7k
    }
2112
80.7k
    (void)PyTime_PerfCounterRaw(&stop);
2113
80.7k
    stats.duration = PyTime_AsSecondsDouble(stop - start);
2114
80.7k
    add_stats(gcstate, generation, &stats);
2115
80.7k
    if (PyDTrace_GC_DONE_ENABLED()) {
2116
0
        PyDTrace_GC_DONE(stats.uncollectable + stats.collected);
2117
0
    }
2118
80.7k
    if (reason != _Py_GC_REASON_SHUTDOWN) {
2119
80.7k
        invoke_gc_callback(gcstate, "stop", generation, &stats);
2120
80.7k
    }
2121
80.7k
    _PyErr_SetRaisedException(tstate, exc);
2122
80.7k
    GC_STAT_ADD(generation, objects_collected, stats.collected);
2123
#ifdef Py_STATS
2124
    PyStats *s = _PyStats_GET();
2125
    if (s) {
2126
        GC_STAT_ADD(generation, object_visits,
2127
            s->object_stats.object_visits);
2128
        s->object_stats.object_visits = 0;
2129
    }
2130
#endif
2131
80.7k
    validate_spaces(gcstate);
2132
80.7k
    gcstate->frame = NULL;
2133
80.7k
    _Py_atomic_store_int(&gcstate->collecting, 0);
2134
2135
80.7k
    if (gcstate->debug & _PyGC_DEBUG_STATS) {
2136
0
        PySys_WriteStderr(
2137
0
            "gc: done, %zd unreachable, %zd uncollectable, %.4fs elapsed\n",
2138
0
            stats.collected + stats.uncollectable, stats.uncollectable, stats.duration
2139
0
        );
2140
0
    }
2141
2142
80.7k
    return stats.uncollectable + stats.collected;
2143
80.7k
}
2144
2145
/* Public API to invoke gc.collect() from C */
2146
Py_ssize_t
2147
PyGC_Collect(void)
2148
0
{
2149
0
    return _PyGC_Collect(_PyThreadState_GET(), 2, _Py_GC_REASON_MANUAL);
2150
0
}
2151
2152
void
2153
_PyGC_CollectNoFail(PyThreadState *tstate)
2154
0
{
2155
    /* Ideally, this function is only called on interpreter shutdown,
2156
       and therefore not recursively.  Unfortunately, when there are daemon
2157
       threads, a daemon thread can start a cyclic garbage collection
2158
       during interpreter shutdown (and then never finish it).
2159
       See http://bugs.python.org/issue8713#msg195178 for an example.
2160
       */
2161
0
    _PyGC_Collect(_PyThreadState_GET(), 2, _Py_GC_REASON_SHUTDOWN);
2162
0
}
2163
2164
void
2165
_PyGC_DumpShutdownStats(PyInterpreterState *interp)
2166
0
{
2167
0
    GCState *gcstate = &interp->gc;
2168
0
    if (!(gcstate->debug & _PyGC_DEBUG_SAVEALL)
2169
0
        && gcstate->garbage != NULL && PyList_GET_SIZE(gcstate->garbage) > 0) {
2170
0
        const char *message;
2171
0
        if (gcstate->debug & _PyGC_DEBUG_UNCOLLECTABLE) {
2172
0
            message = "gc: %zd uncollectable objects at shutdown";
2173
0
        }
2174
0
        else {
2175
0
            message = "gc: %zd uncollectable objects at shutdown; " \
2176
0
                "use gc.set_debug(gc.DEBUG_UNCOLLECTABLE) to list them";
2177
0
        }
2178
        /* PyErr_WarnFormat does too many things, and we are at shutdown;
2179
           the warnings module's dependencies (e.g. linecache) may be gone
2180
           already. */
2181
0
        if (PyErr_WarnExplicitFormat(PyExc_ResourceWarning, "gc", 0,
2182
0
                                     "gc", NULL, message,
2183
0
                                     PyList_GET_SIZE(gcstate->garbage)))
2184
0
        {
2185
0
            PyErr_FormatUnraisable("Exception ignored in GC shutdown");
2186
0
        }
2187
0
        if (gcstate->debug & _PyGC_DEBUG_UNCOLLECTABLE) {
2188
0
            PyObject *repr = NULL, *bytes = NULL;
2189
0
            repr = PyObject_Repr(gcstate->garbage);
2190
0
            if (!repr || !(bytes = PyUnicode_EncodeFSDefault(repr))) {
2191
0
                PyErr_FormatUnraisable("Exception ignored in GC shutdown "
2192
0
                                       "while formatting garbage");
2193
0
            }
2194
0
            else {
2195
0
                PySys_WriteStderr(
2196
0
                    "      %s\n",
2197
0
                    PyBytes_AS_STRING(bytes)
2198
0
                    );
2199
0
            }
2200
0
            Py_XDECREF(repr);
2201
0
            Py_XDECREF(bytes);
2202
0
        }
2203
0
    }
2204
0
}
2205
2206
static void
2207
0
finalize_unlink_gc_head(PyGC_Head *gc) {
2208
0
    PyGC_Head *prev = GC_PREV(gc);
2209
0
    PyGC_Head *next = GC_NEXT(gc);
2210
0
    _PyGCHead_SET_NEXT(prev, next);
2211
0
    _PyGCHead_SET_PREV(next, prev);
2212
0
}
2213
2214
void
2215
_PyGC_Fini(PyInterpreterState *interp)
2216
0
{
2217
0
    GCState *gcstate = &interp->gc;
2218
0
    Py_CLEAR(gcstate->garbage);
2219
0
    Py_CLEAR(gcstate->callbacks);
2220
2221
    /* Prevent a subtle bug that affects sub-interpreters that use basic
2222
     * single-phase init extensions (m_size == -1).  Those extensions cause objects
2223
     * to be shared between interpreters, via the PyDict_Update(mdict, m_copy) call
2224
     * in import_find_extension().
2225
     *
2226
     * If they are GC objects, their GC head next or prev links could refer to
2227
     * the interpreter _gc_runtime_state PyGC_Head nodes.  Those nodes go away
2228
     * when the interpreter structure is freed and so pointers to them become
2229
     * invalid.  If those objects are still used by another interpreter and
2230
     * UNTRACK is called on them, a crash will happen.  We untrack the nodes
2231
     * here to avoid that.
2232
     *
2233
     * This bug was originally fixed when reported as gh-90228.  The bug was
2234
     * re-introduced in gh-94673.
2235
     */
2236
0
    finalize_unlink_gc_head(&gcstate->young.head);
2237
0
    finalize_unlink_gc_head(&gcstate->old[0].head);
2238
0
    finalize_unlink_gc_head(&gcstate->old[1].head);
2239
0
    finalize_unlink_gc_head(&gcstate->permanent_generation.head);
2240
0
}
2241
2242
/* for debugging */
2243
void
2244
_PyGC_Dump(PyGC_Head *g)
2245
0
{
2246
0
    PyUnstable_Object_Dump(FROM_GC(g));
2247
0
}
2248
2249
2250
#ifdef Py_DEBUG
2251
static int
2252
visit_validate(PyObject *op, void *parent_raw)
2253
{
2254
    PyObject *parent = _PyObject_CAST(parent_raw);
2255
    if (_PyObject_IsFreed(op)) {
2256
        _PyObject_ASSERT_FAILED_MSG(parent,
2257
                                    "PyObject_GC_Track() object is not valid");
2258
    }
2259
    return 0;
2260
}
2261
#endif
2262
2263
2264
/* extension modules might be compiled with GC support, so these
2265
   functions must always be available */
2266
2267
void
2268
PyObject_GC_Track(void *op_raw)
2269
176M
{
2270
176M
    PyObject *op = _PyObject_CAST(op_raw);
2271
176M
    if (_PyObject_GC_IS_TRACKED(op)) {
2272
0
        _PyObject_ASSERT_FAILED_MSG(op,
2273
0
                                    "object already tracked "
2274
0
                                    "by the garbage collector");
2275
0
    }
2276
176M
    _PyObject_GC_TRACK(op);
2277
2278
#ifdef Py_DEBUG
2279
    /* Check that the object is valid: validate objects traversed
2280
       by tp_traverse() */
2281
    traverseproc traverse = Py_TYPE(op)->tp_traverse;
2282
    (void)traverse(op, visit_validate, op);
2283
#endif
2284
176M
}
2285
2286
void
2287
PyObject_GC_UnTrack(void *op_raw)
2288
1.83G
{
2289
1.83G
    PyObject *op = _PyObject_CAST(op_raw);
2290
    /* The code for some objects, such as tuples, is a bit
2291
     * sloppy about when the object is tracked and untracked. */
2292
1.83G
    if (_PyObject_GC_IS_TRACKED(op)) {
2293
1.59G
        _PyObject_GC_UNTRACK(op);
2294
1.59G
    }
2295
1.83G
}
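/* Hedged example (editorial sketch; 'PairObject' is a hypothetical extension
 * type, not part of gc.c): the usual way the tracking API above is used.
 * The type is assumed to set Py_TPFLAGS_HAVE_GC and to install these
 * functions as tp_traverse, tp_clear, tp_dealloc and tp_new. */
typedef struct {
    PyObject_HEAD
    PyObject *partner;              /* may participate in reference cycles */
} PairObject;

static int
Pair_traverse(PyObject *self, visitproc visit, void *arg)
{
    Py_VISIT(((PairObject *)self)->partner);
    return 0;
}

static int
Pair_clear(PyObject *self)
{
    Py_CLEAR(((PairObject *)self)->partner);
    return 0;
}

static void
Pair_dealloc(PyObject *self)
{
    PyObject_GC_UnTrack(self);          /* untrack before tearing down */
    (void)Pair_clear(self);
    PyObject_GC_Del(self);
}

static PyObject *
Pair_new(PyTypeObject *type, PyObject *args, PyObject *kwds)
{
    PairObject *op = PyObject_GC_New(PairObject, type);
    if (op == NULL) {
        return NULL;
    }
    op->partner = NULL;
    PyObject_GC_Track((PyObject *)op);  /* track once fully initialized */
    return (PyObject *)op;
}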
2296
2297
int
2298
PyObject_IS_GC(PyObject *obj)
2299
973M
{
2300
973M
    return _PyObject_IS_GC(obj);
2301
973M
}
2302
2303
void
2304
_Py_ScheduleGC(PyThreadState *tstate)
2305
6.58M
{
2306
6.58M
    if (!_Py_eval_breaker_bit_is_set(tstate, _PY_GC_SCHEDULED_BIT))
2307
80.7k
    {
2308
80.7k
        _Py_set_eval_breaker_bit(tstate, _PY_GC_SCHEDULED_BIT);
2309
80.7k
    }
2310
6.58M
}
2311
2312
void
2313
_Py_TriggerGC(struct _gc_runtime_state *gcstate)
2314
6.65M
{
2315
6.65M
    PyThreadState *tstate = _PyThreadState_GET();
2316
6.65M
    if (gcstate->enabled &&
2317
6.65M
        gcstate->young.threshold != 0 &&
2318
6.65M
        !_Py_atomic_load_int_relaxed(&gcstate->collecting) &&
2319
6.65M
        !_PyErr_Occurred(tstate))
2320
6.58M
    {
2321
6.58M
        _Py_ScheduleGC(tstate);
2322
6.58M
    }
2323
6.65M
}
2324
2325
void
2326
_PyObject_GC_Link(PyObject *op)
2327
578M
{
2328
578M
    PyGC_Head *gc = AS_GC(op);
2329
    // gc must be correctly aligned
2330
578M
    _PyObject_ASSERT(op, ((uintptr_t)gc & (sizeof(uintptr_t)-1)) == 0);
2331
578M
    gc->_gc_next = 0;
2332
578M
    gc->_gc_prev = 0;
2333
2334
578M
}
2335
2336
void
2337
_Py_RunGC(PyThreadState *tstate)
2338
80.7k
{
2339
80.7k
    if (tstate->interp->gc.enabled) {
2340
80.7k
        _PyGC_Collect(tstate, 1, _Py_GC_REASON_HEAP);
2341
80.7k
    }
2342
80.7k
}
2343
2344
static PyObject *
2345
gc_alloc(PyTypeObject *tp, size_t basicsize, size_t presize)
2346
461M
{
2347
461M
    PyThreadState *tstate = _PyThreadState_GET();
2348
461M
    if (basicsize > PY_SSIZE_T_MAX - presize) {
2349
0
        return _PyErr_NoMemory(tstate);
2350
0
    }
2351
461M
    size_t size = presize + basicsize;
2352
461M
    char *mem = _PyObject_MallocWithType(tp, size);
2353
461M
    if (mem == NULL) {
2354
0
        return _PyErr_NoMemory(tstate);
2355
0
    }
2356
461M
    ((PyObject **)mem)[0] = NULL;
2357
461M
    ((PyObject **)mem)[1] = NULL;
2358
461M
    PyObject *op = (PyObject *)(mem + presize);
2359
461M
    _PyObject_GC_Link(op);
2360
461M
    return op;
2361
461M
}
2362
2363
2364
PyObject *
2365
_PyObject_GC_New(PyTypeObject *tp)
2366
199M
{
2367
199M
    size_t presize = _PyType_PreHeaderSize(tp);
2368
199M
    size_t size = _PyObject_SIZE(tp);
2369
199M
    if (_PyType_HasFeature(tp, Py_TPFLAGS_INLINE_VALUES)) {
2370
56
        size += _PyInlineValuesSize(tp);
2371
56
    }
2372
199M
    PyObject *op = gc_alloc(tp, size, presize);
2373
199M
    if (op == NULL) {
2374
0
        return NULL;
2375
0
    }
2376
199M
    _PyObject_Init(op, tp);
2377
199M
    if (tp->tp_flags & Py_TPFLAGS_INLINE_VALUES) {
2378
56
        _PyObject_InitInlineValues(op, tp);
2379
56
    }
2380
199M
    return op;
2381
199M
}
2382
2383
PyVarObject *
2384
_PyObject_GC_NewVar(PyTypeObject *tp, Py_ssize_t nitems)
2385
261M
{
2386
261M
    PyVarObject *op;
2387
2388
261M
    if (nitems < 0) {
2389
0
        PyErr_BadInternalCall();
2390
0
        return NULL;
2391
0
    }
2392
261M
    size_t presize = _PyType_PreHeaderSize(tp);
2393
261M
    size_t size = _PyObject_VAR_SIZE(tp, nitems);
2394
261M
    op = (PyVarObject *)gc_alloc(tp, size, presize);
2395
261M
    if (op == NULL) {
2396
0
        return NULL;
2397
0
    }
2398
261M
    _PyObject_InitVar(op, tp, nitems);
2399
261M
    return op;
2400
261M
}
2401
2402
PyObject *
2403
PyUnstable_Object_GC_NewWithExtraData(PyTypeObject *tp, size_t extra_size)
2404
0
{
2405
0
    size_t presize = _PyType_PreHeaderSize(tp);
2406
0
    size_t size = _PyObject_SIZE(tp) + extra_size;
2407
0
    PyObject *op = gc_alloc(tp, size, presize);
2408
0
    if (op == NULL) {
2409
0
        return NULL;
2410
0
    }
2411
0
    memset((char *)op + sizeof(PyObject), 0, size - sizeof(PyObject));
2412
0
    _PyObject_Init(op, tp);
2413
0
    return op;
2414
0
}
2415
2416
PyVarObject *
2417
_PyObject_GC_Resize(PyVarObject *op, Py_ssize_t nitems)
2418
28
{
2419
28
    const size_t basicsize = _PyObject_VAR_SIZE(Py_TYPE(op), nitems);
2420
28
    const size_t presize = _PyType_PreHeaderSize(Py_TYPE(op));
2421
28
    _PyObject_ASSERT((PyObject *)op, !_PyObject_GC_IS_TRACKED(op));
2422
28
    if (basicsize > (size_t)PY_SSIZE_T_MAX - presize) {
2423
0
        return (PyVarObject *)PyErr_NoMemory();
2424
0
    }
2425
28
    char *mem = (char *)op - presize;
2426
28
    mem = (char *)_PyObject_ReallocWithType(Py_TYPE(op), mem, presize + basicsize);
2427
28
    if (mem == NULL) {
2428
0
        return (PyVarObject *)PyErr_NoMemory();
2429
0
    }
2430
28
    op = (PyVarObject *) (mem + presize);
2431
28
    Py_SET_SIZE(op, nitems);
2432
28
    return op;
2433
28
}
2434
2435
void
2436
PyObject_GC_Del(void *op)
2437
577M
{
2438
577M
    size_t presize = _PyType_PreHeaderSize(Py_TYPE(op));
2439
577M
    PyGC_Head *g = AS_GC(op);
2440
577M
    if (_PyObject_GC_IS_TRACKED(op)) {
2441
0
        gc_list_remove(g);
2442
0
        GCState *gcstate = get_gc_state();
2443
0
        if (gcstate->young.count > 0) {
2444
0
            gcstate->young.count--;
2445
0
        }
2446
0
        gcstate->heap_size--;
2447
#ifdef Py_DEBUG
2448
        PyObject *exc = PyErr_GetRaisedException();
2449
        if (PyErr_WarnExplicitFormat(PyExc_ResourceWarning, "gc", 0,
2450
                                     "gc", NULL,
2451
                                     "Object of type %s is not untracked "
2452
                                     "before destruction",
2453
                                     Py_TYPE(op)->tp_name))
2454
        {
2455
            PyErr_FormatUnraisable("Exception ignored on object deallocation");
2456
        }
2457
        PyErr_SetRaisedException(exc);
2458
#endif
2459
0
    }
2460
577M
    PyObject_Free(((char *)op)-presize);
2461
577M
}
2462
2463
int
2464
PyObject_GC_IsTracked(PyObject* obj)
2465
0
{
2466
0
    if (_PyObject_IS_GC(obj) && _PyObject_GC_IS_TRACKED(obj)) {
2467
0
        return 1;
2468
0
    }
2469
0
    return 0;
2470
0
}
2471
2472
int
2473
PyObject_GC_IsFinalized(PyObject *obj)
2474
0
{
2475
0
    if (_PyObject_IS_GC(obj) && _PyGC_FINALIZED(obj)) {
2476
0
         return 1;
2477
0
    }
2478
0
    return 0;
2479
0
}
2480
2481
static int
2482
visit_generation(gcvisitobjects_t callback, void *arg, struct gc_generation *gen)
2483
0
{
2484
0
    PyGC_Head *gc_list, *gc;
2485
0
    gc_list = &gen->head;
2486
0
    for (gc = GC_NEXT(gc_list); gc != gc_list; gc = GC_NEXT(gc)) {
2487
0
        PyObject *op = FROM_GC(gc);
2488
0
        Py_INCREF(op);
2489
0
        int res = callback(op, arg);
2490
0
        Py_DECREF(op);
2491
0
        if (!res) {
2492
0
            return -1;
2493
0
        }
2494
0
    }
2495
0
    return 0;
2496
0
}
2497
2498
void
2499
PyUnstable_GC_VisitObjects(gcvisitobjects_t callback, void *arg)
2500
0
{
2501
0
    GCState *gcstate = get_gc_state();
2502
0
    int original_state = gcstate->enabled;
2503
0
    gcstate->enabled = 0;
2504
0
    if (visit_generation(callback, arg, &gcstate->young) < 0) {
2505
0
        goto done;
2506
0
    }
2507
0
    if (visit_generation(callback, arg, &gcstate->old[0]) < 0) {
2508
0
        goto done;
2509
0
    }
2510
0
    if (visit_generation(callback, arg, &gcstate->old[1]) < 0) {
2511
0
        goto done;
2512
0
    }
2513
0
    visit_generation(callback, arg, &gcstate->permanent_generation);
2514
0
done:
2515
0
    gcstate->enabled = original_state;
2516
0
}
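/* Hedged usage sketch (editorial; 'count_dicts' is a hypothetical callback,
 * not part of gc.c): PyUnstable_GC_VisitObjects() calls the callback for
 * every tracked object; returning nonzero continues the walk, returning 0
 * stops it early. */
static int
count_dicts(PyObject *op, void *arg)
{
    if (PyDict_Check(op)) {
        (*(Py_ssize_t *)arg)++;
    }
    return 1;                       /* keep visiting */
}

/* Typical call site:
 *     Py_ssize_t n = 0;
 *     PyUnstable_GC_VisitObjects(count_dicts, &n);
 */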
2517
2518
#endif  // Py_GIL_DISABLED