/src/Python-3.8.3/Modules/gcmodule.c
Line  | Count  | Source  | 
1  |  | /*  | 
2  |  |  | 
3  |  |   Reference Cycle Garbage Collection  | 
4  |  |   ==================================  | 
5  |  |  | 
6  |  |   Neil Schemenauer <nas@arctrix.com>  | 
7  |  |  | 
8  |  |   Based on a post on the python-dev list.  Ideas from Guido van Rossum,  | 
9  |  |   Eric Tiedemann, and various others.  | 
10  |  |  | 
11  |  |   http://www.arctrix.com/nas/python/gc/  | 
12  |  |  | 
13  |  |   The following mailing list threads provide a historical perspective on  | 
14  |  |   the design of this module.  Note that a fair amount of refinement has  | 
15  |  |   occurred since those discussions.  | 
16  |  |  | 
17  |  |   http://mail.python.org/pipermail/python-dev/2000-March/002385.html  | 
18  |  |   http://mail.python.org/pipermail/python-dev/2000-March/002434.html  | 
19  |  |   http://mail.python.org/pipermail/python-dev/2000-March/002497.html  | 
20  |  |  | 
21  |  |   For a high-level view of the collection process, read the collect  | 
22  |  |   function.  | 
23  |  |  | 
24  |  | */  | 
25  |  |  | 
26  |  | #include "Python.h"  | 
27  |  | #include "pycore_context.h"  | 
28  |  | #include "pycore_object.h"  | 
29  |  | #include "pycore_pymem.h"  | 
30  |  | #include "pycore_pystate.h"  | 
31  |  | #include "frameobject.h"        /* for PyFrame_ClearFreeList */  | 
32  |  | #include "pydtrace.h"  | 
33  |  | #include "pytime.h"             /* for _PyTime_GetMonotonicClock() */  | 
34  |  |  | 
35  |  | /*[clinic input]  | 
36  |  | module gc  | 
37  |  | [clinic start generated code]*/  | 
38  |  | /*[clinic end generated code: output=da39a3ee5e6b4b0d input=b5c9690ecc842d79]*/  | 
39  |  |  | 
40  |  | #define GC_DEBUG (0)  /* Enable more asserts */  | 
41  |  |  | 
42  | 294k  | #define GC_NEXT _PyGCHead_NEXT  | 
43  | 36.0k  | #define GC_PREV _PyGCHead_PREV  | 
44  |  |  | 
45  |  | // update_refs() sets this bit for all objects in the current generation.  | 
46  |  | // subtract_refs() and move_unreachable() use it to distinguish whether a  | 
47  |  | // visited object is part of the generation being collected.  | 
48  |  | //  | 
49  |  | // move_unreachable() removes this flag from reachable objects.  | 
50  |  | // Only unreachable objects have this flag.  | 
51  |  | //  | 
52  |  | // No object in the interpreter has this flag after GC ends.  | 
53  | 384k  | #define PREV_MASK_COLLECTING   _PyGC_PREV_MASK_COLLECTING  | 
54  |  |  | 
55  |  | // The lowest bit of _gc_next is used for the UNREACHABLE flag.  | 
56  |  | //  | 
57  |  | // This flag indicates that the object is in the unreachable list built by  | 
58  |  | // move_unreachable().  | 
59  |  | // Although this flag is used only in move_unreachable(), that function  | 
60  |  | // doesn't clear it, to avoid an unnecessary extra iteration;  | 
61  |  | // move_legacy_finalizers() removes the flag instead.  | 
62  |  | // Between the two, the unreachable list is not a normal list and most  | 
63  |  | // gc_list_* functions cannot be used on it.  | 
64  | 117k  | #define NEXT_MASK_UNREACHABLE  (1)  | 
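/* A minimal standalone sketch (not CPython's actual code) of the tagging
 * trick above: nodes are at least pointer-aligned, so the low bit of a
 * node's address is always zero and can carry a boolean flag, the way
 * NEXT_MASK_UNREACHABLE rides in _gc_next.  node_t and the helper names
 * are invented for the example. */

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define MASK_UNREACHABLE ((uintptr_t)1)

typedef struct node {
    uintptr_t next;   /* address of the next node; low bit carries a flag */
} node_t;

static node_t *get_next(node_t *n)
{
    return (node_t *)(n->next & ~MASK_UNREACHABLE);   /* strip the tag */
}

static int is_unreachable(node_t *n)
{
    return (n->next & MASK_UNREACHABLE) != 0;
}

static void set_next(node_t *n, node_t *next, int unreachable)
{
    n->next = (uintptr_t)next | (unreachable ? MASK_UNREACHABLE : 0);
}

int main(void)
{
    node_t a, b;
    set_next(&a, &b, 1);
    assert(get_next(&a) == &b);   /* the pointer survives the tagging */
    assert(is_unreachable(&a));   /* and the flag is recoverable */
    printf("flag carried in the low bit OK\n");
    return 0;
}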
65  |  |  | 
66  |  | /* Get an object's GC head */  | 
67  | 228k  | #define AS_GC(o) ((PyGC_Head *)(o)-1)  | 
68  |  |  | 
69  |  | /* Get the object given the GC head */  | 
70  | 514k  | #define FROM_GC(g) ((PyObject *)(((PyGC_Head *)g)+1))  | 
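/* These macros work because the GC header is allocated immediately before
 * the object it tracks, so converting between the two is plain pointer
 * arithmetic.  A minimal sketch of the same layout trick, with invented
 * header_t/obj_t types: */

#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct {
    void *prev, *next;          /* stand-in for the real GC head */
} header_t;

typedef struct {
    int value;                  /* stand-in for a Python object */
} obj_t;

#define AS_HEADER(o)   ((header_t *)(o) - 1)
#define FROM_HEADER(h) ((obj_t *)((header_t *)(h) + 1))

int main(void)
{
    /* Allocate the header and the object as one block, header first. */
    header_t *h = malloc(sizeof(header_t) + sizeof(obj_t));
    if (h == NULL)
        return 1;
    obj_t *o = FROM_HEADER(h);
    o->value = 42;

    /* Round-trip: object -> header -> object. */
    assert(AS_HEADER(o) == h);
    assert(FROM_HEADER(AS_HEADER(o)) == o);
    printf("value via round-trip: %d\n", FROM_HEADER(h)->value);

    free(h);
    return 0;
}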
71  |  |  | 
72  |  | static inline int  | 
73  |  | gc_is_collecting(PyGC_Head *g)  | 
74  | 193k  | { | 
75  | 193k  |     return (g->_gc_prev & PREV_MASK_COLLECTING) != 0;  | 
76  | 193k  | }  | 
77  |  |  | 
78  |  | static inline void  | 
79  |  | gc_clear_collecting(PyGC_Head *g)  | 
80  | 95.5k  | { | 
81  | 95.5k  |     g->_gc_prev &= ~PREV_MASK_COLLECTING;  | 
82  | 95.5k  | }  | 
83  |  |  | 
84  |  | static inline Py_ssize_t  | 
85  |  | gc_get_refs(PyGC_Head *g)  | 
86  | 211k  | { | 
87  | 211k  |     return (Py_ssize_t)(g->_gc_prev >> _PyGC_PREV_SHIFT);  | 
88  | 211k  | }  | 
89  |  |  | 
90  |  | static inline void  | 
91  |  | gc_set_refs(PyGC_Head *g, Py_ssize_t refs)  | 
92  | 60.9k  | { | 
93  | 60.9k  |     g->_gc_prev = (g->_gc_prev & ~_PyGC_PREV_MASK)  | 
94  | 60.9k  |         | ((uintptr_t)(refs) << _PyGC_PREV_SHIFT);  | 
95  | 60.9k  | }  | 
96  |  |  | 
97  |  | static inline void  | 
98  |  | gc_reset_refs(PyGC_Head *g, Py_ssize_t refs)  | 
99  | 95.5k  | { | 
100  | 95.5k  |     g->_gc_prev = (g->_gc_prev & _PyGC_PREV_MASK_FINALIZED)  | 
101  | 95.5k  |         | PREV_MASK_COLLECTING  | 
102  | 95.5k  |         | ((uintptr_t)(refs) << _PyGC_PREV_SHIFT);  | 
103  | 95.5k  | }  | 
104  |  |  | 
105  |  | static inline void  | 
106  |  | gc_decref(PyGC_Head *g)  | 
107  | 89.6k  | { | 
108  | 89.6k  |     _PyObject_ASSERT_WITH_MSG(FROM_GC(g),  | 
109  | 89.6k  |                               gc_get_refs(g) > 0,  | 
110  | 89.6k  |                               "refcount is too small");  | 
111  | 89.6k  |     g->_gc_prev -= 1 << _PyGC_PREV_SHIFT;  | 
112  | 89.6k  | }  | 
113  |  |  | 
114  |  | /* Python string to use if an unhandled exception occurs */  | 
115  |  | static PyObject *gc_str = NULL;  | 
116  |  |  | 
117  |  | /* bit flags selecting debugging output */  | 
118  | 268  | #define DEBUG_STATS             (1<<0) /* print collection statistics */  | 
119  | 134  | #define DEBUG_COLLECTABLE       (1<<1) /* print collectable objects */  | 
120  | 0  | #define DEBUG_UNCOLLECTABLE     (1<<2) /* print uncollectable objects */  | 
121  | 71  | #define DEBUG_SAVEALL           (1<<5) /* save all garbage in gc.garbage */  | 
122  |  | #define DEBUG_LEAK              (DEBUG_COLLECTABLE | \  | 
123  |  |                 DEBUG_UNCOLLECTABLE | \  | 
124  |  |                 DEBUG_SAVEALL)  | 
125  |  |  | 
126  | 368  | #define GEN_HEAD(state, n) (&(state)->generations[n].head)  | 
127  |  |  | 
128  |  | void  | 
129  |  | _PyGC_Initialize(struct _gc_runtime_state *state)  | 
130  | 14  | { | 
131  | 14  |     state->enabled = 1; /* automatic collection enabled? */  | 
132  |  |  | 
133  | 84  | #define _GEN_HEAD(n) GEN_HEAD(state, n)  | 
134  | 14  |     struct gc_generation generations[NUM_GENERATIONS] = { | 
135  |  |         /* PyGC_Head,                                    threshold,    count */  | 
136  | 14  |         {{(uintptr_t)_GEN_HEAD(0), (uintptr_t)_GEN_HEAD(0)},   700,        0}, | 
137  | 14  |         {{(uintptr_t)_GEN_HEAD(1), (uintptr_t)_GEN_HEAD(1)},   10,         0}, | 
138  | 14  |         {{(uintptr_t)_GEN_HEAD(2), (uintptr_t)_GEN_HEAD(2)},   10,         0}, | 
139  | 14  |     };  | 
140  | 56  |     for (int i = 0; i < NUM_GENERATIONS; i++) { | 
141  | 42  |         state->generations[i] = generations[i];  | 
142  | 42  |     };  | 
143  | 14  |     state->generation0 = GEN_HEAD(state, 0);  | 
144  | 14  |     struct gc_generation permanent_generation = { | 
145  | 14  |           {(uintptr_t)&state->permanent_generation.head, | 
146  | 14  |            (uintptr_t)&state->permanent_generation.head}, 0, 0  | 
147  | 14  |     };  | 
148  | 14  |     state->permanent_generation = permanent_generation;  | 
149  | 14  | }  | 
150  |  |  | 
151  |  | /*  | 
152  |  | _gc_prev values  | 
153  |  | ---------------  | 
154  |  |  | 
155  |  | Between collections, _gc_prev is used as the back pointer of a doubly  | 
156  |  | linked list.  | 
157  |  | The lowest two bits of _gc_prev are used for flags.  | 
158  |  | PREV_MASK_COLLECTING is used only while collecting; it is cleared before GC  | 
159  |  | ends or when _PyObject_GC_UNTRACK() is called.  | 
160  |  |  | 
161  |  | During a collection, _gc_prev is temporarily used to store gc_refs, and the  | 
162  |  | GC list is singly linked until _gc_prev is restored.  | 
163  |  |  | 
164  |  | gc_refs  | 
165  |  |     At the start of a collection, update_refs() copies the true refcount  | 
166  |  |     to gc_refs, for each object in the generation being collected.  | 
167  |  |     subtract_refs() then adjusts gc_refs so that it equals the number of  | 
168  |  |     times an object is referenced directly from outside the generation  | 
169  |  |     being collected.  | 
170  |  |  | 
171  |  | PREV_MASK_COLLECTING  | 
172  |  |     Objects in the generation being collected are marked  | 
173  |  |     PREV_MASK_COLLECTING by update_refs().  | 
174  |  |  | 
175  |  |  | 
176  |  | _gc_next values  | 
177  |  | ---------------  | 
178  |  |  | 
179  |  | _gc_next takes these values:  | 
180  |  |  | 
181  |  | 0  | 
182  |  |     The object is not tracked  | 
183  |  |  | 
184  |  | != 0  | 
185  |  |     Pointer to the next object in the GC list.  | 
186  |  |     Additionally, the lowest bit is temporarily used for the  | 
187  |  |     NEXT_MASK_UNREACHABLE flag described below.  | 
188  |  |  | 
189  |  | NEXT_MASK_UNREACHABLE  | 
190  |  |     move_unreachable() moves objects that are not reachable (whether  | 
191  |  |     directly or indirectly) from outside the generation into an  | 
192  |  |     "unreachable" set and sets this flag.  | 
193  |  |  | 
194  |  |     Objects that are found to be reachable have gc_refs set to 1.  | 
195  |  |     If this flag is set on such a reachable object, the object must be in  | 
196  |  |     the "unreachable" set.  | 
197  |  |     The flag is then unset and the object is moved back to the "reachable" set.  | 
198  |  |  | 
199  |  |     move_legacy_finalizers() removes this flag from the "unreachable" set.  | 
200  |  | */  | 
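/* A standalone sketch of the dual-use word described above: between
 * collections it is a back pointer plus two flag bits; during a collection
 * the same word holds a shifted copy of the refcount.  The shift of 2
 * matches the two flag bits, but the names are invented, not CPython's. */

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define FLAG_FINALIZED  ((uintptr_t)1 << 0)
#define FLAG_COLLECTING ((uintptr_t)1 << 1)
#define REFS_SHIFT      2
#define PTR_MASK        (~(FLAG_FINALIZED | FLAG_COLLECTING))

typedef struct { uintptr_t prev; } head_t;

/* List mode: prev holds a pointer with the two flag bits mixed in. */
static head_t *get_prev(head_t *h) { return (head_t *)(h->prev & PTR_MASK); }

/* Collection mode: the same word holds a shifted refcount copy, keeping
 * only the FINALIZED bit and setting COLLECTING. */
static void reset_refs(head_t *h, intptr_t refs)
{
    h->prev = (h->prev & FLAG_FINALIZED)
              | FLAG_COLLECTING
              | ((uintptr_t)refs << REFS_SHIFT);
}

static intptr_t get_refs(head_t *h) { return (intptr_t)(h->prev >> REFS_SHIFT); }
static void dec_refs(head_t *h)     { h->prev -= (uintptr_t)1 << REFS_SHIFT; }

int main(void)
{
    head_t a = {0}, b = {0};
    b.prev = (uintptr_t)&a | FLAG_FINALIZED;   /* list mode */
    assert(get_prev(&b) == &a);

    reset_refs(&b, 3);                         /* switch to collection mode */
    dec_refs(&b);
    assert(get_refs(&b) == 2);
    assert(b.prev & FLAG_COLLECTING);
    assert(b.prev & FLAG_FINALIZED);           /* FINALIZED survived */
    printf("refs now %ld\n", (long)get_refs(&b));
    return 0;
}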
201  |  |  | 
202  |  | /*** list functions ***/  | 
203  |  |  | 
204  |  | static inline void  | 
205  |  | gc_list_init(PyGC_Head *list)  | 
206  | 939  | { | 
207  |  |     // The list header must not have flags set, so the pointer  | 
208  |  |     // can be assigned with a plain cast.  | 
209  | 939  |     list->_gc_prev = (uintptr_t)list;  | 
210  | 939  |     list->_gc_next = (uintptr_t)list;  | 
211  | 939  | }  | 
212  |  |  | 
213  |  | static inline int  | 
214  |  | gc_list_is_empty(PyGC_Head *list)  | 
215  | 972  | { | 
216  | 972  |     return (list->_gc_next == (uintptr_t)list);  | 
217  | 972  | }  | 
218  |  |  | 
219  |  | /* Append `node` to `list`. */  | 
220  |  | static inline void  | 
221  |  | gc_list_append(PyGC_Head *node, PyGC_Head *list)  | 
222  | 17.7k  | { | 
223  | 17.7k  |     PyGC_Head *last = (PyGC_Head *)list->_gc_prev;  | 
224  |  |  | 
225  |  |     // last <-> node  | 
226  | 17.7k  |     _PyGCHead_SET_PREV(node, last);  | 
227  | 17.7k  |     _PyGCHead_SET_NEXT(last, node);  | 
228  |  |  | 
229  |  |     // node <-> list  | 
230  | 17.7k  |     _PyGCHead_SET_NEXT(node, list);  | 
231  | 17.7k  |     list->_gc_prev = (uintptr_t)node;  | 
232  | 17.7k  | }  | 
233  |  |  | 
234  |  | /* Remove `node` from the gc list it's currently in. */  | 
235  |  | static inline void  | 
236  |  | gc_list_remove(PyGC_Head *node)  | 
237  | 0  | { | 
238  | 0  |     PyGC_Head *prev = GC_PREV(node);  | 
239  | 0  |     PyGC_Head *next = GC_NEXT(node);  | 
240  |  |  | 
241  | 0  |     _PyGCHead_SET_NEXT(prev, next);  | 
242  | 0  |     _PyGCHead_SET_PREV(next, prev);  | 
243  |  |  | 
244  | 0  |     node->_gc_next = 0; /* object is not currently tracked */  | 
245  | 0  | }  | 
246  |  |  | 
247  |  | /* Move `node` from the gc list it's currently in (which is not explicitly  | 
248  |  |  * named here) to the end of `list`.  This is semantically the same as  | 
249  |  |  * gc_list_remove(node) followed by gc_list_append(node, list).  | 
250  |  |  */  | 
251  |  | static void  | 
252  |  | gc_list_move(PyGC_Head *node, PyGC_Head *list)  | 
253  | 161  | { | 
254  |  |     /* Unlink from current list. */  | 
255  | 161  |     PyGC_Head *from_prev = GC_PREV(node);  | 
256  | 161  |     PyGC_Head *from_next = GC_NEXT(node);  | 
257  | 161  |     _PyGCHead_SET_NEXT(from_prev, from_next);  | 
258  | 161  |     _PyGCHead_SET_PREV(from_next, from_prev);  | 
259  |  |  | 
260  |  |     /* Relink at end of new list. */  | 
261  |  |     // list must not have flags.  So we can skip macros.  | 
262  | 161  |     PyGC_Head *to_prev = (PyGC_Head*)list->_gc_prev;  | 
263  | 161  |     _PyGCHead_SET_PREV(node, to_prev);  | 
264  | 161  |     _PyGCHead_SET_NEXT(to_prev, node);  | 
265  | 161  |     list->_gc_prev = (uintptr_t)node;  | 
266  | 161  |     _PyGCHead_SET_NEXT(node, list);  | 
267  | 161  | }  | 
268  |  |  | 
269  |  | /* append list `from` onto list `to`; `from` becomes an empty list */  | 
270  |  | static void  | 
271  |  | gc_list_merge(PyGC_Head *from, PyGC_Head *to)  | 
272  | 403  | { | 
273  | 403  |     assert(from != to);  | 
274  | 403  |     if (!gc_list_is_empty(from)) { | 
275  | 138  |         PyGC_Head *to_tail = GC_PREV(to);  | 
276  | 138  |         PyGC_Head *from_head = GC_NEXT(from);  | 
277  | 138  |         PyGC_Head *from_tail = GC_PREV(from);  | 
278  | 138  |         assert(from_head != from);  | 
279  | 138  |         assert(from_tail != from);  | 
280  |  |  | 
281  | 138  |         _PyGCHead_SET_NEXT(to_tail, from_head);  | 
282  | 138  |         _PyGCHead_SET_PREV(from_head, to_tail);  | 
283  |  |  | 
284  | 138  |         _PyGCHead_SET_NEXT(from_tail, to);  | 
285  | 138  |         _PyGCHead_SET_PREV(to, from_tail);  | 
286  | 138  |     }  | 
287  | 403  |     gc_list_init(from);  | 
288  | 403  | }  | 
289  |  |  | 
290  |  | static Py_ssize_t  | 
291  |  | gc_list_size(PyGC_Head *list)  | 
292  | 135  | { | 
293  | 135  |     PyGC_Head *gc;  | 
294  | 135  |     Py_ssize_t n = 0;  | 
295  | 5.30k  |     for (gc = GC_NEXT(list); gc != list; gc = GC_NEXT(gc)) { | 
296  | 5.16k  |         n++;  | 
297  | 5.16k  |     }  | 
298  | 135  |     return n;  | 
299  | 135  | }  | 
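/* A self-contained sketch of the sentinel-based circular list the
 * gc_list_* helpers implement, using plain pointers instead of the tagged
 * uintptr_t fields (invented names; simplified relative to the real code): */

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

typedef struct list_node {
    struct list_node *prev, *next;
} list_node;

/* An empty list is a sentinel whose links point at itself, so append and
 * remove never need NULL checks. */
static void list_init(list_node *head) { head->prev = head->next = head; }
static int  list_is_empty(list_node *head) { return head->next == head; }

static void list_append(list_node *node, list_node *head)
{
    list_node *last = head->prev;
    node->prev = last;  last->next = node;   /* last <-> node */
    node->next = head;  head->prev = node;   /* node <-> head */
}

static void list_remove(list_node *node)
{
    node->prev->next = node->next;
    node->next->prev = node->prev;
    node->next = NULL;                       /* "not tracked" marker */
}

int main(void)
{
    list_node head, a, b;
    list_init(&head);
    assert(list_is_empty(&head));

    list_append(&a, &head);
    list_append(&b, &head);
    assert(head.next == &a && a.next == &b && b.next == &head);

    list_remove(&a);
    assert(head.next == &b && !list_is_empty(&head));
    printf("sentinel list OK\n");
    return 0;
}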
300  |  |  | 
301  |  | /* Append objects in a GC list to a Python list.  | 
302  |  |  * Return 0 if all OK, < 0 if error (out of memory for list).  | 
303  |  |  */  | 
304  |  | static int  | 
305  |  | append_objects(PyObject *py_list, PyGC_Head *gc_list)  | 
306  | 0  | { | 
307  | 0  |     PyGC_Head *gc;  | 
308  | 0  |     for (gc = GC_NEXT(gc_list); gc != gc_list; gc = GC_NEXT(gc)) { | 
309  | 0  |         PyObject *op = FROM_GC(gc);  | 
310  | 0  |         if (op != py_list) { | 
311  | 0  |             if (PyList_Append(py_list, op)) { | 
312  | 0  |                 return -1; /* exception */  | 
313  | 0  |             }  | 
314  | 0  |         }  | 
315  | 0  |     }  | 
316  | 0  |     return 0;  | 
317  | 0  | }  | 
318  |  |  | 
319  |  | #if GC_DEBUG  | 
320  |  | // validate_list checks list consistency, and also serves as documentation  | 
321  |  | // of when expected_mask is set / unset.  | 
322  |  | static void  | 
323  |  | validate_list(PyGC_Head *head, uintptr_t expected_mask)  | 
324  |  | { | 
325  |  |     PyGC_Head *prev = head;  | 
326  |  |     PyGC_Head *gc = GC_NEXT(head);  | 
327  |  |     while (gc != head) { | 
328  |  |         assert(GC_NEXT(gc) != NULL);  | 
329  |  |         assert(GC_PREV(gc) == prev);  | 
330  |  |         assert((gc->_gc_prev & PREV_MASK_COLLECTING) == expected_mask);  | 
331  |  |         prev = gc;  | 
332  |  |         gc = GC_NEXT(gc);  | 
333  |  |     }  | 
334  |  |     assert(prev == GC_PREV(head));  | 
335  |  | }  | 
336  |  | #else  | 
337  | 1.07k  | #define validate_list(x,y) do{}while(0) | 
338  |  | #endif  | 
339  |  |  | 
340  |  | /*** end of list stuff ***/  | 
341  |  |  | 
342  |  |  | 
343  |  | /* Set all gc_refs = ob_refcnt.  After this, gc_refs is > 0 and  | 
344  |  |  * the PREV_MASK_COLLECTING bit is set for all objects in containers.  | 
345  |  |  */  | 
346  |  | static void  | 
347  |  | update_refs(PyGC_Head *containers)  | 
348  | 134  | { | 
349  | 134  |     PyGC_Head *gc = GC_NEXT(containers);  | 
350  | 95.6k  |     for (; gc != containers; gc = GC_NEXT(gc)) { | 
351  | 95.5k  |         gc_reset_refs(gc, Py_REFCNT(FROM_GC(gc)));  | 
352  |  |         /* Python's cyclic gc should never see an incoming refcount  | 
353  |  |          * of 0:  if something decref'ed to 0, it should have been  | 
354  |  |          * deallocated immediately at that time.  | 
355  |  |          * Possible cause (if the assert triggers):  a tp_dealloc  | 
356  |  |          * routine left a gc-aware object tracked during its teardown  | 
357  |  |          * phase, and did something-- or allowed something to happen --  | 
358  |  |          * that called back into Python.  gc can trigger then, and may  | 
359  |  |          * see the still-tracked dying object.  Before this assert  | 
360  |  |          * was added, such mistakes went on to allow gc to try to  | 
361  |  |          * delete the object again.  In a debug build, that caused  | 
362  |  |          * a mysterious segfault, when _Py_ForgetReference tried  | 
363  |  |          * to remove the object from the doubly-linked list of all  | 
364  |  |          * objects a second time.  In a release build, an actual  | 
365  |  |          * double deallocation occurred, which leads to corruption  | 
366  |  |          * of the allocator's internal bookkeeping pointers.  That's  | 
367  |  |          * so serious that maybe this should be a release-build  | 
368  |  |          * check instead of an assert?  | 
369  |  |          */  | 
370  | 95.5k  |         _PyObject_ASSERT(FROM_GC(gc), gc_get_refs(gc) != 0);  | 
371  | 95.5k  |     }  | 
372  | 134  | }  | 
373  |  |  | 
374  |  | /* A traversal callback for subtract_refs. */  | 
375  |  | static int  | 
376  |  | visit_decref(PyObject *op, void *parent)  | 
377  | 331k  | { | 
378  | 331k  |     _PyObject_ASSERT(_PyObject_CAST(parent), !_PyObject_IsFreed(op));  | 
379  |  |  | 
380  | 331k  |     if (PyObject_IS_GC(op)) { | 
381  | 98.6k  |         PyGC_Head *gc = AS_GC(op);  | 
382  |  |         /* We're only interested in gc_refs for objects in the  | 
383  |  |          * generation being collected, which can be recognized  | 
384  |  |          * because only they have positive gc_refs.  | 
385  |  |          */  | 
386  | 98.6k  |         if (gc_is_collecting(gc)) { | 
387  | 89.6k  |             gc_decref(gc);  | 
388  | 89.6k  |         }  | 
389  | 98.6k  |     }  | 
390  | 331k  |     return 0;  | 
391  | 331k  | }  | 
392  |  |  | 
393  |  | /* Subtract internal references from gc_refs.  After this, gc_refs is >= 0  | 
394  |  |  * for all objects in containers.  The ones with gc_refs > 0 are  | 
395  |  |  * directly reachable from outside containers, and so can't be  | 
396  |  |  * collected.  | 
397  |  |  */  | 
398  |  | static void  | 
399  |  | subtract_refs(PyGC_Head *containers)  | 
400  | 268  | { | 
401  | 268  |     traverseproc traverse;  | 
402  | 268  |     PyGC_Head *gc = GC_NEXT(containers);  | 
403  | 95.8k  |     for (; gc != containers; gc = GC_NEXT(gc)) { | 
404  | 95.6k  |         PyObject *op = FROM_GC(gc);  | 
405  | 95.6k  |         traverse = Py_TYPE(op)->tp_traverse;  | 
406  | 95.6k  |         (void) traverse(FROM_GC(gc),  | 
407  | 95.6k  |                        (visitproc)visit_decref,  | 
408  | 95.6k  |                        op);  | 
409  | 95.6k  |     }  | 
410  | 268  | }  | 
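/* A toy model (invented types, not CPython's) of the two passes above:
 * copy each object's true refcount, then subtract one for every reference
 * held by another object in the set.  What remains is exactly the number
 * of references coming from outside the set. */

#include <stdio.h>

#define NOBJ 3
#define MAXEDGE 1

typedef struct obj {
    int refcnt;                 /* true reference count */
    int gc_refs;                /* scratch copy used by the "GC" */
    struct obj *edges[MAXEDGE]; /* references this object holds */
} obj;

int main(void)
{
    obj o[NOBJ] = {0};

    /* A cycle o[0] <-> o[1]; o[2] also points at o[0] and has one
     * reference from outside the set (say, a C local). */
    o[0].edges[0] = &o[1];
    o[1].edges[0] = &o[0];
    o[2].edges[0] = &o[0];
    o[0].refcnt = 2;   /* from o[1] and o[2] */
    o[1].refcnt = 1;   /* from o[0] */
    o[2].refcnt = 1;   /* from outside only */

    /* update_refs: copy the true refcounts into the scratch field. */
    for (int i = 0; i < NOBJ; i++)
        o[i].gc_refs = o[i].refcnt;

    /* subtract_refs: one decrement per reference internal to the set. */
    for (int i = 0; i < NOBJ; i++)
        for (int j = 0; j < MAXEDGE; j++)
            if (o[i].edges[j])
                o[i].edges[j]->gc_refs--;

    /* gc_refs now counts only external references: o[0] and o[1] drop
     * to 0, o[2] keeps 1 and therefore acts as an external root. */
    for (int i = 0; i < NOBJ; i++)
        printf("o[%d]: gc_refs = %d\n", i, o[i].gc_refs);
    return 0;
}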
411  |  |  | 
412  |  | /* A traversal callback for move_unreachable. */  | 
413  |  | static int  | 
414  |  | visit_reachable(PyObject *op, PyGC_Head *reachable)  | 
415  | 330k  | { | 
416  | 330k  |     if (!PyObject_IS_GC(op)) { | 
417  | 232k  |         return 0;  | 
418  | 232k  |     }  | 
419  |  |  | 
420  | 98.2k  |     PyGC_Head *gc = AS_GC(op);  | 
421  | 98.2k  |     const Py_ssize_t gc_refs = gc_get_refs(gc);  | 
422  |  |  | 
423  |  |     // Ignore untracked objects and objects in other generations.  | 
424  | 98.2k  |     if (gc->_gc_next == 0 || !gc_is_collecting(gc)) { | 
425  | 34.5k  |         return 0;  | 
426  | 34.5k  |     }  | 
427  |  |  | 
428  | 63.7k  |     if (gc->_gc_next & NEXT_MASK_UNREACHABLE) { | 
429  |  |         /* This had gc_refs = 0 when move_unreachable got  | 
430  |  |          * to it, but turns out it's reachable after all.  | 
431  |  |          * Move it back to move_unreachable's 'young' list,  | 
432  |  |          * and move_unreachable will eventually get to it  | 
433  |  |          * again.  | 
434  |  |          */  | 
435  |  |         // Manually unlink gc from the unreachable list, because gc_list_remove() can't be used while NEXT_MASK_UNREACHABLE is set.  | 
436  | 17.7k  |         PyGC_Head *prev = GC_PREV(gc);  | 
437  | 17.7k  |         PyGC_Head *next = (PyGC_Head*)(gc->_gc_next & ~NEXT_MASK_UNREACHABLE);  | 
438  | 17.7k  |         _PyObject_ASSERT(FROM_GC(prev),  | 
439  | 17.7k  |                          prev->_gc_next & NEXT_MASK_UNREACHABLE);  | 
440  | 17.7k  |         _PyObject_ASSERT(FROM_GC(next),  | 
441  | 17.7k  |                          next->_gc_next & NEXT_MASK_UNREACHABLE);  | 
442  | 17.7k  |         prev->_gc_next = gc->_gc_next;  // copy NEXT_MASK_UNREACHABLE  | 
443  | 17.7k  |         _PyGCHead_SET_PREV(next, prev);  | 
444  |  |  | 
445  | 17.7k  |         gc_list_append(gc, reachable);  | 
446  | 17.7k  |         gc_set_refs(gc, 1);  | 
447  | 17.7k  |     }  | 
448  | 46.0k  |     else if (gc_refs == 0) { | 
449  |  |         /* This is in move_unreachable's 'young' list, but  | 
450  |  |          * the traversal hasn't yet gotten to it.  All  | 
451  |  |          * we need to do is tell move_unreachable that it's  | 
452  |  |          * reachable.  | 
453  |  |          */  | 
454  | 43.0k  |         gc_set_refs(gc, 1);  | 
455  | 43.0k  |     }  | 
456  |  |     /* Else there's nothing to do.  | 
457  |  |      * If gc_refs > 0, it must be in move_unreachable's 'young'  | 
458  |  |      * list, and move_unreachable will eventually get to it.  | 
459  |  |      */  | 
460  | 2.95k  |     else { | 
461  | 2.95k  |         _PyObject_ASSERT_WITH_MSG(op, gc_refs > 0, "refcount is too small");  | 
462  | 2.95k  |     }  | 
463  | 63.7k  |     return 0;  | 
464  | 98.2k  | }  | 
465  |  |  | 
466  |  | /* Move the unreachable objects from young to unreachable.  After this,  | 
467  |  |  * no object in young has the PREV_MASK_COLLECTING flag, and every object  | 
468  |  |  * in unreachable does.  | 
469  |  |  * All objects remaining in young are directly or indirectly reachable  | 
470  |  |  * from outside the original young, and all objects in unreachable are  | 
471  |  |  * not.  | 
472  |  |  *  | 
473  |  |  * This function restores the _gc_prev pointers; young and unreachable  | 
474  |  |  * are doubly linked lists after this function returns.  | 
475  |  |  * But _gc_next in the unreachable list still has the NEXT_MASK_UNREACHABLE  | 
476  |  |  * flag, so we cannot use gc_list_* functions on it until the flag is removed.  | 
477  |  |  */  | 
478  |  | static void  | 
479  |  | move_unreachable(PyGC_Head *young, PyGC_Head *unreachable)  | 
480  | 134  | { | 
481  |  |     // previous element in the young list, used to restore _gc_prev.  | 
482  | 134  |     PyGC_Head *prev = young;  | 
483  | 134  |     PyGC_Head *gc = GC_NEXT(young);  | 
484  |  |  | 
485  |  |     /* Invariants:  all objects "to the left" of us in young are reachable  | 
486  |  |      * (directly or indirectly) from outside the young list as it was at entry.  | 
487  |  |      *  | 
488  |  |      * All other objects from the original young "to the left" of us are in  | 
489  |  |      * unreachable now, and have NEXT_MASK_UNREACHABLE.  All objects to the  | 
490  |  |      * left of us in 'young' now have been scanned, and no objects here  | 
491  |  |      * or to the right have been scanned yet.  | 
492  |  |      */  | 
493  |  |  | 
494  | 113k  |     while (gc != young) { | 
495  | 113k  |         if (gc_get_refs(gc)) { | 
496  |  |             /* gc is definitely reachable from outside the  | 
497  |  |              * original 'young'.  Mark it as such, and traverse  | 
498  |  |              * its pointers to find any other objects that may  | 
499  |  |              * be directly reachable from it.  Note that the  | 
500  |  |              * call to tp_traverse may append objects to young,  | 
501  |  |              * so we have to wait until it returns to determine  | 
502  |  |              * the next object to visit.  | 
503  |  |              */  | 
504  | 95.4k  |             PyObject *op = FROM_GC(gc);  | 
505  | 95.4k  |             traverseproc traverse = Py_TYPE(op)->tp_traverse;  | 
506  | 95.4k  |             _PyObject_ASSERT_WITH_MSG(op, gc_get_refs(gc) > 0,  | 
507  | 95.4k  |                                       "refcount is too small");  | 
508  |  |             // NOTE: visit_reachable may change gc->_gc_next when  | 
509  |  |             // young->_gc_prev == gc.  Don't do gc = GC_NEXT(gc) before!  | 
510  | 95.4k  |             (void) traverse(op,  | 
511  | 95.4k  |                     (visitproc)visit_reachable,  | 
512  | 95.4k  |                     (void *)young);  | 
513  |  |             // relink gc_prev to prev element.  | 
514  | 95.4k  |             _PyGCHead_SET_PREV(gc, prev);  | 
515  |  |             // gc is not COLLECTING state after here.  | 
516  | 95.4k  |             gc_clear_collecting(gc);  | 
517  | 95.4k  |             prev = gc;  | 
518  | 95.4k  |         }  | 
519  | 17.8k  |         else { | 
520  |  |             /* This *may* be unreachable.  To make progress,  | 
521  |  |              * assume it is.  gc isn't directly reachable from  | 
522  |  |              * any object we've already traversed, but may be  | 
523  |  |              * reachable from an object we haven't gotten to yet.  | 
524  |  |              * visit_reachable will eventually move gc back into  | 
525  |  |              * young if that's so, and we'll see it again.  | 
526  |  |              */  | 
527  |  |             // Move gc to unreachable.  | 
528  |  |             // No need to set gc->next->prev = prev because the list is singly linked here.  | 
529  | 17.8k  |             prev->_gc_next = gc->_gc_next;  | 
530  |  |  | 
531  |  |             // We can't use gc_list_append() here because we need to  | 
532  |  |             // set the NEXT_MASK_UNREACHABLE flag.  | 
533  | 17.8k  |             PyGC_Head *last = GC_PREV(unreachable);  | 
534  |  |             // NOTE: Since all objects in the unreachable set have the  | 
535  |  |             // NEXT_MASK_UNREACHABLE flag, we set it unconditionally.  | 
536  |  |             // But this may also set the flag on the unreachable list head;  | 
537  |  |             // move_legacy_finalizers() takes care of clearing it.  | 
538  | 17.8k  |             last->_gc_next = (NEXT_MASK_UNREACHABLE | (uintptr_t)gc);  | 
539  | 17.8k  |             _PyGCHead_SET_PREV(gc, last);  | 
540  | 17.8k  |             gc->_gc_next = (NEXT_MASK_UNREACHABLE | (uintptr_t)unreachable);  | 
541  | 17.8k  |             unreachable->_gc_prev = (uintptr_t)gc;  | 
542  | 17.8k  |         }  | 
543  | 113k  |         gc = (PyGC_Head*)prev->_gc_next;  | 
544  | 113k  |     }  | 
545  |  |     // young->_gc_prev must be the last element remaining in the list.  | 
546  | 134  |     young->_gc_prev = (uintptr_t)prev;  | 
547  | 134  | }  | 
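/* move_unreachable() effectively treats every object that kept gc_refs > 0
 * as an external root and rescues everything transitively reachable from
 * those roots; whatever is never reached stays unreachable.  A toy
 * worklist version of that propagation (invented names; the real code
 * works in place with list surgery instead of an explicit stack): */

#include <stdio.h>

#define NOBJ 5
#define MAXEDGE 1

typedef struct obj {
    int gc_refs;                /* result of the subtract pass */
    int reachable;              /* set once proven reachable */
    struct obj *edges[MAXEDGE];
} obj;

int main(void)
{
    obj o[NOBJ] = {0};
    /* o[0] <-> o[1] is a cycle kept alive by o[2], which holds one
     * external reference (gc_refs 1 after subtract_refs).
     * o[3] <-> o[4] is a cycle with no external references at all. */
    o[0].edges[0] = &o[1];
    o[1].edges[0] = &o[0];
    o[2].edges[0] = &o[0];
    o[2].gc_refs = 1;
    o[3].edges[0] = &o[4];
    o[4].edges[0] = &o[3];

    /* Seed the worklist with the externally referenced objects. */
    obj *stack[NOBJ];
    int top = 0;
    for (int i = 0; i < NOBJ; i++) {
        if (o[i].gc_refs > 0) {
            o[i].reachable = 1;
            stack[top++] = &o[i];
        }
    }
    /* Rescue everything reachable from the roots (cf. visit_reachable). */
    while (top > 0) {
        obj *cur = stack[--top];
        for (int j = 0; j < MAXEDGE; j++) {
            obj *t = cur->edges[j];
            if (t && !t->reachable) {
                t->reachable = 1;
                stack[top++] = t;
            }
        }
    }

    /* o[0..2] end up reachable; the isolated cycle o[3] <-> o[4] does not. */
    for (int i = 0; i < NOBJ; i++)
        printf("o[%d]: %s\n", i, o[i].reachable ? "reachable" : "unreachable");
    return 0;
}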
548  |  |  | 
549  |  | static void  | 
550  |  | untrack_tuples(PyGC_Head *head)  | 
551  | 134  | { | 
552  | 134  |     PyGC_Head *next, *gc = GC_NEXT(head);  | 
553  | 95.5k  |     while (gc != head) { | 
554  | 95.4k  |         PyObject *op = FROM_GC(gc);  | 
555  | 95.4k  |         next = GC_NEXT(gc);  | 
556  | 95.4k  |         if (PyTuple_CheckExact(op)) { | 
557  | 31.9k  |             _PyTuple_MaybeUntrack(op);  | 
558  | 31.9k  |         }  | 
559  | 95.4k  |         gc = next;  | 
560  | 95.4k  |     }  | 
561  | 134  | }  | 
562  |  |  | 
563  |  | /* Try to untrack all currently tracked dictionaries */  | 
564  |  | static void  | 
565  |  | untrack_dicts(PyGC_Head *head)  | 
566  | 0  | { | 
567  | 0  |     PyGC_Head *next, *gc = GC_NEXT(head);  | 
568  | 0  |     while (gc != head) { | 
569  | 0  |         PyObject *op = FROM_GC(gc);  | 
570  | 0  |         next = GC_NEXT(gc);  | 
571  | 0  |         if (PyDict_CheckExact(op)) { | 
572  | 0  |             _PyDict_MaybeUntrack(op);  | 
573  | 0  |         }  | 
574  | 0  |         gc = next;  | 
575  | 0  |     }  | 
576  | 0  | }  | 
577  |  |  | 
578  |  | /* Return true if object has a pre-PEP 442 finalization method. */  | 
579  |  | static int  | 
580  |  | has_legacy_finalizer(PyObject *op)  | 
581  | 96  | { | 
582  | 96  |     return op->ob_type->tp_del != NULL;  | 
583  | 96  | }  | 
584  |  |  | 
585  |  | /* Move the objects in unreachable with tp_del slots into `finalizers`.  | 
586  |  |  *  | 
587  |  |  * This function also removes the NEXT_MASK_UNREACHABLE flag  | 
588  |  |  * from _gc_next in the unreachable list.  | 
589  |  |  */  | 
590  |  | static void  | 
591  |  | move_legacy_finalizers(PyGC_Head *unreachable, PyGC_Head *finalizers)  | 
592  | 134  | { | 
593  | 134  |     PyGC_Head *gc, *next;  | 
594  | 134  |     unreachable->_gc_next &= ~NEXT_MASK_UNREACHABLE;  | 
595  |  |  | 
596  |  |     /* March over unreachable.  Move objects with finalizers into  | 
597  |  |      * `finalizers`.  | 
598  |  |      */  | 
599  | 230  |     for (gc = GC_NEXT(unreachable); gc != unreachable; gc = next) { | 
600  | 96  |         PyObject *op = FROM_GC(gc);  | 
601  |  |  | 
602  | 96  |         _PyObject_ASSERT(op, gc->_gc_next & NEXT_MASK_UNREACHABLE);  | 
603  | 96  |         gc->_gc_next &= ~NEXT_MASK_UNREACHABLE;  | 
604  | 96  |         next = (PyGC_Head*)gc->_gc_next;  | 
605  |  |  | 
606  | 96  |         if (has_legacy_finalizer(op)) { | 
607  | 0  |             gc_clear_collecting(gc);  | 
608  | 0  |             gc_list_move(gc, finalizers);  | 
609  | 0  |         }  | 
610  | 96  |     }  | 
611  | 134  | }  | 
612  |  |  | 
613  |  | /* A traversal callback for move_legacy_finalizer_reachable. */  | 
614  |  | static int  | 
615  |  | visit_move(PyObject *op, PyGC_Head *tolist)  | 
616  | 0  | { | 
617  | 0  |     if (PyObject_IS_GC(op)) { | 
618  | 0  |         PyGC_Head *gc = AS_GC(op);  | 
619  | 0  |         if (gc_is_collecting(gc)) { | 
620  | 0  |             gc_list_move(gc, tolist);  | 
621  | 0  |             gc_clear_collecting(gc);  | 
622  | 0  |         }  | 
623  | 0  |     }  | 
624  | 0  |     return 0;  | 
625  | 0  | }  | 
626  |  |  | 
627  |  | /* Move objects that are reachable from finalizers, from the unreachable set  | 
628  |  |  * into finalizers set.  | 
629  |  |  */  | 
630  |  | static void  | 
631  |  | move_legacy_finalizer_reachable(PyGC_Head *finalizers)  | 
632  | 134  | { | 
633  | 134  |     traverseproc traverse;  | 
634  | 134  |     PyGC_Head *gc = GC_NEXT(finalizers);  | 
635  | 134  |     for (; gc != finalizers; gc = GC_NEXT(gc)) { | 
636  |  |         /* Note that the finalizers list may grow during this. */  | 
637  | 0  |         traverse = Py_TYPE(FROM_GC(gc))->tp_traverse;  | 
638  | 0  |         (void) traverse(FROM_GC(gc),  | 
639  | 0  |                         (visitproc)visit_move,  | 
640  | 0  |                         (void *)finalizers);  | 
641  | 0  |     }  | 
642  | 134  | }  | 
643  |  |  | 
644  |  | /* Clear all weakrefs to unreachable objects, and if such a weakref has a  | 
645  |  |  * callback, invoke it if necessary.  Note that it's possible for such  | 
646  |  |  * weakrefs to be outside the unreachable set -- indeed, those are precisely  | 
647  |  |  * the weakrefs whose callbacks must be invoked.  See gc_weakref.txt for  | 
648  |  |  * overview & some details.  Some weakrefs with callbacks may be reclaimed  | 
649  |  |  * directly by this routine; the number reclaimed is the return value.  Other  | 
650  |  |  * weakrefs with callbacks may be moved into the `old` generation.  Objects  | 
651  |  |  * moved into `old` were reachable to begin with; the objects remaining in  | 
652  |  |  * unreachable are still marked PREV_MASK_COLLECTING.  When this returns,  | 
653  |  |  * no object in `unreachable` is weakly referenced anymore.  | 
654  |  |  */  | 
655  |  | static int  | 
656  |  | handle_weakrefs(PyGC_Head *unreachable, PyGC_Head *old)  | 
657  | 134  | { | 
658  | 134  |     PyGC_Head *gc;  | 
659  | 134  |     PyObject *op;               /* generally FROM_GC(gc) */  | 
660  | 134  |     PyWeakReference *wr;        /* generally a cast of op */  | 
661  | 134  |     PyGC_Head wrcb_to_call;     /* weakrefs with callbacks to call */  | 
662  | 134  |     PyGC_Head *next;  | 
663  | 134  |     int num_freed = 0;  | 
664  |  |  | 
665  | 134  |     gc_list_init(&wrcb_to_call);  | 
666  |  |  | 
667  |  |     /* Clear all weakrefs to the objects in unreachable.  If such a weakref  | 
668  |  |      * also has a callback, move it into `wrcb_to_call` if the callback  | 
669  |  |      * needs to be invoked.  Note that we cannot invoke any callbacks until  | 
670  |  |      * all weakrefs to unreachable objects are cleared, lest the callback  | 
671  |  |      * resurrect an unreachable object via a still-active weakref.  We  | 
672  |  |      * make another pass over wrcb_to_call, invoking callbacks, after this  | 
673  |  |      * pass completes.  | 
674  |  |      */  | 
675  | 230  |     for (gc = GC_NEXT(unreachable); gc != unreachable; gc = next) { | 
676  | 96  |         PyWeakReference **wrlist;  | 
677  |  |  | 
678  | 96  |         op = FROM_GC(gc);  | 
679  | 96  |         next = GC_NEXT(gc);  | 
680  |  |  | 
681  | 96  |         if (PyWeakref_Check(op)) { | 
682  |  |             /* A weakref inside the unreachable set must be cleared.  If we  | 
683  |  |              * allow its callback to execute inside delete_garbage(), it  | 
684  |  |              * could expose objects that have tp_clear already called on  | 
685  |  |              * them.  Or, it could resurrect unreachable objects.  One way  | 
686  |  |              * this can happen is if some container objects do not implement  | 
687  |  |              * tp_traverse.  Then, wr_object can be outside the unreachable  | 
688  |  |              * set but can be deallocated as a result of breaking the  | 
689  |  |              * reference cycle.  If we don't clear the weakref, the callback  | 
690  |  |              * will run and potentially cause a crash.  See bpo-38006 for  | 
691  |  |              * one example.  | 
692  |  |              */  | 
693  | 0  |             _PyWeakref_ClearRef((PyWeakReference *)op);  | 
694  | 0  |         }  | 
695  |  |  | 
696  | 96  |         if (! PyType_SUPPORTS_WEAKREFS(Py_TYPE(op)))  | 
697  | 49  |             continue;  | 
698  |  |  | 
699  |  |         /* It supports weakrefs.  Does it have any? */  | 
700  | 47  |         wrlist = (PyWeakReference **)  | 
701  | 47  |                                 PyObject_GET_WEAKREFS_LISTPTR(op);  | 
702  |  |  | 
703  |  |         /* `op` may have some weakrefs.  March over the list, clear  | 
704  |  |          * all the weakrefs, and move the weakrefs with callbacks  | 
705  |  |          * that must be called into wrcb_to_call.  | 
706  |  |          */  | 
707  | 53  |         for (wr = *wrlist; wr != NULL; wr = *wrlist) { | 
708  | 6  |             PyGC_Head *wrasgc;                  /* AS_GC(wr) */  | 
709  |  |  | 
710  |  |             /* _PyWeakref_ClearRef clears the weakref but leaves  | 
711  |  |              * the callback pointer intact.  Obscure:  it also  | 
712  |  |              * changes *wrlist.  | 
713  |  |              */  | 
714  | 6  |             _PyObject_ASSERT((PyObject *)wr, wr->wr_object == op);  | 
715  | 6  |             _PyWeakref_ClearRef(wr);  | 
716  | 6  |             _PyObject_ASSERT((PyObject *)wr, wr->wr_object == Py_None);  | 
717  | 6  |             if (wr->wr_callback == NULL) { | 
718  |  |                 /* no callback */  | 
719  | 6  |                 continue;  | 
720  | 6  |             }  | 
721  |  |  | 
722  |  |             /* Headache time.  `op` is going away, and is weakly referenced by  | 
723  |  |              * `wr`, which has a callback.  Should the callback be invoked?  If wr  | 
724  |  |              * is also trash, no:  | 
725  |  |              *  | 
726  |  |              * 1. There's no need to call it.  The object and the weakref are  | 
727  |  |              *    both going away, so it's legitimate to pretend the weakref is  | 
728  |  |              *    going away first.  The user has to ensure a weakref outlives its  | 
729  |  |              *    referent if they want a guarantee that the wr callback will get  | 
730  |  |              *    invoked.  | 
731  |  |              *  | 
732  |  |              * 2. It may be catastrophic to call it.  If the callback is also in  | 
733  |  |              *    cyclic trash (CT), then although the CT is unreachable from  | 
734  |  |              *    outside the current generation, CT may be reachable from the  | 
735  |  |              *    callback.  Then the callback could resurrect insane objects.  | 
736  |  |              *  | 
737  |  |              * Since the callback is never needed and may be unsafe in this case,  | 
738  |  |              * wr is simply left in the unreachable set.  Note that because we  | 
739  |  |              * already called _PyWeakref_ClearRef(wr), its callback will never  | 
740  |  |              * trigger.  | 
741  |  |              *  | 
742  |  |              * OTOH, if wr isn't part of CT, we should invoke the callback:  the  | 
743  |  |              * weakref outlived the trash.  Note that since wr isn't CT in this  | 
744  |  |              * case, its callback can't be CT either -- wr acted as an external  | 
745  |  |              * root to this generation, and therefore its callback did too.  So  | 
746  |  |              * nothing in CT is reachable from the callback either, so it's hard  | 
747  |  |              * to imagine how calling it later could create a problem for us.  wr  | 
748  |  |              * is moved to wrcb_to_call in this case.  | 
749  |  |              */  | 
750  | 0  |             if (gc_is_collecting(AS_GC(wr))) { | 
751  |  |                 /* it should already have been cleared above */  | 
752  | 0  |                 assert(wr->wr_object == Py_None);  | 
753  | 0  |                 continue;  | 
754  | 0  |             }  | 
755  |  |  | 
756  |  |             /* Create a new reference so that wr can't go away  | 
757  |  |              * before we can process it again.  | 
758  |  |              */  | 
759  | 0  |             Py_INCREF(wr);  | 
760  |  |  | 
761  |  |             /* Move wr to wrcb_to_call, for the next pass. */  | 
762  | 0  |             wrasgc = AS_GC(wr);  | 
763  | 0  |             assert(wrasgc != next); /* wrasgc is reachable, but  | 
764  |  |                                        next isn't, so they can't  | 
765  |  |                                        be the same */  | 
766  | 0  |             gc_list_move(wrasgc, &wrcb_to_call);  | 
767  | 0  |         }  | 
768  | 47  |     }  | 
769  |  |  | 
770  |  |     /* Invoke the callbacks we decided to honor.  It's safe to invoke them  | 
771  |  |      * because they can't reference unreachable objects.  | 
772  |  |      */  | 
773  | 134  |     while (! gc_list_is_empty(&wrcb_to_call)) { | 
774  | 0  |         PyObject *temp;  | 
775  | 0  |         PyObject *callback;  | 
776  |  |  | 
777  | 0  |         gc = (PyGC_Head*)wrcb_to_call._gc_next;  | 
778  | 0  |         op = FROM_GC(gc);  | 
779  | 0  |         _PyObject_ASSERT(op, PyWeakref_Check(op));  | 
780  | 0  |         wr = (PyWeakReference *)op;  | 
781  | 0  |         callback = wr->wr_callback;  | 
782  | 0  |         _PyObject_ASSERT(op, callback != NULL);  | 
783  |  |  | 
784  |  |         /* copy-paste of weakrefobject.c's handle_callback() */  | 
785  | 0  |         temp = PyObject_CallFunctionObjArgs(callback, wr, NULL);  | 
786  | 0  |         if (temp == NULL)  | 
787  | 0  |             PyErr_WriteUnraisable(callback);  | 
788  | 0  |         else  | 
789  | 0  |             Py_DECREF(temp);  | 
790  |  |  | 
791  |  |         /* Give up the reference we created in the first pass.  When  | 
792  |  |          * op's refcount hits 0 (which it may or may not do right now),  | 
793  |  |          * op's tp_dealloc will decref op->wr_callback too.  Note  | 
794  |  |          * that the refcount probably will hit 0 now, and because this  | 
795  |  |          * weakref was reachable to begin with, gc didn't already  | 
796  |  |          * add it to its count of freed objects.  Example:  a reachable  | 
797  |  |          * weak value dict maps some key to this reachable weakref.  | 
798  |  |          * The callback removes this key->weakref mapping from the  | 
799  |  |          * dict, leaving no other references to the weakref (excepting  | 
800  |  |          * ours).  | 
801  |  |          */  | 
802  | 0  |         Py_DECREF(op);  | 
803  | 0  |         if (wrcb_to_call._gc_next == (uintptr_t)gc) { | 
804  |  |             /* object is still alive -- move it */  | 
805  | 0  |             gc_list_move(gc, old);  | 
806  | 0  |         }  | 
807  | 0  |         else { | 
808  | 0  |             ++num_freed;  | 
809  | 0  |         }  | 
810  | 0  |     }  | 
811  |  |  | 
812  | 134  |     return num_freed;  | 
813  | 134  | }  | 
814  |  |  | 
815  |  | static void  | 
816  |  | debug_cycle(const char *msg, PyObject *op)  | 
817  | 0  | { | 
818  | 0  |     PySys_FormatStderr("gc: %s <%s %p>\n", | 
819  | 0  |                        msg, Py_TYPE(op)->tp_name, op);  | 
820  | 0  | }  | 
821  |  |  | 
822  |  | /* Handle uncollectable garbage (cycles with tp_del slots, and stuff reachable  | 
823  |  |  * only from such cycles).  | 
824  |  |  * If DEBUG_SAVEALL, all objects in finalizers are appended to the module  | 
825  |  |  * garbage list (a Python list), else only the objects in finalizers with  | 
826  |  |  * __del__ methods are appended to garbage.  All objects in finalizers are  | 
827  |  |  * merged into the old list regardless.  | 
828  |  |  */  | 
829  |  | static void  | 
830  |  | handle_legacy_finalizers(struct _gc_runtime_state *state,  | 
831  |  |                          PyGC_Head *finalizers, PyGC_Head *old)  | 
832  | 134  | { | 
833  | 134  |     assert(!PyErr_Occurred());  | 
834  |  |  | 
835  | 134  |     PyGC_Head *gc = GC_NEXT(finalizers);  | 
836  | 134  |     if (state->garbage == NULL) { | 
837  | 14  |         state->garbage = PyList_New(0);  | 
838  | 14  |         if (state->garbage == NULL)  | 
839  | 0  |             Py_FatalError("gc couldn't create gc.garbage list"); | 
840  | 14  |     }  | 
841  | 134  |     for (; gc != finalizers; gc = GC_NEXT(gc)) { | 
842  | 0  |         PyObject *op = FROM_GC(gc);  | 
843  |  |  | 
844  | 0  |         if ((state->debug & DEBUG_SAVEALL) || has_legacy_finalizer(op)) { | 
845  | 0  |             if (PyList_Append(state->garbage, op) < 0) { | 
846  | 0  |                 PyErr_Clear();  | 
847  | 0  |                 break;  | 
848  | 0  |             }  | 
849  | 0  |         }  | 
850  | 0  |     }  | 
851  |  |  | 
852  | 134  |     gc_list_merge(finalizers, old);  | 
853  | 134  | }  | 
854  |  |  | 
855  |  | /* Run first-time finalizers (if any) on all the objects in collectable.  | 
856  |  |  * Note that this may remove some (or even all) of the objects from the  | 
857  |  |  * list, due to refcounts falling to 0.  | 
858  |  |  */  | 
859  |  | static void  | 
860  |  | finalize_garbage(PyGC_Head *collectable)  | 
861  | 134  | { | 
862  | 134  |     destructor finalize;  | 
863  | 134  |     PyGC_Head seen;  | 
864  |  |  | 
865  |  |     /* While we're going through the loop, `finalize(op)` may cause op, or  | 
866  |  |      * other objects, to be reclaimed via refcounts falling to zero.  So  | 
867  |  |      * there's little we can rely on about the structure of the input  | 
868  |  |      * `collectable` list across iterations.  For safety, we always take the  | 
869  |  |      * first object in that list and move it to a temporary `seen` list.  | 
870  |  |      * If objects vanish from the `collectable` and `seen` lists we don't  | 
871  |  |      * care.  | 
872  |  |      */  | 
873  | 134  |     gc_list_init(&seen);  | 
874  |  |  | 
875  | 230  |     while (!gc_list_is_empty(collectable)) { | 
876  | 96  |         PyGC_Head *gc = GC_NEXT(collectable);  | 
877  | 96  |         PyObject *op = FROM_GC(gc);  | 
878  | 96  |         gc_list_move(gc, &seen);  | 
879  | 96  |         if (!_PyGCHead_FINALIZED(gc) &&  | 
880  | 96  |                 (finalize = Py_TYPE(op)->tp_finalize) != NULL) { | 
881  | 0  |             _PyGCHead_SET_FINALIZED(gc);  | 
882  | 0  |             Py_INCREF(op);  | 
883  | 0  |             finalize(op);  | 
884  | 0  |             assert(!PyErr_Occurred());  | 
885  | 0  |             Py_DECREF(op);  | 
886  | 0  |         }  | 
887  | 96  |     }  | 
888  | 134  |     gc_list_merge(&seen, collectable);  | 
889  | 134  | }  | 
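/* The move-to-seen pattern above generalizes: when processing an element
 * may mutate the container, detach the element into a private list before
 * touching it, so the loop never relies on a saved next pointer that the
 * processing step might invalidate.  A compact sketch with invented names
 * and a singly linked list: */

#include <stdio.h>
#include <stdlib.h>

typedef struct node {
    int id;
    struct node *next;
} node;

/* Detach the first node; the caller never caches a "next" pointer across
 * the processing step, since processing may remove other nodes. */
static node *detach_first(node **list)
{
    node *n = *list;
    if (n != NULL)
        *list = n->next;
    return n;
}

static void process(node *n)
{
    /* Stand-in for tp_finalize: in the real collector this may run
     * arbitrary Python code and drop objects from the pending list. */
    printf("finalizing node %d\n", n->id);
}

int main(void)
{
    /* Build the pending list 1 -> 2 -> 3. */
    node *pending = NULL, *seen = NULL, *n;
    for (int i = 3; i >= 1; i--) {
        n = malloc(sizeof(node));
        if (n == NULL)
            return 1;
        n->id = i;
        n->next = pending;
        pending = n;
    }

    /* Take one element at a time; move it into `seen` before processing. */
    while ((n = detach_first(&pending)) != NULL) {
        n->next = seen;
        seen = n;
        process(n);
    }

    while ((n = detach_first(&seen)) != NULL)
        free(n);
    return 0;
}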
890  |  |  | 
891  |  | /* Walk the collectable list and check that they are really unreachable  | 
892  |  |    from the outside (some objects could have been resurrected by a  | 
893  |  |    finalizer). */  | 
894  |  | static int  | 
895  |  | check_garbage(PyGC_Head *collectable)  | 
896  | 134  | { | 
897  | 134  |     int ret = 0;  | 
898  | 134  |     PyGC_Head *gc;  | 
899  | 230  |     for (gc = GC_NEXT(collectable); gc != collectable; gc = GC_NEXT(gc)) { | 
900  |  |         // Store the refcount in gc_refs again, temporarily breaking _gc_prev.  | 
901  | 96  |         gc_set_refs(gc, Py_REFCNT(FROM_GC(gc)));  | 
902  | 96  |         _PyObject_ASSERT(FROM_GC(gc), gc_get_refs(gc) != 0);  | 
903  | 96  |     }  | 
904  | 134  |     subtract_refs(collectable);  | 
905  | 134  |     PyGC_Head *prev = collectable;  | 
906  | 230  |     for (gc = GC_NEXT(collectable); gc != collectable; gc = GC_NEXT(gc)) { | 
907  | 96  |         _PyObject_ASSERT_WITH_MSG(FROM_GC(gc),  | 
908  | 96  |                                   gc_get_refs(gc) >= 0,  | 
909  | 96  |                                   "refcount is too small");  | 
910  | 96  |         if (gc_get_refs(gc) != 0) { | 
911  | 0  |             ret = -1;  | 
912  | 0  |         }  | 
913  |  |         // Restore gc_prev here.  | 
914  | 96  |         _PyGCHead_SET_PREV(gc, prev);  | 
915  | 96  |         gc_clear_collecting(gc);  | 
916  | 96  |         prev = gc;  | 
917  | 96  |     }  | 
918  | 134  |     return ret;  | 
919  | 134  | }  | 
920  |  |  | 
921  |  | /* Break reference cycles by clearing the containers involved.  This is  | 
922  |  |  * tricky business as the lists can be changing and we don't know which  | 
923  |  |  * objects may be freed.  It is possible I screwed something up here.  | 
924  |  |  */  | 
925  |  | static void  | 
926  |  | delete_garbage(struct _gc_runtime_state *state,  | 
927  |  |                PyGC_Head *collectable, PyGC_Head *old)  | 
928  | 134  | { | 
929  | 134  |     assert(!PyErr_Occurred());  | 
930  |  |  | 
931  | 205  |     while (!gc_list_is_empty(collectable)) { | 
932  | 71  |         PyGC_Head *gc = GC_NEXT(collectable);  | 
933  | 71  |         PyObject *op = FROM_GC(gc);  | 
934  |  |  | 
935  | 71  |         _PyObject_ASSERT_WITH_MSG(op, Py_REFCNT(op) > 0,  | 
936  | 71  |                                   "refcount is too small");  | 
937  |  |  | 
938  | 71  |         if (state->debug & DEBUG_SAVEALL) { | 
939  | 0  |             assert(state->garbage != NULL);  | 
940  | 0  |             if (PyList_Append(state->garbage, op) < 0) { | 
941  | 0  |                 PyErr_Clear();  | 
942  | 0  |             }  | 
943  | 0  |         }  | 
944  | 71  |         else { | 
945  | 71  |             inquiry clear;  | 
946  | 71  |             if ((clear = Py_TYPE(op)->tp_clear) != NULL) { | 
947  | 59  |                 Py_INCREF(op);  | 
948  | 59  |                 (void) clear(op);  | 
949  | 59  |                 if (PyErr_Occurred()) { | 
950  | 0  |                     _PyErr_WriteUnraisableMsg("in tp_clear of", | 
951  | 0  |                                               (PyObject*)Py_TYPE(op));  | 
952  | 0  |                 }  | 
953  | 59  |                 Py_DECREF(op);  | 
954  | 59  |             }  | 
955  | 71  |         }  | 
956  | 71  |         if (GC_NEXT(collectable) == gc) { | 
957  |  |             /* object is still alive, move it, it may die later */  | 
958  | 65  |             gc_list_move(gc, old);  | 
959  | 65  |         }  | 
960  | 71  |     }  | 
961  | 134  | }  | 
962  |  |  | 
963  |  | /* Clear all free lists  | 
964  |  |  * All free lists are cleared during the collection of the highest generation.  | 
965  |  |  * Allocated items in the free list may keep a pymalloc arena occupied.  | 
966  |  |  * Clearing the free lists may give back memory to the OS earlier.  | 
967  |  |  */  | 
968  |  | static void  | 
969  |  | clear_freelists(void)  | 
970  | 0  | { | 
971  | 0  |     (void)PyMethod_ClearFreeList();  | 
972  | 0  |     (void)PyFrame_ClearFreeList();  | 
973  | 0  |     (void)PyCFunction_ClearFreeList();  | 
974  | 0  |     (void)PyTuple_ClearFreeList();  | 
975  | 0  |     (void)PyUnicode_ClearFreeList();  | 
976  | 0  |     (void)PyFloat_ClearFreeList();  | 
977  | 0  |     (void)PyList_ClearFreeList();  | 
978  | 0  |     (void)PyDict_ClearFreeList();  | 
979  | 0  |     (void)PySet_ClearFreeList();  | 
980  | 0  |     (void)PyAsyncGen_ClearFreeLists();  | 
981  | 0  |     (void)PyContext_ClearFreeList();  | 
982  | 0  | }  | 
983  |  |  | 
984  |  | // Show stats for objects in each generation.  | 
985  |  | static void  | 
986  |  | show_stats_each_generations(struct _gc_runtime_state *state)  | 
987  | 0  | { | 
988  | 0  |     char buf[100];  | 
989  | 0  |     size_t pos = 0;  | 
990  |  |  | 
991  | 0  |     for (int i = 0; i < NUM_GENERATIONS && pos < sizeof(buf); i++) { | 
992  | 0  |         pos += PyOS_snprintf(buf+pos, sizeof(buf)-pos,  | 
993  | 0  |                              " %"PY_FORMAT_SIZE_T"d",  | 
994  | 0  |                              gc_list_size(GEN_HEAD(state, i)));  | 
995  | 0  |     }  | 
996  |  |  | 
997  | 0  |     PySys_FormatStderr(  | 
998  | 0  |         "gc: objects in each generation:%s\n"  | 
999  | 0  |         "gc: objects in permanent generation: %zd\n",  | 
1000  | 0  |         buf, gc_list_size(&state->permanent_generation.head));  | 
1001  | 0  | }  | 
1002  |  |  | 
1003  |  | /* This is the main function.  Read this to understand how the  | 
1004  |  |  * collection process works. */  | 
1005  |  | static Py_ssize_t  | 
1006  |  | collect(struct _gc_runtime_state *state, int generation,  | 
1007  |  |         Py_ssize_t *n_collected, Py_ssize_t *n_uncollectable, int nofail)  | 
1008  | 134  | { | 
1009  | 134  |     int i;  | 
1010  | 134  |     Py_ssize_t m = 0; /* # objects collected */  | 
1011  | 134  |     Py_ssize_t n = 0; /* # unreachable objects that couldn't be collected */  | 
1012  | 134  |     PyGC_Head *young; /* the generation we are examining */  | 
1013  | 134  |     PyGC_Head *old; /* next older generation */  | 
1014  | 134  |     PyGC_Head unreachable; /* non-problematic unreachable trash */  | 
1015  | 134  |     PyGC_Head finalizers;  /* objects with, & reachable from, __del__ */  | 
1016  | 134  |     PyGC_Head *gc;  | 
1017  | 134  |     _PyTime_t t1 = 0;   /* initialize to prevent a compiler warning */  | 
1018  |  |  | 
1019  | 134  |     if (state->debug & DEBUG_STATS) { | 
1020  | 0  |         PySys_WriteStderr("gc: collecting generation %d...\n", generation); | 
1021  | 0  |         show_stats_each_generations(state);  | 
1022  | 0  |         t1 = _PyTime_GetMonotonicClock();  | 
1023  | 0  |     }  | 
1024  |  |  | 
1025  | 134  |     if (PyDTrace_GC_START_ENABLED())  | 
1026  | 0  |         PyDTrace_GC_START(generation);  | 
1027  |  |  | 
1028  |  |     /* update collection and allocation counters */  | 
1029  | 134  |     if (generation+1 < NUM_GENERATIONS)  | 
1030  | 134  |         state->generations[generation+1].count += 1;  | 
1031  | 269  |     for (i = 0; i <= generation; i++)  | 
1032  | 135  |         state->generations[i].count = 0;  | 
1033  |  |  | 
1034  |  |     /* merge younger generations with one we are currently collecting */  | 
1035  | 135  |     for (i = 0; i < generation; i++) { | 
1036  | 1  |         gc_list_merge(GEN_HEAD(state, i), GEN_HEAD(state, generation));  | 
1037  | 1  |     }  | 
1038  |  |  | 
1039  |  |     /* handy references */  | 
1040  | 134  |     young = GEN_HEAD(state, generation);  | 
1041  | 134  |     if (generation < NUM_GENERATIONS-1)  | 
1042  | 134  |         old = GEN_HEAD(state, generation+1);  | 
1043  | 0  |     else  | 
1044  | 0  |         old = young;  | 
1045  |  |  | 
1046  | 134  |     validate_list(young, 0);  | 
1047  | 134  |     validate_list(old, 0);  | 
1048  |  |     /* Using ob_refcnt and gc_refs, calculate which objects in the  | 
1049  |  |      * container set are reachable from outside the set (i.e., have a  | 
1050  |  |      * refcount greater than 0 when all the references within the  | 
1051  |  |      * set are taken into account).  | 
1052  |  |      */  | 
1053  | 134  |     update_refs(young);  // gc_prev is used for gc_refs  | 
1054  | 134  |     subtract_refs(young);  | 
1055  |  |  | 
1056  |  |     /* Leave everything reachable from outside young in young, and move  | 
1057  |  |      * everything else (in young) to unreachable.  | 
1058  |  |      * NOTE:  This used to move the reachable objects into a reachable  | 
1059  |  |      * set instead.  But most things usually turn out to be reachable,  | 
1060  |  |      * so it's more efficient to move the unreachable things.  | 
1061  |  |      */  | 
1062  | 134  |     gc_list_init(&unreachable);  | 
1063  | 134  |     move_unreachable(young, &unreachable);  // gc_prev is pointer again  | 
1064  | 134  |     validate_list(young, 0);  | 
1065  |  |  | 
1066  | 134  |     untrack_tuples(young);  | 
1067  |  |     /* Move reachable objects to next generation. */  | 
1068  | 134  |     if (young != old) { | 
1069  | 134  |         if (generation == NUM_GENERATIONS - 2) { | 
1070  | 1  |             state->long_lived_pending += gc_list_size(young);  | 
1071  | 1  |         }  | 
1072  | 134  |         gc_list_merge(young, old);  | 
1073  | 134  |     }  | 
1074  | 0  |     else { | 
1075  |  |         /* We only untrack dicts in full collections, to avoid quadratic  | 
1076  |  |            dict build-up. See issue #14775. */  | 
1077  | 0  |         untrack_dicts(young);  | 
1078  | 0  |         state->long_lived_pending = 0;  | 
1079  | 0  |         state->long_lived_total = gc_list_size(young);  | 
1080  | 0  |     }  | 
1081  |  |  | 
1082  |  |     /* All objects in unreachable are trash, but objects reachable from  | 
1083  |  |      * legacy finalizers (e.g. tp_del) can't safely be deleted.  | 
1084  |  |      */  | 
1085  | 134  |     gc_list_init(&finalizers);  | 
1086  |  |     // NEXT_MASK_UNREACHABLE is cleared here.  | 
1087  |  |     // After move_legacy_finalizers(), unreachable is normal list.  | 
1088  | 134  |     move_legacy_finalizers(&unreachable, &finalizers);  | 
1089  |  |     /* finalizers contains the unreachable objects with a legacy finalizer;  | 
1090  |  |      * unreachable objects reachable *from* those are also uncollectable,  | 
1091  |  |      * and we move those into the finalizers list too.  | 
1092  |  |      */  | 
1093  | 134  |     move_legacy_finalizer_reachable(&finalizers);  | 
1094  |  |  | 
1095  | 134  |     validate_list(&finalizers, 0);  | 
1096  | 134  |     validate_list(&unreachable, PREV_MASK_COLLECTING);  | 
1097  |  |  | 
1098  |  |     /* Print debugging information. */  | 
1099  | 134  |     if (state->debug & DEBUG_COLLECTABLE) { | 
1100  | 0  |         for (gc = GC_NEXT(&unreachable); gc != &unreachable; gc = GC_NEXT(gc)) { | 
1101  | 0  |             debug_cycle("collectable", FROM_GC(gc)); | 
1102  | 0  |         }  | 
1103  | 0  |     }  | 
1104  |  |  | 
1105  |  |     /* Clear weakrefs and invoke callbacks as necessary. */  | 
1106  | 134  |     m += handle_weakrefs(&unreachable, old);  | 
1107  |  |  | 
1108  | 134  |     validate_list(old, 0);  | 
1109  | 134  |     validate_list(&unreachable, PREV_MASK_COLLECTING);  | 
1110  |  |  | 
1111  |  |     /* Call tp_finalize on objects which have one. */  | 
1112  | 134  |     finalize_garbage(&unreachable);  | 
1113  |  |  | 
1114  | 134  |     if (check_garbage(&unreachable)) { // clear PREV_MASK_COLLECTING here | 
1115  | 0  |         gc_list_merge(&unreachable, old);  | 
1116  | 0  |     }  | 
1117  | 134  |     else { | 
1118  |  |         /* Call tp_clear on objects in the unreachable set.  This will cause  | 
1119  |  |          * the reference cycles to be broken.  It may also cause some objects  | 
1120  |  |          * in finalizers to be freed.  | 
1121  |  |          */  | 
1122  | 134  |         m += gc_list_size(&unreachable);  | 
1123  | 134  |         delete_garbage(state, &unreachable, old);  | 
1124  | 134  |     }  | 
1125  |  |  | 
1126  |  |     /* Collect statistics on uncollectable objects found and print  | 
1127  |  |      * debugging information. */  | 
1128  | 134  |     for (gc = GC_NEXT(&finalizers); gc != &finalizers; gc = GC_NEXT(gc)) { | 
1129  | 0  |         n++;  | 
1130  | 0  |         if (state->debug & DEBUG_UNCOLLECTABLE)  | 
1131  | 0  |             debug_cycle("uncollectable", FROM_GC(gc)); | 
1132  | 0  |     }  | 
1133  | 134  |     if (state->debug & DEBUG_STATS) { | 
1134  | 0  |         double d = _PyTime_AsSecondsDouble(_PyTime_GetMonotonicClock() - t1);  | 
1135  | 0  |         PySys_WriteStderr(  | 
1136  | 0  |             "gc: done, %" PY_FORMAT_SIZE_T "d unreachable, "  | 
1137  | 0  |             "%" PY_FORMAT_SIZE_T "d uncollectable, %.4fs elapsed\n",  | 
1138  | 0  |             n+m, n, d);  | 
1139  | 0  |     }  | 
1140  |  |  | 
1141  |  |     /* Append instances in the uncollectable set to a Python  | 
1142  |  |      * reachable list of garbage.  The programmer has to deal with  | 
1143  |  |      * this if they insist on creating this type of structure.  | 
1144  |  |      */  | 
1145  | 134  |     handle_legacy_finalizers(state, &finalizers, old);  | 
1146  | 134  |     validate_list(old, 0);  | 
1147  |  |  | 
1148  |  |     /* Clear free list only during the collection of the highest  | 
1149  |  |      * generation */  | 
1150  | 134  |     if (generation == NUM_GENERATIONS-1) { | 
1151  | 0  |         clear_freelists();  | 
1152  | 0  |     }  | 
1153  |  |  | 
1154  | 134  |     if (PyErr_Occurred()) { | 
1155  | 0  |         if (nofail) { | 
1156  | 0  |             PyErr_Clear();  | 
1157  | 0  |         }  | 
1158  | 0  |         else { | 
1159  | 0  |             if (gc_str == NULL)  | 
1160  | 0  |                 gc_str = PyUnicode_FromString("garbage collection"); | 
1161  | 0  |             PyErr_WriteUnraisable(gc_str);  | 
1162  | 0  |             Py_FatalError("unexpected exception during garbage collection"); | 
1163  | 0  |         }  | 
1164  | 0  |     }  | 
1165  |  |  | 
1166  |  |     /* Update stats */  | 
1167  | 134  |     if (n_collected) { | 
1168  | 134  |         *n_collected = m;  | 
1169  | 134  |     }  | 
1170  | 134  |     if (n_uncollectable) { | 
1171  | 134  |         *n_uncollectable = n;  | 
1172  | 134  |     }  | 
1173  |  |  | 
1174  | 134  |     struct gc_generation_stats *stats = &state->generation_stats[generation];  | 
1175  | 134  |     stats->collections++;  | 
1176  | 134  |     stats->collected += m;  | 
1177  | 134  |     stats->uncollectable += n;  | 
1178  |  |  | 
1179  | 134  |     if (PyDTrace_GC_DONE_ENABLED()) { | 
1180  | 0  |         PyDTrace_GC_DONE(n+m);  | 
1181  | 0  |     }  | 
1182  |  |  | 
1183  | 134  |     assert(!PyErr_Occurred());  | 
1184  | 134  |     return n+m;  | 
1185  | 134  | }  | 
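The tail of collect() is observable from pure Python: the returned n+m is what gc.collect() reports, uncollectable objects surface in gc.garbage, and DEBUG_SAVEALL reroutes delete_garbage() into gc.garbage as well. A minimal sketch of CPython 3.8 behavior (exact counts depend on instance __dict__s and other internals, so none are asserted):

    import gc

    gc.disable()
    gc.set_debug(gc.DEBUG_SAVEALL)   # delete_garbage() appends instead of freeing

    class Node:
        pass

    a, b = Node(), Node()
    a.peer, b.peer = b, a            # an unreachable reference cycle
    del a, b

    found = gc.collect()             # the n+m computed by collect() above
    print(found, len(gc.garbage))    # the saved cycle members

    gc.garbage.clear()
    gc.set_debug(0)
    gc.enable()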
1186  |  |  | 
1187  |  | /* Invoke progress callbacks to notify clients that garbage collection  | 
1188  |  |  * is starting or stopping  | 
1189  |  |  */  | 
1190  |  | static void  | 
1191  |  | invoke_gc_callback(struct _gc_runtime_state *state, const char *phase,  | 
1192  |  |                    int generation, Py_ssize_t collected,  | 
1193  |  |                    Py_ssize_t uncollectable)  | 
1194  | 268  | { | 
1195  | 268  |     assert(!PyErr_Occurred());  | 
1196  |  |  | 
1197  |  |     /* we may get called very early */  | 
1198  | 268  |     if (state->callbacks == NULL) { | 
1199  | 268  |         return;  | 
1200  | 268  |     }  | 
1201  |  |  | 
1202  |  |     /* The local variable cannot be rebound, check it for sanity */  | 
1203  | 268  |     assert(PyList_CheckExact(state->callbacks));  | 
1204  | 0  |     PyObject *info = NULL;  | 
1205  | 0  |     if (PyList_GET_SIZE(state->callbacks) != 0) { | 
1206  | 0  |         info = Py_BuildValue("{sisnsn}", | 
1207  | 0  |             "generation", generation,  | 
1208  | 0  |             "collected", collected,  | 
1209  | 0  |             "uncollectable", uncollectable);  | 
1210  | 0  |         if (info == NULL) { | 
1211  | 0  |             PyErr_WriteUnraisable(NULL);  | 
1212  | 0  |             return;  | 
1213  | 0  |         }  | 
1214  | 0  |     }  | 
1215  | 0  |     for (Py_ssize_t i=0; i<PyList_GET_SIZE(state->callbacks); i++) { | 
1216  | 0  |         PyObject *r, *cb = PyList_GET_ITEM(state->callbacks, i);  | 
1217  | 0  |         Py_INCREF(cb); /* make sure cb doesn't go away */  | 
1218  | 0  |         r = PyObject_CallFunction(cb, "sO", phase, info);  | 
1219  | 0  |         if (r == NULL) { | 
1220  | 0  |             PyErr_WriteUnraisable(cb);  | 
1221  | 0  |         }  | 
1222  | 0  |         else { | 
1223  | 0  |             Py_DECREF(r);  | 
1224  | 0  |         }  | 
1225  | 0  |         Py_DECREF(cb);  | 
1226  | 0  |     }  | 
1227  | 0  |     Py_XDECREF(info);  | 
1228  | 0  |     assert(!PyErr_Occurred());  | 
1229  | 0  | }  | 
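invoke_gc_callback() is the engine behind the Python-level gc.callbacks hook: every callable in that list receives the phase string ("start" or "stop") and, when the list is non-empty, the info dict built above. A small sketch:

    import gc

    def on_gc(phase, info):
        # info always carries all three keys; "collected" and
        # "uncollectable" are only meaningful in the "stop" phase
        print(phase, info["generation"], info["collected"], info["uncollectable"])

    gc.callbacks.append(on_gc)
    gc.collect()                 # triggers one "start" and one "stop" call
    gc.callbacks.remove(on_gc)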
1230  |  |  | 
1231  |  | /* Perform garbage collection of a generation and invoke  | 
1232  |  |  * progress callbacks.  | 
1233  |  |  */  | 
1234  |  | static Py_ssize_t  | 
1235  |  | collect_with_callback(struct _gc_runtime_state *state, int generation)  | 
1236  | 134  | { | 
1237  | 134  |     assert(!PyErr_Occurred());  | 
1238  | 134  |     Py_ssize_t result, collected, uncollectable;  | 
1239  | 134  |     invoke_gc_callback(state, "start", generation, 0, 0);  | 
1240  | 134  |     result = collect(state, generation, &collected, &uncollectable, 0);  | 
1241  | 134  |     invoke_gc_callback(state, "stop", generation, collected, uncollectable);  | 
1242  | 134  |     assert(!PyErr_Occurred());  | 
1243  | 134  |     return result;  | 
1244  | 134  | }  | 
1245  |  |  | 
1246  |  | static Py_ssize_t  | 
1247  |  | collect_generations(struct _gc_runtime_state *state)  | 
1248  | 134  | { | 
1249  |  |     /* Find the oldest generation (highest numbered) where the count  | 
1250  |  |      * exceeds the threshold.  Objects in the that generation and  | 
1251  |  |      * generations younger than it will be collected. */  | 
1252  | 134  |     Py_ssize_t n = 0;  | 
1253  | 401  |     for (int i = NUM_GENERATIONS-1; i >= 0; i--) { | 
1254  | 401  |         if (state->generations[i].count > state->generations[i].threshold) { | 
1255  |  |             /* Avoid quadratic performance degradation in number  | 
1256  |  |                of tracked objects. See comments at the beginning  | 
1257  |  |                of this file, and issue #4074.  | 
1258  |  |             */  | 
1259  | 134  |             if (i == NUM_GENERATIONS - 1  | 
1260  | 0  |                 && state->long_lived_pending < state->long_lived_total / 4)  | 
1261  | 0  |                 continue;  | 
1262  | 134  |             n = collect_with_callback(state, i);  | 
1263  | 134  |             break;  | 
1264  | 134  |         }  | 
1265  | 401  |     }  | 
1266  | 134  |     return n;  | 
1267  | 134  | }  | 
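collect_generations() looks at nothing except these counters and thresholds, and both are exposed to Python, so the scheduling decision is easy to inspect; the extra guard on the oldest generation simply postpones full collections until long_lived_pending exceeds a quarter of long_lived_total. A sketch (the printed values are illustrative):

    import gc

    print(gc.get_threshold())   # per-generation thresholds, e.g. (700, 10, 10)
    print(gc.get_count())       # the counts tested above, e.g. (389, 3, 1)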
1268  |  |  | 
1269  |  | #include "clinic/gcmodule.c.h"  | 
1270  |  |  | 
1271  |  | /*[clinic input]  | 
1272  |  | gc.enable  | 
1273  |  |  | 
1274  |  | Enable automatic garbage collection.  | 
1275  |  | [clinic start generated code]*/  | 
1276  |  |  | 
1277  |  | static PyObject *  | 
1278  |  | gc_enable_impl(PyObject *module)  | 
1279  |  | /*[clinic end generated code: output=45a427e9dce9155c input=81ac4940ca579707]*/  | 
1280  | 0  | { | 
1281  | 0  |     _PyRuntime.gc.enabled = 1;  | 
1282  | 0  |     Py_RETURN_NONE;  | 
1283  | 0  | }  | 
1284  |  |  | 
1285  |  | /*[clinic input]  | 
1286  |  | gc.disable  | 
1287  |  |  | 
1288  |  | Disable automatic garbage collection.  | 
1289  |  | [clinic start generated code]*/  | 
1290  |  |  | 
1291  |  | static PyObject *  | 
1292  |  | gc_disable_impl(PyObject *module)  | 
1293  |  | /*[clinic end generated code: output=97d1030f7aa9d279 input=8c2e5a14e800d83b]*/  | 
1294  | 0  | { | 
1295  | 0  |     _PyRuntime.gc.enabled = 0;  | 
1296  | 0  |     Py_RETURN_NONE;  | 
1297  | 0  | }  | 
1298  |  |  | 
1299  |  | /*[clinic input]  | 
1300  |  | gc.isenabled -> bool  | 
1301  |  |  | 
1302  |  | Returns true if automatic garbage collection is enabled.  | 
1303  |  | [clinic start generated code]*/  | 
1304  |  |  | 
1305  |  | static int  | 
1306  |  | gc_isenabled_impl(PyObject *module)  | 
1307  |  | /*[clinic end generated code: output=1874298331c49130 input=30005e0422373b31]*/  | 
1308  | 0  | { | 
1309  | 0  |     return _PyRuntime.gc.enabled;  | 
1310  | 0  | }  | 
1311  |  |  | 
1312  |  | /*[clinic input]  | 
1313  |  | gc.collect -> Py_ssize_t  | 
1314  |  |  | 
1315  |  |     generation: int(c_default="NUM_GENERATIONS - 1") = 2  | 
1316  |  |  | 
1317  |  | Run the garbage collector.  | 
1318  |  |  | 
1319  |  | With no arguments, run a full collection.  The optional argument  | 
1320  |  | may be an integer specifying which generation to collect.  A ValueError  | 
1321  |  | is raised if the generation number is invalid.  | 
1322  |  |  | 
1323  |  | The number of unreachable objects is returned.  | 
1324  |  | [clinic start generated code]*/  | 
1325  |  |  | 
1326  |  | static Py_ssize_t  | 
1327  |  | gc_collect_impl(PyObject *module, int generation)  | 
1328  |  | /*[clinic end generated code: output=b697e633043233c7 input=40720128b682d879]*/  | 
1329  | 0  | { | 
1330  |  |  | 
1331  | 0  |     if (generation < 0 || generation >= NUM_GENERATIONS) { | 
1332  | 0  |         PyErr_SetString(PyExc_ValueError, "invalid generation");  | 
1333  | 0  |         return -1;  | 
1334  | 0  |     }  | 
1335  |  |  | 
1336  | 0  |     struct _gc_runtime_state *state = &_PyRuntime.gc;  | 
1337  | 0  |     Py_ssize_t n;  | 
1338  | 0  |     if (state->collecting) { | 
1339  |  |         /* already collecting, don't do anything */  | 
1340  | 0  |         n = 0;  | 
1341  | 0  |     }  | 
1342  | 0  |     else { | 
1343  | 0  |         state->collecting = 1;  | 
1344  | 0  |         n = collect_with_callback(state, generation);  | 
1345  | 0  |         state->collecting = 0;  | 
1346  | 0  |     }  | 
1347  | 0  |     return n;  | 
1348  | 0  | }  | 
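At the Python level, gc_collect_impl() is simply gc.collect(); the range check above is what raises ValueError. Sketch:

    import gc

    gc.collect()       # full collection, generation 2 (the default)
    gc.collect(0)      # youngest generation only
    try:
        gc.collect(3)  # NUM_GENERATIONS is 3, so this is out of range
    except ValueError as exc:
        print(exc)     # "invalid generation"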
1349  |  |  | 
1350  |  | /*[clinic input]  | 
1351  |  | gc.set_debug  | 
1352  |  |  | 
1353  |  |     flags: int  | 
1354  |  |         An integer that can have the following bits turned on:  | 
1355  |  |           DEBUG_STATS - Print statistics during collection.  | 
1356  |  |           DEBUG_COLLECTABLE - Print collectable objects found.  | 
1357  |  |           DEBUG_UNCOLLECTABLE - Print unreachable but uncollectable objects  | 
1358  |  |             found.  | 
1359  |  |           DEBUG_SAVEALL - Save objects to gc.garbage rather than freeing them.  | 
1360  |  |           DEBUG_LEAK - Debug leaking programs (everything but STATS).  | 
1361  |  |     /  | 
1362  |  |  | 
1363  |  | Set the garbage collection debugging flags.  | 
1364  |  |  | 
1365  |  | Debugging information is written to sys.stderr.  | 
1366  |  | [clinic start generated code]*/  | 
1367  |  |  | 
1368  |  | static PyObject *  | 
1369  |  | gc_set_debug_impl(PyObject *module, int flags)  | 
1370  |  | /*[clinic end generated code: output=7c8366575486b228 input=5e5ce15e84fbed15]*/  | 
1371  | 0  | { | 
1372  | 0  |     _PyRuntime.gc.debug = flags;  | 
1373  |  |  | 
1374  | 0  |     Py_RETURN_NONE;  | 
1375  | 0  | }  | 
1376  |  |  | 
1377  |  | /*[clinic input]  | 
1378  |  | gc.get_debug -> int  | 
1379  |  |  | 
1380  |  | Get the garbage collection debugging flags.  | 
1381  |  | [clinic start generated code]*/  | 
1382  |  |  | 
1383  |  | static int  | 
1384  |  | gc_get_debug_impl(PyObject *module)  | 
1385  |  | /*[clinic end generated code: output=91242f3506cd1e50 input=91a101e1c3b98366]*/  | 
1386  | 0  | { | 
1387  | 0  |     return _PyRuntime.gc.debug;  | 
1388  | 0  | }  | 
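set_debug() and get_debug() just store and return the flag word, so individual bits can be OR-ed together and read back unchanged. Sketch:

    import gc

    gc.set_debug(gc.DEBUG_COLLECTABLE | gc.DEBUG_UNCOLLECTABLE)
    assert gc.get_debug() == gc.DEBUG_COLLECTABLE | gc.DEBUG_UNCOLLECTABLE
    gc.set_debug(0)   # turn debug output back off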
1389  |  |  | 
1390  |  | PyDoc_STRVAR(gc_set_thresh__doc__,  | 
1391  |  | "set_threshold(threshold0, [threshold1, threshold2]) -> None\n"  | 
1392  |  | "\n"  | 
1393  |  | "Sets the collection thresholds.  Setting threshold0 to zero disables\n"  | 
1394  |  | "collection.\n");  | 
1395  |  |  | 
1396  |  | static PyObject *  | 
1397  |  | gc_set_threshold(PyObject *self, PyObject *args)  | 
1398  | 0  | { | 
1399  | 0  |     struct _gc_runtime_state *state = &_PyRuntime.gc;  | 
1400  | 0  |     if (!PyArg_ParseTuple(args, "i|ii:set_threshold",  | 
1401  | 0  |                           &state->generations[0].threshold,  | 
1402  | 0  |                           &state->generations[1].threshold,  | 
1403  | 0  |                           &state->generations[2].threshold))  | 
1404  | 0  |         return NULL;  | 
1405  | 0  |     for (int i = 3; i < NUM_GENERATIONS; i++) { | 
1406  |  |         /* generations higher than 2 get the same threshold */  | 
1407  | 0  |         state->generations[i].threshold = state->generations[2].threshold;  | 
1408  | 0  |     }  | 
1409  | 0  |     Py_RETURN_NONE;  | 
1410  | 0  | }  | 
1411  |  |  | 
1412  |  | /*[clinic input]  | 
1413  |  | gc.get_threshold  | 
1414  |  |  | 
1415  |  | Return the current collection thresholds.  | 
1416  |  | [clinic start generated code]*/  | 
1417  |  |  | 
1418  |  | static PyObject *  | 
1419  |  | gc_get_threshold_impl(PyObject *module)  | 
1420  |  | /*[clinic end generated code: output=7902bc9f41ecbbd8 input=286d79918034d6e6]*/  | 
1421  | 0  | { | 
1422  | 0  |     struct _gc_runtime_state *state = &_PyRuntime.gc;  | 
1423  | 0  |     return Py_BuildValue("(iii)", | 
1424  | 0  |                          state->generations[0].threshold,  | 
1425  | 0  |                          state->generations[1].threshold,  | 
1426  | 0  |                          state->generations[2].threshold);  | 
1427  | 0  | }  | 
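Because gc_set_threshold() parses its arguments with "i|ii", omitted thresholds keep their previous values; and, per the docstring, a zero threshold0 disables automatic collection. Sketch:

    import gc

    gc.set_threshold(700, 10, 10)   # the default values
    gc.set_threshold(0)             # threshold0 == 0: automatic collection off
    print(gc.get_threshold())       # (0, 10, 10) -- thresholds 1 and 2 untouched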
1428  |  |  | 
1429  |  | /*[clinic input]  | 
1430  |  | gc.get_count  | 
1431  |  |  | 
1432  |  | Return a three-tuple of the current collection counts.  | 
1433  |  | [clinic start generated code]*/  | 
1434  |  |  | 
1435  |  | static PyObject *  | 
1436  |  | gc_get_count_impl(PyObject *module)  | 
1437  |  | /*[clinic end generated code: output=354012e67b16398f input=a392794a08251751]*/  | 
1438  | 0  | { | 
1439  | 0  |     struct _gc_runtime_state *state = &_PyRuntime.gc;  | 
1440  | 0  |     return Py_BuildValue("(iii)", | 
1441  | 0  |                          state->generations[0].count,  | 
1442  | 0  |                          state->generations[1].count,  | 
1443  | 0  |                          state->generations[2].count);  | 
1444  | 0  | }  | 
1445  |  |  | 
1446  |  | static int  | 
1447  |  | referrersvisit(PyObject* obj, PyObject *objs)  | 
1448  | 0  | { | 
1449  | 0  |     Py_ssize_t i;  | 
1450  | 0  |     for (i = 0; i < PyTuple_GET_SIZE(objs); i++)  | 
1451  | 0  |         if (PyTuple_GET_ITEM(objs, i) == obj)  | 
1452  | 0  |             return 1;  | 
1453  | 0  |     return 0;  | 
1454  | 0  | }  | 
1455  |  |  | 
1456  |  | static int  | 
1457  |  | gc_referrers_for(PyObject *objs, PyGC_Head *list, PyObject *resultlist)  | 
1458  | 0  | { | 
1459  | 0  |     PyGC_Head *gc;  | 
1460  | 0  |     PyObject *obj;  | 
1461  | 0  |     traverseproc traverse;  | 
1462  | 0  |     for (gc = GC_NEXT(list); gc != list; gc = GC_NEXT(gc)) { | 
1463  | 0  |         obj = FROM_GC(gc);  | 
1464  | 0  |         traverse = Py_TYPE(obj)->tp_traverse;  | 
1465  | 0  |         if (obj == objs || obj == resultlist)  | 
1466  | 0  |             continue;  | 
1467  | 0  |         if (traverse(obj, (visitproc)referrersvisit, objs)) { | 
1468  | 0  |             if (PyList_Append(resultlist, obj) < 0)  | 
1469  | 0  |                 return 0; /* error */  | 
1470  | 0  |         }  | 
1471  | 0  |     }  | 
1472  | 0  |     return 1; /* no error */  | 
1473  | 0  | }  | 
1474  |  |  | 
1475  |  | PyDoc_STRVAR(gc_get_referrers__doc__,  | 
1476  |  | "get_referrers(*objs) -> list\n\  | 
1477  |  | Return the list of objects that directly refer to any of objs.");  | 
1478  |  |  | 
1479  |  | static PyObject *  | 
1480  |  | gc_get_referrers(PyObject *self, PyObject *args)  | 
1481  | 0  | { | 
1482  | 0  |     int i;  | 
1483  | 0  |     PyObject *result = PyList_New(0);  | 
1484  | 0  |     if (!result) return NULL;  | 
1485  |  |  | 
1486  | 0  |     struct _gc_runtime_state *state = &_PyRuntime.gc;  | 
1487  | 0  |     for (i = 0; i < NUM_GENERATIONS; i++) { | 
1488  | 0  |         if (!(gc_referrers_for(args, GEN_HEAD(state, i), result))) { | 
1489  | 0  |             Py_DECREF(result);  | 
1490  | 0  |             return NULL;  | 
1491  | 0  |         }  | 
1492  | 0  |     }  | 
1493  | 0  |     return result;  | 
1494  | 0  | }  | 
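gc_referrers_for() asks each tracked object's tp_traverse whether it visits one of the arguments, so get_referrers() reports only direct, GC-visible referrers. Sketch:

    import gc

    item = []
    holder = {"key": item}
    # holder's tp_traverse visits item, so it appears among the referrers
    print(any(r is holder for r in gc.get_referrers(item)))   # True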
1495  |  |  | 
1496  |  | /* Append obj to list; return true if error (out of memory), false if OK. */  | 
1497  |  | static int  | 
1498  |  | referentsvisit(PyObject *obj, PyObject *list)  | 
1499  | 0  | { | 
1500  | 0  |     return PyList_Append(list, obj) < 0;  | 
1501  | 0  | }  | 
1502  |  |  | 
1503  |  | PyDoc_STRVAR(gc_get_referents__doc__,  | 
1504  |  | "get_referents(*objs) -> list\n\  | 
1505  |  | Return the list of objects that are directly referred to by objs.");  | 
1506  |  |  | 
1507  |  | static PyObject *  | 
1508  |  | gc_get_referents(PyObject *self, PyObject *args)  | 
1509  | 0  | { | 
1510  | 0  |     Py_ssize_t i;  | 
1511  | 0  |     PyObject *result = PyList_New(0);  | 
1512  |  |  | 
1513  | 0  |     if (result == NULL)  | 
1514  | 0  |         return NULL;  | 
1515  |  |  | 
1516  | 0  |     for (i = 0; i < PyTuple_GET_SIZE(args); i++) { | 
1517  | 0  |         traverseproc traverse;  | 
1518  | 0  |         PyObject *obj = PyTuple_GET_ITEM(args, i);  | 
1519  |  |  | 
1520  | 0  |         if (! PyObject_IS_GC(obj))  | 
1521  | 0  |             continue;  | 
1522  | 0  |         traverse = Py_TYPE(obj)->tp_traverse;  | 
1523  | 0  |         if (! traverse)  | 
1524  | 0  |             continue;  | 
1525  | 0  |         if (traverse(obj, (visitproc)referentsvisit, result)) { | 
1526  | 0  |             Py_DECREF(result);  | 
1527  | 0  |             return NULL;  | 
1528  | 0  |         }  | 
1529  | 0  |     }  | 
1530  | 0  |     return result;  | 
1531  | 0  | }  | 
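get_referents() goes the other way: it runs each argument's own tp_traverse and collects every object visited, while non-GC arguments are skipped entirely. Sketch:

    import gc

    inner = [1, 2]
    outer = {"k": inner}
    print(inner in gc.get_referents(outer))   # True: dict traversal visits its entries
    print(gc.get_referents(42))               # []: ints are not GC objects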
1532  |  |  | 
1533  |  | /*[clinic input]  | 
1534  |  | gc.get_objects  | 
1535  |  |     generation: Py_ssize_t(accept={int, NoneType}, c_default="-1") = None | 
1536  |  |         Generation to extract the objects from.  | 
1537  |  |  | 
1538  |  | Return a list of objects tracked by the collector (excluding the list returned).  | 
1539  |  |  | 
1540  |  | If generation is not None, return only the objects tracked by the collector  | 
1541  |  | that are in that generation.  | 
1542  |  | [clinic start generated code]*/  | 
1543  |  |  | 
1544  |  | static PyObject *  | 
1545  |  | gc_get_objects_impl(PyObject *module, Py_ssize_t generation)  | 
1546  |  | /*[clinic end generated code: output=48b35fea4ba6cb0e input=ef7da9df9806754c]*/  | 
1547  | 0  | { | 
1548  | 0  |     int i;  | 
1549  | 0  |     PyObject* result;  | 
1550  | 0  |     struct _gc_runtime_state *state = &_PyRuntime.gc;  | 
1551  |  |  | 
1552  | 0  |     result = PyList_New(0);  | 
1553  | 0  |     if (result == NULL) { | 
1554  | 0  |         return NULL;  | 
1555  | 0  |     }  | 
1556  |  |  | 
1557  |  |     /* If generation is passed, we extract only that generation */  | 
1558  | 0  |     if (generation != -1) { | 
1559  | 0  |         if (generation >= NUM_GENERATIONS) { | 
1560  | 0  |             PyErr_Format(PyExc_ValueError,  | 
1561  | 0  |                          "generation parameter must be less than the number of "  | 
1562  | 0  |                          "available generations (%i)",  | 
1563  | 0  |                           NUM_GENERATIONS);  | 
1564  | 0  |             goto error;  | 
1565  | 0  |         }  | 
1566  |  |  | 
1567  | 0  |         if (generation < 0) { | 
1568  | 0  |             PyErr_SetString(PyExc_ValueError,  | 
1569  | 0  |                             "generation parameter cannot be negative");  | 
1570  | 0  |             goto error;  | 
1571  | 0  |         }  | 
1572  |  |  | 
1573  | 0  |         if (append_objects(result, GEN_HEAD(state, generation))) { | 
1574  | 0  |             goto error;  | 
1575  | 0  |         }  | 
1576  |  |  | 
1577  | 0  |         return result;  | 
1578  | 0  |     }  | 
1579  |  |  | 
1580  |  |     /* If generation is not passed or None, get all objects from all generations */  | 
1581  | 0  |     for (i = 0; i < NUM_GENERATIONS; i++) { | 
1582  | 0  |         if (append_objects(result, GEN_HEAD(state, i))) { | 
1583  | 0  |             goto error;  | 
1584  | 0  |         }  | 
1585  | 0  |     }  | 
1586  | 0  |     return result;  | 
1587  |  |  | 
1588  | 0  | error:  | 
1589  | 0  |     Py_DECREF(result);  | 
1590  | 0  |     return NULL;  | 
1591  | 0  | }  | 
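The generation argument (new in 3.8) selects a single generation's list, and out-of-range values take the two error paths above. Sketch:

    import gc

    youngest = gc.get_objects(generation=0)   # generation 0 only
    everything = gc.get_objects()             # all generations merged
    try:
        gc.get_objects(generation=5)          # >= NUM_GENERATIONS
    except ValueError as exc:
        print(exc)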
1592  |  |  | 
1593  |  | /*[clinic input]  | 
1594  |  | gc.get_stats  | 
1595  |  |  | 
1596  |  | Return a list of dictionaries containing per-generation statistics.  | 
1597  |  | [clinic start generated code]*/  | 
1598  |  |  | 
1599  |  | static PyObject *  | 
1600  |  | gc_get_stats_impl(PyObject *module)  | 
1601  |  | /*[clinic end generated code: output=a8ab1d8a5d26f3ab input=1ef4ed9d17b1a470]*/  | 
1602  | 0  | { | 
1603  | 0  |     int i;  | 
1604  | 0  |     struct gc_generation_stats stats[NUM_GENERATIONS], *st;  | 
1605  |  |  | 
1606  |  |     /* To get consistent values despite allocations while constructing  | 
1607  |  |        the result list, we use a snapshot of the running stats. */  | 
1608  | 0  |     struct _gc_runtime_state *state = &_PyRuntime.gc;  | 
1609  | 0  |     for (i = 0; i < NUM_GENERATIONS; i++) { | 
1610  | 0  |         stats[i] = state->generation_stats[i];  | 
1611  | 0  |     }  | 
1612  |  |  | 
1613  | 0  |     PyObject *result = PyList_New(0);  | 
1614  | 0  |     if (result == NULL)  | 
1615  | 0  |         return NULL;  | 
1616  |  |  | 
1617  | 0  |     for (i = 0; i < NUM_GENERATIONS; i++) { | 
1618  | 0  |         PyObject *dict;  | 
1619  | 0  |         st = &stats[i];  | 
1620  | 0  |         dict = Py_BuildValue("{snsnsn}", | 
1621  | 0  |                              "collections", st->collections,  | 
1622  | 0  |                              "collected", st->collected,  | 
1623  | 0  |                              "uncollectable", st->uncollectable  | 
1624  | 0  |                             );  | 
1625  | 0  |         if (dict == NULL)  | 
1626  | 0  |             goto error;  | 
1627  | 0  |         if (PyList_Append(result, dict)) { | 
1628  | 0  |             Py_DECREF(dict);  | 
1629  | 0  |             goto error;  | 
1630  | 0  |         }  | 
1631  | 0  |         Py_DECREF(dict);  | 
1632  | 0  |     }  | 
1633  | 0  |     return result;  | 
1634  |  |  | 
1635  | 0  | error:  | 
1636  | 0  |     Py_XDECREF(result);  | 
1637  | 0  |     return NULL;  | 
1638  | 0  | }  | 
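Since the running stats are snapshotted before the result list is built, the returned dicts stay mutually consistent even though constructing them allocates. Sketch:

    import gc

    for gen, st in enumerate(gc.get_stats()):
        print(gen, st["collections"], st["collected"], st["uncollectable"])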
1639  |  |  | 
1640  |  |  | 
1641  |  | /*[clinic input]  | 
1642  |  | gc.is_tracked  | 
1643  |  |  | 
1644  |  |     obj: object  | 
1645  |  |     /  | 
1646  |  |  | 
1647  |  | Returns true if the object is tracked by the garbage collector.  | 
1648  |  |  | 
1649  |  | Simple atomic objects will return false.  | 
1650  |  | [clinic start generated code]*/  | 
1651  |  |  | 
1652  |  | static PyObject *  | 
1653  |  | gc_is_tracked(PyObject *module, PyObject *obj)  | 
1654  |  | /*[clinic end generated code: output=14f0103423b28e31 input=d83057f170ea2723]*/  | 
1655  | 0  | { | 
1656  | 0  |     PyObject *result;  | 
1657  |  |  | 
1658  | 0  |     if (PyObject_IS_GC(obj) && _PyObject_GC_IS_TRACKED(obj))  | 
1659  | 0  |         result = Py_True;  | 
1660  | 0  |     else  | 
1661  | 0  |         result = Py_False;  | 
1662  | 0  |     Py_INCREF(result);  | 
1663  | 0  |     return result;  | 
1664  | 0  | }  | 
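gc.is_tracked() mirrors the PyObject_IS_GC && _PyObject_GC_IS_TRACKED test above. Note that CPython tracks dicts lazily, so an empty dict reports False. Sketch:

    import gc

    print(gc.is_tracked(42))         # False: atomic, can never be in a cycle
    print(gc.is_tracked([]))         # True: containers are tracked
    print(gc.is_tracked({}))         # False: dicts start out untracked...
    print(gc.is_tracked({"a": []}))  # True: ...until they hold a container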
1665  |  |  | 
1666  |  | /*[clinic input]  | 
1667  |  | gc.freeze  | 
1668  |  |  | 
1669  |  | Freeze all current tracked objects and ignore them for future collections.  | 
1670  |  |  | 
1671  |  | This can be used before a POSIX fork() call to make the gc copy-on-write friendly.  | 
1672  |  | Note: collection before a POSIX fork() call may free pages for future allocation,  | 
1673  |  | which can cause copy-on-write.  | 
1674  |  | [clinic start generated code]*/  | 
1675  |  |  | 
1676  |  | static PyObject *  | 
1677  |  | gc_freeze_impl(PyObject *module)  | 
1678  |  | /*[clinic end generated code: output=502159d9cdc4c139 input=b602b16ac5febbe5]*/  | 
1679  | 0  | { | 
1680  | 0  |     struct _gc_runtime_state *state = &_PyRuntime.gc;  | 
1681  | 0  |     for (int i = 0; i < NUM_GENERATIONS; ++i) { | 
1682  | 0  |         gc_list_merge(GEN_HEAD(state, i), &state->permanent_generation.head);  | 
1683  | 0  |         state->generations[i].count = 0;  | 
1684  | 0  |     }  | 
1685  | 0  |     Py_RETURN_NONE;  | 
1686  | 0  | }  | 
1687  |  |  | 
1688  |  | /*[clinic input]  | 
1689  |  | gc.unfreeze  | 
1690  |  |  | 
1691  |  | Unfreeze all objects in the permanent generation.  | 
1692  |  |  | 
1693  |  | Put all objects in the permanent generation back into the oldest generation.  | 
1694  |  | [clinic start generated code]*/  | 
1695  |  |  | 
1696  |  | static PyObject *  | 
1697  |  | gc_unfreeze_impl(PyObject *module)  | 
1698  |  | /*[clinic end generated code: output=1c15f2043b25e169 input=2dd52b170f4cef6c]*/  | 
1699  | 0  | { | 
1700  | 0  |     struct _gc_runtime_state *state = &_PyRuntime.gc;  | 
1701  | 0  |     gc_list_merge(&state->permanent_generation.head, GEN_HEAD(state, NUM_GENERATIONS-1));  | 
1702  | 0  |     Py_RETURN_NONE;  | 
1703  | 0  | }  | 
1704  |  |  | 
1705  |  | /*[clinic input]  | 
1706  |  | gc.get_freeze_count -> Py_ssize_t  | 
1707  |  |  | 
1708  |  | Return the number of objects in the permanent generation.  | 
1709  |  | [clinic start generated code]*/  | 
1710  |  |  | 
1711  |  | static Py_ssize_t  | 
1712  |  | gc_get_freeze_count_impl(PyObject *module)  | 
1713  |  | /*[clinic end generated code: output=61cbd9f43aa032e1 input=45ffbc65cfe2a6ed]*/  | 
1714  | 0  | { | 
1715  | 0  |     return gc_list_size(&_PyRuntime.gc.permanent_generation.head);  | 
1716  | 0  | }  | 
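Together these three functions expose the permanent generation: freeze() drains every regular generation into it, unfreeze() merges it back into the oldest generation, and get_freeze_count() reports its size. Sketch:

    import gc

    gc.freeze()
    print(gc.get_freeze_count())   # all tracked objects, now exempt from collection
    gc.unfreeze()
    print(gc.get_freeze_count())   # 0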
1717  |  |  | 
1718  |  |  | 
1719  |  | PyDoc_STRVAR(gc__doc__,  | 
1720  |  | "This module provides access to the garbage collector for reference cycles.\n"  | 
1721  |  | "\n"  | 
1722  |  | "enable() -- Enable automatic garbage collection.\n"  | 
1723  |  | "disable() -- Disable automatic garbage collection.\n"  | 
1724  |  | "isenabled() -- Returns true if automatic collection is enabled.\n"  | 
1725  |  | "collect() -- Do a full collection right now.\n"  | 
1726  |  | "get_count() -- Return the current collection counts.\n"  | 
1727  |  | "get_stats() -- Return list of dictionaries containing per-generation stats.\n"  | 
1728  |  | "set_debug() -- Set debugging flags.\n"  | 
1729  |  | "get_debug() -- Get debugging flags.\n"  | 
1730  |  | "set_threshold() -- Set the collection thresholds.\n"  | 
1731  |  | "get_threshold() -- Return the current the collection thresholds.\n"  | 
1732  |  | "get_objects() -- Return a list of all objects tracked by the collector.\n"  | 
1733  |  | "is_tracked() -- Returns true if a given object is tracked.\n"  | 
1734  |  | "get_referrers() -- Return the list of objects that refer to an object.\n"  | 
1735  |  | "get_referents() -- Return the list of objects that an object refers to.\n"  | 
1736  |  | "freeze() -- Freeze all tracked objects and ignore them for future collections.\n"  | 
1737  |  | "unfreeze() -- Unfreeze all objects in the permanent generation.\n"  | 
1738  |  | "get_freeze_count() -- Return the number of objects in the permanent generation.\n");  | 
1739  |  |  | 
1740  |  | static PyMethodDef GcMethods[] = { | 
1741  |  |     GC_ENABLE_METHODDEF  | 
1742  |  |     GC_DISABLE_METHODDEF  | 
1743  |  |     GC_ISENABLED_METHODDEF  | 
1744  |  |     GC_SET_DEBUG_METHODDEF  | 
1745  |  |     GC_GET_DEBUG_METHODDEF  | 
1746  |  |     GC_GET_COUNT_METHODDEF  | 
1747  |  |     {"set_threshold",  gc_set_threshold, METH_VARARGS, gc_set_thresh__doc__}, | 
1748  |  |     GC_GET_THRESHOLD_METHODDEF  | 
1749  |  |     GC_COLLECT_METHODDEF  | 
1750  |  |     GC_GET_OBJECTS_METHODDEF  | 
1751  |  |     GC_GET_STATS_METHODDEF  | 
1752  |  |     GC_IS_TRACKED_METHODDEF  | 
1753  |  |     {"get_referrers",  gc_get_referrers, METH_VARARGS, | 
1754  |  |         gc_get_referrers__doc__},  | 
1755  |  |     {"get_referents",  gc_get_referents, METH_VARARGS, | 
1756  |  |         gc_get_referents__doc__},  | 
1757  |  |     GC_FREEZE_METHODDEF  | 
1758  |  |     GC_UNFREEZE_METHODDEF  | 
1759  |  |     GC_GET_FREEZE_COUNT_METHODDEF  | 
1760  |  |     {NULL,      NULL}           /* Sentinel */ | 
1761  |  | };  | 
1762  |  |  | 
1763  |  | static struct PyModuleDef gcmodule = { | 
1764  |  |     PyModuleDef_HEAD_INIT,  | 
1765  |  |     "gc",              /* m_name */  | 
1766  |  |     gc__doc__,         /* m_doc */  | 
1767  |  |     -1,                /* m_size */  | 
1768  |  |     GcMethods,         /* m_methods */  | 
1769  |  |     NULL,              /* m_reload */  | 
1770  |  |     NULL,              /* m_traverse */  | 
1771  |  |     NULL,              /* m_clear */  | 
1772  |  |     NULL               /* m_free */  | 
1773  |  | };  | 
1774  |  |  | 
1775  |  | PyMODINIT_FUNC  | 
1776  |  | PyInit_gc(void)  | 
1777  | 0  | { | 
1778  | 0  |     PyObject *m;  | 
1779  |  |  | 
1780  | 0  |     m = PyModule_Create(&gcmodule);  | 
1781  |  |  | 
1782  | 0  |     if (m == NULL) { | 
1783  | 0  |         return NULL;  | 
1784  | 0  |     }  | 
1785  |  |  | 
1786  | 0  |     struct _gc_runtime_state *state = &_PyRuntime.gc;  | 
1787  | 0  |     if (state->garbage == NULL) { | 
1788  | 0  |         state->garbage = PyList_New(0);  | 
1789  | 0  |         if (state->garbage == NULL)  | 
1790  | 0  |             return NULL;  | 
1791  | 0  |     }  | 
1792  | 0  |     Py_INCREF(state->garbage);  | 
1793  | 0  |     if (PyModule_AddObject(m, "garbage", state->garbage) < 0)  | 
1794  | 0  |         return NULL;  | 
1795  |  |  | 
1796  | 0  |     if (state->callbacks == NULL) { | 
1797  | 0  |         state->callbacks = PyList_New(0);  | 
1798  | 0  |         if (state->callbacks == NULL)  | 
1799  | 0  |             return NULL;  | 
1800  | 0  |     }  | 
1801  | 0  |     Py_INCREF(state->callbacks);  | 
1802  | 0  |     if (PyModule_AddObject(m, "callbacks", state->callbacks) < 0)  | 
1803  | 0  |         return NULL;  | 
1804  |  |  | 
1805  | 0  | #define ADD_INT(NAME) if (PyModule_AddIntConstant(m, #NAME, NAME) < 0) return NULL  | 
1806  | 0  |     ADD_INT(DEBUG_STATS);  | 
1807  | 0  |     ADD_INT(DEBUG_COLLECTABLE);  | 
1808  | 0  |     ADD_INT(DEBUG_UNCOLLECTABLE);  | 
1809  | 0  |     ADD_INT(DEBUG_SAVEALL);  | 
1810  | 0  |     ADD_INT(DEBUG_LEAK);  | 
1811  | 0  | #undef ADD_INT  | 
1812  | 0  |     return m;  | 
1813  | 0  | }  | 
1814  |  |  | 
1815  |  | /* API to invoke gc.collect() from C */  | 
1816  |  | Py_ssize_t  | 
1817  |  | PyGC_Collect(void)  | 
1818  | 0  | { | 
1819  | 0  |     struct _gc_runtime_state *state = &_PyRuntime.gc;  | 
1820  | 0  |     if (!state->enabled) { | 
1821  | 0  |         return 0;  | 
1822  | 0  |     }  | 
1823  |  |  | 
1824  | 0  |     Py_ssize_t n;  | 
1825  | 0  |     if (state->collecting) { | 
1826  |  |         /* already collecting, don't do anything */  | 
1827  | 0  |         n = 0;  | 
1828  | 0  |     }  | 
1829  | 0  |     else { | 
1830  | 0  |         PyObject *exc, *value, *tb;  | 
1831  | 0  |         state->collecting = 1;  | 
1832  | 0  |         PyErr_Fetch(&exc, &value, &tb);  | 
1833  | 0  |         n = collect_with_callback(state, NUM_GENERATIONS - 1);  | 
1834  | 0  |         PyErr_Restore(exc, value, tb);  | 
1835  | 0  |         state->collecting = 0;  | 
1836  | 0  |     }  | 
1837  |  |  | 
1838  | 0  |     return n;  | 
1839  | 0  | }  | 
1840  |  |  | 
1841  |  | Py_ssize_t  | 
1842  |  | _PyGC_CollectIfEnabled(void)  | 
1843  | 0  | { | 
1844  | 0  |     return PyGC_Collect();  | 
1845  | 0  | }  | 
1846  |  |  | 
1847  |  | Py_ssize_t  | 
1848  |  | _PyGC_CollectNoFail(void)  | 
1849  | 0  | { | 
1850  | 0  |     assert(!PyErr_Occurred());  | 
1851  |  |  | 
1852  | 0  |     struct _gc_runtime_state *state = &_PyRuntime.gc;  | 
1853  | 0  |     Py_ssize_t n;  | 
1854  |  |  | 
1855  |  |     /* Ideally, this function is only called on interpreter shutdown,  | 
1856  |  |        and therefore not recursively.  Unfortunately, when there are daemon  | 
1857  |  |        threads, a daemon thread can start a cyclic garbage collection  | 
1858  |  |        during interpreter shutdown (and then never finish it).  | 
1859  |  |        See http://bugs.python.org/issue8713#msg195178 for an example.  | 
1860  |  |        */  | 
1861  | 0  |     if (state->collecting) { | 
1862  | 0  |         n = 0;  | 
1863  | 0  |     }  | 
1864  | 0  |     else { | 
1865  | 0  |         state->collecting = 1;  | 
1866  | 0  |         n = collect(state, NUM_GENERATIONS - 1, NULL, NULL, 1);  | 
1867  | 0  |         state->collecting = 0;  | 
1868  | 0  |     }  | 
1869  | 0  |     return n;  | 
1870  | 0  | }  | 
1871  |  |  | 
1872  |  | void  | 
1873  |  | _PyGC_DumpShutdownStats(_PyRuntimeState *runtime)  | 
1874  | 0  | { | 
1875  | 0  |     struct _gc_runtime_state *state = &runtime->gc;  | 
1876  | 0  |     if (!(state->debug & DEBUG_SAVEALL)  | 
1877  | 0  |         && state->garbage != NULL && PyList_GET_SIZE(state->garbage) > 0) { | 
1878  | 0  |         const char *message;  | 
1879  | 0  |         if (state->debug & DEBUG_UNCOLLECTABLE)  | 
1880  | 0  |             message = "gc: %zd uncollectable objects at " \  | 
1881  | 0  |                 "shutdown";  | 
1882  | 0  |         else  | 
1883  | 0  |             message = "gc: %zd uncollectable objects at " \  | 
1884  | 0  |                 "shutdown; use gc.set_debug(gc.DEBUG_UNCOLLECTABLE) to list them";  | 
1885  |  |         /* PyErr_WarnFormat does too many things and we are at shutdown;  | 
1886  |  |            the warnings module's dependencies (e.g. linecache) may be gone  | 
1887  |  |            already. */  | 
1888  | 0  |         if (PyErr_WarnExplicitFormat(PyExc_ResourceWarning, "gc", 0,  | 
1889  | 0  |                                      "gc", NULL, message,  | 
1890  | 0  |                                      PyList_GET_SIZE(state->garbage)))  | 
1891  | 0  |             PyErr_WriteUnraisable(NULL);  | 
1892  | 0  |         if (state->debug & DEBUG_UNCOLLECTABLE) { | 
1893  | 0  |             PyObject *repr = NULL, *bytes = NULL;  | 
1894  | 0  |             repr = PyObject_Repr(state->garbage);  | 
1895  | 0  |             if (!repr || !(bytes = PyUnicode_EncodeFSDefault(repr)))  | 
1896  | 0  |                 PyErr_WriteUnraisable(state->garbage);  | 
1897  | 0  |             else { | 
1898  | 0  |                 PySys_WriteStderr(  | 
1899  | 0  |                     "      %s\n",  | 
1900  | 0  |                     PyBytes_AS_STRING(bytes)  | 
1901  | 0  |                     );  | 
1902  | 0  |             }  | 
1903  | 0  |             Py_XDECREF(repr);  | 
1904  | 0  |             Py_XDECREF(bytes);  | 
1905  | 0  |         }  | 
1906  | 0  |     }  | 
1907  | 0  | }  | 
1908  |  |  | 
1909  |  | void  | 
1910  |  | _PyGC_Fini(_PyRuntimeState *runtime)  | 
1911  | 0  | { | 
1912  | 0  |     struct _gc_runtime_state *state = &runtime->gc;  | 
1913  | 0  |     Py_CLEAR(state->garbage);  | 
1914  | 0  |     Py_CLEAR(state->callbacks);  | 
1915  | 0  | }  | 
1916  |  |  | 
1917  |  | /* for debugging */  | 
1918  |  | void  | 
1919  |  | _PyGC_Dump(PyGC_Head *g)  | 
1920  | 0  | { | 
1921  | 0  |     _PyObject_Dump(FROM_GC(g));  | 
1922  | 0  | }  | 
1923  |  |  | 
1924  |  | /* extension modules might be compiled with GC support so these  | 
1925  |  |    functions must always be available */  | 
1926  |  |  | 
1927  |  | void  | 
1928  |  | PyObject_GC_Track(void *op_raw)  | 
1929  | 9.70k  | { | 
1930  | 9.70k  |     PyObject *op = _PyObject_CAST(op_raw);  | 
1931  | 9.70k  |     if (_PyObject_GC_IS_TRACKED(op)) { | 
1932  | 0  |         _PyObject_ASSERT_FAILED_MSG(op,  | 
1933  | 0  |                                     "object already tracked "  | 
1934  | 0  |                                     "by the garbage collector");  | 
1935  | 0  |     }  | 
1936  | 9.70k  |     _PyObject_GC_TRACK(op);  | 
1937  | 9.70k  | }  | 
1938  |  |  | 
1939  |  | void  | 
1940  |  | PyObject_GC_UnTrack(void *op_raw)  | 
1941  | 74.4k  | { | 
1942  | 74.4k  |     PyObject *op = _PyObject_CAST(op_raw);  | 
1943  |  |     /* Obscure:  the Py_TRASHCAN mechanism requires that we be able to  | 
1944  |  |      * call PyObject_GC_UnTrack twice on an object.  | 
1945  |  |      */  | 
1946  | 74.4k  |     if (_PyObject_GC_IS_TRACKED(op)) { | 
1947  | 65.4k  |         _PyObject_GC_UNTRACK(op);  | 
1948  | 65.4k  |     }  | 
1949  | 74.4k  | }  | 
1950  |  |  | 
1951  |  | static PyObject *  | 
1952  |  | _PyObject_GC_Alloc(int use_calloc, size_t basicsize)  | 
1953  | 131k  | { | 
1954  | 131k  |     struct _gc_runtime_state *state = &_PyRuntime.gc;  | 
1955  | 131k  |     PyObject *op;  | 
1956  | 131k  |     PyGC_Head *g;  | 
1957  | 131k  |     size_t size;  | 
1958  | 131k  |     if (basicsize > PY_SSIZE_T_MAX - sizeof(PyGC_Head))  | 
1959  | 0  |         return PyErr_NoMemory();  | 
1960  | 131k  |     size = sizeof(PyGC_Head) + basicsize;  | 
1961  | 131k  |     if (use_calloc)  | 
1962  | 0  |         g = (PyGC_Head *)PyObject_Calloc(1, size);  | 
1963  | 131k  |     else  | 
1964  | 131k  |         g = (PyGC_Head *)PyObject_Malloc(size);  | 
1965  | 131k  |     if (g == NULL)  | 
1966  | 0  |         return PyErr_NoMemory();  | 
1967  | 131k  |     assert(((uintptr_t)g & 3) == 0);  // g must be aligned to a 4-byte boundary  | 
1968  | 131k  |     g->_gc_next = 0;  | 
1969  | 131k  |     g->_gc_prev = 0;  | 
1970  | 131k  |     state->generations[0].count++; /* number of allocated GC objects */  | 
1971  | 131k  |     if (state->generations[0].count > state->generations[0].threshold &&  | 
1972  | 134  |         state->enabled &&  | 
1973  | 134  |         state->generations[0].threshold &&  | 
1974  | 134  |         !state->collecting &&  | 
1975  | 134  |         !PyErr_Occurred()) { | 
1976  | 134  |         state->collecting = 1;  | 
1977  | 134  |         collect_generations(state);  | 
1978  | 134  |         state->collecting = 0;  | 
1979  | 134  |     }  | 
1980  | 131k  |     op = FROM_GC(g);  | 
1981  | 131k  |     return op;  | 
1982  | 131k  | }  | 
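The threshold check at the end of _PyObject_GC_Alloc() is the only trigger for automatic collection, so plain allocation is what drives the collector. A sketch that makes this visible by shrinking threshold0, assuming automatic collection is enabled (the default); the chosen numbers are arbitrary:

    import gc

    gc.set_threshold(100)                   # let generation 0 fill up quickly
    before = gc.get_stats()[0]["collections"]
    junk = [[] for _ in range(1000)]        # ~1000 tracked allocations
    after = gc.get_stats()[0]["collections"]
    print(after > before)                   # True: allocation alone ran the GC
    gc.set_threshold(700, 10, 10)           # restore the defaults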
1983  |  |  | 
1984  |  | PyObject *  | 
1985  |  | _PyObject_GC_Malloc(size_t basicsize)  | 
1986  | 131k  | { | 
1987  | 131k  |     return _PyObject_GC_Alloc(0, basicsize);  | 
1988  | 131k  | }  | 
1989  |  |  | 
1990  |  | PyObject *  | 
1991  |  | _PyObject_GC_Calloc(size_t basicsize)  | 
1992  | 0  | { | 
1993  | 0  |     return _PyObject_GC_Alloc(1, basicsize);  | 
1994  | 0  | }  | 
1995  |  |  | 
1996  |  | PyObject *  | 
1997  |  | _PyObject_GC_New(PyTypeObject *tp)  | 
1998  | 49.9k  | { | 
1999  | 49.9k  |     PyObject *op = _PyObject_GC_Malloc(_PyObject_SIZE(tp));  | 
2000  | 49.9k  |     if (op != NULL)  | 
2001  | 49.9k  |         op = PyObject_INIT(op, tp);  | 
2002  | 49.9k  |     return op;  | 
2003  | 49.9k  | }  | 
2004  |  |  | 
2005  |  | PyVarObject *  | 
2006  |  | _PyObject_GC_NewVar(PyTypeObject *tp, Py_ssize_t nitems)  | 
2007  | 38.4k  | { | 
2008  | 38.4k  |     size_t size;  | 
2009  | 38.4k  |     PyVarObject *op;  | 
2010  |  |  | 
2011  | 38.4k  |     if (nitems < 0) { | 
2012  | 0  |         PyErr_BadInternalCall();  | 
2013  | 0  |         return NULL;  | 
2014  | 0  |     }  | 
2015  | 38.4k  |     size = _PyObject_VAR_SIZE(tp, nitems);  | 
2016  | 38.4k  |     op = (PyVarObject *) _PyObject_GC_Malloc(size);  | 
2017  | 38.4k  |     if (op != NULL)  | 
2018  | 38.4k  |         op = PyObject_INIT_VAR(op, tp, nitems);  | 
2019  | 38.4k  |     return op;  | 
2020  | 38.4k  | }  | 
2021  |  |  | 
2022  |  | PyVarObject *  | 
2023  |  | _PyObject_GC_Resize(PyVarObject *op, Py_ssize_t nitems)  | 
2024  | 158  | { | 
2025  | 158  |     const size_t basicsize = _PyObject_VAR_SIZE(Py_TYPE(op), nitems);  | 
2026  | 158  |     _PyObject_ASSERT((PyObject *)op, !_PyObject_GC_IS_TRACKED(op));  | 
2027  | 158  |     if (basicsize > PY_SSIZE_T_MAX - sizeof(PyGC_Head)) { | 
2028  | 0  |         return (PyVarObject *)PyErr_NoMemory();  | 
2029  | 0  |     }  | 
2030  |  |  | 
2031  | 158  |     PyGC_Head *g = AS_GC(op);  | 
2032  | 158  |     g = (PyGC_Head *)PyObject_REALLOC(g,  sizeof(PyGC_Head) + basicsize);  | 
2033  | 158  |     if (g == NULL)  | 
2034  | 0  |         return (PyVarObject *)PyErr_NoMemory();  | 
2035  | 158  |     op = (PyVarObject *) FROM_GC(g);  | 
2036  | 158  |     Py_SIZE(op) = nitems;  | 
2037  | 158  |     return op;  | 
2038  | 158  | }  | 
2039  |  |  | 
2040  |  | void  | 
2041  |  | PyObject_GC_Del(void *op)  | 
2042  | 31.8k  | { | 
2043  | 31.8k  |     PyGC_Head *g = AS_GC(op);  | 
2044  | 31.8k  |     if (_PyObject_GC_IS_TRACKED(op)) { | 
2045  | 0  |         gc_list_remove(g);  | 
2046  | 0  |     }  | 
2047  | 31.8k  |     struct _gc_runtime_state *state = &_PyRuntime.gc;  | 
2048  | 31.8k  |     if (state->generations[0].count > 0) { | 
2049  | 31.7k  |         state->generations[0].count--;  | 
2050  | 31.7k  |     }  | 
2051  | 31.8k  |     PyObject_FREE(g);  | 
2052  | 31.8k  | }  |