Line | Count | Source (jump to first uncovered line) |
1 | | // This implements the reference cycle garbage collector. |
2 | | // The Python module interface to the collector is in gcmodule.c. |
3 | | // See InternalDocs/garbage_collector.md for more information.
4 | | |
5 | | #include "Python.h" |
6 | | #include "pycore_ceval.h" // _Py_set_eval_breaker_bit() |
7 | | #include "pycore_dict.h" // _PyInlineValuesSize() |
8 | | #include "pycore_initconfig.h" // _PyStatus_OK() |
9 | | #include "pycore_interp.h" // PyInterpreterState.gc |
10 | | #include "pycore_interpframe.h" // _PyFrame_GetLocalsArray() |
11 | | #include "pycore_object_alloc.h" // _PyObject_MallocWithType() |
12 | | #include "pycore_pystate.h" // _PyThreadState_GET() |
13 | | #include "pycore_tuple.h" // _PyTuple_MaybeUntrack() |
14 | | #include "pycore_weakref.h" // _PyWeakref_ClearRef() |
15 | | |
16 | | #include "pydtrace.h" |
17 | | |
18 | | |
19 | | #ifndef Py_GIL_DISABLED |
20 | | |
21 | | typedef struct _gc_runtime_state GCState; |
22 | | |
23 | | #ifdef Py_DEBUG |
24 | | # define GC_DEBUG |
25 | | #endif |
26 | | |
27 | | // Define this when debugging the GC |
28 | | // #define GC_EXTRA_DEBUG |
29 | | |
30 | | |
31 | 541M | #define GC_NEXT _PyGCHead_NEXT |
32 | 191M | #define GC_PREV _PyGCHead_PREV |
33 | | |
34 | | // update_refs() sets this bit for all objects in the current generation.
35 | | // subtract_refs() and move_unreachable() use this to distinguish
36 | | // whether a visited object is part of the current collection or not.
37 | | // |
38 | | // move_unreachable() removes this flag from reachable objects. |
39 | | // Only unreachable objects have this flag. |
40 | | // |
41 | | // No objects in the interpreter have this flag after GC ends.
42 | 216M | #define PREV_MASK_COLLECTING _PyGC_PREV_MASK_COLLECTING |
43 | | |
44 | | // Bit 1 of _gc_next is used for the UNREACHABLE flag (bit 0 is the old-space bit).
45 | | //
46 | | // This flag indicates that the object is in the unreachable list built by move_unreachable().
47 | | //
48 | | // Although this flag is used only in move_unreachable(), move_unreachable()
49 | | // doesn't clear it, in order to skip an unnecessary iteration;
50 | | // move_legacy_finalizers() removes this flag instead.
51 | | // Until then, the unreachable list is not a normal list and we cannot use
52 | | // most gc_list_* functions for it.
53 | 39.1M | #define NEXT_MASK_UNREACHABLE 2 |
54 | | |
55 | 1.62G | #define AS_GC(op) _Py_AS_GC(op) |
56 | 401M | #define FROM_GC(gc) _Py_FROM_GC(gc) |
57 | | |
58 | | // Automatically choose the generation that needs collecting. |
59 | | #define GENERATION_AUTO (-1) |
60 | | |
61 | | static inline int |
62 | | gc_is_collecting(PyGC_Head *g) |
63 | 127M | { |
64 | 127M | return (g->_gc_prev & PREV_MASK_COLLECTING) != 0; |
65 | 127M | } |
66 | | |
67 | | static inline void |
68 | | gc_clear_collecting(PyGC_Head *g) |
69 | 44.3M | { |
70 | 44.3M | g->_gc_prev &= ~PREV_MASK_COLLECTING; |
71 | 44.3M | } |
72 | | |
73 | | static inline Py_ssize_t |
74 | | gc_get_refs(PyGC_Head *g) |
75 | 110M | { |
76 | 110M | return (Py_ssize_t)(g->_gc_prev >> _PyGC_PREV_SHIFT); |
77 | 110M | } |
78 | | |
79 | | static inline void |
80 | | gc_set_refs(PyGC_Head *g, Py_ssize_t refs) |
81 | 30.6M | { |
82 | 30.6M | g->_gc_prev = (g->_gc_prev & ~_PyGC_PREV_MASK) |
83 | 30.6M | | ((uintptr_t)(refs) << _PyGC_PREV_SHIFT); |
84 | 30.6M | } |
85 | | |
86 | | static inline void |
87 | | gc_reset_refs(PyGC_Head *g, Py_ssize_t refs) |
88 | 44.9M | { |
89 | 44.9M | g->_gc_prev = (g->_gc_prev & _PyGC_PREV_MASK_FINALIZED) |
90 | 44.9M | | PREV_MASK_COLLECTING |
91 | 44.9M | | ((uintptr_t)(refs) << _PyGC_PREV_SHIFT); |
92 | 44.9M | } |
93 | | |
94 | | static inline void |
95 | | gc_decref(PyGC_Head *g) |
96 | 36.0M | { |
97 | 36.0M | _PyObject_ASSERT_WITH_MSG(FROM_GC(g), |
98 | 36.0M | gc_get_refs(g) > 0, |
99 | 36.0M | "refcount is too small"); |
100 | 36.0M | g->_gc_prev -= 1 << _PyGC_PREV_SHIFT; |
101 | 36.0M | } |
102 | | |
103 | | static inline int |
104 | | gc_old_space(PyGC_Head *g) |
105 | 167M | { |
106 | 167M | return g->_gc_next & _PyGC_NEXT_MASK_OLD_SPACE_1; |
107 | 167M | } |
108 | | |
109 | | static inline int |
110 | | other_space(int space) |
111 | 4.43k | { |
112 | 4.43k | assert(space == 0 || space == 1); |
113 | 4.43k | return space ^ _PyGC_NEXT_MASK_OLD_SPACE_1; |
114 | 4.43k | } |
115 | | |
116 | | static inline void |
117 | | gc_flip_old_space(PyGC_Head *g) |
118 | 89.2M | { |
119 | 89.2M | g->_gc_next ^= _PyGC_NEXT_MASK_OLD_SPACE_1; |
120 | 89.2M | } |
121 | | |
122 | | static inline void |
123 | | gc_set_old_space(PyGC_Head *g, int space) |
124 | 41.0M | { |
125 | 41.0M | assert(space == 0 || space == _PyGC_NEXT_MASK_OLD_SPACE_1); |
126 | 41.0M | g->_gc_next &= ~_PyGC_NEXT_MASK_OLD_SPACE_1; |
127 | 41.0M | g->_gc_next |= space; |
128 | 41.0M | } |
129 | | |
130 | | static PyGC_Head * |
131 | | GEN_HEAD(GCState *gcstate, int n) |
132 | 0 | { |
133 | 0 | assert((gcstate->visited_space & (~1)) == 0); |
134 | 0 | switch(n) { |
135 | 0 | case 0: |
136 | 0 | return &gcstate->young.head; |
137 | 0 | case 1: |
138 | 0 | return &gcstate->old[gcstate->visited_space].head; |
139 | 0 | case 2: |
140 | 0 | return &gcstate->old[gcstate->visited_space^1].head; |
141 | 0 | default: |
142 | 0 | Py_UNREACHABLE(); |
143 | 0 | } |
144 | 0 | } |
145 | | |
146 | | static GCState * |
147 | | get_gc_state(void) |
148 | 428M | { |
149 | 428M | PyInterpreterState *interp = _PyInterpreterState_GET(); |
150 | 428M | return &interp->gc; |
151 | 428M | } |
152 | | |
153 | | |
154 | | void |
155 | | _PyGC_InitState(GCState *gcstate) |
156 | 16 | { |
157 | 16 | #define INIT_HEAD(GEN) \ |
158 | 64 | do { \ |
159 | 64 | GEN.head._gc_next = (uintptr_t)&GEN.head; \ |
160 | 64 | GEN.head._gc_prev = (uintptr_t)&GEN.head; \ |
161 | 64 | } while (0) |
162 | | |
163 | 16 | assert(gcstate->young.count == 0); |
164 | 16 | assert(gcstate->old[0].count == 0); |
165 | 16 | assert(gcstate->old[1].count == 0); |
166 | 16 | INIT_HEAD(gcstate->young); |
167 | 16 | INIT_HEAD(gcstate->old[0]); |
168 | 16 | INIT_HEAD(gcstate->old[1]); |
169 | 16 | INIT_HEAD(gcstate->permanent_generation); |
170 | | |
171 | 16 | #undef INIT_HEAD |
172 | 16 | } |
173 | | |
174 | | |
175 | | PyStatus |
176 | | _PyGC_Init(PyInterpreterState *interp) |
177 | 16 | { |
178 | 16 | GCState *gcstate = &interp->gc; |
179 | | |
180 | 16 | gcstate->garbage = PyList_New(0); |
181 | 16 | if (gcstate->garbage == NULL) { |
182 | 0 | return _PyStatus_NO_MEMORY(); |
183 | 0 | } |
184 | | |
185 | 16 | gcstate->callbacks = PyList_New(0); |
186 | 16 | if (gcstate->callbacks == NULL) { |
187 | 0 | return _PyStatus_NO_MEMORY(); |
188 | 0 | } |
189 | 16 | gcstate->heap_size = 0; |
190 | | |
191 | 16 | return _PyStatus_OK(); |
192 | 16 | } |
193 | | |
194 | | |
195 | | /* |
196 | | _gc_prev values |
197 | | --------------- |
198 | | |
199 | | Between collections, _gc_prev is used for the doubly-linked list.
200 | | |
201 | | Lowest two bits of _gc_prev are used for flags. |
202 | | PREV_MASK_COLLECTING is used only while collecting and cleared before GC ends |
203 | | or _PyObject_GC_UNTRACK() is called. |
204 | | |
205 | | During a collection, _gc_prev is temporarily used for gc_refs, and the gc list
206 | | is singly linked until _gc_prev is restored. |
207 | | |
208 | | gc_refs |
209 | | At the start of a collection, update_refs() copies the true refcount |
210 | | to gc_refs, for each object in the generation being collected. |
211 | | subtract_refs() then adjusts gc_refs so that it equals the number of |
212 | | times an object is referenced directly from outside the generation |
213 | | being collected. |
214 | | |
215 | | PREV_MASK_COLLECTING |
216 | | Objects in generation being collected are marked PREV_MASK_COLLECTING in |
217 | | update_refs(). |
218 | | |
219 | | |
220 | | _gc_next values |
221 | | --------------- |
222 | | |
223 | | _gc_next takes these values: |
224 | | |
225 | | 0 |
226 | | The object is not tracked |
227 | | |
228 | | != 0 |
229 | | Pointer to the next object in the GC list. |
230 | | Additionally, the low bits are used temporarily for the
231 | | NEXT_MASK_UNREACHABLE flag described below.
232 | | |
233 | | NEXT_MASK_UNREACHABLE |
234 | | move_unreachable() moves objects that are not reachable (whether directly
235 | | or indirectly) from outside the generation into an "unreachable" set and
236 | | sets this flag on them.
237 | |
238 | | Objects that are found to be reachable have gc_refs set to 1.
239 | | When this flag is set on an object that turns out to be reachable, the
240 | | object must be in the "unreachable" set; the flag is unset and the
241 | | object is moved back to the "reachable" set.
242 | |
243 | | move_legacy_finalizers() will remove this flag from the "unreachable" set.
244 | | */ |
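 | | /* Editor's note: the sketch below is not part of the collector; it only
 | |  * illustrates the field encoding described above, and it assumes nothing
 | |  * beyond the macros already used in this file (_PyGC_PREV_MASK,
 | |  * _PyGC_PREV_SHIFT, PREV_MASK_COLLECTING, NEXT_MASK_UNREACHABLE).
 | |  * The guard macro is hypothetical and never defined. */
 | | #ifdef GC_FIELD_ENCODING_EXAMPLE
 | | static void
 | | example_field_encoding(PyGC_Head *g)
 | | {
 | |     /* _gc_prev: the flag bits live below _PyGC_PREV_SHIFT; during a
 | |      * collection the bits above them hold the gc_refs copy (otherwise
 | |      * they hold the prev pointer). */
 | |     Py_ssize_t refs = (Py_ssize_t)(g->_gc_prev >> _PyGC_PREV_SHIFT);
 | |     int collecting = (g->_gc_prev & PREV_MASK_COLLECTING) != 0;
 | |     /* _gc_next: the low bits temporarily carry NEXT_MASK_UNREACHABLE and
 | |      * the old-space bit, so they must be masked off before the value is
 | |      * used as a pointer (move_unreachable() does exactly this at its end). */
 | |     PyGC_Head *next = (PyGC_Head *)(g->_gc_next & _PyGC_PREV_MASK);
 | |     (void)refs; (void)collecting; (void)next;
 | | }
 | | #endif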
245 | | |
246 | | /*** list functions ***/ |
247 | | |
248 | | static inline void |
249 | | gc_list_init(PyGC_Head *list) |
250 | 763k | { |
251 | | // The list header must not have flags set,
252 | | // so we can assign the pointer with a simple cast.
253 | 763k | list->_gc_prev = (uintptr_t)list; |
254 | 763k | list->_gc_next = (uintptr_t)list; |
255 | 763k | } |
256 | | |
257 | | static inline int |
258 | | gc_list_is_empty(PyGC_Head *list) |
259 | 90.4M | { |
260 | 90.4M | return (list->_gc_next == (uintptr_t)list); |
261 | 90.4M | } |
262 | | |
263 | | /* Append `node` to `list`. */ |
264 | | static inline void |
265 | | gc_list_append(PyGC_Head *node, PyGC_Head *list) |
266 | 4.82M | { |
267 | 4.82M | assert((list->_gc_prev & ~_PyGC_PREV_MASK) == 0); |
268 | 4.82M | PyGC_Head *last = (PyGC_Head *)list->_gc_prev; |
269 | | |
270 | | // last <-> node |
271 | 4.82M | _PyGCHead_SET_PREV(node, last); |
272 | 4.82M | _PyGCHead_SET_NEXT(last, node); |
273 | | |
274 | | // node <-> list |
275 | 4.82M | _PyGCHead_SET_NEXT(node, list); |
276 | 4.82M | list->_gc_prev = (uintptr_t)node; |
277 | 4.82M | } |
278 | | |
279 | | /* Remove `node` from the gc list it's currently in. */ |
280 | | static inline void |
281 | | gc_list_remove(PyGC_Head *node) |
282 | 0 | { |
283 | 0 | PyGC_Head *prev = GC_PREV(node); |
284 | 0 | PyGC_Head *next = GC_NEXT(node); |
285 | |
286 | 0 | _PyGCHead_SET_NEXT(prev, next); |
287 | 0 | _PyGCHead_SET_PREV(next, prev); |
288 | |
289 | 0 | node->_gc_next = 0; /* object is not currently tracked */ |
290 | 0 | } |
291 | | |
292 | | /* Move `node` from the gc list it's currently in (which is not explicitly |
293 | | * named here) to the end of `list`. This is semantically the same as |
294 | | * gc_list_remove(node) followed by gc_list_append(node, list). |
295 | | */ |
296 | | static void |
297 | | gc_list_move(PyGC_Head *node, PyGC_Head *list) |
298 | 178M | { |
299 | | /* Unlink from current list. */ |
300 | 178M | PyGC_Head *from_prev = GC_PREV(node); |
301 | 178M | PyGC_Head *from_next = GC_NEXT(node); |
302 | 178M | _PyGCHead_SET_NEXT(from_prev, from_next); |
303 | 178M | _PyGCHead_SET_PREV(from_next, from_prev); |
304 | | |
305 | | /* Relink at end of new list. */ |
306 | | // list must not have flags. So we can skip macros. |
307 | 178M | PyGC_Head *to_prev = (PyGC_Head*)list->_gc_prev; |
308 | 178M | _PyGCHead_SET_PREV(node, to_prev); |
309 | 178M | _PyGCHead_SET_NEXT(to_prev, node); |
310 | 178M | list->_gc_prev = (uintptr_t)node; |
311 | 178M | _PyGCHead_SET_NEXT(node, list); |
312 | 178M | } |
313 | | |
314 | | /* append list `from` onto list `to`; `from` becomes an empty list */ |
315 | | static void |
316 | | gc_list_merge(PyGC_Head *from, PyGC_Head *to) |
317 | 323k | { |
318 | 323k | assert(from != to); |
319 | 323k | if (!gc_list_is_empty(from)) { |
320 | 103k | PyGC_Head *to_tail = GC_PREV(to); |
321 | 103k | PyGC_Head *from_head = GC_NEXT(from); |
322 | 103k | PyGC_Head *from_tail = GC_PREV(from); |
323 | 103k | assert(from_head != from); |
324 | 103k | assert(from_tail != from); |
325 | 103k | assert(gc_list_is_empty(to) || |
326 | 103k | gc_old_space(to_tail) == gc_old_space(from_tail)); |
327 | | |
328 | 103k | _PyGCHead_SET_NEXT(to_tail, from_head); |
329 | 103k | _PyGCHead_SET_PREV(from_head, to_tail); |
330 | | |
331 | 103k | _PyGCHead_SET_NEXT(from_tail, to); |
332 | 103k | _PyGCHead_SET_PREV(to, from_tail); |
333 | 103k | } |
334 | 323k | gc_list_init(from); |
335 | 323k | } |
336 | | |
337 | | static Py_ssize_t |
338 | | gc_list_size(PyGC_Head *list) |
339 | 107k | { |
340 | 107k | PyGC_Head *gc; |
341 | 107k | Py_ssize_t n = 0; |
342 | 41.2M | for (gc = GC_NEXT(list); gc != list; gc = GC_NEXT(gc)) { |
343 | 41.1M | n++; |
344 | 41.1M | } |
345 | 107k | return n; |
346 | 107k | } |
347 | | |
348 | | /* Walk the list and mark all objects as non-collecting */ |
349 | | static inline void |
350 | | gc_list_clear_collecting(PyGC_Head *collectable) |
351 | 53.9k | { |
352 | 53.9k | PyGC_Head *gc; |
353 | 1.29M | for (gc = GC_NEXT(collectable); gc != collectable; gc = GC_NEXT(gc)) { |
354 | 1.24M | gc_clear_collecting(gc); |
355 | 1.24M | } |
356 | 53.9k | } |
357 | | |
358 | | /* Append objects in a GC list to a Python list. |
359 | | * Return 0 if all OK, < 0 if error (out of memory for list) |
360 | | */ |
361 | | static int |
362 | | append_objects(PyObject *py_list, PyGC_Head *gc_list) |
363 | 0 | { |
364 | 0 | PyGC_Head *gc; |
365 | 0 | for (gc = GC_NEXT(gc_list); gc != gc_list; gc = GC_NEXT(gc)) { |
366 | 0 | PyObject *op = FROM_GC(gc); |
367 | 0 | if (op != py_list) { |
368 | 0 | if (PyList_Append(py_list, op)) { |
369 | 0 | return -1; /* exception */ |
370 | 0 | } |
371 | 0 | } |
372 | 0 | } |
373 | 0 | return 0; |
374 | 0 | } |
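 | | /* Editor's note: a sketch (not used by the collector) of how the list
 | |  * primitives above compose; the pattern mirrors finalize_garbage() further
 | |  * below. A temporary header lives on the C stack, nodes are moved into it
 | |  * one at a time, and the batch is spliced back in O(1) with gc_list_merge().
 | |  * The guard macro is hypothetical and never defined. */
 | | #ifdef GC_LIST_USAGE_EXAMPLE
 | | static void
 | | example_batch_move(PyGC_Head *src, PyGC_Head *dst)
 | | {
 | |     PyGC_Head tmp;
 | |     gc_list_init(&tmp);                    /* empty header, no flag bits */
 | |     while (!gc_list_is_empty(src)) {
 | |         gc_list_move(GC_NEXT(src), &tmp);  /* unlink head, append at tail */
 | |     }
 | |     gc_list_merge(&tmp, dst);              /* splice whole batch; tmp emptied */
 | |     assert(gc_list_is_empty(&tmp));
 | | }
 | | #endif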
375 | | |
376 | | // Constants for validate_list's flags argument. |
377 | | enum flagstates {collecting_clear_unreachable_clear, |
378 | | collecting_clear_unreachable_set, |
379 | | collecting_set_unreachable_clear, |
380 | | collecting_set_unreachable_set}; |
381 | | |
382 | | #ifdef GC_DEBUG |
383 | | // validate_list checks list consistency, and it also serves as documentation
384 | | // describing when flags are expected to be set / unset.
385 | | // `head` must be a doubly-linked gc list, although it's fine (expected!) if |
386 | | // the prev and next pointers are "polluted" with flags. |
387 | | // What's checked: |
388 | | // - The `head` pointers are not polluted. |
389 | | // - The objects' PREV_MASK_COLLECTING and NEXT_MASK_UNREACHABLE flags are all |
390 | | //   set or clear, as specified by the 'flags' argument.
391 | | // - The prev and next pointers are mutually consistent. |
392 | | static void |
393 | | validate_list(PyGC_Head *head, enum flagstates flags) |
394 | | { |
395 | | assert((head->_gc_prev & ~_PyGC_PREV_MASK) == 0); |
396 | | assert((head->_gc_next & ~_PyGC_PREV_MASK) == 0); |
397 | | uintptr_t prev_value = 0, next_value = 0; |
398 | | switch (flags) { |
399 | | case collecting_clear_unreachable_clear: |
400 | | break; |
401 | | case collecting_set_unreachable_clear: |
402 | | prev_value = PREV_MASK_COLLECTING; |
403 | | break; |
404 | | case collecting_clear_unreachable_set: |
405 | | next_value = NEXT_MASK_UNREACHABLE; |
406 | | break; |
407 | | case collecting_set_unreachable_set: |
408 | | prev_value = PREV_MASK_COLLECTING; |
409 | | next_value = NEXT_MASK_UNREACHABLE; |
410 | | break; |
411 | | default: |
412 | | assert(! "bad internal flags argument"); |
413 | | } |
414 | | PyGC_Head *prev = head; |
415 | | PyGC_Head *gc = GC_NEXT(head); |
416 | | while (gc != head) { |
417 | | PyGC_Head *trueprev = GC_PREV(gc); |
418 | | PyGC_Head *truenext = GC_NEXT(gc); |
419 | | assert(truenext != NULL); |
420 | | assert(trueprev == prev); |
421 | | assert((gc->_gc_prev & PREV_MASK_COLLECTING) == prev_value); |
422 | | assert((gc->_gc_next & NEXT_MASK_UNREACHABLE) == next_value); |
423 | | prev = gc; |
424 | | gc = truenext; |
425 | | } |
426 | | assert(prev == GC_PREV(head)); |
427 | | } |
428 | | |
429 | | #else |
430 | 700k | #define validate_list(x, y) do{}while(0) |
431 | | #endif |
432 | | |
433 | | #ifdef GC_EXTRA_DEBUG |
434 | | |
435 | | |
436 | | static void |
437 | | gc_list_validate_space(PyGC_Head *head, int space) { |
438 | | PyGC_Head *gc = GC_NEXT(head); |
439 | | while (gc != head) { |
440 | | assert(gc_old_space(gc) == space); |
441 | | gc = GC_NEXT(gc); |
442 | | } |
443 | | } |
444 | | |
445 | | static void |
446 | | validate_spaces(GCState *gcstate) |
447 | | { |
448 | | int visited = gcstate->visited_space; |
449 | | int not_visited = other_space(visited); |
450 | | gc_list_validate_space(&gcstate->young.head, not_visited); |
451 | | for (int space = 0; space < 2; space++) { |
452 | | gc_list_validate_space(&gcstate->old[space].head, space); |
453 | | } |
454 | | gc_list_validate_space(&gcstate->permanent_generation.head, visited); |
455 | | } |
456 | | |
457 | | static void |
458 | | validate_consistent_old_space(PyGC_Head *head) |
459 | | { |
460 | | PyGC_Head *gc = GC_NEXT(head); |
461 | | if (gc == head) { |
462 | | return; |
463 | | } |
464 | | int old_space = gc_old_space(gc); |
465 | | while (gc != head) { |
466 | | PyGC_Head *truenext = GC_NEXT(gc); |
467 | | assert(truenext != NULL); |
468 | | assert(gc_old_space(gc) == old_space); |
469 | | gc = truenext; |
470 | | } |
471 | | } |
472 | | |
473 | | |
474 | | #else |
475 | 121k | #define validate_spaces(g) do{}while(0) |
476 | 269k | #define validate_consistent_old_space(l) do{}while(0) |
477 | 278k | #define gc_list_validate_space(l, s) do{}while(0) |
478 | | #endif |
479 | | |
480 | | /*** end of list stuff ***/ |
481 | | |
482 | | |
483 | | /* Set all gc_refs = ob_refcnt. After this, gc_refs is > 0 and |
484 | | * PREV_MASK_COLLECTING bit is set for all objects in containers. |
485 | | */ |
486 | | static void |
487 | | update_refs(PyGC_Head *containers) |
488 | 107k | { |
489 | 107k | PyGC_Head *next; |
490 | 107k | PyGC_Head *gc = GC_NEXT(containers); |
491 | | |
492 | 45.0M | while (gc != containers) { |
493 | 44.9M | next = GC_NEXT(gc); |
494 | 44.9M | PyObject *op = FROM_GC(gc); |
495 | 44.9M | if (_Py_IsImmortal(op)) { |
496 | 0 | assert(!_Py_IsStaticImmortal(op)); |
497 | 0 | _PyObject_GC_UNTRACK(op); |
498 | 0 | gc = next; |
499 | 0 | continue; |
500 | 0 | } |
501 | 44.9M | gc_reset_refs(gc, Py_REFCNT(op)); |
502 | | /* Python's cyclic gc should never see an incoming refcount |
503 | | * of 0: if something decref'ed to 0, it should have been |
504 | | * deallocated immediately at that time. |
505 | | * Possible cause (if the assert triggers): a tp_dealloc |
506 | | * routine left a gc-aware object tracked during its teardown |
507 | | * phase, and did something-- or allowed something to happen -- |
508 | | * that called back into Python. gc can trigger then, and may |
509 | | * see the still-tracked dying object. Before this assert |
510 | | * was added, such mistakes went on to allow gc to try to |
511 | | * delete the object again. In a debug build, that caused |
512 | | * a mysterious segfault, when _Py_ForgetReference tried |
513 | | * to remove the object from the doubly-linked list of all |
514 | | * objects a second time. In a release build, an actual |
515 | | * double deallocation occurred, which leads to corruption |
516 | | * of the allocator's internal bookkeeping pointers. That's |
517 | | * so serious that maybe this should be a release-build |
518 | | * check instead of an assert? |
519 | | */ |
520 | 44.9M | _PyObject_ASSERT(op, gc_get_refs(gc) != 0); |
521 | 44.9M | gc = next; |
522 | 44.9M | } |
523 | 107k | } |
524 | | |
525 | | /* A traversal callback for subtract_refs. */ |
526 | | static int |
527 | | visit_decref(PyObject *op, void *parent) |
528 | 162M | { |
529 | 162M | OBJECT_STAT_INC(object_visits); |
530 | 162M | _PyObject_ASSERT(_PyObject_CAST(parent), !_PyObject_IsFreed(op)); |
531 | | |
532 | 162M | if (_PyObject_IS_GC(op)) { |
533 | 66.6M | PyGC_Head *gc = AS_GC(op); |
534 | | /* We're only interested in gc_refs for objects in the |
535 | | * generation being collected, which can be recognized |
536 | | * because only they have positive gc_refs. |
537 | | */ |
538 | 66.6M | if (gc_is_collecting(gc)) { |
539 | 36.0M | gc_decref(gc); |
540 | 36.0M | } |
541 | 66.6M | } |
542 | 162M | return 0; |
543 | 162M | } |
544 | | |
545 | | int |
546 | | _PyGC_VisitStackRef(_PyStackRef *ref, visitproc visit, void *arg) |
547 | 3.12M | { |
548 | | // This is a bit tricky! We want to ignore stackrefs with embedded |
549 | | // refcounts when computing the incoming references, but otherwise treat |
550 | | // them like normal. |
551 | 3.12M | assert(!PyStackRef_IsTaggedInt(*ref)); |
552 | 3.12M | if (!PyStackRef_RefcountOnObject(*ref) && (visit == visit_decref)) { |
553 | 19.5k | return 0; |
554 | 19.5k | } |
555 | 3.10M | Py_VISIT(PyStackRef_AsPyObjectBorrow(*ref)); |
556 | 3.10M | return 0; |
557 | 3.10M | } |
558 | | |
559 | | int |
560 | | _PyGC_VisitFrameStack(_PyInterpreterFrame *frame, visitproc visit, void *arg) |
561 | 575k | { |
562 | 575k | _PyStackRef *ref = _PyFrame_GetLocalsArray(frame); |
563 | | /* locals and stack */ |
564 | 3.71M | for (; ref < frame->stackpointer; ref++) { |
565 | 3.14M | if (!PyStackRef_IsTaggedInt(*ref)) { |
566 | 3.14M | _Py_VISIT_STACKREF(*ref); |
567 | 3.14M | } |
568 | 3.14M | } |
569 | 575k | return 0; |
570 | 575k | } |
571 | | |
572 | | /* Subtract internal references from gc_refs. After this, gc_refs is >= 0 |
573 | | * for all objects in containers. The ones with gc_refs > 0 are directly |
574 | | * reachable from outside containers, and so can't be collected. |
575 | | */ |
576 | | static void |
577 | | subtract_refs(PyGC_Head *containers) |
578 | 107k | { |
579 | 107k | traverseproc traverse; |
580 | 107k | PyGC_Head *gc = GC_NEXT(containers); |
581 | 45.0M | for (; gc != containers; gc = GC_NEXT(gc)) { |
582 | 44.9M | PyObject *op = FROM_GC(gc); |
583 | 44.9M | traverse = Py_TYPE(op)->tp_traverse; |
584 | 44.9M | (void) traverse(op, |
585 | 44.9M | visit_decref, |
586 | 44.9M | op); |
587 | 44.9M | } |
588 | 107k | } |
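 | | /* Editor's worked example of update_refs() + subtract_refs(), with
 | |  * hypothetical objects: suppose the collected list holds A, B and C, where
 | |  * B references A, C references B, and a single object outside the list
 | |  * references C (and nothing else references them). update_refs() copies the
 | |  * real refcounts into gc_refs: A=1, B=1, C=1. subtract_refs() then removes
 | |  * the references that originate inside the list, leaving A=0, B=0, C=1.
 | |  * Only C is directly reachable from outside; move_unreachable() below keeps
 | |  * C and, by traversing C and then B, pulls B and A back out of the
 | |  * unreachable set. This matches the A/B/C discussion in the comment above
 | |  * deduce_unreachable(). */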
589 | | |
590 | | /* A traversal callback for move_unreachable. */ |
591 | | static int |
592 | | visit_reachable(PyObject *op, void *arg) |
593 | 153M | { |
594 | 153M | PyGC_Head *reachable = arg; |
595 | 153M | OBJECT_STAT_INC(object_visits); |
596 | 153M | if (!_PyObject_IS_GC(op)) { |
597 | 93.3M | return 0; |
598 | 93.3M | } |
599 | | |
600 | 60.3M | PyGC_Head *gc = AS_GC(op); |
601 | 60.3M | const Py_ssize_t gc_refs = gc_get_refs(gc); |
602 | | |
603 | | // Ignore objects in other generation. |
604 | | // This also skips objects "to the left" of the current position in |
605 | | // move_unreachable's scan of the 'young' list - they've already been |
606 | | // traversed, and no longer have the PREV_MASK_COLLECTING flag. |
607 | 60.3M | if (! gc_is_collecting(gc)) { |
608 | 28.6M | return 0; |
609 | 28.6M | } |
610 | | // It would be a logic error elsewhere if the collecting flag were set on |
611 | | // an untracked object. |
612 | 31.7M | _PyObject_ASSERT(op, gc->_gc_next != 0); |
613 | | |
614 | 31.7M | if (gc->_gc_next & NEXT_MASK_UNREACHABLE) { |
615 | | /* This had gc_refs = 0 when move_unreachable got |
616 | | * to it, but turns out it's reachable after all. |
617 | | * Move it back to move_unreachable's 'young' list, |
618 | | * and move_unreachable will eventually get to it |
619 | | * again. |
620 | | */ |
621 | | // Manually unlink gc from unreachable list because the list functions |
622 | | // don't work right in the presence of NEXT_MASK_UNREACHABLE flags. |
623 | 4.82M | PyGC_Head *prev = GC_PREV(gc); |
624 | 4.82M | PyGC_Head *next = GC_NEXT(gc); |
625 | 4.82M | _PyObject_ASSERT(FROM_GC(prev), |
626 | 4.82M | prev->_gc_next & NEXT_MASK_UNREACHABLE); |
627 | 4.82M | _PyObject_ASSERT(FROM_GC(next), |
628 | 4.82M | next->_gc_next & NEXT_MASK_UNREACHABLE); |
629 | 4.82M | prev->_gc_next = gc->_gc_next; // copy flag bits |
630 | 4.82M | gc->_gc_next &= ~NEXT_MASK_UNREACHABLE; |
631 | 4.82M | _PyGCHead_SET_PREV(next, prev); |
632 | | |
633 | 4.82M | gc_list_append(gc, reachable); |
634 | 4.82M | gc_set_refs(gc, 1); |
635 | 4.82M | } |
636 | 26.8M | else if (gc_refs == 0) { |
637 | | /* This is in move_unreachable's 'young' list, but |
638 | | * the traversal hasn't yet gotten to it. All |
639 | | * we need to do is tell move_unreachable that it's |
640 | | * reachable. |
641 | | */ |
642 | 25.8M | gc_set_refs(gc, 1); |
643 | 25.8M | } |
644 | | /* Else there's nothing to do. |
645 | | * If gc_refs > 0, it must be in move_unreachable's 'young' |
646 | | * list, and move_unreachable will eventually get to it. |
647 | | */ |
648 | 1.01M | else { |
649 | 1.01M | _PyObject_ASSERT_WITH_MSG(op, gc_refs > 0, "refcount is too small"); |
650 | 1.01M | } |
651 | 31.7M | return 0; |
652 | 60.3M | } |
653 | | |
654 | | /* Move the unreachable objects from young to unreachable. After this,
655 | | * no object in young has the PREV_MASK_COLLECTING flag and all objects
656 | | * in unreachable have it.
657 | | * All objects remaining in young are directly or indirectly reachable
658 | | * from outside the original young list, and all objects in unreachable
659 | | * are not.
660 | | *
661 | | * This function restores the _gc_prev pointers; young and unreachable
662 | | * are doubly-linked lists again after it returns.
663 | | * But _gc_next in the unreachable list still carries NEXT_MASK_UNREACHABLE,
664 | | * so we cannot use gc_list_* functions on unreachable until the flag is removed.
665 | | */ |
666 | | static void |
667 | | move_unreachable(PyGC_Head *young, PyGC_Head *unreachable) |
668 | 107k | { |
669 | | // previous element in the young list, used to restore gc_prev.
670 | 107k | PyGC_Head *prev = young; |
671 | 107k | PyGC_Head *gc = GC_NEXT(young); |
672 | | |
673 | | /* Invariants: all objects "to the left" of us in young are reachable |
674 | | * (directly or indirectly) from outside the young list as it was at entry. |
675 | | * |
676 | | * All other objects from the original young "to the left" of us are in |
677 | | * unreachable now, and have NEXT_MASK_UNREACHABLE. All objects to the |
678 | | * left of us in 'young' now have been scanned, and no objects here |
679 | | * or to the right have been scanned yet. |
680 | | */ |
681 | | |
682 | 107k | validate_consistent_old_space(young); |
683 | | /* Record which old space we are in, and set NEXT_MASK_UNREACHABLE bit for convenience */ |
684 | 107k | uintptr_t flags = NEXT_MASK_UNREACHABLE | (gc->_gc_next & _PyGC_NEXT_MASK_OLD_SPACE_1); |
685 | 49.8M | while (gc != young) { |
686 | 49.7M | if (gc_get_refs(gc)) { |
687 | | /* gc is definitely reachable from outside the |
688 | | * original 'young'. Mark it as such, and traverse |
689 | | * its pointers to find any other objects that may |
690 | | * be directly reachable from it. Note that the |
691 | | * call to tp_traverse may append objects to young, |
692 | | * so we have to wait until it returns to determine |
693 | | * the next object to visit. |
694 | | */ |
695 | 42.4M | PyObject *op = FROM_GC(gc); |
696 | 42.4M | traverseproc traverse = Py_TYPE(op)->tp_traverse; |
697 | 42.4M | _PyObject_ASSERT_WITH_MSG(op, gc_get_refs(gc) > 0, |
698 | 42.4M | "refcount is too small"); |
699 | | // NOTE: visit_reachable may change gc->_gc_next when |
700 | | // young->_gc_prev == gc. Don't do gc = GC_NEXT(gc) before! |
701 | 42.4M | (void) traverse(op, |
702 | 42.4M | visit_reachable, |
703 | 42.4M | (void *)young); |
704 | | // relink gc_prev to prev element. |
705 | 42.4M | _PyGCHead_SET_PREV(gc, prev); |
706 | | // gc is not COLLECTING state after here. |
707 | 42.4M | gc_clear_collecting(gc); |
708 | 42.4M | prev = gc; |
709 | 42.4M | } |
710 | 7.31M | else { |
711 | | /* This *may* be unreachable. To make progress, |
712 | | * assume it is. gc isn't directly reachable from |
713 | | * any object we've already traversed, but may be |
714 | | * reachable from an object we haven't gotten to yet. |
715 | | * visit_reachable will eventually move gc back into |
716 | | * young if that's so, and we'll see it again. |
717 | | */ |
718 | | // Move gc to unreachable. |
719 | | // No need to set gc->next->prev = prev because the list is singly linked here.
720 | 7.31M | prev->_gc_next = gc->_gc_next; |
721 | | |
722 | | // We can't use gc_list_append() here because we use |
723 | | // NEXT_MASK_UNREACHABLE here. |
724 | 7.31M | PyGC_Head *last = GC_PREV(unreachable); |
725 | | // NOTE: Since all objects in the unreachable set have the
726 | | // NEXT_MASK_UNREACHABLE flag, we set it unconditionally.
727 | | // But this may pollute the unreachable list head's 'next' pointer |
728 | | // too. That's semantically senseless but expedient here - the |
729 | | // damage is repaired when this function ends. |
730 | 7.31M | last->_gc_next = flags | (uintptr_t)gc; |
731 | 7.31M | _PyGCHead_SET_PREV(gc, last); |
732 | 7.31M | gc->_gc_next = flags | (uintptr_t)unreachable; |
733 | 7.31M | unreachable->_gc_prev = (uintptr_t)gc; |
734 | 7.31M | } |
735 | 49.7M | gc = _PyGCHead_NEXT(prev); |
736 | 49.7M | } |
737 | | // young->_gc_prev must be the last element remaining in the list.
738 | 107k | young->_gc_prev = (uintptr_t)prev; |
739 | 107k | young->_gc_next &= _PyGC_PREV_MASK; |
740 | | // don't let the pollution of the list head's next pointer leak |
741 | 107k | unreachable->_gc_next &= _PyGC_PREV_MASK; |
742 | 107k | } |
743 | | |
744 | | /* In theory, all tuples should be younger than the |
745 | | * objects they refer to, as tuples are immutable.
746 | | * Therefore, untracking tuples in oldest-first order in the
747 | | * young generation before promoting them should untrack
748 | | * all the tuples that can be untracked. |
749 | | * |
750 | | * Unfortunately, the C API allows tuples to be created |
751 | | * and then filled in. So this won't untrack all tuples |
752 | | * that can be untracked. It should untrack most of them |
753 | | * and is much faster than a more complex approach that |
754 | | * would untrack all relevant tuples. |
755 | | */ |
756 | | static void |
757 | | untrack_tuples(PyGC_Head *head) |
758 | 112k | { |
759 | 112k | PyGC_Head *gc = GC_NEXT(head); |
760 | 172M | while (gc != head) { |
761 | 172M | PyObject *op = FROM_GC(gc); |
762 | 172M | PyGC_Head *next = GC_NEXT(gc); |
763 | 172M | if (PyTuple_CheckExact(op)) { |
764 | 70.6M | _PyTuple_MaybeUntrack(op); |
765 | 70.6M | } |
766 | 172M | gc = next; |
767 | 172M | } |
768 | 112k | } |
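 | | /* Editor's note: a sketch of the effect described above, using only public
 | |  * C API; the function name and guard macro are hypothetical, error handling
 | |  * is omitted, and an attached thread state is assumed. A tuple whose items
 | |  * can never participate in a cycle is a candidate for untracking, while a
 | |  * tuple holding a mutable container must stay tracked. */
 | | #ifdef GC_UNTRACK_EXAMPLE
 | | static void
 | | example_tuple_untracking(void)
 | | {
 | |     PyObject *ints = Py_BuildValue("(ii)", 1, 2);   /* atomic items only */
 | |     PyObject *lst = PyList_New(0);
 | |     PyObject *with_list = PyTuple_Pack(1, lst);     /* holds a container */
 | |     Py_DECREF(lst);
 | |     PyGC_Collect();
 | |     /* A tuple that could be part of a reference cycle stays tracked. */
 | |     assert(PyObject_GC_IsTracked(with_list));
 | |     /* The all-atomic tuple may have been untracked by now; untracking is an
 | |      * optimization, so no assertion is made about it here. */
 | |     Py_DECREF(with_list);
 | |     Py_DECREF(ints);
 | | }
 | | #endif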
769 | | |
770 | | /* Return true if object has a pre-PEP 442 finalization method. */ |
771 | | static int |
772 | | has_legacy_finalizer(PyObject *op) |
773 | 1.24M | { |
774 | 1.24M | return Py_TYPE(op)->tp_del != NULL; |
775 | 1.24M | } |
776 | | |
777 | | /* Move the objects in unreachable with tp_del slots into `finalizers`. |
778 | | * |
779 | | * This function also removes NEXT_MASK_UNREACHABLE flag |
780 | | * from _gc_next in unreachable. |
781 | | */ |
782 | | static void |
783 | | move_legacy_finalizers(PyGC_Head *unreachable, PyGC_Head *finalizers) |
784 | 53.9k | { |
785 | 53.9k | PyGC_Head *gc, *next; |
786 | 53.9k | _PyObject_ASSERT( |
787 | 53.9k | FROM_GC(unreachable), |
788 | 53.9k | (unreachable->_gc_next & NEXT_MASK_UNREACHABLE) == 0); |
789 | | |
790 | | /* March over unreachable. Move objects with finalizers into |
791 | | * `finalizers`. |
792 | | */ |
793 | 1.29M | for (gc = GC_NEXT(unreachable); gc != unreachable; gc = next) { |
794 | 1.24M | PyObject *op = FROM_GC(gc); |
795 | | |
796 | 1.24M | _PyObject_ASSERT(op, gc->_gc_next & NEXT_MASK_UNREACHABLE); |
797 | 1.24M | next = GC_NEXT(gc); |
798 | 1.24M | gc->_gc_next &= ~NEXT_MASK_UNREACHABLE; |
799 | | |
800 | 1.24M | if (has_legacy_finalizer(op)) { |
801 | 0 | gc_clear_collecting(gc); |
802 | 0 | gc_list_move(gc, finalizers); |
803 | 0 | } |
804 | 1.24M | } |
805 | 53.9k | } |
806 | | |
807 | | static inline void |
808 | | clear_unreachable_mask(PyGC_Head *unreachable) |
809 | 53.9k | { |
810 | | /* Check that the list head does not have the unreachable bit set */ |
811 | 53.9k | _PyObject_ASSERT( |
812 | 53.9k | FROM_GC(unreachable), |
813 | 53.9k | ((uintptr_t)unreachable & NEXT_MASK_UNREACHABLE) == 0); |
814 | 53.9k | _PyObject_ASSERT( |
815 | 53.9k | FROM_GC(unreachable), |
816 | 53.9k | (unreachable->_gc_next & NEXT_MASK_UNREACHABLE) == 0); |
817 | | |
818 | 53.9k | PyGC_Head *gc, *next; |
819 | 1.29M | for (gc = GC_NEXT(unreachable); gc != unreachable; gc = next) { |
820 | 1.24M | _PyObject_ASSERT((PyObject*)FROM_GC(gc), gc->_gc_next & NEXT_MASK_UNREACHABLE); |
821 | 1.24M | next = GC_NEXT(gc); |
822 | 1.24M | gc->_gc_next &= ~NEXT_MASK_UNREACHABLE; |
823 | 1.24M | } |
824 | 53.9k | validate_list(unreachable, collecting_set_unreachable_clear); |
825 | 53.9k | } |
826 | | |
827 | | /* A traversal callback for move_legacy_finalizer_reachable. */ |
828 | | static int |
829 | | visit_move(PyObject *op, void *arg) |
830 | 0 | { |
831 | 0 | PyGC_Head *tolist = arg; |
832 | 0 | OBJECT_STAT_INC(object_visits); |
833 | 0 | if (_PyObject_IS_GC(op)) { |
834 | 0 | PyGC_Head *gc = AS_GC(op); |
835 | 0 | if (gc_is_collecting(gc)) { |
836 | 0 | gc_list_move(gc, tolist); |
837 | 0 | gc_clear_collecting(gc); |
838 | 0 | } |
839 | 0 | } |
840 | 0 | return 0; |
841 | 0 | } |
842 | | |
843 | | /* Move objects that are reachable from finalizers, from the unreachable set |
844 | | * into finalizers set. |
845 | | */ |
846 | | static void |
847 | | move_legacy_finalizer_reachable(PyGC_Head *finalizers) |
848 | 53.9k | { |
849 | 53.9k | traverseproc traverse; |
850 | 53.9k | PyGC_Head *gc = GC_NEXT(finalizers); |
851 | 53.9k | for (; gc != finalizers; gc = GC_NEXT(gc)) { |
852 | | /* Note that the finalizers list may grow during this. */ |
853 | 0 | traverse = Py_TYPE(FROM_GC(gc))->tp_traverse; |
854 | 0 | (void) traverse(FROM_GC(gc), |
855 | 0 | visit_move, |
856 | 0 | (void *)finalizers); |
857 | 0 | } |
858 | 53.9k | } |
859 | | |
860 | | /* Handle weakref callbacks. Note that it's possible for such weakrefs to be |
861 | | * outside the unreachable set -- indeed, those are precisely the weakrefs |
862 | | * whose callbacks must be invoked. See gc_weakref.txt for overview & some |
863 | | * details. |
864 | | * |
865 | | * The clearing of weakrefs is suble and must be done carefully, as there was |
866 | | * previous bugs related to this. First, weakrefs to the unreachable set of |
867 | | * objects must be cleared before we start calling `tp_clear`. If we don't, |
868 | | * those weakrefs can reveal unreachable objects to Python-level code and that |
869 | | * is not safe. Many objects are not usable after `tp_clear` has been called |
870 | | * and could even cause crashes if accessed (see bpo-38006 and gh-91636 as |
871 | | * example bugs). |
872 | | * |
873 | | * Weakrefs with callbacks always need to be cleared before executing the |
874 | | * callback. That's because sometimes a callback will call the ref object, |
875 | | * to check if the reference is actually dead (KeyedRef does this, for |
876 | | * example). We want to indicate that it is dead, even though it is possible |
877 | | * a finalizer might resurrect it. Clearing also prevents the callback from |
878 | | * executing more than once. |
879 | | * |
880 | | * Since Python 2.3, all weakrefs to cyclic garbage have been cleared *before* |
881 | | * calling finalizers. However, since tp_subclasses started being necessary |
882 | | * to invalidate caches (e.g. by PyType_Modified), that clearing has created |
883 | | * a bug. If the weakref to the subclass is cleared before a finalizer is |
884 | | * called, the cache may not be correctly invalidated. That can lead to |
885 | | * segfaults since the caches can refer to deallocated objects (GH-135552 |
886 | | * is an example). Now, we delay the clear of weakrefs without callbacks |
887 | | * until *after* finalizers have been executed. That means weakrefs without |
888 | | * callbacks are still usable while finalizers are being executed. |
889 | | * |
890 | | * The weakrefs that are inside the unreachable set must also be cleared. |
891 | | * The reason this is required is not immediately obvious. If the weakref |
892 | | * refers to an object outside of the unreachable set, that object might go |
893 | | * away when we start clearing objects. Normally, the object should also be |
894 | | * part of the unreachable set but that's not true in the case of incomplete |
895 | | * or missing `tp_traverse` methods. When that object goes away, the callback |
896 | | * for weakref can be executed and that could reveal unreachable objects to |
897 | | * Python-level code. See bpo-38006 as an example bug. |
898 | | */ |
899 | | static int |
900 | | handle_weakref_callbacks(PyGC_Head *unreachable, PyGC_Head *old) |
901 | 53.9k | { |
902 | 53.9k | PyGC_Head *gc; |
903 | 53.9k | PyGC_Head wrcb_to_call; /* weakrefs with callbacks to call */ |
904 | 53.9k | PyGC_Head *next; |
905 | 53.9k | int num_freed = 0; |
906 | | |
907 | 53.9k | gc_list_init(&wrcb_to_call); |
908 | | |
909 | | /* Find all weakrefs with callbacks and move into `wrcb_to_call` if the |
910 | | * callback needs to be invoked. We make another pass over wrcb_to_call, |
911 | | * invoking callbacks, after this pass completes. |
912 | | */ |
913 | 1.29M | for (gc = GC_NEXT(unreachable); gc != unreachable; gc = next) { |
914 | 1.24M | PyWeakReference **wrlist; |
915 | | |
916 | 1.24M | PyObject *op = FROM_GC(gc); |
917 | 1.24M | next = GC_NEXT(gc); |
918 | | |
919 | 1.24M | if (! _PyType_SUPPORTS_WEAKREFS(Py_TYPE(op))) { |
920 | 932k | continue; |
921 | 932k | } |
922 | | |
923 | | /* It supports weakrefs. Does it have any? |
924 | | * |
925 | | * This is never triggered for static types so we can avoid the |
926 | | * (slightly) more costly _PyObject_GET_WEAKREFS_LISTPTR(). |
927 | | */ |
928 | 311k | wrlist = _PyObject_GET_WEAKREFS_LISTPTR_FROM_OFFSET(op); |
929 | | |
930 | | /* `op` may have some weakrefs. March over the list and move the |
931 | | * weakrefs with callbacks that must be called into wrcb_to_call. |
932 | | */ |
933 | 311k | PyWeakReference *next_wr; |
934 | 621k | for (PyWeakReference *wr = *wrlist; wr != NULL; wr = next_wr) { |
935 | | // Get the next list element up front so that iteration can make progress
936 | | // even if we skip clearing this weakref (because _PyWeakref_ClearRef changes
937 | | // the next pointer in the wrlist).
938 | 310k | next_wr = wr->wr_next; |
939 | | |
940 | 310k | if (wr->wr_callback == NULL) { |
941 | | /* no callback */ |
942 | 310k | continue; |
943 | 310k | } |
944 | | |
945 | | // Clear the weakref. See the comments above this function for |
946 | | // reasons why we need to clear weakrefs that have callbacks. |
947 | | // Note that _PyWeakref_ClearRef clears the weakref but leaves the |
948 | | // callback pointer intact. Obscure: it also changes *wrlist. |
949 | 0 | _PyObject_ASSERT((PyObject *)wr, wr->wr_object == op); |
950 | 0 | _PyWeakref_ClearRef(wr); |
951 | 0 | _PyObject_ASSERT((PyObject *)wr, wr->wr_object == Py_None); |
952 | | |
953 | | /* Headache time. `op` is going away, and is weakly referenced by |
954 | | * `wr`, which has a callback. Should the callback be invoked? If wr |
955 | | * is also trash, no: |
956 | | * |
957 | | * 1. There's no need to call it. The object and the weakref are |
958 | | * both going away, so it's legitimate to pretend the weakref is |
959 | | * going away first. The user has to ensure a weakref outlives its |
960 | | * referent if they want a guarantee that the wr callback will get |
961 | | * invoked. |
962 | | * |
963 | | * 2. It may be catastrophic to call it. If the callback is also in |
964 | | * cyclic trash (CT), then although the CT is unreachable from |
965 | | * outside the current generation, CT may be reachable from the |
966 | | * callback. Then the callback could resurrect insane objects. |
967 | | * |
968 | | * Since the callback is never needed and may be unsafe in this |
969 | | * case, wr is simply left in the unreachable set. Note that |
970 | | * clear_weakrefs() will ensure its callback will not trigger |
971 | | * inside delete_garbage(). |
972 | | * |
973 | | * OTOH, if wr isn't part of CT, we should invoke the callback: the |
974 | | * weakref outlived the trash. Note that since wr isn't CT in this |
975 | | * case, its callback can't be CT either -- wr acted as an external |
976 | | * root to this generation, and therefore its callback did too. So |
977 | | * nothing in CT is reachable from the callback either, so it's hard |
978 | | * to imagine how calling it later could create a problem for us. wr |
979 | | * is moved to wrcb_to_call in this case. |
980 | | */ |
981 | 0 | if (gc_is_collecting(AS_GC((PyObject *)wr))) { |
982 | 0 | continue; |
983 | 0 | } |
984 | | |
985 | | /* Create a new reference so that wr can't go away |
986 | | * before we can process it again. |
987 | | */ |
988 | 0 | Py_INCREF(wr); |
989 | | |
990 | | /* Move wr to wrcb_to_call, for the next pass. */ |
991 | 0 | PyGC_Head *wrasgc = AS_GC((PyObject *)wr); |
992 | | // wrasgc is reachable, but next isn't, so they can't be the same |
993 | 0 | _PyObject_ASSERT((PyObject *)wr, wrasgc != next); |
994 | 0 | gc_list_move(wrasgc, &wrcb_to_call); |
995 | 0 | } |
996 | 311k | } |
997 | | |
998 | | /* Invoke the callbacks we decided to honor. It's safe to invoke them |
999 | | * because they can't reference unreachable objects. |
1000 | | */ |
1001 | 53.9k | int visited_space = get_gc_state()->visited_space; |
1002 | 53.9k | while (! gc_list_is_empty(&wrcb_to_call)) { |
1003 | 0 | PyObject *temp; |
1004 | 0 | PyObject *callback; |
1005 | |
1006 | 0 | gc = (PyGC_Head*)wrcb_to_call._gc_next; |
1007 | 0 | PyObject *op = FROM_GC(gc); |
1008 | 0 | _PyObject_ASSERT(op, PyWeakref_Check(op)); |
1009 | 0 | PyWeakReference *wr = (PyWeakReference *)op; |
1010 | 0 | callback = wr->wr_callback; |
1011 | 0 | _PyObject_ASSERT(op, callback != NULL); |
1012 | | |
1013 | | /* copy-paste of weakrefobject.c's handle_callback() */ |
1014 | 0 | temp = PyObject_CallOneArg(callback, (PyObject *)wr); |
1015 | 0 | if (temp == NULL) { |
1016 | 0 | PyErr_FormatUnraisable("Exception ignored on " |
1017 | 0 | "calling weakref callback %R", callback); |
1018 | 0 | } |
1019 | 0 | else { |
1020 | 0 | Py_DECREF(temp); |
1021 | 0 | } |
1022 | | |
1023 | | /* Give up the reference we created in the first pass. When |
1024 | | * op's refcount hits 0 (which it may or may not do right now), |
1025 | | * op's tp_dealloc will decref op->wr_callback too. Note |
1026 | | * that the refcount probably will hit 0 now, and because this |
1027 | | * weakref was reachable to begin with, gc didn't already |
1028 | | * add it to its count of freed objects. Example: a reachable |
1029 | | * weak value dict maps some key to this reachable weakref. |
1030 | | * The callback removes this key->weakref mapping from the |
1031 | | * dict, leaving no other references to the weakref (excepting |
1032 | | * ours). |
1033 | | */ |
1034 | 0 | Py_DECREF(op); |
1035 | 0 | if (wrcb_to_call._gc_next == (uintptr_t)gc) { |
1036 | | /* object is still alive -- move it */ |
1037 | 0 | gc_set_old_space(gc, visited_space); |
1038 | 0 | gc_list_move(gc, old); |
1039 | 0 | } |
1040 | 0 | else { |
1041 | 0 | ++num_freed; |
1042 | 0 | } |
1043 | 0 | } |
1044 | | |
1045 | 53.9k | return num_freed; |
1046 | 53.9k | } |
1047 | | |
1048 | | /* Clear all weakrefs to unreachable objects. When this returns, no object in |
1049 | | * `unreachable` is weakly referenced anymore. See the comments above |
1050 | | * handle_weakref_callbacks() for why these weakrefs need to be cleared. |
1051 | | */ |
1052 | | static void |
1053 | | clear_weakrefs(PyGC_Head *unreachable) |
1054 | 53.9k | { |
1055 | 53.9k | PyGC_Head *gc; |
1056 | 53.9k | PyGC_Head *next; |
1057 | | |
1058 | 1.29M | for (gc = GC_NEXT(unreachable); gc != unreachable; gc = next) { |
1059 | 1.24M | PyWeakReference **wrlist; |
1060 | | |
1061 | 1.24M | PyObject *op = FROM_GC(gc); |
1062 | 1.24M | next = GC_NEXT(gc); |
1063 | | |
1064 | 1.24M | if (PyWeakref_Check(op)) { |
1065 | | /* A weakref inside the unreachable set is always cleared. See |
1066 | | * the comments above handle_weakref_callbacks() for why these |
1067 | | * must be cleared. |
1068 | | */ |
1069 | 0 | _PyWeakref_ClearRef((PyWeakReference *)op); |
1070 | 0 | } |
1071 | | |
1072 | 1.24M | if (! _PyType_SUPPORTS_WEAKREFS(Py_TYPE(op))) { |
1073 | 932k | continue; |
1074 | 932k | } |
1075 | | |
1076 | | /* It supports weakrefs. Does it have any? |
1077 | | * |
1078 | | * This is never triggered for static types so we can avoid the |
1079 | | * (slightly) more costly _PyObject_GET_WEAKREFS_LISTPTR(). |
1080 | | */ |
1081 | 311k | wrlist = _PyObject_GET_WEAKREFS_LISTPTR_FROM_OFFSET(op); |
1082 | | |
1083 | | /* `op` may have some weakrefs. March over the list, clear |
1084 | | * all the weakrefs. |
1085 | | */ |
1086 | 621k | for (PyWeakReference *wr = *wrlist; wr != NULL; wr = *wrlist) { |
1087 | | /* _PyWeakref_ClearRef clears the weakref but leaves |
1088 | | * the callback pointer intact. Obscure: it also |
1089 | | * changes *wrlist. |
1090 | | */ |
1091 | 310k | _PyObject_ASSERT((PyObject *)wr, wr->wr_object == op); |
1092 | 310k | _PyWeakref_ClearRef(wr); |
1093 | 310k | _PyObject_ASSERT((PyObject *)wr, wr->wr_object == Py_None); |
1094 | 310k | } |
1095 | 311k | } |
1096 | 53.9k | } |
1097 | | |
1098 | | static void |
1099 | | debug_cycle(const char *msg, PyObject *op) |
1100 | 0 | { |
1101 | 0 | PySys_FormatStderr("gc: %s <%s %p>\n", |
1102 | 0 | msg, Py_TYPE(op)->tp_name, op); |
1103 | 0 | } |
1104 | | |
1105 | | /* Handle uncollectable garbage (cycles with tp_del slots, and stuff reachable |
1106 | | * only from such cycles). |
1107 | | * If _PyGC_DEBUG_SAVEALL, all objects in finalizers are appended to the module |
1108 | | * garbage list (a Python list), else only the objects in finalizers with |
1109 | | * __del__ methods are appended to garbage. All objects in finalizers are |
1110 | | * merged into the old list regardless. |
1111 | | */ |
1112 | | static void |
1113 | | handle_legacy_finalizers(PyThreadState *tstate, |
1114 | | GCState *gcstate, |
1115 | | PyGC_Head *finalizers, PyGC_Head *old) |
1116 | 53.9k | { |
1117 | 53.9k | assert(!_PyErr_Occurred(tstate)); |
1118 | 53.9k | assert(gcstate->garbage != NULL); |
1119 | | |
1120 | 53.9k | PyGC_Head *gc = GC_NEXT(finalizers); |
1121 | 53.9k | for (; gc != finalizers; gc = GC_NEXT(gc)) { |
1122 | 0 | PyObject *op = FROM_GC(gc); |
1123 | |
1124 | 0 | if ((gcstate->debug & _PyGC_DEBUG_SAVEALL) || has_legacy_finalizer(op)) { |
1125 | 0 | if (PyList_Append(gcstate->garbage, op) < 0) { |
1126 | 0 | _PyErr_Clear(tstate); |
1127 | 0 | break; |
1128 | 0 | } |
1129 | 0 | } |
1130 | 0 | } |
1131 | | |
1132 | 53.9k | gc_list_merge(finalizers, old); |
1133 | 53.9k | } |
1134 | | |
1135 | | /* Run first-time finalizers (if any) on all the objects in collectable. |
1136 | | * Note that this may remove some (or even all) of the objects from the |
1137 | | * list, due to refcounts falling to 0. |
1138 | | */ |
1139 | | static void |
1140 | | finalize_garbage(PyThreadState *tstate, PyGC_Head *collectable) |
1141 | 53.9k | { |
1142 | 53.9k | destructor finalize; |
1143 | 53.9k | PyGC_Head seen; |
1144 | | |
1145 | | /* While we're going through the loop, `finalize(op)` may cause op, or |
1146 | | * other objects, to be reclaimed via refcounts falling to zero. So |
1147 | | * there's little we can rely on about the structure of the input |
1148 | | * `collectable` list across iterations. For safety, we always take the |
1149 | | * first object in that list and move it to a temporary `seen` list. |
1150 | | * If objects vanish from the `collectable` and `seen` lists we don't |
1151 | | * care. |
1152 | | */ |
1153 | 53.9k | gc_list_init(&seen); |
1154 | | |
1155 | 1.29M | while (!gc_list_is_empty(collectable)) { |
1156 | 1.24M | PyGC_Head *gc = GC_NEXT(collectable); |
1157 | 1.24M | PyObject *op = FROM_GC(gc); |
1158 | 1.24M | gc_list_move(gc, &seen); |
1159 | 1.24M | if (!_PyGC_FINALIZED(op) && |
1160 | 1.24M | (finalize = Py_TYPE(op)->tp_finalize) != NULL) |
1161 | 0 | { |
1162 | 0 | _PyGC_SET_FINALIZED(op); |
1163 | 0 | Py_INCREF(op); |
1164 | 0 | finalize(op); |
1165 | 0 | assert(!_PyErr_Occurred(tstate)); |
1166 | 0 | Py_DECREF(op); |
1167 | 0 | } |
1168 | 1.24M | } |
1169 | 53.9k | gc_list_merge(&seen, collectable); |
1170 | 53.9k | } |
1171 | | |
1172 | | /* Break reference cycles by clearing the containers involved. This is |
1173 | | * tricky business as the lists can be changing and we don't know which |
1174 | | * objects may be freed. It is possible I screwed something up here. |
1175 | | */ |
1176 | | static void |
1177 | | delete_garbage(PyThreadState *tstate, GCState *gcstate, |
1178 | | PyGC_Head *collectable, PyGC_Head *old) |
1179 | 53.9k | { |
1180 | 53.9k | assert(!_PyErr_Occurred(tstate)); |
1181 | | |
1182 | 967k | while (!gc_list_is_empty(collectable)) { |
1183 | 913k | PyGC_Head *gc = GC_NEXT(collectable); |
1184 | 913k | PyObject *op = FROM_GC(gc); |
1185 | | |
1186 | 913k | _PyObject_ASSERT_WITH_MSG(op, Py_REFCNT(op) > 0, |
1187 | 913k | "refcount is too small"); |
1188 | | |
1189 | 913k | if (gcstate->debug & _PyGC_DEBUG_SAVEALL) { |
1190 | 0 | assert(gcstate->garbage != NULL); |
1191 | 0 | if (PyList_Append(gcstate->garbage, op) < 0) { |
1192 | 0 | _PyErr_Clear(tstate); |
1193 | 0 | } |
1194 | 0 | } |
1195 | 913k | else { |
1196 | 913k | inquiry clear; |
1197 | 913k | if ((clear = Py_TYPE(op)->tp_clear) != NULL) { |
1198 | 612k | Py_INCREF(op); |
1199 | 612k | (void) clear(op); |
1200 | 612k | if (_PyErr_Occurred(tstate)) { |
1201 | 0 | PyErr_FormatUnraisable("Exception ignored in tp_clear of %s", |
1202 | 0 | Py_TYPE(op)->tp_name); |
1203 | 0 | } |
1204 | 612k | Py_DECREF(op); |
1205 | 612k | } |
1206 | 913k | } |
1207 | 913k | if (GC_NEXT(collectable) == gc) { |
1208 | | /* object is still alive, move it, it may die later */ |
1209 | 603k | gc_clear_collecting(gc); |
1210 | 603k | gc_list_move(gc, old); |
1211 | 603k | } |
1212 | 913k | } |
1213 | 53.9k | } |
1214 | | |
1215 | | |
1216 | | /* Deduce which objects among "base" are unreachable from outside the list |
1217 | | and move them to 'unreachable'. The process consists of the following steps:
1218 | | |
1219 | | 1. Copy all reference counts to a different field (gc_prev is used to hold |
1220 | | this copy to save memory). |
1221 | | 2. Traverse all objects in "base" and visit all referenced objects using
1222 | | "tp_traverse"; for every visited object, subtract 1 from the reference
1223 | | count (the one that we copied in the previous step). After this step, all
1224 | | objects that can be reached directly from outside must have a strictly positive
1225 | | reference count, while all unreachable objects must have a count of exactly 0.
1226 | | 3. Identify all unreachable objects (the ones with 0 reference count) and move |
1227 | | them to the "unreachable" list. This step also needs to move back to "base" all |
1228 | | objects that were initially marked as unreachable but are referred transitively |
1229 | | by the reachable objects (the ones with strictly positive reference count). |
1230 | | |
1231 | | Contracts: |
1232 | | |
1233 | | * The "base" has to be a valid list with no mask set. |
1234 | | |
1235 | | * The "unreachable" list must be uninitialized (this function calls |
1236 | | gc_list_init over 'unreachable'). |
1237 | | |
1238 | | IMPORTANT: This function leaves 'unreachable' with the NEXT_MASK_UNREACHABLE |
1239 | | flag set; it deliberately does not clear it, to skip an unnecessary iteration.
1240 | | Before the flag is cleared (for example, by the 'clear_unreachable_mask' function
1241 | | or by a call to 'move_legacy_finalizers'), the 'unreachable' list is not a normal
1242 | | list and we cannot use most gc_list_* functions for it. */
1243 | | static inline void |
1244 | 107k | deduce_unreachable(PyGC_Head *base, PyGC_Head *unreachable) { |
1245 | 107k | validate_list(base, collecting_clear_unreachable_clear); |
1246 | | /* Using ob_refcnt and gc_refs, calculate which objects in the |
1247 | | * container set are reachable from outside the set (i.e., have a |
1248 | | * refcount greater than 0 when all the references within the |
1249 | | * set are taken into account). |
1250 | | */ |
1251 | 107k | update_refs(base); // gc_prev is used for gc_refs |
1252 | 107k | subtract_refs(base); |
1253 | | |
1254 | | /* Leave everything reachable from outside base in base, and move |
1255 | | * everything else (in base) to unreachable. |
1256 | | * |
1257 | | * NOTE: This used to move the reachable objects into a reachable |
1258 | | * set instead. But most things usually turn out to be reachable, |
1259 | | * so it's more efficient to move the unreachable things. It "sounds slick" |
1260 | | * to move the unreachable objects, until you think about it - the reason it |
1261 | | * pays isn't actually obvious. |
1262 | | * |
1263 | | * Suppose we create objects A, B, C in that order. They appear in the young |
1264 | | * generation in the same order. If B points to A, and C to B, and C is |
1265 | | * reachable from outside, then the adjusted refcounts will be 0, 0, and 1 |
1266 | | * respectively. |
1267 | | * |
1268 | | * When move_unreachable finds A, A is moved to the unreachable list. The |
1269 | | * same for B when it's first encountered. Then C is traversed, B is moved |
1270 | | * _back_ to the reachable list. B is eventually traversed, and then A is |
1271 | | * moved back to the reachable list. |
1272 | | * |
1273 | | * So instead of not moving at all, the reachable objects B and A are moved |
1274 | | * twice each. Why is this a win? A straightforward algorithm to move the |
1275 | | * reachable objects instead would move A, B, and C once each. |
1276 | | * |
1277 | | * The key is that this dance leaves the objects in order C, B, A - it's |
1278 | | * reversed from the original order. On all _subsequent_ scans, none of |
1279 | | * them will move. Since most objects aren't in cycles, this can save an |
1280 | | * unbounded number of moves across an unbounded number of later collections. |
1281 | | * It can cost more only the first time the chain is scanned. |
1282 | | * |
1283 | | * Drawback: move_unreachable is also used to find out what's still trash |
1284 | | * after finalizers may resurrect objects. In _that_ case most unreachable |
1285 | | * objects will remain unreachable, so it would be more efficient to move |
1286 | | * the reachable objects instead. But this is a one-time cost, probably not |
1287 | | * worth complicating the code to speed just a little. |
1288 | | */ |
1289 | 107k | move_unreachable(base, unreachable); // gc_prev is pointer again |
1290 | 107k | validate_list(base, collecting_clear_unreachable_clear); |
1291 | 107k | validate_list(unreachable, collecting_set_unreachable_set); |
1292 | 107k | } |
1293 | | |
1294 | | /* Handle objects that may have resurrected after a call to 'finalize_garbage', moving |
1295 | | them to 'old_generation' and placing the rest on 'still_unreachable'. |
1296 | | |
1297 | | Contracts: |
1298 | | * After this function 'unreachable' must not be used anymore and 'still_unreachable' |
1299 | | will contain the objects that did not resurrect. |
1300 | | |
1301 | | * The "still_unreachable" list must be uninitialized (this function calls |
1302 | | gc_list_init over 'still_unreachable'). |
1303 | | |
1304 | | IMPORTANT: After a call to this function, the 'still_unreachable' set will have the |
1305 | | PREV_MASK_COLLECTING flag set, but the objects in this set are going to be removed so
1306 | | we can skip the expense of clearing the flag to avoid extra iteration. */ |
1307 | | static inline void |
1308 | | handle_resurrected_objects(PyGC_Head *unreachable, PyGC_Head* still_unreachable, |
1309 | | PyGC_Head *old_generation) |
1310 | 53.9k | { |
1311 | | // Remove the PREV_MASK_COLLECTING from unreachable |
1312 | | // to prepare it for a new call to 'deduce_unreachable' |
1313 | 53.9k | gc_list_clear_collecting(unreachable); |
1314 | | |
1315 | | // After the call to deduce_unreachable, the 'still_unreachable' set will |
1316 | | // have the PREV_MARK_COLLECTING set, but the objects are going to be |
1317 | | // removed so we can skip the expense of clearing the flag. |
1318 | 53.9k | PyGC_Head* resurrected = unreachable; |
1319 | 53.9k | deduce_unreachable(resurrected, still_unreachable); |
1320 | 53.9k | clear_unreachable_mask(still_unreachable); |
1321 | | |
1322 | | // Move the resurrected objects to the old generation for future collection. |
1323 | 53.9k | gc_list_merge(resurrected, old_generation); |
1324 | 53.9k | } |
1325 | | |
1326 | | static void |
1327 | | gc_collect_region(PyThreadState *tstate, |
1328 | | PyGC_Head *from, |
1329 | | PyGC_Head *to, |
1330 | | struct gc_collection_stats *stats); |
1331 | | |
1332 | | static inline Py_ssize_t |
1333 | | gc_list_set_space(PyGC_Head *list, int space) |
1334 | 58.3k | { |
1335 | 58.3k | Py_ssize_t size = 0; |
1336 | 58.3k | PyGC_Head *gc; |
1337 | 39.9M | for (gc = GC_NEXT(list); gc != list; gc = GC_NEXT(gc)) { |
1338 | 39.9M | gc_set_old_space(gc, space); |
1339 | 39.9M | size++; |
1340 | 39.9M | } |
1341 | 58.3k | return size; |
1342 | 58.3k | } |
1343 | | |
1344 | | /* Making progress in the incremental collector |
1345 | | * In order to eventually collect all cycles, |
1346 | | * the incremental collector must progress through the old |
1347 | | * space faster than objects are added to the old space. |
1348 | | * |
1349 | | * Each young or incremental collection adds a number of |
1350 | | * objects, S (for survivors), to the old space, and |
1351 | | * incremental collectors scan I objects from the old space. |
1352 | | * I > S must be true. We also want I > S * N to be true, where |
1353 | | * N > 1. Higher values of N mean that the old space is |
1354 | | * scanned more rapidly. |
1355 | | * The default incremental threshold of 10 translates to |
1356 | | * N == 1.4 (1 + 4/threshold) |
1357 | | */ |
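/* Editor's note: a worked example of the pacing formula above; the numbers
 * are made up for illustration and are not measurements from this file:
 *
 *     With the default old-space threshold of 10, N == 1 + 4/10 == 1.4.
 *     If a collection promotes S == 1000 survivors into the old space, the
 *     incremental collector therefore aims to scan roughly
 *     I == N * S == 1400 old-space objects, so the old space is consumed
 *     faster than it grows and every cycle is eventually examined.
 */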
1358 | | |
1359 | | /* Divide by 10, so that the default incremental threshold of 10 |
1360 | | * scans objects at 1% of the heap size */ |
1361 | 112k | #define SCAN_RATE_DIVISOR 10 |
1362 | | |
1363 | | static void |
1364 | | add_stats(GCState *gcstate, int gen, struct gc_collection_stats *stats) |
1365 | 53.9k | { |
1366 | 53.9k | gcstate->generation_stats[gen].collected += stats->collected; |
1367 | 53.9k | gcstate->generation_stats[gen].uncollectable += stats->uncollectable; |
1368 | 53.9k | gcstate->generation_stats[gen].collections += 1; |
1369 | 53.9k | } |
1370 | | |
1371 | | static void |
1372 | | gc_collect_young(PyThreadState *tstate, |
1373 | | struct gc_collection_stats *stats) |
1374 | 0 | { |
1375 | 0 | GCState *gcstate = &tstate->interp->gc; |
1376 | 0 | validate_spaces(gcstate); |
1377 | 0 | PyGC_Head *young = &gcstate->young.head; |
1378 | 0 | PyGC_Head *visited = &gcstate->old[gcstate->visited_space].head; |
1379 | 0 | untrack_tuples(young); |
1380 | 0 | GC_STAT_ADD(0, collections, 1); |
1381 | |
1382 | 0 | PyGC_Head survivors; |
1383 | 0 | gc_list_init(&survivors); |
1384 | 0 | gc_list_set_space(young, gcstate->visited_space); |
1385 | 0 | gc_collect_region(tstate, young, &survivors, stats); |
1386 | 0 | gc_list_merge(&survivors, visited); |
1387 | 0 | validate_spaces(gcstate); |
1388 | 0 | gcstate->young.count = 0; |
1389 | 0 | gcstate->old[gcstate->visited_space].count++; |
1390 | 0 | add_stats(gcstate, 0, stats); |
1391 | 0 | validate_spaces(gcstate); |
1392 | 0 | } |
1393 | | |
1394 | | #ifndef NDEBUG |
1395 | | static inline int |
1396 | | IS_IN_VISITED(PyGC_Head *gc, int visited_space) |
1397 | | { |
1398 | | assert(visited_space == 0 || other_space(visited_space) == 0); |
1399 | | return gc_old_space(gc) == visited_space; |
1400 | | } |
1401 | | #endif |
1402 | | |
1403 | | struct container_and_flag { |
1404 | | PyGC_Head *container; |
1405 | | int visited_space; |
1406 | | intptr_t size; |
1407 | | }; |
1408 | | |
1409 | | /* A traversal callback for adding objects to a container */ |
1410 | | static int |
1411 | | visit_add_to_container(PyObject *op, void *arg) |
1412 | 944M | { |
1413 | 944M | OBJECT_STAT_INC(object_visits); |
1414 | 944M | struct container_and_flag *cf = (struct container_and_flag *)arg; |
1415 | 944M | int visited = cf->visited_space; |
1416 | 944M | assert(visited == get_gc_state()->visited_space); |
1417 | 944M | if (!_Py_IsImmortal(op) && _PyObject_IS_GC(op)) { |
1418 | 637M | PyGC_Head *gc = AS_GC(op); |
1419 | 637M | if (_PyObject_GC_IS_TRACKED(op) && |
1420 | 637M | gc_old_space(gc) != visited) { |
1421 | 87.1M | gc_flip_old_space(gc); |
1422 | 87.1M | gc_list_move(gc, cf->container); |
1423 | 87.1M | cf->size++; |
1424 | 87.1M | } |
1425 | 637M | } |
1426 | 944M | return 0; |
1427 | 944M | } |
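/* Editor's note: a minimal sketch of how a visitor such as
 * visit_add_to_container() is driven. The visitor is never called directly;
 * it is handed to an object's tp_traverse slot, which invokes it once per
 * referent (this is the pattern used by expand_region_transitively_reachable()
 * and mark_all_reachable() below). 'some_list' and 'op' are hypothetical:
 *
 *     struct container_and_flag arg = {
 *         .container = some_list,                  // hypothetical PyGC_Head *
 *         .visited_space = get_gc_state()->visited_space,
 *         .size = 0,
 *     };
 *     traverseproc traverse = Py_TYPE(op)->tp_traverse;
 *     (void) traverse(op, visit_add_to_container, &arg);
 *     // arg.size now counts the referents newly moved into some_list.
 */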
1428 | | |
1429 | | static intptr_t |
1430 | | expand_region_transitively_reachable(PyGC_Head *container, PyGC_Head *gc, GCState *gcstate) |
1431 | 1.13M | { |
1432 | 1.13M | struct container_and_flag arg = { |
1433 | 1.13M | .container = container, |
1434 | 1.13M | .visited_space = gcstate->visited_space, |
1435 | 1.13M | .size = 0 |
1436 | 1.13M | }; |
1437 | 1.13M | assert(GC_NEXT(gc) == container); |
1438 | 4.94M | while (gc != container) { |
1439 | | /* Survivors will be moved to visited space, so they should |
1440 | | * have been marked as visited */ |
1441 | 3.81M | assert(IS_IN_VISITED(gc, gcstate->visited_space)); |
1442 | 3.81M | PyObject *op = FROM_GC(gc); |
1443 | 3.81M | assert(_PyObject_GC_IS_TRACKED(op)); |
1444 | 3.81M | if (_Py_IsImmortal(op)) { |
1445 | 0 | PyGC_Head *next = GC_NEXT(gc); |
1446 | 0 | gc_list_move(gc, &gcstate->permanent_generation.head); |
1447 | 0 | gc = next; |
1448 | 0 | continue; |
1449 | 0 | } |
1450 | 3.81M | traverseproc traverse = Py_TYPE(op)->tp_traverse; |
1451 | 3.81M | (void) traverse(op, |
1452 | 3.81M | visit_add_to_container, |
1453 | 3.81M | &arg); |
1454 | 3.81M | gc = GC_NEXT(gc); |
1455 | 3.81M | } |
1456 | 1.13M | return arg.size; |
1457 | 1.13M | } |
1458 | | |
1459 | | /* Do bookkeeping for a completed GC cycle */ |
1460 | | static void |
1461 | | completed_scavenge(GCState *gcstate) |
1462 | 4.43k | { |
1463 | | /* We must observe two invariants: |
1464 | | * 1. Members of the permanent generation must be marked visited. |
1465 | | * 2. We cannot touch members of the permanent generation. */ |
1466 | 4.43k | int visited; |
1467 | 4.43k | if (gc_list_is_empty(&gcstate->permanent_generation.head)) { |
1468 | | /* Permanent generation is empty, so we can flip the spaces bit */ |
1469 | 4.43k | int not_visited = gcstate->visited_space; |
1470 | 4.43k | visited = other_space(not_visited); |
1471 | 4.43k | gcstate->visited_space = visited; |
1472 | | /* Make sure all objects have the visited bit set correctly */ |
1473 | 4.43k | gc_list_set_space(&gcstate->young.head, not_visited); |
1474 | 4.43k | } |
1475 | 0 | else { |
1476 | | /* We must move the objects from visited to pending space. */ |
1477 | 0 | visited = gcstate->visited_space; |
1478 | 0 | int not_visited = other_space(visited); |
1479 | 0 | assert(gc_list_is_empty(&gcstate->old[not_visited].head)); |
1480 | 0 | gc_list_merge(&gcstate->old[visited].head, &gcstate->old[not_visited].head); |
1481 | 0 | gc_list_set_space(&gcstate->old[not_visited].head, not_visited); |
1482 | 0 | } |
1483 | 4.43k | assert(gc_list_is_empty(&gcstate->old[visited].head)); |
1484 | 4.43k | gcstate->work_to_do = 0; |
1485 | 4.43k | gcstate->phase = GC_PHASE_MARK; |
1486 | 4.43k | } |
1487 | | |
1488 | | static intptr_t |
1489 | | move_to_reachable(PyObject *op, PyGC_Head *reachable, int visited_space) |
1490 | 3.52M | { |
1491 | 3.52M | if (op != NULL && !_Py_IsImmortal(op) && _PyObject_IS_GC(op)) { |
1492 | 1.90M | PyGC_Head *gc = AS_GC(op); |
1493 | 1.90M | if (_PyObject_GC_IS_TRACKED(op) && |
1494 | 1.90M | gc_old_space(gc) != visited_space) { |
1495 | 1.09M | gc_flip_old_space(gc); |
1496 | 1.09M | gc_list_move(gc, reachable); |
1497 | 1.09M | return 1; |
1498 | 1.09M | } |
1499 | 1.90M | } |
1500 | 2.43M | return 0; |
1501 | 3.52M | } |
1502 | | |
1503 | | static intptr_t |
1504 | | mark_all_reachable(PyGC_Head *reachable, PyGC_Head *visited, int visited_space) |
1505 | 62.8k | { |
1506 | | // Transitively traverse all objects from reachable, until empty |
1507 | 62.8k | struct container_and_flag arg = { |
1508 | 62.8k | .container = reachable, |
1509 | 62.8k | .visited_space = visited_space, |
1510 | 62.8k | .size = 0 |
1511 | 62.8k | }; |
1512 | 86.6M | while (!gc_list_is_empty(reachable)) { |
1513 | 86.5M | PyGC_Head *gc = _PyGCHead_NEXT(reachable); |
1514 | 86.5M | assert(gc_old_space(gc) == visited_space); |
1515 | 86.5M | gc_list_move(gc, visited); |
1516 | 86.5M | PyObject *op = FROM_GC(gc); |
1517 | 86.5M | traverseproc traverse = Py_TYPE(op)->tp_traverse; |
1518 | 86.5M | (void) traverse(op, |
1519 | 86.5M | visit_add_to_container, |
1520 | 86.5M | &arg); |
1521 | 86.5M | } |
1522 | 62.8k | gc_list_validate_space(visited, visited_space); |
1523 | 62.8k | return arg.size; |
1524 | 62.8k | } |
1525 | | |
1526 | | static intptr_t |
1527 | | mark_stacks(PyInterpreterState *interp, PyGC_Head *visited, int visited_space, bool start) |
1528 | 58.3k | { |
1529 | 58.3k | PyGC_Head reachable; |
1530 | 58.3k | gc_list_init(&reachable); |
1531 | 58.3k | Py_ssize_t objects_marked = 0; |
1532 | | // Move all objects on stacks to reachable |
1533 | 58.3k | _PyRuntimeState *runtime = &_PyRuntime; |
1534 | 58.3k | HEAD_LOCK(runtime); |
1535 | 58.3k | PyThreadState* ts = PyInterpreterState_ThreadHead(interp); |
1536 | 58.3k | HEAD_UNLOCK(runtime); |
1537 | 116k | while (ts) { |
1538 | 58.3k | _PyInterpreterFrame *frame = ts->current_frame; |
1539 | 887k | while (frame) { |
1540 | 881k | if (frame->owner >= FRAME_OWNED_BY_INTERPRETER) { |
1541 | 60.6k | frame = frame->previous; |
1542 | 60.6k | continue; |
1543 | 60.6k | } |
1544 | 820k | _PyStackRef *locals = frame->localsplus; |
1545 | 820k | _PyStackRef *sp = frame->stackpointer; |
1546 | 820k | objects_marked += move_to_reachable(frame->f_locals, &reachable, visited_space); |
1547 | 820k | PyObject *func = PyStackRef_AsPyObjectBorrow(frame->f_funcobj); |
1548 | 820k | objects_marked += move_to_reachable(func, &reachable, visited_space); |
1549 | 4.81M | while (sp > locals) { |
1550 | 3.99M | sp--; |
1551 | 3.99M | if (PyStackRef_IsNullOrInt(*sp)) { |
1552 | 1.07M | continue; |
1553 | 1.07M | } |
1554 | 2.92M | PyObject *op = PyStackRef_AsPyObjectBorrow(*sp); |
1555 | 2.92M | if (_Py_IsImmortal(op)) { |
1556 | 279k | continue; |
1557 | 279k | } |
1558 | 2.64M | if (_PyObject_IS_GC(op)) { |
1559 | 1.82M | PyGC_Head *gc = AS_GC(op); |
1560 | 1.82M | if (_PyObject_GC_IS_TRACKED(op) && |
1561 | 1.82M | gc_old_space(gc) != visited_space) { |
1562 | 972k | gc_flip_old_space(gc); |
1563 | 972k | objects_marked++; |
1564 | 972k | gc_list_move(gc, &reachable); |
1565 | 972k | } |
1566 | 1.82M | } |
1567 | 2.64M | } |
1568 | 820k | if (!start && frame->visited) { |
1569 | | // If this frame has already been visited, then the lower frames |
1570 | | // will have already been visited and will not have changed |
1571 | 52.5k | break; |
1572 | 52.5k | } |
1573 | 768k | frame->visited = 1; |
1574 | 768k | frame = frame->previous; |
1575 | 768k | } |
1576 | 58.3k | HEAD_LOCK(runtime); |
1577 | 58.3k | ts = PyThreadState_Next(ts); |
1578 | 58.3k | HEAD_UNLOCK(runtime); |
1579 | 58.3k | } |
1580 | 58.3k | objects_marked += mark_all_reachable(&reachable, visited, visited_space); |
1581 | 58.3k | assert(gc_list_is_empty(&reachable)); |
1582 | 58.3k | return objects_marked; |
1583 | 58.3k | } |
1584 | | |
1585 | | static intptr_t |
1586 | | mark_global_roots(PyInterpreterState *interp, PyGC_Head *visited, int visited_space) |
1587 | 4.44k | { |
1588 | 4.44k | PyGC_Head reachable; |
1589 | 4.44k | gc_list_init(&reachable); |
1590 | 4.44k | Py_ssize_t objects_marked = 0; |
1591 | 4.44k | objects_marked += move_to_reachable(interp->sysdict, &reachable, visited_space); |
1592 | 4.44k | objects_marked += move_to_reachable(interp->builtins, &reachable, visited_space); |
1593 | 4.44k | objects_marked += move_to_reachable(interp->dict, &reachable, visited_space); |
1594 | 4.44k | struct types_state *types = &interp->types; |
1595 | 893k | for (int i = 0; i < _Py_MAX_MANAGED_STATIC_BUILTIN_TYPES; i++) { |
1596 | 889k | objects_marked += move_to_reachable(types->builtins.initialized[i].tp_dict, &reachable, visited_space); |
1597 | 889k | objects_marked += move_to_reachable(types->builtins.initialized[i].tp_subclasses, &reachable, visited_space); |
1598 | 889k | } |
1599 | 48.9k | for (int i = 0; i < _Py_MAX_MANAGED_STATIC_EXT_TYPES; i++) { |
1600 | 44.4k | objects_marked += move_to_reachable(types->for_extensions.initialized[i].tp_dict, &reachable, visited_space); |
1601 | 44.4k | objects_marked += move_to_reachable(types->for_extensions.initialized[i].tp_subclasses, &reachable, visited_space); |
1602 | 44.4k | } |
1603 | 4.44k | objects_marked += mark_all_reachable(&reachable, visited, visited_space); |
1604 | 4.44k | assert(gc_list_is_empty(&reachable)); |
1605 | 4.44k | return objects_marked; |
1606 | 4.44k | } |
1607 | | |
1608 | | static intptr_t |
1609 | | mark_at_start(PyThreadState *tstate) |
1610 | 4.44k | { |
1611 | | // TO DO -- Make this incremental |
1612 | 4.44k | GCState *gcstate = &tstate->interp->gc; |
1613 | 4.44k | PyGC_Head *visited = &gcstate->old[gcstate->visited_space].head; |
1614 | 4.44k | Py_ssize_t objects_marked = mark_global_roots(tstate->interp, visited, gcstate->visited_space); |
1615 | 4.44k | objects_marked += mark_stacks(tstate->interp, visited, gcstate->visited_space, true); |
1616 | 4.44k | gcstate->work_to_do -= objects_marked; |
1617 | 4.44k | gcstate->phase = GC_PHASE_COLLECT; |
1618 | 4.44k | validate_spaces(gcstate); |
1619 | 4.44k | return objects_marked; |
1620 | 4.44k | } |
1621 | | |
1622 | | static intptr_t |
1623 | | assess_work_to_do(GCState *gcstate) |
1624 | 58.3k | { |
1625 | | /* The amount of work we want to do depends on three things. |
1626 | | * 1. The number of new objects created |
1627 | | * 2. The growth in heap size since the last collection |
1628 | | * 3. The heap size (up to the number of new objects, to avoid quadratic effects) |
1629 | | * |
1630 | | * For a steady state heap, the amount of work to do is three times the number |
1631 | | * of new objects added to the heap. This ensures that we stay ahead in the |
1632 | | * worst case of all new objects being garbage. |
1633 | | * |
1634 | | * This could be improved by tracking survival rates, but it is still a |
1635 | | * large improvement on the non-marking approach. |
1636 | | */ |
1637 | 58.3k | intptr_t scale_factor = gcstate->old[0].threshold; |
1638 | 58.3k | if (scale_factor < 2) { |
1639 | 0 | scale_factor = 2; |
1640 | 0 | } |
1641 | 58.3k | intptr_t new_objects = gcstate->young.count; |
1642 | 58.3k | intptr_t max_heap_fraction = new_objects*3/2; |
1643 | 58.3k | intptr_t heap_fraction = gcstate->heap_size / SCAN_RATE_DIVISOR / scale_factor; |
1644 | 58.3k | if (heap_fraction > max_heap_fraction) { |
1645 | 1.93k | heap_fraction = max_heap_fraction; |
1646 | 1.93k | } |
1647 | 58.3k | gcstate->young.count = 0; |
1648 | 58.3k | return new_objects + heap_fraction; |
1649 | 58.3k | } |
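/* Editor's note: a worked example of the calculation above, with made-up
 * numbers (not measurements from this file). Suppose young.count == 4000,
 * heap_size == 10,000,000 and old[0].threshold == 10:
 *
 *     scale_factor      = 10
 *     max_heap_fraction = 4000 * 3 / 2                     = 6000
 *     heap_fraction     = 10,000,000 / 10 / 10             = 100,000
 *                         -> clamped to max_heap_fraction  = 6000
 *     work to do        = 4000 + 6000                      = 10,000
 *
 * i.e. for a large heap the budget is capped at 2.5x the number of newly
 * created objects, which keeps a single increment bounded.
 */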
1650 | | |
1651 | | static void |
1652 | | gc_collect_increment(PyThreadState *tstate, struct gc_collection_stats *stats) |
1653 | 58.3k | { |
1654 | 58.3k | GC_STAT_ADD(1, collections, 1); |
1655 | 58.3k | GCState *gcstate = &tstate->interp->gc; |
1656 | 58.3k | gcstate->work_to_do += assess_work_to_do(gcstate); |
1657 | 58.3k | untrack_tuples(&gcstate->young.head); |
1658 | 58.3k | if (gcstate->phase == GC_PHASE_MARK) { |
1659 | 4.44k | Py_ssize_t objects_marked = mark_at_start(tstate); |
1660 | 4.44k | GC_STAT_ADD(1, objects_transitively_reachable, objects_marked); |
1661 | 4.44k | gcstate->work_to_do -= objects_marked; |
1662 | 4.44k | validate_spaces(gcstate); |
1663 | 4.44k | return; |
1664 | 4.44k | } |
1665 | 53.9k | PyGC_Head *not_visited = &gcstate->old[gcstate->visited_space^1].head; |
1666 | 53.9k | PyGC_Head *visited = &gcstate->old[gcstate->visited_space].head; |
1667 | 53.9k | PyGC_Head increment; |
1668 | 53.9k | gc_list_init(&increment); |
1669 | 53.9k | int scale_factor = gcstate->old[0].threshold; |
1670 | 53.9k | if (scale_factor < 2) { |
1671 | 0 | scale_factor = 2; |
1672 | 0 | } |
1673 | 53.9k | intptr_t objects_marked = mark_stacks(tstate->interp, visited, gcstate->visited_space, false); |
1674 | 53.9k | GC_STAT_ADD(1, objects_transitively_reachable, objects_marked); |
1675 | 53.9k | gcstate->work_to_do -= objects_marked; |
1676 | 53.9k | gc_list_set_space(&gcstate->young.head, gcstate->visited_space); |
1677 | 53.9k | gc_list_merge(&gcstate->young.head, &increment); |
1678 | 53.9k | gc_list_validate_space(&increment, gcstate->visited_space); |
1679 | 53.9k | Py_ssize_t increment_size = gc_list_size(&increment); |
1680 | 1.18M | while (increment_size < gcstate->work_to_do) { |
1681 | 1.13M | if (gc_list_is_empty(not_visited)) { |
1682 | 4.41k | break; |
1683 | 4.41k | } |
1684 | 1.13M | PyGC_Head *gc = _PyGCHead_NEXT(not_visited); |
1685 | 1.13M | gc_list_move(gc, &increment); |
1686 | 1.13M | increment_size++; |
1687 | 1.13M | assert(!_Py_IsImmortal(FROM_GC(gc))); |
1688 | 1.13M | gc_set_old_space(gc, gcstate->visited_space); |
1689 | 1.13M | increment_size += expand_region_transitively_reachable(&increment, gc, gcstate); |
1690 | 1.13M | } |
1691 | 53.9k | GC_STAT_ADD(1, objects_not_transitively_reachable, increment_size); |
1692 | 53.9k | validate_list(&increment, collecting_clear_unreachable_clear); |
1693 | 53.9k | gc_list_validate_space(&increment, gcstate->visited_space); |
1694 | 53.9k | PyGC_Head survivors; |
1695 | 53.9k | gc_list_init(&survivors); |
1696 | 53.9k | gc_collect_region(tstate, &increment, &survivors, stats); |
1697 | 53.9k | gc_list_merge(&survivors, visited); |
1698 | 53.9k | assert(gc_list_is_empty(&increment)); |
1699 | 53.9k | gcstate->work_to_do += gcstate->heap_size / SCAN_RATE_DIVISOR / scale_factor; |
1700 | 53.9k | gcstate->work_to_do -= increment_size; |
1701 | | |
1702 | 53.9k | add_stats(gcstate, 1, stats); |
1703 | 53.9k | if (gc_list_is_empty(not_visited)) { |
1704 | 4.43k | completed_scavenge(gcstate); |
1705 | 4.43k | } |
1706 | 53.9k | validate_spaces(gcstate); |
1707 | 53.9k | } |
1708 | | |
1709 | | static void |
1710 | | gc_collect_full(PyThreadState *tstate, |
1711 | | struct gc_collection_stats *stats) |
1712 | 0 | { |
1713 | 0 | GC_STAT_ADD(2, collections, 1); |
1714 | 0 | GCState *gcstate = &tstate->interp->gc; |
1715 | 0 | validate_spaces(gcstate); |
1716 | 0 | PyGC_Head *young = &gcstate->young.head; |
1717 | 0 | PyGC_Head *pending = &gcstate->old[gcstate->visited_space^1].head; |
1718 | 0 | PyGC_Head *visited = &gcstate->old[gcstate->visited_space].head; |
1719 | 0 | untrack_tuples(young); |
1720 | | /* merge all generations into visited */ |
1721 | 0 | gc_list_merge(young, pending); |
1722 | 0 | gc_list_validate_space(pending, 1-gcstate->visited_space); |
1723 | 0 | gc_list_set_space(pending, gcstate->visited_space); |
1724 | 0 | gcstate->young.count = 0; |
1725 | 0 | gc_list_merge(pending, visited); |
1726 | 0 | validate_spaces(gcstate); |
1727 | |
1728 | 0 | gc_collect_region(tstate, visited, visited, |
1729 | 0 | stats); |
1730 | 0 | validate_spaces(gcstate); |
1731 | 0 | gcstate->young.count = 0; |
1732 | 0 | gcstate->old[0].count = 0; |
1733 | 0 | gcstate->old[1].count = 0; |
1734 | 0 | completed_scavenge(gcstate); |
1735 | 0 | _PyGC_ClearAllFreeLists(tstate->interp); |
1736 | 0 | validate_spaces(gcstate); |
1737 | 0 | add_stats(gcstate, 2, stats); |
1738 | 0 | } |
1739 | | |
1740 | | /* This is the main function. Read this to understand how the |
1741 | | * collection process works. */ |
1742 | | static void |
1743 | | gc_collect_region(PyThreadState *tstate, |
1744 | | PyGC_Head *from, |
1745 | | PyGC_Head *to, |
1746 | | struct gc_collection_stats *stats) |
1747 | 53.9k | { |
1748 | 53.9k | PyGC_Head unreachable; /* non-problematic unreachable trash */ |
1749 | 53.9k | PyGC_Head finalizers; /* objects with, & reachable from, __del__ */ |
1750 | 53.9k | PyGC_Head *gc; /* initialize to prevent a compiler warning */ |
1751 | 53.9k | GCState *gcstate = &tstate->interp->gc; |
1752 | | |
1753 | 53.9k | assert(gcstate->garbage != NULL); |
1754 | 53.9k | assert(!_PyErr_Occurred(tstate)); |
1755 | | |
1756 | 53.9k | gc_list_init(&unreachable); |
1757 | 53.9k | deduce_unreachable(from, &unreachable); |
1758 | 53.9k | validate_consistent_old_space(from); |
1759 | 53.9k | untrack_tuples(from); |
1760 | | |
1761 | | /* Move reachable objects to next generation. */ |
1762 | 53.9k | validate_consistent_old_space(to); |
1763 | 53.9k | if (from != to) { |
1764 | 53.9k | gc_list_merge(from, to); |
1765 | 53.9k | } |
1766 | 53.9k | validate_consistent_old_space(to); |
1767 | | |
1768 | | /* All objects in unreachable are trash, but objects reachable from |
1769 | | * legacy finalizers (e.g. tp_del) can't safely be deleted. |
1770 | | */ |
1771 | 53.9k | gc_list_init(&finalizers); |
1772 | | // NEXT_MASK_UNREACHABLE is cleared here. |
1773 | | // After move_legacy_finalizers(), unreachable is a normal list. |
1774 | 53.9k | move_legacy_finalizers(&unreachable, &finalizers); |
1775 | | /* finalizers contains the unreachable objects with a legacy finalizer; |
1776 | | * unreachable objects reachable *from* those are also uncollectable, |
1777 | | * and we move those into the finalizers list too. |
1778 | | */ |
1779 | 53.9k | move_legacy_finalizer_reachable(&finalizers); |
1780 | 53.9k | validate_list(&finalizers, collecting_clear_unreachable_clear); |
1781 | 53.9k | validate_list(&unreachable, collecting_set_unreachable_clear); |
1782 | | /* Print debugging information. */ |
1783 | 53.9k | if (gcstate->debug & _PyGC_DEBUG_COLLECTABLE) { |
1784 | 0 | for (gc = GC_NEXT(&unreachable); gc != &unreachable; gc = GC_NEXT(gc)) { |
1785 | 0 | debug_cycle("collectable", FROM_GC(gc)); |
1786 | 0 | } |
1787 | 0 | } |
1788 | | |
1789 | | /* Invoke weakref callbacks as necessary. */ |
1790 | 53.9k | stats->collected += handle_weakref_callbacks(&unreachable, to); |
1791 | 53.9k | gc_list_validate_space(to, gcstate->visited_space); |
1792 | 53.9k | validate_list(to, collecting_clear_unreachable_clear); |
1793 | 53.9k | validate_list(&unreachable, collecting_set_unreachable_clear); |
1794 | | |
1795 | | /* Call tp_finalize on objects which have one. */ |
1796 | 53.9k | finalize_garbage(tstate, &unreachable); |
1797 | | /* Handle any objects that may have resurrected after the call |
1798 | | * to 'finalize_garbage' and continue the collection with the |
1799 | | * objects that are still unreachable */ |
1800 | 53.9k | PyGC_Head final_unreachable; |
1801 | 53.9k | gc_list_init(&final_unreachable); |
1802 | 53.9k | handle_resurrected_objects(&unreachable, &final_unreachable, to); |
1803 | | |
1804 | | /* Clear weakrefs to objects in the unreachable set. See the comments |
1805 | | * above handle_weakref_callbacks() for details. |
1806 | | */ |
1807 | 53.9k | clear_weakrefs(&final_unreachable); |
1808 | | |
1809 | | /* Call tp_clear on objects in the final_unreachable set. This will cause |
1810 | | * the reference cycles to be broken. It may also cause some objects |
1811 | | * in finalizers to be freed. |
1812 | | */ |
1813 | 53.9k | stats->collected += gc_list_size(&final_unreachable); |
1814 | 53.9k | delete_garbage(tstate, gcstate, &final_unreachable, to); |
1815 | | |
1816 | | /* Collect statistics on uncollectable objects found and print |
1817 | | * debugging information. */ |
1818 | 53.9k | Py_ssize_t n = 0; |
1819 | 53.9k | for (gc = GC_NEXT(&finalizers); gc != &finalizers; gc = GC_NEXT(gc)) { |
1820 | 0 | n++; |
1821 | 0 | if (gcstate->debug & _PyGC_DEBUG_UNCOLLECTABLE) |
1822 | 0 | debug_cycle("uncollectable", FROM_GC(gc)); |
1823 | 0 | } |
1824 | 53.9k | stats->uncollectable = n; |
1825 | | /* Append instances in the uncollectable set to a Python |
1826 | | * reachable list of garbage. The programmer has to deal with |
1827 | | * this if they insist on creating this type of structure. |
1828 | | */ |
1829 | 53.9k | handle_legacy_finalizers(tstate, gcstate, &finalizers, to); |
1830 | 53.9k | gc_list_validate_space(to, gcstate->visited_space); |
1831 | 53.9k | validate_list(to, collecting_clear_unreachable_clear); |
1832 | 53.9k | } |
1833 | | |
1834 | | /* Invoke progress callbacks to notify clients that garbage collection |
1835 | | * is starting or stopping |
1836 | | */ |
1837 | | static void |
1838 | | do_gc_callback(GCState *gcstate, const char *phase, |
1839 | | int generation, struct gc_collection_stats *stats) |
1840 | 116k | { |
1841 | 116k | assert(!PyErr_Occurred()); |
1842 | | |
1843 | | /* The local variable cannot be rebound, check it for sanity */ |
1844 | 116k | assert(PyList_CheckExact(gcstate->callbacks)); |
1845 | 116k | PyObject *info = NULL; |
1846 | 116k | if (PyList_GET_SIZE(gcstate->callbacks) != 0) { |
1847 | 0 | info = Py_BuildValue("{sisnsn}", |
1848 | 0 | "generation", generation, |
1849 | 0 | "collected", stats->collected, |
1850 | 0 | "uncollectable", stats->uncollectable); |
1851 | 0 | if (info == NULL) { |
1852 | 0 | PyErr_FormatUnraisable("Exception ignored while invoking gc callbacks"); |
1853 | 0 | return; |
1854 | 0 | } |
1855 | 0 | } |
1856 | | |
1857 | 116k | PyObject *phase_obj = PyUnicode_FromString(phase); |
1858 | 116k | if (phase_obj == NULL) { |
1859 | 0 | Py_XDECREF(info); |
1860 | 0 | PyErr_FormatUnraisable("Exception ignored while invoking gc callbacks"); |
1861 | 0 | return; |
1862 | 0 | } |
1863 | | |
1864 | 116k | PyObject *stack[] = {phase_obj, info}; |
1865 | 116k | for (Py_ssize_t i=0; i<PyList_GET_SIZE(gcstate->callbacks); i++) { |
1866 | 0 | PyObject *r, *cb = PyList_GET_ITEM(gcstate->callbacks, i); |
1867 | 0 | Py_INCREF(cb); /* make sure cb doesn't go away */ |
1868 | 0 | r = PyObject_Vectorcall(cb, stack, 2, NULL); |
1869 | 0 | if (r == NULL) { |
1870 | 0 | PyErr_FormatUnraisable("Exception ignored while " |
1871 | 0 | "calling GC callback %R", cb); |
1872 | 0 | } |
1873 | 0 | else { |
1874 | 0 | Py_DECREF(r); |
1875 | 0 | } |
1876 | 0 | Py_DECREF(cb); |
1877 | 0 | } |
1878 | 116k | Py_DECREF(phase_obj); |
1879 | 116k | Py_XDECREF(info); |
1880 | 116k | assert(!PyErr_Occurred()); |
1881 | 116k | } |
1882 | | |
1883 | | static void |
1884 | | invoke_gc_callback(GCState *gcstate, const char *phase, |
1885 | | int generation, struct gc_collection_stats *stats) |
1886 | 116k | { |
1887 | 116k | if (gcstate->callbacks == NULL) { |
1888 | 0 | return; |
1889 | 0 | } |
1890 | 116k | do_gc_callback(gcstate, phase, generation, stats); |
1891 | 116k | } |
1892 | | |
1893 | | static int |
1894 | | referrersvisit(PyObject* obj, void *arg) |
1895 | 0 | { |
1896 | 0 | PyObject *objs = arg; |
1897 | 0 | Py_ssize_t i; |
1898 | 0 | for (i = 0; i < PyTuple_GET_SIZE(objs); i++) { |
1899 | 0 | if (PyTuple_GET_ITEM(objs, i) == obj) { |
1900 | 0 | return 1; |
1901 | 0 | } |
1902 | 0 | } |
1903 | 0 | return 0; |
1904 | 0 | } |
1905 | | |
1906 | | static int |
1907 | | gc_referrers_for(PyObject *objs, PyGC_Head *list, PyObject *resultlist) |
1908 | 0 | { |
1909 | 0 | PyGC_Head *gc; |
1910 | 0 | PyObject *obj; |
1911 | 0 | traverseproc traverse; |
1912 | 0 | for (gc = GC_NEXT(list); gc != list; gc = GC_NEXT(gc)) { |
1913 | 0 | obj = FROM_GC(gc); |
1914 | 0 | traverse = Py_TYPE(obj)->tp_traverse; |
1915 | 0 | if (obj == objs || obj == resultlist) { |
1916 | 0 | continue; |
1917 | 0 | } |
1918 | 0 | if (traverse(obj, referrersvisit, objs)) { |
1919 | 0 | if (PyList_Append(resultlist, obj) < 0) { |
1920 | 0 | return 0; /* error */ |
1921 | 0 | } |
1922 | 0 | } |
1923 | 0 | } |
1924 | 0 | return 1; /* no error */ |
1925 | 0 | } |
1926 | | |
1927 | | PyObject * |
1928 | | _PyGC_GetReferrers(PyInterpreterState *interp, PyObject *objs) |
1929 | 0 | { |
1930 | 0 | PyObject *result = PyList_New(0); |
1931 | 0 | if (!result) { |
1932 | 0 | return NULL; |
1933 | 0 | } |
1934 | | |
1935 | 0 | GCState *gcstate = &interp->gc; |
1936 | 0 | for (int i = 0; i < NUM_GENERATIONS; i++) { |
1937 | 0 | if (!(gc_referrers_for(objs, GEN_HEAD(gcstate, i), result))) { |
1938 | 0 | Py_DECREF(result); |
1939 | 0 | return NULL; |
1940 | 0 | } |
1941 | 0 | } |
1942 | 0 | return result; |
1943 | 0 | } |
1944 | | |
1945 | | PyObject * |
1946 | | _PyGC_GetObjects(PyInterpreterState *interp, int generation) |
1947 | 0 | { |
1948 | 0 | assert(generation >= -1 && generation < NUM_GENERATIONS); |
1949 | 0 | GCState *gcstate = &interp->gc; |
1950 | |
1951 | 0 | PyObject *result = PyList_New(0); |
1952 | | /* Generation: |
1953 | | * -1: Return all objects |
1954 | | * 0: All young objects |
1955 | | * 1: No objects |
1956 | | * 2: All old objects |
1957 | | */ |
1958 | 0 | if (result == NULL || generation == 1) { |
1959 | 0 | return result; |
1960 | 0 | } |
1961 | 0 | if (generation <= 0) { |
1962 | 0 | if (append_objects(result, &gcstate->young.head)) { |
1963 | 0 | goto error; |
1964 | 0 | } |
1965 | 0 | } |
1966 | 0 | if (generation != 0) { |
1967 | 0 | if (append_objects(result, &gcstate->old[0].head)) { |
1968 | 0 | goto error; |
1969 | 0 | } |
1970 | 0 | if (append_objects(result, &gcstate->old[1].head)) { |
1971 | 0 | goto error; |
1972 | 0 | } |
1973 | 0 | } |
1974 | | |
1975 | 0 | return result; |
1976 | 0 | error: |
1977 | 0 | Py_DECREF(result); |
1978 | 0 | return NULL; |
1979 | 0 | } |
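/* Editor's note: a hypothetical caller of the helper above (not part of this
 * file); it assumes an attached thread state and a valid 'interp' pointer.
 * Pass -1 to get objects from every generation:
 *
 *     PyObject *objs = _PyGC_GetObjects(interp, -1);
 *     if (objs != NULL) {
 *         for (Py_ssize_t i = 0; i < PyList_GET_SIZE(objs); i++) {
 *             PyObject *o = PyList_GET_ITEM(objs, i);   // borrowed reference
 *             PySys_WriteStderr("%s\n", Py_TYPE(o)->tp_name);
 *         }
 *         Py_DECREF(objs);
 *     }
 */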
1980 | | |
1981 | | void |
1982 | | _PyGC_Freeze(PyInterpreterState *interp) |
1983 | 0 | { |
1984 | 0 | GCState *gcstate = &interp->gc; |
1985 | | /* The permanent_generation must be visited */ |
1986 | 0 | gc_list_set_space(&gcstate->young.head, gcstate->visited_space); |
1987 | 0 | gc_list_merge(&gcstate->young.head, &gcstate->permanent_generation.head); |
1988 | 0 | gcstate->young.count = 0; |
1989 | 0 | PyGC_Head*old0 = &gcstate->old[0].head; |
1990 | 0 | PyGC_Head*old1 = &gcstate->old[1].head; |
1991 | 0 | if (gcstate->visited_space) { |
1992 | 0 | gc_list_set_space(old0, 1); |
1993 | 0 | } |
1994 | 0 | else { |
1995 | 0 | gc_list_set_space(old1, 0); |
1996 | 0 | } |
1997 | 0 | gc_list_merge(old0, &gcstate->permanent_generation.head); |
1998 | 0 | gcstate->old[0].count = 0; |
1999 | 0 | gc_list_merge(old1, &gcstate->permanent_generation.head); |
2000 | 0 | gcstate->old[1].count = 0; |
2001 | 0 | validate_spaces(gcstate); |
2002 | 0 | } |
2003 | | |
2004 | | void |
2005 | | _PyGC_Unfreeze(PyInterpreterState *interp) |
2006 | 0 | { |
2007 | 0 | GCState *gcstate = &interp->gc; |
2008 | 0 | gc_list_merge(&gcstate->permanent_generation.head, |
2009 | 0 | &gcstate->old[gcstate->visited_space].head); |
2010 | 0 | validate_spaces(gcstate); |
2011 | 0 | } |
2012 | | |
2013 | | Py_ssize_t |
2014 | | _PyGC_GetFreezeCount(PyInterpreterState *interp) |
2015 | 0 | { |
2016 | 0 | GCState *gcstate = &interp->gc; |
2017 | 0 | return gc_list_size(&gcstate->permanent_generation.head); |
2018 | 0 | } |
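/* Editor's note: a sketch of how the freeze API above is typically used
 * (hypothetical caller, e.g. before fork() to keep pre-existing objects out
 * of future collections; 'interp' is assumed to be a valid interpreter):
 *
 *     _PyGC_Freeze(interp);                    // move the heap to the permanent generation
 *     Py_ssize_t frozen = _PyGC_GetFreezeCount(interp);
 *     PySys_WriteStderr("gc: %zd objects frozen\n", frozen);
 *     ...
 *     _PyGC_Unfreeze(interp);                  // make them collectable again
 */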
2019 | | |
2020 | | /* C API for controlling the state of the garbage collector */ |
2021 | | int |
2022 | | PyGC_Enable(void) |
2023 | 0 | { |
2024 | 0 | GCState *gcstate = get_gc_state(); |
2025 | 0 | int old_state = gcstate->enabled; |
2026 | 0 | gcstate->enabled = 1; |
2027 | 0 | return old_state; |
2028 | 0 | } |
2029 | | |
2030 | | int |
2031 | | PyGC_Disable(void) |
2032 | 0 | { |
2033 | 0 | GCState *gcstate = get_gc_state(); |
2034 | 0 | int old_state = gcstate->enabled; |
2035 | 0 | gcstate->enabled = 0; |
2036 | 0 | return old_state; |
2037 | 0 | } |
2038 | | |
2039 | | int |
2040 | | PyGC_IsEnabled(void) |
2041 | 0 | { |
2042 | 0 | GCState *gcstate = get_gc_state(); |
2043 | 0 | return gcstate->enabled; |
2044 | 0 | } |
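/* Editor's note: a sketch of the usual save/restore pattern with the three
 * functions above (hypothetical caller code, not part of this file):
 *
 *     int was_enabled = PyGC_Disable();        // returns the previous state
 *     ... run code that must not trigger a cyclic collection ...
 *     if (was_enabled) {
 *         PyGC_Enable();
 *     }
 *     assert(PyGC_IsEnabled() == was_enabled);
 */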
2045 | | |
2046 | | // Show stats for objects in each generation |
2047 | | static void |
2048 | | show_stats_each_generations(GCState *gcstate) |
2049 | 0 | { |
2050 | 0 | char buf[100]; |
2051 | 0 | size_t pos = 0; |
2052 | |
2053 | 0 | for (int i = 0; i < NUM_GENERATIONS && pos < sizeof(buf); i++) { |
2054 | 0 | pos += PyOS_snprintf(buf+pos, sizeof(buf)-pos, |
2055 | 0 | " %zd", |
2056 | 0 | gc_list_size(GEN_HEAD(gcstate, i))); |
2057 | 0 | } |
2058 | 0 | PySys_FormatStderr( |
2059 | 0 | "gc: objects in each generation:%s\n" |
2060 | 0 | "gc: objects in permanent generation: %zd\n", |
2061 | 0 | buf, gc_list_size(&gcstate->permanent_generation.head)); |
2062 | 0 | } |
2063 | | |
2064 | | Py_ssize_t |
2065 | | _PyGC_Collect(PyThreadState *tstate, int generation, _PyGC_Reason reason) |
2066 | 58.3k | { |
2067 | 58.3k | GCState *gcstate = &tstate->interp->gc; |
2068 | 58.3k | assert(tstate->current_frame == NULL || tstate->current_frame->stackpointer != NULL); |
2069 | | |
2070 | 58.3k | int expected = 0; |
2071 | 58.3k | if (!_Py_atomic_compare_exchange_int(&gcstate->collecting, &expected, 1)) { |
2072 | | // Don't start a garbage collection if one is already in progress. |
2073 | 0 | return 0; |
2074 | 0 | } |
2075 | | |
2076 | 58.3k | struct gc_collection_stats stats = { 0 }; |
2077 | 58.3k | if (reason != _Py_GC_REASON_SHUTDOWN) { |
2078 | 58.3k | invoke_gc_callback(gcstate, "start", generation, &stats); |
2079 | 58.3k | } |
2080 | 58.3k | if (gcstate->debug & _PyGC_DEBUG_STATS) { |
2081 | 0 | PySys_WriteStderr("gc: collecting generation %d...\n", generation); |
2082 | 0 | show_stats_each_generations(gcstate); |
2083 | 0 | } |
2084 | 58.3k | if (PyDTrace_GC_START_ENABLED()) { |
2085 | 0 | PyDTrace_GC_START(generation); |
2086 | 0 | } |
2087 | 58.3k | PyObject *exc = _PyErr_GetRaisedException(tstate); |
2088 | 58.3k | switch(generation) { |
2089 | 0 | case 0: |
2090 | 0 | gc_collect_young(tstate, &stats); |
2091 | 0 | break; |
2092 | 58.3k | case 1: |
2093 | 58.3k | gc_collect_increment(tstate, &stats); |
2094 | 58.3k | break; |
2095 | 0 | case 2: |
2096 | 0 | gc_collect_full(tstate, &stats); |
2097 | 0 | break; |
2098 | 0 | default: |
2099 | 0 | Py_UNREACHABLE(); |
2100 | 58.3k | } |
2101 | 58.3k | if (PyDTrace_GC_DONE_ENABLED()) { |
2102 | 0 | PyDTrace_GC_DONE(stats.uncollectable + stats.collected); |
2103 | 0 | } |
2104 | 58.3k | if (reason != _Py_GC_REASON_SHUTDOWN) { |
2105 | 58.3k | invoke_gc_callback(gcstate, "stop", generation, &stats); |
2106 | 58.3k | } |
2107 | 58.3k | _PyErr_SetRaisedException(tstate, exc); |
2108 | 58.3k | GC_STAT_ADD(generation, objects_collected, stats.collected); |
2109 | | #ifdef Py_STATS |
2110 | | if (_Py_stats) { |
2111 | | GC_STAT_ADD(generation, object_visits, |
2112 | | _Py_stats->object_stats.object_visits); |
2113 | | _Py_stats->object_stats.object_visits = 0; |
2114 | | } |
2115 | | #endif |
2116 | 58.3k | validate_spaces(gcstate); |
2117 | 58.3k | _Py_atomic_store_int(&gcstate->collecting, 0); |
2118 | 58.3k | return stats.uncollectable + stats.collected; |
2119 | 58.3k | } |
2120 | | |
2121 | | /* Public API to invoke gc.collect() from C */ |
2122 | | Py_ssize_t |
2123 | | PyGC_Collect(void) |
2124 | 0 | { |
2125 | 0 | return _PyGC_Collect(_PyThreadState_GET(), 2, _Py_GC_REASON_MANUAL); |
2126 | 0 | } |
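/* Editor's note: a minimal sketch of calling the public entry point above
 * from an extension (hypothetical caller; requires an attached thread state).
 * The return value is the number of collected plus uncollectable objects:
 *
 *     Py_ssize_t n = PyGC_Collect();
 *     PySys_WriteStderr("gc: full collection found %zd unreachable objects\n", n);
 */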
2127 | | |
2128 | | void |
2129 | | _PyGC_CollectNoFail(PyThreadState *tstate) |
2130 | 0 | { |
2131 | | /* Ideally, this function is only called on interpreter shutdown, |
2132 | | and therefore not recursively. Unfortunately, when there are daemon |
2133 | | threads, a daemon thread can start a cyclic garbage collection |
2134 | | during interpreter shutdown (and then never finish it). |
2135 | | See http://bugs.python.org/issue8713#msg195178 for an example. |
2136 | | */ |
2137 | 0 | _PyGC_Collect(_PyThreadState_GET(), 2, _Py_GC_REASON_SHUTDOWN); |
2138 | 0 | } |
2139 | | |
2140 | | void |
2141 | | _PyGC_DumpShutdownStats(PyInterpreterState *interp) |
2142 | 0 | { |
2143 | 0 | GCState *gcstate = &interp->gc; |
2144 | 0 | if (!(gcstate->debug & _PyGC_DEBUG_SAVEALL) |
2145 | 0 | && gcstate->garbage != NULL && PyList_GET_SIZE(gcstate->garbage) > 0) { |
2146 | 0 | const char *message; |
2147 | 0 | if (gcstate->debug & _PyGC_DEBUG_UNCOLLECTABLE) { |
2148 | 0 | message = "gc: %zd uncollectable objects at shutdown"; |
2149 | 0 | } |
2150 | 0 | else { |
2151 | 0 | message = "gc: %zd uncollectable objects at shutdown; " \ |
2152 | 0 | "use gc.set_debug(gc.DEBUG_UNCOLLECTABLE) to list them"; |
2153 | 0 | } |
2154 | | /* PyErr_WarnFormat does too many things and we are at shutdown, |
2155 | | the warnings module's dependencies (e.g. linecache) may be gone |
2156 | | already. */ |
2157 | 0 | if (PyErr_WarnExplicitFormat(PyExc_ResourceWarning, "gc", 0, |
2158 | 0 | "gc", NULL, message, |
2159 | 0 | PyList_GET_SIZE(gcstate->garbage))) |
2160 | 0 | { |
2161 | 0 | PyErr_FormatUnraisable("Exception ignored in GC shutdown"); |
2162 | 0 | } |
2163 | 0 | if (gcstate->debug & _PyGC_DEBUG_UNCOLLECTABLE) { |
2164 | 0 | PyObject *repr = NULL, *bytes = NULL; |
2165 | 0 | repr = PyObject_Repr(gcstate->garbage); |
2166 | 0 | if (!repr || !(bytes = PyUnicode_EncodeFSDefault(repr))) { |
2167 | 0 | PyErr_FormatUnraisable("Exception ignored in GC shutdown " |
2168 | 0 | "while formatting garbage"); |
2169 | 0 | } |
2170 | 0 | else { |
2171 | 0 | PySys_WriteStderr( |
2172 | 0 | " %s\n", |
2173 | 0 | PyBytes_AS_STRING(bytes) |
2174 | 0 | ); |
2175 | 0 | } |
2176 | 0 | Py_XDECREF(repr); |
2177 | 0 | Py_XDECREF(bytes); |
2178 | 0 | } |
2179 | 0 | } |
2180 | 0 | } |
2181 | | |
2182 | | static void |
2183 | 0 | finalize_unlink_gc_head(PyGC_Head *gc) { |
2184 | 0 | PyGC_Head *prev = GC_PREV(gc); |
2185 | 0 | PyGC_Head *next = GC_NEXT(gc); |
2186 | 0 | _PyGCHead_SET_NEXT(prev, next); |
2187 | 0 | _PyGCHead_SET_PREV(next, prev); |
2188 | 0 | } |
2189 | | |
2190 | | void |
2191 | | _PyGC_Fini(PyInterpreterState *interp) |
2192 | 0 | { |
2193 | 0 | GCState *gcstate = &interp->gc; |
2194 | 0 | Py_CLEAR(gcstate->garbage); |
2195 | 0 | Py_CLEAR(gcstate->callbacks); |
2196 | | |
2197 | | /* Prevent a subtle bug that affects sub-interpreters that use basic |
2198 | | * single-phase init extensions (m_size == -1). Those extensions cause objects |
2199 | | * to be shared between interpreters, via the PyDict_Update(mdict, m_copy) call |
2200 | | * in import_find_extension(). |
2201 | | * |
2202 | | * If they are GC objects, their GC head next or prev links could refer to |
2203 | | * the interpreter _gc_runtime_state PyGC_Head nodes. Those nodes go away |
2204 | | * when the interpreter structure is freed and so pointers to them become |
2205 | | * invalid. If those objects are still used by another interpreter and |
2206 | | * UNTRACK is called on them, a crash will happen. We untrack the nodes |
2207 | | * here to avoid that. |
2208 | | * |
2209 | | * This bug was originally fixed when reported as gh-90228. The bug was |
2210 | | * re-introduced in gh-94673. |
2211 | | */ |
2212 | 0 | finalize_unlink_gc_head(&gcstate->young.head); |
2213 | 0 | finalize_unlink_gc_head(&gcstate->old[0].head); |
2214 | 0 | finalize_unlink_gc_head(&gcstate->old[1].head); |
2215 | 0 | finalize_unlink_gc_head(&gcstate->permanent_generation.head); |
2216 | 0 | } |
2217 | | |
2218 | | /* for debugging */ |
2219 | | void |
2220 | | _PyGC_Dump(PyGC_Head *g) |
2221 | 0 | { |
2222 | 0 | _PyObject_Dump(FROM_GC(g)); |
2223 | 0 | } |
2224 | | |
2225 | | |
2226 | | #ifdef Py_DEBUG |
2227 | | static int |
2228 | | visit_validate(PyObject *op, void *parent_raw) |
2229 | | { |
2230 | | PyObject *parent = _PyObject_CAST(parent_raw); |
2231 | | if (_PyObject_IsFreed(op)) { |
2232 | | _PyObject_ASSERT_FAILED_MSG(parent, |
2233 | | "PyObject_GC_Track() object is not valid"); |
2234 | | } |
2235 | | return 0; |
2236 | | } |
2237 | | #endif |
2238 | | |
2239 | | |
2240 | | /* extension modules might be compiled with GC support so these |
2241 | | functions must always be available */ |
2242 | | |
2243 | | void |
2244 | | PyObject_GC_Track(void *op_raw) |
2245 | 149M | { |
2246 | 149M | PyObject *op = _PyObject_CAST(op_raw); |
2247 | 149M | if (_PyObject_GC_IS_TRACKED(op)) { |
2248 | 0 | _PyObject_ASSERT_FAILED_MSG(op, |
2249 | 0 | "object already tracked " |
2250 | 0 | "by the garbage collector"); |
2251 | 0 | } |
2252 | 149M | _PyObject_GC_TRACK(op); |
2253 | | |
2254 | | #ifdef Py_DEBUG |
2255 | | /* Check that the object is valid: validate objects traversed |
2256 | | by tp_traverse() */ |
2257 | | traverseproc traverse = Py_TYPE(op)->tp_traverse; |
2258 | | (void)traverse(op, visit_validate, op); |
2259 | | #endif |
2260 | 149M | } |
2261 | | |
2262 | | void |
2263 | | PyObject_GC_UnTrack(void *op_raw) |
2264 | 1.57G | { |
2265 | 1.57G | PyObject *op = _PyObject_CAST(op_raw); |
2266 | | /* The code for some objects, such as tuples, is a bit |
2267 | | * sloppy about when the object is tracked and untracked. */ |
2268 | 1.57G | if (_PyObject_GC_IS_TRACKED(op)) { |
2269 | 1.50G | _PyObject_GC_UNTRACK(op); |
2270 | 1.50G | } |
2271 | 1.57G | } |
2272 | | |
2273 | | int |
2274 | | PyObject_IS_GC(PyObject *obj) |
2275 | 138M | { |
2276 | 138M | return _PyObject_IS_GC(obj); |
2277 | 138M | } |
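/* Editor's note: a sketch of how an extension type pairs the calls above
 * (hypothetical 'MyObject' type, not part of this file). A container is
 * tracked only once its fields are in a traversable state, and untracked
 * first thing in tp_dealloc:
 *
 *     typedef struct {
 *         PyObject_HEAD
 *         PyObject *payload;                   // owned reference, may be NULL
 *     } MyObject;
 *
 *     static MyObject *
 *     myobject_create(PyTypeObject *type)
 *     {
 *         MyObject *op = PyObject_GC_New(MyObject, type);
 *         if (op == NULL) {
 *             return NULL;
 *         }
 *         op->payload = NULL;                  // fields now valid for tp_traverse
 *         PyObject_GC_Track(op);               // safe to start tracking
 *         return op;
 *     }
 *
 *     static void
 *     myobject_dealloc(MyObject *op)
 *     {
 *         PyObject_GC_UnTrack(op);             // stop tracking before teardown
 *         Py_CLEAR(op->payload);
 *         PyObject_GC_Del(op);
 *     }
 */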
2278 | | |
2279 | | void |
2280 | | _Py_ScheduleGC(PyThreadState *tstate) |
2281 | 5.57M | { |
2282 | 5.57M | if (!_Py_eval_breaker_bit_is_set(tstate, _PY_GC_SCHEDULED_BIT)) |
2283 | 58.3k | { |
2284 | 58.3k | _Py_set_eval_breaker_bit(tstate, _PY_GC_SCHEDULED_BIT); |
2285 | 58.3k | } |
2286 | 5.57M | } |
2287 | | |
2288 | | void |
2289 | | _PyObject_GC_Link(PyObject *op) |
2290 | 428M | { |
2291 | 428M | PyGC_Head *gc = AS_GC(op); |
2292 | | // gc must be correctly aligned |
2293 | 428M | _PyObject_ASSERT(op, ((uintptr_t)gc & (sizeof(uintptr_t)-1)) == 0); |
2294 | | |
2295 | 428M | PyThreadState *tstate = _PyThreadState_GET(); |
2296 | 428M | GCState *gcstate = &tstate->interp->gc; |
2297 | 428M | gc->_gc_next = 0; |
2298 | 428M | gc->_gc_prev = 0; |
2299 | 428M | gcstate->young.count++; /* number of allocated GC objects */ |
2300 | 428M | gcstate->heap_size++; |
2301 | 428M | if (gcstate->young.count > gcstate->young.threshold && |
2302 | 428M | gcstate->enabled && |
2303 | 428M | gcstate->young.threshold && |
2304 | 428M | !_Py_atomic_load_int_relaxed(&gcstate->collecting) && |
2305 | 428M | !_PyErr_Occurred(tstate)) |
2306 | 5.57M | { |
2307 | 5.57M | _Py_ScheduleGC(tstate); |
2308 | 5.57M | } |
2309 | 428M | } |
2310 | | |
2311 | | void |
2312 | | _Py_RunGC(PyThreadState *tstate) |
2313 | 58.3k | { |
2314 | 58.3k | if (tstate->interp->gc.enabled) { |
2315 | 58.3k | _PyGC_Collect(tstate, 1, _Py_GC_REASON_HEAP); |
2316 | 58.3k | } |
2317 | 58.3k | } |
2318 | | |
2319 | | static PyObject * |
2320 | | gc_alloc(PyTypeObject *tp, size_t basicsize, size_t presize) |
2321 | 339M | { |
2322 | 339M | PyThreadState *tstate = _PyThreadState_GET(); |
2323 | 339M | if (basicsize > PY_SSIZE_T_MAX - presize) { |
2324 | 0 | return _PyErr_NoMemory(tstate); |
2325 | 0 | } |
2326 | 339M | size_t size = presize + basicsize; |
2327 | 339M | char *mem = _PyObject_MallocWithType(tp, size); |
2328 | 339M | if (mem == NULL) { |
2329 | 0 | return _PyErr_NoMemory(tstate); |
2330 | 0 | } |
2331 | 339M | ((PyObject **)mem)[0] = NULL; |
2332 | 339M | ((PyObject **)mem)[1] = NULL; |
2333 | 339M | PyObject *op = (PyObject *)(mem + presize); |
2334 | 339M | _PyObject_GC_Link(op); |
2335 | 339M | return op; |
2336 | 339M | } |
2337 | | |
2338 | | |
2339 | | PyObject * |
2340 | | _PyObject_GC_New(PyTypeObject *tp) |
2341 | 151M | { |
2342 | 151M | size_t presize = _PyType_PreHeaderSize(tp); |
2343 | 151M | size_t size = _PyObject_SIZE(tp); |
2344 | 151M | if (_PyType_HasFeature(tp, Py_TPFLAGS_INLINE_VALUES)) { |
2345 | 0 | size += _PyInlineValuesSize(tp); |
2346 | 0 | } |
2347 | 151M | PyObject *op = gc_alloc(tp, size, presize); |
2348 | 151M | if (op == NULL) { |
2349 | 0 | return NULL; |
2350 | 0 | } |
2351 | 151M | _PyObject_Init(op, tp); |
2352 | 151M | if (tp->tp_flags & Py_TPFLAGS_INLINE_VALUES) { |
2353 | 0 | _PyObject_InitInlineValues(op, tp); |
2354 | 0 | } |
2355 | 151M | return op; |
2356 | 151M | } |
2357 | | |
2358 | | PyVarObject * |
2359 | | _PyObject_GC_NewVar(PyTypeObject *tp, Py_ssize_t nitems) |
2360 | 187M | { |
2361 | 187M | PyVarObject *op; |
2362 | | |
2363 | 187M | if (nitems < 0) { |
2364 | 0 | PyErr_BadInternalCall(); |
2365 | 0 | return NULL; |
2366 | 0 | } |
2367 | 187M | size_t presize = _PyType_PreHeaderSize(tp); |
2368 | 187M | size_t size = _PyObject_VAR_SIZE(tp, nitems); |
2369 | 187M | op = (PyVarObject *)gc_alloc(tp, size, presize); |
2370 | 187M | if (op == NULL) { |
2371 | 0 | return NULL; |
2372 | 0 | } |
2373 | 187M | _PyObject_InitVar(op, tp, nitems); |
2374 | 187M | return op; |
2375 | 187M | } |
2376 | | |
2377 | | PyObject * |
2378 | | PyUnstable_Object_GC_NewWithExtraData(PyTypeObject *tp, size_t extra_size) |
2379 | 0 | { |
2380 | 0 | size_t presize = _PyType_PreHeaderSize(tp); |
2381 | 0 | size_t size = _PyObject_SIZE(tp) + extra_size; |
2382 | 0 | PyObject *op = gc_alloc(tp, size, presize); |
2383 | 0 | if (op == NULL) { |
2384 | 0 | return NULL; |
2385 | 0 | } |
2386 | 0 | memset((char *)op + sizeof(PyObject), 0, size - sizeof(PyObject)); |
2387 | 0 | _PyObject_Init(op, tp); |
2388 | 0 | return op; |
2389 | 0 | } |
2390 | | |
2391 | | PyVarObject * |
2392 | | _PyObject_GC_Resize(PyVarObject *op, Py_ssize_t nitems) |
2393 | 16 | { |
2394 | 16 | const size_t basicsize = _PyObject_VAR_SIZE(Py_TYPE(op), nitems); |
2395 | 16 | const size_t presize = _PyType_PreHeaderSize(Py_TYPE(op)); |
2396 | 16 | _PyObject_ASSERT((PyObject *)op, !_PyObject_GC_IS_TRACKED(op)); |
2397 | 16 | if (basicsize > (size_t)PY_SSIZE_T_MAX - presize) { |
2398 | 0 | return (PyVarObject *)PyErr_NoMemory(); |
2399 | 0 | } |
2400 | 16 | char *mem = (char *)op - presize; |
2401 | 16 | mem = (char *)_PyObject_ReallocWithType(Py_TYPE(op), mem, presize + basicsize); |
2402 | 16 | if (mem == NULL) { |
2403 | 0 | return (PyVarObject *)PyErr_NoMemory(); |
2404 | 0 | } |
2405 | 16 | op = (PyVarObject *) (mem + presize); |
2406 | 16 | Py_SET_SIZE(op, nitems); |
2407 | 16 | return op; |
2408 | 16 | } |
2409 | | |
2410 | | void |
2411 | | PyObject_GC_Del(void *op) |
2412 | 428M | { |
2413 | 428M | size_t presize = _PyType_PreHeaderSize(Py_TYPE(op)); |
2414 | 428M | PyGC_Head *g = AS_GC(op); |
2415 | 428M | if (_PyObject_GC_IS_TRACKED(op)) { |
2416 | 0 | gc_list_remove(g); |
2417 | | #ifdef Py_DEBUG |
2418 | | PyObject *exc = PyErr_GetRaisedException(); |
2419 | | if (PyErr_WarnExplicitFormat(PyExc_ResourceWarning, "gc", 0, |
2420 | | "gc", NULL, |
2421 | | "Object of type %s is not untracked " |
2422 | | "before destruction", |
2423 | | Py_TYPE(op)->tp_name)) |
2424 | | { |
2425 | | PyErr_FormatUnraisable("Exception ignored on object deallocation"); |
2426 | | } |
2427 | | PyErr_SetRaisedException(exc); |
2428 | | #endif |
2429 | 0 | } |
2430 | 428M | GCState *gcstate = get_gc_state(); |
2431 | 428M | if (gcstate->young.count > 0) { |
2432 | 307M | gcstate->young.count--; |
2433 | 307M | } |
2434 | 428M | gcstate->heap_size--; |
2435 | 428M | PyObject_Free(((char *)op)-presize); |
2436 | 428M | } |
2437 | | |
2438 | | int |
2439 | | PyObject_GC_IsTracked(PyObject* obj) |
2440 | 0 | { |
2441 | 0 | if (_PyObject_IS_GC(obj) && _PyObject_GC_IS_TRACKED(obj)) { |
2442 | 0 | return 1; |
2443 | 0 | } |
2444 | 0 | return 0; |
2445 | 0 | } |
2446 | | |
2447 | | int |
2448 | | PyObject_GC_IsFinalized(PyObject *obj) |
2449 | 0 | { |
2450 | 0 | if (_PyObject_IS_GC(obj) && _PyGC_FINALIZED(obj)) { |
2451 | 0 | return 1; |
2452 | 0 | } |
2453 | 0 | return 0; |
2454 | 0 | } |
2455 | | |
2456 | | static int |
2457 | | visit_generation(gcvisitobjects_t callback, void *arg, struct gc_generation *gen) |
2458 | 0 | { |
2459 | 0 | PyGC_Head *gc_list, *gc; |
2460 | 0 | gc_list = &gen->head; |
2461 | 0 | for (gc = GC_NEXT(gc_list); gc != gc_list; gc = GC_NEXT(gc)) { |
2462 | 0 | PyObject *op = FROM_GC(gc); |
2463 | 0 | Py_INCREF(op); |
2464 | 0 | int res = callback(op, arg); |
2465 | 0 | Py_DECREF(op); |
2466 | 0 | if (!res) { |
2467 | 0 | return -1; |
2468 | 0 | } |
2469 | 0 | } |
2470 | 0 | return 0; |
2471 | 0 | } |
2472 | | |
2473 | | void |
2474 | | PyUnstable_GC_VisitObjects(gcvisitobjects_t callback, void *arg) |
2475 | 0 | { |
2476 | 0 | GCState *gcstate = get_gc_state(); |
2477 | 0 | int original_state = gcstate->enabled; |
2478 | 0 | gcstate->enabled = 0; |
2479 | 0 | if (visit_generation(callback, arg, &gcstate->young) < 0) { |
2480 | 0 | goto done; |
2481 | 0 | } |
2482 | 0 | if (visit_generation(callback, arg, &gcstate->old[0]) < 0) { |
2483 | 0 | goto done; |
2484 | 0 | } |
2485 | 0 | if (visit_generation(callback, arg, &gcstate->old[1]) < 0) { |
2486 | 0 | goto done; |
2487 | 0 | } |
2488 | 0 | visit_generation(callback, arg, &gcstate->permanent_generation); |
2489 | 0 | done: |
2490 | 0 | gcstate->enabled = original_state; |
2491 | 0 | } |
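/* Editor's note: a sketch of using the unstable visitor API above to count
 * every tracked object (hypothetical helper, not part of this file). The
 * callback must return non-zero to continue iterating; returning 0 stops
 * the walk early:
 *
 *     static int
 *     count_visitor(PyObject *op, void *arg)
 *     {
 *         (void)op;
 *         (*(Py_ssize_t *)arg)++;
 *         return 1;                            // keep going
 *     }
 *
 *     Py_ssize_t n = 0;
 *     PyUnstable_GC_VisitObjects(count_visitor, &n);
 *     PySys_WriteStderr("gc: %zd tracked objects\n", n);
 */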
2492 | | |
2493 | | #endif // Py_GIL_DISABLED |