/src/Python-3.8.3/Objects/frameobject.c
Line | Count | Source (jump to first uncovered line) |
1 | | /* Frame object implementation */ |
2 | | |
3 | | #include "Python.h" |
4 | | #include "pycore_object.h" |
5 | | #include "pycore_pystate.h" |
6 | | |
7 | | #include "code.h" |
8 | | #include "frameobject.h" |
9 | | #include "opcode.h" |
10 | | #include "structmember.h" |
11 | | |
/* Byte offset of a PyFrameObject field, for the descriptor table below. */
#define OFF(x) offsetof(PyFrameObject, x)

/* Frame attributes exposed directly as struct members.  Everything here is
 * read-only from Python code except the two per-frame tracing flags. */
static PyMemberDef frame_memberlist[] = {
    {"f_back", T_OBJECT, OFF(f_back), READONLY},
    {"f_code", T_OBJECT, OFF(f_code), READONLY},
    {"f_builtins", T_OBJECT, OFF(f_builtins), READONLY},
    {"f_globals", T_OBJECT, OFF(f_globals), READONLY},
    {"f_lasti", T_INT, OFF(f_lasti), READONLY},
    {"f_trace_lines", T_BOOL, OFF(f_trace_lines), 0},
    {"f_trace_opcodes", T_BOOL, OFF(f_trace_opcodes), 0},
    {NULL} /* Sentinel */
};
24 | | |
25 | | static PyObject * |
26 | | frame_getlocals(PyFrameObject *f, void *closure) |
27 | 0 | { |
28 | 0 | if (PyFrame_FastToLocalsWithError(f) < 0) |
29 | 0 | return NULL; |
30 | 0 | Py_INCREF(f->f_locals); |
31 | 0 | return f->f_locals; |
32 | 0 | } |
33 | | |
34 | | int |
35 | | PyFrame_GetLineNumber(PyFrameObject *f) |
36 | 3.55k | { |
37 | 3.55k | if (f->f_trace) |
38 | 0 | return f->f_lineno; |
39 | 3.55k | else |
40 | 3.55k | return PyCode_Addr2Line(f->f_code, f->f_lasti); |
41 | 3.55k | } |
42 | | |
43 | | static PyObject * |
44 | | frame_getlineno(PyFrameObject *f, void *closure) |
45 | 14 | { |
46 | 14 | return PyLong_FromLong(PyFrame_GetLineNumber(f)); |
47 | 14 | } |
48 | | |
49 | | |
50 | | /* Given the index of the effective opcode, |
51 | | scan back to construct the oparg with EXTENDED_ARG */ |
52 | | static unsigned int |
53 | | get_arg(const _Py_CODEUNIT *codestr, Py_ssize_t i) |
54 | 0 | { |
55 | 0 | _Py_CODEUNIT word; |
56 | 0 | unsigned int oparg = _Py_OPARG(codestr[i]); |
57 | 0 | if (i >= 1 && _Py_OPCODE(word = codestr[i-1]) == EXTENDED_ARG) { |
58 | 0 | oparg |= _Py_OPARG(word) << 8; |
59 | 0 | if (i >= 2 && _Py_OPCODE(word = codestr[i-2]) == EXTENDED_ARG) { |
60 | 0 | oparg |= _Py_OPARG(word) << 16; |
61 | 0 | if (i >= 3 && _Py_OPCODE(word = codestr[i-3]) == EXTENDED_ARG) { |
62 | 0 | oparg |= _Py_OPARG(word) << 24; |
63 | 0 | } |
64 | 0 | } |
65 | 0 | } |
66 | 0 | return oparg; |
67 | 0 | } |
68 | | |
69 | | |
70 | | /* Setter for f_lineno - you can set f_lineno from within a trace function in |
71 | | * order to jump to a given line of code, subject to some restrictions. Most |
72 | | * lines are OK to jump to because they don't make any assumptions about the |
73 | | * state of the stack (obvious because you could remove the line and the code |
74 | | * would still work without any stack errors), but there are some constructs |
75 | | * that limit jumping: |
76 | | * |
77 | | * o Lines with an 'except' statement on them can't be jumped to, because |
78 | | * they expect an exception to be on the top of the stack. |
79 | | * o Lines that live in a 'finally' block can't be jumped from or to, since |
80 | | * the END_FINALLY expects to clean up the stack after the 'try' block. |
81 | | * o 'try', 'with' and 'async with' blocks can't be jumped into because |
82 | | * the blockstack needs to be set up before their code runs. |
83 | | * o 'for' and 'async for' loops can't be jumped into because the |
84 | | * iterator needs to be on the stack. |
85 | | * o Jumps cannot be made from within a trace function invoked with a |
86 | | * 'return' or 'exception' event since the eval loop has been exited at |
87 | | * that time. |
88 | | */ |
89 | | static int |
90 | | frame_setlineno(PyFrameObject *f, PyObject* p_new_lineno, void *Py_UNUSED(ignored)) |
91 | 0 | { |
92 | 0 | int new_lineno = 0; /* The new value of f_lineno */ |
93 | 0 | long l_new_lineno; |
94 | 0 | int overflow; |
95 | 0 | int new_lasti = 0; /* The new value of f_lasti */ |
96 | 0 | unsigned char *code = NULL; /* The bytecode for the frame... */ |
97 | 0 | Py_ssize_t code_len = 0; /* ...and its length */ |
98 | 0 | unsigned char *lnotab = NULL; /* Iterating over co_lnotab */ |
99 | 0 | Py_ssize_t lnotab_len = 0; /* (ditto) */ |
100 | 0 | int offset = 0; /* (ditto) */ |
101 | 0 | int line = 0; /* (ditto) */ |
102 | 0 | int addr = 0; /* (ditto) */ |
103 | 0 | int delta_iblock = 0; /* Scanning the SETUPs and POPs */ |
104 | 0 | int delta = 0; |
105 | 0 | int blockstack[CO_MAXBLOCKS]; /* Walking the 'finally' blocks */ |
106 | 0 | int blockstack_top = 0; /* (ditto) */ |
107 | |
|
108 | 0 | if (p_new_lineno == NULL) { |
109 | 0 | PyErr_SetString(PyExc_AttributeError, "cannot delete attribute"); |
110 | 0 | return -1; |
111 | 0 | } |
112 | | /* f_lineno must be an integer. */ |
113 | 0 | if (!PyLong_CheckExact(p_new_lineno)) { |
114 | 0 | PyErr_SetString(PyExc_ValueError, |
115 | 0 | "lineno must be an integer"); |
116 | 0 | return -1; |
117 | 0 | } |
118 | | |
119 | | /* Upon the 'call' trace event of a new frame, f->f_lasti is -1 and |
120 | | * f->f_trace is NULL, check first on the first condition. |
121 | | * Forbidding jumps from the 'call' event of a new frame is a side effect |
122 | | * of allowing to set f_lineno only from trace functions. */ |
123 | 0 | if (f->f_lasti == -1) { |
124 | 0 | PyErr_Format(PyExc_ValueError, |
125 | 0 | "can't jump from the 'call' trace event of a new frame"); |
126 | 0 | return -1; |
127 | 0 | } |
128 | | |
129 | | /* You can only do this from within a trace function, not via |
130 | | * _getframe or similar hackery. */ |
131 | 0 | if (!f->f_trace) { |
132 | 0 | PyErr_Format(PyExc_ValueError, |
133 | 0 | "f_lineno can only be set by a trace function"); |
134 | 0 | return -1; |
135 | 0 | } |
136 | | |
137 | | /* Forbid jumps upon a 'return' trace event (except after executing a |
138 | | * YIELD_VALUE or YIELD_FROM opcode, f_stacktop is not NULL in that case) |
139 | | * and upon an 'exception' trace event. |
140 | | * Jumps from 'call' trace events have already been forbidden above for new |
141 | | * frames, so this check does not change anything for 'call' events. */ |
142 | 0 | if (f->f_stacktop == NULL) { |
143 | 0 | PyErr_SetString(PyExc_ValueError, |
144 | 0 | "can only jump from a 'line' trace event"); |
145 | 0 | return -1; |
146 | 0 | } |
147 | | |
148 | | /* Fail if the line comes before the start of the code block. */ |
149 | 0 | l_new_lineno = PyLong_AsLongAndOverflow(p_new_lineno, &overflow); |
150 | 0 | if (overflow |
151 | 0 | #if SIZEOF_LONG > SIZEOF_INT |
152 | 0 | || l_new_lineno > INT_MAX |
153 | 0 | || l_new_lineno < INT_MIN |
154 | 0 | #endif |
155 | 0 | ) { |
156 | 0 | PyErr_SetString(PyExc_ValueError, |
157 | 0 | "lineno out of range"); |
158 | 0 | return -1; |
159 | 0 | } |
160 | 0 | new_lineno = (int)l_new_lineno; |
161 | |
|
162 | 0 | if (new_lineno < f->f_code->co_firstlineno) { |
163 | 0 | PyErr_Format(PyExc_ValueError, |
164 | 0 | "line %d comes before the current code block", |
165 | 0 | new_lineno); |
166 | 0 | return -1; |
167 | 0 | } |
168 | 0 | else if (new_lineno == f->f_code->co_firstlineno) { |
169 | 0 | new_lasti = 0; |
170 | 0 | new_lineno = f->f_code->co_firstlineno; |
171 | 0 | } |
172 | 0 | else { |
173 | | /* Find the bytecode offset for the start of the given |
174 | | * line, or the first code-owning line after it. */ |
175 | 0 | char *tmp; |
176 | 0 | PyBytes_AsStringAndSize(f->f_code->co_lnotab, |
177 | 0 | &tmp, &lnotab_len); |
178 | 0 | lnotab = (unsigned char *) tmp; |
179 | 0 | addr = 0; |
180 | 0 | line = f->f_code->co_firstlineno; |
181 | 0 | new_lasti = -1; |
182 | 0 | for (offset = 0; offset < lnotab_len; offset += 2) { |
183 | 0 | addr += lnotab[offset]; |
184 | 0 | line += (signed char)lnotab[offset+1]; |
185 | 0 | if (line >= new_lineno) { |
186 | 0 | new_lasti = addr; |
187 | 0 | new_lineno = line; |
188 | 0 | break; |
189 | 0 | } |
190 | 0 | } |
191 | 0 | } |
192 | | |
193 | | /* If we didn't reach the requested line, return an error. */ |
194 | 0 | if (new_lasti == -1) { |
195 | 0 | PyErr_Format(PyExc_ValueError, |
196 | 0 | "line %d comes after the current code block", |
197 | 0 | new_lineno); |
198 | 0 | return -1; |
199 | 0 | } |
200 | | |
201 | | /* We're now ready to look at the bytecode. */ |
202 | 0 | PyBytes_AsStringAndSize(f->f_code->co_code, (char **)&code, &code_len); |
203 | | |
204 | | /* The trace function is called with a 'return' trace event after the |
205 | | * execution of a yield statement. */ |
206 | 0 | assert(f->f_lasti != -1); |
207 | 0 | if (code[f->f_lasti] == YIELD_VALUE || code[f->f_lasti] == YIELD_FROM) { |
208 | 0 | PyErr_SetString(PyExc_ValueError, |
209 | 0 | "can't jump from a yield statement"); |
210 | 0 | return -1; |
211 | 0 | } |
212 | | |
213 | | /* You can't jump onto a line with an 'except' statement on it - |
214 | | * they expect to have an exception on the top of the stack, which |
215 | | * won't be true if you jump to them. They always start with code |
216 | | * that either pops the exception using POP_TOP (plain 'except:' |
217 | | * lines do this) or duplicates the exception on the stack using |
218 | | * DUP_TOP (if there's an exception type specified). See compile.c, |
219 | | * 'com_try_except' for the full details. There aren't any other |
220 | | * cases (AFAIK) where a line's code can start with DUP_TOP or |
221 | | * POP_TOP, but if any ever appear, they'll be subject to the same |
222 | | * restriction (but with a different error message). */ |
223 | 0 | if (code[new_lasti] == DUP_TOP || code[new_lasti] == POP_TOP) { |
224 | 0 | PyErr_SetString(PyExc_ValueError, |
225 | 0 | "can't jump to 'except' line as there's no exception"); |
226 | 0 | return -1; |
227 | 0 | } |
228 | | |
229 | | /* You can't jump into or out of a 'finally' block because the 'try' |
230 | | * block leaves something on the stack for the END_FINALLY to clean up. |
231 | | * So we walk the bytecode, maintaining a simulated blockstack. |
232 | | * 'blockstack' is a stack of the bytecode addresses of the starts of |
233 | | * the 'finally' blocks. */ |
234 | 0 | memset(blockstack, '\0', sizeof(blockstack)); |
235 | 0 | blockstack_top = 0; |
236 | 0 | unsigned char prevop = NOP; |
237 | 0 | for (addr = 0; addr < code_len; addr += sizeof(_Py_CODEUNIT)) { |
238 | 0 | unsigned char op = code[addr]; |
239 | 0 | switch (op) { |
240 | 0 | case SETUP_FINALLY: |
241 | 0 | case SETUP_WITH: |
242 | 0 | case SETUP_ASYNC_WITH: |
243 | 0 | case FOR_ITER: { |
244 | 0 | unsigned int oparg = get_arg((const _Py_CODEUNIT *)code, |
245 | 0 | addr / sizeof(_Py_CODEUNIT)); |
246 | 0 | int target_addr = addr + oparg + sizeof(_Py_CODEUNIT); |
247 | 0 | assert(target_addr < code_len); |
248 | | /* Police block-jumping (you can't jump into the middle of a block) |
249 | | * and ensure that the blockstack finishes up in a sensible state (by |
250 | | * popping any blocks we're jumping out of). We look at all the |
251 | | * blockstack operations between the current position and the new |
252 | | * one, and keep track of how many blocks we drop out of on the way. |
253 | | * By also keeping track of the lowest blockstack position we see, we |
254 | | * can tell whether the jump goes into any blocks without coming out |
255 | | * again - in that case we raise an exception below. */ |
256 | 0 | int first_in = addr < f->f_lasti && f->f_lasti < target_addr; |
257 | 0 | int second_in = addr < new_lasti && new_lasti < target_addr; |
258 | 0 | if (!first_in && second_in) { |
259 | 0 | PyErr_SetString(PyExc_ValueError, |
260 | 0 | "can't jump into the middle of a block"); |
261 | 0 | return -1; |
262 | 0 | } |
263 | 0 | int in_for_loop = op == FOR_ITER || code[target_addr] == END_ASYNC_FOR; |
264 | 0 | if (first_in && !second_in) { |
265 | 0 | if (!delta_iblock) { |
266 | 0 | if (in_for_loop) { |
267 | | /* Pop the iterators of any 'for' and 'async for' loop |
268 | | * we're jumping out of. */ |
269 | 0 | delta++; |
270 | 0 | } |
271 | 0 | else if (prevop == LOAD_CONST) { |
272 | | /* Pops None pushed before SETUP_FINALLY. */ |
273 | 0 | delta++; |
274 | 0 | } |
275 | 0 | } |
276 | 0 | if (!in_for_loop) { |
277 | 0 | delta_iblock++; |
278 | 0 | } |
279 | 0 | } |
280 | 0 | if (!in_for_loop) { |
281 | 0 | blockstack[blockstack_top++] = target_addr; |
282 | 0 | } |
283 | 0 | break; |
284 | 0 | } |
285 | | |
286 | 0 | case END_FINALLY: { |
287 | 0 | assert(blockstack_top > 0); |
288 | 0 | int target_addr = blockstack[--blockstack_top]; |
289 | 0 | assert(target_addr <= addr); |
290 | 0 | int first_in = target_addr <= f->f_lasti && f->f_lasti <= addr; |
291 | 0 | int second_in = target_addr <= new_lasti && new_lasti <= addr; |
292 | 0 | if (first_in != second_in) { |
293 | 0 | op = code[target_addr]; |
294 | 0 | PyErr_Format(PyExc_ValueError, |
295 | 0 | "can't jump %s %s block", |
296 | 0 | second_in ? "into" : "out of", |
297 | 0 | (op == DUP_TOP || op == POP_TOP) ? |
298 | 0 | "an 'except'" : "a 'finally'"); |
299 | 0 | return -1; |
300 | 0 | } |
301 | 0 | break; |
302 | 0 | } |
303 | 0 | } |
304 | 0 | prevop = op; |
305 | 0 | } |
306 | | |
307 | | /* Verify that the blockstack tracking code didn't get lost. */ |
308 | 0 | assert(blockstack_top == 0); |
309 | | |
310 | | /* Pop any blocks that we're jumping out of. */ |
311 | 0 | if (delta_iblock > 0) { |
312 | 0 | f->f_iblock -= delta_iblock; |
313 | 0 | PyTryBlock *b = &f->f_blockstack[f->f_iblock]; |
314 | 0 | delta += (int)(f->f_stacktop - f->f_valuestack) - b->b_level; |
315 | 0 | if (b->b_type == SETUP_FINALLY && |
316 | 0 | code[b->b_handler] == WITH_CLEANUP_START) |
317 | 0 | { |
318 | | /* Pop the exit function. */ |
319 | 0 | delta++; |
320 | 0 | } |
321 | 0 | } |
322 | 0 | while (delta > 0) { |
323 | 0 | PyObject *v = (*--f->f_stacktop); |
324 | 0 | Py_DECREF(v); |
325 | 0 | delta--; |
326 | 0 | } |
327 | | |
328 | | /* Finally set the new f_lineno and f_lasti and return OK. */ |
329 | 0 | f->f_lineno = new_lineno; |
330 | 0 | f->f_lasti = new_lasti; |
331 | 0 | return 0; |
332 | 0 | } |
333 | | |
334 | | static PyObject * |
335 | | frame_gettrace(PyFrameObject *f, void *closure) |
336 | 0 | { |
337 | 0 | PyObject* trace = f->f_trace; |
338 | |
|
339 | 0 | if (trace == NULL) |
340 | 0 | trace = Py_None; |
341 | |
|
342 | 0 | Py_INCREF(trace); |
343 | |
|
344 | 0 | return trace; |
345 | 0 | } |
346 | | |
347 | | static int |
348 | | frame_settrace(PyFrameObject *f, PyObject* v, void *closure) |
349 | 0 | { |
350 | | /* We rely on f_lineno being accurate when f_trace is set. */ |
351 | 0 | f->f_lineno = PyFrame_GetLineNumber(f); |
352 | |
|
353 | 0 | if (v == Py_None) |
354 | 0 | v = NULL; |
355 | 0 | Py_XINCREF(v); |
356 | 0 | Py_XSETREF(f->f_trace, v); |
357 | |
|
358 | 0 | return 0; |
359 | 0 | } |
360 | | |
361 | | |
/* Computed frame attributes: f_locals (getter only), f_lineno (settable
 * from a trace function, see frame_setlineno) and f_trace. */
static PyGetSetDef frame_getsetlist[] = {
    {"f_locals", (getter)frame_getlocals, NULL, NULL},
    {"f_lineno", (getter)frame_getlineno,
                    (setter)frame_setlineno, NULL},
    {"f_trace", (getter)frame_gettrace, (setter)frame_settrace, NULL},
    {0}
};
369 | | |
370 | | /* Stack frames are allocated and deallocated at a considerable rate. |
371 | | In an attempt to improve the speed of function calls, we: |
372 | | |
373 | | 1. Hold a single "zombie" frame on each code object. This retains |
374 | | the allocated and initialised frame object from an invocation of |
375 | | the code object. The zombie is reanimated the next time we need a |
376 | | frame object for that code object. Doing this saves the malloc/ |
377 | | realloc required when using a free_list frame that isn't the |
378 | | correct size. It also saves some field initialisation. |
379 | | |
380 | | In zombie mode, no field of PyFrameObject holds a reference, but |
381 | | the following fields are still valid: |
382 | | |
383 | | * ob_type, ob_size, f_code, f_valuestack; |
384 | | |
385 | | * f_locals, f_trace are NULL; |
386 | | |
387 | | * f_localsplus does not require re-allocation and |
388 | | the local variables in f_localsplus are NULL. |
389 | | |
390 | | 2. We also maintain a separate free list of stack frames (just like |
391 | | floats are allocated in a special way -- see floatobject.c). When |
392 | | a stack frame is on the free list, only the following members have |
393 | | a meaning: |
394 | | ob_type == &Frametype |
395 | | f_back next item on free list, or NULL |
396 | | f_stacksize size of value stack |
397 | | ob_size size of localsplus |
398 | | Note that the value and block stacks are preserved -- this can save |
399 | | another malloc() call or two (and two free() calls as well!). |
400 | | Also note that, unlike for integers, each frame object is a |
401 | | malloc'ed object in its own right -- it is only the actual calls to |
402 | | malloc() that we are trying to save here, not the administration. |
403 | | After all, while a typical program may make millions of calls, a |
404 | | call depth of more than 20 or 30 is probably already exceptional |
405 | | unless the program contains run-away recursion. I hope. |
406 | | |
407 | | Later, PyFrame_MAXFREELIST was added to bound the # of frames saved on |
408 | | free_list. Else programs creating lots of cyclic trash involving |
409 | | frames could provoke free_list into growing without bound. |
410 | | */ |
411 | | |
/* Singly linked free list of frames, chained through f_back (see the big
 * comment above for the lifecycle).  Entries hold no references. */
static PyFrameObject *free_list = NULL;
static int numfree = 0;         /* number of frames currently in free_list */
/* max value for numfree */
#define PyFrame_MAXFREELIST 200
416 | | |
/* Deallocate a frame.  Rather than freeing the memory outright, try to
 * park the frame on its code object's "zombie" slot, or failing that on
 * the bounded free list, so a later call of the same code can reuse the
 * allocation (see the lifecycle comment above). */
static void _Py_HOT_FUNCTION
frame_dealloc(PyFrameObject *f)
{
    PyObject **p, **valuestack;
    PyCodeObject *co;

    if (_PyObject_GC_IS_TRACKED(f))
        _PyObject_GC_UNTRACK(f);

    Py_TRASHCAN_SAFE_BEGIN(f)
    /* Kill all local variables */
    valuestack = f->f_valuestack;
    for (p = f->f_localsplus; p < valuestack; p++)
        Py_CLEAR(*p);

    /* Free stack */
    if (f->f_stacktop != NULL) {
        for (p = valuestack; p < f->f_stacktop; p++)
            Py_XDECREF(*p);
    }

    Py_XDECREF(f->f_back);
    Py_DECREF(f->f_builtins);
    Py_DECREF(f->f_globals);
    Py_CLEAR(f->f_locals);
    Py_CLEAR(f->f_trace);

    /* Recycle: zombie slot first, then the free list, else really free. */
    co = f->f_code;
    if (co->co_zombieframe == NULL)
        co->co_zombieframe = f;
    else if (numfree < PyFrame_MAXFREELIST) {
        ++numfree;
        /* f_back doubles as the free-list link; no reference is held. */
        f->f_back = free_list;
        free_list = f;
    }
    else
        PyObject_GC_Del(f);

    /* Reference to the code object was taken in _PyFrame_New_NoTrack. */
    Py_DECREF(co);
    Py_TRASHCAN_SAFE_END(f)
}
458 | | |
459 | | static int |
460 | | frame_traverse(PyFrameObject *f, visitproc visit, void *arg) |
461 | 4 | { |
462 | 4 | PyObject **fastlocals, **p; |
463 | 4 | Py_ssize_t i, slots; |
464 | | |
465 | 4 | Py_VISIT(f->f_back); |
466 | 4 | Py_VISIT(f->f_code); |
467 | 4 | Py_VISIT(f->f_builtins); |
468 | 4 | Py_VISIT(f->f_globals); |
469 | 4 | Py_VISIT(f->f_locals); |
470 | 4 | Py_VISIT(f->f_trace); |
471 | | |
472 | | /* locals */ |
473 | 4 | slots = f->f_code->co_nlocals + PyTuple_GET_SIZE(f->f_code->co_cellvars) + PyTuple_GET_SIZE(f->f_code->co_freevars); |
474 | 4 | fastlocals = f->f_localsplus; |
475 | 40 | for (i = slots; --i >= 0; ++fastlocals) |
476 | 36 | Py_VISIT(*fastlocals); |
477 | | |
478 | | /* stack */ |
479 | 4 | if (f->f_stacktop != NULL) { |
480 | 0 | for (p = f->f_valuestack; p < f->f_stacktop; p++) |
481 | 0 | Py_VISIT(*p); |
482 | 0 | } |
483 | 4 | return 0; |
484 | 4 | } |
485 | | |
486 | | static int |
487 | | frame_tp_clear(PyFrameObject *f) |
488 | 0 | { |
489 | 0 | PyObject **fastlocals, **p, **oldtop; |
490 | 0 | Py_ssize_t i, slots; |
491 | | |
492 | | /* Before anything else, make sure that this frame is clearly marked |
493 | | * as being defunct! Else, e.g., a generator reachable from this |
494 | | * frame may also point to this frame, believe itself to still be |
495 | | * active, and try cleaning up this frame again. |
496 | | */ |
497 | 0 | oldtop = f->f_stacktop; |
498 | 0 | f->f_stacktop = NULL; |
499 | 0 | f->f_executing = 0; |
500 | |
|
501 | 0 | Py_CLEAR(f->f_trace); |
502 | | |
503 | | /* locals */ |
504 | 0 | slots = f->f_code->co_nlocals + PyTuple_GET_SIZE(f->f_code->co_cellvars) + PyTuple_GET_SIZE(f->f_code->co_freevars); |
505 | 0 | fastlocals = f->f_localsplus; |
506 | 0 | for (i = slots; --i >= 0; ++fastlocals) |
507 | 0 | Py_CLEAR(*fastlocals); |
508 | | |
509 | | /* stack */ |
510 | 0 | if (oldtop != NULL) { |
511 | 0 | for (p = f->f_valuestack; p < oldtop; p++) |
512 | 0 | Py_CLEAR(*p); |
513 | 0 | } |
514 | 0 | return 0; |
515 | 0 | } |
516 | | |
517 | | static PyObject * |
518 | | frame_clear(PyFrameObject *f, PyObject *Py_UNUSED(ignored)) |
519 | 0 | { |
520 | 0 | if (f->f_executing) { |
521 | 0 | PyErr_SetString(PyExc_RuntimeError, |
522 | 0 | "cannot clear an executing frame"); |
523 | 0 | return NULL; |
524 | 0 | } |
525 | 0 | if (f->f_gen) { |
526 | 0 | _PyGen_Finalize(f->f_gen); |
527 | 0 | assert(f->f_gen == NULL); |
528 | 0 | } |
529 | 0 | (void)frame_tp_clear(f); |
530 | 0 | Py_RETURN_NONE; |
531 | 0 | } |
532 | | |
533 | | PyDoc_STRVAR(clear__doc__, |
534 | | "F.clear(): clear most references held by the frame"); |
535 | | |
536 | | static PyObject * |
537 | | frame_sizeof(PyFrameObject *f, PyObject *Py_UNUSED(ignored)) |
538 | 0 | { |
539 | 0 | Py_ssize_t res, extras, ncells, nfrees; |
540 | |
|
541 | 0 | ncells = PyTuple_GET_SIZE(f->f_code->co_cellvars); |
542 | 0 | nfrees = PyTuple_GET_SIZE(f->f_code->co_freevars); |
543 | 0 | extras = f->f_code->co_stacksize + f->f_code->co_nlocals + |
544 | 0 | ncells + nfrees; |
545 | | /* subtract one as it is already included in PyFrameObject */ |
546 | 0 | res = sizeof(PyFrameObject) + (extras-1) * sizeof(PyObject *); |
547 | |
|
548 | 0 | return PyLong_FromSsize_t(res); |
549 | 0 | } |
550 | | |
551 | | PyDoc_STRVAR(sizeof__doc__, |
552 | | "F.__sizeof__() -> size of F in memory, in bytes"); |
553 | | |
554 | | static PyObject * |
555 | | frame_repr(PyFrameObject *f) |
556 | 0 | { |
557 | 0 | int lineno = PyFrame_GetLineNumber(f); |
558 | 0 | return PyUnicode_FromFormat( |
559 | 0 | "<frame at %p, file %R, line %d, code %S>", |
560 | 0 | f, f->f_code->co_filename, lineno, f->f_code->co_name); |
561 | 0 | } |
562 | | |
/* Methods exposed on frame objects: clear() and __sizeof__(). */
static PyMethodDef frame_methods[] = {
    {"clear", (PyCFunction)frame_clear, METH_NOARGS,
     clear__doc__},
    {"__sizeof__", (PyCFunction)frame_sizeof, METH_NOARGS,
     sizeof__doc__},
    {NULL, NULL} /* sentinel */
};
570 | | |
/* Type object for frame objects.  Frames participate in cyclic GC
 * (Py_TPFLAGS_HAVE_GC with frame_traverse/frame_tp_clear) and cannot be
 * instantiated from Python (no tp_new). */
PyTypeObject PyFrame_Type = {
    PyVarObject_HEAD_INIT(&PyType_Type, 0)
    "frame",
    sizeof(PyFrameObject),
    sizeof(PyObject *),
    (destructor)frame_dealloc,                  /* tp_dealloc */
    0,                                          /* tp_vectorcall_offset */
    0,                                          /* tp_getattr */
    0,                                          /* tp_setattr */
    0,                                          /* tp_as_async */
    (reprfunc)frame_repr,                       /* tp_repr */
    0,                                          /* tp_as_number */
    0,                                          /* tp_as_sequence */
    0,                                          /* tp_as_mapping */
    0,                                          /* tp_hash */
    0,                                          /* tp_call */
    0,                                          /* tp_str */
    PyObject_GenericGetAttr,                    /* tp_getattro */
    PyObject_GenericSetAttr,                    /* tp_setattro */
    0,                                          /* tp_as_buffer */
    Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC,    /* tp_flags */
    0,                                          /* tp_doc */
    (traverseproc)frame_traverse,               /* tp_traverse */
    (inquiry)frame_tp_clear,                    /* tp_clear */
    0,                                          /* tp_richcompare */
    0,                                          /* tp_weaklistoffset */
    0,                                          /* tp_iter */
    0,                                          /* tp_iternext */
    frame_methods,                              /* tp_methods */
    frame_memberlist,                           /* tp_members */
    frame_getsetlist,                           /* tp_getset */
    0,                                          /* tp_base */
    0,                                          /* tp_dict */
};
605 | | |
606 | | _Py_IDENTIFIER(__builtins__); |
607 | | |
/* Create a new frame for executing 'code' with the given globals/locals,
 * without GC-tracking it (the caller, e.g. PyFrame_New or the eval loop,
 * decides when to track).  Reuses the code object's zombie frame or a
 * free-list frame when possible.  Returns NULL with an exception set on
 * failure. */
PyFrameObject* _Py_HOT_FUNCTION
_PyFrame_New_NoTrack(PyThreadState *tstate, PyCodeObject *code,
                     PyObject *globals, PyObject *locals)
{
    PyFrameObject *back = tstate->frame;
    PyFrameObject *f;
    PyObject *builtins;
    Py_ssize_t i;

#ifdef Py_DEBUG
    if (code == NULL || globals == NULL || !PyDict_Check(globals) ||
        (locals != NULL && !PyMapping_Check(locals))) {
        PyErr_BadInternalCall();
        return NULL;
    }
#endif
    /* Resolve the builtins for this frame. */
    if (back == NULL || back->f_globals != globals) {
        builtins = _PyDict_GetItemIdWithError(globals, &PyId___builtins__);
        if (builtins) {
            if (PyModule_Check(builtins)) {
                builtins = PyModule_GetDict(builtins);
                assert(builtins != NULL);
            }
        }
        if (builtins == NULL) {
            if (PyErr_Occurred()) {
                return NULL;
            }
            /* No builtins!              Make up a minimal one
               Give them 'None', at least. */
            builtins = PyDict_New();
            if (builtins == NULL ||
                PyDict_SetItemString(
                    builtins, "None", Py_None) < 0)
                return NULL;
        }
        else
            Py_INCREF(builtins);

    }
    else {
        /* If we share the globals, we share the builtins.
           Save a lookup and a call. */
        builtins = back->f_builtins;
        assert(builtins != NULL);
        Py_INCREF(builtins);
    }
    if (code->co_zombieframe != NULL) {
        /* Fast path: reanimate the zombie frame kept on the code object;
         * it is already the right size and partially initialized. */
        f = code->co_zombieframe;
        code->co_zombieframe = NULL;
        _Py_NewReference((PyObject *)f);
        assert(f->f_code == code);
    }
    else {
        Py_ssize_t extras, ncells, nfrees;
        ncells = PyTuple_GET_SIZE(code->co_cellvars);
        nfrees = PyTuple_GET_SIZE(code->co_freevars);
        extras = code->co_stacksize + code->co_nlocals + ncells +
            nfrees;
        if (free_list == NULL) {
            f = PyObject_GC_NewVar(PyFrameObject, &PyFrame_Type,
            extras);
            if (f == NULL) {
                Py_DECREF(builtins);
                return NULL;
            }
        }
        else {
            /* Pop a recycled frame off the free list; grow it if the new
             * code object needs more localsplus slots. */
            assert(numfree > 0);
            --numfree;
            f = free_list;
            free_list = free_list->f_back;
            if (Py_SIZE(f) < extras) {
                PyFrameObject *new_f = PyObject_GC_Resize(PyFrameObject, f, extras);
                if (new_f == NULL) {
                    PyObject_GC_Del(f);
                    Py_DECREF(builtins);
                    return NULL;
                }
                f = new_f;
            }
            _Py_NewReference((PyObject *)f);
        }

        f->f_code = code;
        /* Value stack begins right after locals + cells + frees. */
        extras = code->co_nlocals + ncells + nfrees;
        f->f_valuestack = f->f_localsplus + extras;
        for (i=0; i<extras; i++)
            f->f_localsplus[i] = NULL;
        f->f_locals = NULL;
        f->f_trace = NULL;
    }
    f->f_stacktop = f->f_valuestack;
    f->f_builtins = builtins;
    Py_XINCREF(back);
    f->f_back = back;
    Py_INCREF(code);
    Py_INCREF(globals);
    f->f_globals = globals;
    /* Most functions have CO_NEWLOCALS and CO_OPTIMIZED set. */
    if ((code->co_flags & (CO_NEWLOCALS | CO_OPTIMIZED)) ==
        (CO_NEWLOCALS | CO_OPTIMIZED))
        ; /* f_locals = NULL; will be set by PyFrame_FastToLocals() */
    else if (code->co_flags & CO_NEWLOCALS) {
        locals = PyDict_New();
        if (locals == NULL) {
            Py_DECREF(f);
            return NULL;
        }
        f->f_locals = locals;
    }
    else {
        if (locals == NULL)
            locals = globals;
        Py_INCREF(locals);
        f->f_locals = locals;
    }

    f->f_lasti = -1;
    f->f_lineno = code->co_firstlineno;
    f->f_iblock = 0;
    f->f_executing = 0;
    f->f_gen = NULL;
    f->f_trace_opcodes = 0;
    f->f_trace_lines = 1;

    return f;
}
736 | | |
737 | | PyFrameObject* |
738 | | PyFrame_New(PyThreadState *tstate, PyCodeObject *code, |
739 | | PyObject *globals, PyObject *locals) |
740 | 0 | { |
741 | 0 | PyFrameObject *f = _PyFrame_New_NoTrack(tstate, code, globals, locals); |
742 | 0 | if (f) |
743 | 0 | _PyObject_GC_TRACK(f); |
744 | 0 | return f; |
745 | 0 | } |
746 | | |
747 | | |
748 | | /* Block management */ |
749 | | |
750 | | void |
751 | | PyFrame_BlockSetup(PyFrameObject *f, int type, int handler, int level) |
752 | 24.4k | { |
753 | 24.4k | PyTryBlock *b; |
754 | 24.4k | if (f->f_iblock >= CO_MAXBLOCKS) |
755 | 0 | Py_FatalError("XXX block stack overflow"); |
756 | 24.4k | b = &f->f_blockstack[f->f_iblock++]; |
757 | 24.4k | b->b_type = type; |
758 | 24.4k | b->b_level = level; |
759 | 24.4k | b->b_handler = handler; |
760 | 24.4k | } |
761 | | |
762 | | PyTryBlock * |
763 | | PyFrame_BlockPop(PyFrameObject *f) |
764 | 21.0k | { |
765 | 21.0k | PyTryBlock *b; |
766 | 21.0k | if (f->f_iblock <= 0) |
767 | 0 | Py_FatalError("XXX block stack underflow"); |
768 | 21.0k | b = &f->f_blockstack[--f->f_iblock]; |
769 | 21.0k | return b; |
770 | 21.0k | } |
771 | | |
772 | | /* Convert between "fast" version of locals and dictionary version. |
773 | | |
774 | | map and values are input arguments. map is a tuple of strings. |
775 | | values is an array of PyObject*. At index i, map[i] is the name of |
776 | | the variable with value values[i]. The function copies the first |
777 | | nmap variable from map/values into dict. If values[i] is NULL, |
778 | | the variable is deleted from dict. |
779 | | |
780 | | If deref is true, then the values being copied are cell variables |
781 | | and the value is extracted from the cell variable before being put |
782 | | in dict. |
783 | | */ |
784 | | |
785 | | static int |
786 | | map_to_dict(PyObject *map, Py_ssize_t nmap, PyObject *dict, PyObject **values, |
787 | | int deref) |
788 | 0 | { |
789 | 0 | Py_ssize_t j; |
790 | 0 | assert(PyTuple_Check(map)); |
791 | 0 | assert(PyDict_Check(dict)); |
792 | 0 | assert(PyTuple_Size(map) >= nmap); |
793 | 0 | for (j=0; j < nmap; j++) { |
794 | 0 | PyObject *key = PyTuple_GET_ITEM(map, j); |
795 | 0 | PyObject *value = values[j]; |
796 | 0 | assert(PyUnicode_Check(key)); |
797 | 0 | if (deref && value != NULL) { |
798 | 0 | assert(PyCell_Check(value)); |
799 | 0 | value = PyCell_GET(value); |
800 | 0 | } |
801 | 0 | if (value == NULL) { |
802 | 0 | if (PyObject_DelItem(dict, key) != 0) { |
803 | 0 | if (PyErr_ExceptionMatches(PyExc_KeyError)) |
804 | 0 | PyErr_Clear(); |
805 | 0 | else |
806 | 0 | return -1; |
807 | 0 | } |
808 | 0 | } |
809 | 0 | else { |
810 | 0 | if (PyObject_SetItem(dict, key, value) != 0) |
811 | 0 | return -1; |
812 | 0 | } |
813 | 0 | } |
814 | 0 | return 0; |
815 | 0 | } |
816 | | |
817 | | /* Copy values from the "locals" dict into the fast locals. |
818 | | |
819 | | dict is an input argument containing string keys representing |
820 | | variables names and arbitrary PyObject* as values. |
821 | | |
map and values are input/output arguments.  map is a tuple of strings.
values is an array of PyObject*.  At index i, map[i] is the name of
the variable whose value is stored in values[i].  For each of the
first nmap names, the function looks the name up in dict and stores
the value found into the corresponding values slot (the reverse
direction of map_to_dict above).

If deref is true, then the values being written are cell variables,
and the looked-up value is stored into the cell rather than into the
slot itself.  If clear is true, names in map that are missing from
dict reset values[i] to NULL; if clear is false, names missing from
dict are ignored.
833 | | |
834 | | Exceptions raised while modifying the dict are silently ignored, |
835 | | because there is no good way to report them. |
836 | | */ |
837 | | |
838 | | static void |
839 | | dict_to_map(PyObject *map, Py_ssize_t nmap, PyObject *dict, PyObject **values, |
840 | | int deref, int clear) |
841 | 0 | { |
842 | 0 | Py_ssize_t j; |
843 | 0 | assert(PyTuple_Check(map)); |
844 | 0 | assert(PyDict_Check(dict)); |
845 | 0 | assert(PyTuple_Size(map) >= nmap); |
846 | 0 | for (j=0; j < nmap; j++) { |
847 | 0 | PyObject *key = PyTuple_GET_ITEM(map, j); |
848 | 0 | PyObject *value = PyObject_GetItem(dict, key); |
849 | 0 | assert(PyUnicode_Check(key)); |
850 | | /* We only care about NULLs if clear is true. */ |
851 | 0 | if (value == NULL) { |
852 | 0 | PyErr_Clear(); |
853 | 0 | if (!clear) |
854 | 0 | continue; |
855 | 0 | } |
856 | 0 | if (deref) { |
857 | 0 | assert(PyCell_Check(values[j])); |
858 | 0 | if (PyCell_GET(values[j]) != value) { |
859 | 0 | if (PyCell_Set(values[j], value) < 0) |
860 | 0 | PyErr_Clear(); |
861 | 0 | } |
862 | 0 | } else if (values[j] != value) { |
863 | 0 | Py_XINCREF(value); |
864 | 0 | Py_XSETREF(values[j], value); |
865 | 0 | } |
866 | 0 | Py_XDECREF(value); |
867 | 0 | } |
868 | 0 | } |
869 | | |
870 | | int |
871 | | PyFrame_FastToLocalsWithError(PyFrameObject *f) |
872 | 61 | { |
873 | | /* Merge fast locals into f->f_locals */ |
874 | 61 | PyObject *locals, *map; |
875 | 61 | PyObject **fast; |
876 | 61 | PyCodeObject *co; |
877 | 61 | Py_ssize_t j; |
878 | 61 | Py_ssize_t ncells, nfreevars; |
879 | | |
880 | 61 | if (f == NULL) { |
881 | 0 | PyErr_BadInternalCall(); |
882 | 0 | return -1; |
883 | 0 | } |
884 | 61 | locals = f->f_locals; |
885 | 61 | if (locals == NULL) { |
886 | 0 | locals = f->f_locals = PyDict_New(); |
887 | 0 | if (locals == NULL) |
888 | 0 | return -1; |
889 | 0 | } |
890 | 61 | co = f->f_code; |
891 | 61 | map = co->co_varnames; |
892 | 61 | if (!PyTuple_Check(map)) { |
893 | 0 | PyErr_Format(PyExc_SystemError, |
894 | 0 | "co_varnames must be a tuple, not %s", |
895 | 0 | Py_TYPE(map)->tp_name); |
896 | 0 | return -1; |
897 | 0 | } |
898 | 61 | fast = f->f_localsplus; |
899 | 61 | j = PyTuple_GET_SIZE(map); |
900 | 61 | if (j > co->co_nlocals) |
901 | 0 | j = co->co_nlocals; |
902 | 61 | if (co->co_nlocals) { |
903 | 0 | if (map_to_dict(map, j, locals, fast, 0) < 0) |
904 | 0 | return -1; |
905 | 0 | } |
906 | 61 | ncells = PyTuple_GET_SIZE(co->co_cellvars); |
907 | 61 | nfreevars = PyTuple_GET_SIZE(co->co_freevars); |
908 | 61 | if (ncells || nfreevars) { |
909 | 0 | if (map_to_dict(co->co_cellvars, ncells, |
910 | 0 | locals, fast + co->co_nlocals, 1)) |
911 | 0 | return -1; |
912 | | |
913 | | /* If the namespace is unoptimized, then one of the |
914 | | following cases applies: |
915 | | 1. It does not contain free variables, because it |
916 | | uses import * or is a top-level namespace. |
917 | | 2. It is a class namespace. |
918 | | We don't want to accidentally copy free variables |
919 | | into the locals dict used by the class. |
920 | | */ |
921 | 0 | if (co->co_flags & CO_OPTIMIZED) { |
922 | 0 | if (map_to_dict(co->co_freevars, nfreevars, |
923 | 0 | locals, fast + co->co_nlocals + ncells, 1) < 0) |
924 | 0 | return -1; |
925 | 0 | } |
926 | 0 | } |
927 | 61 | return 0; |
928 | 61 | } |
929 | | |
930 | | void |
931 | | PyFrame_FastToLocals(PyFrameObject *f) |
932 | 0 | { |
933 | 0 | int res; |
934 | |
|
935 | 0 | assert(!PyErr_Occurred()); |
936 | |
|
937 | 0 | res = PyFrame_FastToLocalsWithError(f); |
938 | 0 | if (res < 0) |
939 | 0 | PyErr_Clear(); |
940 | 0 | } |
941 | | |
942 | | void |
943 | | PyFrame_LocalsToFast(PyFrameObject *f, int clear) |
944 | 61 | { |
945 | | /* Merge f->f_locals into fast locals */ |
946 | 61 | PyObject *locals, *map; |
947 | 61 | PyObject **fast; |
948 | 61 | PyObject *error_type, *error_value, *error_traceback; |
949 | 61 | PyCodeObject *co; |
950 | 61 | Py_ssize_t j; |
951 | 61 | Py_ssize_t ncells, nfreevars; |
952 | 61 | if (f == NULL) |
953 | 0 | return; |
954 | 61 | locals = f->f_locals; |
955 | 61 | co = f->f_code; |
956 | 61 | map = co->co_varnames; |
957 | 61 | if (locals == NULL) |
958 | 0 | return; |
959 | 61 | if (!PyTuple_Check(map)) |
960 | 0 | return; |
961 | 61 | PyErr_Fetch(&error_type, &error_value, &error_traceback); |
962 | 61 | fast = f->f_localsplus; |
963 | 61 | j = PyTuple_GET_SIZE(map); |
964 | 61 | if (j > co->co_nlocals) |
965 | 0 | j = co->co_nlocals; |
966 | 61 | if (co->co_nlocals) |
967 | 0 | dict_to_map(co->co_varnames, j, locals, fast, 0, clear); |
968 | 61 | ncells = PyTuple_GET_SIZE(co->co_cellvars); |
969 | 61 | nfreevars = PyTuple_GET_SIZE(co->co_freevars); |
970 | 61 | if (ncells || nfreevars) { |
971 | 0 | dict_to_map(co->co_cellvars, ncells, |
972 | 0 | locals, fast + co->co_nlocals, 1, clear); |
973 | | /* Same test as in PyFrame_FastToLocals() above. */ |
974 | 0 | if (co->co_flags & CO_OPTIMIZED) { |
975 | 0 | dict_to_map(co->co_freevars, nfreevars, |
976 | 0 | locals, fast + co->co_nlocals + ncells, 1, |
977 | 0 | clear); |
978 | 0 | } |
979 | 0 | } |
980 | 61 | PyErr_Restore(error_type, error_value, error_traceback); |
981 | 61 | } |
982 | | |
983 | | /* Clear out the free list */ |
984 | | int |
985 | | PyFrame_ClearFreeList(void) |
986 | 0 | { |
987 | 0 | int freelist_size = numfree; |
988 | |
|
989 | 0 | while (free_list != NULL) { |
990 | 0 | PyFrameObject *f = free_list; |
991 | 0 | free_list = free_list->f_back; |
992 | 0 | PyObject_GC_Del(f); |
993 | 0 | --numfree; |
994 | 0 | } |
995 | 0 | assert(numfree == 0); |
996 | 0 | return freelist_size; |
997 | 0 | } |
998 | | |
void
PyFrame_Fini(void)
{
    /* Interpreter-shutdown hook: release every frame held on the free
       list.  The freed-count return value is intentionally ignored. */
    (void)PyFrame_ClearFreeList();
}
1004 | | |
1005 | | /* Print summary info about the state of the optimized allocator */ |
1006 | | void |
1007 | | _PyFrame_DebugMallocStats(FILE *out) |
1008 | 0 | { |
1009 | 0 | _PyDebugAllocatorStats(out, |
1010 | 0 | "free PyFrameObject", |
1011 | 0 | numfree, sizeof(PyFrameObject)); |
1012 | 0 | } |
1013 | | |