/src/cpython/Python/pystate.c
Line | Count | Source |
1 | | |
2 | | /* Thread and interpreter state structures and their interfaces */ |
3 | | |
4 | | #include "Python.h" |
5 | | #include "pycore_abstract.h" // _PyIndex_Check() |
6 | | #include "pycore_audit.h" // _Py_AuditHookEntry |
7 | | #include "pycore_ceval.h" // _PyEval_AcquireLock() |
8 | | #include "pycore_codecs.h" // _PyCodec_Fini() |
9 | | #include "pycore_critical_section.h" // _PyCriticalSection_Resume() |
10 | | #include "pycore_dtoa.h" // _dtoa_state_INIT() |
11 | | #include "pycore_emscripten_trampoline.h" // _Py_EmscriptenTrampoline_Init() |
12 | | #include "pycore_freelist.h" // _PyObject_ClearFreeLists() |
13 | | #include "pycore_initconfig.h" // _PyStatus_OK() |
14 | | #include "pycore_interpframe.h" // _PyThreadState_HasStackSpace() |
15 | | #include "pycore_object.h" // _PyType_InitCache() |
16 | | #include "pycore_obmalloc.h" // _PyMem_obmalloc_state_on_heap() |
17 | | #include "pycore_optimizer.h" // JIT_CLEANUP_THRESHOLD |
18 | | #include "pycore_parking_lot.h" // _PyParkingLot_AfterFork() |
19 | | #include "pycore_pyerrors.h" // _PyErr_Clear() |
20 | | #include "pycore_pylifecycle.h" // _PyAST_Fini() |
21 | | #include "pycore_pymem.h" // _PyMem_DebugEnabled() |
22 | | #include "pycore_runtime.h" // _PyRuntime |
23 | | #include "pycore_runtime_init.h" // _PyRuntimeState_INIT |
24 | | #include "pycore_stackref.h" // Py_STACKREF_DEBUG |
25 | | #include "pycore_time.h" // _PyTime_Init() |
26 | | #include "pycore_uniqueid.h" // _PyObject_FinalizePerThreadRefcounts() |
27 | | |
28 | | |
29 | | /* -------------------------------------------------------------------------- |
30 | | CAUTION |
31 | | |
32 | | Always use PyMem_RawMalloc() and PyMem_RawFree() directly in this file. A |
33 | | number of these functions are advertised as safe to call when the GIL isn't |
34 | | held, and in a debug build Python redirects allocator macros (e.g. PyMem_NEW) |
35 | | to its debugging obmalloc functions. Those aren't thread-safe (they rely on the GIL |
36 | | to avoid the expense of doing their own locking). |
37 | | -------------------------------------------------------------------------- */ |
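 | | /* Illustrative sketch (not part of the original file): a helper that |
 | | may run without the GIL should pair the raw allocator with the raw |
 | | deallocator, e.g.: |
 | | |
 | |     void *buf = PyMem_RawMalloc(size);  // safe without the GIL |
 | |     if (buf == NULL) { |
 | |         return NULL; |
 | |     } |
 | |     // ... use buf ... |
 | |     PyMem_RawFree(buf); |
 | | |
 | | By contrast, PyMem_Malloc() and PyObject_Malloc() may be redirected |
 | | to the debug obmalloc hooks, which assume the GIL is held. */ |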
38 | | |
39 | | #ifdef HAVE_DLOPEN |
40 | | # ifdef HAVE_DLFCN_H |
41 | | # include <dlfcn.h> |
42 | | # endif |
43 | | # if !HAVE_DECL_RTLD_LAZY |
44 | | # define RTLD_LAZY 1 |
45 | | # endif |
46 | | #endif |
47 | | |
48 | | |
49 | | /****************************************/ |
50 | | /* helpers for the current thread state */ |
51 | | /****************************************/ |
52 | | |
53 | | // API for the current thread state is further down. |
54 | | |
55 | | /* "current" means one of: |
56 | | - bound to the current OS thread |
57 | | - holds the GIL |
58 | | */ |
59 | | |
60 | | //------------------------------------------------- |
61 | | // a highly efficient lookup for the current thread |
62 | | //------------------------------------------------- |
63 | | |
64 | | /* |
65 | | The stored thread state is set by PyThreadState_Swap(). |
66 | | |
67 | | For each of these functions, the GIL must be held by the current thread. |
68 | | */ |
69 | | |
70 | | |
71 | | #ifdef HAVE_THREAD_LOCAL |
72 | | /* The attached thread state for the current thread. */ |
73 | | _Py_thread_local PyThreadState *_Py_tss_tstate = NULL; |
74 | | |
75 | | /* The "bound" thread state used by PyGILState_Ensure(), |
76 | | also known as a "gilstate." */ |
77 | | _Py_thread_local PyThreadState *_Py_tss_gilstate = NULL; |
78 | | #endif |
79 | | |
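 | | // Note: _Py_thread_local typically expands to a compiler TLS |
 | | // qualifier (_Thread_local, __thread, or __declspec(thread)), so |
 | | // reading _Py_tss_tstate below is a single thread-local load -- |
 | | // hence "a highly efficient lookup". |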
80 | | static inline PyThreadState * |
81 | | current_fast_get(void) |
82 | 102M | { |
83 | 102M | #ifdef HAVE_THREAD_LOCAL |
84 | 102M | return _Py_tss_tstate; |
85 | | #else |
86 | | // XXX Fall back to the PyThread_tss_*() API. |
87 | | # error "no supported thread-local variable storage classifier" |
88 | | #endif |
89 | 102M | } |
90 | | |
91 | | static inline void |
92 | | current_fast_set(_PyRuntimeState *Py_UNUSED(runtime), PyThreadState *tstate) |
93 | 31.5k | { |
94 | 31.5k | assert(tstate != NULL); |
95 | 31.5k | #ifdef HAVE_THREAD_LOCAL |
96 | 31.5k | _Py_tss_tstate = tstate; |
97 | | #else |
98 | | // XXX Fall back to the PyThread_tss_*() API. |
99 | | # error "no supported thread-local variable storage classifier" |
100 | | #endif |
101 | 31.5k | } |
102 | | |
103 | | static inline void |
104 | | current_fast_clear(_PyRuntimeState *Py_UNUSED(runtime)) |
105 | 31.5k | { |
106 | 31.5k | #ifdef HAVE_THREAD_LOCAL |
107 | 31.5k | _Py_tss_tstate = NULL; |
108 | | #else |
109 | | // XXX Fall back to the PyThread_tss_*() API. |
110 | | # error "no supported thread-local variable storage classifier" |
111 | | #endif |
112 | 31.5k | } |
113 | | |
114 | | #define tstate_verify_not_active(tstate) \ |
115 | 0 | if (tstate == current_fast_get()) { \ |
116 | 0 | _Py_FatalErrorFormat(__func__, "tstate %p is still current", tstate); \ |
117 | 0 | } |
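 | | // Note: the macro above expands to an unbraced `if`, so it is only |
 | | // safe as a standalone statement (a following `else` would bind to |
 | | // its `if`). |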
118 | | |
119 | | PyThreadState * |
120 | | _PyThreadState_GetCurrent(void) |
121 | 8.37M | { |
122 | 8.37M | return current_fast_get(); |
123 | 8.37M | } |
124 | | |
125 | | |
126 | | //--------------------------------------------- |
127 | | // The thread state used by PyGILState_Ensure() |
128 | | //--------------------------------------------- |
129 | | |
130 | | /* |
131 | | The stored thread state is set by bind_tstate() (AKA PyThreadState_Bind()). |
132 | | |
133 | | The GIL does not need to be held for these. |
134 | | */ |
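 | | /* For reference, the documented pattern built on this state |
 | | (illustrative, not part of the original file): |
 | | |
 | |     PyGILState_STATE gstate = PyGILState_Ensure(); |
 | |     // ... safe to call the Python C API here ... |
 | |     PyGILState_Release(gstate); |
 | | */ |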
135 | | |
136 | | static inline PyThreadState * |
137 | | gilstate_get(void) |
138 | 32 | { |
139 | 32 | return _Py_tss_gilstate; |
140 | 32 | } |
141 | | |
142 | | static inline void |
143 | | gilstate_set(PyThreadState *tstate) |
144 | 16 | { |
145 | 16 | assert(tstate != NULL); |
146 | 16 | _Py_tss_gilstate = tstate; |
147 | 16 | } |
148 | | |
149 | | static inline void |
150 | | gilstate_clear(void) |
151 | 0 | { |
152 | 0 | _Py_tss_gilstate = NULL; |
153 | 0 | } |
154 | | |
155 | | |
156 | | #ifndef NDEBUG |
157 | | static inline int tstate_is_alive(PyThreadState *tstate); |
158 | | |
159 | | static inline int |
160 | | tstate_is_bound(PyThreadState *tstate) |
161 | | { |
162 | | return tstate->_status.bound && !tstate->_status.unbound; |
163 | | } |
164 | | #endif // !NDEBUG |
165 | | |
166 | | static void bind_gilstate_tstate(PyThreadState *); |
167 | | static void unbind_gilstate_tstate(PyThreadState *); |
168 | | |
169 | | static void tstate_mimalloc_bind(PyThreadState *); |
170 | | |
171 | | static void |
172 | | bind_tstate(PyThreadState *tstate) |
173 | 16 | { |
174 | 16 | assert(tstate != NULL); |
175 | 16 | assert(tstate_is_alive(tstate) && !tstate->_status.bound); |
176 | 16 | assert(!tstate->_status.unbound); // just in case |
177 | 16 | assert(!tstate->_status.bound_gilstate); |
178 | 16 | assert(tstate != gilstate_get()); |
179 | 16 | assert(!tstate->_status.active); |
180 | 16 | assert(tstate->thread_id == 0); |
181 | 16 | assert(tstate->native_thread_id == 0); |
182 | | |
183 | | // Currently we don't necessarily store the thread state |
184 | | // in thread-local storage (e.g. per-interpreter). |
185 | | |
186 | 16 | tstate->thread_id = PyThread_get_thread_ident(); |
187 | 16 | #ifdef PY_HAVE_THREAD_NATIVE_ID |
188 | 16 | tstate->native_thread_id = PyThread_get_thread_native_id(); |
189 | 16 | #endif |
190 | | |
191 | | #ifdef Py_GIL_DISABLED |
192 | | // Initialize biased reference counting inter-thread queue. Note that this |
193 | | // needs to be initialized from the active thread. |
194 | | _Py_brc_init_thread(tstate); |
195 | | #endif |
196 | | |
197 | | // mimalloc state needs to be initialized from the active thread. |
198 | 16 | tstate_mimalloc_bind(tstate); |
199 | | |
200 | 16 | tstate->_status.bound = 1; |
201 | 16 | } |
202 | | |
203 | | static void |
204 | | unbind_tstate(PyThreadState *tstate) |
205 | 0 | { |
206 | 0 | assert(tstate != NULL); |
207 | 0 | assert(tstate_is_bound(tstate)); |
208 | 0 | #ifndef HAVE_PTHREAD_STUBS |
209 | 0 | assert(tstate->thread_id > 0); |
210 | 0 | #endif |
211 | 0 | #ifdef PY_HAVE_THREAD_NATIVE_ID |
212 | 0 | assert(tstate->native_thread_id > 0); |
213 | 0 | #endif |
214 | | |
215 | | // We leave thread_id and native_thread_id alone |
216 | | // since they can be useful for debugging. |
217 | | // Check the `_status` field to know if these values |
218 | | // are still valid. |
219 | | |
220 | | // We leave tstate->_status.bound set to 1 |
221 | | // to indicate it was previously bound. |
222 | 0 | tstate->_status.unbound = 1; |
223 | 0 | } |
224 | | |
225 | | |
226 | | /* Stick the thread state for this thread in thread specific storage. |
227 | | |
228 | | When a thread state is created for a thread by some mechanism |
229 | | other than PyGILState_Ensure(), it's important that the GILState |
230 | | machinery knows about it so it doesn't try to create another |
231 | | thread state for the thread. |
232 | | (This is a better fix for SF bug #1010677 than the first one attempted.) |
233 | | |
234 | | The only situation where you can legitimately have more than one |
235 | | thread state for an OS level thread is when there are multiple |
236 | | interpreters. |
237 | | |
238 | | Before 3.12, the PyGILState_*() APIs didn't work with multiple |
239 | | interpreters (see bpo-10915 and bpo-15751), so this function used |
240 | | to set TSS only once. Thus, the first thread state created for a |
241 | | given OS-level thread would "win", which seemed reasonable behaviour. |
242 | | */ |
243 | | |
244 | | static void |
245 | | bind_gilstate_tstate(PyThreadState *tstate) |
246 | 16 | { |
247 | 16 | assert(tstate != NULL); |
248 | 16 | assert(tstate_is_alive(tstate)); |
249 | 16 | assert(tstate_is_bound(tstate)); |
250 | | // XXX assert(!tstate->_status.active); |
251 | 16 | assert(!tstate->_status.bound_gilstate); |
252 | | |
253 | 16 | PyThreadState *tcur = gilstate_get(); |
254 | 16 | assert(tstate != tcur); |
255 | | |
256 | 16 | if (tcur != NULL) { |
257 | 0 | tcur->_status.bound_gilstate = 0; |
258 | 0 | } |
259 | 16 | gilstate_set(tstate); |
260 | 16 | tstate->_status.bound_gilstate = 1; |
261 | 16 | } |
262 | | |
263 | | static void |
264 | | unbind_gilstate_tstate(PyThreadState *tstate) |
265 | 0 | { |
266 | 0 | assert(tstate != NULL); |
267 | | // XXX assert(tstate_is_alive(tstate)); |
268 | 0 | assert(tstate_is_bound(tstate)); |
269 | | // XXX assert(!tstate->_status.active); |
270 | 0 | assert(tstate->_status.bound_gilstate); |
271 | 0 | assert(tstate == gilstate_get()); |
272 | 0 | gilstate_clear(); |
273 | 0 | tstate->_status.bound_gilstate = 0; |
274 | 0 | } |
275 | | |
276 | | |
277 | | //---------------------------------------------- |
278 | | // the thread state that currently holds the GIL |
279 | | //---------------------------------------------- |
280 | | |
281 | | /* This is not exported, as it is not reliable! It can only |
282 | | ever be compared to the state for the *current* thread. |
283 | | * If not equal, then it doesn't matter that the actual |
284 | | value may change immediately after comparison, as it can't |
285 | | possibly change to the current thread's state. |
286 | | * If equal, then the current thread holds the lock, so the value can't |
287 | | change until we yield the lock. |
288 | | */ |
289 | | static int |
290 | | holds_gil(PyThreadState *tstate) |
291 | 0 | { |
292 | | // XXX Fall back to tstate->interp->runtime->ceval.gil.last_holder |
293 | | // (and tstate->interp->runtime->ceval.gil.locked). |
294 | 0 | assert(tstate != NULL); |
295 | | /* Must be the tstate for this thread */ |
296 | 0 | assert(tstate == gilstate_get()); |
297 | 0 | return tstate == current_fast_get(); |
298 | 0 | } |
299 | | |
300 | | |
301 | | /****************************/ |
302 | | /* the global runtime state */ |
303 | | /****************************/ |
304 | | |
305 | | //---------- |
306 | | // lifecycle |
307 | | //---------- |
308 | | |
309 | | /* Suppress deprecation warning for PyBytesObject.ob_shash */ |
310 | | _Py_COMP_DIAG_PUSH |
311 | | _Py_COMP_DIAG_IGNORE_DEPR_DECLS |
312 | | /* We use "initial" if the runtime gets re-used |
313 | | (e.g. Py_Finalize() followed by Py_Initialize()). |
314 | | Note that we initialize "initial" relative to _PyRuntime, |
315 | | to ensure pre-initialized pointers point to the active |
316 | | runtime state (and not "initial"). */ |
317 | | static const _PyRuntimeState initial = _PyRuntimeState_INIT(_PyRuntime, ""); |
318 | | _Py_COMP_DIAG_POP |
319 | | |
320 | | #define LOCKS_INIT(runtime) \ |
321 | 0 | { \ |
322 | 0 | &(runtime)->interpreters.mutex, \ |
323 | 0 | &(runtime)->xi.data_lookup.registry.mutex, \ |
324 | 0 | &(runtime)->unicode_state.ids.mutex, \ |
325 | 0 | &(runtime)->imports.extensions.mutex, \ |
326 | 0 | &(runtime)->ceval.pending_mainthread.mutex, \ |
327 | 0 | &(runtime)->atexit.mutex, \ |
328 | 0 | &(runtime)->audit_hooks.mutex, \ |
329 | 0 | &(runtime)->allocators.mutex, \ |
330 | 0 | &(runtime)->_main_interpreter.types.mutex, \ |
331 | 0 | &(runtime)->_main_interpreter.code_state.mutex, \ |
332 | 0 | } |
333 | | |
334 | | static void |
335 | | init_runtime(_PyRuntimeState *runtime, |
336 | | void *open_code_hook, void *open_code_userdata, |
337 | | _Py_AuditHookEntry *audit_hook_head, |
338 | | Py_ssize_t unicode_next_index) |
339 | 16 | { |
340 | 16 | assert(!runtime->preinitializing); |
341 | 16 | assert(!runtime->preinitialized); |
342 | 16 | assert(!runtime->core_initialized); |
343 | 16 | assert(!runtime->initialized); |
344 | 16 | assert(!runtime->_initialized); |
345 | | |
346 | 16 | runtime->open_code_hook = open_code_hook; |
347 | 16 | runtime->open_code_userdata = open_code_userdata; |
348 | 16 | runtime->audit_hooks.head = audit_hook_head; |
349 | | |
350 | 16 | PyPreConfig_InitPythonConfig(&runtime->preconfig); |
351 | | |
352 | | // Set it to the ID of the main thread of the main interpreter. |
353 | 16 | runtime->main_thread = PyThread_get_thread_ident(); |
354 | | |
355 | 16 | runtime->unicode_state.ids.next_index = unicode_next_index; |
356 | | |
357 | | #if defined(__EMSCRIPTEN__) && defined(PY_CALL_TRAMPOLINE) |
358 | | _Py_EmscriptenTrampoline_Init(runtime); |
359 | | #endif |
360 | | |
361 | 16 | runtime->_initialized = 1; |
362 | 16 | } |
363 | | |
364 | | PyStatus |
365 | | _PyRuntimeState_Init(_PyRuntimeState *runtime) |
366 | 16 | { |
367 | | /* We preserve the hook across init, because there is |
368 | | currently no public API to set it between runtime |
369 | | initialization and interpreter initialization. */ |
370 | 16 | void *open_code_hook = runtime->open_code_hook; |
371 | 16 | void *open_code_userdata = runtime->open_code_userdata; |
372 | 16 | _Py_AuditHookEntry *audit_hook_head = runtime->audit_hooks.head; |
373 | | // bpo-42882: Preserve next_index value if Py_Initialize()/Py_Finalize() |
374 | | // is called multiple times. |
375 | 16 | Py_ssize_t unicode_next_index = runtime->unicode_state.ids.next_index; |
376 | | |
377 | 16 | if (runtime->_initialized) { |
378 | | // Py_Initialize() must be running again. |
379 | | // Reset to _PyRuntimeState_INIT. |
380 | 0 | memcpy(runtime, &initial, sizeof(*runtime)); |
381 | | // Preserve the cookie from the original runtime. |
382 | 0 | memcpy(runtime->debug_offsets.cookie, _Py_Debug_Cookie, 8); |
383 | 0 | assert(!runtime->_initialized); |
384 | 0 | } |
385 | | |
386 | 16 | PyStatus status = _PyTime_Init(&runtime->time); |
387 | 16 | if (_PyStatus_EXCEPTION(status)) { |
388 | 0 | return status; |
389 | 0 | } |
390 | | |
391 | 16 | init_runtime(runtime, open_code_hook, open_code_userdata, audit_hook_head, |
392 | 16 | unicode_next_index); |
393 | | |
394 | 16 | return _PyStatus_OK(); |
395 | 16 | } |
396 | | |
397 | | void |
398 | | _PyRuntimeState_Fini(_PyRuntimeState *runtime) |
399 | 0 | { |
400 | | #ifdef Py_REF_DEBUG |
401 | | /* The count is cleared by _Py_FinalizeRefTotal(). */ |
402 | | assert(runtime->object_state.interpreter_leaks == 0); |
403 | | #endif |
404 | 0 | gilstate_clear(); |
405 | 0 | } |
406 | | |
407 | | #ifdef HAVE_FORK |
408 | | /* This function is called from PyOS_AfterFork_Child to ensure that |
409 | | newly created child processes do not share locks with the parent. */ |
410 | | PyStatus |
411 | | _PyRuntimeState_ReInitThreads(_PyRuntimeState *runtime) |
412 | 0 | { |
413 | | // This was initially set in _PyRuntimeState_Init(). |
414 | 0 | runtime->main_thread = PyThread_get_thread_ident(); |
415 | | |
416 | | // Clears the parking lot. Any waiting threads are dead. This must be |
417 | | // called before releasing any locks that use the parking lot. |
418 | 0 | _PyParkingLot_AfterFork(); |
419 | | |
420 | | // Re-initialize global locks |
421 | 0 | PyMutex *locks[] = LOCKS_INIT(runtime); |
422 | 0 | for (size_t i = 0; i < Py_ARRAY_LENGTH(locks); i++) { |
423 | 0 | _PyMutex_at_fork_reinit(locks[i]); |
424 | 0 | } |
425 | | #ifdef Py_GIL_DISABLED |
426 | | for (PyInterpreterState *interp = runtime->interpreters.head; |
427 | | interp != NULL; interp = interp->next) |
428 | | { |
429 | | for (int i = 0; i < NUM_WEAKREF_LIST_LOCKS; i++) { |
430 | | _PyMutex_at_fork_reinit(&interp->weakref_locks[i]); |
431 | | } |
432 | | } |
433 | | #endif |
434 | | |
435 | 0 | _PyTypes_AfterFork(); |
436 | | |
437 | 0 | _PyThread_AfterFork(&runtime->threads); |
438 | | |
439 | 0 | return _PyStatus_OK(); |
440 | 0 | } |
441 | | #endif |
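 | | /* Caller-side sketch (illustrative, assumes a POSIX fork()): the |
 | | child must reinitialize the runtime before using the C API: |
 | | |
 | |     pid_t pid = fork(); |
 | |     if (pid == 0) { |
 | |         PyOS_AfterFork_Child();  // runs _PyRuntimeState_ReInitThreads() |
 | |     } |
 | | */ |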
442 | | |
443 | | |
444 | | /*************************************/ |
445 | | /* the per-interpreter runtime state */ |
446 | | /*************************************/ |
447 | | |
448 | | //---------- |
449 | | // lifecycle |
450 | | //---------- |
451 | | |
452 | | /* Calling this indicates that the runtime is ready to create interpreters. */ |
453 | | |
454 | | PyStatus |
455 | | _PyInterpreterState_Enable(_PyRuntimeState *runtime) |
456 | 16 | { |
457 | 16 | struct pyinterpreters *interpreters = &runtime->interpreters; |
458 | 16 | interpreters->next_id = 0; |
459 | 16 | return _PyStatus_OK(); |
460 | 16 | } |
461 | | |
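 | | // Note: PyMem_RawCalloc() only guarantees fundamental alignment, so |
 | | // alloc_interpreter() over-allocates by (alignment - 1) bytes and |
 | | // rounds the pointer up; the original pointer is stashed in |
 | | // interp->_malloced so free_interpreter() can release it. |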
462 | | static PyInterpreterState * |
463 | | alloc_interpreter(void) |
464 | 0 | { |
465 | 0 | size_t alignment = _Alignof(PyInterpreterState); |
466 | 0 | size_t allocsize = sizeof(PyInterpreterState) + alignment - 1; |
467 | 0 | void *mem = PyMem_RawCalloc(1, allocsize); |
468 | 0 | if (mem == NULL) { |
469 | 0 | return NULL; |
470 | 0 | } |
471 | 0 | PyInterpreterState *interp = _Py_ALIGN_UP(mem, alignment); |
472 | 0 | assert(_Py_IS_ALIGNED(interp, alignment)); |
473 | 0 | interp->_malloced = mem; |
474 | 0 | return interp; |
475 | 0 | } |
476 | | |
477 | | static void |
478 | | free_interpreter(PyInterpreterState *interp) |
479 | 0 | { |
480 | | // The main interpreter is statically allocated so |
481 | | // should not be freed. |
482 | 0 | if (interp != &_PyRuntime._main_interpreter) { |
483 | 0 | if (_PyMem_obmalloc_state_on_heap(interp)) { |
484 | | // interpreter has its own obmalloc state, free it |
485 | 0 | PyMem_RawFree(interp->obmalloc); |
486 | 0 | interp->obmalloc = NULL; |
487 | 0 | } |
488 | 0 | assert(_Py_IS_ALIGNED(interp, _Alignof(PyInterpreterState))); |
489 | 0 | PyMem_RawFree(interp->_malloced); |
490 | 0 | } |
491 | 0 | } |
492 | | |
493 | | #ifndef NDEBUG |
494 | | static inline int check_interpreter_whence(long); |
495 | | #endif |
496 | | |
497 | | extern _Py_CODEUNIT * |
498 | | _Py_LazyJitTrampoline( |
499 | | struct _PyExecutorObject *exec, _PyInterpreterFrame *frame, _PyStackRef *stack_pointer, PyThreadState *tstate |
500 | | ); |
501 | | |
502 | | /* Get the interpreter state to a minimal consistent state. |
503 | | Further init happens in pylifecycle.c before it can be used. |
504 | | All fields not initialized here are expected to be zeroed out, |
505 | | e.g. by PyMem_RawCalloc() or memset(), or otherwise pre-initialized. |
506 | | The runtime state is not manipulated. Instead it is assumed that |
507 | | the interpreter is getting added to the runtime. |
508 | | |
509 | | Note that the main interpreter was statically initialized as part |
510 | | of the runtime and most state is already set properly. That leaves |
511 | | a small number of fields to initialize dynamically, as well as some |
512 | | that are initialized lazily. |
513 | | |
514 | | For subinterpreters we memcpy() the main interpreter in |
515 | | PyInterpreterState_New(), leaving it in the same mostly-initialized |
516 | | state. The only difference is that the interpreter has some |
517 | | self-referential state that is statically initialized to the |
518 | | main interpreter. We fix those fields here, in addition |
519 | | to the other dynamically initialized fields. |
520 | | */ |
521 | | static PyStatus |
522 | | init_interpreter(PyInterpreterState *interp, |
523 | | _PyRuntimeState *runtime, int64_t id, |
524 | | PyInterpreterState *next, |
525 | | long whence) |
526 | 16 | { |
527 | 16 | if (interp->_initialized) { |
528 | 0 | return _PyStatus_ERR("interpreter already initialized"); |
529 | 0 | } |
530 | | |
531 | 16 | assert(interp->_whence == _PyInterpreterState_WHENCE_NOTSET); |
532 | 16 | assert(check_interpreter_whence(whence) == 0); |
533 | 16 | interp->_whence = whence; |
534 | | |
535 | 16 | assert(runtime != NULL); |
536 | 16 | interp->runtime = runtime; |
537 | | |
538 | 16 | assert(id > 0 || (id == 0 && interp == runtime->interpreters.main)); |
539 | 16 | interp->id = id; |
540 | | |
541 | 16 | interp->id_refcount = 0; |
542 | | |
543 | 16 | assert(runtime->interpreters.head == interp); |
544 | 16 | assert(next != NULL || (interp == runtime->interpreters.main)); |
545 | 16 | interp->next = next; |
546 | | |
547 | 16 | interp->threads.preallocated = &interp->_initial_thread; |
548 | | |
549 | | // We would call _PyObject_InitState() at this point |
550 | | // if interp->feature_flags were already set. |
551 | | |
552 | 16 | _PyEval_InitState(interp); |
553 | 16 | _PyGC_InitState(&interp->gc); |
554 | 16 | PyConfig_InitPythonConfig(&interp->config); |
555 | 16 | _PyType_InitCache(interp); |
556 | | #ifdef Py_GIL_DISABLED |
557 | | _Py_brc_init_state(interp); |
558 | | #endif |
559 | 16 | llist_init(&interp->mem_free_queue.head); |
560 | 16 | llist_init(&interp->asyncio_tasks_head); |
561 | 16 | interp->asyncio_tasks_lock = (PyMutex){0}; |
562 | 272 | for (int i = 0; i < _PY_MONITORING_UNGROUPED_EVENTS; i++) { |
563 | 256 | interp->monitors.tools[i] = 0; |
564 | 256 | } |
565 | 144 | for (int t = 0; t < PY_MONITORING_TOOL_IDS; t++) { |
566 | 2.56k | for (int e = 0; e < _PY_MONITORING_EVENTS; e++) { |
567 | 2.43k | interp->monitoring_callables[t][e] = NULL; |
568 | | |
569 | 2.43k | } |
570 | 128 | interp->monitoring_tool_versions[t] = 0; |
571 | 128 | } |
572 | 16 | interp->_code_object_generation = 0; |
573 | 16 | interp->jit = false; |
574 | 16 | interp->executor_list_head = NULL; |
575 | 16 | interp->executor_deletion_list_head = NULL; |
576 | 16 | interp->executor_deletion_list_remaining_capacity = 0; |
577 | 16 | interp->trace_run_counter = JIT_CLEANUP_THRESHOLD; |
578 | 16 | if (interp != &runtime->_main_interpreter) { |
579 | | /* Fix the self-referential, statically initialized fields. */ |
580 | 0 | interp->dtoa = (struct _dtoa_state)_dtoa_state_INIT(interp); |
581 | 0 | } |
582 | | #if !defined(Py_GIL_DISABLED) && defined(Py_STACKREF_DEBUG) |
583 | | interp->next_stackref = INITIAL_STACKREF_INDEX; |
584 | | _Py_hashtable_allocator_t alloc = { |
585 | | .malloc = malloc, |
586 | | .free = free, |
587 | | }; |
588 | | interp->open_stackrefs_table = _Py_hashtable_new_full( |
589 | | _Py_hashtable_hash_ptr, |
590 | | _Py_hashtable_compare_direct, |
591 | | NULL, |
592 | | NULL, |
593 | | &alloc |
594 | | ); |
595 | | # ifdef Py_STACKREF_CLOSE_DEBUG |
596 | | interp->closed_stackrefs_table = _Py_hashtable_new_full( |
597 | | _Py_hashtable_hash_ptr, |
598 | | _Py_hashtable_compare_direct, |
599 | | NULL, |
600 | | NULL, |
601 | | &alloc |
602 | | ); |
603 | | # endif |
604 | | _Py_stackref_associate(interp, Py_None, PyStackRef_None); |
605 | | _Py_stackref_associate(interp, Py_False, PyStackRef_False); |
606 | | _Py_stackref_associate(interp, Py_True, PyStackRef_True); |
607 | | #endif |
608 | | |
609 | 16 | interp->_initialized = 1; |
610 | 16 | return _PyStatus_OK(); |
611 | 16 | } |
612 | | |
613 | | |
614 | | PyStatus |
615 | | _PyInterpreterState_New(PyThreadState *tstate, PyInterpreterState **pinterp) |
616 | 16 | { |
617 | 16 | *pinterp = NULL; |
618 | | |
619 | | // Don't get runtime from tstate since tstate can be NULL |
620 | 16 | _PyRuntimeState *runtime = &_PyRuntime; |
621 | | |
622 | | // tstate is NULL when pycore_create_interpreter() calls |
623 | | // _PyInterpreterState_New() to create the main interpreter. |
624 | 16 | if (tstate != NULL) { |
625 | 0 | if (_PySys_Audit(tstate, "cpython.PyInterpreterState_New", NULL) < 0) { |
626 | 0 | return _PyStatus_ERR("sys.audit failed"); |
627 | 0 | } |
628 | 0 | } |
629 | | |
630 | | /* We completely serialize creation of multiple interpreters, since |
631 | | it simplifies things here and blocking concurrent calls isn't a problem. |
632 | | Regardless, we must fully block subinterpreter creation until |
633 | | after the main interpreter is created. */ |
634 | 16 | HEAD_LOCK(runtime); |
635 | | |
636 | 16 | struct pyinterpreters *interpreters = &runtime->interpreters; |
637 | 16 | int64_t id = interpreters->next_id; |
638 | 16 | interpreters->next_id += 1; |
639 | | |
640 | | // Allocate the interpreter and add it to the runtime state. |
641 | 16 | PyInterpreterState *interp; |
642 | 16 | PyStatus status; |
643 | 16 | PyInterpreterState *old_head = interpreters->head; |
644 | 16 | if (old_head == NULL) { |
645 | | // We are creating the main interpreter. |
646 | 16 | assert(interpreters->main == NULL); |
647 | 16 | assert(id == 0); |
648 | | |
649 | 16 | interp = &runtime->_main_interpreter; |
650 | 16 | assert(interp->id == 0); |
651 | 16 | assert(interp->next == NULL); |
652 | | |
653 | 16 | interpreters->main = interp; |
654 | 16 | } |
655 | 0 | else { |
656 | 0 | assert(interpreters->main != NULL); |
657 | 0 | assert(id != 0); |
658 | | |
659 | 0 | interp = alloc_interpreter(); |
660 | 0 | if (interp == NULL) { |
661 | 0 | status = _PyStatus_NO_MEMORY(); |
662 | 0 | goto error; |
663 | 0 | } |
664 | | // Set to _PyInterpreterState_INIT. |
665 | 0 | memcpy(interp, &initial._main_interpreter, sizeof(*interp)); |
666 | | |
667 | 0 | if (id < 0) { |
668 | | /* overflow or Py_Initialize() not called yet! */ |
669 | 0 | status = _PyStatus_ERR("failed to get an interpreter ID"); |
670 | 0 | goto error; |
671 | 0 | } |
672 | 0 | } |
673 | 16 | interpreters->head = interp; |
674 | | |
675 | 16 | long whence = _PyInterpreterState_WHENCE_UNKNOWN; |
676 | 16 | status = init_interpreter(interp, runtime, |
677 | 16 | id, old_head, whence); |
678 | 16 | if (_PyStatus_EXCEPTION(status)) { |
679 | 0 | goto error; |
680 | 0 | } |
681 | | |
682 | 16 | HEAD_UNLOCK(runtime); |
683 | | |
684 | 16 | assert(interp != NULL); |
685 | 16 | *pinterp = interp; |
686 | 16 | return _PyStatus_OK(); |
687 | | |
688 | 0 | error: |
689 | 0 | HEAD_UNLOCK(runtime); |
690 | | |
691 | 0 | if (interp != NULL) { |
692 | 0 | free_interpreter(interp); |
693 | 0 | } |
694 | 0 | return status; |
695 | 16 | } |
696 | | |
697 | | |
698 | | PyInterpreterState * |
699 | | PyInterpreterState_New(void) |
700 | 0 | { |
701 | | // tstate can be NULL |
702 | 0 | PyThreadState *tstate = current_fast_get(); |
703 | | |
704 | 0 | PyInterpreterState *interp; |
705 | 0 | PyStatus status = _PyInterpreterState_New(tstate, &interp); |
706 | 0 | if (_PyStatus_EXCEPTION(status)) { |
707 | 0 | Py_ExitStatusException(status); |
708 | 0 | } |
709 | 0 | assert(interp != NULL); |
710 | 0 | return interp; |
711 | 0 | } |
712 | | |
713 | | #if !defined(Py_GIL_DISABLED) && defined(Py_STACKREF_DEBUG) |
714 | | extern void |
715 | | _Py_stackref_report_leaks(PyInterpreterState *interp); |
716 | | #endif |
717 | | |
718 | | static void |
719 | | interpreter_clear(PyInterpreterState *interp, PyThreadState *tstate) |
720 | 0 | { |
721 | 0 | assert(interp != NULL); |
722 | 0 | assert(tstate != NULL); |
723 | 0 | _PyRuntimeState *runtime = interp->runtime; |
724 | | |
725 | | /* XXX Conditions we need to enforce: |
726 | | |
727 | | * the GIL must be held by the current thread |
728 | | * tstate must be the "current" thread state (current_fast_get()) |
729 | | * tstate->interp must be interp |
730 | | * for the main interpreter, tstate must be the main thread |
731 | | */ |
732 | | // XXX Ideally, we would not rely on any thread state in this function |
733 | | // (and we would drop the "tstate" argument). |
734 | | |
735 | 0 | if (_PySys_Audit(tstate, "cpython.PyInterpreterState_Clear", NULL) < 0) { |
736 | 0 | _PyErr_Clear(tstate); |
737 | 0 | } |
738 | | |
739 | | // Clear the current/main thread state last. |
740 | 0 | _Py_FOR_EACH_TSTATE_BEGIN(interp, p) { |
741 | | // See https://github.com/python/cpython/issues/102126 |
742 | | // Must be called without HEAD_LOCK held as it can deadlock |
743 | | // if any finalizer tries to acquire that lock. |
744 | 0 | HEAD_UNLOCK(runtime); |
745 | 0 | PyThreadState_Clear(p); |
746 | 0 | HEAD_LOCK(runtime); |
747 | 0 | } |
748 | 0 | _Py_FOR_EACH_TSTATE_END(interp); |
749 | 0 | if (tstate->interp == interp) { |
750 | | /* We fix tstate->_status below once we're sure we aren't using it |
751 | | (e.g. no longer need the GIL). */ |
752 | | // XXX Eliminate the need to do this. |
753 | 0 | tstate->_status.cleared = 0; |
754 | 0 | } |
755 | | |
756 | | /* It is possible that any of the objects below have a finalizer |
757 | | that runs Python code or otherwise relies on a thread state |
758 | | or even the interpreter state. For now we trust that isn't |
759 | | a problem. |
760 | | */ |
761 | | // XXX Make sure we properly deal with problematic finalizers. |
762 | | |
763 | 0 | Py_CLEAR(interp->audit_hooks); |
764 | | |
765 | | // At this time, all the threads should be cleared so we don't need atomic |
766 | | // operations for instrumentation_version or eval_breaker. |
767 | 0 | interp->ceval.instrumentation_version = 0; |
768 | 0 | tstate->eval_breaker = 0; |
769 | | |
770 | 0 | for (int i = 0; i < _PY_MONITORING_UNGROUPED_EVENTS; i++) { |
771 | 0 | interp->monitors.tools[i] = 0; |
772 | 0 | } |
773 | 0 | for (int t = 0; t < PY_MONITORING_TOOL_IDS; t++) { |
774 | 0 | for (int e = 0; e < _PY_MONITORING_EVENTS; e++) { |
775 | 0 | Py_CLEAR(interp->monitoring_callables[t][e]); |
776 | 0 | } |
777 | 0 | } |
778 | 0 | for (int t = 0; t < PY_MONITORING_TOOL_IDS; t++) { |
779 | 0 | Py_CLEAR(interp->monitoring_tool_names[t]); |
780 | 0 | } |
781 | 0 | interp->_code_object_generation = 0; |
782 | | #ifdef Py_GIL_DISABLED |
783 | | interp->tlbc_indices.tlbc_generation = 0; |
784 | | #endif |
785 | | |
786 | 0 | PyConfig_Clear(&interp->config); |
787 | 0 | _PyCodec_Fini(interp); |
788 | | |
789 | 0 | assert(interp->imports.modules == NULL); |
790 | 0 | assert(interp->imports.modules_by_index == NULL); |
791 | 0 | assert(interp->imports.importlib == NULL); |
792 | 0 | assert(interp->imports.import_func == NULL); |
793 | | |
794 | 0 | Py_CLEAR(interp->sysdict_copy); |
795 | 0 | Py_CLEAR(interp->builtins_copy); |
796 | 0 | Py_CLEAR(interp->dict); |
797 | 0 | #ifdef HAVE_FORK |
798 | 0 | Py_CLEAR(interp->before_forkers); |
799 | 0 | Py_CLEAR(interp->after_forkers_parent); |
800 | 0 | Py_CLEAR(interp->after_forkers_child); |
801 | 0 | #endif |
802 | | |
803 | | |
804 | | #ifdef _Py_TIER2 |
805 | | _Py_ClearExecutorDeletionList(interp); |
806 | | #endif |
807 | 0 | _PyAST_Fini(interp); |
808 | 0 | _PyAtExit_Fini(interp); |
809 | | |
810 | | // All Python types must be destroyed before the last GC collection. Python |
811 | | // types create a reference cycle to themselves in their |
812 | | // PyTypeObject.tp_mro member (the tuple contains the type). |
813 | | |
814 | | /* Last garbage collection on this interpreter */ |
815 | 0 | _PyGC_CollectNoFail(tstate); |
816 | 0 | _PyGC_Fini(interp); |
817 | | |
818 | | // Finalize warnings after last gc so that any finalizers can |
819 | | // access warnings state |
820 | 0 | _PyWarnings_Fini(interp); |
821 | 0 | struct _PyExecutorObject *cold = interp->cold_executor; |
822 | 0 | if (cold != NULL) { |
823 | 0 | interp->cold_executor = NULL; |
824 | 0 | assert(cold->vm_data.valid); |
825 | 0 | assert(cold->vm_data.warm); |
826 | 0 | _PyExecutor_Free(cold); |
827 | 0 | } |
828 | | /* We don't clear sysdict and builtins until the end of this function. |
829 | | Because clearing other attributes can execute arbitrary Python code |
830 | | which requires sysdict and builtins. */ |
831 | 0 | PyDict_Clear(interp->sysdict); |
832 | 0 | PyDict_Clear(interp->builtins); |
833 | 0 | Py_CLEAR(interp->sysdict); |
834 | 0 | Py_CLEAR(interp->builtins); |
835 | | |
836 | | #if !defined(Py_GIL_DISABLED) && defined(Py_STACKREF_DEBUG) |
837 | | # ifdef Py_STACKREF_CLOSE_DEBUG |
838 | | _Py_hashtable_destroy(interp->closed_stackrefs_table); |
839 | | interp->closed_stackrefs_table = NULL; |
840 | | # endif |
841 | | _Py_stackref_report_leaks(interp); |
842 | | _Py_hashtable_destroy(interp->open_stackrefs_table); |
843 | | interp->open_stackrefs_table = NULL; |
844 | | #endif |
845 | | |
846 | 0 | if (tstate->interp == interp) { |
847 | | /* We are now safe to fix tstate->_status.cleared. */ |
848 | | // XXX Do this (much) earlier? |
849 | 0 | tstate->_status.cleared = 1; |
850 | 0 | } |
851 | | |
852 | 0 | for (int i=0; i < DICT_MAX_WATCHERS; i++) { |
853 | 0 | interp->dict_state.watchers[i] = NULL; |
854 | 0 | } |
855 | | |
856 | 0 | for (int i=0; i < TYPE_MAX_WATCHERS; i++) { |
857 | 0 | interp->type_watchers[i] = NULL; |
858 | 0 | } |
859 | | |
860 | 0 | for (int i=0; i < FUNC_MAX_WATCHERS; i++) { |
861 | 0 | interp->func_watchers[i] = NULL; |
862 | 0 | } |
863 | 0 | interp->active_func_watchers = 0; |
864 | | |
865 | 0 | for (int i=0; i < CODE_MAX_WATCHERS; i++) { |
866 | 0 | interp->code_watchers[i] = NULL; |
867 | 0 | } |
868 | 0 | interp->active_code_watchers = 0; |
869 | | |
870 | 0 | for (int i=0; i < CONTEXT_MAX_WATCHERS; i++) { |
871 | 0 | interp->context_watchers[i] = NULL; |
872 | 0 | } |
873 | 0 | interp->active_context_watchers = 0; |
874 | | // XXX Once we have one allocator per interpreter (i.e. |
875 | | // per-interpreter GC) we must ensure that all of the interpreter's |
876 | | // objects have been cleaned up at that point. |
877 | | |
878 | | // We could clear interp->threads.freelist here |
879 | | // if it held more than just the initial thread state. |
880 | 0 | } |
881 | | |
882 | | |
883 | | void |
884 | | PyInterpreterState_Clear(PyInterpreterState *interp) |
885 | 0 | { |
886 | | // Use the current Python thread state to call audit hooks and to collect |
887 | | // garbage. It can be different than the current Python thread state |
888 | | // of 'interp'. |
889 | 0 | PyThreadState *current_tstate = current_fast_get(); |
890 | 0 | _PyImport_ClearCore(interp); |
891 | 0 | interpreter_clear(interp, current_tstate); |
892 | 0 | } |
893 | | |
894 | | |
895 | | void |
896 | | _PyInterpreterState_Clear(PyThreadState *tstate) |
897 | 0 | { |
898 | 0 | _PyImport_ClearCore(tstate->interp); |
899 | 0 | interpreter_clear(tstate->interp, tstate); |
900 | 0 | } |
901 | | |
902 | | |
903 | | static inline void tstate_deactivate(PyThreadState *tstate); |
904 | | static void tstate_set_detached(PyThreadState *tstate, int detached_state); |
905 | | static void zapthreads(PyInterpreterState *interp); |
906 | | |
907 | | void |
908 | | PyInterpreterState_Delete(PyInterpreterState *interp) |
909 | 0 | { |
910 | 0 | _PyRuntimeState *runtime = interp->runtime; |
911 | 0 | struct pyinterpreters *interpreters = &runtime->interpreters; |
912 | | |
913 | | // XXX Clearing the "current" thread state should happen before |
914 | | // we start finalizing the interpreter (or the current thread state). |
915 | 0 | PyThreadState *tcur = current_fast_get(); |
916 | 0 | if (tcur != NULL && interp == tcur->interp) { |
917 | | /* Unset current thread. After this, many C API calls become crashy. */ |
918 | 0 | _PyThreadState_Detach(tcur); |
919 | 0 | } |
920 | | |
921 | 0 | zapthreads(interp); |
922 | | |
923 | | // XXX These two calls should be done at the end of clear_interpreter(), |
924 | | // but currently some objects get decref'ed after that. |
925 | | #ifdef Py_REF_DEBUG |
926 | | _PyInterpreterState_FinalizeRefTotal(interp); |
927 | | #endif |
928 | 0 | _PyInterpreterState_FinalizeAllocatedBlocks(interp); |
929 | | |
930 | 0 | HEAD_LOCK(runtime); |
931 | 0 | PyInterpreterState **p; |
932 | 0 | for (p = &interpreters->head; ; p = &(*p)->next) { |
933 | 0 | if (*p == NULL) { |
934 | 0 | Py_FatalError("NULL interpreter"); |
935 | 0 | } |
936 | 0 | if (*p == interp) { |
937 | 0 | break; |
938 | 0 | } |
939 | 0 | } |
940 | 0 | if (interp->threads.head != NULL) { |
941 | 0 | Py_FatalError("remaining threads"); |
942 | 0 | } |
943 | 0 | *p = interp->next; |
944 | | |
945 | 0 | if (interpreters->main == interp) { |
946 | 0 | interpreters->main = NULL; |
947 | 0 | if (interpreters->head != NULL) { |
948 | 0 | Py_FatalError("remaining subinterpreters"); |
949 | 0 | } |
950 | 0 | } |
951 | 0 | HEAD_UNLOCK(runtime); |
952 | | |
953 | 0 | _Py_qsbr_fini(interp); |
954 | | |
955 | 0 | _PyObject_FiniState(interp); |
956 | | |
957 | 0 | free_interpreter(interp); |
958 | 0 | } |
959 | | |
960 | | |
961 | | #ifdef HAVE_FORK |
962 | | /* |
963 | | * Delete all interpreter states except the main interpreter. If there |
964 | | * is a current interpreter state, it *must* be the main interpreter. |
965 | | */ |
966 | | PyStatus |
967 | | _PyInterpreterState_DeleteExceptMain(_PyRuntimeState *runtime) |
968 | 0 | { |
969 | 0 | struct pyinterpreters *interpreters = &runtime->interpreters; |
970 | | |
971 | 0 | PyThreadState *tstate = _PyThreadState_Swap(runtime, NULL); |
972 | 0 | if (tstate != NULL && tstate->interp != interpreters->main) { |
973 | 0 | return _PyStatus_ERR("not main interpreter"); |
974 | 0 | } |
975 | | |
976 | 0 | HEAD_LOCK(runtime); |
977 | 0 | PyInterpreterState *interp = interpreters->head; |
978 | 0 | interpreters->head = NULL; |
979 | 0 | while (interp != NULL) { |
980 | 0 | if (interp == interpreters->main) { |
981 | 0 | interpreters->main->next = NULL; |
982 | 0 | interpreters->head = interp; |
983 | 0 | interp = interp->next; |
984 | 0 | continue; |
985 | 0 | } |
986 | | |
987 | | // XXX Won't this fail since PyInterpreterState_Clear() requires |
988 | | // the "current" tstate to be set? |
989 | 0 | PyInterpreterState_Clear(interp); // XXX must activate? |
990 | 0 | zapthreads(interp); |
991 | 0 | PyInterpreterState *prev_interp = interp; |
992 | 0 | interp = interp->next; |
993 | 0 | free_interpreter(prev_interp); |
994 | 0 | } |
995 | 0 | HEAD_UNLOCK(runtime); |
996 | | |
997 | 0 | if (interpreters->head == NULL) { |
998 | 0 | return _PyStatus_ERR("missing main interpreter"); |
999 | 0 | } |
1000 | 0 | _PyThreadState_Swap(runtime, tstate); |
1001 | 0 | return _PyStatus_OK(); |
1002 | 0 | } |
1003 | | #endif |
1004 | | |
1005 | | static inline void |
1006 | | set_main_thread(PyInterpreterState *interp, PyThreadState *tstate) |
1007 | 0 | { |
1008 | 0 | _Py_atomic_store_ptr_relaxed(&interp->threads.main, tstate); |
1009 | 0 | } |
1010 | | |
1011 | | static inline PyThreadState * |
1012 | | get_main_thread(PyInterpreterState *interp) |
1013 | 0 | { |
1014 | 0 | return _Py_atomic_load_ptr_relaxed(&interp->threads.main); |
1015 | 0 | } |
1016 | | |
1017 | | void |
1018 | | _PyErr_SetInterpreterAlreadyRunning(void) |
1019 | 0 | { |
1020 | 0 | PyErr_SetString(PyExc_InterpreterError, "interpreter already running"); |
1021 | 0 | } |
1022 | | |
1023 | | int |
1024 | | _PyInterpreterState_SetRunningMain(PyInterpreterState *interp) |
1025 | 0 | { |
1026 | 0 | if (get_main_thread(interp) != NULL) { |
1027 | 0 | _PyErr_SetInterpreterAlreadyRunning(); |
1028 | 0 | return -1; |
1029 | 0 | } |
1030 | 0 | PyThreadState *tstate = current_fast_get(); |
1031 | 0 | _Py_EnsureTstateNotNULL(tstate); |
1032 | 0 | if (tstate->interp != interp) { |
1033 | 0 | PyErr_SetString(PyExc_RuntimeError, |
1034 | 0 | "current tstate has wrong interpreter"); |
1035 | 0 | return -1; |
1036 | 0 | } |
1037 | 0 | set_main_thread(interp, tstate); |
1038 | | |
1039 | 0 | return 0; |
1040 | 0 | } |
1041 | | |
1042 | | void |
1043 | | _PyInterpreterState_SetNotRunningMain(PyInterpreterState *interp) |
1044 | 0 | { |
1045 | 0 | assert(get_main_thread(interp) == current_fast_get()); |
1046 | 0 | set_main_thread(interp, NULL); |
1047 | 0 | } |
1048 | | |
1049 | | int |
1050 | | _PyInterpreterState_IsRunningMain(PyInterpreterState *interp) |
1051 | 0 | { |
1052 | 0 | if (get_main_thread(interp) != NULL) { |
1053 | 0 | return 1; |
1054 | 0 | } |
1055 | | // Embedders might not know to call _PyInterpreterState_SetRunningMain(), |
1056 | | // so their main thread wouldn't show it is running the main interpreter's |
1057 | | // program. (Py_Main() doesn't have this problem.) For now this isn't |
1058 | | // critical. If it were, we would need to infer "running main" from other |
1059 | | // information, like if it's the main interpreter. We used to do that |
1060 | | // but the naive approach led to some inconsistencies that caused problems. |
1061 | 0 | return 0; |
1062 | 0 | } |
1063 | | |
1064 | | int |
1065 | | _PyThreadState_IsRunningMain(PyThreadState *tstate) |
1066 | 0 | { |
1067 | 0 | PyInterpreterState *interp = tstate->interp; |
1068 | | // See the note in _PyInterpreterState_IsRunningMain() about |
1069 | | // possible false negatives here for embedders. |
1070 | 0 | return get_main_thread(interp) == tstate; |
1071 | 0 | } |
1072 | | |
1073 | | void |
1074 | | _PyInterpreterState_ReinitRunningMain(PyThreadState *tstate) |
1075 | 0 | { |
1076 | 0 | PyInterpreterState *interp = tstate->interp; |
1077 | 0 | if (get_main_thread(interp) != tstate) { |
1078 | 0 | set_main_thread(interp, NULL); |
1079 | 0 | } |
1080 | 0 | } |
1081 | | |
1082 | | |
1083 | | //---------- |
1084 | | // accessors |
1085 | | //---------- |
1086 | | |
1087 | | int |
1088 | | _PyInterpreterState_IsReady(PyInterpreterState *interp) |
1089 | 0 | { |
1090 | 0 | return interp->_ready; |
1091 | 0 | } |
1092 | | |
1093 | | #ifndef NDEBUG |
1094 | | static inline int |
1095 | | check_interpreter_whence(long whence) |
1096 | | { |
1097 | | if (whence < 0) { |
1098 | | return -1; |
1099 | | } |
1100 | | if (whence > _PyInterpreterState_WHENCE_MAX) { |
1101 | | return -1; |
1102 | | } |
1103 | | return 0; |
1104 | | } |
1105 | | #endif |
1106 | | |
1107 | | long |
1108 | | _PyInterpreterState_GetWhence(PyInterpreterState *interp) |
1109 | 0 | { |
1110 | 0 | assert(check_interpreter_whence(interp->_whence) == 0); |
1111 | 0 | return interp->_whence; |
1112 | 0 | } |
1113 | | |
1114 | | void |
1115 | | _PyInterpreterState_SetWhence(PyInterpreterState *interp, long whence) |
1116 | 16 | { |
1117 | 16 | assert(interp->_whence != _PyInterpreterState_WHENCE_NOTSET); |
1118 | 16 | assert(check_interpreter_whence(whence) == 0); |
1119 | 16 | interp->_whence = whence; |
1120 | 16 | } |
1121 | | |
1122 | | |
1123 | | PyObject * |
1124 | | _Py_GetMainModule(PyThreadState *tstate) |
1125 | 0 | { |
1126 | | // We return None to indicate "not found" or "bogus". |
1127 | 0 | PyObject *modules = _PyImport_GetModulesRef(tstate->interp); |
1128 | 0 | if (modules == Py_None) { |
1129 | 0 | return modules; |
1130 | 0 | } |
1131 | 0 | PyObject *module = NULL; |
1132 | 0 | (void)PyMapping_GetOptionalItem(modules, &_Py_ID(__main__), &module); |
1133 | 0 | Py_DECREF(modules); |
1134 | 0 | if (module == NULL && !PyErr_Occurred()) { |
1135 | 0 | Py_RETURN_NONE; |
1136 | 0 | } |
1137 | 0 | return module; |
1138 | 0 | } |
1139 | | |
1140 | | int |
1141 | | _Py_CheckMainModule(PyObject *module) |
1142 | 0 | { |
1143 | 0 | if (module == NULL || module == Py_None) { |
1144 | 0 | if (!PyErr_Occurred()) { |
1145 | 0 | (void)_PyErr_SetModuleNotFoundError(&_Py_ID(__main__)); |
1146 | 0 | } |
1147 | 0 | return -1; |
1148 | 0 | } |
1149 | 0 | if (!Py_IS_TYPE(module, &PyModule_Type)) { |
1150 | | /* The __main__ module has been tampered with. */ |
1151 | 0 | PyObject *msg = PyUnicode_FromString("invalid __main__ module"); |
1152 | 0 | if (msg != NULL) { |
1153 | 0 | (void)PyErr_SetImportError(msg, &_Py_ID(__main__), NULL); |
1154 | 0 | Py_DECREF(msg); |
1155 | 0 | } |
1156 | 0 | return -1; |
1157 | 0 | } |
1158 | 0 | return 0; |
1159 | 0 | } |
1160 | | |
1161 | | |
1162 | | PyObject * |
1163 | | PyInterpreterState_GetDict(PyInterpreterState *interp) |
1164 | 12 | { |
1165 | 12 | if (interp->dict == NULL) { |
1166 | 6 | interp->dict = PyDict_New(); |
1167 | 6 | if (interp->dict == NULL) { |
1168 | 0 | PyErr_Clear(); |
1169 | 0 | } |
1170 | 6 | } |
1171 | | /* Returning NULL means no per-interpreter dict is available. */ |
1172 | 12 | return interp->dict; |
1173 | 12 | } |
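 | | /* Illustrative extension usage ("mymodule.state" is a hypothetical |
 | | key): |
 | | |
 | |     PyObject *d = PyInterpreterState_GetDict(PyInterpreterState_Get()); |
 | |     if (d != NULL) {  // NULL means no per-interpreter dict is available |
 | |         PyDict_SetItemString(d, "mymodule.state", state); |
 | |     } |
 | | */ |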
1174 | | |
1175 | | |
1176 | | //---------- |
1177 | | // interp ID |
1178 | | //---------- |
1179 | | |
1180 | | int64_t |
1181 | | _PyInterpreterState_ObjectToID(PyObject *idobj) |
1182 | 0 | { |
1183 | 0 | if (!_PyIndex_Check(idobj)) { |
1184 | 0 | PyErr_Format(PyExc_TypeError, |
1185 | 0 | "interpreter ID must be an int, got %.100s", |
1186 | 0 | Py_TYPE(idobj)->tp_name); |
1187 | 0 | return -1; |
1188 | 0 | } |
1189 | | |
1190 | | // This may raise OverflowError. |
1191 | | // For now, we don't worry about if LLONG_MAX < INT64_MAX. |
1192 | 0 | long long id = PyLong_AsLongLong(idobj); |
1193 | 0 | if (id == -1 && PyErr_Occurred()) { |
1194 | 0 | return -1; |
1195 | 0 | } |
1196 | | |
1197 | 0 | if (id < 0) { |
1198 | 0 | PyErr_Format(PyExc_ValueError, |
1199 | 0 | "interpreter ID must be a non-negative int, got %R", |
1200 | 0 | idobj); |
1201 | 0 | return -1; |
1202 | 0 | } |
1203 | | #if LLONG_MAX > INT64_MAX |
1204 | | else if (id > INT64_MAX) { |
1205 | | PyErr_SetString(PyExc_OverflowError, "int too big to convert"); |
1206 | | return -1; |
1207 | | } |
1208 | | #endif |
1209 | 0 | else { |
1210 | 0 | return (int64_t)id; |
1211 | 0 | } |
1212 | 0 | } |
1213 | | |
1214 | | int64_t |
1215 | | PyInterpreterState_GetID(PyInterpreterState *interp) |
1216 | 0 | { |
1217 | 0 | if (interp == NULL) { |
1218 | 0 | PyErr_SetString(PyExc_RuntimeError, "no interpreter provided"); |
1219 | 0 | return -1; |
1220 | 0 | } |
1221 | 0 | return interp->id; |
1222 | 0 | } |
1223 | | |
1224 | | PyObject * |
1225 | | _PyInterpreterState_GetIDObject(PyInterpreterState *interp) |
1226 | 0 | { |
1227 | 0 | int64_t interpid = interp->id; |
1228 | 0 | if (interpid < 0) { |
1229 | 0 | return NULL; |
1230 | 0 | } |
1231 | 0 | assert(interpid < LLONG_MAX); |
1232 | 0 | return PyLong_FromLongLong(interpid); |
1233 | 0 | } |
1234 | | |
1235 | | |
1236 | | |
1237 | | void |
1238 | | _PyInterpreterState_IDIncref(PyInterpreterState *interp) |
1239 | 0 | { |
1240 | 0 | _Py_atomic_add_ssize(&interp->id_refcount, 1); |
1241 | 0 | } |
1242 | | |
1243 | | |
1244 | | void |
1245 | | _PyInterpreterState_IDDecref(PyInterpreterState *interp) |
1246 | 0 | { |
1247 | 0 | _PyRuntimeState *runtime = interp->runtime; |
1248 | | |
1249 | 0 | Py_ssize_t refcount = _Py_atomic_add_ssize(&interp->id_refcount, -1); |
1250 | | |
1251 | 0 | if (refcount == 1 && interp->requires_idref) { |
1252 | 0 | PyThreadState *tstate = |
1253 | 0 | _PyThreadState_NewBound(interp, _PyThreadState_WHENCE_FINI); |
1254 | | |
1255 | | // XXX Possible GILState issues? |
1256 | 0 | PyThreadState *save_tstate = _PyThreadState_Swap(runtime, tstate); |
1257 | 0 | Py_EndInterpreter(tstate); |
1258 | 0 | _PyThreadState_Swap(runtime, save_tstate); |
1259 | 0 | } |
1260 | 0 | } |
1261 | | |
1262 | | int |
1263 | | _PyInterpreterState_RequiresIDRef(PyInterpreterState *interp) |
1264 | 0 | { |
1265 | 0 | return interp->requires_idref; |
1266 | 0 | } |
1267 | | |
1268 | | void |
1269 | | _PyInterpreterState_RequireIDRef(PyInterpreterState *interp, int required) |
1270 | 0 | { |
1271 | 0 | interp->requires_idref = required ? 1 : 0; |
1272 | 0 | } |
1273 | | |
1274 | | |
1275 | | //----------------------------- |
1276 | | // look up an interpreter state |
1277 | | //----------------------------- |
1278 | | |
1279 | | /* Return the interpreter associated with the current OS thread. |
1280 | | |
1281 | | The GIL must be held. |
1282 | | */ |
1283 | | |
1284 | | PyInterpreterState* |
1285 | | PyInterpreterState_Get(void) |
1286 | 35 | { |
1287 | 35 | PyThreadState *tstate = current_fast_get(); |
1288 | 35 | _Py_EnsureTstateNotNULL(tstate); |
1289 | 35 | PyInterpreterState *interp = tstate->interp; |
1290 | 35 | if (interp == NULL) { |
1291 | 0 | Py_FatalError("no current interpreter"); |
1292 | 0 | } |
1293 | 35 | return interp; |
1294 | 35 | } |
1295 | | |
1296 | | |
1297 | | static PyInterpreterState * |
1298 | | interp_look_up_id(_PyRuntimeState *runtime, int64_t requested_id) |
1299 | 0 | { |
1300 | 0 | PyInterpreterState *interp = runtime->interpreters.head; |
1301 | 0 | while (interp != NULL) { |
1302 | 0 | int64_t id = interp->id; |
1303 | 0 | assert(id >= 0); |
1304 | 0 | if (requested_id == id) { |
1305 | 0 | return interp; |
1306 | 0 | } |
1307 | 0 | interp = PyInterpreterState_Next(interp); |
1308 | 0 | } |
1309 | 0 | return NULL; |
1310 | 0 | } |
1311 | | |
1312 | | /* Return the interpreter state with the given ID. |
1313 | | |
1314 | | Fail with RuntimeError if the interpreter is not found. */ |
1315 | | |
1316 | | PyInterpreterState * |
1317 | | _PyInterpreterState_LookUpID(int64_t requested_id) |
1318 | 0 | { |
1319 | 0 | PyInterpreterState *interp = NULL; |
1320 | 0 | if (requested_id >= 0) { |
1321 | 0 | _PyRuntimeState *runtime = &_PyRuntime; |
1322 | 0 | HEAD_LOCK(runtime); |
1323 | 0 | interp = interp_look_up_id(runtime, requested_id); |
1324 | 0 | HEAD_UNLOCK(runtime); |
1325 | 0 | } |
1326 | 0 | if (interp == NULL && !PyErr_Occurred()) { |
1327 | 0 | PyErr_Format(PyExc_InterpreterNotFoundError, |
1328 | 0 | "unrecognized interpreter ID %lld", requested_id); |
1329 | 0 | } |
1330 | 0 | return interp; |
1331 | 0 | } |
1332 | | |
1333 | | PyInterpreterState * |
1334 | | _PyInterpreterState_LookUpIDObject(PyObject *requested_id) |
1335 | 0 | { |
1336 | 0 | int64_t id = _PyInterpreterState_ObjectToID(requested_id); |
1337 | 0 | if (id < 0) { |
1338 | 0 | return NULL; |
1339 | 0 | } |
1340 | 0 | return _PyInterpreterState_LookUpID(id); |
1341 | 0 | } |
1342 | | |
1343 | | |
1344 | | /********************************/ |
1345 | | /* the per-thread runtime state */ |
1346 | | /********************************/ |
1347 | | |
1348 | | #ifndef NDEBUG |
1349 | | static inline int |
1350 | | tstate_is_alive(PyThreadState *tstate) |
1351 | | { |
1352 | | return (tstate->_status.initialized && |
1353 | | !tstate->_status.finalized && |
1354 | | !tstate->_status.cleared && |
1355 | | !tstate->_status.finalizing); |
1356 | | } |
1357 | | #endif |
1358 | | |
1359 | | |
1360 | | //---------- |
1361 | | // lifecycle |
1362 | | //---------- |
1363 | | |
1364 | | static _PyStackChunk* |
1365 | | allocate_chunk(int size_in_bytes, _PyStackChunk* previous) |
1366 | 182k | { |
1367 | 182k | assert(size_in_bytes % sizeof(PyObject **) == 0); |
1368 | 182k | _PyStackChunk *res = _PyObject_VirtualAlloc(size_in_bytes); |
1369 | 182k | if (res == NULL) { |
1370 | 0 | return NULL; |
1371 | 0 | } |
1372 | 182k | res->previous = previous; |
1373 | 182k | res->size = size_in_bytes; |
1374 | 182k | res->top = 0; |
1375 | 182k | return res; |
1376 | 182k | } |
1377 | | |
1378 | | static void |
1379 | | reset_threadstate(_PyThreadStateImpl *tstate) |
1380 | 0 | { |
1381 | | // Set to _PyThreadState_INIT directly? |
1382 | 0 | memcpy(tstate, |
1383 | 0 | &initial._main_interpreter._initial_thread, |
1384 | 0 | sizeof(*tstate)); |
1385 | 0 | } |
1386 | | |
1387 | | static _PyThreadStateImpl * |
1388 | | alloc_threadstate(PyInterpreterState *interp) |
1389 | 16 | { |
1390 | 16 | _PyThreadStateImpl *tstate; |
1391 | | |
1392 | | // Try the preallocated tstate first. |
1393 | 16 | tstate = _Py_atomic_exchange_ptr(&interp->threads.preallocated, NULL); |
1394 | | |
1395 | | // Fall back to the allocator. |
1396 | 16 | if (tstate == NULL) { |
1397 | 0 | tstate = PyMem_RawCalloc(1, sizeof(_PyThreadStateImpl)); |
1398 | 0 | if (tstate == NULL) { |
1399 | 0 | return NULL; |
1400 | 0 | } |
1401 | 0 | reset_threadstate(tstate); |
1402 | 0 | } |
1403 | 16 | return tstate; |
1404 | 16 | } |
1405 | | |
1406 | | static void |
1407 | | free_threadstate(_PyThreadStateImpl *tstate) |
1408 | 0 | { |
1409 | 0 | PyInterpreterState *interp = tstate->base.interp; |
1410 | | // The initial thread state of the interpreter is allocated |
1411 | | // as part of the interpreter state so should not be freed. |
1412 | 0 | if (tstate == &interp->_initial_thread) { |
1413 | | // Make it available again. |
1414 | 0 | reset_threadstate(tstate); |
1415 | 0 | assert(interp->threads.preallocated == NULL); |
1416 | 0 | _Py_atomic_store_ptr(&interp->threads.preallocated, tstate); |
1417 | 0 | } |
1418 | 0 | else { |
1419 | 0 | PyMem_RawFree(tstate); |
1420 | 0 | } |
1421 | 0 | } |
1422 | | |
1423 | | static void |
1424 | | decref_threadstate(_PyThreadStateImpl *tstate) |
1425 | 0 | { |
1426 | 0 | if (_Py_atomic_add_ssize(&tstate->refcount, -1) == 1) { |
1427 | | // The last reference to the thread state is gone. |
1428 | 0 | free_threadstate(tstate); |
1429 | 0 | } |
1430 | 0 | } |
1431 | | |
1432 | | /* Get the thread state to a minimal consistent state. |
1433 | | Further init happens in pylifecycle.c before it can be used. |
1434 | | All fields not initialized here are expected to be zeroed out, |
1435 | | e.g. by PyMem_RawCalloc() or memset(), or otherwise pre-initialized. |
1436 | | The interpreter state is not manipulated. Instead it is assumed that |
1437 | | the thread is getting added to the interpreter. |
1438 | | */ |
1439 | | |
1440 | | static void |
1441 | | init_threadstate(_PyThreadStateImpl *_tstate, |
1442 | | PyInterpreterState *interp, uint64_t id, int whence) |
1443 | 16 | { |
1444 | 16 | PyThreadState *tstate = (PyThreadState *)_tstate; |
1445 | 16 | if (tstate->_status.initialized) { |
1446 | 0 | Py_FatalError("thread state already initialized"); |
1447 | 0 | } |
1448 | | |
1449 | 16 | assert(interp != NULL); |
1450 | 16 | tstate->interp = interp; |
1451 | 16 | tstate->eval_breaker = |
1452 | 16 | _Py_atomic_load_uintptr_relaxed(&interp->ceval.instrumentation_version); |
1453 | | |
1454 | | // next/prev are set in add_threadstate(). |
1455 | 16 | assert(tstate->next == NULL); |
1456 | 16 | assert(tstate->prev == NULL); |
1457 | | |
1458 | 16 | assert(tstate->_whence == _PyThreadState_WHENCE_NOTSET); |
1459 | 16 | assert(whence >= 0 && whence <= _PyThreadState_WHENCE_EXEC); |
1460 | 16 | tstate->_whence = whence; |
1461 | | |
1462 | 16 | assert(id > 0); |
1463 | 16 | tstate->id = id; |
1464 | | |
1465 | | // thread_id and native_thread_id are set in bind_tstate(). |
1466 | | |
1467 | 16 | tstate->py_recursion_limit = interp->ceval.recursion_limit; |
1468 | 16 | tstate->py_recursion_remaining = interp->ceval.recursion_limit; |
1469 | 16 | tstate->exc_info = &tstate->exc_state; |
1470 | | |
1471 | | // PyGILState_Release must not try to delete this thread state. |
1472 | | // This is cleared when PyGILState_Ensure() creates the thread state. |
1473 | 16 | tstate->gilstate_counter = 1; |
1474 | | |
1475 | 16 | tstate->current_frame = NULL; |
1476 | 16 | tstate->datastack_chunk = NULL; |
1477 | 16 | tstate->datastack_top = NULL; |
1478 | 16 | tstate->datastack_limit = NULL; |
1479 | 16 | tstate->what_event = -1; |
1480 | 16 | tstate->current_executor = NULL; |
1481 | 16 | tstate->jit_exit = NULL; |
1482 | 16 | tstate->dict_global_version = 0; |
1483 | | |
1484 | 16 | _tstate->c_stack_soft_limit = UINTPTR_MAX; |
1485 | 16 | _tstate->c_stack_top = 0; |
1486 | 16 | _tstate->c_stack_hard_limit = 0; |
1487 | | |
1488 | 16 | _tstate->asyncio_running_loop = NULL; |
1489 | 16 | _tstate->asyncio_running_task = NULL; |
1490 | | |
1491 | 16 | tstate->delete_later = NULL; |
1492 | | |
1493 | 16 | llist_init(&_tstate->mem_free_queue); |
1494 | 16 | llist_init(&_tstate->asyncio_tasks_head); |
1495 | 16 | if (interp->stoptheworld.requested || _PyRuntime.stoptheworld.requested) { |
1496 | | // Start in the suspended state if there is an ongoing stop-the-world. |
1497 | 0 | tstate->state = _Py_THREAD_SUSPENDED; |
1498 | 0 | } |
1499 | | |
1500 | 16 | tstate->_status.initialized = 1; |
1501 | 16 | } |
1502 | | |
1503 | | static void |
1504 | | add_threadstate(PyInterpreterState *interp, PyThreadState *tstate, |
1505 | | PyThreadState *next) |
1506 | 16 | { |
1507 | 16 | assert(interp->threads.head != tstate); |
1508 | 16 | if (next != NULL) { |
1509 | 0 | assert(next->prev == NULL || next->prev == tstate); |
1510 | 0 | next->prev = tstate; |
1511 | 0 | } |
1512 | 16 | tstate->next = next; |
1513 | 16 | assert(tstate->prev == NULL); |
1514 | 16 | interp->threads.head = tstate; |
1515 | 16 | } |
1516 | | |
1517 | | static PyThreadState * |
1518 | | new_threadstate(PyInterpreterState *interp, int whence) |
1519 | 16 | { |
1520 | | // Allocate the thread state. |
1521 | 16 | _PyThreadStateImpl *tstate = alloc_threadstate(interp); |
1522 | 16 | if (tstate == NULL) { |
1523 | 0 | return NULL; |
1524 | 0 | } |
1525 | | |
1526 | | #ifdef Py_GIL_DISABLED |
1527 | | Py_ssize_t qsbr_idx = _Py_qsbr_reserve(interp); |
1528 | | if (qsbr_idx < 0) { |
1529 | | free_threadstate(tstate); |
1530 | | return NULL; |
1531 | | } |
1532 | | int32_t tlbc_idx = _Py_ReserveTLBCIndex(interp); |
1533 | | if (tlbc_idx < 0) { |
1534 | | free_threadstate(tstate); |
1535 | | return NULL; |
1536 | | } |
1537 | | #endif |
1538 | | |
1539 | | /* We serialize concurrent creation to protect global state. */ |
1540 | 16 | HEAD_LOCK(interp->runtime); |
1541 | | |
1542 | | // Initialize the new thread state. |
1543 | 16 | interp->threads.next_unique_id += 1; |
1544 | 16 | uint64_t id = interp->threads.next_unique_id; |
1545 | 16 | init_threadstate(tstate, interp, id, whence); |
1546 | | |
1547 | | // Add the new thread state to the interpreter. |
1548 | 16 | PyThreadState *old_head = interp->threads.head; |
1549 | 16 | add_threadstate(interp, (PyThreadState *)tstate, old_head); |
1550 | | |
1551 | 16 | HEAD_UNLOCK(interp->runtime); |
1552 | | |
1553 | | #ifdef Py_GIL_DISABLED |
1554 | | // Must be called with the head lock released to avoid lock-ordering deadlocks.
1555 | | _Py_qsbr_register(tstate, interp, qsbr_idx); |
1556 | | tstate->tlbc_index = tlbc_idx; |
1557 | | #endif |
1558 | | |
1559 | 16 | return (PyThreadState *)tstate; |
1560 | 16 | } |
1561 | | |
1562 | | PyThreadState * |
1563 | | PyThreadState_New(PyInterpreterState *interp) |
1564 | 0 | { |
1565 | 0 | return _PyThreadState_NewBound(interp, _PyThreadState_WHENCE_UNKNOWN); |
1566 | 0 | } |
1567 | | |
1568 | | PyThreadState * |
1569 | | _PyThreadState_NewBound(PyInterpreterState *interp, int whence) |
1570 | 0 | { |
1571 | 0 | PyThreadState *tstate = new_threadstate(interp, whence); |
1572 | 0 | if (tstate) { |
1573 | 0 | bind_tstate(tstate); |
1574 | | // This makes sure there's a gilstate tstate bound |
1575 | | // as soon as possible. |
1576 | 0 | if (gilstate_get() == NULL) { |
1577 | 0 | bind_gilstate_tstate(tstate); |
1578 | 0 | } |
1579 | 0 | } |
1580 | 0 | return tstate; |
1581 | 0 | } |
1582 | | |
1583 | | // This must be followed by a call to _PyThreadState_Bind().
1584 | | PyThreadState * |
1585 | | _PyThreadState_New(PyInterpreterState *interp, int whence) |
1586 | 16 | { |
1587 | 16 | return new_threadstate(interp, whence); |
1588 | 16 | } |
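/* [Editor's sketch, not part of pystate.c] The conventional embedding
 * pattern built from the constructors above, assuming an initialized
 * runtime and a valid `interp`:
 *
 *     PyThreadState *ts = PyThreadState_New(interp);  // binds to this OS thread
 *     PyEval_RestoreThread(ts);                       // attach and take the GIL
 *     // ... run Python code via the C API ...
 *     PyThreadState_Clear(ts);                        // requires the GIL
 *     PyThreadState_DeleteCurrent();                  // also releases the GIL
 */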
1589 | | |
1590 | | // We keep this for stable ABI compatibility.
1591 | | PyAPI_FUNC(PyThreadState*) |
1592 | | _PyThreadState_Prealloc(PyInterpreterState *interp) |
1593 | 0 | { |
1594 | 0 | return _PyThreadState_New(interp, _PyThreadState_WHENCE_UNKNOWN); |
1595 | 0 | } |
1596 | | |
1597 | | // We keep this around for (accidental) stable ABI compatibility. |
1598 | | // Realistically, no extensions are using it. |
1599 | | PyAPI_FUNC(void) |
1600 | | _PyThreadState_Init(PyThreadState *tstate) |
1601 | 0 | { |
1602 | 0 | Py_FatalError("_PyThreadState_Init() is for internal use only"); |
1603 | 0 | } |
1604 | | |
1605 | | |
1606 | | static void |
1607 | | clear_datastack(PyThreadState *tstate) |
1608 | 0 | { |
1609 | 0 | _PyStackChunk *chunk = tstate->datastack_chunk; |
1610 | 0 | tstate->datastack_chunk = NULL; |
1611 | 0 | while (chunk != NULL) { |
1612 | 0 | _PyStackChunk *prev = chunk->previous; |
1613 | 0 | _PyObject_VirtualFree(chunk, chunk->size); |
1614 | 0 | chunk = prev; |
1615 | 0 | } |
1616 | 0 | } |
1617 | | |
1618 | | void |
1619 | | PyThreadState_Clear(PyThreadState *tstate) |
1620 | 0 | { |
1621 | 0 | assert(tstate->_status.initialized && !tstate->_status.cleared); |
1622 | 0 | assert(current_fast_get()->interp == tstate->interp); |
1623 | 0 | assert(!_PyThreadState_IsRunningMain(tstate)); |
1624 | | // XXX assert(!tstate->_status.bound || tstate->_status.unbound); |
1625 | 0 | tstate->_status.finalizing = 1; // just in case |
1626 | | |
1627 | | /* XXX Conditions we need to enforce: |
1628 | | |
1629 | | * the GIL must be held by the current thread |
1630 | | * current_fast_get()->interp must match tstate->interp |
1631 | | * for the main interpreter, current_fast_get() must be the main thread |
1632 | | */ |
1633 | 0 |
1634 | 0 | int verbose = _PyInterpreterState_GetConfig(tstate->interp)->verbose; |
1635 | 0 |
1636 | 0 | if (verbose && tstate->current_frame != NULL) { |
1637 | | /* bpo-20526: After the main thread calls |
1638 | | _PyInterpreterState_SetFinalizing() in Py_FinalizeEx() |
1639 | | (or in Py_EndInterpreter() for subinterpreters), |
1640 | | threads must exit when trying to take the GIL. |
1641 | | If a thread exits in the middle of _PyEval_EvalFrameDefault(),
1642 | | tstate->current_frame is not reset to its previous value.
1643 | | It is more likely with daemon threads, but it can happen |
1644 | | with regular threads if threading._shutdown() fails |
1645 | | (ex: interrupted by CTRL+C). */ |
1646 | 0 | fprintf(stderr, |
1647 | 0 | "PyThreadState_Clear: warning: thread still has a frame\n"); |
1648 | 0 | } |
1649 | 0 |
1650 | 0 | if (verbose && tstate->current_exception != NULL) { |
1651 | 0 | fprintf(stderr, "PyThreadState_Clear: warning: thread has an exception set\n"); |
1652 | 0 | _PyErr_Print(tstate); |
1653 | 0 | } |
1654 | | |
1655 | | /* At this point tstate shouldn't be used any more, |
1656 | | neither to run Python code nor for other uses. |
1657 | | |
1658 | | This is tricky when current_fast_get() == tstate, in the same way |
1659 | | as noted in interpreter_clear() above. The below finalizers |
1660 | | can possibly run Python code or otherwise use the partially |
1661 | | cleared thread state. For now we trust that isn't a problem |
1662 | | in practice. |
1663 | | */ |
1664 | | // XXX Deal with the possibility of problematic finalizers. |
1665 | | |
1666 | | /* Don't clear tstate->current_frame: it is a borrowed reference */
1667 | 0 |
1668 | 0 | Py_CLEAR(tstate->threading_local_key); |
1669 | 0 | Py_CLEAR(tstate->threading_local_sentinel); |
1670 | 0 |
1671 | 0 | Py_CLEAR(((_PyThreadStateImpl *)tstate)->asyncio_running_loop); |
1672 | 0 | Py_CLEAR(((_PyThreadStateImpl *)tstate)->asyncio_running_task); |
1673 | | |
1674 | 0 |
1675 | 0 | PyMutex_Lock(&tstate->interp->asyncio_tasks_lock); |
1676 | | // merge any lingering tasks from thread state to interpreter's |
1677 | | // tasks list |
1678 | 0 | llist_concat(&tstate->interp->asyncio_tasks_head, |
1679 | 0 | &((_PyThreadStateImpl *)tstate)->asyncio_tasks_head); |
1680 | 0 | PyMutex_Unlock(&tstate->interp->asyncio_tasks_lock); |
1681 | 0 |
1682 | 0 | Py_CLEAR(tstate->dict); |
1683 | 0 | Py_CLEAR(tstate->async_exc); |
1684 | 0 |
1685 | 0 | Py_CLEAR(tstate->current_exception); |
1686 | 0 |
1687 | 0 | Py_CLEAR(tstate->exc_state.exc_value); |
1688 | | |
1689 | | /* The stack of exception states should contain just this thread. */ |
1690 | 0 | if (verbose && tstate->exc_info != &tstate->exc_state) { |
1691 | 0 | fprintf(stderr, |
1692 | 0 | "PyThreadState_Clear: warning: thread still has a generator\n"); |
1693 | 0 | } |
1694 | 0 |
1695 | 0 | if (tstate->c_profilefunc != NULL) { |
1696 | 0 | FT_ATOMIC_ADD_SSIZE(tstate->interp->sys_profiling_threads, -1); |
1697 | 0 | tstate->c_profilefunc = NULL; |
1698 | 0 | } |
1699 | 0 | if (tstate->c_tracefunc != NULL) { |
1700 | 0 | FT_ATOMIC_ADD_SSIZE(tstate->interp->sys_tracing_threads, -1); |
1701 | 0 | tstate->c_tracefunc = NULL; |
1702 | 0 | } |
1703 | 0 |
1704 | 0 | Py_CLEAR(tstate->c_profileobj); |
1705 | 0 | Py_CLEAR(tstate->c_traceobj); |
1706 | 0 |
1707 | 0 | Py_CLEAR(tstate->async_gen_firstiter); |
1708 | 0 | Py_CLEAR(tstate->async_gen_finalizer); |
1709 | 0 |
1710 | 0 | Py_CLEAR(tstate->context); |
1711 | 0 |
1712 | | #ifdef Py_GIL_DISABLED |
1713 | | // Each thread should clear its own freelists in free-threading builds.
1714 | | struct _Py_freelists *freelists = _Py_freelists_GET(); |
1715 | | _PyObject_ClearFreeLists(freelists, 1); |
1716 | | |
1717 | | // Merge our thread-local refcounts into the type's own refcount and |
1718 | | // free our local refcount array. |
1719 | | _PyObject_FinalizePerThreadRefcounts((_PyThreadStateImpl *)tstate); |
1720 | | |
1721 | | // Remove ourself from the biased reference counting table of threads. |
1722 | | _Py_brc_remove_thread(tstate); |
1723 | | |
1724 | | // Release our thread-local copies of the bytecode for reuse by another |
1725 | | // thread |
1726 | | _Py_ClearTLBCIndex((_PyThreadStateImpl *)tstate); |
1727 | | #endif |
1728 | | |
1729 | | // Merge our queue of pointers to be freed into the interpreter queue. |
1730 | 0 | _PyMem_AbandonDelayed(tstate); |
1731 | 0 |
1732 | 0 | _PyThreadState_ClearMimallocHeaps(tstate); |
1733 | 0 |
1734 | 0 | tstate->_status.cleared = 1; |
1735 | | |
1736 | | // XXX Call _PyThreadStateSwap(runtime, NULL) here if "current". |
1737 | | // XXX Do it as early in the function as possible. |
1738 | 0 | } |
1739 | | |
1740 | | static void |
1741 | | decrement_stoptheworld_countdown(struct _stoptheworld_state *stw); |
1742 | | |
1743 | | /* Common code for PyThreadState_Delete() and PyThreadState_DeleteCurrent() */ |
1744 | | static void |
1745 | | tstate_delete_common(PyThreadState *tstate, int release_gil) |
1746 | 0 | { |
1747 | 0 | assert(tstate->_status.cleared && !tstate->_status.finalized); |
1748 | 0 | tstate_verify_not_active(tstate); |
1749 | 0 | assert(!_PyThreadState_IsRunningMain(tstate)); |
1750 | 0 |
1751 | 0 | PyInterpreterState *interp = tstate->interp; |
1752 | 0 | if (interp == NULL) { |
1753 | 0 | Py_FatalError("NULL interpreter"); |
1754 | 0 | } |
1755 | 0 | _PyRuntimeState *runtime = interp->runtime; |
1756 | 0 |
1757 | 0 | HEAD_LOCK(runtime); |
1758 | 0 | if (tstate->prev) { |
1759 | 0 | tstate->prev->next = tstate->next; |
1760 | 0 | } |
1761 | 0 | else { |
1762 | 0 | interp->threads.head = tstate->next; |
1763 | 0 | } |
1764 | 0 | if (tstate->next) { |
1765 | 0 | tstate->next->prev = tstate->prev; |
1766 | 0 | } |
1767 | 0 | if (tstate->state != _Py_THREAD_SUSPENDED) { |
1768 | | // Any ongoing stop-the-world request should not wait for us because |
1769 | | // our thread is getting deleted. |
1770 | 0 | if (interp->stoptheworld.requested) { |
1771 | 0 | decrement_stoptheworld_countdown(&interp->stoptheworld); |
1772 | 0 | } |
1773 | 0 | if (runtime->stoptheworld.requested) { |
1774 | 0 | decrement_stoptheworld_countdown(&runtime->stoptheworld); |
1775 | 0 | } |
1776 | 0 | } |
1777 | 0 |
1778 | | #if defined(Py_REF_DEBUG) && defined(Py_GIL_DISABLED) |
1779 | | // Add our portion of the total refcount to the interpreter's total. |
1780 | | _PyThreadStateImpl *tstate_impl = (_PyThreadStateImpl *)tstate; |
1781 | | tstate->interp->object_state.reftotal += tstate_impl->reftotal; |
1782 | | tstate_impl->reftotal = 0; |
1783 | | assert(tstate_impl->refcounts.values == NULL); |
1784 | | #endif |
1785 | 0 |
1786 | 0 | HEAD_UNLOCK(runtime); |
1787 | | |
1788 | | // XXX Unbind in PyThreadState_Clear(), or earlier |
1789 | | // (and assert not-equal here)? |
1790 | 0 | if (tstate->_status.bound_gilstate) { |
1791 | 0 | unbind_gilstate_tstate(tstate); |
1792 | 0 | } |
1793 | 0 | if (tstate->_status.bound) { |
1794 | 0 | unbind_tstate(tstate); |
1795 | 0 | } |
1796 | | |
1797 | | // XXX Move to PyThreadState_Clear()? |
1798 | 0 | clear_datastack(tstate); |
1799 | 0 |
1800 | 0 | if (release_gil) { |
1801 | 0 | _PyEval_ReleaseLock(tstate->interp, tstate, 1); |
1802 | 0 | } |
1803 | 0 |
1804 | | #ifdef Py_GIL_DISABLED |
1805 | | _Py_qsbr_unregister(tstate); |
1806 | | #endif |
1807 | 0 |
1808 | 0 | tstate->_status.finalized = 1; |
1809 | 0 | } |
1810 | | |
1811 | | static void |
1812 | | zapthreads(PyInterpreterState *interp) |
1813 | 0 | { |
1814 | 0 | PyThreadState *tstate; |
1815 | | /* No need to lock the mutex here because this should only happen |
1816 | | when the threads are all really dead (XXX famous last words). |
1817 | | |
1818 | | Cannot use _Py_FOR_EACH_TSTATE_UNLOCKED because we are freeing |
1819 | | the thread states here. |
1820 | | */ |
1821 | 0 | while ((tstate = interp->threads.head) != NULL) { |
1822 | 0 | tstate_verify_not_active(tstate); |
1823 | 0 | tstate_delete_common(tstate, 0); |
1824 | 0 | free_threadstate((_PyThreadStateImpl *)tstate); |
1825 | 0 | } |
1826 | 0 | } |
1827 | | |
1828 | | |
1829 | | void |
1830 | | PyThreadState_Delete(PyThreadState *tstate) |
1831 | 0 | { |
1832 | 0 | _Py_EnsureTstateNotNULL(tstate); |
1833 | 0 | tstate_verify_not_active(tstate); |
1834 | 0 | tstate_delete_common(tstate, 0); |
1835 | 0 | free_threadstate((_PyThreadStateImpl *)tstate); |
1836 | 0 | } |
1837 | | |
1838 | | |
1839 | | void |
1840 | | _PyThreadState_DeleteCurrent(PyThreadState *tstate) |
1841 | 0 | { |
1842 | 0 | _Py_EnsureTstateNotNULL(tstate); |
1843 | | #ifdef Py_GIL_DISABLED |
1844 | | _Py_qsbr_detach(((_PyThreadStateImpl *)tstate)->qsbr); |
1845 | | #endif |
1846 | 0 | current_fast_clear(tstate->interp->runtime); |
1847 | 0 | tstate_delete_common(tstate, 1); // release GIL as part of call |
1848 | 0 | free_threadstate((_PyThreadStateImpl *)tstate); |
1849 | 0 | } |
1850 | | |
1851 | | void |
1852 | | PyThreadState_DeleteCurrent(void) |
1853 | 0 | { |
1854 | 0 | PyThreadState *tstate = current_fast_get(); |
1855 | 0 | _PyThreadState_DeleteCurrent(tstate); |
1856 | 0 | } |
1857 | | |
1858 | | |
1859 | | // Unlinks and removes all thread states from `tstate->interp`, with the |
1860 | | // exception of the one passed as an argument. However, it does not delete |
1861 | | // these thread states. Instead, it returns the removed thread states as a |
1862 | | // linked list. |
1863 | | // |
1864 | | // Note that if there is a current thread state, it *must* be the one |
1865 | | // passed as argument. Also, this won't touch any interpreters other |
1866 | | // than the current one, since we don't know which thread state should |
1867 | | // be kept in those other interpreters. |
1868 | | PyThreadState * |
1869 | | _PyThreadState_RemoveExcept(PyThreadState *tstate) |
1870 | 0 | { |
1871 | 0 | assert(tstate != NULL); |
1872 | 0 | PyInterpreterState *interp = tstate->interp; |
1873 | 0 | _PyRuntimeState *runtime = interp->runtime; |
1874 | 0 |
1875 | | #ifdef Py_GIL_DISABLED |
1876 | | assert(runtime->stoptheworld.world_stopped); |
1877 | | #endif |
1878 | 0 |
1879 | 0 | HEAD_LOCK(runtime); |
1880 | | /* Remove all thread states, except tstate, from the linked list of |
1881 | | thread states. */ |
1882 | 0 | PyThreadState *list = interp->threads.head; |
1883 | 0 | if (list == tstate) { |
1884 | 0 | list = tstate->next; |
1885 | 0 | } |
1886 | 0 | if (tstate->prev) { |
1887 | 0 | tstate->prev->next = tstate->next; |
1888 | 0 | } |
1889 | 0 | if (tstate->next) { |
1890 | 0 | tstate->next->prev = tstate->prev; |
1891 | 0 | } |
1892 | 0 | tstate->prev = tstate->next = NULL; |
1893 | 0 | interp->threads.head = tstate; |
1894 | 0 | HEAD_UNLOCK(runtime); |
1895 | 0 |
1896 | 0 | return list; |
1897 | 0 | } |
1898 | | |
1899 | | // Deletes the thread states in the linked list `list`. |
1900 | | // |
1901 | | // This is intended to be used in conjunction with _PyThreadState_RemoveExcept. |
1902 | | // |
1903 | | // If `is_after_fork` is true, the thread states are immediately freed. |
1904 | | // Otherwise, they are decref'd because they may still be referenced by an |
1905 | | // OS thread. |
1906 | | void |
1907 | | _PyThreadState_DeleteList(PyThreadState *list, int is_after_fork) |
1908 | 0 | { |
1909 | | // The world can't be stopped because PyThreadState_Clear() can
1910 | | // call destructors.
1911 | 0 | assert(!_PyRuntime.stoptheworld.world_stopped); |
1912 | 0 |
1913 | 0 | PyThreadState *p, *next; |
1914 | 0 | for (p = list; p; p = next) { |
1915 | 0 | next = p->next; |
1916 | 0 | PyThreadState_Clear(p); |
1917 | 0 | if (is_after_fork) { |
1918 | 0 | free_threadstate((_PyThreadStateImpl *)p); |
1919 | 0 | } |
1920 | 0 | else { |
1921 | 0 | decref_threadstate((_PyThreadStateImpl *)p); |
1922 | 0 | } |
1923 | 0 | } |
1924 | 0 | } |
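/* [Editor's sketch] How the two functions above are meant to be paired;
 * the `is_after_fork` parameter suggests the after-fork path, but the
 * exact call sites are an assumption here:
 *
 *     PyThreadState *keep = current_fast_get();
 *     PyThreadState *others = _PyThreadState_RemoveExcept(keep);
 *     // ... resume the world ...
 *     _PyThreadState_DeleteList(others, 1);  // is_after_fork=1: free now
 */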
1925 | | |
1926 | | |
1927 | | //---------- |
1928 | | // accessors |
1929 | | //---------- |
1930 | | |
1931 | | /* An extension mechanism to store arbitrary additional per-thread state. |
1932 | | PyThreadState_GetDict() returns a dictionary that can be used to hold such |
1933 | | state; the caller should pick a unique key and store its state there. If |
1934 | | PyThreadState_GetDict() returns NULL, an exception has *not* been raised |
1935 | | and the caller should assume no per-thread state is available. */ |
1936 | | |
1937 | | PyObject * |
1938 | | _PyThreadState_GetDict(PyThreadState *tstate) |
1939 | 8.51M | { |
1940 | 8.51M | assert(tstate != NULL); |
1941 | 8.51M | if (tstate->dict == NULL) { |
1942 | 1 | tstate->dict = PyDict_New(); |
1943 | 1 | if (tstate->dict == NULL) { |
1944 | 0 | _PyErr_Clear(tstate); |
1945 | 0 | } |
1946 | 1 | } |
1947 | 8.51M | return tstate->dict; |
1948 | 8.51M | } |
1949 | | |
1950 | | |
1951 | | PyObject * |
1952 | | PyThreadState_GetDict(void) |
1953 | 8.51M | { |
1954 | 8.51M | PyThreadState *tstate = current_fast_get(); |
1955 | 8.51M | if (tstate == NULL) { |
1956 | 0 | return NULL; |
1957 | 0 | } |
1958 | 8.51M | return _PyThreadState_GetDict(tstate); |
1959 | 8.51M | } |
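/* [Editor's sketch] Using the per-thread dict as described above; the
 * function and key names are hypothetical:
 *
 *     static int
 *     stash_my_state(PyObject *value)
 *     {
 *         PyObject *dict = PyThreadState_GetDict();  // borrowed reference
 *         if (dict == NULL) {
 *             return -1;  // no exception is set in this case
 *         }
 *         return PyDict_SetItemString(dict, "my-unique-key", value);
 *     }
 */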
1960 | | |
1961 | | |
1962 | | PyInterpreterState * |
1963 | | PyThreadState_GetInterpreter(PyThreadState *tstate) |
1964 | 0 | { |
1965 | 0 | assert(tstate != NULL); |
1966 | 0 | return tstate->interp; |
1967 | 0 | } |
1968 | | |
1969 | | |
1970 | | PyFrameObject* |
1971 | | PyThreadState_GetFrame(PyThreadState *tstate) |
1972 | 2.27k | { |
1973 | 2.27k | assert(tstate != NULL); |
1974 | 2.27k | _PyInterpreterFrame *f = _PyThreadState_GetFrame(tstate); |
1975 | 2.27k | if (f == NULL) { |
1976 | 0 | return NULL; |
1977 | 0 | } |
1978 | 2.27k | PyFrameObject *frame = _PyFrame_GetFrameObject(f); |
1979 | 2.27k | if (frame == NULL) { |
1980 | 0 | PyErr_Clear(); |
1981 | 0 | } |
1982 | 2.27k | return (PyFrameObject*)Py_XNewRef(frame); |
1983 | 2.27k | } |
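/* [Editor's note] Because of the Py_XNewRef() above, the caller owns the
 * returned frame:
 *
 *     PyFrameObject *f = PyThreadState_GetFrame(tstate);  // new ref or NULL
 *     if (f != NULL) {
 *         // ... inspect the frame ...
 *         Py_DECREF(f);
 *     }
 */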
1984 | | |
1985 | | |
1986 | | uint64_t |
1987 | | PyThreadState_GetID(PyThreadState *tstate) |
1988 | 0 | { |
1989 | 0 | assert(tstate != NULL); |
1990 | 0 | return tstate->id; |
1991 | 0 | } |
1992 | | |
1993 | | |
1994 | | static inline void |
1995 | | tstate_activate(PyThreadState *tstate) |
1996 | 31.5k | { |
1997 | 31.5k | assert(tstate != NULL); |
1998 | | // XXX assert(tstate_is_alive(tstate)); |
1999 | 31.5k | assert(tstate_is_bound(tstate)); |
2000 | 31.5k | assert(!tstate->_status.active); |
2001 | | |
2002 | 31.5k | assert(!tstate->_status.bound_gilstate || |
2003 | 31.5k | tstate == gilstate_get()); |
2004 | 31.5k | if (!tstate->_status.bound_gilstate) { |
2005 | 0 | bind_gilstate_tstate(tstate); |
2006 | 0 | } |
2007 | | |
2008 | 31.5k | tstate->_status.active = 1; |
2009 | 31.5k | } |
2010 | | |
2011 | | static inline void |
2012 | | tstate_deactivate(PyThreadState *tstate) |
2013 | 31.5k | { |
2014 | 31.5k | assert(tstate != NULL); |
2015 | | // XXX assert(tstate_is_alive(tstate)); |
2016 | 31.5k | assert(tstate_is_bound(tstate)); |
2017 | 31.5k | assert(tstate->_status.active); |
2018 | | |
2019 | 31.5k | tstate->_status.active = 0; |
2020 | | |
2021 | | // We do not unbind the gilstate tstate here. |
2022 | | // It will still be used in PyGILState_Ensure(). |
2023 | 31.5k | } |
2024 | | |
2025 | | static int |
2026 | | tstate_try_attach(PyThreadState *tstate) |
2027 | 31.5k | { |
2028 | | #ifdef Py_GIL_DISABLED |
2029 | | int expected = _Py_THREAD_DETACHED; |
2030 | | return _Py_atomic_compare_exchange_int(&tstate->state, |
2031 | | &expected, |
2032 | | _Py_THREAD_ATTACHED); |
2033 | | #else |
2034 | 31.5k | assert(tstate->state == _Py_THREAD_DETACHED); |
2035 | 31.5k | tstate->state = _Py_THREAD_ATTACHED; |
2036 | 31.5k | return 1; |
2037 | 31.5k | #endif |
2038 | 31.5k | } |
2039 | | |
2040 | | static void |
2041 | | tstate_set_detached(PyThreadState *tstate, int detached_state) |
2042 | 31.5k | { |
2043 | 31.5k | assert(_Py_atomic_load_int_relaxed(&tstate->state) == _Py_THREAD_ATTACHED); |
2044 | | #ifdef Py_GIL_DISABLED |
2045 | | _Py_atomic_store_int(&tstate->state, detached_state); |
2046 | | #else |
2047 | 31.5k | tstate->state = detached_state; |
2048 | 31.5k | #endif |
2049 | 31.5k | } |
2050 | | |
2051 | | static void |
2052 | | tstate_wait_attach(PyThreadState *tstate) |
2053 | 0 | { |
2054 | 0 | do { |
2055 | 0 | int state = _Py_atomic_load_int_relaxed(&tstate->state); |
2056 | 0 | if (state == _Py_THREAD_SUSPENDED) { |
2057 | | // Wait until we're switched out of SUSPENDED to DETACHED. |
2058 | 0 | _PyParkingLot_Park(&tstate->state, &state, sizeof(tstate->state), |
2059 | 0 | /*timeout=*/-1, NULL, /*detach=*/0); |
2060 | 0 | } |
2061 | 0 | else if (state == _Py_THREAD_SHUTTING_DOWN) { |
2062 | | // We're shutting down, so we can't attach. |
2063 | 0 | _PyThreadState_HangThread(tstate); |
2064 | 0 | } |
2065 | 0 | else { |
2066 | 0 | assert(state == _Py_THREAD_DETACHED); |
2067 | 0 | } |
2068 | | // Once we're back in DETACHED we can re-attach |
2069 | 0 | } while (!tstate_try_attach(tstate)); |
2070 | 0 | } |
2071 | | |
2072 | | void |
2073 | | _PyThreadState_Attach(PyThreadState *tstate) |
2074 | 31.5k | { |
2075 | | #if defined(Py_DEBUG) |
2076 | | // This is called from PyEval_RestoreThread(). Like that
2077 | | // function, we need to ensure errno doesn't change.
2078 | | int err = errno; |
2079 | | #endif |
2080 | | |
2081 | 31.5k | _Py_EnsureTstateNotNULL(tstate); |
2082 | 31.5k | if (current_fast_get() != NULL) { |
2083 | 0 | Py_FatalError("non-NULL old thread state"); |
2084 | 0 | } |
2085 | 31.5k | _PyThreadStateImpl *_tstate = (_PyThreadStateImpl *)tstate; |
2086 | 31.5k | if (_tstate->c_stack_hard_limit == 0) { |
2087 | 16 | _Py_InitializeRecursionLimits(tstate); |
2088 | 16 | } |
2089 | | |
2090 | 31.5k | while (1) { |
2091 | 31.5k | _PyEval_AcquireLock(tstate); |
2092 | | |
2093 | | // XXX assert(tstate_is_alive(tstate)); |
2094 | 31.5k | current_fast_set(&_PyRuntime, tstate); |
2095 | 31.5k | if (!tstate_try_attach(tstate)) { |
2096 | 0 | tstate_wait_attach(tstate); |
2097 | 0 | } |
2098 | 31.5k | tstate_activate(tstate); |
2099 | | |
2100 | | #ifdef Py_GIL_DISABLED |
2101 | | if (_PyEval_IsGILEnabled(tstate) && !tstate->holds_gil) { |
2102 | | // The GIL was enabled between our call to _PyEval_AcquireLock() |
2103 | | // and when we attached (the GIL can't go from enabled to disabled |
2104 | | // here because only a thread holding the GIL can disable |
2105 | | // it). Detach and try again. |
2106 | | tstate_set_detached(tstate, _Py_THREAD_DETACHED); |
2107 | | tstate_deactivate(tstate); |
2108 | | current_fast_clear(&_PyRuntime); |
2109 | | continue; |
2110 | | } |
2111 | | _Py_qsbr_attach(((_PyThreadStateImpl *)tstate)->qsbr); |
2112 | | #endif |
2113 | 31.5k | break; |
2114 | 31.5k | } |
2115 | | |
2116 | | // Resume previous critical section. This acquires the lock(s) from the |
2117 | | // top-most critical section. |
2118 | 31.5k | if (tstate->critical_section != 0) { |
2119 | 0 | _PyCriticalSection_Resume(tstate); |
2120 | 0 | } |
2121 | | |
2122 | | #if defined(Py_DEBUG) |
2123 | | errno = err; |
2124 | | #endif |
2125 | 31.5k | } |
2126 | | |
2127 | | static void |
2128 | | detach_thread(PyThreadState *tstate, int detached_state) |
2129 | 31.5k | { |
2130 | | // XXX assert(tstate_is_alive(tstate) && tstate_is_bound(tstate)); |
2131 | 31.5k | assert(_Py_atomic_load_int_relaxed(&tstate->state) == _Py_THREAD_ATTACHED); |
2132 | 31.5k | assert(tstate == current_fast_get()); |
2133 | 31.5k | if (tstate->critical_section != 0) { |
2134 | 0 | _PyCriticalSection_SuspendAll(tstate); |
2135 | 0 | } |
2136 | | #ifdef Py_GIL_DISABLED |
2137 | | _Py_qsbr_detach(((_PyThreadStateImpl *)tstate)->qsbr); |
2138 | | #endif |
2139 | 31.5k | tstate_deactivate(tstate); |
2140 | 31.5k | tstate_set_detached(tstate, detached_state); |
2141 | 31.5k | current_fast_clear(&_PyRuntime); |
2142 | 31.5k | _PyEval_ReleaseLock(tstate->interp, tstate, 0); |
2143 | 31.5k | } |
2144 | | |
2145 | | void |
2146 | | _PyThreadState_Detach(PyThreadState *tstate) |
2147 | 31.5k | { |
2148 | 31.5k | detach_thread(tstate, _Py_THREAD_DETACHED); |
2149 | 31.5k | } |
2150 | | |
2151 | | void |
2152 | | _PyThreadState_Suspend(PyThreadState *tstate) |
2153 | 0 | { |
2154 | 0 | _PyRuntimeState *runtime = &_PyRuntime; |
2155 | 0 |
2156 | 0 | assert(_Py_atomic_load_int_relaxed(&tstate->state) == _Py_THREAD_ATTACHED); |
2157 | |
|
2158 | 0 | struct _stoptheworld_state *stw = NULL; |
2159 | 0 | HEAD_LOCK(runtime); |
2160 | 0 | if (runtime->stoptheworld.requested) { |
2161 | 0 | stw = &runtime->stoptheworld; |
2162 | 0 | } |
2163 | 0 | else if (tstate->interp->stoptheworld.requested) { |
2164 | 0 | stw = &tstate->interp->stoptheworld; |
2165 | 0 | } |
2166 | 0 | HEAD_UNLOCK(runtime); |
2167 | 0 |
2168 | 0 | if (stw == NULL) { |
2169 | | // Switch directly to "detached" if there is no active stop-the-world |
2170 | | // request. |
2171 | 0 | detach_thread(tstate, _Py_THREAD_DETACHED); |
2172 | 0 | return; |
2173 | 0 | } |
2174 | | |
2175 | | // Switch to "suspended" state. |
2176 | 0 | detach_thread(tstate, _Py_THREAD_SUSPENDED); |
2177 | | |
2178 | | // Decrease the count of remaining threads needing to park. |
2179 | 0 | HEAD_LOCK(runtime); |
2180 | 0 | decrement_stoptheworld_countdown(stw); |
2181 | 0 | HEAD_UNLOCK(runtime); |
2182 | 0 | } |
2183 | | |
2184 | | void |
2185 | | _PyThreadState_SetShuttingDown(PyThreadState *tstate) |
2186 | 0 | { |
2187 | 0 | _Py_atomic_store_int(&tstate->state, _Py_THREAD_SHUTTING_DOWN); |
2188 | | #ifdef Py_GIL_DISABLED |
2189 | | _PyParkingLot_UnparkAll(&tstate->state); |
2190 | | #endif |
2191 | 0 | } |
2192 | | |
2193 | | // Decrease the stop-the-world countdown of threads that still need to
2194 | | // pause. If we are the final thread to pause, notify the requesting thread.
2195 | | static void |
2196 | | decrement_stoptheworld_countdown(struct _stoptheworld_state *stw) |
2197 | 0 | { |
2198 | 0 | assert(stw->thread_countdown > 0); |
2199 | 0 | if (--stw->thread_countdown == 0) { |
2200 | 0 | _PyEvent_Notify(&stw->stop_event); |
2201 | 0 | } |
2202 | 0 | } |
2203 | | |
2204 | | #ifdef Py_GIL_DISABLED |
2205 | | // Interpreter for _Py_FOR_EACH_STW_INTERP(). For global stop-the-world events, |
2206 | | // we start with the first interpreter and then iterate over all interpreters. |
2207 | | // For per-interpreter stop-the-world events, we only operate on the one |
2208 | | // interpreter. |
2209 | | static PyInterpreterState * |
2210 | | interp_for_stop_the_world(struct _stoptheworld_state *stw) |
2211 | | { |
2212 | | return (stw->is_global |
2213 | | ? PyInterpreterState_Head() |
2214 | | : _Py_CONTAINER_OF(stw, PyInterpreterState, stoptheworld)); |
2215 | | } |
2216 | | |
2217 | | // Loops over the interpreters involved in a stop-the-world event.
2218 | | // For global: all interpreters
2219 | | // For per-interpreter: just the one interpreter
2220 | | #define _Py_FOR_EACH_STW_INTERP(stw, i) \ |
2221 | | for (PyInterpreterState *i = interp_for_stop_the_world((stw)); \ |
2222 | | i != NULL; i = ((stw->is_global) ? i->next : NULL)) |
2223 | | |
2224 | | |
2225 | | // Try to transition threads atomically from the "detached" state to the
2226 | | // "suspended" state. Returns true if all threads are now suspended.
2227 | | static bool |
2228 | | park_detached_threads(struct _stoptheworld_state *stw) |
2229 | | { |
2230 | | int num_parked = 0; |
2231 | | _Py_FOR_EACH_STW_INTERP(stw, i) { |
2232 | | _Py_FOR_EACH_TSTATE_UNLOCKED(i, t) { |
2233 | | int state = _Py_atomic_load_int_relaxed(&t->state); |
2234 | | if (state == _Py_THREAD_DETACHED) { |
2235 | | // Atomically transition to "suspended" if in "detached" state. |
2236 | | if (_Py_atomic_compare_exchange_int( |
2237 | | &t->state, &state, _Py_THREAD_SUSPENDED)) { |
2238 | | num_parked++; |
2239 | | } |
2240 | | } |
2241 | | else if (state == _Py_THREAD_ATTACHED && t != stw->requester) { |
2242 | | _Py_set_eval_breaker_bit(t, _PY_EVAL_PLEASE_STOP_BIT); |
2243 | | } |
2244 | | } |
2245 | | } |
2246 | | stw->thread_countdown -= num_parked; |
2247 | | assert(stw->thread_countdown >= 0); |
2248 | | return num_parked > 0 && stw->thread_countdown == 0; |
2249 | | } |
2250 | | |
2251 | | static void |
2252 | | stop_the_world(struct _stoptheworld_state *stw) |
2253 | | { |
2254 | | _PyRuntimeState *runtime = &_PyRuntime; |
2255 | | |
2256 | | PyMutex_Lock(&stw->mutex); |
2257 | | if (stw->is_global) { |
2258 | | _PyRWMutex_Lock(&runtime->stoptheworld_mutex); |
2259 | | } |
2260 | | else { |
2261 | | _PyRWMutex_RLock(&runtime->stoptheworld_mutex); |
2262 | | } |
2263 | | |
2264 | | HEAD_LOCK(runtime); |
2265 | | stw->requested = 1; |
2266 | | stw->thread_countdown = 0; |
2267 | | stw->stop_event = (PyEvent){0}; // zero-initialize (unset) |
2268 | | stw->requester = _PyThreadState_GET(); // may be NULL |
2269 | | |
2270 | | _Py_FOR_EACH_STW_INTERP(stw, i) { |
2271 | | _Py_FOR_EACH_TSTATE_UNLOCKED(i, t) { |
2272 | | if (t != stw->requester) { |
2273 | | // Count all the other threads (we don't wait on ourself). |
2274 | | stw->thread_countdown++; |
2275 | | } |
2276 | | } |
2277 | | } |
2278 | | |
2279 | | if (stw->thread_countdown == 0) { |
2280 | | HEAD_UNLOCK(runtime); |
2281 | | stw->world_stopped = 1; |
2282 | | return; |
2283 | | } |
2284 | | |
2285 | | for (;;) { |
2286 | | // Switch threads that are detached to the GC stopped state |
2287 | | bool stopped_all_threads = park_detached_threads(stw); |
2288 | | HEAD_UNLOCK(runtime); |
2289 | | |
2290 | | if (stopped_all_threads) { |
2291 | | break; |
2292 | | } |
2293 | | |
2294 | | PyTime_t wait_ns = 1000*1000; // 1ms (arbitrary, may need tuning) |
2295 | | int detach = 0; |
2296 | | if (PyEvent_WaitTimed(&stw->stop_event, wait_ns, detach)) { |
2297 | | assert(stw->thread_countdown == 0); |
2298 | | break; |
2299 | | } |
2300 | | |
2301 | | HEAD_LOCK(runtime); |
2302 | | } |
2303 | | stw->world_stopped = 1; |
2304 | | } |
2305 | | |
2306 | | static void |
2307 | | start_the_world(struct _stoptheworld_state *stw) |
2308 | | { |
2309 | | _PyRuntimeState *runtime = &_PyRuntime; |
2310 | | assert(PyMutex_IsLocked(&stw->mutex)); |
2311 | | |
2312 | | HEAD_LOCK(runtime); |
2313 | | stw->requested = 0; |
2314 | | stw->world_stopped = 0; |
2315 | | // Switch threads back to the detached state. |
2316 | | _Py_FOR_EACH_STW_INTERP(stw, i) { |
2317 | | _Py_FOR_EACH_TSTATE_UNLOCKED(i, t) { |
2318 | | if (t != stw->requester) { |
2319 | | assert(_Py_atomic_load_int_relaxed(&t->state) == |
2320 | | _Py_THREAD_SUSPENDED); |
2321 | | _Py_atomic_store_int(&t->state, _Py_THREAD_DETACHED); |
2322 | | _PyParkingLot_UnparkAll(&t->state); |
2323 | | } |
2324 | | } |
2325 | | } |
2326 | | stw->requester = NULL; |
2327 | | HEAD_UNLOCK(runtime); |
2328 | | if (stw->is_global) { |
2329 | | _PyRWMutex_Unlock(&runtime->stoptheworld_mutex); |
2330 | | } |
2331 | | else { |
2332 | | _PyRWMutex_RUnlock(&runtime->stoptheworld_mutex); |
2333 | | } |
2334 | | PyMutex_Unlock(&stw->mutex); |
2335 | | } |
2336 | | #endif // Py_GIL_DISABLED |
2337 | | |
2338 | | void |
2339 | | _PyEval_StopTheWorldAll(_PyRuntimeState *runtime) |
2340 | 0 | { |
2341 | | #ifdef Py_GIL_DISABLED |
2342 | | stop_the_world(&runtime->stoptheworld); |
2343 | | #endif |
2344 | 0 | } |
2345 | | |
2346 | | void |
2347 | | _PyEval_StartTheWorldAll(_PyRuntimeState *runtime) |
2348 | 0 | { |
2349 | | #ifdef Py_GIL_DISABLED |
2350 | | start_the_world(&runtime->stoptheworld); |
2351 | | #endif |
2352 | 0 | } |
2353 | | |
2354 | | void |
2355 | | _PyEval_StopTheWorld(PyInterpreterState *interp) |
2356 | 4 | { |
2357 | | #ifdef Py_GIL_DISABLED |
2358 | | stop_the_world(&interp->stoptheworld); |
2359 | | #endif |
2360 | 4 | } |
2361 | | |
2362 | | void |
2363 | | _PyEval_StartTheWorld(PyInterpreterState *interp) |
2364 | 4 | { |
2365 | | #ifdef Py_GIL_DISABLED |
2366 | | start_the_world(&interp->stoptheworld); |
2367 | | #endif |
2368 | 4 | } |
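/* [Editor's note] These are bracketing APIs: every stop must be matched by
 * a start on the same scope, with the protected mutation in between, as
 * _PyInterpreterState_SetEvalFrameFunc() below demonstrates:
 *
 *     _PyEval_StopTheWorld(interp);
 *     interp->eval_frame = eval_frame;  // mutate shared state safely
 *     _PyEval_StartTheWorld(interp);
 */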
2369 | | |
2370 | | //---------- |
2371 | | // other API |
2372 | | //---------- |
2373 | | |
2374 | | /* Asynchronously raise an exception in a thread. |
2375 | | Requested by Just van Rossum and Alex Martelli. |
2376 | | To prevent naive misuse, you must write your own extension |
2377 | | to call this, or use ctypes. Must be called with the GIL held. |
2378 | | Returns the number of tstates modified (normally 1, but 0 if `id` didn't |
2379 | | match any known thread id). Can be called with exc=NULL to clear an |
2380 | | existing async exception. This raises no exceptions. */ |
2381 | | |
2382 | | // XXX Move this to Python/ceval_gil.c? |
2383 | | // XXX Deprecate this. |
2384 | | int |
2385 | | PyThreadState_SetAsyncExc(unsigned long id, PyObject *exc) |
2386 | 0 | { |
2387 | 0 | PyInterpreterState *interp = _PyInterpreterState_GET(); |
2388 | | |
2389 | | /* Although the GIL is held, a few C API functions can be called |
2390 | | * without the GIL held, and in particular some that create and |
2391 | | * destroy thread and interpreter states. Those can mutate the |
2392 | | * list of thread states we're traversing, so to prevent that we lock |
2393 | | * head_mutex for the duration. |
2394 | | */ |
2395 | 0 | PyThreadState *tstate = NULL; |
2396 | 0 | _Py_FOR_EACH_TSTATE_BEGIN(interp, t) { |
2397 | 0 | if (t->thread_id == id) { |
2398 | 0 | tstate = t; |
2399 | 0 | break; |
2400 | 0 | } |
2401 | 0 | } |
2402 | 0 | _Py_FOR_EACH_TSTATE_END(interp); |
2403 | 0 |
2404 | 0 | if (tstate != NULL) { |
2405 | | /* Tricky: we need to decref the current value |
2406 | | * (if any) in tstate->async_exc, but that can in turn |
2407 | | * allow arbitrary Python code to run, including |
2408 | | * perhaps calls to this function. To prevent |
2409 | | * deadlock, we need to release head_mutex before |
2410 | | * the decref. |
2411 | | */ |
2412 | 0 | Py_XINCREF(exc); |
2413 | 0 | PyObject *old_exc = _Py_atomic_exchange_ptr(&tstate->async_exc, exc); |
2414 | 0 |
2415 | 0 | Py_XDECREF(old_exc); |
2416 | 0 | _Py_set_eval_breaker_bit(tstate, _PY_ASYNC_EXCEPTION_BIT); |
2417 | 0 | } |
2418 | 0 |
2419 | 0 | return tstate != NULL; |
2420 | 0 | } |
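/* [Editor's sketch] Raising an exception in another thread from an
 * extension, per the comment above; `target_id` is assumed to come from
 * threading.get_ident() in the target thread:
 *
 *     int n = PyThreadState_SetAsyncExc(target_id, PyExc_KeyboardInterrupt);
 *     if (n == 0) {
 *         // no thread state matched target_id; nothing was modified
 *     }
 *     // Passing NULL clears a pending async exception instead:
 *     PyThreadState_SetAsyncExc(target_id, NULL);
 */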
2421 | | |
2422 | | //--------------------------------- |
2423 | | // API for the current thread state |
2424 | | //--------------------------------- |
2425 | | |
2426 | | PyThreadState * |
2427 | | PyThreadState_GetUnchecked(void) |
2428 | 0 | { |
2429 | 0 | return current_fast_get(); |
2430 | 0 | } |
2431 | | |
2432 | | |
2433 | | PyThreadState * |
2434 | | PyThreadState_Get(void) |
2435 | 85.3M | { |
2436 | 85.3M | PyThreadState *tstate = current_fast_get(); |
2437 | 85.3M | _Py_EnsureTstateNotNULL(tstate); |
2438 | 85.3M | return tstate; |
2439 | 85.3M | } |
2440 | | |
2441 | | PyThreadState * |
2442 | | _PyThreadState_Swap(_PyRuntimeState *runtime, PyThreadState *newts) |
2443 | 0 | { |
2444 | 0 | PyThreadState *oldts = current_fast_get(); |
2445 | 0 | if (oldts != NULL) { |
2446 | 0 | _PyThreadState_Detach(oldts); |
2447 | 0 | } |
2448 | 0 | if (newts != NULL) { |
2449 | 0 | _PyThreadState_Attach(newts); |
2450 | 0 | } |
2451 | 0 | return oldts; |
2452 | 0 | } |
2453 | | |
2454 | | PyThreadState * |
2455 | | PyThreadState_Swap(PyThreadState *newts) |
2456 | 0 | { |
2457 | 0 | return _PyThreadState_Swap(&_PyRuntime, newts); |
2458 | 0 | } |
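/* [Editor's sketch] The classic save/restore use of the swap API, e.g.
 * when temporarily entering a subinterpreter's thread state:
 *
 *     PyThreadState *saved = PyThreadState_Swap(sub_tstate);
 *     // ... run code in the subinterpreter ...
 *     PyThreadState_Swap(saved);
 */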
2459 | | |
2460 | | |
2461 | | void |
2462 | | _PyThreadState_Bind(PyThreadState *tstate) |
2463 | 16 | { |
2464 | | // gh-104690: If Python is being finalized and PyInterpreterState_Delete() |
2465 | | // was called, tstate becomes a dangling pointer. |
2466 | 16 | assert(_PyThreadState_CheckConsistency(tstate)); |
2467 | | |
2468 | 16 | bind_tstate(tstate); |
2469 | | // This makes sure there's a gilstate tstate bound |
2470 | | // as soon as possible. |
2471 | 16 | if (gilstate_get() == NULL) { |
2472 | 16 | bind_gilstate_tstate(tstate); |
2473 | 16 | } |
2474 | 16 | } |
2475 | | |
2476 | | #if defined(Py_GIL_DISABLED) && !defined(Py_LIMITED_API) |
2477 | | uintptr_t |
2478 | | _Py_GetThreadLocal_Addr(void) |
2479 | | { |
2480 | | #ifdef HAVE_THREAD_LOCAL |
2481 | | // gh-112535: Use the address of the thread-local PyThreadState variable as |
2482 | | // a unique identifier for the current thread. Each thread has a unique |
2483 | | // _Py_tss_tstate variable with a unique address. |
2484 | | return (uintptr_t)&_Py_tss_tstate; |
2485 | | #else |
2486 | | # error "no supported thread-local variable storage classifier" |
2487 | | #endif |
2488 | | } |
2489 | | #endif |
2490 | | |
2491 | | /***********************************/ |
2492 | | /* routines for advanced debuggers */ |
2493 | | /***********************************/ |
2494 | | |
2495 | | // (requested by David Beazley) |
2496 | | // Don't use unless you know what you are doing! |
2497 | | |
2498 | | PyInterpreterState * |
2499 | | PyInterpreterState_Head(void) |
2500 | 0 | { |
2501 | 0 | return _PyRuntime.interpreters.head; |
2502 | 0 | } |
2503 | | |
2504 | | PyInterpreterState * |
2505 | | PyInterpreterState_Main(void) |
2506 | 0 | { |
2507 | 0 | return _PyInterpreterState_Main(); |
2508 | 0 | } |
2509 | | |
2510 | | PyInterpreterState * |
2511 | 0 | PyInterpreterState_Next(PyInterpreterState *interp) { |
2512 | 0 | return interp->next; |
2513 | 0 | } |
2514 | | |
2515 | | PyThreadState * |
2516 | 58.3k | PyInterpreterState_ThreadHead(PyInterpreterState *interp) { |
2517 | 58.3k | return interp->threads.head; |
2518 | 58.3k | } |
2519 | | |
2520 | | PyThreadState * |
2521 | 58.3k | PyThreadState_Next(PyThreadState *tstate) { |
2522 | 58.3k | return tstate->next; |
2523 | 58.3k | } |
2524 | | |
2525 | | |
2526 | | /********************************************/ |
2527 | | /* reporting execution state of all threads */ |
2528 | | /********************************************/ |
2529 | | |
2530 | | /* The implementation of sys._current_frames(). This is intended to be |
2531 | | called with the GIL held, as it will be when called via |
2532 | | sys._current_frames(). It's possible it would work fine even without |
2533 | | the GIL held, but we haven't thought enough about that.
2534 | | */ |
2535 | | PyObject * |
2536 | | _PyThread_CurrentFrames(void) |
2537 | 0 | { |
2538 | 0 | _PyRuntimeState *runtime = &_PyRuntime; |
2539 | 0 | PyThreadState *tstate = current_fast_get(); |
2540 | 0 | if (_PySys_Audit(tstate, "sys._current_frames", NULL) < 0) { |
2541 | 0 | return NULL; |
2542 | 0 | } |
2543 | | |
2544 | 0 | PyObject *result = PyDict_New(); |
2545 | 0 | if (result == NULL) { |
2546 | 0 | return NULL; |
2547 | 0 | } |
2548 | | |
2549 | | /* for i in all interpreters: |
2550 | | * for t in all of i's thread states: |
2551 | | * if t's frame isn't NULL, map t's id to its frame |
2552 | | * Because these lists can mutate even when the GIL is held, we |
2553 | | * need to grab head_mutex for the duration. |
2554 | | */ |
2555 | 0 | _PyEval_StopTheWorldAll(runtime); |
2556 | 0 | HEAD_LOCK(runtime); |
2557 | 0 | PyInterpreterState *i; |
2558 | 0 | for (i = runtime->interpreters.head; i != NULL; i = i->next) { |
2559 | 0 | _Py_FOR_EACH_TSTATE_UNLOCKED(i, t) { |
2560 | 0 | _PyInterpreterFrame *frame = t->current_frame; |
2561 | 0 | frame = _PyFrame_GetFirstComplete(frame); |
2562 | 0 | if (frame == NULL) { |
2563 | 0 | continue; |
2564 | 0 | } |
2565 | 0 | PyObject *id = PyLong_FromUnsignedLong(t->thread_id); |
2566 | 0 | if (id == NULL) { |
2567 | 0 | goto fail; |
2568 | 0 | } |
2569 | 0 | PyObject *frameobj = (PyObject *)_PyFrame_GetFrameObject(frame); |
2570 | 0 | if (frameobj == NULL) { |
2571 | 0 | Py_DECREF(id); |
2572 | 0 | goto fail; |
2573 | 0 | } |
2574 | 0 | int stat = PyDict_SetItem(result, id, frameobj); |
2575 | 0 | Py_DECREF(id); |
2576 | 0 | if (stat < 0) { |
2577 | 0 | goto fail; |
2578 | 0 | } |
2579 | 0 | } |
2580 | 0 | } |
2581 | 0 | goto done; |
2582 | | |
2583 | 0 | fail: |
2584 | 0 | Py_CLEAR(result); |
2585 | 0 |
2586 | 0 | done: |
2587 | 0 | HEAD_UNLOCK(runtime); |
2588 | 0 | _PyEval_StartTheWorldAll(runtime); |
2589 | 0 | return result; |
2590 | 0 | } |
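/* [Editor's sketch] The result maps thread ids to frame objects; a C
 * caller could walk it like this:
 *
 *     PyObject *frames = _PyThread_CurrentFrames();
 *     if (frames != NULL) {
 *         PyObject *id, *frame;
 *         Py_ssize_t pos = 0;
 *         while (PyDict_Next(frames, &pos, &id, &frame)) {
 *             // id and frame are borrowed references during iteration
 *         }
 *         Py_DECREF(frames);
 *     }
 */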
2591 | | |
2592 | | /* The implementation of sys._current_exceptions(). This is intended to be |
2593 | | called with the GIL held, as it will be when called via |
2594 | | sys._current_exceptions(). It's possible it would work fine even without |
2595 | | the GIL held, but we haven't thought enough about that.
2596 | | */ |
2597 | | PyObject * |
2598 | | _PyThread_CurrentExceptions(void) |
2599 | 0 | { |
2600 | 0 | _PyRuntimeState *runtime = &_PyRuntime; |
2601 | 0 | PyThreadState *tstate = current_fast_get(); |
2602 | 0 |
2603 | 0 | _Py_EnsureTstateNotNULL(tstate); |
2604 | 0 |
2605 | 0 | if (_PySys_Audit(tstate, "sys._current_exceptions", NULL) < 0) { |
2606 | 0 | return NULL; |
2607 | 0 | } |
2608 | | |
2609 | 0 | PyObject *result = PyDict_New(); |
2610 | 0 | if (result == NULL) { |
2611 | 0 | return NULL; |
2612 | 0 | } |
2613 | | |
2614 | | /* for i in all interpreters: |
2615 | | * for t in all of i's thread states: |
2616 | | * if t has a topmost exception, map t's id to its value (or None)
2617 | | * Because these lists can mutate even when the GIL is held, we |
2618 | | * need to grab head_mutex for the duration. |
2619 | | */ |
2620 | 0 | _PyEval_StopTheWorldAll(runtime); |
2621 | 0 | HEAD_LOCK(runtime); |
2622 | 0 | PyInterpreterState *i; |
2623 | 0 | for (i = runtime->interpreters.head; i != NULL; i = i->next) { |
2624 | 0 | _Py_FOR_EACH_TSTATE_UNLOCKED(i, t) { |
2625 | 0 | _PyErr_StackItem *err_info = _PyErr_GetTopmostException(t); |
2626 | 0 | if (err_info == NULL) { |
2627 | 0 | continue; |
2628 | 0 | } |
2629 | 0 | PyObject *id = PyLong_FromUnsignedLong(t->thread_id); |
2630 | 0 | if (id == NULL) { |
2631 | 0 | goto fail; |
2632 | 0 | } |
2633 | 0 | PyObject *exc = err_info->exc_value; |
2634 | 0 | assert(exc == NULL || |
2635 | 0 | exc == Py_None || |
2636 | 0 | PyExceptionInstance_Check(exc)); |
2637 | 0 |
2638 | 0 | int stat = PyDict_SetItem(result, id, exc == NULL ? Py_None : exc); |
2639 | 0 | Py_DECREF(id); |
2640 | 0 | if (stat < 0) { |
2641 | 0 | goto fail; |
2642 | 0 | } |
2643 | 0 | } |
2644 | 0 | } |
2645 | 0 | goto done; |
2646 | | |
2647 | 0 | fail: |
2648 | 0 | Py_CLEAR(result); |
2649 | 0 |
2650 | 0 | done: |
2651 | 0 | HEAD_UNLOCK(runtime); |
2652 | 0 | _PyEval_StartTheWorldAll(runtime); |
2653 | 0 | return result; |
2654 | 0 | } |
2655 | | |
2656 | | |
2657 | | /***********************************/ |
2658 | | /* Python "auto thread state" API. */ |
2659 | | /***********************************/ |
2660 | | |
2661 | | /* Internal initialization/finalization functions called by |
2662 | | Py_Initialize/Py_FinalizeEx |
2663 | | */ |
2664 | | PyStatus |
2665 | | _PyGILState_Init(PyInterpreterState *interp) |
2666 | 16 | { |
2667 | 16 | if (!_Py_IsMainInterpreter(interp)) { |
2668 | | /* Currently, PyGILState is shared by all interpreters. The main |
2669 | | * interpreter is responsible for initializing it. */
2670 | 0 | return _PyStatus_OK(); |
2671 | 0 | } |
2672 | 16 | _PyRuntimeState *runtime = interp->runtime; |
2673 | 16 | assert(gilstate_get() == NULL); |
2674 | 16 | assert(runtime->gilstate.autoInterpreterState == NULL); |
2675 | 16 | runtime->gilstate.autoInterpreterState = interp; |
2676 | 16 | return _PyStatus_OK(); |
2677 | 16 | } |
2678 | | |
2679 | | void |
2680 | | _PyGILState_Fini(PyInterpreterState *interp) |
2681 | 0 | { |
2682 | 0 | if (!_Py_IsMainInterpreter(interp)) { |
2683 | | /* Currently, PyGILState is shared by all interpreters. The main |
2684 | | * interpreter is responsible for initializing it. */
2685 | 0 | return; |
2686 | 0 | } |
2687 | 0 | interp->runtime->gilstate.autoInterpreterState = NULL; |
2688 | 0 | } |
2689 | | |
2690 | | |
2691 | | // XXX Drop this. |
2692 | | void |
2693 | | _PyGILState_SetTstate(PyThreadState *tstate) |
2694 | 16 | { |
2695 | | /* must init with valid states */ |
2696 | 16 | assert(tstate != NULL); |
2697 | 16 | assert(tstate->interp != NULL); |
2698 | | |
2699 | 16 | if (!_Py_IsMainInterpreter(tstate->interp)) { |
2700 | | /* Currently, PyGILState is shared by all interpreters. The main |
2701 | | * interpreter is responsible for initializing it. */
2702 | 0 | return; |
2703 | 0 | } |
2704 | | |
2705 | | #ifndef NDEBUG |
2706 | | _PyRuntimeState *runtime = tstate->interp->runtime; |
2707 | | |
2708 | | assert(runtime->gilstate.autoInterpreterState == tstate->interp); |
2709 | | assert(gilstate_get() == tstate); |
2710 | | assert(tstate->gilstate_counter == 1); |
2711 | | #endif |
2712 | 16 | } |
2713 | | |
2714 | | PyInterpreterState * |
2715 | | _PyGILState_GetInterpreterStateUnsafe(void) |
2716 | 0 | { |
2717 | 0 | return _PyRuntime.gilstate.autoInterpreterState; |
2718 | 0 | } |
2719 | | |
2720 | | /* The public functions */ |
2721 | | |
2722 | | PyThreadState * |
2723 | | PyGILState_GetThisThreadState(void) |
2724 | 0 | { |
2725 | 0 | return gilstate_get(); |
2726 | 0 | } |
2727 | | |
2728 | | int |
2729 | | PyGILState_Check(void) |
2730 | 0 | { |
2731 | 0 | _PyRuntimeState *runtime = &_PyRuntime; |
2732 | 0 | if (!runtime->gilstate.check_enabled) { |
2733 | 0 | return 1; |
2734 | 0 | } |
2735 | | |
2736 | 0 | PyThreadState *tstate = current_fast_get(); |
2737 | 0 | if (tstate == NULL) { |
2738 | 0 | return 0; |
2739 | 0 | } |
2740 | | |
2741 | 0 | PyThreadState *tcur = gilstate_get(); |
2742 | 0 | return (tstate == tcur); |
2743 | 0 | } |
2744 | | |
2745 | | PyGILState_STATE |
2746 | | PyGILState_Ensure(void) |
2747 | 0 | { |
2748 | 0 | _PyRuntimeState *runtime = &_PyRuntime; |
2749 | | |
2750 | | /* Note that we do not auto-init Python here - apart from |
2751 | | potential races with 2 threads auto-initializing, pep-311 |
2752 | | spells out other issues. Embedders are expected to have |
2753 | | called Py_Initialize(). */ |
2754 | | |
2755 | | /* Ensure that _PyEval_InitThreads() and _PyGILState_Init() have been |
2756 | | called by Py_Initialize() |
2757 | | |
2758 | | TODO: This isn't thread-safe. There's no protection here against |
2759 | | concurrent finalization of the interpreter; it's simply a guard |
2760 | | for *after* the interpreter has finalized. |
2761 | | */ |
2762 | 0 | if (!_PyEval_ThreadsInitialized() || runtime->gilstate.autoInterpreterState == NULL) { |
2763 | 0 | PyThread_hang_thread(); |
2764 | 0 | } |
2765 | | |
2766 | 0 | PyThreadState *tcur = gilstate_get(); |
2767 | 0 | int has_gil; |
2768 | 0 | if (tcur == NULL) { |
2769 | | /* Create a new Python thread state for this thread */ |
2770 | | // XXX Use PyInterpreterState_EnsureThreadState()? |
2771 | 0 | tcur = new_threadstate(runtime->gilstate.autoInterpreterState, |
2772 | 0 | _PyThreadState_WHENCE_GILSTATE); |
2773 | 0 | if (tcur == NULL) { |
2774 | 0 | Py_FatalError("Couldn't create thread-state for new thread"); |
2775 | 0 | } |
2776 | 0 | bind_tstate(tcur); |
2777 | 0 | bind_gilstate_tstate(tcur); |
2778 | | |
2779 | | /* This is our thread state! We'll need to delete it in the |
2780 | | matching call to PyGILState_Release(). */ |
2781 | 0 | assert(tcur->gilstate_counter == 1); |
2782 | 0 | tcur->gilstate_counter = 0; |
2783 | 0 | has_gil = 0; /* new thread state is never current */ |
2784 | 0 | } |
2785 | 0 | else { |
2786 | 0 | has_gil = holds_gil(tcur); |
2787 | 0 | } |
2788 | | |
2789 | 0 | if (!has_gil) { |
2790 | 0 | PyEval_RestoreThread(tcur); |
2791 | 0 | } |
2792 | | |
2793 | | /* Update our counter in the thread-state - no need for locks: |
2794 | | - tcur will remain valid as we hold the GIL. |
2795 | | - the counter is safe as we are the only thread "allowed" |
2796 | | to modify this value |
2797 | | */ |
2798 | 0 | ++tcur->gilstate_counter; |
2799 | 0 |
2800 | 0 | return has_gil ? PyGILState_LOCKED : PyGILState_UNLOCKED; |
2801 | 0 | } |
2802 | | |
2803 | | void |
2804 | | PyGILState_Release(PyGILState_STATE oldstate) |
2805 | 0 | { |
2806 | 0 | PyThreadState *tstate = gilstate_get(); |
2807 | 0 | if (tstate == NULL) { |
2808 | 0 | Py_FatalError("auto-releasing thread-state, " |
2809 | 0 | "but no thread-state for this thread"); |
2810 | 0 | } |
2811 | | |
2812 | | /* We must hold the GIL and have our thread state current */ |
2813 | 0 | if (!holds_gil(tstate)) { |
2814 | 0 | _Py_FatalErrorFormat(__func__, |
2815 | 0 | "thread state %p must be current when releasing", |
2816 | 0 | tstate); |
2817 | 0 | } |
2818 | 0 | --tstate->gilstate_counter; |
2819 | 0 | assert(tstate->gilstate_counter >= 0); /* illegal counter value */ |
2820 | | |
2821 | | /* If we're going to destroy this thread-state, we must |
2822 | | * clear it while the GIL is held, as destructors may run. |
2823 | | */ |
2824 | 0 | if (tstate->gilstate_counter == 0) { |
2825 | | /* can't have been locked when we created it */ |
2826 | 0 | assert(oldstate == PyGILState_UNLOCKED); |
2827 | | // XXX Unbind tstate here. |
2828 | | // gh-119585: `PyThreadState_Clear()` may call destructors that |
2829 | | // themselves use PyGILState_Ensure and PyGILState_Release, so make |
2830 | | // sure that gilstate_counter is not zero when calling it. |
2831 | 0 | ++tstate->gilstate_counter; |
2832 | 0 | PyThreadState_Clear(tstate); |
2833 | 0 | --tstate->gilstate_counter; |
2834 | | /* Delete the thread-state. Note this releases the GIL too! |
2835 | | * It's vital that the GIL be held here, to avoid shutdown |
2836 | | * races; see bugs 225673 and 1061968 (that nasty bug has a |
2837 | | * habit of coming back). |
2838 | | */ |
2839 | 0 | assert(tstate->gilstate_counter == 0); |
2840 | 0 | assert(current_fast_get() == tstate); |
2841 | 0 | _PyThreadState_DeleteCurrent(tstate); |
2842 | 0 | } |
2843 | | /* Release the lock if necessary */ |
2844 | 0 | else if (oldstate == PyGILState_UNLOCKED) { |
2845 | 0 | PyEval_SaveThread(); |
2846 | 0 | } |
2847 | 0 | } |
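/* [Editor's sketch] The intended Ensure/Release pairing from a thread that
 * may or may not already have a Python thread state (e.g. a C callback);
 * `on_event` and `some_callable` are hypothetical:
 *
 *     static void
 *     on_event(PyObject *some_callable)
 *     {
 *         PyGILState_STATE st = PyGILState_Ensure();
 *         PyObject *res = PyObject_CallNoArgs(some_callable);
 *         if (res == NULL) {
 *             PyErr_Print();
 *         }
 *         else {
 *             Py_DECREF(res);
 *         }
 *         PyGILState_Release(st);  // must mirror the Ensure() exactly
 *     }
 */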
2848 | | |
2849 | | |
2850 | | /*************/ |
2851 | | /* Other API */ |
2852 | | /*************/ |
2853 | | |
2854 | | _PyFrameEvalFunction |
2855 | | _PyInterpreterState_GetEvalFrameFunc(PyInterpreterState *interp) |
2856 | 0 | { |
2857 | 0 | if (interp->eval_frame == NULL) { |
2858 | 0 | return _PyEval_EvalFrameDefault; |
2859 | 0 | } |
2860 | 0 | return interp->eval_frame; |
2861 | 0 | } |
2862 | | |
2863 | | |
2864 | | void |
2865 | | _PyInterpreterState_SetEvalFrameFunc(PyInterpreterState *interp, |
2866 | | _PyFrameEvalFunction eval_frame) |
2867 | 0 | { |
2868 | 0 | if (eval_frame == _PyEval_EvalFrameDefault) { |
2869 | 0 | eval_frame = NULL; |
2870 | 0 | } |
2871 | 0 | if (eval_frame == interp->eval_frame) { |
2872 | 0 | return; |
2873 | 0 | } |
2874 | | #ifdef _Py_TIER2 |
2875 | | if (eval_frame != NULL) { |
2876 | | _Py_Executors_InvalidateAll(interp, 1); |
2877 | | } |
2878 | | #endif |
2879 | 0 | RARE_EVENT_INC(set_eval_frame_func); |
2880 | 0 | _PyEval_StopTheWorld(interp); |
2881 | 0 | interp->eval_frame = eval_frame; |
2882 | 0 | _PyEval_StartTheWorld(interp); |
2883 | 0 | } |
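/* [Editor's sketch] A minimal pass-through PEP 523 frame evaluator, the
 * simplest legal argument for the setter above; `counting_eval` is a
 * hypothetical name:
 *
 *     static PyObject *
 *     counting_eval(PyThreadState *ts, _PyInterpreterFrame *frame, int throwflag)
 *     {
 *         // ... count or log the call here ...
 *         return _PyEval_EvalFrameDefault(ts, frame, throwflag);
 *     }
 *
 *     // _PyInterpreterState_SetEvalFrameFunc(interp, counting_eval);
 */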
2884 | | |
2885 | | |
2886 | | const PyConfig* |
2887 | | _PyInterpreterState_GetConfig(PyInterpreterState *interp) |
2888 | 115M | { |
2889 | 115M | return &interp->config; |
2890 | 115M | } |
2891 | | |
2892 | | |
2893 | | const PyConfig* |
2894 | | _Py_GetConfig(void) |
2895 | 41.0k | { |
2896 | 41.0k | PyThreadState *tstate = current_fast_get(); |
2897 | 41.0k | _Py_EnsureTstateNotNULL(tstate); |
2898 | 41.0k | return _PyInterpreterState_GetConfig(tstate->interp); |
2899 | 41.0k | } |
2900 | | |
2901 | | |
2902 | | int |
2903 | | _PyInterpreterState_HasFeature(PyInterpreterState *interp, unsigned long feature) |
2904 | 0 | { |
2905 | 0 | return ((interp->feature_flags & feature) != 0); |
2906 | 0 | } |
2907 | | |
2908 | | |
2909 | 182k | #define MINIMUM_OVERHEAD 1000 |
2910 | | |
2911 | | static PyObject ** |
2912 | | push_chunk(PyThreadState *tstate, int size) |
2913 | 182k | { |
2914 | 182k | int allocate_size = _PY_DATA_STACK_CHUNK_SIZE; |
2915 | 182k | while (allocate_size < (int)sizeof(PyObject*)*(size + MINIMUM_OVERHEAD)) { |
2916 | 0 | allocate_size *= 2; |
2917 | 0 | } |
2918 | 182k | _PyStackChunk *new = allocate_chunk(allocate_size, tstate->datastack_chunk); |
2919 | 182k | if (new == NULL) { |
2920 | 0 | return NULL; |
2921 | 0 | } |
2922 | 182k | if (tstate->datastack_chunk) { |
2923 | 182k | tstate->datastack_chunk->top = tstate->datastack_top - |
2924 | 182k | &tstate->datastack_chunk->data[0]; |
2925 | 182k | } |
2926 | 182k | tstate->datastack_chunk = new; |
2927 | 182k | tstate->datastack_limit = (PyObject **)(((char *)new) + allocate_size); |
2928 | | // When new is the "root" chunk (i.e. new->previous == NULL), we can keep |
2929 | | // _PyThreadState_PopFrame from freeing it later by "skipping" over the |
2930 | | // first element: |
2931 | 182k | PyObject **res = &new->data[new->previous == NULL]; |
2932 | 182k | tstate->datastack_top = res + size; |
2933 | 182k | return res; |
2934 | 182k | } |
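/* [Editor's note] Worked example of the sizing loop above, assuming
 * _PY_DATA_STACK_CHUNK_SIZE is 16 KiB and 8-byte pointers: a request for
 * size == 3000 slots needs (3000 + MINIMUM_OVERHEAD) * 8 = 32000 bytes,
 * so allocate_size doubles once, from 16384 to 32768. */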
2935 | | |
2936 | | _PyInterpreterFrame * |
2937 | | _PyThreadState_PushFrame(PyThreadState *tstate, size_t size) |
2938 | 161M | { |
2939 | 161M | assert(size < INT_MAX/sizeof(PyObject *)); |
2940 | 161M | if (_PyThreadState_HasStackSpace(tstate, (int)size)) { |
2941 | 161M | _PyInterpreterFrame *res = (_PyInterpreterFrame *)tstate->datastack_top; |
2942 | 161M | tstate->datastack_top += size; |
2943 | 161M | return res; |
2944 | 161M | } |
2945 | 182k | return (_PyInterpreterFrame *)push_chunk(tstate, (int)size); |
2946 | 161M | } |
2947 | | |
2948 | | void |
2949 | | _PyThreadState_PopFrame(PyThreadState *tstate, _PyInterpreterFrame * frame) |
2950 | 501M | { |
2951 | 501M | assert(tstate->datastack_chunk); |
2952 | 501M | PyObject **base = (PyObject **)frame; |
2953 | 501M | if (base == &tstate->datastack_chunk->data[0]) { |
2954 | 182k | _PyStackChunk *chunk = tstate->datastack_chunk; |
2955 | 182k | _PyStackChunk *previous = chunk->previous; |
2956 | | // push_chunk ensures that the root chunk is never popped: |
2957 | 182k | assert(previous); |
2958 | 182k | tstate->datastack_top = &previous->data[previous->top]; |
2959 | 182k | tstate->datastack_chunk = previous; |
2960 | 182k | _PyObject_VirtualFree(chunk, chunk->size); |
2961 | 182k | tstate->datastack_limit = (PyObject **)(((char *)previous) + previous->size); |
2962 | 182k | } |
2963 | 501M | else { |
2964 | 501M | assert(tstate->datastack_top); |
2965 | 501M | assert(tstate->datastack_top >= base); |
2966 | 501M | tstate->datastack_top = base; |
2967 | 501M | } |
2968 | 501M | } |
2969 | | |
2970 | | |
2971 | | #ifndef NDEBUG |
2972 | | // Check that a Python thread state is valid. In practice, this function is
2973 | | // used on a Python debug build to check whether 'tstate' is a dangling
2974 | | // pointer, i.e. whether the PyThreadState memory has been freed.
2975 | | // |
2976 | | // Usage: |
2977 | | // |
2978 | | // assert(_PyThreadState_CheckConsistency(tstate)); |
2979 | | int |
2980 | | _PyThreadState_CheckConsistency(PyThreadState *tstate) |
2981 | | { |
2982 | | assert(!_PyMem_IsPtrFreed(tstate)); |
2983 | | assert(!_PyMem_IsPtrFreed(tstate->interp)); |
2984 | | return 1; |
2985 | | } |
2986 | | #endif |
2987 | | |
2988 | | |
2989 | | // Check if a Python thread must call _PyThreadState_HangThread() rather than
2990 | | // take the GIL or attach to the interpreter, because Py_Finalize() has
2991 | | // been called.
2992 | | // |
2993 | | // When this function is called by a daemon thread after Py_Finalize() has been |
2994 | | // called, the GIL may no longer exist. |
2995 | | // |
2996 | | // tstate must be non-NULL. |
2997 | | int |
2998 | | _PyThreadState_MustExit(PyThreadState *tstate) |
2999 | 63.0k | { |
3000 | 63.0k | int state = _Py_atomic_load_int_relaxed(&tstate->state); |
3001 | 63.0k | return state == _Py_THREAD_SHUTTING_DOWN; |
3002 | 63.0k | } |
3003 | | |
3004 | | void |
3005 | | _PyThreadState_HangThread(PyThreadState *tstate) |
3006 | 0 | { |
3007 | 0 | _PyThreadStateImpl *tstate_impl = (_PyThreadStateImpl *)tstate; |
3008 | 0 | decref_threadstate(tstate_impl); |
3009 | 0 | PyThread_hang_thread(); |
3010 | 0 | } |
3011 | | |
3012 | | /********************/ |
3013 | | /* mimalloc support */ |
3014 | | /********************/ |
3015 | | |
3016 | | static void |
3017 | | tstate_mimalloc_bind(PyThreadState *tstate) |
3018 | 16 | { |
3019 | | #ifdef Py_GIL_DISABLED |
3020 | | struct _mimalloc_thread_state *mts = &((_PyThreadStateImpl*)tstate)->mimalloc; |
3021 | | |
3022 | | // Initialize the mimalloc thread state. This must be called from the |
3023 | | // same thread that will use the thread state. The "mem" heap doubles as |
3024 | | // the "backing" heap. |
3025 | | mi_tld_t *tld = &mts->tld; |
3026 | | _mi_tld_init(tld, &mts->heaps[_Py_MIMALLOC_HEAP_MEM]); |
3027 | | llist_init(&mts->page_list); |
3028 | | |
3029 | | // Exiting threads push any remaining in-use segments to the abandoned |
3030 | | // pool to be reclaimed later by other threads. We use per-interpreter
3031 | | // pools to keep Python objects from different interpreters separate. |
3032 | | tld->segments.abandoned = &tstate->interp->mimalloc.abandoned_pool; |
3033 | | |
3034 | | // Don't fill in the first N bytes up to ob_type in debug builds: lock-less
3035 | | // dict and list accesses may still read ob_tid and the refcount fields, so
3036 | | // those must remain valid for a while after deallocation.
3037 | | size_t base_offset = offsetof(PyObject, ob_type); |
3038 | | if (_PyMem_DebugEnabled()) { |
3039 | | // The debug allocator adds two words at the beginning of each block. |
3040 | | base_offset += 2 * sizeof(size_t); |
3041 | | } |
3042 | | size_t debug_offsets[_Py_MIMALLOC_HEAP_COUNT] = { |
3043 | | [_Py_MIMALLOC_HEAP_OBJECT] = base_offset, |
3044 | | [_Py_MIMALLOC_HEAP_GC] = base_offset, |
3045 | | [_Py_MIMALLOC_HEAP_GC_PRE] = base_offset + 2 * sizeof(PyObject *), |
3046 | | }; |
3047 | | |
3048 | | // Initialize each heap |
3049 | | for (uint8_t i = 0; i < _Py_MIMALLOC_HEAP_COUNT; i++) { |
3050 | | _mi_heap_init_ex(&mts->heaps[i], tld, _mi_arena_id_none(), false, i); |
3051 | | mts->heaps[i].debug_offset = (uint8_t)debug_offsets[i]; |
3052 | | } |
3053 | | |
3054 | | // Heaps that store Python objects should use QSBR to delay freeing |
3055 | | // mimalloc pages while there may be concurrent lock-free readers. |
3056 | | mts->heaps[_Py_MIMALLOC_HEAP_OBJECT].page_use_qsbr = true; |
3057 | | mts->heaps[_Py_MIMALLOC_HEAP_GC].page_use_qsbr = true; |
3058 | | mts->heaps[_Py_MIMALLOC_HEAP_GC_PRE].page_use_qsbr = true; |
3059 | | |
3060 | | // By default, object allocations use _Py_MIMALLOC_HEAP_OBJECT. |
3061 | | // _PyObject_GC_New() and similar functions temporarily override this to |
3062 | | // use one of the GC heaps. |
3063 | | mts->current_object_heap = &mts->heaps[_Py_MIMALLOC_HEAP_OBJECT]; |
3064 | | |
3065 | | _Py_atomic_store_int(&mts->initialized, 1); |
3066 | | #endif |
3067 | 16 | } |
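
/* Illustration (hedged): what the per-heap debug_offset computed in
   tstate_mimalloc_bind() above buys us. When a debug build scribbles over a
   freed block, the first debug_offset bytes are left intact so that
   concurrent lock-free readers can still read the object header words
   (ob_tid and the refcount fields). debug_fill_freed() is hypothetical;
   mimalloc applies the equivalent internally. 0xDD matches PYMEM_DEADBYTE,
   the pattern CPython's debug allocator uses for freed memory. */

#include <string.h>

static void
debug_fill_freed(void *block, size_t block_size, size_t debug_offset)
{
    if (block_size > debug_offset) {
        /* Skip the header; everything after it is poisoned. */
        memset((char *)block + debug_offset, 0xDD, block_size - debug_offset);
    }
}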
3068 | | |
3069 | | void |
3070 | | _PyThreadState_ClearMimallocHeaps(PyThreadState *tstate) |
3071 | 0 | { |
3072 | | #ifdef Py_GIL_DISABLED |
3073 | | if (!tstate->_status.bound) { |
3074 | | // The mimalloc heaps are only initialized when the thread is bound. |
3075 | | return; |
3076 | | } |
3077 | | |
3078 | | _PyThreadStateImpl *tstate_impl = (_PyThreadStateImpl *)tstate; |
3079 | | for (Py_ssize_t i = 0; i < _Py_MIMALLOC_HEAP_COUNT; i++) { |
3080 | | // Abandon all segments in use by this thread. This pushes them to |
3081 | | // a shared pool to later be reclaimed by other threads. It's important |
3082 | | // to do this before the thread state is destroyed so that objects |
3083 | | // remain visible to the GC. |
3084 | | _mi_heap_collect_abandon(&tstate_impl->mimalloc.heaps[i]); |
3085 | | } |
3086 | | #endif |
3087 | 0 | } |
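
/* Illustration (hedged): the QSBR ("quiescent-state-based reclamation")
   scheme behind the page_use_qsbr flags set in tstate_mimalloc_bind()
   above, reduced to its core. A retired page may only be freed once every
   thread has passed a quiescent point after the retirement; CPython's real
   implementation lives in Python/qsbr.c. All names below are hypothetical,
   and NUM_THREADS is fixed for simplicity. */

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

#define NUM_THREADS 4

static atomic_uint_fast64_t global_epoch = 1;
static atomic_uint_fast64_t thread_epoch[NUM_THREADS];

/* Called by each thread at points where it holds no borrowed pointers
   into shared memory. */
static void
qsbr_quiescent(int tid)
{
    uint_fast64_t e = atomic_load_explicit(&global_epoch, memory_order_acquire);
    atomic_store_explicit(&thread_epoch[tid], e, memory_order_release);
}

/* Retire a resource: returns the epoch every thread must observe before
   the resource may actually be freed. */
static uint_fast64_t
qsbr_retire(void)
{
    return atomic_fetch_add(&global_epoch, 1) + 1;
}

/* True once every thread has reported an epoch >= goal, i.e. the grace
   period for anything retired with that goal has elapsed. */
static bool
qsbr_poll(uint_fast64_t goal)
{
    for (int i = 0; i < NUM_THREADS; i++) {
        if (atomic_load_explicit(&thread_epoch[i],
                                 memory_order_acquire) < goal) {
            return false;
        }
    }
    return true;
}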
3088 | | |
3089 | | |
3090 | | int |
3091 | | _Py_IsMainThread(void) |
3092 | 105M | { |
3093 | 105M | unsigned long thread = PyThread_get_thread_ident(); |
3094 | 105M | return (thread == _PyRuntime.main_thread); |
3095 | 105M | } |
3096 | | |
3097 | | |
3098 | | PyInterpreterState * |
3099 | | _PyInterpreterState_Main(void) |
3100 | 105M | { |
3101 | 105M | return _PyRuntime.interpreters.main; |
3102 | 105M | } |
3103 | | |
3104 | | |
3105 | | int |
3106 | | _Py_IsMainInterpreterFinalizing(PyInterpreterState *interp) |
3107 | 0 | { |
3108 | | /* bpo-39877: Access _PyRuntime directly rather than using |
3109 | | tstate->interp->runtime to support calls from Python daemon threads. |
3110 | | After Py_Finalize() has been called, tstate can be a dangling pointer
3111 | | that points to freed PyThreadState memory. */
3112 | 0 | return (_PyRuntimeState_GetFinalizing(&_PyRuntime) != NULL && |
3113 | 0 | interp == &_PyRuntime._main_interpreter); |
3114 | 0 | } |
3115 | | |
3116 | | |
3117 | | const PyConfig * |
3118 | | _Py_GetMainConfig(void) |
3119 | 0 | { |
3120 | 0 | PyInterpreterState *interp = _PyInterpreterState_Main(); |
3121 | 0 | if (interp == NULL) { |
3122 | 0 | return NULL; |
3123 | 0 | } |
3124 | 0 | return _PyInterpreterState_GetConfig(interp); |
3125 | 0 | } |