/src/cpython/Python/pystate.c
Line | Count | Source |
1 | | |
2 | | /* Thread and interpreter state structures and their interfaces */ |
3 | | |
4 | | #include "Python.h" |
5 | | #include "pycore_abstract.h" // _PyIndex_Check() |
6 | | #include "pycore_audit.h" // _Py_AuditHookEntry |
7 | | #include "pycore_ceval.h" // _PyEval_AcquireLock() |
8 | | #include "pycore_codecs.h" // _PyCodec_Fini() |
9 | | #include "pycore_critical_section.h" // _PyCriticalSection_Resume() |
10 | | #include "pycore_dtoa.h" // _dtoa_state_INIT() |
11 | | #include "pycore_freelist.h" // _PyObject_ClearFreeLists() |
12 | | #include "pycore_initconfig.h" // _PyStatus_OK() |
13 | | #include "pycore_interpframe.h" // _PyThreadState_HasStackSpace() |
14 | | #include "pycore_object.h" // _PyType_InitCache() |
15 | | #include "pycore_obmalloc.h" // _PyMem_obmalloc_state_on_heap() |
16 | | #include "pycore_optimizer.h" // JIT_CLEANUP_THRESHOLD |
17 | | #include "pycore_parking_lot.h" // _PyParkingLot_AfterFork() |
18 | | #include "pycore_pyerrors.h" // _PyErr_Clear() |
19 | | #include "pycore_pylifecycle.h" // _PyAST_Fini() |
20 | | #include "pycore_pymem.h" // _PyMem_DebugEnabled() |
21 | | #include "pycore_runtime.h" // _PyRuntime |
22 | | #include "pycore_runtime_init.h" // _PyRuntimeState_INIT |
23 | | #include "pycore_stackref.h" // Py_STACKREF_DEBUG |
24 | | #include "pycore_time.h" // _PyTime_Init() |
25 | | #include "pycore_uop.h" // UOP_BUFFER_SIZE |
26 | | #include "pycore_uniqueid.h" // _PyObject_FinalizePerThreadRefcounts() |
27 | | |
28 | | |
29 | | /* -------------------------------------------------------------------------- |
30 | | CAUTION |
31 | | |
32 | | Always use PyMem_RawMalloc() and PyMem_RawFree() directly in this file. A |
33 | | number of these functions are advertised as safe to call when the GIL isn't |
34 | | held, and in a debug build Python redirects allocation macros (e.g. PyMem_NEW) to Python's |
35 | | debugging obmalloc functions. Those aren't thread-safe (they rely on the GIL |
36 | | to avoid the expense of doing their own locking). |
37 | | -------------------------------------------------------------------------- */ |
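
A minimal sketch of the pattern this caution prescribes, for code that may run without the GIL (the helper names are illustrative, not CPython API):

    #include <Python.h>

    /* Sketch: GIL-free-safe allocation per the CAUTION above. Only the
       raw domain (PyMem_RawMalloc/PyMem_RawFree) is documented as safe
       to call without holding the GIL; the mem and object domains are
       not, particularly in debug builds where they route through the
       non-thread-safe debug obmalloc. */
    static void *
    make_scratch(size_t n)
    {
        void *buf = PyMem_RawMalloc(n);   /* safe from any thread */
        if (buf == NULL) {
            return NULL;                  /* no Python exception is set */
        }
        return buf;
    }

    static void
    free_scratch(void *buf)
    {
        PyMem_RawFree(buf);               /* also safe without the GIL */
    }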
38 | | |
39 | | #ifdef HAVE_DLOPEN |
40 | | # ifdef HAVE_DLFCN_H |
41 | | # include <dlfcn.h> |
42 | | # endif |
43 | | # if !HAVE_DECL_RTLD_LAZY |
44 | | # define RTLD_LAZY 1 |
45 | | # endif |
46 | | #endif |
47 | | |
48 | | |
49 | | /****************************************/ |
50 | | /* helpers for the current thread state */ |
51 | | /****************************************/ |
52 | | |
53 | | // API for the current thread state is further down. |
54 | | |
55 | | /* "current" means one of: |
56 | | - bound to the current OS thread |
57 | | - holds the GIL |
58 | | */ |
59 | | |
60 | | //------------------------------------------------- |
61 | | // a highly efficient lookup for the current thread |
62 | | //------------------------------------------------- |
63 | | |
64 | | /* |
65 | | The stored thread state is set by PyThreadState_Swap(). |
66 | | |
67 | | For each of these functions, the GIL must be held by the current thread. |
68 | | */ |
69 | | |
70 | | |
71 | | /* The attached thread state for the current thread. */ |
72 | | _Py_thread_local PyThreadState *_Py_tss_tstate = NULL; |
73 | | |
74 | | /* The "bound" thread state used by PyGILState_Ensure(), |
75 | | also known as a "gilstate." */ |
76 | | _Py_thread_local PyThreadState *_Py_tss_gilstate = NULL; |
77 | | |
78 | | /* The interpreter of the attached thread state, |
79 | | which is the same as tstate->interp. |
80 | | _Py_thread_local PyInterpreterState *_Py_tss_interp = NULL; |
81 | | |
82 | | static inline PyThreadState * |
83 | | current_fast_get(void) |
84 | 95.7M | { |
85 | 95.7M | return _Py_tss_tstate; |
86 | 95.7M | } |
87 | | |
88 | | static inline void |
89 | | current_fast_set(_PyRuntimeState *Py_UNUSED(runtime), PyThreadState *tstate) |
90 | 30.4k | { |
91 | 30.4k | assert(tstate != NULL); |
92 | 30.4k | _Py_tss_tstate = tstate; |
93 | 30.4k | assert(tstate->interp != NULL); |
94 | 30.4k | _Py_tss_interp = tstate->interp; |
95 | 30.4k | } |
96 | | |
97 | | static inline void |
98 | | current_fast_clear(_PyRuntimeState *Py_UNUSED(runtime)) |
99 | 30.4k | { |
100 | 30.4k | _Py_tss_tstate = NULL; |
101 | 30.4k | _Py_tss_interp = NULL; |
102 | 30.4k | } |
103 | | |
104 | | #define tstate_verify_not_active(tstate) \ |
105 | 0 | if (tstate == current_fast_get()) { \ |
106 | 0 | _Py_FatalErrorFormat(__func__, "tstate %p is still current", tstate); \ |
107 | 0 | } |
108 | | |
109 | | PyThreadState * |
110 | | _PyThreadState_GetCurrent(void) |
111 | 8.32M | { |
112 | 8.32M | return current_fast_get(); |
113 | 8.32M | } |
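
For comparison, a hedged sketch of reaching the same state through the public API (assumes CPython 3.13+ for PyThreadState_GetUnchecked()):

    #include <Python.h>

    /* Sketch: public counterparts of the fast lookup above.
       PyThreadState_Get() fatal-errors if no thread state is attached;
       PyThreadState_GetUnchecked() returns NULL instead. */
    static PyInterpreterState *
    current_interp_if_attached(void)
    {
        PyThreadState *tstate = PyThreadState_GetUnchecked();
        if (tstate == NULL) {
            return NULL;   /* current thread is not attached */
        }
        return PyThreadState_GetInterpreter(tstate);
    }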
114 | | |
115 | | |
116 | | //--------------------------------------------- |
117 | | // The thread state used by PyGILState_Ensure() |
118 | | //--------------------------------------------- |
119 | | |
120 | | /* |
121 | | The stored thread state is set by bind_tstate() (AKA PyThreadState_Bind()). |
122 | | |
123 | | The GIL does not need to be held for these. |
124 | | */ |
125 | | |
126 | | static inline PyThreadState * |
127 | | gilstate_get(void) |
128 | 32 | { |
129 | 32 | return _Py_tss_gilstate; |
130 | 32 | } |
131 | | |
132 | | static inline void |
133 | | gilstate_set(PyThreadState *tstate) |
134 | 16 | { |
135 | 16 | assert(tstate != NULL); |
136 | 16 | _Py_tss_gilstate = tstate; |
137 | 16 | } |
138 | | |
139 | | static inline void |
140 | | gilstate_clear(void) |
141 | 0 | { |
142 | 0 | _Py_tss_gilstate = NULL; |
143 | 0 | } |
144 | | |
145 | | |
146 | | #ifndef NDEBUG |
147 | | static inline int tstate_is_alive(PyThreadState *tstate); |
148 | | |
149 | | static inline int |
150 | | tstate_is_bound(PyThreadState *tstate) |
151 | | { |
152 | | return tstate->_status.bound && !tstate->_status.unbound; |
153 | | } |
154 | | #endif // !NDEBUG |
155 | | |
156 | | static void bind_gilstate_tstate(PyThreadState *); |
157 | | static void unbind_gilstate_tstate(PyThreadState *); |
158 | | |
159 | | static void tstate_mimalloc_bind(PyThreadState *); |
160 | | |
161 | | static void |
162 | | bind_tstate(PyThreadState *tstate) |
163 | 16 | { |
164 | 16 | assert(tstate != NULL); |
165 | 16 | assert(tstate_is_alive(tstate) && !tstate->_status.bound); |
166 | 16 | assert(!tstate->_status.unbound); // just in case |
167 | 16 | assert(!tstate->_status.bound_gilstate); |
168 | 16 | assert(tstate != gilstate_get()); |
169 | 16 | assert(!tstate->_status.active); |
170 | 16 | assert(tstate->thread_id == 0); |
171 | 16 | assert(tstate->native_thread_id == 0); |
172 | | |
173 | | // Currently we don't necessarily store the thread state |
174 | | // in thread-local storage (e.g. per-interpreter). |
175 | | |
176 | 16 | tstate->thread_id = PyThread_get_thread_ident(); |
177 | 16 | #ifdef PY_HAVE_THREAD_NATIVE_ID |
178 | 16 | tstate->native_thread_id = PyThread_get_thread_native_id(); |
179 | 16 | #endif |
180 | | |
181 | | #ifdef Py_GIL_DISABLED |
182 | | // Initialize biased reference counting inter-thread queue. Note that this |
183 | | // needs to be initialized from the active thread. |
184 | | _Py_brc_init_thread(tstate); |
185 | | #endif |
186 | | |
187 | | // mimalloc state needs to be initialized from the active thread. |
188 | 16 | tstate_mimalloc_bind(tstate); |
189 | | |
190 | 16 | tstate->_status.bound = 1; |
191 | 16 | } |
192 | | |
193 | | static void |
194 | | unbind_tstate(PyThreadState *tstate) |
195 | 0 | { |
196 | 0 | assert(tstate != NULL); |
197 | 0 | assert(tstate_is_bound(tstate)); |
198 | 0 | #ifndef HAVE_PTHREAD_STUBS |
199 | 0 | assert(tstate->thread_id > 0); |
200 | 0 | #endif |
201 | 0 | #ifdef PY_HAVE_THREAD_NATIVE_ID |
202 | 0 | assert(tstate->native_thread_id > 0); |
203 | 0 | #endif |
204 | | |
205 | | // We leave thread_id and native_thread_id alone |
206 | | // since they can be useful for debugging. |
207 | | // Check the `_status` field to know if these values |
208 | | // are still valid. |
209 | | |
210 | | // We leave tstate->_status.bound set to 1 |
211 | | // to indicate it was previously bound. |
212 | 0 | tstate->_status.unbound = 1; |
213 | 0 | } |
214 | | |
215 | | |
216 | | /* Stick the thread state for this thread in thread specific storage. |
217 | | |
218 | | When a thread state is created for a thread by some mechanism |
219 | | other than PyGILState_Ensure(), it's important that the GILState |
220 | | machinery knows about it so it doesn't try to create another |
221 | | thread state for the thread. |
222 | | (This is a better fix for SF bug #1010677 than the first one attempted.) |
223 | | |
224 | | The only situation where you can legitimately have more than one |
225 | | thread state for an OS level thread is when there are multiple |
226 | | interpreters. |
227 | | |
228 | | Before 3.12, the PyGILState_*() APIs didn't work with multiple |
229 | | interpreters (see bpo-10915 and bpo-15751), so this function used |
230 | | to set TSS only once. Thus, the first thread state created for a |
231 | | given OS-level thread would "win", which seemed reasonable behaviour. |
232 | | */ |
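
The caller-side pattern this machinery supports is the usual PyGILState_Ensure()/PyGILState_Release() pair; a minimal sketch for a thread created outside Python:

    #include <Python.h>

    /* Sketch: attaching a foreign (non-Python-created) thread.
       PyGILState_Ensure() binds or reuses a thread state for the
       current OS thread via the TSS binding maintained below. */
    static void
    run_callback_from_foreign_thread(void)
    {
        PyGILState_STATE gstate = PyGILState_Ensure();

        /* The C API is safe to use between Ensure and Release. */
        PyRun_SimpleString("print('hello from a foreign thread')");

        PyGILState_Release(gstate);
    }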
233 | | |
234 | | static void |
235 | | bind_gilstate_tstate(PyThreadState *tstate) |
236 | 16 | { |
237 | 16 | assert(tstate != NULL); |
238 | 16 | assert(tstate_is_alive(tstate)); |
239 | 16 | assert(tstate_is_bound(tstate)); |
240 | | // XXX assert(!tstate->_status.active); |
241 | 16 | assert(!tstate->_status.bound_gilstate); |
242 | | |
243 | 16 | PyThreadState *tcur = gilstate_get(); |
244 | 16 | assert(tstate != tcur); |
245 | | |
246 | 16 | if (tcur != NULL) { |
247 | 0 | tcur->_status.bound_gilstate = 0; |
248 | 0 | } |
249 | 16 | gilstate_set(tstate); |
250 | 16 | tstate->_status.bound_gilstate = 1; |
251 | 16 | } |
252 | | |
253 | | static void |
254 | | unbind_gilstate_tstate(PyThreadState *tstate) |
255 | 0 | { |
256 | 0 | assert(tstate != NULL); |
257 | | // XXX assert(tstate_is_alive(tstate)); |
258 | 0 | assert(tstate_is_bound(tstate)); |
259 | | // XXX assert(!tstate->_status.active); |
260 | 0 | assert(tstate->_status.bound_gilstate); |
261 | 0 | assert(tstate == gilstate_get()); |
262 | 0 | gilstate_clear(); |
263 | 0 | tstate->_status.bound_gilstate = 0; |
264 | 0 | } |
265 | | |
266 | | |
267 | | //---------------------------------------------- |
268 | | // the thread state that currently holds the GIL |
269 | | //---------------------------------------------- |
270 | | |
271 | | /* This is not exported, as it is not reliable! It can only |
272 | | ever be compared to the state for the *current* thread. |
273 | | * If not equal, then it doesn't matter that the actual |
274 | | value may change immediately after comparison, as it can't |
275 | | possibly change to the current thread's state. |
276 | | * If equal, then the current thread holds the lock, so the value can't |
277 | | change until we yield the lock. |
278 | | */ |
279 | | static int |
280 | | holds_gil(PyThreadState *tstate) |
281 | 0 | { |
282 | | // XXX Fall back to tstate->interp->runtime->ceval.gil.last_holder |
283 | | // (and tstate->interp->runtime->ceval.gil.locked). |
284 | 0 | assert(tstate != NULL); |
285 | | /* Must be the tstate for this thread */ |
286 | 0 | assert(tstate == gilstate_get()); |
287 | 0 | return tstate == current_fast_get(); |
288 | 0 | } |
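
Extension code needing this check should use the public PyGILState_Check() instead; a one-line sketch:

    #include <Python.h>
    #include <assert.h>

    /* Sketch: the public analogue of the private holds_gil(). */
    static void
    require_attached(void)
    {
        assert(PyGILState_Check());   /* 1 iff this thread holds the GIL */
    }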
289 | | |
290 | | |
291 | | /****************************/ |
292 | | /* the global runtime state */ |
293 | | /****************************/ |
294 | | |
295 | | //---------- |
296 | | // lifecycle |
297 | | //---------- |
298 | | |
299 | | /* Suppress deprecation warning for PyBytesObject.ob_shash */ |
300 | | _Py_COMP_DIAG_PUSH |
301 | | _Py_COMP_DIAG_IGNORE_DEPR_DECLS |
302 | | /* We use "initial" if the runtime gets re-used |
303 | | (e.g. Py_Finalize() followed by Py_Initialize()). |
304 | | Note that we initialize "initial" relative to _PyRuntime, |
305 | | to ensure pre-initialized pointers point to the active |
306 | | runtime state (and not "initial"). */ |
307 | | static const _PyRuntimeState initial = _PyRuntimeState_INIT(_PyRuntime, ""); |
308 | | _Py_COMP_DIAG_POP |
309 | | |
310 | | #define LOCKS_INIT(runtime) \ |
311 | 0 | { \ |
312 | 0 | &(runtime)->interpreters.mutex, \ |
313 | 0 | &(runtime)->xi.data_lookup.registry.mutex, \ |
314 | 0 | &(runtime)->unicode_state.ids.mutex, \ |
315 | 0 | &(runtime)->imports.extensions.mutex, \ |
316 | 0 | &(runtime)->ceval.pending_mainthread.mutex, \ |
317 | 0 | &(runtime)->atexit.mutex, \ |
318 | 0 | &(runtime)->audit_hooks.mutex, \ |
319 | 0 | &(runtime)->allocators.mutex, \ |
320 | 0 | &(runtime)->_main_interpreter.types.mutex, \ |
321 | 0 | &(runtime)->_main_interpreter.code_state.mutex, \ |
322 | 0 | } |
323 | | |
324 | | static void |
325 | | init_runtime(_PyRuntimeState *runtime, |
326 | | void *open_code_hook, void *open_code_userdata, |
327 | | _Py_AuditHookEntry *audit_hook_head, |
328 | | Py_ssize_t unicode_next_index) |
329 | 16 | { |
330 | 16 | assert(!runtime->preinitializing); |
331 | 16 | assert(!runtime->preinitialized); |
332 | 16 | assert(!runtime->core_initialized); |
333 | 16 | assert(!runtime->initialized); |
334 | 16 | assert(!runtime->_initialized); |
335 | | |
336 | 16 | runtime->open_code_hook = open_code_hook; |
337 | 16 | runtime->open_code_userdata = open_code_userdata; |
338 | 16 | runtime->audit_hooks.head = audit_hook_head; |
339 | | |
340 | 16 | PyPreConfig_InitPythonConfig(&runtime->preconfig); |
341 | | |
342 | | // Set it to the ID of the main thread of the main interpreter. |
343 | 16 | runtime->main_thread = PyThread_get_thread_ident(); |
344 | | |
345 | 16 | runtime->unicode_state.ids.next_index = unicode_next_index; |
346 | 16 | runtime->_initialized = 1; |
347 | 16 | } |
348 | | |
349 | | PyStatus |
350 | | _PyRuntimeState_Init(_PyRuntimeState *runtime) |
351 | 16 | { |
352 | | /* We preserve the hook across init, because there is |
353 | | currently no public API to set it between runtime |
354 | | initialization and interpreter initialization. */ |
355 | 16 | void *open_code_hook = runtime->open_code_hook; |
356 | 16 | void *open_code_userdata = runtime->open_code_userdata; |
357 | 16 | _Py_AuditHookEntry *audit_hook_head = runtime->audit_hooks.head; |
358 | | // bpo-42882: Preserve next_index value if Py_Initialize()/Py_Finalize() |
359 | | // is called multiple times. |
360 | 16 | Py_ssize_t unicode_next_index = runtime->unicode_state.ids.next_index; |
361 | | |
362 | 16 | if (runtime->_initialized) { |
363 | | // Py_Initialize() must be running again. |
364 | | // Reset to _PyRuntimeState_INIT. |
365 | 0 | memcpy(runtime, &initial, sizeof(*runtime)); |
366 | | // Preserve the cookie from the original runtime. |
367 | 0 | memcpy(runtime->debug_offsets.cookie, _Py_Debug_Cookie, 8); |
368 | 0 | assert(!runtime->_initialized); |
369 | 0 | } |
370 | | |
371 | 16 | PyStatus status = _PyTime_Init(&runtime->time); |
372 | 16 | if (_PyStatus_EXCEPTION(status)) { |
373 | 0 | return status; |
374 | 0 | } |
375 | | |
376 | 16 | init_runtime(runtime, open_code_hook, open_code_userdata, audit_hook_head, |
377 | 16 | unicode_next_index); |
378 | | |
379 | 16 | return _PyStatus_OK(); |
380 | 16 | } |
381 | | |
382 | | void |
383 | | _PyRuntimeState_Fini(_PyRuntimeState *runtime) |
384 | 0 | { |
385 | | #ifdef Py_REF_DEBUG |
386 | | /* The count is cleared by _Py_FinalizeRefTotal(). */ |
387 | | assert(runtime->object_state.interpreter_leaks == 0); |
388 | | #endif |
389 | 0 | gilstate_clear(); |
390 | 0 | } |
391 | | |
392 | | #ifdef HAVE_FORK |
393 | | /* This function is called from PyOS_AfterFork_Child to ensure that |
394 | | newly created child processes do not share locks with the parent. */ |
395 | | PyStatus |
396 | | _PyRuntimeState_ReInitThreads(_PyRuntimeState *runtime) |
397 | 0 | { |
398 | | // This was initially set in _PyRuntimeState_Init(). |
399 | 0 | runtime->main_thread = PyThread_get_thread_ident(); |
400 | | |
401 | | // Clears the parking lot. Any waiting threads are dead. This must be |
402 | | // called before releasing any locks that use the parking lot. |
403 | 0 | _PyParkingLot_AfterFork(); |
404 | | |
405 | | // Re-initialize global locks |
406 | 0 | PyMutex *locks[] = LOCKS_INIT(runtime); |
407 | 0 | for (size_t i = 0; i < Py_ARRAY_LENGTH(locks); i++) { |
408 | 0 | _PyMutex_at_fork_reinit(locks[i]); |
409 | 0 | } |
410 | | #ifdef Py_GIL_DISABLED |
411 | | for (PyInterpreterState *interp = runtime->interpreters.head; |
412 | | interp != NULL; interp = interp->next) |
413 | | { |
414 | | for (int i = 0; i < NUM_WEAKREF_LIST_LOCKS; i++) { |
415 | | _PyMutex_at_fork_reinit(&interp->weakref_locks[i]); |
416 | | } |
417 | | } |
418 | | #endif |
419 | |
420 | 0 | _PyTypes_AfterFork(); |
421 | |
422 | 0 | _PyThread_AfterFork(&runtime->threads); |
423 | |
424 | 0 | return _PyStatus_OK(); |
425 | 0 | } |
426 | | #endif |
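
On the embedder side, the re-initialization above runs via PyOS_AfterFork_Child(); a sketch of the canonical fork sequence (POSIX only):

    #include <Python.h>
    #include <unistd.h>

    /* Sketch: forking while keeping the runtime consistent.
       PyOS_AfterFork_Child() invokes _PyRuntimeState_ReInitThreads()
       (above) among other per-child fixups. */
    static pid_t
    fork_with_python(void)
    {
        PyOS_BeforeFork();
        pid_t pid = fork();
        if (pid == 0) {
            PyOS_AfterFork_Child();    /* child: re-init locks, parking lot */
        }
        else {
            PyOS_AfterFork_Parent();   /* parent (fork() failure included) */
        }
        return pid;
    }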
427 | | |
428 | | |
429 | | /*************************************/ |
430 | | /* the per-interpreter runtime state */ |
431 | | /*************************************/ |
432 | | |
433 | | //---------- |
434 | | // lifecycle |
435 | | //---------- |
436 | | |
437 | | /* Calling this indicates that the runtime is ready to create interpreters. */ |
438 | | |
439 | | PyStatus |
440 | | _PyInterpreterState_Enable(_PyRuntimeState *runtime) |
441 | 16 | { |
442 | 16 | struct pyinterpreters *interpreters = &runtime->interpreters; |
443 | 16 | interpreters->next_id = 0; |
444 | 16 | return _PyStatus_OK(); |
445 | 16 | } |
446 | | |
447 | | static PyInterpreterState * |
448 | | alloc_interpreter(void) |
449 | 0 | { |
450 | | // Aligned allocation for PyInterpreterState. |
451 | | // The first word of the memory block stores the original |
452 | | // pointer, which is used later to free the memory. |
453 | 0 | size_t alignment = _Alignof(PyInterpreterState); |
454 | 0 | size_t allocsize = sizeof(PyInterpreterState) + sizeof(void *) + alignment - 1; |
455 | 0 | void *mem = PyMem_RawCalloc(1, allocsize); |
456 | 0 | if (mem == NULL) { |
457 | 0 | return NULL; |
458 | 0 | } |
459 | 0 | void *ptr = _Py_ALIGN_UP((char *)mem + sizeof(void *), alignment); |
460 | 0 | ((void **)ptr)[-1] = mem; |
461 | 0 | assert(_Py_IS_ALIGNED(ptr, alignment)); |
462 | 0 | return ptr; |
463 | 0 | } |
464 | | |
465 | | static void |
466 | | free_interpreter(PyInterpreterState *interp) |
467 | 0 | { |
468 | | // The main interpreter is statically allocated, so |
469 | | // it should not be freed. |
470 | 0 | if (interp != &_PyRuntime._main_interpreter) { |
471 | 0 | if (_PyMem_obmalloc_state_on_heap(interp)) { |
472 | | // interpreter has its own obmalloc state, free it |
473 | 0 | PyMem_RawFree(interp->obmalloc); |
474 | 0 | interp->obmalloc = NULL; |
475 | 0 | } |
476 | 0 | assert(_Py_IS_ALIGNED(interp, _Alignof(PyInterpreterState))); |
477 | 0 | PyMem_RawFree(((void **)interp)[-1]); |
478 | 0 | } |
479 | 0 | } |
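
The alignment trick used by alloc_interpreter()/free_interpreter() generalizes; a standalone sketch, assuming a power-of-two alignment (the helper names are illustrative, not CPython API):

    #include <Python.h>
    #include <stdint.h>

    /* Sketch: over-allocate, round the user pointer up to the
       alignment, and stash the original block pointer one word below
       it so the matching free can recover it. */
    static void *
    aligned_raw_alloc(size_t size, size_t alignment)
    {
        void *mem = PyMem_RawCalloc(1, size + sizeof(void *) + alignment - 1);
        if (mem == NULL) {
            return NULL;
        }
        uintptr_t user = ((uintptr_t)mem + sizeof(void *) + alignment - 1)
                         & ~(uintptr_t)(alignment - 1);
        ((void **)user)[-1] = mem;   /* recovered by aligned_raw_free() */
        return (void *)user;
    }

    static void
    aligned_raw_free(void *ptr)
    {
        if (ptr != NULL) {
            PyMem_RawFree(((void **)ptr)[-1]);
        }
    }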
480 | | |
481 | | #ifndef NDEBUG |
482 | | static inline int check_interpreter_whence(long); |
483 | | #endif |
484 | | |
485 | | extern _Py_CODEUNIT * |
486 | | _Py_LazyJitTrampoline( |
487 | | struct _PyExecutorObject *exec, _PyInterpreterFrame *frame, _PyStackRef *stack_pointer, PyThreadState *tstate |
488 | | ); |
489 | | |
490 | | /* Get the interpreter state to a minimal consistent state. |
491 | | Further init happens in pylifecycle.c before it can be used. |
492 | | All fields not initialized here are expected to be zeroed out, |
493 | | e.g. by PyMem_RawCalloc() or memset(), or otherwise pre-initialized. |
494 | | The runtime state is not manipulated. Instead it is assumed that |
495 | | the interpreter is getting added to the runtime. |
496 | | |
497 | | Note that the main interpreter was statically initialized as part |
498 | | of the runtime and most state is already set properly. That leaves |
499 | | a small number of fields to initialize dynamically, as well as some |
500 | | that are initialized lazily. |
501 | | |
502 | | For subinterpreters we memcpy() the main interpreter in |
503 | | PyInterpreterState_New(), leaving it in the same mostly-initialized |
504 | | state. The only difference is that the interpreter has some |
505 | | self-referential state that is statically initialized to the |
506 | | main interpreter. We fix those fields here, in addition |
507 | | to the other dynamically initialized fields. |
508 | | */ |
509 | | static PyStatus |
510 | | init_interpreter(PyInterpreterState *interp, |
511 | | _PyRuntimeState *runtime, int64_t id, |
512 | | PyInterpreterState *next, |
513 | | long whence) |
514 | 16 | { |
515 | 16 | if (interp->_initialized) { |
516 | 0 | return _PyStatus_ERR("interpreter already initialized"); |
517 | 0 | } |
518 | | |
519 | 16 | assert(interp->_whence == _PyInterpreterState_WHENCE_NOTSET); |
520 | 16 | assert(check_interpreter_whence(whence) == 0); |
521 | 16 | interp->_whence = whence; |
522 | | |
523 | 16 | assert(runtime != NULL); |
524 | 16 | interp->runtime = runtime; |
525 | | |
526 | 16 | assert(id > 0 || (id == 0 && interp == runtime->interpreters.main)); |
527 | 16 | interp->id = id; |
528 | | |
529 | 16 | interp->id_refcount = 0; |
530 | | |
531 | 16 | assert(runtime->interpreters.head == interp); |
532 | 16 | assert(next != NULL || (interp == runtime->interpreters.main)); |
533 | 16 | interp->next = next; |
534 | | |
535 | 16 | interp->threads.preallocated = &interp->_initial_thread; |
536 | | |
537 | | // We would call _PyObject_InitState() at this point |
538 | | // if interp->feature_flags were already set. |
539 | | |
540 | 16 | _PyEval_InitState(interp); |
541 | 16 | _PyGC_InitState(&interp->gc); |
542 | 16 | PyConfig_InitPythonConfig(&interp->config); |
543 | 16 | _PyType_InitCache(interp); |
544 | | #ifdef Py_GIL_DISABLED |
545 | | _Py_brc_init_state(interp); |
546 | | #endif |
547 | | |
548 | | #ifdef _Py_TIER2 |
549 | | // Ensure the buffer is initialized to NULL. |
550 | | interp->jit_uop_buffer = NULL; |
551 | | #endif |
552 | 16 | llist_init(&interp->mem_free_queue.head); |
553 | 16 | llist_init(&interp->asyncio_tasks_head); |
554 | 16 | interp->asyncio_tasks_lock = (PyMutex){0}; |
555 | 272 | for (int i = 0; i < _PY_MONITORING_UNGROUPED_EVENTS; i++) { |
556 | 256 | interp->monitors.tools[i] = 0; |
557 | 256 | } |
558 | 144 | for (int t = 0; t < PY_MONITORING_TOOL_IDS; t++) { |
559 | 2.56k | for (int e = 0; e < _PY_MONITORING_EVENTS; e++) { |
560 | 2.43k | interp->monitoring_callables[t][e] = NULL; |
561 | | |
562 | 2.43k | } |
563 | 128 | interp->monitoring_tool_versions[t] = 0; |
564 | 128 | } |
565 | 16 | interp->_code_object_generation = 0; |
566 | 16 | interp->jit = false; |
567 | 16 | interp->compiling = false; |
568 | 16 | interp->executor_list_head = NULL; |
569 | 16 | interp->executor_deletion_list_head = NULL; |
570 | 16 | interp->executor_deletion_list_remaining_capacity = 0; |
571 | 16 | interp->executor_creation_counter = JIT_CLEANUP_THRESHOLD; |
572 | 16 | if (interp != &runtime->_main_interpreter) { |
573 | | /* Fix the self-referential, statically initialized fields. */ |
574 | 0 | interp->dtoa = (struct _dtoa_state)_dtoa_state_INIT(interp); |
575 | 0 | } |
576 | | #if !defined(Py_GIL_DISABLED) && defined(Py_STACKREF_DEBUG) |
577 | | interp->next_stackref = INITIAL_STACKREF_INDEX; |
578 | | _Py_hashtable_allocator_t alloc = { |
579 | | .malloc = malloc, |
580 | | .free = free, |
581 | | }; |
582 | | interp->open_stackrefs_table = _Py_hashtable_new_full( |
583 | | _Py_hashtable_hash_ptr, |
584 | | _Py_hashtable_compare_direct, |
585 | | NULL, |
586 | | NULL, |
587 | | &alloc |
588 | | ); |
589 | | # ifdef Py_STACKREF_CLOSE_DEBUG |
590 | | interp->closed_stackrefs_table = _Py_hashtable_new_full( |
591 | | _Py_hashtable_hash_ptr, |
592 | | _Py_hashtable_compare_direct, |
593 | | NULL, |
594 | | NULL, |
595 | | &alloc |
596 | | ); |
597 | | # endif |
598 | | _Py_stackref_associate(interp, Py_None, PyStackRef_None); |
599 | | _Py_stackref_associate(interp, Py_False, PyStackRef_False); |
600 | | _Py_stackref_associate(interp, Py_True, PyStackRef_True); |
601 | | #endif |
602 | | |
603 | 16 | interp->_initialized = 1; |
604 | 16 | return _PyStatus_OK(); |
605 | 16 | } |
606 | | |
607 | | |
608 | | PyStatus |
609 | | _PyInterpreterState_New(PyThreadState *tstate, PyInterpreterState **pinterp) |
610 | 16 | { |
611 | 16 | *pinterp = NULL; |
612 | | |
613 | | // Don't get runtime from tstate since tstate can be NULL |
614 | 16 | _PyRuntimeState *runtime = &_PyRuntime; |
615 | | |
616 | | // tstate is NULL when pycore_create_interpreter() calls |
617 | | // _PyInterpreterState_New() to create the main interpreter. |
618 | 16 | if (tstate != NULL) { |
619 | 0 | if (_PySys_Audit(tstate, "cpython.PyInterpreterState_New", NULL) < 0) { |
620 | 0 | return _PyStatus_ERR("sys.audit failed"); |
621 | 0 | } |
622 | 0 | } |
623 | | |
624 | | /* We completely serialize creation of multiple interpreters, since |
625 | | it simplifies things here and blocking concurrent calls isn't a problem. |
626 | | Regardless, we must fully block subinterpreter creation until |
627 | | after the main interpreter is created. */ |
628 | 16 | HEAD_LOCK(runtime); |
629 | | |
630 | 16 | struct pyinterpreters *interpreters = &runtime->interpreters; |
631 | 16 | int64_t id = interpreters->next_id; |
632 | 16 | interpreters->next_id += 1; |
633 | | |
634 | | // Allocate the interpreter and add it to the runtime state. |
635 | 16 | PyInterpreterState *interp; |
636 | 16 | PyStatus status; |
637 | 16 | PyInterpreterState *old_head = interpreters->head; |
638 | 16 | if (old_head == NULL) { |
639 | | // We are creating the main interpreter. |
640 | 16 | assert(interpreters->main == NULL); |
641 | 16 | assert(id == 0); |
642 | | |
643 | 16 | interp = &runtime->_main_interpreter; |
644 | 16 | assert(interp->id == 0); |
645 | 16 | assert(interp->next == NULL); |
646 | | |
647 | 16 | interpreters->main = interp; |
648 | 16 | } |
649 | 0 | else { |
650 | 0 | assert(interpreters->main != NULL); |
651 | 0 | assert(id != 0); |
652 | |
653 | 0 | interp = alloc_interpreter(); |
654 | 0 | if (interp == NULL) { |
655 | 0 | status = _PyStatus_NO_MEMORY(); |
656 | 0 | goto error; |
657 | 0 | } |
658 | | // Set to _PyInterpreterState_INIT. |
659 | 0 | memcpy(interp, &initial._main_interpreter, sizeof(*interp)); |
660 | |
661 | 0 | if (id < 0) { |
662 | | /* overflow or Py_Initialize() not called yet! */ |
663 | 0 | status = _PyStatus_ERR("failed to get an interpreter ID"); |
664 | 0 | goto error; |
665 | 0 | } |
666 | 0 | } |
667 | 16 | interpreters->head = interp; |
668 | | |
669 | 16 | long whence = _PyInterpreterState_WHENCE_UNKNOWN; |
670 | 16 | status = init_interpreter(interp, runtime, |
671 | 16 | id, old_head, whence); |
672 | 16 | if (_PyStatus_EXCEPTION(status)) { |
673 | 0 | goto error; |
674 | 0 | } |
675 | | |
676 | 16 | HEAD_UNLOCK(runtime); |
677 | | |
678 | 16 | assert(interp != NULL); |
679 | 16 | *pinterp = interp; |
680 | 16 | return _PyStatus_OK(); |
681 | | |
682 | 0 | error: |
683 | 0 | HEAD_UNLOCK(runtime); |
684 | |
685 | 0 | if (interp != NULL) { |
686 | 0 | free_interpreter(interp); |
687 | 0 | } |
688 | 0 | return status; |
689 | 16 | } |
690 | | |
691 | | |
692 | | PyInterpreterState * |
693 | | PyInterpreterState_New(void) |
694 | 0 | { |
695 | | // tstate can be NULL |
696 | 0 | PyThreadState *tstate = current_fast_get(); |
697 | |
698 | 0 | PyInterpreterState *interp; |
699 | 0 | PyStatus status = _PyInterpreterState_New(tstate, &interp); |
700 | 0 | if (_PyStatus_EXCEPTION(status)) { |
701 | 0 | Py_ExitStatusException(status); |
702 | 0 | } |
703 | 0 | assert(interp != NULL); |
704 | 0 | return interp; |
705 | 0 | } |
706 | | |
707 | | #if !defined(Py_GIL_DISABLED) && defined(Py_STACKREF_DEBUG) |
708 | | extern void |
709 | | _Py_stackref_report_leaks(PyInterpreterState *interp); |
710 | | #endif |
711 | | |
712 | | static void |
713 | | interpreter_clear(PyInterpreterState *interp, PyThreadState *tstate) |
714 | 0 | { |
715 | 0 | assert(interp != NULL); |
716 | 0 | assert(tstate != NULL); |
717 | 0 | _PyRuntimeState *runtime = interp->runtime; |
718 | | |
719 | | /* XXX Conditions we need to enforce: |
720 | | |
721 | | * the GIL must be held by the current thread |
722 | | * tstate must be the "current" thread state (current_fast_get()) |
723 | | * tstate->interp must be interp |
724 | | * for the main interpreter, tstate must be the main thread |
725 | | */ |
726 | | // XXX Ideally, we would not rely on any thread state in this function |
727 | | // (and we would drop the "tstate" argument). |
728 | |
729 | 0 | if (_PySys_Audit(tstate, "cpython.PyInterpreterState_Clear", NULL) < 0) { |
730 | 0 | _PyErr_Clear(tstate); |
731 | 0 | } |
732 | | |
733 | | // Clear the current/main thread state last. |
734 | 0 | _Py_FOR_EACH_TSTATE_BEGIN(interp, p) { |
735 | | // See https://github.com/python/cpython/issues/102126 |
736 | | // Must be called without HEAD_LOCK held as it can deadlock |
737 | | // if any finalizer tries to acquire that lock. |
738 | 0 | HEAD_UNLOCK(runtime); |
739 | 0 | PyThreadState_Clear(p); |
740 | 0 | HEAD_LOCK(runtime); |
741 | 0 | } |
742 | 0 | _Py_FOR_EACH_TSTATE_END(interp); |
743 | 0 | if (tstate->interp == interp) { |
744 | | /* We fix tstate->_status below when we for sure aren't using it |
745 | | (e.g. no longer need the GIL). */ |
746 | | // XXX Eliminate the need to do this. |
747 | 0 | tstate->_status.cleared = 0; |
748 | 0 | } |
749 | | |
750 | | /* It is possible that any of the objects below have a finalizer |
751 | | that runs Python code or otherwise relies on a thread state |
752 | | or even the interpreter state. For now we trust that isn't |
753 | | a problem. |
754 | | */ |
755 | | // XXX Make sure we properly deal with problematic finalizers. |
756 | |
757 | 0 | Py_CLEAR(interp->audit_hooks); |
758 | | |
759 | | // gh-140257: Threads have already been cleared, but daemon threads may |
760 | | // still access eval_breaker atomically via take_gil() right before they |
761 | | // hang. Use an atomic store to prevent data races during finalization. |
762 | 0 | interp->ceval.instrumentation_version = 0; |
763 | 0 | _Py_atomic_store_uintptr(&tstate->eval_breaker, 0); |
764 | |
765 | 0 | for (int i = 0; i < _PY_MONITORING_UNGROUPED_EVENTS; i++) { |
766 | 0 | interp->monitors.tools[i] = 0; |
767 | 0 | } |
768 | 0 | for (int t = 0; t < PY_MONITORING_TOOL_IDS; t++) { |
769 | 0 | for (int e = 0; e < _PY_MONITORING_EVENTS; e++) { |
770 | 0 | Py_CLEAR(interp->monitoring_callables[t][e]); |
771 | 0 | } |
772 | 0 | } |
773 | 0 | for (int t = 0; t < PY_MONITORING_TOOL_IDS; t++) { |
774 | 0 | Py_CLEAR(interp->monitoring_tool_names[t]); |
775 | 0 | } |
776 | 0 | interp->_code_object_generation = 0; |
777 | | #ifdef Py_GIL_DISABLED |
778 | | interp->tlbc_indices.tlbc_generation = 0; |
779 | | #endif |
780 | |
781 | 0 | PyConfig_Clear(&interp->config); |
782 | 0 | _PyCodec_Fini(interp); |
783 | |
|
784 | 0 | assert(interp->imports.modules == NULL); |
785 | 0 | assert(interp->imports.modules_by_index == NULL); |
786 | 0 | assert(interp->imports.importlib == NULL); |
787 | 0 | assert(interp->imports.import_func == NULL); |
788 | |
|
789 | 0 | Py_CLEAR(interp->sysdict_copy); |
790 | 0 | Py_CLEAR(interp->builtins_copy); |
791 | 0 | Py_CLEAR(interp->dict); |
792 | 0 | #ifdef HAVE_FORK |
793 | 0 | Py_CLEAR(interp->before_forkers); |
794 | 0 | Py_CLEAR(interp->after_forkers_parent); |
795 | 0 | Py_CLEAR(interp->after_forkers_child); |
796 | 0 | #endif |
797 | | |
798 | |
799 | | #ifdef _Py_TIER2 |
800 | | _Py_ClearExecutorDeletionList(interp); |
801 | | if (interp->jit_uop_buffer != NULL) { |
802 | | _PyObject_VirtualFree(interp->jit_uop_buffer, UOP_BUFFER_SIZE); |
803 | | interp->jit_uop_buffer = NULL; |
804 | | } |
805 | | #endif |
806 | 0 | _PyAST_Fini(interp); |
807 | 0 | _PyAtExit_Fini(interp); |
808 | | |
809 | | // All Python types must be destroyed before the last GC collection. Python |
811 | | // types create a reference cycle to themselves in their |
811 | | // PyTypeObject.tp_mro member (the tuple contains the type). |
812 | | |
813 | | /* Last garbage collection on this interpreter */ |
814 | 0 | _PyGC_CollectNoFail(tstate); |
815 | 0 | _PyGC_Fini(interp); |
816 | | |
817 | | // Finalize warnings after last gc so that any finalizers can |
818 | | // access warnings state |
819 | 0 | _PyWarnings_Fini(interp); |
820 | 0 | struct _PyExecutorObject *cold = interp->cold_executor; |
821 | 0 | if (cold != NULL) { |
822 | 0 | interp->cold_executor = NULL; |
823 | 0 | assert(cold->vm_data.valid); |
824 | 0 | assert(cold->vm_data.warm); |
825 | 0 | _PyExecutor_Free(cold); |
826 | 0 | } |
827 | | /* We don't clear sysdict and builtins until the end of this function. |
828 | | Because clearing other attributes can execute arbitrary Python code |
829 | | which requires sysdict and builtins. */ |
830 | 0 | PyDict_Clear(interp->sysdict); |
831 | 0 | PyDict_Clear(interp->builtins); |
832 | 0 | Py_CLEAR(interp->sysdict); |
833 | 0 | Py_CLEAR(interp->builtins); |
834 | |
835 | | #if !defined(Py_GIL_DISABLED) && defined(Py_STACKREF_DEBUG) |
836 | | # ifdef Py_STACKREF_CLOSE_DEBUG |
837 | | _Py_hashtable_destroy(interp->closed_stackrefs_table); |
838 | | interp->closed_stackrefs_table = NULL; |
839 | | # endif |
840 | | _Py_stackref_report_leaks(interp); |
841 | | _Py_hashtable_destroy(interp->open_stackrefs_table); |
842 | | interp->open_stackrefs_table = NULL; |
843 | | #endif |
844 | |
845 | 0 | if (tstate->interp == interp) { |
846 | | /* We are now safe to fix tstate->_status.cleared. */ |
847 | | // XXX Do this (much) earlier? |
848 | 0 | tstate->_status.cleared = 1; |
849 | 0 | } |
850 | |
851 | 0 | for (int i=0; i < DICT_MAX_WATCHERS; i++) { |
852 | 0 | interp->dict_state.watchers[i] = NULL; |
853 | 0 | } |
854 | |
855 | 0 | for (int i=0; i < TYPE_MAX_WATCHERS; i++) { |
856 | 0 | interp->type_watchers[i] = NULL; |
857 | 0 | } |
858 | |
859 | 0 | for (int i=0; i < FUNC_MAX_WATCHERS; i++) { |
860 | 0 | interp->func_watchers[i] = NULL; |
861 | 0 | } |
862 | 0 | interp->active_func_watchers = 0; |
863 | |
864 | 0 | for (int i=0; i < CODE_MAX_WATCHERS; i++) { |
865 | 0 | interp->code_watchers[i] = NULL; |
866 | 0 | } |
867 | 0 | interp->active_code_watchers = 0; |
868 | |
869 | 0 | for (int i=0; i < CONTEXT_MAX_WATCHERS; i++) { |
870 | 0 | interp->context_watchers[i] = NULL; |
871 | 0 | } |
872 | 0 | interp->active_context_watchers = 0; |
873 | | // XXX Once we have one allocator per interpreter (i.e. |
874 | | // per-interpreter GC) we must ensure that all of the interpreter's |
875 | | // objects have been cleaned up by that point. |
876 | | |
877 | | // We could clear interp->threads.freelist here |
878 | | // if it held more than just the initial thread state. |
879 | 0 | } |
880 | | |
881 | | |
882 | | void |
883 | | PyInterpreterState_Clear(PyInterpreterState *interp) |
884 | 0 | { |
885 | | // Use the current Python thread state to call audit hooks and to collect |
886 | | // garbage. It can be different than the current Python thread state |
887 | | // of 'interp'. |
888 | 0 | PyThreadState *current_tstate = current_fast_get(); |
889 | 0 | _PyImport_ClearCore(interp); |
890 | 0 | interpreter_clear(interp, current_tstate); |
891 | 0 | } |
892 | | |
893 | | |
894 | | void |
895 | | _PyInterpreterState_Clear(PyThreadState *tstate) |
896 | 0 | { |
897 | 0 | _PyImport_ClearCore(tstate->interp); |
898 | 0 | interpreter_clear(tstate->interp, tstate); |
899 | 0 | } |
900 | | |
901 | | |
902 | | static inline void tstate_deactivate(PyThreadState *tstate); |
903 | | static void tstate_set_detached(PyThreadState *tstate, int detached_state); |
904 | | static void zapthreads(PyInterpreterState *interp); |
905 | | |
906 | | void |
907 | | PyInterpreterState_Delete(PyInterpreterState *interp) |
908 | 0 | { |
909 | 0 | _PyRuntimeState *runtime = interp->runtime; |
910 | 0 | struct pyinterpreters *interpreters = &runtime->interpreters; |
911 | | |
912 | | // XXX Clearing the "current" thread state should happen before |
913 | | // we start finalizing the interpreter (or the current thread state). |
914 | 0 | PyThreadState *tcur = current_fast_get(); |
915 | 0 | if (tcur != NULL && interp == tcur->interp) { |
916 | | /* Unset current thread. After this, many C API calls become crashy. */ |
917 | 0 | _PyThreadState_Detach(tcur); |
918 | 0 | } |
919 | |
920 | 0 | zapthreads(interp); |
921 | | |
922 | | // XXX These two calls should be done at the end of clear_interpreter(), |
923 | | // but currently some objects get decref'ed after that. |
924 | | #ifdef Py_REF_DEBUG |
925 | | _PyInterpreterState_FinalizeRefTotal(interp); |
926 | | #endif |
927 | 0 | _PyInterpreterState_FinalizeAllocatedBlocks(interp); |
928 | |
929 | 0 | HEAD_LOCK(runtime); |
930 | 0 | PyInterpreterState **p; |
931 | 0 | for (p = &interpreters->head; ; p = &(*p)->next) { |
932 | 0 | if (*p == NULL) { |
933 | 0 | Py_FatalError("NULL interpreter"); |
934 | 0 | } |
935 | 0 | if (*p == interp) { |
936 | 0 | break; |
937 | 0 | } |
938 | 0 | } |
939 | 0 | if (interp->threads.head != NULL) { |
940 | 0 | Py_FatalError("remaining threads"); |
941 | 0 | } |
942 | 0 | *p = interp->next; |
943 | |
944 | 0 | if (interpreters->main == interp) { |
945 | 0 | interpreters->main = NULL; |
946 | 0 | if (interpreters->head != NULL) { |
947 | 0 | Py_FatalError("remaining subinterpreters"); |
948 | 0 | } |
949 | 0 | } |
950 | 0 | HEAD_UNLOCK(runtime); |
951 | |
952 | 0 | _Py_qsbr_fini(interp); |
953 | |
954 | 0 | _PyObject_FiniState(interp); |
955 | |
956 | 0 | PyConfig_Clear(&interp->config); |
957 | |
958 | 0 | free_interpreter(interp); |
959 | 0 | } |
960 | | |
961 | | |
962 | | #ifdef HAVE_FORK |
963 | | /* |
964 | | * Delete all interpreter states except the main interpreter. If there |
965 | | * is a current interpreter state, it *must* be the main interpreter. |
966 | | */ |
967 | | PyStatus |
968 | | _PyInterpreterState_DeleteExceptMain(_PyRuntimeState *runtime) |
969 | 0 | { |
970 | 0 | struct pyinterpreters *interpreters = &runtime->interpreters; |
971 | |
972 | 0 | PyThreadState *tstate = _PyThreadState_Swap(runtime, NULL); |
973 | 0 | if (tstate != NULL && tstate->interp != interpreters->main) { |
974 | 0 | return _PyStatus_ERR("not main interpreter"); |
975 | 0 | } |
976 | | |
977 | 0 | HEAD_LOCK(runtime); |
978 | 0 | PyInterpreterState *interp = interpreters->head; |
979 | 0 | interpreters->head = NULL; |
980 | 0 | while (interp != NULL) { |
981 | 0 | if (interp == interpreters->main) { |
982 | 0 | interpreters->main->next = NULL; |
983 | 0 | interpreters->head = interp; |
984 | 0 | interp = interp->next; |
985 | 0 | continue; |
986 | 0 | } |
987 | | |
988 | | // XXX Won't this fail since PyInterpreterState_Clear() requires |
989 | | // the "current" tstate to be set? |
990 | 0 | PyInterpreterState_Clear(interp); // XXX must activate? |
991 | 0 | zapthreads(interp); |
992 | 0 | PyInterpreterState *prev_interp = interp; |
993 | 0 | interp = interp->next; |
994 | 0 | free_interpreter(prev_interp); |
995 | 0 | } |
996 | 0 | HEAD_UNLOCK(runtime); |
997 | |
998 | 0 | if (interpreters->head == NULL) { |
999 | 0 | return _PyStatus_ERR("missing main interpreter"); |
1000 | 0 | } |
1001 | 0 | _PyThreadState_Swap(runtime, tstate); |
1002 | 0 | return _PyStatus_OK(); |
1003 | 0 | } |
1004 | | #endif |
1005 | | |
1006 | | static inline void |
1007 | | set_main_thread(PyInterpreterState *interp, PyThreadState *tstate) |
1008 | 0 | { |
1009 | 0 | _Py_atomic_store_ptr_relaxed(&interp->threads.main, tstate); |
1010 | 0 | } |
1011 | | |
1012 | | static inline PyThreadState * |
1013 | | get_main_thread(PyInterpreterState *interp) |
1014 | 0 | { |
1015 | 0 | return _Py_atomic_load_ptr_relaxed(&interp->threads.main); |
1016 | 0 | } |
1017 | | |
1018 | | void |
1019 | | _PyErr_SetInterpreterAlreadyRunning(void) |
1020 | 0 | { |
1021 | 0 | PyErr_SetString(PyExc_InterpreterError, "interpreter already running"); |
1022 | 0 | } |
1023 | | |
1024 | | int |
1025 | | _PyInterpreterState_SetRunningMain(PyInterpreterState *interp) |
1026 | 0 | { |
1027 | 0 | if (get_main_thread(interp) != NULL) { |
1028 | 0 | _PyErr_SetInterpreterAlreadyRunning(); |
1029 | 0 | return -1; |
1030 | 0 | } |
1031 | 0 | PyThreadState *tstate = current_fast_get(); |
1032 | 0 | _Py_EnsureTstateNotNULL(tstate); |
1033 | 0 | if (tstate->interp != interp) { |
1034 | 0 | PyErr_SetString(PyExc_RuntimeError, |
1035 | 0 | "current tstate has wrong interpreter"); |
1036 | 0 | return -1; |
1037 | 0 | } |
1038 | 0 | set_main_thread(interp, tstate); |
1039 | |
1040 | 0 | return 0; |
1041 | 0 | } |
1042 | | |
1043 | | void |
1044 | | _PyInterpreterState_SetNotRunningMain(PyInterpreterState *interp) |
1045 | 0 | { |
1046 | 0 | assert(get_main_thread(interp) == current_fast_get()); |
1047 | 0 | set_main_thread(interp, NULL); |
1048 | 0 | } |
1049 | | |
1050 | | int |
1051 | | _PyInterpreterState_IsRunningMain(PyInterpreterState *interp) |
1052 | 0 | { |
1053 | 0 | if (get_main_thread(interp) != NULL) { |
1054 | 0 | return 1; |
1055 | 0 | } |
1056 | | // Embedders might not know to call _PyInterpreterState_SetRunningMain(), |
1057 | | // so their main thread wouldn't show it is running the main interpreter's |
1058 | | // program. (Py_Main() doesn't have this problem.) For now this isn't |
1059 | | // critical. If it were, we would need to infer "running main" from other |
1060 | | // information, like if it's the main interpreter. We used to do that |
1061 | | // but the naive approach led to some inconsistencies that caused problems. |
1062 | 0 | return 0; |
1063 | 0 | } |
1064 | | |
1065 | | int |
1066 | | _PyThreadState_IsRunningMain(PyThreadState *tstate) |
1067 | 0 | { |
1068 | 0 | PyInterpreterState *interp = tstate->interp; |
1069 | | // See the note in _PyInterpreterState_IsRunningMain() about |
1070 | | // possible false negatives here for embedders. |
1071 | 0 | return get_main_thread(interp) == tstate; |
1072 | 0 | } |
1073 | | |
1074 | | void |
1075 | | _PyInterpreterState_ReinitRunningMain(PyThreadState *tstate) |
1076 | 0 | { |
1077 | 0 | PyInterpreterState *interp = tstate->interp; |
1078 | 0 | if (get_main_thread(interp) != tstate) { |
1079 | 0 | set_main_thread(interp, NULL); |
1080 | 0 | } |
1081 | 0 | } |
1082 | | |
1083 | | |
1084 | | //---------- |
1085 | | // accessors |
1086 | | //---------- |
1087 | | |
1088 | | int |
1089 | | _PyInterpreterState_IsReady(PyInterpreterState *interp) |
1090 | 0 | { |
1091 | 0 | return interp->_ready; |
1092 | 0 | } |
1093 | | |
1094 | | #ifndef NDEBUG |
1095 | | static inline int |
1096 | | check_interpreter_whence(long whence) |
1097 | | { |
1098 | | if (whence < 0) { |
1099 | | return -1; |
1100 | | } |
1101 | | if (whence > _PyInterpreterState_WHENCE_MAX) { |
1102 | | return -1; |
1103 | | } |
1104 | | return 0; |
1105 | | } |
1106 | | #endif |
1107 | | |
1108 | | long |
1109 | | _PyInterpreterState_GetWhence(PyInterpreterState *interp) |
1110 | 0 | { |
1111 | 0 | assert(check_interpreter_whence(interp->_whence) == 0); |
1112 | 0 | return interp->_whence; |
1113 | 0 | } |
1114 | | |
1115 | | void |
1116 | | _PyInterpreterState_SetWhence(PyInterpreterState *interp, long whence) |
1117 | 16 | { |
1118 | 16 | assert(interp->_whence != _PyInterpreterState_WHENCE_NOTSET); |
1119 | 16 | assert(check_interpreter_whence(whence) == 0); |
1120 | 16 | interp->_whence = whence; |
1121 | 16 | } |
1122 | | |
1123 | | |
1124 | | PyObject * |
1125 | | _Py_GetMainModule(PyThreadState *tstate) |
1126 | 0 | { |
1127 | | // We return None to indicate "not found" or "bogus". |
1128 | 0 | PyObject *modules = _PyImport_GetModulesRef(tstate->interp); |
1129 | 0 | if (modules == Py_None) { |
1130 | 0 | return modules; |
1131 | 0 | } |
1132 | 0 | PyObject *module = NULL; |
1133 | 0 | (void)PyMapping_GetOptionalItem(modules, &_Py_ID(__main__), &module); |
1134 | 0 | Py_DECREF(modules); |
1135 | 0 | if (module == NULL && !PyErr_Occurred()) { |
1136 | 0 | Py_RETURN_NONE; |
1137 | 0 | } |
1138 | 0 | return module; |
1139 | 0 | } |
1140 | | |
1141 | | int |
1142 | | _Py_CheckMainModule(PyObject *module) |
1143 | 0 | { |
1144 | 0 | if (module == NULL || module == Py_None) { |
1145 | 0 | if (!PyErr_Occurred()) { |
1146 | 0 | (void)_PyErr_SetModuleNotFoundError(&_Py_ID(__main__)); |
1147 | 0 | } |
1148 | 0 | return -1; |
1149 | 0 | } |
1150 | 0 | if (!Py_IS_TYPE(module, &PyModule_Type)) { |
1151 | | /* The __main__ module has been tampered with. */ |
1152 | 0 | PyObject *msg = PyUnicode_FromString("invalid __main__ module"); |
1153 | 0 | if (msg != NULL) { |
1154 | 0 | (void)PyErr_SetImportError(msg, &_Py_ID(__main__), NULL); |
1155 | 0 | Py_DECREF(msg); |
1156 | 0 | } |
1157 | 0 | return -1; |
1158 | 0 | } |
1159 | 0 | return 0; |
1160 | 0 | } |
1161 | | |
1162 | | |
1163 | | PyObject * |
1164 | | PyInterpreterState_GetDict(PyInterpreterState *interp) |
1165 | 12 | { |
1166 | 12 | if (interp->dict == NULL) { |
1167 | 6 | interp->dict = PyDict_New(); |
1168 | 6 | if (interp->dict == NULL) { |
1169 | 0 | PyErr_Clear(); |
1170 | 0 | } |
1171 | 6 | } |
1172 | | /* Returning NULL means no per-interpreter dict is available. */ |
1173 | 12 | return interp->dict; |
1174 | 12 | } |
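
Typical extension-side use of this dict is caching per-interpreter state; a minimal sketch (the key name is illustrative):

    #include <Python.h>

    /* Sketch: per-interpreter caching. The dict is owned by the
       interpreter and PyDict_GetItemString() returns a borrowed
       reference. */
    static PyObject *
    get_cached(void)
    {
        PyObject *dict = PyInterpreterState_GetDict(PyInterpreterState_Get());
        if (dict == NULL) {
            PyErr_NoMemory();   /* NULL means no per-interpreter dict */
            return NULL;
        }
        return PyDict_GetItemString(dict, "my_extension_cache");
    }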
1175 | | |
1176 | | |
1177 | | //---------- |
1178 | | // interp ID |
1179 | | //---------- |
1180 | | |
1181 | | int64_t |
1182 | | _PyInterpreterState_ObjectToID(PyObject *idobj) |
1183 | 0 | { |
1184 | 0 | if (!_PyIndex_Check(idobj)) { |
1185 | 0 | PyErr_Format(PyExc_TypeError, |
1186 | 0 | "interpreter ID must be an int, got %.100s", |
1187 | 0 | Py_TYPE(idobj)->tp_name); |
1188 | 0 | return -1; |
1189 | 0 | } |
1190 | | |
1191 | | // This may raise OverflowError. |
1192 | | // For now, we don't worry about whether LLONG_MAX < INT64_MAX. |
1193 | 0 | long long id = PyLong_AsLongLong(idobj); |
1194 | 0 | if (id == -1 && PyErr_Occurred()) { |
1195 | 0 | return -1; |
1196 | 0 | } |
1197 | | |
1198 | 0 | if (id < 0) { |
1199 | 0 | PyErr_Format(PyExc_ValueError, |
1200 | 0 | "interpreter ID must be a non-negative int, got %R", |
1201 | 0 | idobj); |
1202 | 0 | return -1; |
1203 | 0 | } |
1204 | | #if LLONG_MAX > INT64_MAX |
1205 | | else if (id > INT64_MAX) { |
1206 | | PyErr_SetString(PyExc_OverflowError, "int too big to convert"); |
1207 | | return -1; |
1208 | | } |
1209 | | #endif |
1210 | 0 | else { |
1211 | 0 | return (int64_t)id; |
1212 | 0 | } |
1213 | 0 | } |
1214 | | |
1215 | | int64_t |
1216 | | PyInterpreterState_GetID(PyInterpreterState *interp) |
1217 | 0 | { |
1218 | 0 | if (interp == NULL) { |
1219 | 0 | PyErr_SetString(PyExc_RuntimeError, "no interpreter provided"); |
1220 | 0 | return -1; |
1221 | 0 | } |
1222 | 0 | return interp->id; |
1223 | 0 | } |
1224 | | |
1225 | | PyObject * |
1226 | | _PyInterpreterState_GetIDObject(PyInterpreterState *interp) |
1227 | 0 | { |
1228 | 0 | int64_t interpid = interp->id; |
1229 | 0 | if (interpid < 0) { |
1230 | 0 | return NULL; |
1231 | 0 | } |
1232 | 0 | assert(interpid < LLONG_MAX); |
1233 | 0 | return PyLong_FromLongLong(interpid); |
1234 | 0 | } |
1235 | | |
1236 | | |
1237 | | |
1238 | | void |
1239 | | _PyInterpreterState_IDIncref(PyInterpreterState *interp) |
1240 | 0 | { |
1241 | 0 | _Py_atomic_add_ssize(&interp->id_refcount, 1); |
1242 | 0 | } |
1243 | | |
1244 | | |
1245 | | void |
1246 | | _PyInterpreterState_IDDecref(PyInterpreterState *interp) |
1247 | 0 | { |
1248 | 0 | _PyRuntimeState *runtime = interp->runtime; |
1249 | |
1250 | 0 | Py_ssize_t refcount = _Py_atomic_add_ssize(&interp->id_refcount, -1); |
1251 | |
1252 | 0 | if (refcount == 1 && interp->requires_idref) { |
1253 | 0 | PyThreadState *tstate = |
1254 | 0 | _PyThreadState_NewBound(interp, _PyThreadState_WHENCE_FINI); |
1255 | | |
1256 | | // XXX Possible GILState issues? |
1257 | 0 | PyThreadState *save_tstate = _PyThreadState_Swap(runtime, tstate); |
1258 | 0 | Py_EndInterpreter(tstate); |
1259 | 0 | _PyThreadState_Swap(runtime, save_tstate); |
1260 | 0 | } |
1261 | 0 | } |
1262 | | |
1263 | | int |
1264 | | _PyInterpreterState_RequiresIDRef(PyInterpreterState *interp) |
1265 | 0 | { |
1266 | 0 | return interp->requires_idref; |
1267 | 0 | } |
1268 | | |
1269 | | void |
1270 | | _PyInterpreterState_RequireIDRef(PyInterpreterState *interp, int required) |
1271 | 0 | { |
1272 | 0 | interp->requires_idref = required ? 1 : 0; |
1273 | 0 | } |
1274 | | |
1275 | | |
1276 | | //----------------------------- |
1277 | | // look up an interpreter state |
1278 | | //----------------------------- |
1279 | | |
1280 | | /* Return the interpreter associated with the current OS thread. |
1281 | | |
1282 | | The GIL must be held. |
1283 | | */ |
1284 | | |
1285 | | PyInterpreterState* |
1286 | | PyInterpreterState_Get(void) |
1287 | 35 | { |
1288 | 35 | _Py_AssertHoldsTstate(); |
1289 | 35 | PyInterpreterState *interp = _Py_tss_interp; |
1290 | 35 | if (interp == NULL) { |
1291 | 0 | Py_FatalError("no current interpreter"); |
1292 | 0 | } |
1293 | 35 | return interp; |
1294 | 35 | } |
1295 | | |
1296 | | |
1297 | | static PyInterpreterState * |
1298 | | interp_look_up_id(_PyRuntimeState *runtime, int64_t requested_id) |
1299 | 0 | { |
1300 | 0 | PyInterpreterState *interp = runtime->interpreters.head; |
1301 | 0 | while (interp != NULL) { |
1302 | 0 | int64_t id = interp->id; |
1303 | 0 | assert(id >= 0); |
1304 | 0 | if (requested_id == id) { |
1305 | 0 | return interp; |
1306 | 0 | } |
1307 | 0 | interp = PyInterpreterState_Next(interp); |
1308 | 0 | } |
1309 | 0 | return NULL; |
1310 | 0 | } |
1311 | | |
1312 | | /* Return the interpreter state with the given ID. |
1313 | | |
1314 | | Fail with RuntimeError if the interpreter is not found. */ |
1315 | | |
1316 | | PyInterpreterState * |
1317 | | _PyInterpreterState_LookUpID(int64_t requested_id) |
1318 | 0 | { |
1319 | 0 | PyInterpreterState *interp = NULL; |
1320 | 0 | if (requested_id >= 0) { |
1321 | 0 | _PyRuntimeState *runtime = &_PyRuntime; |
1322 | 0 | HEAD_LOCK(runtime); |
1323 | 0 | interp = interp_look_up_id(runtime, requested_id); |
1324 | 0 | HEAD_UNLOCK(runtime); |
1325 | 0 | } |
1326 | 0 | if (interp == NULL && !PyErr_Occurred()) { |
1327 | 0 | PyErr_Format(PyExc_InterpreterNotFoundError, |
1328 | 0 | "unrecognized interpreter ID %lld", requested_id); |
1329 | 0 | } |
1330 | 0 | return interp; |
1331 | 0 | } |
1332 | | |
1333 | | PyInterpreterState * |
1334 | | _PyInterpreterState_LookUpIDObject(PyObject *requested_id) |
1335 | 0 | { |
1336 | 0 | int64_t id = _PyInterpreterState_ObjectToID(requested_id); |
1337 | 0 | if (id < 0) { |
1338 | 0 | return NULL; |
1339 | 0 | } |
1340 | 0 | return _PyInterpreterState_LookUpID(id); |
1341 | 0 | } |
1342 | | |
1343 | | |
1344 | | /********************************/ |
1345 | | /* the per-thread runtime state */ |
1346 | | /********************************/ |
1347 | | |
1348 | | #ifndef NDEBUG |
1349 | | static inline int |
1350 | | tstate_is_alive(PyThreadState *tstate) |
1351 | | { |
1352 | | return (tstate->_status.initialized && |
1353 | | !tstate->_status.finalized && |
1354 | | !tstate->_status.cleared && |
1355 | | !tstate->_status.finalizing); |
1356 | | } |
1357 | | #endif |
1358 | | |
1359 | | |
1360 | | //---------- |
1361 | | // lifecycle |
1362 | | //---------- |
1363 | | |
1364 | | static _PyStackChunk* |
1365 | | allocate_chunk(int size_in_bytes, _PyStackChunk* previous) |
1366 | 163k | { |
1367 | 163k | assert(size_in_bytes % sizeof(PyObject **) == 0); |
1368 | 163k | _PyStackChunk *res = _PyObject_VirtualAlloc(size_in_bytes); |
1369 | 163k | if (res == NULL) { |
1370 | 0 | return NULL; |
1371 | 0 | } |
1372 | 163k | res->previous = previous; |
1373 | 163k | res->size = size_in_bytes; |
1374 | 163k | res->top = 0; |
1375 | 163k | return res; |
1376 | 163k | } |
1377 | | |
1378 | | static void |
1379 | | reset_threadstate(_PyThreadStateImpl *tstate) |
1380 | 0 | { |
1381 | | // Set to _PyThreadState_INIT directly? |
1382 | 0 | memcpy(tstate, |
1383 | 0 | &initial._main_interpreter._initial_thread, |
1384 | 0 | sizeof(*tstate)); |
1385 | 0 | } |
1386 | | |
1387 | | static _PyThreadStateImpl * |
1388 | | alloc_threadstate(PyInterpreterState *interp) |
1389 | 16 | { |
1390 | 16 | _PyThreadStateImpl *tstate; |
1391 | | |
1392 | | // Try the preallocated tstate first. |
1393 | 16 | tstate = _Py_atomic_exchange_ptr(&interp->threads.preallocated, NULL); |
1394 | | |
1395 | | // Fall back to the allocator. |
1396 | 16 | if (tstate == NULL) { |
1397 | 0 | tstate = PyMem_RawCalloc(1, sizeof(_PyThreadStateImpl)); |
1398 | 0 | if (tstate == NULL) { |
1399 | 0 | return NULL; |
1400 | 0 | } |
1401 | 0 | reset_threadstate(tstate); |
1402 | 0 | } |
1403 | 16 | return tstate; |
1404 | 16 | } |
1405 | | |
1406 | | static void |
1407 | | free_threadstate(_PyThreadStateImpl *tstate) |
1408 | 0 | { |
1409 | 0 | PyInterpreterState *interp = tstate->base.interp; |
1410 | | // The initial thread state of the interpreter is allocated |
1411 | | // as part of the interpreter state, so it should not be freed. |
1412 | 0 | if (tstate == &interp->_initial_thread) { |
1413 | | // Make it available again. |
1414 | 0 | reset_threadstate(tstate); |
1415 | 0 | assert(interp->threads.preallocated == NULL); |
1416 | 0 | _Py_atomic_store_ptr(&interp->threads.preallocated, tstate); |
1417 | 0 | } |
1418 | 0 | else { |
1419 | 0 | PyMem_RawFree(tstate); |
1420 | 0 | } |
1421 | 0 | } |
1422 | | |
1423 | | static void |
1424 | | decref_threadstate(_PyThreadStateImpl *tstate) |
1425 | 0 | { |
1426 | 0 | if (_Py_atomic_add_ssize(&tstate->refcount, -1) == 1) { |
1427 | | // The last reference to the thread state is gone. |
1428 | 0 | free_threadstate(tstate); |
1429 | 0 | } |
1430 | 0 | } |
1431 | | |
1432 | | /* Get the thread state to a minimal consistent state. |
1433 | | Further init happens in pylifecycle.c before it can be used. |
1434 | | All fields not initialized here are expected to be zeroed out, |
1435 | | e.g. by PyMem_RawCalloc() or memset(), or otherwise pre-initialized. |
1436 | | The interpreter state is not manipulated. Instead it is assumed that |
1437 | | the thread is getting added to the interpreter. |
1438 | | */ |
1439 | | |
1440 | | static void |
1441 | | init_threadstate(_PyThreadStateImpl *_tstate, |
1442 | | PyInterpreterState *interp, uint64_t id, int whence) |
1443 | 16 | { |
1444 | 16 | PyThreadState *tstate = (PyThreadState *)_tstate; |
1445 | 16 | if (tstate->_status.initialized) { |
1446 | 0 | Py_FatalError("thread state already initialized"); |
1447 | 0 | } |
1448 | | |
1449 | 16 | assert(interp != NULL); |
1450 | 16 | tstate->interp = interp; |
1451 | 16 | tstate->eval_breaker = |
1452 | 16 | _Py_atomic_load_uintptr_relaxed(&interp->ceval.instrumentation_version); |
1453 | | |
1454 | | // next/prev are set in add_threadstate(). |
1455 | 16 | assert(tstate->next == NULL); |
1456 | 16 | assert(tstate->prev == NULL); |
1457 | | |
1458 | 16 | assert(tstate->_whence == _PyThreadState_WHENCE_NOTSET); |
1459 | 16 | assert(whence >= 0 && whence <= _PyThreadState_WHENCE_THREADING_DAEMON); |
1460 | 16 | tstate->_whence = whence; |
1461 | | |
1462 | 16 | assert(id > 0); |
1463 | 16 | tstate->id = id; |
1464 | | |
1465 | | // thread_id and native_thread_id are set in bind_tstate(). |
1466 | | |
1467 | 16 | tstate->py_recursion_limit = interp->ceval.recursion_limit; |
1468 | 16 | tstate->py_recursion_remaining = interp->ceval.recursion_limit; |
1469 | 16 | tstate->exc_info = &tstate->exc_state; |
1470 | | |
1471 | | // PyGILState_Release must not try to delete this thread state. |
1472 | | // This is cleared when PyGILState_Ensure() creates the thread state. |
1473 | 16 | tstate->gilstate_counter = 1; |
1474 | | |
1475 | 16 | tstate->current_frame = NULL; |
1476 | 16 | tstate->datastack_chunk = NULL; |
1477 | 16 | tstate->datastack_top = NULL; |
1478 | 16 | tstate->datastack_limit = NULL; |
1479 | 16 | tstate->what_event = -1; |
1480 | 16 | tstate->current_executor = NULL; |
1481 | 16 | tstate->jit_exit = NULL; |
1482 | 16 | tstate->dict_global_version = 0; |
1483 | | |
1484 | 16 | _tstate->c_stack_soft_limit = UINTPTR_MAX; |
1485 | 16 | _tstate->c_stack_top = 0; |
1486 | 16 | _tstate->c_stack_hard_limit = 0; |
1487 | | |
1488 | 16 | _tstate->asyncio_running_loop = NULL; |
1489 | 16 | _tstate->asyncio_running_task = NULL; |
1490 | | |
1491 | 16 | tstate->delete_later = NULL; |
1492 | | |
1493 | 16 | llist_init(&_tstate->mem_free_queue); |
1494 | 16 | llist_init(&_tstate->asyncio_tasks_head); |
1495 | 16 | if (interp->stoptheworld.requested || _PyRuntime.stoptheworld.requested) { |
1496 | | // Start in the suspended state if there is an ongoing stop-the-world. |
1497 | 0 | tstate->state = _Py_THREAD_SUSPENDED; |
1498 | 0 | } |
1499 | | |
1500 | 16 | tstate->_status.initialized = 1; |
1501 | 16 | } |
1502 | | |
1503 | | static void |
1504 | | add_threadstate(PyInterpreterState *interp, PyThreadState *tstate, |
1505 | | PyThreadState *next) |
1506 | 16 | { |
1507 | 16 | assert(interp->threads.head != tstate); |
1508 | 16 | if (next != NULL) { |
1509 | 0 | assert(next->prev == NULL || next->prev == tstate); |
1510 | 0 | next->prev = tstate; |
1511 | 0 | } |
1512 | 16 | tstate->next = next; |
1513 | 16 | assert(tstate->prev == NULL); |
1514 | 16 | interp->threads.head = tstate; |
1515 | 16 | } |
1516 | | |
1517 | | static PyThreadState * |
1518 | | new_threadstate(PyInterpreterState *interp, int whence) |
1519 | 16 | { |
1520 | | // Allocate the thread state. |
1521 | 16 | _PyThreadStateImpl *tstate = alloc_threadstate(interp); |
1522 | 16 | if (tstate == NULL) { |
1523 | 0 | return NULL; |
1524 | 0 | } |
1525 | | |
1526 | | #ifdef Py_GIL_DISABLED |
1527 | | Py_ssize_t qsbr_idx = _Py_qsbr_reserve(interp); |
1528 | | if (qsbr_idx < 0) { |
1529 | | free_threadstate(tstate); |
1530 | | return NULL; |
1531 | | } |
1532 | | int32_t tlbc_idx = _Py_ReserveTLBCIndex(interp); |
1533 | | if (tlbc_idx < 0) { |
1534 | | free_threadstate(tstate); |
1535 | | return NULL; |
1536 | | } |
1537 | | #endif |
1538 | | |
1539 | | /* We serialize concurrent creation to protect global state. */ |
1540 | 16 | HEAD_LOCK(interp->runtime); |
1541 | | |
1542 | | // Initialize the new thread state. |
1543 | 16 | interp->threads.next_unique_id += 1; |
1544 | 16 | uint64_t id = interp->threads.next_unique_id; |
1545 | 16 | init_threadstate(tstate, interp, id, whence); |
1546 | | |
1547 | | // Add the new thread state to the interpreter. |
1548 | 16 | PyThreadState *old_head = interp->threads.head; |
1549 | 16 | add_threadstate(interp, (PyThreadState *)tstate, old_head); |
1550 | | |
1551 | 16 | HEAD_UNLOCK(interp->runtime); |
1552 | | |
1553 | | #ifdef Py_GIL_DISABLED |
1554 | | // Must be called with the lock unlocked to avoid lock-ordering deadlocks.
1555 | | _Py_qsbr_register(tstate, interp, qsbr_idx); |
1556 | | tstate->tlbc_index = tlbc_idx; |
1557 | | #endif |
1558 | | |
1559 | 16 | return (PyThreadState *)tstate; |
1560 | 16 | } |
1561 | | |
1562 | | PyThreadState * |
1563 | | PyThreadState_New(PyInterpreterState *interp) |
1564 | 0 | { |
1565 | 0 | return _PyThreadState_NewBound(interp, _PyThreadState_WHENCE_UNKNOWN); |
1566 | 0 | } |
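 | | /* A minimal embedding sketch built on the public API above; `interp` is
 | |    assumed to come from the embedder (e.g. PyInterpreterState_Main()).
 | |    Each call is the documented counterpart of the internals in this file: */
 | | #if 0
 | | PyThreadState *ts = PyThreadState_New(interp);
 | | PyEval_RestoreThread(ts);       /* take the GIL and attach `ts` */
 | | /* ... call into the Python C API ... */
 | | PyThreadState_Clear(ts);        /* clear while the GIL is still held */
 | | PyThreadState_DeleteCurrent();  /* delete `ts` and release the GIL */
 | | #endif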
1567 | | |
1568 | | PyThreadState * |
1569 | | _PyThreadState_NewBound(PyInterpreterState *interp, int whence) |
1570 | 0 | { |
1571 | 0 | PyThreadState *tstate = new_threadstate(interp, whence); |
1572 | 0 | if (tstate) { |
1573 | 0 | bind_tstate(tstate); |
1574 | | // This makes sure there's a gilstate tstate bound |
1575 | | // as soon as possible. |
1576 | 0 | if (gilstate_get() == NULL) { |
1577 | 0 | bind_gilstate_tstate(tstate); |
1578 | 0 | } |
1579 | 0 | } |
1580 | 0 | return tstate; |
1581 | 0 | } |
1582 | | |
1583 | | // This must be followed by a call to _PyThreadState_Bind().
1584 | | PyThreadState * |
1585 | | _PyThreadState_New(PyInterpreterState *interp, int whence) |
1586 | 16 | { |
1587 | 16 | return new_threadstate(interp, whence); |
1588 | 16 | } |
1589 | | |
1590 | | // We keep this for stable ABI compatibility.
1591 | | PyAPI_FUNC(PyThreadState*) |
1592 | | _PyThreadState_Prealloc(PyInterpreterState *interp) |
1593 | 0 | { |
1594 | 0 | return _PyThreadState_New(interp, _PyThreadState_WHENCE_UNKNOWN); |
1595 | 0 | } |
1596 | | |
1597 | | // We keep this around for (accidental) stable ABI compatibility. |
1598 | | // Realistically, no extensions are using it. |
1599 | | PyAPI_FUNC(void) |
1600 | | _PyThreadState_Init(PyThreadState *tstate) |
1601 | 0 | { |
1602 | 0 | Py_FatalError("_PyThreadState_Init() is for internal use only"); |
1603 | 0 | } |
1604 | | |
1605 | | |
1606 | | static void |
1607 | | clear_datastack(PyThreadState *tstate) |
1608 | 0 | { |
1609 | 0 | _PyStackChunk *chunk = tstate->datastack_chunk; |
1610 | 0 | tstate->datastack_chunk = NULL; |
1611 | 0 | while (chunk != NULL) { |
1612 | 0 | _PyStackChunk *prev = chunk->previous; |
1613 | 0 | _PyObject_VirtualFree(chunk, chunk->size); |
1614 | 0 | chunk = prev; |
1615 | 0 | } |
1616 | 0 | } |
1617 | | |
1618 | | void |
1619 | | PyThreadState_Clear(PyThreadState *tstate) |
1620 | 0 | { |
1621 | 0 | assert(tstate->_status.initialized && !tstate->_status.cleared); |
1622 | 0 | assert(current_fast_get()->interp == tstate->interp); |
1623 | | // GH-126016: In the _interpreters module, KeyboardInterrupt exceptions |
1624 | | // during PyEval_EvalCode() are sent to finalization, which doesn't let us |
1625 | | // mark threads as "not running main". So, for now this assertion is |
1626 | | // disabled. |
1627 | | // XXX assert(!_PyThreadState_IsRunningMain(tstate)); |
1628 | | // XXX assert(!tstate->_status.bound || tstate->_status.unbound); |
1629 | 0 | tstate->_status.finalizing = 1; // just in case |
1630 | | |
1631 | | /* XXX Conditions we need to enforce: |
1632 | | |
1633 | | * the GIL must be held by the current thread |
1634 | | * current_fast_get()->interp must match tstate->interp |
1635 | | * for the main interpreter, current_fast_get() must be the main thread |
1636 | | */ |
1637 | |
1638 | 0 | int verbose = _PyInterpreterState_GetConfig(tstate->interp)->verbose; |
1639 | |
1640 | 0 | if (verbose && tstate->current_frame != NULL) { |
1641 | | /* bpo-20526: After the main thread calls |
1642 | | _PyInterpreterState_SetFinalizing() in Py_FinalizeEx() |
1643 | | (or in Py_EndInterpreter() for subinterpreters), |
1644 | | threads must exit when trying to take the GIL. |
1645 | | If a thread exits in the middle of _PyEval_EvalFrameDefault(),
1646 | | tstate->frame is not reset to its previous value.
1647 | | This is more likely with daemon threads, but it can happen
1648 | | with regular threads if threading._shutdown() fails |
1649 | | (ex: interrupted by CTRL+C). */ |
1650 | 0 | fprintf(stderr, |
1651 | 0 | "PyThreadState_Clear: warning: thread still has a frame\n"); |
1652 | 0 | } |
1653 | |
1654 | 0 | if (verbose && tstate->current_exception != NULL) { |
1655 | 0 | fprintf(stderr, "PyThreadState_Clear: warning: thread has an exception set\n"); |
1656 | 0 | _PyErr_Print(tstate); |
1657 | 0 | } |
1658 | | |
1659 | | /* At this point tstate shouldn't be used any more, |
1660 | | neither to run Python code nor for other uses. |
1661 | | |
1662 | | This is tricky when current_fast_get() == tstate, in the same way |
1663 | | as noted in interpreter_clear() above. The below finalizers |
1664 | | can possibly run Python code or otherwise use the partially |
1665 | | cleared thread state. For now we trust that isn't a problem |
1666 | | in practice. |
1667 | | */ |
1668 | | // XXX Deal with the possibility of problematic finalizers. |
1669 | | |
1670 | | /* Don't clear tstate->pyframe: it is a borrowed reference */ |
1671 | |
1672 | 0 | Py_CLEAR(tstate->threading_local_key); |
1673 | 0 | Py_CLEAR(tstate->threading_local_sentinel); |
1674 | |
1675 | 0 | Py_CLEAR(((_PyThreadStateImpl *)tstate)->asyncio_running_loop); |
1676 | 0 | Py_CLEAR(((_PyThreadStateImpl *)tstate)->asyncio_running_task); |
1677 | | |
1678 | |
1679 | 0 | PyMutex_Lock(&tstate->interp->asyncio_tasks_lock); |
1680 | | // merge any lingering tasks from the thread state into the
1681 | | // interpreter's task list
1682 | 0 | llist_concat(&tstate->interp->asyncio_tasks_head, |
1683 | 0 | &((_PyThreadStateImpl *)tstate)->asyncio_tasks_head); |
1684 | 0 | PyMutex_Unlock(&tstate->interp->asyncio_tasks_lock); |
1685 | |
1686 | 0 | Py_CLEAR(tstate->dict); |
1687 | 0 | Py_CLEAR(tstate->async_exc); |
1688 | |
1689 | 0 | Py_CLEAR(tstate->current_exception); |
1690 | |
1691 | 0 | Py_CLEAR(tstate->exc_state.exc_value); |
1692 | | |
1693 | | /* The stack of exception states should contain just this thread. */ |
1694 | 0 | if (verbose && tstate->exc_info != &tstate->exc_state) { |
1695 | 0 | fprintf(stderr, |
1696 | 0 | "PyThreadState_Clear: warning: thread still has a generator\n"); |
1697 | 0 | } |
1698 | |
1699 | 0 | if (tstate->c_profilefunc != NULL) { |
1700 | 0 | FT_ATOMIC_ADD_SSIZE(tstate->interp->sys_profiling_threads, -1); |
1701 | 0 | tstate->c_profilefunc = NULL; |
1702 | 0 | } |
1703 | 0 | if (tstate->c_tracefunc != NULL) { |
1704 | 0 | FT_ATOMIC_ADD_SSIZE(tstate->interp->sys_tracing_threads, -1); |
1705 | 0 | tstate->c_tracefunc = NULL; |
1706 | 0 | } |
1707 | |
1708 | 0 | Py_CLEAR(tstate->c_profileobj); |
1709 | 0 | Py_CLEAR(tstate->c_traceobj); |
1710 | |
1711 | 0 | Py_CLEAR(tstate->async_gen_firstiter); |
1712 | 0 | Py_CLEAR(tstate->async_gen_finalizer); |
1713 | |
1714 | 0 | Py_CLEAR(tstate->context); |
1715 | |
1716 | | #ifdef Py_GIL_DISABLED |
1717 | | // Each thread should clear its own freelists in free-threading builds.
1718 | | struct _Py_freelists *freelists = _Py_freelists_GET(); |
1719 | | _PyObject_ClearFreeLists(freelists, 1); |
1720 | | |
1721 | | // Merge our thread-local refcounts into the type's own refcount and |
1722 | | // free our local refcount array. |
1723 | | _PyObject_FinalizePerThreadRefcounts((_PyThreadStateImpl *)tstate); |
1724 | | |
1725 | | // Remove ourselves from the biased reference counting table of threads.
1726 | | _Py_brc_remove_thread(tstate); |
1727 | | |
1728 | | // Release our thread-local copies of the bytecode for reuse by another |
1729 | | // thread |
1730 | | _Py_ClearTLBCIndex((_PyThreadStateImpl *)tstate); |
1731 | | #endif |
1732 | | |
1733 | | // Merge our queue of pointers to be freed into the interpreter queue. |
1734 | 0 | _PyMem_AbandonDelayed(tstate); |
1735 | |
1736 | 0 | _PyThreadState_ClearMimallocHeaps(tstate); |
1737 | |
1738 | 0 | tstate->_status.cleared = 1; |
1739 | | |
1740 | | // XXX Call _PyThreadStateSwap(runtime, NULL) here if "current". |
1741 | | // XXX Do it as early in the function as possible. |
1742 | 0 | } |
1743 | | |
1744 | | static void |
1745 | | decrement_stoptheworld_countdown(struct _stoptheworld_state *stw); |
1746 | | |
1747 | | /* Common code for PyThreadState_Delete() and PyThreadState_DeleteCurrent() */ |
1748 | | static void |
1749 | | tstate_delete_common(PyThreadState *tstate, int release_gil) |
1750 | 0 | { |
1751 | 0 | assert(tstate->_status.cleared && !tstate->_status.finalized); |
1752 | 0 | tstate_verify_not_active(tstate); |
1753 | 0 | assert(!_PyThreadState_IsRunningMain(tstate)); |
1754 | |
1755 | 0 | PyInterpreterState *interp = tstate->interp; |
1756 | 0 | if (interp == NULL) { |
1757 | 0 | Py_FatalError("NULL interpreter"); |
1758 | 0 | } |
1759 | 0 | _PyRuntimeState *runtime = interp->runtime; |
1760 | |
1761 | 0 | HEAD_LOCK(runtime); |
1762 | 0 | if (tstate->prev) { |
1763 | 0 | tstate->prev->next = tstate->next; |
1764 | 0 | } |
1765 | 0 | else { |
1766 | 0 | interp->threads.head = tstate->next; |
1767 | 0 | } |
1768 | 0 | if (tstate->next) { |
1769 | 0 | tstate->next->prev = tstate->prev; |
1770 | 0 | } |
1771 | 0 | if (tstate->state != _Py_THREAD_SUSPENDED) { |
1772 | | // Any ongoing stop-the-world request should not wait for us because |
1773 | | // our thread is getting deleted. |
1774 | 0 | if (interp->stoptheworld.requested) { |
1775 | 0 | decrement_stoptheworld_countdown(&interp->stoptheworld); |
1776 | 0 | } |
1777 | 0 | if (runtime->stoptheworld.requested) { |
1778 | 0 | decrement_stoptheworld_countdown(&runtime->stoptheworld); |
1779 | 0 | } |
1780 | 0 | } |
1781 | |
1782 | | #if defined(Py_REF_DEBUG) && defined(Py_GIL_DISABLED) |
1783 | | // Add our portion of the total refcount to the interpreter's total. |
1784 | | _PyThreadStateImpl *tstate_impl = (_PyThreadStateImpl *)tstate; |
1785 | | tstate->interp->object_state.reftotal += tstate_impl->reftotal; |
1786 | | tstate_impl->reftotal = 0; |
1787 | | assert(tstate_impl->refcounts.values == NULL); |
1788 | | #endif |
1789 | |
1790 | 0 | HEAD_UNLOCK(runtime); |
1791 | | |
1792 | | // XXX Unbind in PyThreadState_Clear(), or earlier |
1793 | | // (and assert not-equal here)? |
1794 | 0 | if (tstate->_status.bound_gilstate) { |
1795 | 0 | unbind_gilstate_tstate(tstate); |
1796 | 0 | } |
1797 | 0 | if (tstate->_status.bound) { |
1798 | 0 | unbind_tstate(tstate); |
1799 | 0 | } |
1800 | | |
1801 | | // XXX Move to PyThreadState_Clear()? |
1802 | 0 | clear_datastack(tstate); |
1803 | |
1804 | 0 | if (release_gil) { |
1805 | 0 | _PyEval_ReleaseLock(tstate->interp, tstate, 1); |
1806 | 0 | } |
1807 | |
1808 | | #ifdef Py_GIL_DISABLED |
1809 | | _Py_qsbr_unregister(tstate); |
1810 | | #endif |
1811 | |
1812 | 0 | tstate->_status.finalized = 1; |
1813 | 0 | } |
1814 | | |
1815 | | static void |
1816 | | zapthreads(PyInterpreterState *interp) |
1817 | 0 | { |
1818 | 0 | PyThreadState *tstate; |
1819 | | /* No need to lock the mutex here because this should only happen |
1820 | | when the threads are all really dead (XXX famous last words). |
1821 | | |
1822 | | Cannot use _Py_FOR_EACH_TSTATE_UNLOCKED because we are freeing |
1823 | | the thread states here. |
1824 | | */ |
1825 | 0 | while ((tstate = interp->threads.head) != NULL) { |
1826 | 0 | tstate_verify_not_active(tstate); |
1827 | 0 | tstate_delete_common(tstate, 0); |
1828 | 0 | free_threadstate((_PyThreadStateImpl *)tstate); |
1829 | 0 | } |
1830 | 0 | } |
1831 | | |
1832 | | |
1833 | | void |
1834 | | PyThreadState_Delete(PyThreadState *tstate) |
1835 | 0 | { |
1836 | 0 | _Py_EnsureTstateNotNULL(tstate); |
1837 | 0 | tstate_verify_not_active(tstate); |
1838 | 0 | tstate_delete_common(tstate, 0); |
1839 | 0 | free_threadstate((_PyThreadStateImpl *)tstate); |
1840 | 0 | } |
1841 | | |
1842 | | |
1843 | | void |
1844 | | _PyThreadState_DeleteCurrent(PyThreadState *tstate) |
1845 | 0 | { |
1846 | 0 | _Py_EnsureTstateNotNULL(tstate); |
1847 | | #ifdef Py_GIL_DISABLED |
1848 | | _Py_qsbr_detach(((_PyThreadStateImpl *)tstate)->qsbr); |
1849 | | #endif |
1850 | 0 | current_fast_clear(tstate->interp->runtime); |
1851 | 0 | tstate_delete_common(tstate, 1); // release GIL as part of call |
1852 | 0 | free_threadstate((_PyThreadStateImpl *)tstate); |
1853 | 0 | } |
1854 | | |
1855 | | void |
1856 | | PyThreadState_DeleteCurrent(void) |
1857 | 0 | { |
1858 | 0 | PyThreadState *tstate = current_fast_get(); |
1859 | 0 | _PyThreadState_DeleteCurrent(tstate); |
1860 | 0 | } |
1861 | | |
1862 | | |
1863 | | // Unlinks and removes all thread states from `tstate->interp`, with the |
1864 | | // exception of the one passed as an argument. However, it does not delete |
1865 | | // these thread states. Instead, it returns the removed thread states as a |
1866 | | // linked list. |
1867 | | // |
1868 | | // Note that if there is a current thread state, it *must* be the one |
1869 | | // passed as argument. Also, this won't touch any interpreters other |
1870 | | // than the current one, since we don't know which thread state should |
1871 | | // be kept in those other interpreters. |
1872 | | PyThreadState * |
1873 | | _PyThreadState_RemoveExcept(PyThreadState *tstate) |
1874 | 0 | { |
1875 | 0 | assert(tstate != NULL); |
1876 | 0 | PyInterpreterState *interp = tstate->interp; |
1877 | 0 | _PyRuntimeState *runtime = interp->runtime; |
1878 | |
1879 | | #ifdef Py_GIL_DISABLED |
1880 | | assert(runtime->stoptheworld.world_stopped); |
1881 | | #endif |
1882 | |
1883 | 0 | HEAD_LOCK(runtime); |
1884 | | /* Remove all thread states, except tstate, from the linked list of |
1885 | | thread states. */ |
1886 | 0 | PyThreadState *list = interp->threads.head; |
1887 | 0 | if (list == tstate) { |
1888 | 0 | list = tstate->next; |
1889 | 0 | } |
1890 | 0 | if (tstate->prev) { |
1891 | 0 | tstate->prev->next = tstate->next; |
1892 | 0 | } |
1893 | 0 | if (tstate->next) { |
1894 | 0 | tstate->next->prev = tstate->prev; |
1895 | 0 | } |
1896 | 0 | tstate->prev = tstate->next = NULL; |
1897 | 0 | interp->threads.head = tstate; |
1898 | 0 | HEAD_UNLOCK(runtime); |
1899 | |
1900 | 0 | return list; |
1901 | 0 | } |
1902 | | |
1903 | | // Deletes the thread states in the linked list `list`. |
1904 | | // |
1905 | | // This is intended to be used in conjunction with _PyThreadState_RemoveExcept. |
1906 | | // |
1907 | | // If `is_after_fork` is true, the thread states are immediately freed. |
1908 | | // Otherwise, they are decref'd because they may still be referenced by an |
1909 | | // OS thread. |
1910 | | void |
1911 | | _PyThreadState_DeleteList(PyThreadState *list, int is_after_fork) |
1912 | 0 | { |
1913 | | // The world can't be stopped because we PyThreadState_Clear() can |
1914 | | // call destructors. |
1915 | 0 | assert(!_PyRuntime.stoptheworld.world_stopped); |
1916 | |
1917 | 0 | PyThreadState *p, *next; |
1918 | 0 | for (p = list; p; p = next) { |
1919 | 0 | next = p->next; |
1920 | 0 | PyThreadState_Clear(p); |
1921 | 0 | if (is_after_fork) { |
1922 | 0 | free_threadstate((_PyThreadStateImpl *)p); |
1923 | 0 | } |
1924 | 0 | else { |
1925 | 0 | decref_threadstate((_PyThreadStateImpl *)p); |
1926 | 0 | } |
1927 | 0 | } |
1928 | 0 | } |
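 | | /* Sketch of the intended pairing described above, as used after fork
 | |    (the surrounding fork handler lives elsewhere in the runtime and its
 | |    exact name is not shown here): */
 | | #if 0
 | | PyThreadState *others = _PyThreadState_RemoveExcept(tstate);
 | | /* ... reinitialize runtime locks, etc. ... */
 | | _PyThreadState_DeleteList(others, /*is_after_fork=*/1);
 | | #endif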
1929 | | |
1930 | | |
1931 | | //---------- |
1932 | | // accessors |
1933 | | //---------- |
1934 | | |
1935 | | /* An extension mechanism to store arbitrary additional per-thread state. |
1936 | | PyThreadState_GetDict() returns a dictionary that can be used to hold such |
1937 | | state; the caller should pick a unique key and store its state there. If |
1938 | | PyThreadState_GetDict() returns NULL, an exception has *not* been raised |
1939 | | and the caller should assume no per-thread state is available. */ |
1940 | | |
1941 | | PyObject * |
1942 | | _PyThreadState_GetDict(PyThreadState *tstate) |
1943 | 7.85M | { |
1944 | 7.85M | assert(tstate != NULL); |
1945 | 7.85M | if (tstate->dict == NULL) { |
1946 | 1 | tstate->dict = PyDict_New(); |
1947 | 1 | if (tstate->dict == NULL) { |
1948 | 0 | _PyErr_Clear(tstate); |
1949 | 0 | } |
1950 | 1 | } |
1951 | 7.85M | return tstate->dict; |
1952 | 7.85M | } |
1953 | | |
1954 | | |
1955 | | PyObject * |
1956 | | PyThreadState_GetDict(void) |
1957 | 7.85M | { |
1958 | 7.85M | PyThreadState *tstate = current_fast_get(); |
1959 | 7.85M | if (tstate == NULL) { |
1960 | 0 | return NULL; |
1961 | 0 | } |
1962 | 7.85M | return _PyThreadState_GetDict(tstate); |
1963 | 7.85M | } |
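 | | /* A usage sketch of the extension mechanism described above; the key
 | |    "my_extension.state" is a made-up example: */
 | | #if 0
 | | static int
 | | store_per_thread_state(PyObject *value)
 | | {
 | |     PyObject *dict = PyThreadState_GetDict();  /* borrowed reference */
 | |     if (dict == NULL) {
 | |         return -1;  /* no per-thread state available; no exception set */
 | |     }
 | |     return PyDict_SetItemString(dict, "my_extension.state", value);
 | | }
 | | #endif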
1964 | | |
1965 | | |
1966 | | PyInterpreterState * |
1967 | | PyThreadState_GetInterpreter(PyThreadState *tstate) |
1968 | 0 | { |
1969 | 0 | assert(tstate != NULL); |
1970 | 0 | return tstate->interp; |
1971 | 0 | } |
1972 | | |
1973 | | |
1974 | | PyFrameObject* |
1975 | | PyThreadState_GetFrame(PyThreadState *tstate) |
1976 | 0 | { |
1977 | 0 | assert(tstate != NULL); |
1978 | 0 | _PyInterpreterFrame *f = _PyThreadState_GetFrame(tstate); |
1979 | 0 | if (f == NULL) { |
1980 | 0 | return NULL; |
1981 | 0 | } |
1982 | 0 | PyFrameObject *frame = _PyFrame_GetFrameObject(f); |
1983 | 0 | if (frame == NULL) { |
1984 | 0 | PyErr_Clear(); |
1985 | 0 | } |
1986 | 0 | return (PyFrameObject*)Py_XNewRef(frame); |
1987 | 0 | } |
1988 | | |
1989 | | |
1990 | | uint64_t |
1991 | | PyThreadState_GetID(PyThreadState *tstate) |
1992 | 0 | { |
1993 | 0 | assert(tstate != NULL); |
1994 | 0 | return tstate->id; |
1995 | 0 | } |
1996 | | |
1997 | | |
1998 | | static inline void |
1999 | | tstate_activate(PyThreadState *tstate) |
2000 | 30.4k | { |
2001 | 30.4k | assert(tstate != NULL); |
2002 | | // XXX assert(tstate_is_alive(tstate)); |
2003 | 30.4k | assert(tstate_is_bound(tstate)); |
2004 | 30.4k | assert(!tstate->_status.active); |
2005 | | |
2006 | 30.4k | assert(!tstate->_status.bound_gilstate || |
2007 | 30.4k | tstate == gilstate_get()); |
2008 | 30.4k | if (!tstate->_status.bound_gilstate) { |
2009 | 0 | bind_gilstate_tstate(tstate); |
2010 | 0 | } |
2011 | | |
2012 | 30.4k | tstate->_status.active = 1; |
2013 | 30.4k | } |
2014 | | |
2015 | | static inline void |
2016 | | tstate_deactivate(PyThreadState *tstate) |
2017 | 30.4k | { |
2018 | 30.4k | assert(tstate != NULL); |
2019 | | // XXX assert(tstate_is_alive(tstate)); |
2020 | 30.4k | assert(tstate_is_bound(tstate)); |
2021 | 30.4k | assert(tstate->_status.active); |
2022 | | |
2023 | 30.4k | tstate->_status.active = 0; |
2024 | | |
2025 | | // We do not unbind the gilstate tstate here. |
2026 | | // It will still be used in PyGILState_Ensure(). |
2027 | 30.4k | } |
2028 | | |
2029 | | static int |
2030 | | tstate_try_attach(PyThreadState *tstate) |
2031 | 30.4k | { |
2032 | | #ifdef Py_GIL_DISABLED |
2033 | | int expected = _Py_THREAD_DETACHED; |
2034 | | return _Py_atomic_compare_exchange_int(&tstate->state, |
2035 | | &expected, |
2036 | | _Py_THREAD_ATTACHED); |
2037 | | #else |
2038 | 30.4k | assert(tstate->state == _Py_THREAD_DETACHED); |
2039 | 30.4k | tstate->state = _Py_THREAD_ATTACHED; |
2040 | 30.4k | return 1; |
2041 | 30.4k | #endif |
2042 | 30.4k | } |
2043 | | |
2044 | | static void |
2045 | | tstate_set_detached(PyThreadState *tstate, int detached_state) |
2046 | 30.4k | { |
2047 | 30.4k | assert(_Py_atomic_load_int_relaxed(&tstate->state) == _Py_THREAD_ATTACHED); |
2048 | | #ifdef Py_GIL_DISABLED |
2049 | | _Py_atomic_store_int(&tstate->state, detached_state); |
2050 | | #else |
2051 | 30.4k | tstate->state = detached_state; |
2052 | 30.4k | #endif |
2053 | 30.4k | } |
2054 | | |
2055 | | static void |
2056 | | tstate_wait_attach(PyThreadState *tstate) |
2057 | 0 | { |
2058 | 0 | do { |
2059 | 0 | int state = _Py_atomic_load_int_relaxed(&tstate->state); |
2060 | 0 | if (state == _Py_THREAD_SUSPENDED) { |
2061 | | // Wait until we're switched out of SUSPENDED to DETACHED. |
2062 | 0 | _PyParkingLot_Park(&tstate->state, &state, sizeof(tstate->state), |
2063 | 0 | /*timeout=*/-1, NULL, /*detach=*/0); |
2064 | 0 | } |
2065 | 0 | else if (state == _Py_THREAD_SHUTTING_DOWN) { |
2066 | | // We're shutting down, so we can't attach. |
2067 | 0 | _PyThreadState_HangThread(tstate); |
2068 | 0 | } |
2069 | 0 | else { |
2070 | 0 | assert(state == _Py_THREAD_DETACHED); |
2071 | 0 | } |
2072 | | // Once we're back in DETACHED we can re-attach |
2073 | 0 | } while (!tstate_try_attach(tstate)); |
2074 | 0 | } |
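 | | /* Summary of the transitions implemented by the helpers above and below:
 | |
 | |      DETACHED  --tstate_try_attach()--------------> ATTACHED
 | |      ATTACHED  --detach_thread()------------------> DETACHED or SUSPENDED
 | |      SUSPENDED --start_the_world() + unpark-------> DETACHED
 | |      any state --_PyThreadState_SetShuttingDown()-> SHUTTING_DOWN
 | |
 | |    A thread that observes SHUTTING_DOWN never attaches again; it calls
 | |    _PyThreadState_HangThread() instead. */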
2075 | | |
2076 | | void |
2077 | | _PyThreadState_Attach(PyThreadState *tstate) |
2078 | 30.4k | { |
2079 | | #if defined(Py_DEBUG) |
2080 | | // This is called from PyEval_RestoreThread(). Like that
2081 | | // function, we need to ensure errno doesn't change.
2082 | | int err = errno; |
2083 | | #endif |
2084 | | |
2085 | 30.4k | _Py_EnsureTstateNotNULL(tstate); |
2086 | 30.4k | if (current_fast_get() != NULL) { |
2087 | 0 | Py_FatalError("non-NULL old thread state"); |
2088 | 0 | } |
2089 | 30.4k | _PyThreadStateImpl *_tstate = (_PyThreadStateImpl *)tstate; |
2090 | 30.4k | if (_tstate->c_stack_hard_limit == 0) { |
2091 | 16 | _Py_InitializeRecursionLimits(tstate); |
2092 | 16 | } |
2093 | | |
2094 | 30.4k | while (1) { |
2095 | 30.4k | _PyEval_AcquireLock(tstate); |
2096 | | |
2097 | | // XXX assert(tstate_is_alive(tstate)); |
2098 | 30.4k | current_fast_set(&_PyRuntime, tstate); |
2099 | 30.4k | if (!tstate_try_attach(tstate)) { |
2100 | 0 | tstate_wait_attach(tstate); |
2101 | 0 | } |
2102 | 30.4k | tstate_activate(tstate); |
2103 | | |
2104 | | #ifdef Py_GIL_DISABLED |
2105 | | if (_PyEval_IsGILEnabled(tstate) && !tstate->holds_gil) { |
2106 | | // The GIL was enabled between our call to _PyEval_AcquireLock() |
2107 | | // and when we attached (the GIL can't go from enabled to disabled |
2108 | | // here because only a thread holding the GIL can disable |
2109 | | // it). Detach and try again. |
2110 | | tstate_set_detached(tstate, _Py_THREAD_DETACHED); |
2111 | | tstate_deactivate(tstate); |
2112 | | current_fast_clear(&_PyRuntime); |
2113 | | continue; |
2114 | | } |
2115 | | _Py_qsbr_attach(((_PyThreadStateImpl *)tstate)->qsbr); |
2116 | | #endif |
2117 | 30.4k | break; |
2118 | 30.4k | } |
2119 | | |
2120 | | // Resume previous critical section. This acquires the lock(s) from the |
2121 | | // top-most critical section. |
2122 | 30.4k | if (tstate->critical_section != 0) { |
2123 | 0 | _PyCriticalSection_Resume(tstate); |
2124 | 0 | } |
2125 | | |
2126 | | #if defined(Py_DEBUG) |
2127 | | errno = err; |
2128 | | #endif |
2129 | 30.4k | } |
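 | | /* For orientation: _PyThreadState_Attach()/_PyThreadState_Detach() are
 | |    what PyEval_RestoreThread()/PyEval_SaveThread() ultimately perform, so
 | |    the familiar macro pair exercises this code path (illustrative): */
 | | #if 0
 | | Py_BEGIN_ALLOW_THREADS
 | | /* blocking system call, made while detached / without the GIL */
 | | Py_END_ALLOW_THREADS
 | | #endif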
2130 | | |
2131 | | static void |
2132 | | detach_thread(PyThreadState *tstate, int detached_state) |
2133 | 30.4k | { |
2134 | | // XXX assert(tstate_is_alive(tstate) && tstate_is_bound(tstate)); |
2135 | 30.4k | assert(_Py_atomic_load_int_relaxed(&tstate->state) == _Py_THREAD_ATTACHED); |
2136 | 30.4k | assert(tstate == current_fast_get()); |
2137 | 30.4k | if (tstate->critical_section != 0) { |
2138 | 0 | _PyCriticalSection_SuspendAll(tstate); |
2139 | 0 | } |
2140 | | #ifdef Py_GIL_DISABLED |
2141 | | _Py_qsbr_detach(((_PyThreadStateImpl *)tstate)->qsbr); |
2142 | | #endif |
2143 | 30.4k | tstate_deactivate(tstate); |
2144 | 30.4k | tstate_set_detached(tstate, detached_state); |
2145 | 30.4k | current_fast_clear(&_PyRuntime); |
2146 | 30.4k | _PyEval_ReleaseLock(tstate->interp, tstate, 0); |
2147 | 30.4k | } |
2148 | | |
2149 | | void |
2150 | | _PyThreadState_Detach(PyThreadState *tstate) |
2151 | 30.4k | { |
2152 | 30.4k | detach_thread(tstate, _Py_THREAD_DETACHED); |
2153 | 30.4k | } |
2154 | | |
2155 | | void |
2156 | | _PyThreadState_Suspend(PyThreadState *tstate) |
2157 | 0 | { |
2158 | 0 | _PyRuntimeState *runtime = &_PyRuntime; |
2159 | |
2160 | 0 | assert(_Py_atomic_load_int_relaxed(&tstate->state) == _Py_THREAD_ATTACHED); |
2161 | |
2162 | 0 | struct _stoptheworld_state *stw = NULL; |
2163 | 0 | HEAD_LOCK(runtime); |
2164 | 0 | if (runtime->stoptheworld.requested) { |
2165 | 0 | stw = &runtime->stoptheworld; |
2166 | 0 | } |
2167 | 0 | else if (tstate->interp->stoptheworld.requested) { |
2168 | 0 | stw = &tstate->interp->stoptheworld; |
2169 | 0 | } |
2170 | 0 | HEAD_UNLOCK(runtime); |
2171 | |
2172 | 0 | if (stw == NULL) { |
2173 | | // Switch directly to "detached" if there is no active stop-the-world |
2174 | | // request. |
2175 | 0 | detach_thread(tstate, _Py_THREAD_DETACHED); |
2176 | 0 | return; |
2177 | 0 | } |
2178 | | |
2179 | | // Switch to "suspended" state. |
2180 | 0 | detach_thread(tstate, _Py_THREAD_SUSPENDED); |
2181 | | |
2182 | | // Decrease the count of remaining threads needing to park. |
2183 | 0 | HEAD_LOCK(runtime); |
2184 | 0 | decrement_stoptheworld_countdown(stw); |
2185 | 0 | HEAD_UNLOCK(runtime); |
2186 | 0 | } |
2187 | | |
2188 | | void |
2189 | | _PyThreadState_SetShuttingDown(PyThreadState *tstate) |
2190 | 0 | { |
2191 | 0 | _Py_atomic_store_int(&tstate->state, _Py_THREAD_SHUTTING_DOWN); |
2192 | | #ifdef Py_GIL_DISABLED |
2193 | | _PyParkingLot_UnparkAll(&tstate->state); |
2194 | | #endif |
2195 | 0 | } |
2196 | | |
2197 | | // Decrease the stop-the-world countdown of threads that still need to
2198 | | // pause. If we are the final thread to pause, notify the requesting thread.
2199 | | static void |
2200 | | decrement_stoptheworld_countdown(struct _stoptheworld_state *stw) |
2201 | 0 | { |
2202 | 0 | assert(stw->thread_countdown > 0); |
2203 | 0 | if (--stw->thread_countdown == 0) { |
2204 | 0 | _PyEvent_Notify(&stw->stop_event); |
2205 | 0 | } |
2206 | 0 | } |
2207 | | |
2208 | | #ifdef Py_GIL_DISABLED |
2209 | | // Interpreter for _Py_FOR_EACH_STW_INTERP(). For global stop-the-world events, |
2210 | | // we start with the first interpreter and then iterate over all interpreters. |
2211 | | // For per-interpreter stop-the-world events, we only operate on the one |
2212 | | // interpreter. |
2213 | | static PyInterpreterState * |
2214 | | interp_for_stop_the_world(struct _stoptheworld_state *stw) |
2215 | | { |
2216 | | return (stw->is_global |
2217 | | ? PyInterpreterState_Head() |
2218 | | : _Py_CONTAINER_OF(stw, PyInterpreterState, stoptheworld)); |
2219 | | } |
2220 | | |
2221 | | // Loops over threads for a stop-the-world event. |
2222 | | // For global: all threads in all interpreters |
2223 | | // For per-interpreter: all threads in the interpreter |
2224 | | #define _Py_FOR_EACH_STW_INTERP(stw, i) \ |
2225 | | for (PyInterpreterState *i = interp_for_stop_the_world((stw)); \ |
2226 | | i != NULL; i = ((stw->is_global) ? i->next : NULL)) |
2227 | | |
2228 | | |
2229 | | // Try to transition threads atomically from the "detached" state to the |
2230 | | // "gc stopped" state. Returns true if all threads are in the "gc stopped" state.
2231 | | static bool |
2232 | | park_detached_threads(struct _stoptheworld_state *stw) |
2233 | | { |
2234 | | int num_parked = 0; |
2235 | | _Py_FOR_EACH_STW_INTERP(stw, i) { |
2236 | | _Py_FOR_EACH_TSTATE_UNLOCKED(i, t) { |
2237 | | int state = _Py_atomic_load_int_relaxed(&t->state); |
2238 | | if (state == _Py_THREAD_DETACHED) { |
2239 | | // Atomically transition to "suspended" if in "detached" state. |
2240 | | if (_Py_atomic_compare_exchange_int( |
2241 | | &t->state, &state, _Py_THREAD_SUSPENDED)) { |
2242 | | num_parked++; |
2243 | | } |
2244 | | } |
2245 | | else if (state == _Py_THREAD_ATTACHED && t != stw->requester) { |
2246 | | _Py_set_eval_breaker_bit(t, _PY_EVAL_PLEASE_STOP_BIT); |
2247 | | } |
2248 | | } |
2249 | | } |
2250 | | stw->thread_countdown -= num_parked; |
2251 | | assert(stw->thread_countdown >= 0); |
2252 | | return num_parked > 0 && stw->thread_countdown == 0; |
2253 | | } |
2254 | | |
2255 | | static void |
2256 | | stop_the_world(struct _stoptheworld_state *stw) |
2257 | | { |
2258 | | _PyRuntimeState *runtime = &_PyRuntime; |
2259 | | |
2260 | | // gh-137433: Acquire the rwmutex first to avoid deadlocks with daemon |
2261 | | // threads that may hang when blocked on lock acquisition. |
2262 | | if (stw->is_global) { |
2263 | | _PyRWMutex_Lock(&runtime->stoptheworld_mutex); |
2264 | | } |
2265 | | else { |
2266 | | _PyRWMutex_RLock(&runtime->stoptheworld_mutex); |
2267 | | } |
2268 | | PyMutex_Lock(&stw->mutex); |
2269 | | |
2270 | | HEAD_LOCK(runtime); |
2271 | | stw->requested = 1; |
2272 | | stw->thread_countdown = 0; |
2273 | | stw->stop_event = (PyEvent){0}; // zero-initialize (unset) |
2274 | | stw->requester = _PyThreadState_GET(); // may be NULL |
2275 | | |
2276 | | _Py_FOR_EACH_STW_INTERP(stw, i) { |
2277 | | _Py_FOR_EACH_TSTATE_UNLOCKED(i, t) { |
2278 | | if (t != stw->requester) { |
2279 | | // Count all the other threads (we don't wait on ourselves).
2280 | | stw->thread_countdown++; |
2281 | | } |
2282 | | } |
2283 | | } |
2284 | | |
2285 | | if (stw->thread_countdown == 0) { |
2286 | | HEAD_UNLOCK(runtime); |
2287 | | stw->world_stopped = 1; |
2288 | | return; |
2289 | | } |
2290 | | |
2291 | | for (;;) { |
2292 | | // Switch threads that are detached to the GC stopped state |
2293 | | bool stopped_all_threads = park_detached_threads(stw); |
2294 | | HEAD_UNLOCK(runtime); |
2295 | | |
2296 | | if (stopped_all_threads) { |
2297 | | break; |
2298 | | } |
2299 | | |
2300 | | PyTime_t wait_ns = 1000*1000; // 1ms (arbitrary, may need tuning) |
2301 | | int detach = 0; |
2302 | | if (PyEvent_WaitTimed(&stw->stop_event, wait_ns, detach)) { |
2303 | | assert(stw->thread_countdown == 0); |
2304 | | break; |
2305 | | } |
2306 | | |
2307 | | HEAD_LOCK(runtime); |
2308 | | } |
2309 | | stw->world_stopped = 1; |
2310 | | } |
2311 | | |
2312 | | static void |
2313 | | start_the_world(struct _stoptheworld_state *stw) |
2314 | | { |
2315 | | _PyRuntimeState *runtime = &_PyRuntime; |
2316 | | assert(PyMutex_IsLocked(&stw->mutex)); |
2317 | | |
2318 | | HEAD_LOCK(runtime); |
2319 | | stw->requested = 0; |
2320 | | stw->world_stopped = 0; |
2321 | | // Switch threads back to the detached state. |
2322 | | _Py_FOR_EACH_STW_INTERP(stw, i) { |
2323 | | _Py_FOR_EACH_TSTATE_UNLOCKED(i, t) { |
2324 | | if (t != stw->requester) { |
2325 | | assert(_Py_atomic_load_int_relaxed(&t->state) == |
2326 | | _Py_THREAD_SUSPENDED); |
2327 | | _Py_atomic_store_int(&t->state, _Py_THREAD_DETACHED); |
2328 | | _PyParkingLot_UnparkAll(&t->state); |
2329 | | } |
2330 | | } |
2331 | | } |
2332 | | stw->requester = NULL; |
2333 | | HEAD_UNLOCK(runtime); |
2334 | | PyMutex_Unlock(&stw->mutex); |
2335 | | if (stw->is_global) { |
2336 | | _PyRWMutex_Unlock(&runtime->stoptheworld_mutex); |
2337 | | } |
2338 | | else { |
2339 | | _PyRWMutex_RUnlock(&runtime->stoptheworld_mutex); |
2340 | | } |
2341 | | } |
2342 | | #endif // Py_GIL_DISABLED |
2343 | | |
2344 | | void |
2345 | | _PyEval_StopTheWorldAll(_PyRuntimeState *runtime) |
2346 | 0 | { |
2347 | | #ifdef Py_GIL_DISABLED |
2348 | | stop_the_world(&runtime->stoptheworld); |
2349 | | #endif |
2350 | 0 | } |
2351 | | |
2352 | | void |
2353 | | _PyEval_StartTheWorldAll(_PyRuntimeState *runtime) |
2354 | 0 | { |
2355 | | #ifdef Py_GIL_DISABLED |
2356 | | start_the_world(&runtime->stoptheworld); |
2357 | | #endif |
2358 | 0 | } |
2359 | | |
2360 | | void |
2361 | | _PyEval_StopTheWorld(PyInterpreterState *interp) |
2362 | 4 | { |
2363 | | #ifdef Py_GIL_DISABLED |
2364 | | stop_the_world(&interp->stoptheworld); |
2365 | | #endif |
2366 | 4 | } |
2367 | | |
2368 | | void |
2369 | | _PyEval_StartTheWorld(PyInterpreterState *interp) |
2370 | 4 | { |
2371 | | #ifdef Py_GIL_DISABLED |
2372 | | start_the_world(&interp->stoptheworld); |
2373 | | #endif |
2374 | 4 | } |
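 | | /* The canonical pairing, as used by _PyInterpreterState_SetEvalFrameFunc()
 | |    later in this file: stop all threads, mutate state that lock-free
 | |    readers may touch, then resume. Illustrative: */
 | | #if 0
 | | _PyEval_StopTheWorld(interp);
 | | interp->eval_frame = eval_frame;  /* safe: every other thread is parked */
 | | _PyEval_StartTheWorld(interp);
 | | #endif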
2375 | | |
2376 | | //---------- |
2377 | | // other API |
2378 | | //---------- |
2379 | | |
2380 | | /* Asynchronously raise an exception in a thread. |
2381 | | Requested by Just van Rossum and Alex Martelli. |
2382 | | To prevent naive misuse, you must write your own extension |
2383 | | to call this, or use ctypes. Must be called with the GIL held. |
2384 | | Returns the number of tstates modified (normally 1, but 0 if `id` didn't |
2385 | | match any known thread id). Can be called with exc=NULL to clear an |
2386 | | existing async exception. This raises no exceptions. */ |
2387 | | |
2388 | | // XXX Move this to Python/ceval_gil.c? |
2389 | | // XXX Deprecate this. |
2390 | | int |
2391 | | PyThreadState_SetAsyncExc(unsigned long id, PyObject *exc) |
2392 | 0 | { |
2393 | 0 | PyInterpreterState *interp = _PyInterpreterState_GET(); |
2394 | | |
2395 | | /* Although the GIL is held, a few C API functions can be called |
2396 | | * without the GIL held, and in particular some that create and |
2397 | | * destroy thread and interpreter states. Those can mutate the |
2398 | | * list of thread states we're traversing, so to prevent that we lock |
2399 | | * head_mutex for the duration. |
2400 | | */ |
2401 | 0 | PyThreadState *tstate = NULL; |
2402 | 0 | _Py_FOR_EACH_TSTATE_BEGIN(interp, t) { |
2403 | 0 | if (t->thread_id == id) { |
2404 | 0 | tstate = t; |
2405 | 0 | break; |
2406 | 0 | } |
2407 | 0 | } |
2408 | 0 | _Py_FOR_EACH_TSTATE_END(interp); |
2409 | |
2410 | 0 | if (tstate != NULL) { |
2411 | | /* Tricky: we need to decref the current value |
2412 | | * (if any) in tstate->async_exc, but that can in turn |
2413 | | * allow arbitrary Python code to run, including |
2414 | | * perhaps calls to this function. To prevent |
2415 | | * deadlock, we need to release head_mutex before |
2416 | | * the decref. |
2417 | | */ |
2418 | 0 | Py_XINCREF(exc); |
2419 | 0 | PyObject *old_exc = _Py_atomic_exchange_ptr(&tstate->async_exc, exc); |
2420 | |
2421 | 0 | Py_XDECREF(old_exc); |
2422 | 0 | _Py_set_eval_breaker_bit(tstate, _PY_ASYNC_EXCEPTION_BIT); |
2423 | 0 | } |
2424 | |
2425 | 0 | return tstate != NULL; |
2426 | 0 | } |
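 | | /* Calling-convention sketch per the comment above; `target_id` is a
 | |    hypothetical thread id obtained elsewhere (e.g. threading.get_ident()
 | |    on the target thread): */
 | | #if 0
 | | int n = PyThreadState_SetAsyncExc(target_id, PyExc_KeyboardInterrupt);
 | | if (n == 0) {
 | |     /* no thread with that id */
 | | }
 | | /* later, to cancel a pending async exception: */
 | | PyThreadState_SetAsyncExc(target_id, NULL);
 | | #endif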
2427 | | |
2428 | | //--------------------------------- |
2429 | | // API for the current thread state |
2430 | | //--------------------------------- |
2431 | | |
2432 | | PyThreadState * |
2433 | | PyThreadState_GetUnchecked(void) |
2434 | 0 | { |
2435 | 0 | return current_fast_get(); |
2436 | 0 | } |
2437 | | |
2438 | | |
2439 | | PyThreadState * |
2440 | | PyThreadState_Get(void) |
2441 | 79.4M | { |
2442 | 79.4M | PyThreadState *tstate = current_fast_get(); |
2443 | 79.4M | _Py_EnsureTstateNotNULL(tstate); |
2444 | 79.4M | return tstate; |
2445 | 79.4M | } |
2446 | | |
2447 | | PyThreadState * |
2448 | | _PyThreadState_Swap(_PyRuntimeState *runtime, PyThreadState *newts) |
2449 | 0 | { |
2450 | 0 | PyThreadState *oldts = current_fast_get(); |
2451 | 0 | if (oldts != NULL) { |
2452 | 0 | _PyThreadState_Detach(oldts); |
2453 | 0 | } |
2454 | 0 | if (newts != NULL) { |
2455 | 0 | _PyThreadState_Attach(newts); |
2456 | 0 | } |
2457 | 0 | return oldts; |
2458 | 0 | } |
2459 | | |
2460 | | PyThreadState * |
2461 | | PyThreadState_Swap(PyThreadState *newts) |
2462 | 0 | { |
2463 | 0 | return _PyThreadState_Swap(&_PyRuntime, newts); |
2464 | 0 | } |
2465 | | |
2466 | | |
2467 | | void |
2468 | | _PyThreadState_Bind(PyThreadState *tstate) |
2469 | 16 | { |
2470 | | // gh-104690: If Python is being finalized and PyInterpreterState_Delete() |
2471 | | // was called, tstate becomes a dangling pointer. |
2472 | 16 | assert(_PyThreadState_CheckConsistency(tstate)); |
2473 | | |
2474 | 16 | bind_tstate(tstate); |
2475 | | // This makes sure there's a gilstate tstate bound |
2476 | | // as soon as possible. |
2477 | 16 | if (gilstate_get() == NULL) { |
2478 | 16 | bind_gilstate_tstate(tstate); |
2479 | 16 | } |
2480 | 16 | } |
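 | | /* The two-step creation this function completes (see the note above
 | |    _PyThreadState_New()): allocate the state first, then bind it from the
 | |    OS thread that will own it. Sketch: */
 | | #if 0
 | | PyThreadState *tstate = _PyThreadState_New(interp, _PyThreadState_WHENCE_UNKNOWN);
 | | /* ... later, running on the new OS thread ... */
 | | _PyThreadState_Bind(tstate);
 | | PyEval_RestoreThread(tstate);  /* attach and take the GIL */
 | | #endif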
2481 | | |
2482 | | #if defined(Py_GIL_DISABLED) && !defined(Py_LIMITED_API) |
2483 | | uintptr_t |
2484 | | _Py_GetThreadLocal_Addr(void) |
2485 | | { |
2486 | | // gh-112535: Use the address of the thread-local PyThreadState variable as |
2487 | | // a unique identifier for the current thread. Each thread has a unique |
2488 | | // _Py_tss_tstate variable with a unique address. |
2489 | | return (uintptr_t)&_Py_tss_tstate; |
2490 | | } |
2491 | | #endif |
2492 | | |
2493 | | /***********************************/ |
2494 | | /* routines for advanced debuggers */ |
2495 | | /***********************************/ |
2496 | | |
2497 | | // (requested by David Beazley) |
2498 | | // Don't use unless you know what you are doing! |
2499 | | |
2500 | | PyInterpreterState * |
2501 | | PyInterpreterState_Head(void) |
2502 | 0 | { |
2503 | 0 | return _PyRuntime.interpreters.head; |
2504 | 0 | } |
2505 | | |
2506 | | PyInterpreterState * |
2507 | | PyInterpreterState_Main(void) |
2508 | 0 | { |
2509 | 0 | return _PyInterpreterState_Main(); |
2510 | 0 | } |
2511 | | |
2512 | | PyInterpreterState * |
2513 | 0 | PyInterpreterState_Next(PyInterpreterState *interp) { |
2514 | 0 | return interp->next; |
2515 | 0 | } |
2516 | | |
2517 | | PyThreadState * |
2518 | 11.8k | PyInterpreterState_ThreadHead(PyInterpreterState *interp) { |
2519 | 11.8k | return interp->threads.head; |
2520 | 11.8k | } |
2521 | | |
2522 | | PyThreadState * |
2523 | 11.8k | PyThreadState_Next(PyThreadState *tstate) { |
2524 | 11.8k | return tstate->next; |
2525 | 11.8k | } |
2526 | | |
2527 | | |
2528 | | /********************************************/ |
2529 | | /* reporting execution state of all threads */ |
2530 | | /********************************************/ |
2531 | | |
2532 | | /* The implementation of sys._current_frames(). This is intended to be |
2533 | | called with the GIL held, as it will be when called via |
2534 | | sys._current_frames(). It's possible it would work fine even without |
2535 | | the GIL held, but we haven't thought enough about that.
2536 | | */ |
2537 | | PyObject * |
2538 | | _PyThread_CurrentFrames(void) |
2539 | 0 | { |
2540 | 0 | _PyRuntimeState *runtime = &_PyRuntime; |
2541 | 0 | PyThreadState *tstate = current_fast_get(); |
2542 | 0 | if (_PySys_Audit(tstate, "sys._current_frames", NULL) < 0) { |
2543 | 0 | return NULL; |
2544 | 0 | } |
2545 | | |
2546 | 0 | PyObject *result = PyDict_New(); |
2547 | 0 | if (result == NULL) { |
2548 | 0 | return NULL; |
2549 | 0 | } |
2550 | | |
2551 | | /* for i in all interpreters: |
2552 | | * for t in all of i's thread states: |
2553 | | * if t's frame isn't NULL, map t's id to its frame |
2554 | | * Because these lists can mutate even when the GIL is held, we |
2555 | | * need to grab head_mutex for the duration. |
2556 | | */ |
2557 | 0 | _PyEval_StopTheWorldAll(runtime); |
2558 | 0 | HEAD_LOCK(runtime); |
2559 | 0 | PyInterpreterState *i; |
2560 | 0 | for (i = runtime->interpreters.head; i != NULL; i = i->next) { |
2561 | 0 | _Py_FOR_EACH_TSTATE_UNLOCKED(i, t) { |
2562 | 0 | _PyInterpreterFrame *frame = t->current_frame; |
2563 | 0 | frame = _PyFrame_GetFirstComplete(frame); |
2564 | 0 | if (frame == NULL) { |
2565 | 0 | continue; |
2566 | 0 | } |
2567 | 0 | PyObject *id = PyLong_FromUnsignedLong(t->thread_id); |
2568 | 0 | if (id == NULL) { |
2569 | 0 | goto fail; |
2570 | 0 | } |
2571 | 0 | PyObject *frameobj = (PyObject *)_PyFrame_GetFrameObject(frame); |
2572 | 0 | if (frameobj == NULL) { |
2573 | 0 | Py_DECREF(id); |
2574 | 0 | goto fail; |
2575 | 0 | } |
2576 | 0 | int stat = PyDict_SetItem(result, id, frameobj); |
2577 | 0 | Py_DECREF(id); |
2578 | 0 | if (stat < 0) { |
2579 | 0 | goto fail; |
2580 | 0 | } |
2581 | 0 | } |
2582 | 0 | } |
2583 | 0 | goto done; |
2584 | | |
2585 | 0 | fail: |
2586 | 0 | Py_CLEAR(result); |
2587 | |
2588 | 0 | done: |
2589 | 0 | HEAD_UNLOCK(runtime); |
2590 | 0 | _PyEval_StartTheWorldAll(runtime); |
2591 | 0 | return result; |
2592 | 0 | } |
2593 | | |
2594 | | /* The implementation of sys._current_exceptions(). This is intended to be |
2595 | | called with the GIL held, as it will be when called via |
2596 | | sys._current_exceptions(). It's possible it would work fine even without |
2597 | | the GIL held, but we haven't thought enough about that.
2598 | | */ |
2599 | | PyObject * |
2600 | | _PyThread_CurrentExceptions(void) |
2601 | 0 | { |
2602 | 0 | _PyRuntimeState *runtime = &_PyRuntime; |
2603 | 0 | PyThreadState *tstate = current_fast_get(); |
2604 | |
2605 | 0 | _Py_EnsureTstateNotNULL(tstate); |
2606 | |
2607 | 0 | if (_PySys_Audit(tstate, "sys._current_exceptions", NULL) < 0) { |
2608 | 0 | return NULL; |
2609 | 0 | } |
2610 | | |
2611 | 0 | PyObject *result = PyDict_New(); |
2612 | 0 | if (result == NULL) { |
2613 | 0 | return NULL; |
2614 | 0 | } |
2615 | | |
2616 | | /* for i in all interpreters: |
2617 | | * for t in all of i's thread states: |
2618 | | * if t's topmost exception isn't NULL, map t's id to it
2619 | | * Because these lists can mutate even when the GIL is held, we |
2620 | | * need to grab head_mutex for the duration. |
2621 | | */ |
2622 | 0 | _PyEval_StopTheWorldAll(runtime); |
2623 | 0 | HEAD_LOCK(runtime); |
2624 | 0 | PyInterpreterState *i; |
2625 | 0 | for (i = runtime->interpreters.head; i != NULL; i = i->next) { |
2626 | 0 | _Py_FOR_EACH_TSTATE_UNLOCKED(i, t) { |
2627 | 0 | _PyErr_StackItem *err_info = _PyErr_GetTopmostException(t); |
2628 | 0 | if (err_info == NULL) { |
2629 | 0 | continue; |
2630 | 0 | } |
2631 | 0 | PyObject *id = PyLong_FromUnsignedLong(t->thread_id); |
2632 | 0 | if (id == NULL) { |
2633 | 0 | goto fail; |
2634 | 0 | } |
2635 | 0 | PyObject *exc = err_info->exc_value; |
2636 | 0 | assert(exc == NULL || |
2637 | 0 | exc == Py_None || |
2638 | 0 | PyExceptionInstance_Check(exc)); |
2639 | |
2640 | 0 | int stat = PyDict_SetItem(result, id, exc == NULL ? Py_None : exc); |
2641 | 0 | Py_DECREF(id); |
2642 | 0 | if (stat < 0) { |
2643 | 0 | goto fail; |
2644 | 0 | } |
2645 | 0 | } |
2646 | 0 | } |
2647 | 0 | goto done; |
2648 | | |
2649 | 0 | fail: |
2650 | 0 | Py_CLEAR(result); |
2651 | |
2652 | 0 | done: |
2653 | 0 | HEAD_UNLOCK(runtime); |
2654 | 0 | _PyEval_StartTheWorldAll(runtime); |
2655 | 0 | return result; |
2656 | 0 | } |
2657 | | |
2658 | | |
2659 | | /***********************************/ |
2660 | | /* Python "auto thread state" API. */ |
2661 | | /***********************************/ |
2662 | | |
2663 | | /* Internal initialization/finalization functions called by |
2664 | | Py_Initialize/Py_FinalizeEx |
2665 | | */ |
2666 | | PyStatus |
2667 | | _PyGILState_Init(PyInterpreterState *interp) |
2668 | 16 | { |
2669 | 16 | if (!_Py_IsMainInterpreter(interp)) { |
2670 | | /* Currently, PyGILState is shared by all interpreters. The main |
2671 | | * interpreter is responsible for initializing it. */
2672 | 0 | return _PyStatus_OK(); |
2673 | 0 | } |
2674 | 16 | _PyRuntimeState *runtime = interp->runtime; |
2675 | 16 | assert(gilstate_get() == NULL); |
2676 | 16 | assert(runtime->gilstate.autoInterpreterState == NULL); |
2677 | 16 | runtime->gilstate.autoInterpreterState = interp; |
2678 | 16 | return _PyStatus_OK(); |
2679 | 16 | } |
2680 | | |
2681 | | void |
2682 | | _PyGILState_Fini(PyInterpreterState *interp) |
2683 | 0 | { |
2684 | 0 | if (!_Py_IsMainInterpreter(interp)) { |
2685 | | /* Currently, PyGILState is shared by all interpreters. The main |
2686 | | * interpreter is responsible for initializing it. */
2687 | 0 | return; |
2688 | 0 | } |
2689 | 0 | interp->runtime->gilstate.autoInterpreterState = NULL; |
2690 | 0 | } |
2691 | | |
2692 | | |
2693 | | // XXX Drop this. |
2694 | | void |
2695 | | _PyGILState_SetTstate(PyThreadState *tstate) |
2696 | 16 | { |
2697 | | /* must init with valid states */ |
2698 | 16 | assert(tstate != NULL); |
2699 | 16 | assert(tstate->interp != NULL); |
2700 | | |
2701 | 16 | if (!_Py_IsMainInterpreter(tstate->interp)) { |
2702 | | /* Currently, PyGILState is shared by all interpreters. The main |
2703 | | * interpreter is responsible for initializing it. */
2704 | 0 | return; |
2705 | 0 | } |
2706 | | |
2707 | | #ifndef NDEBUG |
2708 | | _PyRuntimeState *runtime = tstate->interp->runtime; |
2709 | | |
2710 | | assert(runtime->gilstate.autoInterpreterState == tstate->interp); |
2711 | | assert(gilstate_get() == tstate); |
2712 | | assert(tstate->gilstate_counter == 1); |
2713 | | #endif |
2714 | 16 | } |
2715 | | |
2716 | | PyInterpreterState * |
2717 | | _PyGILState_GetInterpreterStateUnsafe(void) |
2718 | 0 | { |
2719 | 0 | return _PyRuntime.gilstate.autoInterpreterState; |
2720 | 0 | } |
2721 | | |
2722 | | /* The public functions */ |
2723 | | |
2724 | | PyThreadState * |
2725 | | PyGILState_GetThisThreadState(void) |
2726 | 0 | { |
2727 | 0 | return gilstate_get(); |
2728 | 0 | } |
2729 | | |
2730 | | int |
2731 | | PyGILState_Check(void) |
2732 | 0 | { |
2733 | 0 | _PyRuntimeState *runtime = &_PyRuntime; |
2734 | 0 | if (!runtime->gilstate.check_enabled) { |
2735 | 0 | return 1; |
2736 | 0 | } |
2737 | | |
2738 | 0 | PyThreadState *tstate = current_fast_get(); |
2739 | 0 | if (tstate == NULL) { |
2740 | 0 | return 0; |
2741 | 0 | } |
2742 | | |
2743 | 0 | PyThreadState *tcur = gilstate_get(); |
2744 | 0 | return (tstate == tcur); |
2745 | 0 | } |
2746 | | |
2747 | | PyGILState_STATE |
2748 | | PyGILState_Ensure(void) |
2749 | 0 | { |
2750 | 0 | _PyRuntimeState *runtime = &_PyRuntime; |
2751 | | |
2752 | | /* Note that we do not auto-init Python here - apart from |
2753 | | potential races with 2 threads auto-initializing, pep-311 |
2754 | | spells out other issues. Embedders are expected to have |
2755 | | called Py_Initialize(). */ |
2756 | | |
2757 | | /* Ensure that _PyEval_InitThreads() and _PyGILState_Init() have been |
2758 | | called by Py_Initialize() |
2759 | | |
2760 | | TODO: This isn't thread-safe. There's no protection here against |
2761 | | concurrent finalization of the interpreter; it's simply a guard |
2762 | | for *after* the interpreter has finalized. |
2763 | | */ |
2764 | 0 | if (!_PyEval_ThreadsInitialized() || runtime->gilstate.autoInterpreterState == NULL) { |
2765 | 0 | PyThread_hang_thread(); |
2766 | 0 | } |
2767 | | |
2768 | 0 | PyThreadState *tcur = gilstate_get(); |
2769 | 0 | int has_gil; |
2770 | 0 | if (tcur == NULL) { |
2771 | | /* Create a new Python thread state for this thread */ |
2772 | | // XXX Use PyInterpreterState_EnsureThreadState()? |
2773 | 0 | tcur = new_threadstate(runtime->gilstate.autoInterpreterState, |
2774 | 0 | _PyThreadState_WHENCE_GILSTATE); |
2775 | 0 | if (tcur == NULL) { |
2776 | 0 | Py_FatalError("Couldn't create thread-state for new thread"); |
2777 | 0 | } |
2778 | 0 | bind_tstate(tcur); |
2779 | 0 | bind_gilstate_tstate(tcur); |
2780 | | |
2781 | | /* This is our thread state! We'll need to delete it in the |
2782 | | matching call to PyGILState_Release(). */ |
2783 | 0 | assert(tcur->gilstate_counter == 1); |
2784 | 0 | tcur->gilstate_counter = 0; |
2785 | 0 | has_gil = 0; /* new thread state is never current */ |
2786 | 0 | } |
2787 | 0 | else { |
2788 | 0 | has_gil = holds_gil(tcur); |
2789 | 0 | } |
2790 | | |
2791 | 0 | if (!has_gil) { |
2792 | 0 | PyEval_RestoreThread(tcur); |
2793 | 0 | } |
2794 | | |
2795 | | /* Update our counter in the thread-state - no need for locks: |
2796 | | - tcur will remain valid as we hold the GIL. |
2797 | | - the counter is safe as we are the only thread "allowed" |
2798 | | to modify this value |
2799 | | */ |
2800 | 0 | ++tcur->gilstate_counter; |
2801 | |
2802 | 0 | return has_gil ? PyGILState_LOCKED : PyGILState_UNLOCKED; |
2803 | 0 | } |
2804 | | |
2805 | | void |
2806 | | PyGILState_Release(PyGILState_STATE oldstate) |
2807 | 0 | { |
2808 | 0 | PyThreadState *tstate = gilstate_get(); |
2809 | 0 | if (tstate == NULL) { |
2810 | 0 | Py_FatalError("auto-releasing thread-state, " |
2811 | 0 | "but no thread-state for this thread"); |
2812 | 0 | } |
2813 | | |
2814 | | /* We must hold the GIL and have our thread state current */ |
2815 | 0 | if (!holds_gil(tstate)) { |
2816 | 0 | _Py_FatalErrorFormat(__func__, |
2817 | 0 | "thread state %p must be current when releasing", |
2818 | 0 | tstate); |
2819 | 0 | } |
2820 | 0 | --tstate->gilstate_counter; |
2821 | 0 | assert(tstate->gilstate_counter >= 0); /* illegal counter value */ |
2822 | | |
2823 | | /* If we're going to destroy this thread-state, we must |
2824 | | * clear it while the GIL is held, as destructors may run. |
2825 | | */ |
2826 | 0 | if (tstate->gilstate_counter == 0) { |
2827 | | /* can't have been locked when we created it */ |
2828 | 0 | assert(oldstate == PyGILState_UNLOCKED); |
2829 | | // XXX Unbind tstate here. |
2830 | | // gh-119585: `PyThreadState_Clear()` may call destructors that |
2831 | | // themselves use PyGILState_Ensure and PyGILState_Release, so make |
2832 | | // sure that gilstate_counter is not zero when calling it. |
2833 | 0 | ++tstate->gilstate_counter; |
2834 | 0 | PyThreadState_Clear(tstate); |
2835 | 0 | --tstate->gilstate_counter; |
2836 | | /* Delete the thread-state. Note this releases the GIL too! |
2837 | | * It's vital that the GIL be held here, to avoid shutdown |
2838 | | * races; see bugs 225673 and 1061968 (that nasty bug has a |
2839 | | * habit of coming back). |
2840 | | */ |
2841 | 0 | assert(tstate->gilstate_counter == 0); |
2842 | 0 | assert(current_fast_get() == tstate); |
2843 | 0 | _PyThreadState_DeleteCurrent(tstate); |
2844 | 0 | } |
2845 | | /* Release the lock if necessary */ |
2846 | 0 | else if (oldstate == PyGILState_UNLOCKED) { |
2847 | 0 | PyEval_SaveThread(); |
2848 | 0 | } |
2849 | 0 | } |
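 | | /* The canonical pattern for C callbacks arriving on arbitrary threads
 | |    (illustrative): */
 | | #if 0
 | | PyGILState_STATE gstate = PyGILState_Ensure();
 | | assert(PyGILState_Check());
 | | /* ... use the Python C API ... */
 | | PyGILState_Release(gstate);
 | | #endif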
2850 | | |
2851 | | |
2852 | | /*************/ |
2853 | | /* Other API */ |
2854 | | /*************/ |
2855 | | |
2856 | | _PyFrameEvalFunction |
2857 | | _PyInterpreterState_GetEvalFrameFunc(PyInterpreterState *interp) |
2858 | 0 | { |
2859 | 0 | if (interp->eval_frame == NULL) { |
2860 | 0 | return _PyEval_EvalFrameDefault; |
2861 | 0 | } |
2862 | 0 | return interp->eval_frame; |
2863 | 0 | } |
2864 | | |
2865 | | |
2866 | | void |
2867 | | _PyInterpreterState_SetEvalFrameFunc(PyInterpreterState *interp, |
2868 | | _PyFrameEvalFunction eval_frame) |
2869 | 0 | { |
2870 | 0 | if (eval_frame == _PyEval_EvalFrameDefault) { |
2871 | 0 | eval_frame = NULL; |
2872 | 0 | } |
2873 | 0 | if (eval_frame == interp->eval_frame) { |
2874 | 0 | return; |
2875 | 0 | } |
2876 | | #ifdef _Py_TIER2 |
2877 | | if (eval_frame != NULL) { |
2878 | | _Py_Executors_InvalidateAll(interp, 1); |
2879 | | } |
2880 | | #endif |
2881 | 0 | RARE_EVENT_INC(set_eval_frame_func); |
2882 | 0 | _PyEval_StopTheWorld(interp); |
2883 | 0 | interp->eval_frame = eval_frame; |
2884 | 0 | _PyEval_StartTheWorld(interp); |
2885 | 0 | } |
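 | | /* Sketch of a PEP 523 hook wired through the setter above; `my_eval` is a
 | |    hypothetical name, and the signature assumes the current internal
 | |    _PyFrameEvalFunction shape: */
 | | #if 0
 | | static PyObject *
 | | my_eval(PyThreadState *tstate, _PyInterpreterFrame *frame, int throwflag)
 | | {
 | |     /* observe `frame`, then delegate to the default evaluator */
 | |     return _PyEval_EvalFrameDefault(tstate, frame, throwflag);
 | | }
 | | /* ... */
 | | _PyInterpreterState_SetEvalFrameFunc(interp, my_eval);
 | | #endif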
2886 | | |
2887 | | |
2888 | | const PyConfig* |
2889 | | _PyInterpreterState_GetConfig(PyInterpreterState *interp) |
2890 | 84.0M | { |
2891 | 84.0M | return &interp->config; |
2892 | 84.0M | } |
2893 | | |
2894 | | |
2895 | | const PyConfig* |
2896 | | _Py_GetConfig(void) |
2897 | 45.4k | { |
2898 | 45.4k | PyThreadState *tstate = current_fast_get(); |
2899 | 45.4k | _Py_EnsureTstateNotNULL(tstate); |
2900 | 45.4k | return _PyInterpreterState_GetConfig(tstate->interp); |
2901 | 45.4k | } |
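 | | /* Typical read-only use of the returned config, mirroring the `verbose`
 | |    check in PyThreadState_Clear() above (illustrative): */
 | | #if 0
 | | const PyConfig *config = _Py_GetConfig();
 | | if (config->verbose) {
 | |     fprintf(stderr, "verbose mode enabled\n");
 | | }
 | | #endif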
2902 | | |
2903 | | |
2904 | | int |
2905 | | _PyInterpreterState_HasFeature(PyInterpreterState *interp, unsigned long feature) |
2906 | 0 | { |
2907 | 0 | return ((interp->feature_flags & feature) != 0); |
2908 | 0 | } |
2909 | | |
2910 | | |
2911 | 163k | #define MINIMUM_OVERHEAD 1000 |
2912 | | |
2913 | | static PyObject ** |
2914 | | push_chunk(PyThreadState *tstate, int size) |
2915 | 163k | { |
2916 | 163k | int allocate_size = _PY_DATA_STACK_CHUNK_SIZE; |
2917 | 163k | while (allocate_size < (int)sizeof(PyObject*)*(size + MINIMUM_OVERHEAD)) { |
2918 | 0 | allocate_size *= 2; |
2919 | 0 | } |
2920 | 163k | _PyStackChunk *new = allocate_chunk(allocate_size, tstate->datastack_chunk); |
2921 | 163k | if (new == NULL) { |
2922 | 0 | return NULL; |
2923 | 0 | } |
2924 | 163k | if (tstate->datastack_chunk) { |
2925 | 163k | tstate->datastack_chunk->top = tstate->datastack_top - |
2926 | 163k | &tstate->datastack_chunk->data[0]; |
2927 | 163k | } |
2928 | 163k | tstate->datastack_chunk = new; |
2929 | 163k | tstate->datastack_limit = (PyObject **)(((char *)new) + allocate_size); |
2930 | | // When new is the "root" chunk (i.e. new->previous == NULL), we can keep |
2931 | | // _PyThreadState_PopFrame from freeing it later by "skipping" over the |
2932 | | // first element: |
2933 | 163k | PyObject **res = &new->data[new->previous == NULL]; |
2934 | 163k | tstate->datastack_top = res + size; |
2935 | 163k | return res; |
2936 | 163k | } |
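 | | /* Worked example of the sizing loop above, assuming 8-byte PyObject
 | |    pointers and _PY_DATA_STACK_CHUNK_SIZE == 16 KiB (the real constant is
 | |    defined in a header): a request for 5000 slots needs at least
 | |    8 * (5000 + MINIMUM_OVERHEAD) = 48000 bytes, so allocate_size doubles
 | |    twice, 16384 -> 32768 -> 65536, before the chunk is allocated. */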
2937 | | |
2938 | | _PyInterpreterFrame * |
2939 | | _PyThreadState_PushFrame(PyThreadState *tstate, size_t size) |
2940 | 162M | { |
2941 | 162M | assert(size < INT_MAX/sizeof(PyObject *)); |
2942 | 162M | if (_PyThreadState_HasStackSpace(tstate, (int)size)) { |
2943 | 161M | _PyInterpreterFrame *res = (_PyInterpreterFrame *)tstate->datastack_top; |
2944 | 161M | tstate->datastack_top += size; |
2945 | 161M | return res; |
2946 | 161M | } |
2947 | 163k | return (_PyInterpreterFrame *)push_chunk(tstate, (int)size); |
2948 | 162M | } |
2949 | | |
2950 | | void |
2951 | | _PyThreadState_PopFrame(PyThreadState *tstate, _PyInterpreterFrame * frame) |
2952 | 528M | { |
2953 | 528M | assert(tstate->datastack_chunk); |
2954 | 528M | PyObject **base = (PyObject **)frame; |
2955 | 528M | if (base == &tstate->datastack_chunk->data[0]) { |
2956 | 163k | _PyStackChunk *chunk = tstate->datastack_chunk; |
2957 | 163k | _PyStackChunk *previous = chunk->previous; |
2958 | | // push_chunk ensures that the root chunk is never popped: |
2959 | 163k | assert(previous); |
2960 | 163k | tstate->datastack_top = &previous->data[previous->top]; |
2961 | 163k | tstate->datastack_chunk = previous; |
2962 | 163k | _PyObject_VirtualFree(chunk, chunk->size); |
2963 | 163k | tstate->datastack_limit = (PyObject **)(((char *)previous) + previous->size); |
2964 | 163k | } |
2965 | 528M | else { |
2966 | 528M | assert(tstate->datastack_top); |
2967 | 528M | assert(tstate->datastack_top >= base); |
2968 | 528M | tstate->datastack_top = base; |
2969 | 528M | } |
2970 | 528M | } |
2971 | | |
2972 | | |
2973 | | #ifndef NDEBUG |
2974 | | // Check that a Python thread state is valid. In practice, this function is
2975 | | // used on a Python debug build to check whether 'tstate' is a dangling
2976 | | // pointer, i.e. whether the PyThreadState memory has been freed.
2977 | | // |
2978 | | // Usage: |
2979 | | // |
2980 | | // assert(_PyThreadState_CheckConsistency(tstate)); |
2981 | | int |
2982 | | _PyThreadState_CheckConsistency(PyThreadState *tstate) |
2983 | | { |
2984 | | assert(!_PyMem_IsPtrFreed(tstate)); |
2985 | | assert(!_PyMem_IsPtrFreed(tstate->interp)); |
2986 | | return 1; |
2987 | | } |
2988 | | #endif |
2989 | | |
2990 | | |
2991 | | // Check if a Python thread must call _PyThreadState_HangThread(), rather than |
2992 | | // taking the GIL or attaching to the interpreter, because Py_Finalize() has
2993 | | // been called.
2994 | | // |
2995 | | // When this function is called by a daemon thread after Py_Finalize() has been |
2996 | | // called, the GIL may no longer exist. |
2997 | | // |
2998 | | // tstate must be non-NULL. |
2999 | | int |
3000 | | _PyThreadState_MustExit(PyThreadState *tstate) |
3001 | 60.9k | { |
3002 | 60.9k | int state = _Py_atomic_load_int_relaxed(&tstate->state); |
3003 | 60.9k | return state == _Py_THREAD_SHUTTING_DOWN; |
3004 | 60.9k | } |
3005 | | |
3006 | | void |
3007 | | _PyThreadState_HangThread(PyThreadState *tstate) |
3008 | 0 | { |
3009 | 0 | _PyThreadStateImpl *tstate_impl = (_PyThreadStateImpl *)tstate; |
3010 | 0 | decref_threadstate(tstate_impl); |
3011 | 0 | PyThread_hang_thread(); |
3012 | 0 | } |
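 | | /* Intended call-site shape, per the comment above (the real check sits on
 | |    the GIL-acquisition path): */
 | | #if 0
 | | if (_PyThreadState_MustExit(tstate)) {
 | |     _PyThreadState_HangThread(tstate);  /* never returns */
 | | }
 | | #endif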
3013 | | |
3014 | | /********************/ |
3015 | | /* mimalloc support */ |
3016 | | /********************/ |
3017 | | |
3018 | | static void |
3019 | | tstate_mimalloc_bind(PyThreadState *tstate) |
3020 | 16 | { |
3021 | | #ifdef Py_GIL_DISABLED |
3022 | | struct _mimalloc_thread_state *mts = &((_PyThreadStateImpl*)tstate)->mimalloc; |
3023 | | |
3024 | | // Initialize the mimalloc thread state. This must be called from the |
3025 | | // same thread that will use the thread state. The "mem" heap doubles as |
3026 | | // the "backing" heap. |
3027 | | mi_tld_t *tld = &mts->tld; |
3028 | | _mi_tld_init(tld, &mts->heaps[_Py_MIMALLOC_HEAP_MEM]); |
3029 | | llist_init(&mts->page_list); |
3030 | | |
3031 | | // Exiting threads push any remaining in-use segments to the abandoned
3032 | | // pool to be reclaimed later by other threads. We use per-interpreter
3033 | | // pools to keep Python objects from different interpreters separate.
3034 | | tld->segments.abandoned = &tstate->interp->mimalloc.abandoned_pool; |
3035 | | |
3036 | | // Don't fill in the first N bytes up to ob_type in debug builds. The
3037 | | // lock-less dict and list code paths may read ob_tid and the refcount
3038 | | // fields, so those fields must remain valid for a while after deallocation.
3039 | | size_t base_offset = offsetof(PyObject, ob_type); |
3040 | | if (_PyMem_DebugEnabled()) { |
3041 | | // The debug allocator adds two words at the beginning of each block. |
3042 | | base_offset += 2 * sizeof(size_t); |
3043 | | } |
3044 | | size_t debug_offsets[_Py_MIMALLOC_HEAP_COUNT] = { |
3045 | | [_Py_MIMALLOC_HEAP_OBJECT] = base_offset, |
3046 | | [_Py_MIMALLOC_HEAP_GC] = base_offset, |
3047 | | [_Py_MIMALLOC_HEAP_GC_PRE] = base_offset + 2 * sizeof(PyObject *), |
3048 | | }; |
3049 | | |
3050 | | // Initialize each heap |
3051 | | for (uint8_t i = 0; i < _Py_MIMALLOC_HEAP_COUNT; i++) { |
3052 | | _mi_heap_init_ex(&mts->heaps[i], tld, _mi_arena_id_none(), false, i); |
3053 | | mts->heaps[i].debug_offset = (uint8_t)debug_offsets[i]; |
3054 | | } |
3055 | | |
3056 | | // Heaps that store Python objects should use QSBR to delay freeing |
3057 | | // mimalloc pages while there may be concurrent lock-free readers. |
3058 | | mts->heaps[_Py_MIMALLOC_HEAP_OBJECT].page_use_qsbr = true; |
3059 | | mts->heaps[_Py_MIMALLOC_HEAP_GC].page_use_qsbr = true; |
3060 | | mts->heaps[_Py_MIMALLOC_HEAP_GC_PRE].page_use_qsbr = true; |
3061 | | |
3062 | | // By default, object allocations use _Py_MIMALLOC_HEAP_OBJECT. |
3063 | | // _PyObject_GC_New() and similar functions temporarily override this to |
3064 | | // use one of the GC heaps. |
3065 | | mts->current_object_heap = &mts->heaps[_Py_MIMALLOC_HEAP_OBJECT]; |
3066 | | |
3067 | | _Py_atomic_store_int(&mts->initialized, 1); |
3068 | | #endif |
3069 | 16 | } |
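#ifdef Py_GIL_DISABLED
/* Illustrative sketch (hypothetical helper, not in pystate.c): because object
 * allocations route through mts->current_object_heap, code that needs a
 * different heap (e.g. a GC heap) can swap the pointer around the allocation,
 * which is the override pattern the comment above refers to.  Assumes the
 * _mimalloc_thread_state fields used in tstate_mimalloc_bind() above. */
static void *
example_alloc_from_gc_heap(struct _mimalloc_thread_state *mts, size_t size)
{
    mi_heap_t *saved = mts->current_object_heap;
    mts->current_object_heap = &mts->heaps[_Py_MIMALLOC_HEAP_GC];
    void *ptr = mi_heap_malloc(mts->current_object_heap, size);
    mts->current_object_heap = saved;
    return ptr;
}
#endif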
3070 | | |
3071 | | void |
3072 | | _PyThreadState_ClearMimallocHeaps(PyThreadState *tstate) |
3073 | 0 | { |
3074 | | #ifdef Py_GIL_DISABLED |
3075 | | if (!tstate->_status.bound) { |
3076 | | // The mimalloc heaps are only initialized when the thread is bound. |
3077 | | return; |
3078 | | } |
3079 | | |
3080 | | _PyThreadStateImpl *tstate_impl = (_PyThreadStateImpl *)tstate; |
3081 | | for (Py_ssize_t i = 0; i < _Py_MIMALLOC_HEAP_COUNT; i++) { |
3082 | | // Abandon all segments in use by this thread. This pushes them to |
3083 | | // a shared pool to later be reclaimed by other threads. It's important |
3084 | | // to do this before the thread state is destroyed so that objects |
3085 | | // remain visible to the GC. |
3086 | | _mi_heap_collect_abandon(&tstate_impl->mimalloc.heaps[i]); |
3087 | | } |
3088 | | #endif |
3089 | 0 | } |
3090 | | |
3091 | | |
3092 | | int |
3093 | | _Py_IsMainThread(void) |
3094 | 73.5M | { |
3095 | 73.5M | unsigned long thread = PyThread_get_thread_ident(); |
3096 | 73.5M | return (thread == _PyRuntime.main_thread); |
3097 | 73.5M | } |
3098 | | |
3099 | | |
3100 | | PyInterpreterState * |
3101 | | _PyInterpreterState_Main(void) |
3102 | 73.4M | { |
3103 | 73.4M | return _PyRuntime.interpreters.main; |
3104 | 73.4M | } |
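/* Illustrative sketch (hypothetical caller): the two helpers above are
 * typically combined to gate work on "main thread of the main interpreter".
 * For example, CPython only runs signal handlers in that context, so other
 * threads can bail out early. */
static int
example_should_handle_signals(PyInterpreterState *interp)
{
    return _Py_IsMainThread() && interp == _PyInterpreterState_Main();
}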
3105 | | |
3106 | | |
3107 | | int |
3108 | | _Py_IsMainInterpreterFinalizing(PyInterpreterState *interp) |
3109 | 0 | { |
3110 | | /* bpo-39877: Access _PyRuntime directly rather than using |
3111 | | tstate->interp->runtime to support calls from Python daemon threads. |
3112 | | After Py_Finalize() has been called, tstate can be a dangling pointer: |
3113 | | it may point to freed PyThreadState memory. */
3114 | 0 | return (_PyRuntimeState_GetFinalizing(&_PyRuntime) != NULL && |
3115 | 0 | interp == &_PyRuntime._main_interpreter); |
3116 | 0 | } |
3117 | | |
3118 | | |
3119 | | const PyConfig * |
3120 | | _Py_GetMainConfig(void) |
3121 | 0 | { |
3122 | 0 | PyInterpreterState *interp = _PyInterpreterState_Main(); |
3123 | 0 | if (interp == NULL) { |
3124 | 0 | return NULL; |
3125 | 0 | } |
3126 | 0 | return _PyInterpreterState_GetConfig(interp); |
3127 | 0 | } |
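/* Illustrative sketch (hypothetical caller): _Py_GetMainConfig() returns NULL
 * before the main interpreter exists, so callers need a fallback path.  The
 * 'verbose' field is a real PyConfig member, used here only as an example. */
static int
example_main_verbose_or_default(void)
{
    const PyConfig *config = _Py_GetMainConfig();
    if (config == NULL) {
        return 0;  /* runtime not (fully) initialized: assume default */
    }
    return config->verbose;
}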