/src/cpython/Python/pystate.c
Line | Count | Source |
1 | | |
2 | | /* Thread and interpreter state structures and their interfaces */ |
3 | | |
4 | | #include "Python.h" |
5 | | #include "pycore_abstract.h" // _PyIndex_Check() |
6 | | #include "pycore_audit.h" // _Py_AuditHookEntry |
7 | | #include "pycore_ceval.h" // _PyEval_AcquireLock() |
8 | | #include "pycore_codecs.h" // _PyCodec_Fini() |
9 | | #include "pycore_critical_section.h" // _PyCriticalSection_Resume() |
10 | | #include "pycore_dtoa.h" // _dtoa_state_INIT() |
11 | | #include "pycore_freelist.h" // _PyObject_ClearFreeLists() |
12 | | #include "pycore_initconfig.h" // _PyStatus_OK() |
13 | | #include "pycore_interpframe.h" // _PyThreadState_HasStackSpace() |
14 | | #include "pycore_object.h" // _PyType_InitCache() |
15 | | #include "pycore_obmalloc.h" // _PyMem_obmalloc_state_on_heap() |
16 | | #include "pycore_optimizer.h" // JIT_CLEANUP_THRESHOLD |
17 | | #include "pycore_parking_lot.h" // _PyParkingLot_AfterFork() |
18 | | #include "pycore_pyerrors.h" // _PyErr_Clear() |
19 | | #include "pycore_pylifecycle.h" // _PyAST_Fini() |
20 | | #include "pycore_pymem.h" // _PyMem_DebugEnabled() |
21 | | #include "pycore_runtime.h" // _PyRuntime |
22 | | #include "pycore_runtime_init.h" // _PyRuntimeState_INIT |
23 | | #include "pycore_stackref.h" // Py_STACKREF_DEBUG |
24 | | #include "pycore_stats.h" // FT_STAT_WORLD_STOP_INC() |
25 | | #include "pycore_time.h" // _PyTime_Init() |
26 | | #include "pycore_uop.h" // UOP_BUFFER_SIZE |
27 | | #include "pycore_uniqueid.h" // _PyObject_FinalizePerThreadRefcounts() |
28 | | |
29 | | |
30 | | /* -------------------------------------------------------------------------- |
31 | | CAUTION |
32 | | |
33 | | Always use PyMem_RawMalloc() and PyMem_RawFree() directly in this file. A |
34 | | number of these functions are advertised as safe to call when the GIL isn't |
35 | | held, and in a debug build Python redirects (e.g.) PyMem_NEW (etc) to Python's |
36 | | debugging obmalloc functions. Those aren't thread-safe (they rely on the GIL |
37 | | to avoid the expense of doing their own locking). |
38 | | -------------------------------------------------------------------------- */ |
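
To make this caution concrete, here is a minimal sketch of the discipline it describes; the struct and helper names are hypothetical, not part of pystate.c:

    #include "Python.h"

    /* Sketch: allocate without touching the GIL-dependent debug
       allocators. PyMem_RawMalloc()/PyMem_RawFree() are never redirected
       to the non-thread-safe debug obmalloc hooks. */
    struct example_state { int value; };        /* hypothetical */

    static struct example_state *
    example_state_new(void)                     /* hypothetical */
    {
        struct example_state *s = PyMem_RawMalloc(sizeof(*s));
        if (s == NULL) {
            return NULL;    /* cannot set an exception without a tstate */
        }
        memset(s, 0, sizeof(*s));
        return s;
    }

    static void
    example_state_free(struct example_state *s)
    {
        PyMem_RawFree(s);   /* safe even when the GIL is not held */
    }
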
39 | | |
40 | | #ifdef HAVE_DLOPEN |
41 | | # ifdef HAVE_DLFCN_H |
42 | | # include <dlfcn.h> |
43 | | # endif |
44 | | # if !HAVE_DECL_RTLD_LAZY |
45 | | # define RTLD_LAZY 1 |
46 | | # endif |
47 | | #endif |
48 | | |
49 | | |
50 | | /****************************************/ |
51 | | /* helpers for the current thread state */ |
52 | | /****************************************/ |
53 | | |
54 | | // API for the current thread state is further down. |
55 | | |
56 | | /* "current" means one of: |
57 | | - bound to the current OS thread |
58 | | - holds the GIL |
59 | | */ |
60 | | |
61 | | //------------------------------------------------- |
62 | | // a highly efficient lookup for the current thread |
63 | | //------------------------------------------------- |
64 | | |
65 | | /* |
66 | | The stored thread state is set by PyThreadState_Swap(). |
67 | | |
68 | | For each of these functions, the GIL must be held by the current thread. |
69 | | */ |
70 | | |
71 | | |
72 | | /* The attached thread state for the current thread. */ |
73 | | _Py_thread_local PyThreadState *_Py_tss_tstate = NULL; |
74 | | |
75 | | /* The "bound" thread state used by PyGILState_Ensure(), |
76 | | also known as a "gilstate." */ |
77 | | _Py_thread_local PyThreadState *_Py_tss_gilstate = NULL; |
78 | | |
79 | | /* The interpreter of the attached thread state, |
80 | | the same as tstate->interp. */ |
81 | | _Py_thread_local PyInterpreterState *_Py_tss_interp = NULL; |
82 | | |
83 | | static inline PyThreadState * |
84 | | current_fast_get(void) |
85 | 105M | { |
86 | 105M | return _Py_tss_tstate; |
87 | 105M | } |
88 | | |
89 | | static inline void |
90 | | current_fast_set(_PyRuntimeState *Py_UNUSED(runtime), PyThreadState *tstate) |
91 | 35.6k | { |
92 | 35.6k | assert(tstate != NULL); |
93 | 35.6k | _Py_tss_tstate = tstate; |
94 | 35.6k | assert(tstate->interp != NULL); |
95 | 35.6k | _Py_tss_interp = tstate->interp; |
96 | 35.6k | } |
97 | | |
98 | | static inline void |
99 | | current_fast_clear(_PyRuntimeState *Py_UNUSED(runtime)) |
100 | 35.6k | { |
101 | 35.6k | _Py_tss_tstate = NULL; |
102 | 35.6k | _Py_tss_interp = NULL; |
103 | 35.6k | } |
104 | | |
105 | | #define tstate_verify_not_active(tstate) \ |
106 | 0 | if (tstate == current_fast_get()) { \ |
107 | 0 | _Py_FatalErrorFormat(__func__, "tstate %p is still current", tstate); \ |
108 | 0 | } |
109 | | |
110 | | PyThreadState * |
111 | | _PyThreadState_GetCurrent(void) |
112 | 13.2M | { |
113 | 13.2M | return current_fast_get(); |
114 | 13.2M | } |
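
For comparison, a hedged sketch of reaching the same state through the public C API; PyThreadState_GetUnchecked() mirrors current_fast_get() in that it returns NULL rather than fatal-erroring when nothing is attached (the wrapper name is hypothetical):

    /* Sketch: public-API view of the fast lookup. */
    static PyInterpreterState *
    example_current_interp(void)               /* hypothetical */
    {
        PyThreadState *ts = PyThreadState_GetUnchecked();  /* NULL if detached */
        if (ts == NULL) {
            return NULL;
        }
        return PyThreadState_GetInterpreter(ts);
    }
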
115 | | |
116 | | |
117 | | //--------------------------------------------- |
118 | | // The thread state used by PyGILState_Ensure() |
119 | | //--------------------------------------------- |
120 | | |
121 | | /* |
122 | | The stored thread state is set by bind_tstate() (AKA PyThreadState_Bind()). |
123 | | |
124 | | The GIL does not need to be held for these. |
125 | | */ |
126 | | |
127 | | static inline PyThreadState * |
128 | | gilstate_get(void) |
129 | 44 | { |
130 | 44 | return _Py_tss_gilstate; |
131 | 44 | } |
132 | | |
133 | | static inline void |
134 | | gilstate_set(PyThreadState *tstate) |
135 | 22 | { |
136 | 22 | assert(tstate != NULL); |
137 | 22 | _Py_tss_gilstate = tstate; |
138 | 22 | } |
139 | | |
140 | | static inline void |
141 | | gilstate_clear(void) |
142 | 0 | { |
143 | 0 | _Py_tss_gilstate = NULL; |
144 | 0 | } |
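
A hedged sketch of the consumer side of these helpers: a thread not created by Python calls into the interpreter via PyGILState_Ensure()/PyGILState_Release(), which rest on the gilstate tstate stored above (the callback name is hypothetical):

    /* Sketch: calling into Python from a foreign (non-Python) thread. */
    static void
    example_foreign_thread_callback(void)      /* hypothetical */
    {
        PyGILState_STATE st = PyGILState_Ensure();  /* binds or reuses a tstate */
        PyRun_SimpleString("print('hello from a C thread')");
        PyGILState_Release(st);   /* may delete the tstate it created */
    }
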
145 | | |
146 | | |
147 | | #ifndef NDEBUG |
148 | | static inline int tstate_is_alive(PyThreadState *tstate); |
149 | | |
150 | | static inline int |
151 | | tstate_is_bound(PyThreadState *tstate) |
152 | | { |
153 | | return tstate->_status.bound && !tstate->_status.unbound; |
154 | | } |
155 | | #endif // !NDEBUG |
156 | | |
157 | | static void bind_gilstate_tstate(PyThreadState *); |
158 | | static void unbind_gilstate_tstate(PyThreadState *); |
159 | | |
160 | | static void tstate_mimalloc_bind(PyThreadState *); |
161 | | |
162 | | static void |
163 | | bind_tstate(PyThreadState *tstate) |
164 | 22 | { |
165 | 22 | assert(tstate != NULL); |
166 | 22 | assert(tstate_is_alive(tstate) && !tstate->_status.bound); |
167 | 22 | assert(!tstate->_status.unbound); // just in case |
168 | 22 | assert(!tstate->_status.bound_gilstate); |
169 | 22 | assert(tstate != gilstate_get()); |
170 | 22 | assert(!tstate->_status.active); |
171 | 22 | assert(tstate->thread_id == 0); |
172 | 22 | assert(tstate->native_thread_id == 0); |
173 | | |
174 | | // Currently we don't necessarily store the thread state |
175 | | // in thread-local storage (e.g. per-interpreter). |
176 | | |
177 | 22 | tstate->thread_id = PyThread_get_thread_ident(); |
178 | 22 | #ifdef PY_HAVE_THREAD_NATIVE_ID |
179 | 22 | tstate->native_thread_id = PyThread_get_thread_native_id(); |
180 | 22 | #endif |
181 | | |
182 | | #ifdef Py_GIL_DISABLED |
183 | | // Initialize biased reference counting inter-thread queue. Note that this |
184 | | // needs to be initialized from the active thread. |
185 | | _Py_brc_init_thread(tstate); |
186 | | #endif |
187 | | |
188 | | // mimalloc state needs to be initialized from the active thread. |
189 | 22 | tstate_mimalloc_bind(tstate); |
190 | | |
191 | 22 | tstate->_status.bound = 1; |
192 | 22 | } |
193 | | |
194 | | static void |
195 | | unbind_tstate(PyThreadState *tstate) |
196 | 0 | { |
197 | 0 | assert(tstate != NULL); |
198 | 0 | assert(tstate_is_bound(tstate)); |
199 | 0 | #ifndef HAVE_PTHREAD_STUBS |
200 | 0 | assert(tstate->thread_id > 0); |
201 | 0 | #endif |
202 | 0 | #ifdef PY_HAVE_THREAD_NATIVE_ID |
203 | 0 | assert(tstate->native_thread_id > 0); |
204 | 0 | #endif |
205 | | |
206 | | // We leave thread_id and native_thread_id alone |
207 | | // since they can be useful for debugging. |
208 | | // Check the `_status` field to know if these values |
209 | | // are still valid. |
210 | | |
211 | | // We leave tstate->_status.bound set to 1 |
212 | | // to indicate it was previously bound. |
213 | 0 | tstate->_status.unbound = 1; |
214 | 0 | } |
215 | | |
216 | | |
217 | | /* Stick the thread state for this thread in thread specific storage. |
218 | | |
219 | | When a thread state is created for a thread by some mechanism |
220 | | other than PyGILState_Ensure(), it's important that the GILState |
221 | | machinery knows about it so it doesn't try to create another |
222 | | thread state for the thread. |
223 | | (This is a better fix for SF bug #1010677 than the first one attempted.) |
224 | | |
225 | | The only situation where you can legitimately have more than one |
226 | | thread state for an OS level thread is when there are multiple |
227 | | interpreters. |
228 | | |
229 | | Before 3.12, the PyGILState_*() APIs didn't work with multiple |
230 | | interpreters (see bpo-10915 and bpo-15751), so this function used |
231 | | to set TSS only once. Thus, the first thread state created for a |
232 | | given OS level thread would "win", which seemed reasonable behaviour. |
233 | | */ |
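
As a concrete illustration of the one legitimate multiple-tstates-per-thread case described above, here is a hedged embedding sketch (the function name is hypothetical):

    /* Sketch: two thread states in one OS thread via a subinterpreter. */
    static void
    example_two_tstates(void)                  /* hypothetical */
    {
        PyThreadState *main_ts = PyThreadState_Get();
        PyThreadState *sub_ts = Py_NewInterpreter();  /* creates and swaps in
                                                         a second tstate */
        if (sub_ts == NULL) {
            return;
        }
        PyRun_SimpleString("import sys");   /* runs in the subinterpreter */
        Py_EndInterpreter(sub_ts);          /* destroys the second tstate */
        PyThreadState_Swap(main_ts);        /* reattach the original one */
    }
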
234 | | |
235 | | static void |
236 | | bind_gilstate_tstate(PyThreadState *tstate) |
237 | 22 | { |
238 | 22 | assert(tstate != NULL); |
239 | 22 | assert(tstate_is_alive(tstate)); |
240 | 22 | assert(tstate_is_bound(tstate)); |
241 | | // XXX assert(!tstate->_status.active); |
242 | 22 | assert(!tstate->_status.bound_gilstate); |
243 | | |
244 | 22 | PyThreadState *tcur = gilstate_get(); |
245 | 22 | assert(tstate != tcur); |
246 | | |
247 | 22 | if (tcur != NULL) { |
248 | 0 | tcur->_status.bound_gilstate = 0; |
249 | 0 | } |
250 | 22 | gilstate_set(tstate); |
251 | 22 | tstate->_status.bound_gilstate = 1; |
252 | 22 | } |
253 | | |
254 | | static void |
255 | | unbind_gilstate_tstate(PyThreadState *tstate) |
256 | 0 | { |
257 | 0 | assert(tstate != NULL); |
258 | | // XXX assert(tstate_is_alive(tstate)); |
259 | 0 | assert(tstate_is_bound(tstate)); |
260 | | // XXX assert(!tstate->_status.active); |
261 | 0 | assert(tstate->_status.bound_gilstate); |
262 | 0 | assert(tstate == gilstate_get()); |
263 | 0 | gilstate_clear(); |
264 | 0 | tstate->_status.bound_gilstate = 0; |
265 | 0 | } |
266 | | |
267 | | |
268 | | //---------------------------------------------- |
269 | | // the thread state that currently holds the GIL |
270 | | //---------------------------------------------- |
271 | | |
272 | | /* This is not exported, as it is not reliable! It can only |
273 | | ever be compared to the state for the *current* thread. |
274 | | * If not equal, then it doesn't matter that the actual |
275 | | value may change immediately after comparison, as it can't |
276 | | possibly change to the current thread's state. |
277 | | * If equal, then the current thread holds the lock, so the value can't |
278 | | change until we yield the lock. |
279 | | */ |
280 | | static int |
281 | | holds_gil(PyThreadState *tstate) |
282 | 0 | { |
283 | | // XXX Fall back to tstate->interp->runtime->ceval.gil.last_holder |
284 | | // (and tstate->interp->runtime->ceval.gil.locked). |
285 | 0 | assert(tstate != NULL); |
286 | | /* Must be the tstate for this thread */ |
287 | 0 | assert(tstate == gilstate_get()); |
288 | 0 | return tstate == current_fast_get(); |
289 | 0 | } |
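
The public PyGILState_Check() exposes essentially this comparison; a hedged sketch of the usual way it is consumed at C API boundaries (the function name is hypothetical):

    #include <assert.h>

    /* Sketch: asserting the GIL is held on entry to an API function. */
    static void
    example_api_entry(void)                    /* hypothetical */
    {
        assert(PyGILState_Check());   /* true iff this thread holds the GIL */
        /* ... work that requires the GIL ... */
    }
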
290 | | |
291 | | |
292 | | /****************************/ |
293 | | /* the global runtime state */ |
294 | | /****************************/ |
295 | | |
296 | | //---------- |
297 | | // lifecycle |
298 | | //---------- |
299 | | |
300 | | /* Suppress deprecation warning for PyBytesObject.ob_shash */ |
301 | | _Py_COMP_DIAG_PUSH |
302 | | _Py_COMP_DIAG_IGNORE_DEPR_DECLS |
303 | | /* We use "initial" if the runtime gets re-used |
304 | | (e.g. Py_Finalize() followed by Py_Initialize()). |
305 | | Note that we initialize "initial" relative to _PyRuntime, |
306 | | to ensure pre-initialized pointers point to the active |
307 | | runtime state (and not "initial"). */ |
308 | | static const _PyRuntimeState initial = _PyRuntimeState_INIT(_PyRuntime, ""); |
309 | | _Py_COMP_DIAG_POP |
310 | | |
311 | | #define LOCKS_INIT(runtime) \ |
312 | 0 | { \ |
313 | 0 | &(runtime)->interpreters.mutex, \ |
314 | 0 | &(runtime)->xi.data_lookup.registry.mutex, \ |
315 | 0 | &(runtime)->unicode_state.ids.mutex, \ |
316 | 0 | &(runtime)->imports.extensions.mutex, \ |
317 | 0 | &(runtime)->ceval.pending_mainthread.mutex, \ |
318 | 0 | &(runtime)->atexit.mutex, \ |
319 | 0 | &(runtime)->audit_hooks.mutex, \ |
320 | 0 | &(runtime)->allocators.mutex, \ |
321 | 0 | &(runtime)->_main_interpreter.types.mutex, \ |
322 | 0 | &(runtime)->_main_interpreter.code_state.mutex, \ |
323 | 0 | } |
324 | | |
325 | | static void |
326 | | init_runtime(_PyRuntimeState *runtime, |
327 | | void *open_code_hook, void *open_code_userdata, |
328 | | _Py_AuditHookEntry *audit_hook_head, |
329 | | Py_ssize_t unicode_next_index) |
330 | 22 | { |
331 | 22 | assert(!runtime->preinitializing); |
332 | 22 | assert(!runtime->preinitialized); |
333 | 22 | assert(!runtime->core_initialized); |
334 | 22 | assert(!runtime->initialized); |
335 | 22 | assert(!runtime->_initialized); |
336 | | |
337 | 22 | runtime->open_code_hook = open_code_hook; |
338 | 22 | runtime->open_code_userdata = open_code_userdata; |
339 | 22 | runtime->audit_hooks.head = audit_hook_head; |
340 | | |
341 | 22 | PyPreConfig_InitPythonConfig(&runtime->preconfig); |
342 | | |
343 | | // Set it to the ID of the main thread of the main interpreter. |
344 | 22 | runtime->main_thread = PyThread_get_thread_ident(); |
345 | | |
346 | 22 | runtime->unicode_state.ids.next_index = unicode_next_index; |
347 | 22 | runtime->_initialized = 1; |
348 | 22 | } |
349 | | |
350 | | PyStatus |
351 | | _PyRuntimeState_Init(_PyRuntimeState *runtime) |
352 | 22 | { |
353 | | /* We preserve the hook across init, because there is |
354 | | currently no public API to set it between runtime |
355 | | initialization and interpreter initialization. */ |
356 | 22 | void *open_code_hook = runtime->open_code_hook; |
357 | 22 | void *open_code_userdata = runtime->open_code_userdata; |
358 | 22 | _Py_AuditHookEntry *audit_hook_head = runtime->audit_hooks.head; |
359 | | // bpo-42882: Preserve next_index value if Py_Initialize()/Py_Finalize() |
360 | | // is called multiple times. |
361 | 22 | Py_ssize_t unicode_next_index = runtime->unicode_state.ids.next_index; |
362 | | |
363 | 22 | if (runtime->_initialized) { |
364 | | // Py_Initialize() must be running again. |
365 | | // Reset to _PyRuntimeState_INIT. |
366 | 0 | memcpy(runtime, &initial, sizeof(*runtime)); |
367 | | // Preserve the cookie from the original runtime. |
368 | 0 | memcpy(runtime->debug_offsets.cookie, _Py_Debug_Cookie, 8); |
369 | 0 | assert(!runtime->_initialized); |
370 | 0 | } |
371 | | |
372 | 22 | PyStatus status = _PyTime_Init(&runtime->time); |
373 | 22 | if (_PyStatus_EXCEPTION(status)) { |
374 | 0 | return status; |
375 | 0 | } |
376 | | |
377 | 22 | init_runtime(runtime, open_code_hook, open_code_userdata, audit_hook_head, |
378 | 22 | unicode_next_index); |
379 | | |
380 | 22 | return _PyStatus_OK(); |
381 | 22 | } |
382 | | |
383 | | void |
384 | | _PyRuntimeState_Fini(_PyRuntimeState *runtime) |
385 | 0 | { |
386 | | #ifdef Py_REF_DEBUG |
387 | | /* The count is cleared by _Py_FinalizeRefTotal(). */ |
388 | | assert(runtime->object_state.interpreter_leaks == 0); |
389 | | #endif |
390 | 0 | gilstate_clear(); |
391 | 0 | } |
392 | | |
393 | | #ifdef HAVE_FORK |
394 | | /* This function is called from PyOS_AfterFork_Child to ensure that |
395 | | newly created child processes do not share locks with the parent. */ |
396 | | PyStatus |
397 | | _PyRuntimeState_ReInitThreads(_PyRuntimeState *runtime) |
398 | 0 | { |
399 | | // This was initially set in _PyRuntimeState_Init(). |
400 | 0 | runtime->main_thread = PyThread_get_thread_ident(); |
401 | | |
402 | | // Clears the parking lot. Any waiting threads are dead. This must be |
403 | | // called before releasing any locks that use the parking lot. |
404 | 0 | _PyParkingLot_AfterFork(); |
405 | | |
406 | | // Re-initialize global locks |
407 | 0 | PyMutex *locks[] = LOCKS_INIT(runtime); |
408 | 0 | for (size_t i = 0; i < Py_ARRAY_LENGTH(locks); i++) { |
409 | 0 | _PyMutex_at_fork_reinit(locks[i]); |
410 | 0 | } |
411 | | #ifdef Py_GIL_DISABLED |
412 | | for (PyInterpreterState *interp = runtime->interpreters.head; |
413 | | interp != NULL; interp = interp->next) |
414 | | { |
415 | | for (int i = 0; i < NUM_WEAKREF_LIST_LOCKS; i++) { |
416 | | _PyMutex_at_fork_reinit(&interp->weakref_locks[i]); |
417 | | } |
418 | | } |
419 | | #endif |
420 | | |
421 | 0 | _PyTypes_AfterFork(); |
422 | | |
423 | 0 | _PyThread_AfterFork(&runtime->threads); |
424 | | |
425 | 0 | return _PyStatus_OK(); |
426 | 0 | } |
427 | | #endif |
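
On the embedder side, this re-initialization is reached through PyOS_AfterFork_Child(); os.fork() does the equivalent automatically. A hedged POSIX sketch (the function name is hypothetical):

    #include <unistd.h>

    /* Sketch: forking from C with Python initialized in the parent. */
    static void
    example_fork_child(void)                   /* hypothetical */
    {
        pid_t pid = fork();
        if (pid == 0) {
            /* Child: reinit runtime locks; their previous holders are gone. */
            PyOS_AfterFork_Child();
            /* ... child-side Python work ... */
            _exit(0);
        }
    }
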
428 | | |
429 | | |
430 | | /*************************************/ |
431 | | /* the per-interpreter runtime state */ |
432 | | /*************************************/ |
433 | | |
434 | | //---------- |
435 | | // lifecycle |
436 | | //---------- |
437 | | |
438 | | /* Calling this indicates that the runtime is ready to create interpreters. */ |
439 | | |
440 | | PyStatus |
441 | | _PyInterpreterState_Enable(_PyRuntimeState *runtime) |
442 | 22 | { |
443 | 22 | struct pyinterpreters *interpreters = &runtime->interpreters; |
444 | 22 | interpreters->next_id = 0; |
445 | 22 | return _PyStatus_OK(); |
446 | 22 | } |
447 | | |
448 | | static PyInterpreterState * |
449 | | alloc_interpreter(void) |
450 | 0 | { |
451 | | // Aligned allocation for PyInterpreterState. |
452 | | // The first word of the memory block is used to store |
453 | | // the original pointer to be used later to free the memory. |
454 | 0 | size_t alignment = _Alignof(PyInterpreterState); |
455 | 0 | size_t allocsize = sizeof(PyInterpreterState) + sizeof(void *) + alignment - 1; |
456 | 0 | void *mem = PyMem_RawCalloc(1, allocsize); |
457 | 0 | if (mem == NULL) { |
458 | 0 | return NULL; |
459 | 0 | } |
460 | 0 | void *ptr = _Py_ALIGN_UP((char *)mem + sizeof(void *), alignment); |
461 | 0 | ((void **)ptr)[-1] = mem; |
462 | 0 | assert(_Py_IS_ALIGNED(ptr, alignment)); |
463 | 0 | return ptr; |
464 | 0 | } |
465 | | |
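
The over-allocate-and-stash trick used by alloc_interpreter(), and undone by free_interpreter() below, shown standalone; a hedged sketch with illustrative helper names:

    #include <stdint.h>
    #include <stdlib.h>

    /* Sketch: aligned allocation on top of a plain allocator. The word
       just before the aligned pointer stores the original pointer. */
    static void *
    example_aligned_malloc(size_t size, size_t alignment)
    {
        void *mem = calloc(1, size + sizeof(void *) + alignment - 1);
        if (mem == NULL) {
            return NULL;
        }
        uintptr_t raw = (uintptr_t)mem + sizeof(void *);
        void *ptr = (void *)((raw + alignment - 1) & ~(uintptr_t)(alignment - 1));
        ((void **)ptr)[-1] = mem;       /* stash the original pointer */
        return ptr;
    }

    static void
    example_aligned_free(void *ptr)
    {
        if (ptr != NULL) {
            free(((void **)ptr)[-1]);   /* recover the original pointer */
        }
    }
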
466 | | static void |
467 | | free_interpreter(PyInterpreterState *interp) |
468 | 0 | { |
469 | | #ifdef Py_STATS |
470 | | if (interp->pystats_struct) { |
471 | | PyMem_RawFree(interp->pystats_struct); |
472 | | interp->pystats_struct = NULL; |
473 | | } |
474 | | #endif |
475 | | // The main interpreter is statically allocated so |
476 | | // should not be freed. |
477 | 0 | if (interp != &_PyRuntime._main_interpreter) { |
478 | 0 | if (_PyMem_obmalloc_state_on_heap(interp)) { |
479 | | // interpreter has its own obmalloc state, free it |
480 | 0 | PyMem_RawFree(interp->obmalloc); |
481 | 0 | interp->obmalloc = NULL; |
482 | 0 | } |
483 | 0 | assert(_Py_IS_ALIGNED(interp, _Alignof(PyInterpreterState))); |
484 | 0 | PyMem_RawFree(((void **)interp)[-1]); |
485 | 0 | } |
486 | 0 | } |
487 | | |
488 | | #ifndef NDEBUG |
489 | | static inline int check_interpreter_whence(long); |
490 | | #endif |
491 | | |
492 | | extern _Py_CODEUNIT * |
493 | | _Py_LazyJitTrampoline( |
494 | | struct _PyExecutorObject *exec, _PyInterpreterFrame *frame, _PyStackRef *stack_pointer, PyThreadState *tstate |
495 | | ); |
496 | | |
497 | | /* Get the interpreter state to a minimal consistent state. |
498 | | Further init happens in pylifecycle.c before it can be used. |
499 | | All fields not initialized here are expected to be zeroed out, |
500 | | e.g. by PyMem_RawCalloc() or memset(), or otherwise pre-initialized. |
501 | | The runtime state is not manipulated. Instead it is assumed that |
502 | | the interpreter is getting added to the runtime. |
503 | | |
504 | | Note that the main interpreter was statically initialized as part |
505 | | of the runtime and most state is already set properly. That leaves |
506 | | a small number of fields to initialize dynamically, as well as some |
507 | | that are initialized lazily. |
508 | | |
509 | | For subinterpreters we memcpy() the main interpreter in |
510 | | PyInterpreterState_New(), leaving it in the same mostly-initialized |
511 | | state. The only difference is that the interpreter has some |
512 | | self-referential state that is statically initialized to the |
513 | | main interpreter. We fix those fields here, in addition |
514 | | to the other dynamically initialized fields. |
515 | | */ |
516 | | static PyStatus |
517 | | init_interpreter(PyInterpreterState *interp, |
518 | | _PyRuntimeState *runtime, int64_t id, |
519 | | PyInterpreterState *next, |
520 | | long whence) |
521 | 22 | { |
522 | 22 | if (interp->_initialized) { |
523 | 0 | return _PyStatus_ERR("interpreter already initialized"); |
524 | 0 | } |
525 | | |
526 | 22 | assert(interp->_whence == _PyInterpreterState_WHENCE_NOTSET); |
527 | 22 | assert(check_interpreter_whence(whence) == 0); |
528 | 22 | interp->_whence = whence; |
529 | | |
530 | 22 | assert(runtime != NULL); |
531 | 22 | interp->runtime = runtime; |
532 | | |
533 | 22 | assert(id > 0 || (id == 0 && interp == runtime->interpreters.main)); |
534 | 22 | interp->id = id; |
535 | | |
536 | 22 | interp->id_refcount = 0; |
537 | | |
538 | 22 | assert(runtime->interpreters.head == interp); |
539 | 22 | assert(next != NULL || (interp == runtime->interpreters.main)); |
540 | 22 | interp->next = next; |
541 | | |
542 | 22 | interp->threads.preallocated = &interp->_initial_thread; |
543 | | |
544 | | // We would call _PyObject_InitState() at this point |
545 | | // if interp->feature_flags were already set. |
546 | | |
547 | 22 | _PyEval_InitState(interp); |
548 | 22 | _PyGC_InitState(&interp->gc); |
549 | 22 | PyConfig_InitPythonConfig(&interp->config); |
550 | 22 | _PyType_InitCache(interp); |
551 | | #ifdef Py_GIL_DISABLED |
552 | | _Py_brc_init_state(interp); |
553 | | #endif |
554 | | |
555 | | #ifdef _Py_TIER2 |
556 | | // Ensure the buffer starts out NULL. |
557 | | interp->jit_uop_buffer = NULL; |
558 | | #endif |
559 | 22 | llist_init(&interp->mem_free_queue.head); |
560 | 22 | llist_init(&interp->asyncio_tasks_head); |
561 | 22 | interp->asyncio_tasks_lock = (PyMutex){0}; |
562 | 374 | for (int i = 0; i < _PY_MONITORING_UNGROUPED_EVENTS; i++) { |
563 | 352 | interp->monitors.tools[i] = 0; |
564 | 352 | } |
565 | 198 | for (int t = 0; t < PY_MONITORING_TOOL_IDS; t++) { |
566 | 3.52k | for (int e = 0; e < _PY_MONITORING_EVENTS; e++) { |
567 | 3.34k | interp->monitoring_callables[t][e] = NULL; |
568 | | |
569 | 3.34k | } |
570 | 176 | interp->monitoring_tool_versions[t] = 0; |
571 | 176 | } |
572 | 22 | interp->_code_object_generation = 0; |
573 | 22 | interp->jit = false; |
574 | 22 | interp->compiling = false; |
575 | 22 | interp->executor_list_head = NULL; |
576 | 22 | interp->executor_deletion_list_head = NULL; |
577 | 22 | interp->executor_deletion_list_remaining_capacity = 0; |
578 | 22 | interp->executor_creation_counter = JIT_CLEANUP_THRESHOLD; |
579 | 22 | if (interp != &runtime->_main_interpreter) { |
580 | | /* Fix the self-referential, statically initialized fields. */ |
581 | 0 | interp->dtoa = (struct _dtoa_state)_dtoa_state_INIT(interp); |
582 | 0 | } |
583 | | #if !defined(Py_GIL_DISABLED) && defined(Py_STACKREF_DEBUG) |
584 | | interp->next_stackref = INITIAL_STACKREF_INDEX; |
585 | | _Py_hashtable_allocator_t alloc = { |
586 | | .malloc = malloc, |
587 | | .free = free, |
588 | | }; |
589 | | interp->open_stackrefs_table = _Py_hashtable_new_full( |
590 | | _Py_hashtable_hash_ptr, |
591 | | _Py_hashtable_compare_direct, |
592 | | NULL, |
593 | | NULL, |
594 | | &alloc |
595 | | ); |
596 | | # ifdef Py_STACKREF_CLOSE_DEBUG |
597 | | interp->closed_stackrefs_table = _Py_hashtable_new_full( |
598 | | _Py_hashtable_hash_ptr, |
599 | | _Py_hashtable_compare_direct, |
600 | | NULL, |
601 | | NULL, |
602 | | &alloc |
603 | | ); |
604 | | # endif |
605 | | _Py_stackref_associate(interp, Py_None, PyStackRef_None); |
606 | | _Py_stackref_associate(interp, Py_False, PyStackRef_False); |
607 | | _Py_stackref_associate(interp, Py_True, PyStackRef_True); |
608 | | #endif |
609 | | |
610 | 22 | interp->_initialized = 1; |
611 | 22 | return _PyStatus_OK(); |
612 | 22 | } |
613 | | |
614 | | |
615 | | PyStatus |
616 | | _PyInterpreterState_New(PyThreadState *tstate, PyInterpreterState **pinterp) |
617 | 22 | { |
618 | 22 | *pinterp = NULL; |
619 | | |
620 | | // Don't get runtime from tstate since tstate can be NULL |
621 | 22 | _PyRuntimeState *runtime = &_PyRuntime; |
622 | | |
623 | | // tstate is NULL when pycore_create_interpreter() calls |
624 | | // _PyInterpreterState_New() to create the main interpreter. |
625 | 22 | if (tstate != NULL) { |
626 | 0 | if (_PySys_Audit(tstate, "cpython.PyInterpreterState_New", NULL) < 0) { |
627 | 0 | return _PyStatus_ERR("sys.audit failed"); |
628 | 0 | } |
629 | 0 | } |
630 | | |
631 | | /* We completely serialize creation of multiple interpreters, since |
632 | | it simplifies things here and blocking concurrent calls isn't a problem. |
633 | | Regardless, we must fully block subinterpreter creation until |
634 | | after the main interpreter is created. */ |
635 | 22 | HEAD_LOCK(runtime); |
636 | | |
637 | 22 | struct pyinterpreters *interpreters = &runtime->interpreters; |
638 | 22 | int64_t id = interpreters->next_id; |
639 | 22 | interpreters->next_id += 1; |
640 | | |
641 | | // Allocate the interpreter and add it to the runtime state. |
642 | 22 | PyInterpreterState *interp; |
643 | 22 | PyStatus status; |
644 | 22 | PyInterpreterState *old_head = interpreters->head; |
645 | 22 | if (old_head == NULL) { |
646 | | // We are creating the main interpreter. |
647 | 22 | assert(interpreters->main == NULL); |
648 | 22 | assert(id == 0); |
649 | | |
650 | 22 | interp = &runtime->_main_interpreter; |
651 | 22 | assert(interp->id == 0); |
652 | 22 | assert(interp->next == NULL); |
653 | | |
654 | 22 | interpreters->main = interp; |
655 | 22 | } |
656 | 0 | else { |
657 | 0 | assert(interpreters->main != NULL); |
658 | 0 | assert(id != 0); |
659 | | |
660 | 0 | interp = alloc_interpreter(); |
661 | 0 | if (interp == NULL) { |
662 | 0 | status = _PyStatus_NO_MEMORY(); |
663 | 0 | goto error; |
664 | 0 | } |
665 | | // Set to _PyInterpreterState_INIT. |
666 | 0 | memcpy(interp, &initial._main_interpreter, sizeof(*interp)); |
667 | | |
668 | 0 | if (id < 0) { |
669 | | /* overflow or Py_Initialize() not called yet! */ |
670 | 0 | status = _PyStatus_ERR("failed to get an interpreter ID"); |
671 | 0 | goto error; |
672 | 0 | } |
673 | 0 | } |
674 | 22 | interpreters->head = interp; |
675 | | |
676 | 22 | long whence = _PyInterpreterState_WHENCE_UNKNOWN; |
677 | 22 | status = init_interpreter(interp, runtime, |
678 | 22 | id, old_head, whence); |
679 | 22 | if (_PyStatus_EXCEPTION(status)) { |
680 | 0 | goto error; |
681 | 0 | } |
682 | | |
683 | 22 | HEAD_UNLOCK(runtime); |
684 | | |
685 | 22 | assert(interp != NULL); |
686 | 22 | *pinterp = interp; |
687 | 22 | return _PyStatus_OK(); |
688 | | |
689 | 0 | error: |
690 | 0 | HEAD_UNLOCK(runtime); |
691 | | |
692 | 0 | if (interp != NULL) { |
693 | 0 | free_interpreter(interp); |
694 | 0 | } |
695 | 0 | return status; |
696 | 22 | } |
697 | | |
698 | | |
699 | | PyInterpreterState * |
700 | | PyInterpreterState_New(void) |
701 | 0 | { |
702 | | // tstate can be NULL |
703 | 0 | PyThreadState *tstate = current_fast_get(); |
704 | | |
705 | 0 | PyInterpreterState *interp; |
706 | 0 | PyStatus status = _PyInterpreterState_New(tstate, &interp); |
707 | 0 | if (_PyStatus_EXCEPTION(status)) { |
708 | 0 | Py_ExitStatusException(status); |
709 | 0 | } |
710 | 0 | assert(interp != NULL); |
711 | 0 | return interp; |
712 | 0 | } |
713 | | |
714 | | #if !defined(Py_GIL_DISABLED) && defined(Py_STACKREF_DEBUG) |
715 | | extern void |
716 | | _Py_stackref_report_leaks(PyInterpreterState *interp); |
717 | | #endif |
718 | | |
719 | | static void |
720 | | interpreter_clear(PyInterpreterState *interp, PyThreadState *tstate) |
721 | 0 | { |
722 | 0 | assert(interp != NULL); |
723 | 0 | assert(tstate != NULL); |
724 | 0 | _PyRuntimeState *runtime = interp->runtime; |
725 | | |
726 | | /* XXX Conditions we need to enforce: |
727 | | |
728 | | * the GIL must be held by the current thread |
729 | | * tstate must be the "current" thread state (current_fast_get()) |
730 | | * tstate->interp must be interp |
731 | | * for the main interpreter, tstate must be the main thread |
732 | | */ |
733 | | // XXX Ideally, we would not rely on any thread state in this function |
734 | | // (and we would drop the "tstate" argument). |
735 | | |
736 | 0 | if (_PySys_Audit(tstate, "cpython.PyInterpreterState_Clear", NULL) < 0) { |
737 | 0 | _PyErr_Clear(tstate); |
738 | 0 | } |
739 | | |
740 | | // Clear the current/main thread state last. |
741 | 0 | _Py_FOR_EACH_TSTATE_BEGIN(interp, p) { |
742 | | // See https://github.com/python/cpython/issues/102126 |
743 | | // Must be called without HEAD_LOCK held as it can deadlock |
744 | | // if any finalizer tries to acquire that lock. |
745 | 0 | HEAD_UNLOCK(runtime); |
746 | 0 | PyThreadState_Clear(p); |
747 | 0 | HEAD_LOCK(runtime); |
748 | 0 | } |
749 | 0 | _Py_FOR_EACH_TSTATE_END(interp); |
750 | 0 | if (tstate->interp == interp) { |
751 | | /* We fix tstate->_status below when we for sure aren't using it |
752 | | (e.g. no longer need the GIL). */ |
753 | | // XXX Eliminate the need to do this. |
754 | 0 | tstate->_status.cleared = 0; |
755 | 0 | } |
756 | | |
757 | | /* It is possible that any of the objects below have a finalizer |
758 | | that runs Python code or otherwise relies on a thread state |
759 | | or even the interpreter state. For now we trust that isn't |
760 | | a problem. |
761 | | */ |
762 | | // XXX Make sure we properly deal with problematic finalizers. |
763 | | |
764 | 0 | Py_CLEAR(interp->audit_hooks); |
765 | | |
766 | | // gh-140257: Threads have already been cleared, but daemon threads may |
767 | | // still access eval_breaker atomically via take_gil() right before they |
768 | | // hang. Use an atomic store to prevent data races during finalization. |
769 | 0 | interp->ceval.instrumentation_version = 0; |
770 | 0 | _Py_atomic_store_uintptr(&tstate->eval_breaker, 0); |
771 | | |
772 | 0 | for (int i = 0; i < _PY_MONITORING_UNGROUPED_EVENTS; i++) { |
773 | 0 | interp->monitors.tools[i] = 0; |
774 | 0 | } |
775 | 0 | for (int t = 0; t < PY_MONITORING_TOOL_IDS; t++) { |
776 | 0 | for (int e = 0; e < _PY_MONITORING_EVENTS; e++) { |
777 | 0 | Py_CLEAR(interp->monitoring_callables[t][e]); |
778 | 0 | } |
779 | 0 | } |
780 | 0 | for (int t = 0; t < PY_MONITORING_TOOL_IDS; t++) { |
781 | 0 | Py_CLEAR(interp->monitoring_tool_names[t]); |
782 | 0 | } |
783 | 0 | interp->_code_object_generation = 0; |
784 | | #ifdef Py_GIL_DISABLED |
785 | | interp->tlbc_indices.tlbc_generation = 0; |
786 | | #endif |
787 | | |
788 | 0 | PyConfig_Clear(&interp->config); |
789 | 0 | _PyCodec_Fini(interp); |
790 | |
|
791 | 0 | assert(interp->imports.modules == NULL); |
792 | 0 | assert(interp->imports.modules_by_index == NULL); |
793 | 0 | assert(interp->imports.importlib == NULL); |
794 | 0 | assert(interp->imports.import_func == NULL); |
795 | |
|
796 | 0 | Py_CLEAR(interp->sysdict_copy); |
797 | 0 | Py_CLEAR(interp->builtins_copy); |
798 | 0 | Py_CLEAR(interp->dict); |
799 | 0 | #ifdef HAVE_FORK |
800 | 0 | Py_CLEAR(interp->before_forkers); |
801 | 0 | Py_CLEAR(interp->after_forkers_parent); |
802 | 0 | Py_CLEAR(interp->after_forkers_child); |
803 | 0 | #endif |
804 | | |
805 | | |
806 | | #ifdef _Py_TIER2 |
807 | | _Py_ClearExecutorDeletionList(interp); |
808 | | if (interp->jit_uop_buffer != NULL) { |
809 | | _PyObject_VirtualFree(interp->jit_uop_buffer, UOP_BUFFER_SIZE); |
810 | | interp->jit_uop_buffer = NULL; |
811 | | } |
812 | | #endif |
813 | 0 | _PyAST_Fini(interp); |
814 | 0 | _PyAtExit_Fini(interp); |
815 | | |
816 | | // All Python types must be destroyed before the last GC collection. Python |
817 | | // types create a reference cycle to themselves in their |
818 | | // PyTypeObject.tp_mro member (the tuple contains the type). |
819 | | |
820 | | /* Last garbage collection on this interpreter */ |
821 | 0 | _PyGC_CollectNoFail(tstate); |
822 | 0 | _PyGC_Fini(interp); |
823 | | |
824 | | // Finalize warnings after last gc so that any finalizers can |
825 | | // access warnings state |
826 | 0 | _PyWarnings_Fini(interp); |
827 | 0 | struct _PyExecutorObject *cold = interp->cold_executor; |
828 | 0 | if (cold != NULL) { |
829 | 0 | interp->cold_executor = NULL; |
830 | 0 | assert(cold->vm_data.valid); |
831 | 0 | assert(cold->vm_data.warm); |
832 | 0 | _PyExecutor_Free(cold); |
833 | 0 | } |
834 | | /* We don't clear sysdict and builtins until the end of this function. |
835 | | Because clearing other attributes can execute arbitrary Python code |
836 | | which requires sysdict and builtins. */ |
837 | 0 | PyDict_Clear(interp->sysdict); |
838 | 0 | PyDict_Clear(interp->builtins); |
839 | 0 | Py_CLEAR(interp->sysdict); |
840 | 0 | Py_CLEAR(interp->builtins); |
841 | | |
842 | | #if !defined(Py_GIL_DISABLED) && defined(Py_STACKREF_DEBUG) |
843 | | # ifdef Py_STACKREF_CLOSE_DEBUG |
844 | | _Py_hashtable_destroy(interp->closed_stackrefs_table); |
845 | | interp->closed_stackrefs_table = NULL; |
846 | | # endif |
847 | | _Py_stackref_report_leaks(interp); |
848 | | _Py_hashtable_destroy(interp->open_stackrefs_table); |
849 | | interp->open_stackrefs_table = NULL; |
850 | | #endif |
851 | | |
852 | 0 | if (tstate->interp == interp) { |
853 | | /* We are now safe to fix tstate->_status.cleared. */ |
854 | | // XXX Do this (much) earlier? |
855 | 0 | tstate->_status.cleared = 1; |
856 | 0 | } |
857 | | |
858 | 0 | for (int i=0; i < DICT_MAX_WATCHERS; i++) { |
859 | 0 | interp->dict_state.watchers[i] = NULL; |
860 | 0 | } |
861 | | |
862 | 0 | for (int i=0; i < TYPE_MAX_WATCHERS; i++) { |
863 | 0 | interp->type_watchers[i] = NULL; |
864 | 0 | } |
865 | | |
866 | 0 | for (int i=0; i < FUNC_MAX_WATCHERS; i++) { |
867 | 0 | interp->func_watchers[i] = NULL; |
868 | 0 | } |
869 | 0 | interp->active_func_watchers = 0; |
870 | | |
871 | 0 | for (int i=0; i < CODE_MAX_WATCHERS; i++) { |
872 | 0 | interp->code_watchers[i] = NULL; |
873 | 0 | } |
874 | 0 | interp->active_code_watchers = 0; |
875 | | |
876 | 0 | for (int i=0; i < CONTEXT_MAX_WATCHERS; i++) { |
877 | 0 | interp->context_watchers[i] = NULL; |
878 | 0 | } |
879 | 0 | interp->active_context_watchers = 0; |
880 | | // XXX Once we have one allocator per interpreter (i.e. |
881 | | // per-interpreter GC) we must ensure that all of the interpreter's |
882 | | // objects have been cleaned up at that point. |
883 | | |
884 | | // We could clear interp->threads.freelist here |
885 | | // if it held more than just the initial thread state. |
886 | 0 | } |
887 | | |
888 | | |
889 | | void |
890 | | PyInterpreterState_Clear(PyInterpreterState *interp) |
891 | 0 | { |
892 | | // Use the current Python thread state to call audit hooks and to collect |
893 | | // garbage. It can be different than the current Python thread state |
894 | | // of 'interp'. |
895 | 0 | PyThreadState *current_tstate = current_fast_get(); |
896 | 0 | _PyImport_ClearCore(interp); |
897 | 0 | interpreter_clear(interp, current_tstate); |
898 | 0 | } |
899 | | |
900 | | |
901 | | void |
902 | | _PyInterpreterState_Clear(PyThreadState *tstate) |
903 | 0 | { |
904 | 0 | _PyImport_ClearCore(tstate->interp); |
905 | 0 | interpreter_clear(tstate->interp, tstate); |
906 | 0 | } |
907 | | |
908 | | |
909 | | static inline void tstate_deactivate(PyThreadState *tstate); |
910 | | static void tstate_set_detached(PyThreadState *tstate, int detached_state); |
911 | | static void zapthreads(PyInterpreterState *interp); |
912 | | |
913 | | void |
914 | | PyInterpreterState_Delete(PyInterpreterState *interp) |
915 | 0 | { |
916 | 0 | _PyRuntimeState *runtime = interp->runtime; |
917 | 0 | struct pyinterpreters *interpreters = &runtime->interpreters; |
918 | | |
919 | | // XXX Clearing the "current" thread state should happen before |
920 | | // we start finalizing the interpreter (or the current thread state). |
921 | 0 | PyThreadState *tcur = current_fast_get(); |
922 | 0 | if (tcur != NULL && interp == tcur->interp) { |
923 | | /* Unset current thread. After this, many C API calls become crashy. */ |
924 | 0 | _PyThreadState_Detach(tcur); |
925 | 0 | } |
926 | | |
927 | 0 | zapthreads(interp); |
928 | | |
929 | | // XXX These two calls should be done at the end of clear_interpreter(), |
930 | | // but currently some objects get decref'ed after that. |
931 | | #ifdef Py_REF_DEBUG |
932 | | _PyInterpreterState_FinalizeRefTotal(interp); |
933 | | #endif |
934 | 0 | _PyInterpreterState_FinalizeAllocatedBlocks(interp); |
935 | | |
936 | 0 | HEAD_LOCK(runtime); |
937 | 0 | PyInterpreterState **p; |
938 | 0 | for (p = &interpreters->head; ; p = &(*p)->next) { |
939 | 0 | if (*p == NULL) { |
940 | 0 | Py_FatalError("NULL interpreter"); |
941 | 0 | } |
942 | 0 | if (*p == interp) { |
943 | 0 | break; |
944 | 0 | } |
945 | 0 | } |
946 | 0 | if (interp->threads.head != NULL) { |
947 | 0 | Py_FatalError("remaining threads"); |
948 | 0 | } |
949 | 0 | *p = interp->next; |
950 | | |
951 | 0 | if (interpreters->main == interp) { |
952 | 0 | interpreters->main = NULL; |
953 | 0 | if (interpreters->head != NULL) { |
954 | 0 | Py_FatalError("remaining subinterpreters"); |
955 | 0 | } |
956 | 0 | } |
957 | 0 | HEAD_UNLOCK(runtime); |
958 | | |
959 | 0 | _Py_qsbr_fini(interp); |
960 | | |
961 | 0 | _PyObject_FiniState(interp); |
962 | | |
963 | 0 | PyConfig_Clear(&interp->config); |
964 | | |
965 | 0 | free_interpreter(interp); |
966 | 0 | } |
967 | | |
968 | | |
969 | | #ifdef HAVE_FORK |
970 | | /* |
971 | | * Delete all interpreter states except the main interpreter. If there |
972 | | * is a current interpreter state, it *must* be the main interpreter. |
973 | | */ |
974 | | PyStatus |
975 | | _PyInterpreterState_DeleteExceptMain(_PyRuntimeState *runtime) |
976 | 0 | { |
977 | 0 | struct pyinterpreters *interpreters = &runtime->interpreters; |
978 | | |
979 | 0 | PyThreadState *tstate = _PyThreadState_Swap(runtime, NULL); |
980 | 0 | if (tstate != NULL && tstate->interp != interpreters->main) { |
981 | 0 | return _PyStatus_ERR("not main interpreter"); |
982 | 0 | } |
983 | | |
984 | 0 | HEAD_LOCK(runtime); |
985 | 0 | PyInterpreterState *interp = interpreters->head; |
986 | 0 | interpreters->head = NULL; |
987 | 0 | while (interp != NULL) { |
988 | 0 | if (interp == interpreters->main) { |
989 | 0 | interpreters->main->next = NULL; |
990 | 0 | interpreters->head = interp; |
991 | 0 | interp = interp->next; |
992 | 0 | continue; |
993 | 0 | } |
994 | | |
995 | | // XXX Won't this fail since PyInterpreterState_Clear() requires |
996 | | // the "current" tstate to be set? |
997 | 0 | PyInterpreterState_Clear(interp); // XXX must activate? |
998 | 0 | zapthreads(interp); |
999 | 0 | PyInterpreterState *prev_interp = interp; |
1000 | 0 | interp = interp->next; |
1001 | 0 | free_interpreter(prev_interp); |
1002 | 0 | } |
1003 | 0 | HEAD_UNLOCK(runtime); |
1004 | | |
1005 | 0 | if (interpreters->head == NULL) { |
1006 | 0 | return _PyStatus_ERR("missing main interpreter"); |
1007 | 0 | } |
1008 | 0 | _PyThreadState_Swap(runtime, tstate); |
1009 | 0 | return _PyStatus_OK(); |
1010 | 0 | } |
1011 | | #endif |
1012 | | |
1013 | | static inline void |
1014 | | set_main_thread(PyInterpreterState *interp, PyThreadState *tstate) |
1015 | 0 | { |
1016 | 0 | _Py_atomic_store_ptr_relaxed(&interp->threads.main, tstate); |
1017 | 0 | } |
1018 | | |
1019 | | static inline PyThreadState * |
1020 | | get_main_thread(PyInterpreterState *interp) |
1021 | 0 | { |
1022 | 0 | return _Py_atomic_load_ptr_relaxed(&interp->threads.main); |
1023 | 0 | } |
1024 | | |
1025 | | void |
1026 | | _PyErr_SetInterpreterAlreadyRunning(void) |
1027 | 0 | { |
1028 | 0 | PyErr_SetString(PyExc_InterpreterError, "interpreter already running"); |
1029 | 0 | } |
1030 | | |
1031 | | int |
1032 | | _PyInterpreterState_SetRunningMain(PyInterpreterState *interp) |
1033 | 0 | { |
1034 | 0 | if (get_main_thread(interp) != NULL) { |
1035 | 0 | _PyErr_SetInterpreterAlreadyRunning(); |
1036 | 0 | return -1; |
1037 | 0 | } |
1038 | 0 | PyThreadState *tstate = current_fast_get(); |
1039 | 0 | _Py_EnsureTstateNotNULL(tstate); |
1040 | 0 | if (tstate->interp != interp) { |
1041 | 0 | PyErr_SetString(PyExc_RuntimeError, |
1042 | 0 | "current tstate has wrong interpreter"); |
1043 | 0 | return -1; |
1044 | 0 | } |
1045 | 0 | set_main_thread(interp, tstate); |
1046 | | |
1047 | 0 | return 0; |
1048 | 0 | } |
1049 | | |
1050 | | void |
1051 | | _PyInterpreterState_SetNotRunningMain(PyInterpreterState *interp) |
1052 | 0 | { |
1053 | 0 | assert(get_main_thread(interp) == current_fast_get()); |
1054 | 0 | set_main_thread(interp, NULL); |
1055 | 0 | } |
1056 | | |
1057 | | int |
1058 | | _PyInterpreterState_IsRunningMain(PyInterpreterState *interp) |
1059 | 0 | { |
1060 | 0 | if (get_main_thread(interp) != NULL) { |
1061 | 0 | return 1; |
1062 | 0 | } |
1063 | | // Embedders might not know to call _PyInterpreterState_SetRunningMain(), |
1064 | | // so their main thread wouldn't show it is running the main interpreter's |
1065 | | // program. (Py_Main() doesn't have this problem.) For now this isn't |
1066 | | // critical. If it were, we would need to infer "running main" from other |
1067 | | // information, like if it's the main interpreter. We used to do that |
1068 | | // but the naive approach led to some inconsistencies that caused problems. |
1069 | 0 | return 0; |
1070 | 0 | } |
1071 | | |
1072 | | int |
1073 | | _PyThreadState_IsRunningMain(PyThreadState *tstate) |
1074 | 0 | { |
1075 | 0 | PyInterpreterState *interp = tstate->interp; |
1076 | | // See the note in _PyInterpreterState_IsRunningMain() about |
1077 | | // possible false negatives here for embedders. |
1078 | 0 | return get_main_thread(interp) == tstate; |
1079 | 0 | } |
1080 | | |
1081 | | void |
1082 | | _PyInterpreterState_ReinitRunningMain(PyThreadState *tstate) |
1083 | 0 | { |
1084 | 0 | PyInterpreterState *interp = tstate->interp; |
1085 | 0 | if (get_main_thread(interp) != tstate) { |
1086 | 0 | set_main_thread(interp, NULL); |
1087 | 0 | } |
1088 | 0 | } |
1089 | | |
1090 | | |
1091 | | //---------- |
1092 | | // accessors |
1093 | | //---------- |
1094 | | |
1095 | | int |
1096 | | _PyInterpreterState_IsReady(PyInterpreterState *interp) |
1097 | 0 | { |
1098 | 0 | return interp->_ready; |
1099 | 0 | } |
1100 | | |
1101 | | #ifndef NDEBUG |
1102 | | static inline int |
1103 | | check_interpreter_whence(long whence) |
1104 | | { |
1105 | | if (whence < 0) { |
1106 | | return -1; |
1107 | | } |
1108 | | if (whence > _PyInterpreterState_WHENCE_MAX) { |
1109 | | return -1; |
1110 | | } |
1111 | | return 0; |
1112 | | } |
1113 | | #endif |
1114 | | |
1115 | | long |
1116 | | _PyInterpreterState_GetWhence(PyInterpreterState *interp) |
1117 | 0 | { |
1118 | 0 | assert(check_interpreter_whence(interp->_whence) == 0); |
1119 | 0 | return interp->_whence; |
1120 | 0 | } |
1121 | | |
1122 | | void |
1123 | | _PyInterpreterState_SetWhence(PyInterpreterState *interp, long whence) |
1124 | 22 | { |
1125 | 22 | assert(interp->_whence != _PyInterpreterState_WHENCE_NOTSET); |
1126 | 22 | assert(check_interpreter_whence(whence) == 0); |
1127 | 22 | interp->_whence = whence; |
1128 | 22 | } |
1129 | | |
1130 | | |
1131 | | PyObject * |
1132 | | _Py_GetMainModule(PyThreadState *tstate) |
1133 | 0 | { |
1134 | | // We return None to indicate "not found" or "bogus". |
1135 | 0 | PyObject *modules = _PyImport_GetModulesRef(tstate->interp); |
1136 | 0 | if (modules == Py_None) { |
1137 | 0 | return modules; |
1138 | 0 | } |
1139 | 0 | PyObject *module = NULL; |
1140 | 0 | (void)PyMapping_GetOptionalItem(modules, &_Py_ID(__main__), &module); |
1141 | 0 | Py_DECREF(modules); |
1142 | 0 | if (module == NULL && !PyErr_Occurred()) { |
1143 | 0 | Py_RETURN_NONE; |
1144 | 0 | } |
1145 | 0 | return module; |
1146 | 0 | } |
1147 | | |
1148 | | int |
1149 | | _Py_CheckMainModule(PyObject *module) |
1150 | 0 | { |
1151 | 0 | if (module == NULL || module == Py_None) { |
1152 | 0 | if (!PyErr_Occurred()) { |
1153 | 0 | (void)_PyErr_SetModuleNotFoundError(&_Py_ID(__main__)); |
1154 | 0 | } |
1155 | 0 | return -1; |
1156 | 0 | } |
1157 | 0 | if (!Py_IS_TYPE(module, &PyModule_Type)) { |
1158 | | /* The __main__ module has been tampered with. */ |
1159 | 0 | PyObject *msg = PyUnicode_FromString("invalid __main__ module"); |
1160 | 0 | if (msg != NULL) { |
1161 | 0 | (void)PyErr_SetImportError(msg, &_Py_ID(__main__), NULL); |
1162 | 0 | Py_DECREF(msg); |
1163 | 0 | } |
1164 | 0 | return -1; |
1165 | 0 | } |
1166 | 0 | return 0; |
1167 | 0 | } |
1168 | | |
1169 | | |
1170 | | PyObject * |
1171 | | PyInterpreterState_GetDict(PyInterpreterState *interp) |
1172 | 16 | { |
1173 | 16 | if (interp->dict == NULL) { |
1174 | 8 | interp->dict = PyDict_New(); |
1175 | 8 | if (interp->dict == NULL) { |
1176 | 0 | PyErr_Clear(); |
1177 | 0 | } |
1178 | 8 | } |
1179 | | /* Returning NULL means no per-interpreter dict is available. */ |
1180 | 16 | return interp->dict; |
1181 | 16 | } |
1182 | | |
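
A hedged sketch of how callers typically use this dict to stash per-interpreter state, assuming a CPython recent enough to provide PyDict_GetItemStringRef(); the key and function names are illustrative:

    /* Sketch: looking up a per-interpreter cached value. */
    static PyObject *
    example_get_cached(PyInterpreterState *interp)   /* hypothetical */
    {
        PyObject *dict = PyInterpreterState_GetDict(interp);
        if (dict == NULL) {
            return NULL;        /* no per-interpreter dict available */
        }
        PyObject *value = NULL;
        if (PyDict_GetItemStringRef(dict, "example-key", &value) < 0) {
            return NULL;        /* lookup raised an exception */
        }
        return value;           /* new reference, or NULL if absent */
    }
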
1183 | | |
1184 | | //---------- |
1185 | | // interp ID |
1186 | | //---------- |
1187 | | |
1188 | | int64_t |
1189 | | _PyInterpreterState_ObjectToID(PyObject *idobj) |
1190 | 0 | { |
1191 | 0 | if (!_PyIndex_Check(idobj)) { |
1192 | 0 | PyErr_Format(PyExc_TypeError, |
1193 | 0 | "interpreter ID must be an int, got %.100s", |
1194 | 0 | Py_TYPE(idobj)->tp_name); |
1195 | 0 | return -1; |
1196 | 0 | } |
1197 | | |
1198 | | // This may raise OverflowError. |
1199 | | // For now, we don't worry about if LLONG_MAX < INT64_MAX. |
1200 | 0 | long long id = PyLong_AsLongLong(idobj); |
1201 | 0 | if (id == -1 && PyErr_Occurred()) { |
1202 | 0 | return -1; |
1203 | 0 | } |
1204 | | |
1205 | 0 | if (id < 0) { |
1206 | 0 | PyErr_Format(PyExc_ValueError, |
1207 | 0 | "interpreter ID must be a non-negative int, got %R", |
1208 | 0 | idobj); |
1209 | 0 | return -1; |
1210 | 0 | } |
1211 | | #if LLONG_MAX > INT64_MAX |
1212 | | else if (id > INT64_MAX) { |
1213 | | PyErr_SetString(PyExc_OverflowError, "int too big to convert"); |
1214 | | return -1; |
1215 | | } |
1216 | | #endif |
1217 | 0 | else { |
1218 | 0 | return (int64_t)id; |
1219 | 0 | } |
1220 | 0 | } |
1221 | | |
1222 | | int64_t |
1223 | | PyInterpreterState_GetID(PyInterpreterState *interp) |
1224 | 0 | { |
1225 | 0 | if (interp == NULL) { |
1226 | 0 | PyErr_SetString(PyExc_RuntimeError, "no interpreter provided"); |
1227 | 0 | return -1; |
1228 | 0 | } |
1229 | 0 | return interp->id; |
1230 | 0 | } |
1231 | | |
1232 | | PyObject * |
1233 | | _PyInterpreterState_GetIDObject(PyInterpreterState *interp) |
1234 | 0 | { |
1235 | 0 | int64_t interpid = interp->id; |
1236 | 0 | if (interpid < 0) { |
1237 | 0 | return NULL; |
1238 | 0 | } |
1239 | 0 | assert(interpid < LLONG_MAX); |
1240 | 0 | return PyLong_FromLongLong(interpid); |
1241 | 0 | } |
1242 | | |
1243 | | |
1244 | | |
1245 | | void |
1246 | | _PyInterpreterState_IDIncref(PyInterpreterState *interp) |
1247 | 0 | { |
1248 | 0 | _Py_atomic_add_ssize(&interp->id_refcount, 1); |
1249 | 0 | } |
1250 | | |
1251 | | |
1252 | | void |
1253 | | _PyInterpreterState_IDDecref(PyInterpreterState *interp) |
1254 | 0 | { |
1255 | 0 | _PyRuntimeState *runtime = interp->runtime; |
1256 | | |
1257 | 0 | Py_ssize_t refcount = _Py_atomic_add_ssize(&interp->id_refcount, -1); |
1258 | | |
1259 | 0 | if (refcount == 1 && interp->requires_idref) { |
1260 | 0 | PyThreadState *tstate = |
1261 | 0 | _PyThreadState_NewBound(interp, _PyThreadState_WHENCE_FINI); |
1262 | | |
1263 | | // XXX Possible GILState issues? |
1264 | 0 | PyThreadState *save_tstate = _PyThreadState_Swap(runtime, tstate); |
1265 | 0 | Py_EndInterpreter(tstate); |
1266 | 0 | _PyThreadState_Swap(runtime, save_tstate); |
1267 | 0 | } |
1268 | 0 | } |
1269 | | |
1270 | | int |
1271 | | _PyInterpreterState_RequiresIDRef(PyInterpreterState *interp) |
1272 | 0 | { |
1273 | 0 | return interp->requires_idref; |
1274 | 0 | } |
1275 | | |
1276 | | void |
1277 | | _PyInterpreterState_RequireIDRef(PyInterpreterState *interp, int required) |
1278 | 0 | { |
1279 | 0 | interp->requires_idref = required ? 1 : 0; |
1280 | 0 | } |
1281 | | |
1282 | | |
1283 | | //----------------------------- |
1284 | | // look up an interpreter state |
1285 | | //----------------------------- |
1286 | | |
1287 | | /* Return the interpreter associated with the current OS thread. |
1288 | | |
1289 | | The GIL must be held. |
1290 | | */ |
1291 | | |
1292 | | PyInterpreterState* |
1293 | | PyInterpreterState_Get(void) |
1294 | 53 | { |
1295 | 53 | _Py_AssertHoldsTstate(); |
1296 | 53 | PyInterpreterState *interp = _Py_tss_interp; |
1297 | 53 | if (interp == NULL) { |
1298 | 0 | Py_FatalError("no current interpreter"); |
1299 | 0 | } |
1300 | 53 | return interp; |
1301 | 53 | } |
1302 | | |
1303 | | |
1304 | | static PyInterpreterState * |
1305 | | interp_look_up_id(_PyRuntimeState *runtime, int64_t requested_id) |
1306 | 0 | { |
1307 | 0 | PyInterpreterState *interp = runtime->interpreters.head; |
1308 | 0 | while (interp != NULL) { |
1309 | 0 | int64_t id = interp->id; |
1310 | 0 | assert(id >= 0); |
1311 | 0 | if (requested_id == id) { |
1312 | 0 | return interp; |
1313 | 0 | } |
1314 | 0 | interp = PyInterpreterState_Next(interp); |
1315 | 0 | } |
1316 | 0 | return NULL; |
1317 | 0 | } |
1318 | | |
1319 | | /* Return the interpreter state with the given ID. |
1320 | | |
1321 | | Fail with RuntimeError if the interpreter is not found. */ |
1322 | | |
1323 | | PyInterpreterState * |
1324 | | _PyInterpreterState_LookUpID(int64_t requested_id) |
1325 | 0 | { |
1326 | 0 | PyInterpreterState *interp = NULL; |
1327 | 0 | if (requested_id >= 0) { |
1328 | 0 | _PyRuntimeState *runtime = &_PyRuntime; |
1329 | 0 | HEAD_LOCK(runtime); |
1330 | 0 | interp = interp_look_up_id(runtime, requested_id); |
1331 | 0 | HEAD_UNLOCK(runtime); |
1332 | 0 | } |
1333 | 0 | if (interp == NULL && !PyErr_Occurred()) { |
1334 | 0 | PyErr_Format(PyExc_InterpreterNotFoundError, |
1335 | 0 | "unrecognized interpreter ID %lld", requested_id); |
1336 | 0 | } |
1337 | 0 | return interp; |
1338 | 0 | } |
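
A hedged sketch of a round trip through this lookup (these are internal APIs, so this is only meaningful inside CPython itself; the function name is hypothetical):

    #include <assert.h>

    /* Sketch: an ID round trip; valid only while interp stays alive. */
    static void
    example_id_roundtrip(PyInterpreterState *interp)  /* hypothetical */
    {
        int64_t id = PyInterpreterState_GetID(interp);
        PyInterpreterState *found = _PyInterpreterState_LookUpID(id);
        assert(found == interp);
        (void)found;
    }
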
1339 | | |
1340 | | PyInterpreterState * |
1341 | | _PyInterpreterState_LookUpIDObject(PyObject *requested_id) |
1342 | 0 | { |
1343 | 0 | int64_t id = _PyInterpreterState_ObjectToID(requested_id); |
1344 | 0 | if (id < 0) { |
1345 | 0 | return NULL; |
1346 | 0 | } |
1347 | 0 | return _PyInterpreterState_LookUpID(id); |
1348 | 0 | } |
1349 | | |
1350 | | |
1351 | | /********************************/ |
1352 | | /* the per-thread runtime state */ |
1353 | | /********************************/ |
1354 | | |
1355 | | #ifndef NDEBUG |
1356 | | static inline int |
1357 | | tstate_is_alive(PyThreadState *tstate) |
1358 | | { |
1359 | | return (tstate->_status.initialized && |
1360 | | !tstate->_status.finalized && |
1361 | | !tstate->_status.cleared && |
1362 | | !tstate->_status.finalizing); |
1363 | | } |
1364 | | #endif |
1365 | | |
1366 | | |
1367 | | //---------- |
1368 | | // lifecycle |
1369 | | //---------- |
1370 | | |
1371 | | static _PyStackChunk* |
1372 | | allocate_chunk(int size_in_bytes, _PyStackChunk* previous) |
1373 | 162k | { |
1374 | 162k | assert(size_in_bytes % sizeof(PyObject **) == 0); |
1375 | 162k | _PyStackChunk *res = _PyObject_VirtualAlloc(size_in_bytes); |
1376 | 162k | if (res == NULL) { |
1377 | 0 | return NULL; |
1378 | 0 | } |
1379 | 162k | res->previous = previous; |
1380 | 162k | res->size = size_in_bytes; |
1381 | 162k | res->top = 0; |
1382 | 162k | return res; |
1383 | 162k | } |
1384 | | |
1385 | | static void |
1386 | | reset_threadstate(_PyThreadStateImpl *tstate) |
1387 | 0 | { |
1388 | | // Set to _PyThreadState_INIT directly? |
1389 | 0 | memcpy(tstate, |
1390 | 0 | &initial._main_interpreter._initial_thread, |
1391 | 0 | sizeof(*tstate)); |
1392 | 0 | } |
1393 | | |
1394 | | static _PyThreadStateImpl * |
1395 | | alloc_threadstate(PyInterpreterState *interp) |
1396 | 22 | { |
1397 | 22 | _PyThreadStateImpl *tstate; |
1398 | | |
1399 | | // Try the preallocated tstate first. |
1400 | 22 | tstate = _Py_atomic_exchange_ptr(&interp->threads.preallocated, NULL); |
1401 | | |
1402 | | // Fall back to the allocator. |
1403 | 22 | if (tstate == NULL) { |
1404 | 0 | tstate = PyMem_RawCalloc(1, sizeof(_PyThreadStateImpl)); |
1405 | 0 | if (tstate == NULL) { |
1406 | 0 | return NULL; |
1407 | 0 | } |
1408 | 0 | reset_threadstate(tstate); |
1409 | 0 | } |
1410 | 22 | return tstate; |
1411 | 22 | } |
1412 | | |
1413 | | static void |
1414 | | free_threadstate(_PyThreadStateImpl *tstate) |
1415 | 0 | { |
1416 | 0 | PyInterpreterState *interp = tstate->base.interp; |
1417 | | #ifdef Py_STATS |
1418 | | _PyStats_ThreadFini(tstate); |
1419 | | #endif |
1420 | | // The initial thread state of the interpreter is allocated |
1421 | | // as part of the interpreter state so should not be freed. |
1422 | 0 | if (tstate == &interp->_initial_thread) { |
1423 | | // Make it available again. |
1424 | 0 | reset_threadstate(tstate); |
1425 | 0 | assert(interp->threads.preallocated == NULL); |
1426 | 0 | _Py_atomic_store_ptr(&interp->threads.preallocated, tstate); |
1427 | 0 | } |
1428 | 0 | else { |
1429 | 0 | PyMem_RawFree(tstate); |
1430 | 0 | } |
1431 | 0 | } |
1432 | | |
1433 | | static void |
1434 | | decref_threadstate(_PyThreadStateImpl *tstate) |
1435 | 0 | { |
1436 | 0 | if (_Py_atomic_add_ssize(&tstate->refcount, -1) == 1) { |
1437 | | // The last reference to the thread state is gone. |
1438 | 0 | free_threadstate(tstate); |
1439 | 0 | } |
1440 | 0 | } |
1441 | | |
1442 | | /* Get the thread state to a minimal consistent state. |
1443 | | Further init happens in pylifecycle.c before it can be used. |
1444 | | All fields not initialized here are expected to be zeroed out, |
1445 | | e.g. by PyMem_RawCalloc() or memset(), or otherwise pre-initialized. |
1446 | | The interpreter state is not manipulated. Instead it is assumed that |
1447 | | the thread is getting added to the interpreter. |
1448 | | */ |
1449 | | |
1450 | | static void |
1451 | | init_threadstate(_PyThreadStateImpl *_tstate, |
1452 | | PyInterpreterState *interp, uint64_t id, int whence) |
1453 | 22 | { |
1454 | 22 | PyThreadState *tstate = (PyThreadState *)_tstate; |
1455 | 22 | if (tstate->_status.initialized) { |
1456 | 0 | Py_FatalError("thread state already initialized"); |
1457 | 0 | } |
1458 | | |
1459 | 22 | assert(interp != NULL); |
1460 | 22 | tstate->interp = interp; |
1461 | 22 | tstate->eval_breaker = |
1462 | 22 | _Py_atomic_load_uintptr_relaxed(&interp->ceval.instrumentation_version); |
1463 | | |
1464 | | // next/prev are set in add_threadstate(). |
1465 | 22 | assert(tstate->next == NULL); |
1466 | 22 | assert(tstate->prev == NULL); |
1467 | | |
1468 | 22 | assert(tstate->_whence == _PyThreadState_WHENCE_NOTSET); |
1469 | 22 | assert(whence >= 0 && whence <= _PyThreadState_WHENCE_THREADING_DAEMON); |
1470 | 22 | tstate->_whence = whence; |
1471 | | |
1472 | 22 | assert(id > 0); |
1473 | 22 | tstate->id = id; |
1474 | | |
1475 | | // thread_id and native_thread_id are set in bind_tstate(). |
1476 | | |
1477 | 22 | tstate->py_recursion_limit = interp->ceval.recursion_limit; |
1478 | 22 | tstate->py_recursion_remaining = interp->ceval.recursion_limit; |
1479 | 22 | tstate->exc_info = &tstate->exc_state; |
1480 | | |
1481 | | // PyGILState_Release must not try to delete this thread state. |
1482 | | // This is cleared when PyGILState_Ensure() creates the thread state. |
1483 | 22 | tstate->gilstate_counter = 1; |
1484 | | |
1485 | 22 | tstate->current_frame = NULL; |
1486 | 22 | tstate->datastack_chunk = NULL; |
1487 | 22 | tstate->datastack_top = NULL; |
1488 | 22 | tstate->datastack_limit = NULL; |
1489 | 22 | tstate->what_event = -1; |
1490 | 22 | tstate->current_executor = NULL; |
1491 | 22 | tstate->jit_exit = NULL; |
1492 | 22 | tstate->dict_global_version = 0; |
1493 | | |
1494 | 22 | _tstate->c_stack_soft_limit = UINTPTR_MAX; |
1495 | 22 | _tstate->c_stack_top = 0; |
1496 | 22 | _tstate->c_stack_hard_limit = 0; |
1497 | | |
1498 | 22 | _tstate->asyncio_running_loop = NULL; |
1499 | 22 | _tstate->asyncio_running_task = NULL; |
1500 | | |
1501 | 22 | tstate->delete_later = NULL; |
1502 | | |
1503 | 22 | llist_init(&_tstate->mem_free_queue); |
1504 | 22 | llist_init(&_tstate->asyncio_tasks_head); |
1505 | 22 | if (interp->stoptheworld.requested || _PyRuntime.stoptheworld.requested) { |
1506 | | // Start in the suspended state if there is an ongoing stop-the-world. |
1507 | 0 | tstate->state = _Py_THREAD_SUSPENDED; |
1508 | 0 | } |
1509 | | |
1510 | 22 | tstate->_status.initialized = 1; |
1511 | 22 | } |
1512 | | |
1513 | | static void |
1514 | | add_threadstate(PyInterpreterState *interp, PyThreadState *tstate, |
1515 | | PyThreadState *next) |
1516 | 22 | { |
1517 | 22 | assert(interp->threads.head != tstate); |
1518 | 22 | if (next != NULL) { |
1519 | 0 | assert(next->prev == NULL || next->prev == tstate); |
1520 | 0 | next->prev = tstate; |
1521 | 0 | } |
1522 | 22 | tstate->next = next; |
1523 | 22 | assert(tstate->prev == NULL); |
1524 | 22 | interp->threads.head = tstate; |
1525 | 22 | } |
1526 | | |
1527 | | static PyThreadState * |
1528 | | new_threadstate(PyInterpreterState *interp, int whence) |
1529 | 22 | { |
1530 | | // Allocate the thread state. |
1531 | 22 | _PyThreadStateImpl *tstate = alloc_threadstate(interp); |
1532 | 22 | if (tstate == NULL) { |
1533 | 0 | return NULL; |
1534 | 0 | } |
1535 | | |
1536 | | #ifdef Py_GIL_DISABLED |
1537 | | Py_ssize_t qsbr_idx = _Py_qsbr_reserve(interp); |
1538 | | if (qsbr_idx < 0) { |
1539 | | free_threadstate(tstate); |
1540 | | return NULL; |
1541 | | } |
1542 | | int32_t tlbc_idx = _Py_ReserveTLBCIndex(interp); |
1543 | | if (tlbc_idx < 0) { |
1544 | | free_threadstate(tstate); |
1545 | | return NULL; |
1546 | | } |
1547 | | #endif |
1548 | | #ifdef Py_STATS |
1549 | | // The PyStats structure is quite large and is allocated separately from tstate. |
1550 | | if (!_PyStats_ThreadInit(interp, tstate)) { |
1551 | | free_threadstate(tstate); |
1552 | | return NULL; |
1553 | | } |
1554 | | #endif |
1555 | | |
1556 | | /* We serialize concurrent creation to protect global state. */ |
1557 | 22 | HEAD_LOCK(interp->runtime); |
1558 | | |
1559 | | // Initialize the new thread state. |
1560 | 22 | interp->threads.next_unique_id += 1; |
1561 | 22 | uint64_t id = interp->threads.next_unique_id; |
1562 | 22 | init_threadstate(tstate, interp, id, whence); |
1563 | | |
1564 | | // Add the new thread state to the interpreter. |
1565 | 22 | PyThreadState *old_head = interp->threads.head; |
1566 | 22 | add_threadstate(interp, (PyThreadState *)tstate, old_head); |
1567 | | |
1568 | 22 | HEAD_UNLOCK(interp->runtime); |
1569 | | |
1570 | | #ifdef Py_GIL_DISABLED |
1571 | | // Must be called with lock unlocked to avoid lock ordering deadlocks. |
1572 | | _Py_qsbr_register(tstate, interp, qsbr_idx); |
1573 | | tstate->tlbc_index = tlbc_idx; |
1574 | | #endif |
1575 | | |
1576 | 22 | return (PyThreadState *)tstate; |
1577 | 22 | } |
1578 | | |
1579 | | PyThreadState * |
1580 | | PyThreadState_New(PyInterpreterState *interp) |
1581 | 0 | { |
1582 | 0 | return _PyThreadState_NewBound(interp, _PyThreadState_WHENCE_UNKNOWN); |
1583 | 0 | } |
1584 | | |
1585 | | PyThreadState * |
1586 | | _PyThreadState_NewBound(PyInterpreterState *interp, int whence) |
1587 | 0 | { |
1588 | 0 | PyThreadState *tstate = new_threadstate(interp, whence); |
1589 | 0 | if (tstate) { |
1590 | 0 | bind_tstate(tstate); |
1591 | | // This makes sure there's a gilstate tstate bound |
1592 | | // as soon as possible. |
1593 | 0 | if (gilstate_get() == NULL) { |
1594 | 0 | bind_gilstate_tstate(tstate); |
1595 | 0 | } |
1596 | 0 | } |
1597 | 0 | return tstate; |
1598 | 0 | } |
1599 | | |
1600 | | // This must be followed by a call to _PyThreadState_Bind(); |
1601 | | PyThreadState * |
1602 | | _PyThreadState_New(PyInterpreterState *interp, int whence) |
1603 | 22 | { |
1604 | 22 | return new_threadstate(interp, whence); |
1605 | 22 | } |
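new_threadstate() only allocates and links the state; binding it to the calling
OS thread is a separate step. The embedder-facing pattern built on top of this,
sketched with public API only (run_on_foreign_thread and its arguments are
hypothetical):

/* Sketch: giving a foreign OS thread its own thread state. */
static void
run_on_foreign_thread(PyInterpreterState *interp, PyObject *callable)
{
    PyThreadState *ts = PyThreadState_New(interp);  /* create + bind */
    PyEval_RestoreThread(ts);                       /* attach; takes the GIL */
    PyObject *res = PyObject_CallNoArgs(callable);
    if (res == NULL) {
        PyErr_Print();
    }
    Py_XDECREF(res);
    PyThreadState_Clear(ts);        /* destructors may run; GIL still held */
    PyThreadState_DeleteCurrent();  /* unbinds, releases the GIL, frees ts */
}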
1606 | | |
1607 | | // We keep this for stable ABI compatibility. |
1608 | | PyAPI_FUNC(PyThreadState*) |
1609 | | _PyThreadState_Prealloc(PyInterpreterState *interp) |
1610 | 0 | { |
1611 | 0 | return _PyThreadState_New(interp, _PyThreadState_WHENCE_UNKNOWN); |
1612 | 0 | } |
1613 | | |
1614 | | // We keep this around for (accidental) stable ABI compatibility. |
1615 | | // Realistically, no extensions are using it. |
1616 | | PyAPI_FUNC(void) |
1617 | | _PyThreadState_Init(PyThreadState *tstate) |
1618 | 0 | { |
1619 | 0 | Py_FatalError("_PyThreadState_Init() is for internal use only"); |
1620 | 0 | } |
1621 | | |
1622 | | |
1623 | | static void |
1624 | | clear_datastack(PyThreadState *tstate) |
1625 | 0 | { |
1626 | 0 | _PyStackChunk *chunk = tstate->datastack_chunk; |
1627 | 0 | tstate->datastack_chunk = NULL; |
1628 | 0 | while (chunk != NULL) { |
1629 | 0 | _PyStackChunk *prev = chunk->previous; |
1630 | 0 | _PyObject_VirtualFree(chunk, chunk->size); |
1631 | 0 | chunk = prev; |
1632 | 0 | } |
1633 | 0 | } |
1634 | | |
1635 | | void |
1636 | | PyThreadState_Clear(PyThreadState *tstate) |
1637 | 0 | { |
1638 | 0 | assert(tstate->_status.initialized && !tstate->_status.cleared); |
1639 | 0 | assert(current_fast_get()->interp == tstate->interp); |
1640 | | // GH-126016: In the _interpreters module, KeyboardInterrupt exceptions |
1641 | | // during PyEval_EvalCode() are sent to finalization, which doesn't let us |
1642 | | // mark threads as "not running main". So, for now this assertion is |
1643 | | // disabled. |
1644 | | // XXX assert(!_PyThreadState_IsRunningMain(tstate)); |
1645 | | // XXX assert(!tstate->_status.bound || tstate->_status.unbound); |
1646 | 0 | tstate->_status.finalizing = 1; // just in case |
1647 | | |
1648 | | /* XXX Conditions we need to enforce: |
1649 | | |
1650 | | * the GIL must be held by the current thread |
1651 | | * current_fast_get()->interp must match tstate->interp |
1652 | | * for the main interpreter, current_fast_get() must be the main thread |
1653 | | */ |
1654 | |
1655 | 0 | int verbose = _PyInterpreterState_GetConfig(tstate->interp)->verbose; |
1656 | |
1657 | 0 | if (verbose && tstate->current_frame != NULL) { |
1658 | | /* bpo-20526: After the main thread calls |
1659 | | _PyInterpreterState_SetFinalizing() in Py_FinalizeEx() |
1660 | | (or in Py_EndInterpreter() for subinterpreters), |
1661 | | threads must exit when trying to take the GIL. |
1662 | | If a thread exits in the middle of _PyEval_EvalFrameDefault(), |
1663 | | tstate->frame is not reset to its previous value. |
1664 | | It is more likely with daemon threads, but it can happen |
1665 | | with regular threads if threading._shutdown() fails |
1666 | | (ex: interrupted by CTRL+C). */ |
1667 | 0 | fprintf(stderr, |
1668 | 0 | "PyThreadState_Clear: warning: thread still has a frame\n"); |
1669 | 0 | } |
1670 | |
1671 | 0 | if (verbose && tstate->current_exception != NULL) { |
1672 | 0 | fprintf(stderr, "PyThreadState_Clear: warning: thread has an exception set\n"); |
1673 | 0 | _PyErr_Print(tstate); |
1674 | 0 | } |
1675 | | |
1676 | | /* At this point tstate shouldn't be used any more, |
1677 | | neither to run Python code nor for other uses. |
1678 | | |
1679 | | This is tricky when current_fast_get() == tstate, in the same way |
1680 | | as noted in interpreter_clear() above. The below finalizers |
1681 | | can possibly run Python code or otherwise use the partially |
1682 | | cleared thread state. For now we trust that isn't a problem |
1683 | | in practice. |
1684 | | */ |
1685 | | // XXX Deal with the possibility of problematic finalizers. |
1686 | | |
1687 | | /* Don't clear tstate->pyframe: it is a borrowed reference */ |
1688 | |
1689 | 0 | Py_CLEAR(tstate->threading_local_key); |
1690 | 0 | Py_CLEAR(tstate->threading_local_sentinel); |
1691 | |
1692 | 0 | Py_CLEAR(((_PyThreadStateImpl *)tstate)->asyncio_running_loop); |
1693 | 0 | Py_CLEAR(((_PyThreadStateImpl *)tstate)->asyncio_running_task); |
1694 | | |
1695 | |
1696 | 0 | PyMutex_Lock(&tstate->interp->asyncio_tasks_lock); |
1697 | | // merge any lingering tasks from thread state to interpreter's |
1698 | | // tasks list |
1699 | 0 | llist_concat(&tstate->interp->asyncio_tasks_head, |
1700 | 0 | &((_PyThreadStateImpl *)tstate)->asyncio_tasks_head); |
1701 | 0 | PyMutex_Unlock(&tstate->interp->asyncio_tasks_lock); |
1702 | |
1703 | 0 | Py_CLEAR(tstate->dict); |
1704 | 0 | Py_CLEAR(tstate->async_exc); |
1705 | |
1706 | 0 | Py_CLEAR(tstate->current_exception); |
1707 | |
1708 | 0 | Py_CLEAR(tstate->exc_state.exc_value); |
1709 | | |
1710 | | /* The stack of exception states should contain just this thread. */ |
1711 | 0 | if (verbose && tstate->exc_info != &tstate->exc_state) { |
1712 | 0 | fprintf(stderr, |
1713 | 0 | "PyThreadState_Clear: warning: thread still has a generator\n"); |
1714 | 0 | } |
1715 | |
1716 | 0 | if (tstate->c_profilefunc != NULL) { |
1717 | 0 | FT_ATOMIC_ADD_SSIZE(tstate->interp->sys_profiling_threads, -1); |
1718 | 0 | tstate->c_profilefunc = NULL; |
1719 | 0 | } |
1720 | 0 | if (tstate->c_tracefunc != NULL) { |
1721 | 0 | FT_ATOMIC_ADD_SSIZE(tstate->interp->sys_tracing_threads, -1); |
1722 | 0 | tstate->c_tracefunc = NULL; |
1723 | 0 | } |
1724 | |
1725 | 0 | Py_CLEAR(tstate->c_profileobj); |
1726 | 0 | Py_CLEAR(tstate->c_traceobj); |
1727 | |
1728 | 0 | Py_CLEAR(tstate->async_gen_firstiter); |
1729 | 0 | Py_CLEAR(tstate->async_gen_finalizer); |
1730 | |
1731 | 0 | Py_CLEAR(tstate->context); |
1732 | |
1733 | | #ifdef Py_GIL_DISABLED |
1734 | | // Each thread should clear its own freelists in free-threading builds. |
1735 | | struct _Py_freelists *freelists = _Py_freelists_GET(); |
1736 | | _PyObject_ClearFreeLists(freelists, 1); |
1737 | | |
1738 | | // Merge our thread-local refcounts into the type's own refcount and |
1739 | | // free our local refcount array. |
1740 | | _PyObject_FinalizePerThreadRefcounts((_PyThreadStateImpl *)tstate); |
1741 | | |
1742 | | // Remove ourselves from the biased reference counting table of threads. |
1743 | | _Py_brc_remove_thread(tstate); |
1744 | | |
1745 | | // Release our thread-local copies of the bytecode for reuse by another |
1746 | | // thread |
1747 | | _Py_ClearTLBCIndex((_PyThreadStateImpl *)tstate); |
1748 | | #endif |
1749 | | |
1750 | | // Merge our queue of pointers to be freed into the interpreter queue. |
1751 | 0 | _PyMem_AbandonDelayed(tstate); |
1752 | |
1753 | 0 | _PyThreadState_ClearMimallocHeaps(tstate); |
1754 | |
1755 | 0 | tstate->_status.cleared = 1; |
1756 | | |
1757 | | // XXX Call _PyThreadStateSwap(runtime, NULL) here if "current". |
1758 | | // XXX Do it as early in the function as possible. |
1759 | 0 | } |
1760 | | |
1761 | | static void |
1762 | | decrement_stoptheworld_countdown(struct _stoptheworld_state *stw); |
1763 | | |
1764 | | /* Common code for PyThreadState_Delete() and PyThreadState_DeleteCurrent() */ |
1765 | | static void |
1766 | | tstate_delete_common(PyThreadState *tstate, int release_gil) |
1767 | 0 | { |
1768 | 0 | assert(tstate->_status.cleared && !tstate->_status.finalized); |
1769 | 0 | tstate_verify_not_active(tstate); |
1770 | 0 | assert(!_PyThreadState_IsRunningMain(tstate)); |
1771 | |
1772 | 0 | PyInterpreterState *interp = tstate->interp; |
1773 | 0 | if (interp == NULL) { |
1774 | 0 | Py_FatalError("NULL interpreter"); |
1775 | 0 | } |
1776 | 0 | _PyRuntimeState *runtime = interp->runtime; |
1777 | |
1778 | 0 | HEAD_LOCK(runtime); |
1779 | 0 | if (tstate->prev) { |
1780 | 0 | tstate->prev->next = tstate->next; |
1781 | 0 | } |
1782 | 0 | else { |
1783 | 0 | interp->threads.head = tstate->next; |
1784 | 0 | } |
1785 | 0 | if (tstate->next) { |
1786 | 0 | tstate->next->prev = tstate->prev; |
1787 | 0 | } |
1788 | 0 | if (tstate->state != _Py_THREAD_SUSPENDED) { |
1789 | | // Any ongoing stop-the-world request should not wait for us because |
1790 | | // our thread is getting deleted. |
1791 | 0 | if (interp->stoptheworld.requested) { |
1792 | 0 | decrement_stoptheworld_countdown(&interp->stoptheworld); |
1793 | 0 | } |
1794 | 0 | if (runtime->stoptheworld.requested) { |
1795 | 0 | decrement_stoptheworld_countdown(&runtime->stoptheworld); |
1796 | 0 | } |
1797 | 0 | } |
1798 | |
1799 | | #if defined(Py_REF_DEBUG) && defined(Py_GIL_DISABLED) |
1800 | | // Add our portion of the total refcount to the interpreter's total. |
1801 | | _PyThreadStateImpl *tstate_impl = (_PyThreadStateImpl *)tstate; |
1802 | | tstate->interp->object_state.reftotal += tstate_impl->reftotal; |
1803 | | tstate_impl->reftotal = 0; |
1804 | | assert(tstate_impl->refcounts.values == NULL); |
1805 | | #endif |
1806 | |
1807 | 0 | HEAD_UNLOCK(runtime); |
1808 | | |
1809 | | // XXX Unbind in PyThreadState_Clear(), or earlier |
1810 | | // (and assert not-equal here)? |
1811 | 0 | if (tstate->_status.bound_gilstate) { |
1812 | 0 | unbind_gilstate_tstate(tstate); |
1813 | 0 | } |
1814 | 0 | if (tstate->_status.bound) { |
1815 | 0 | unbind_tstate(tstate); |
1816 | 0 | } |
1817 | | |
1818 | | // XXX Move to PyThreadState_Clear()? |
1819 | 0 | clear_datastack(tstate); |
1820 | |
1821 | 0 | if (release_gil) { |
1822 | 0 | _PyEval_ReleaseLock(tstate->interp, tstate, 1); |
1823 | 0 | } |
1824 | |
|
1825 | | #ifdef Py_GIL_DISABLED |
1826 | | _Py_qsbr_unregister(tstate); |
1827 | | #endif |
1828 | |
|
1829 | 0 | tstate->_status.finalized = 1; |
1830 | 0 | } |
1831 | | |
1832 | | static void |
1833 | | zapthreads(PyInterpreterState *interp) |
1834 | 0 | { |
1835 | 0 | PyThreadState *tstate; |
1836 | | /* No need to lock the mutex here because this should only happen |
1837 | | when the threads are all really dead (XXX famous last words). |
1838 | | |
1839 | | Cannot use _Py_FOR_EACH_TSTATE_UNLOCKED because we are freeing |
1840 | | the thread states here. |
1841 | | */ |
1842 | 0 | while ((tstate = interp->threads.head) != NULL) { |
1843 | 0 | tstate_verify_not_active(tstate); |
1844 | 0 | tstate_delete_common(tstate, 0); |
1845 | 0 | free_threadstate((_PyThreadStateImpl *)tstate); |
1846 | 0 | } |
1847 | 0 | } |
1848 | | |
1849 | | |
1850 | | void |
1851 | | PyThreadState_Delete(PyThreadState *tstate) |
1852 | 0 | { |
1853 | 0 | _Py_EnsureTstateNotNULL(tstate); |
1854 | 0 | tstate_verify_not_active(tstate); |
1855 | 0 | tstate_delete_common(tstate, 0); |
1856 | 0 | free_threadstate((_PyThreadStateImpl *)tstate); |
1857 | 0 | } |
1858 | | |
1859 | | |
1860 | | void |
1861 | | _PyThreadState_DeleteCurrent(PyThreadState *tstate) |
1862 | 0 | { |
1863 | 0 | _Py_EnsureTstateNotNULL(tstate); |
1864 | | #ifdef Py_GIL_DISABLED |
1865 | | _Py_qsbr_detach(((_PyThreadStateImpl *)tstate)->qsbr); |
1866 | | #endif |
1867 | | #ifdef Py_STATS |
1868 | | _PyStats_Detach((_PyThreadStateImpl *)tstate); |
1869 | | #endif |
1870 | 0 | current_fast_clear(tstate->interp->runtime); |
1871 | 0 | tstate_delete_common(tstate, 1); // release GIL as part of call |
1872 | 0 | free_threadstate((_PyThreadStateImpl *)tstate); |
1873 | 0 | } |
1874 | | |
1875 | | void |
1876 | | PyThreadState_DeleteCurrent(void) |
1877 | 0 | { |
1878 | 0 | PyThreadState *tstate = current_fast_get(); |
1879 | 0 | _PyThreadState_DeleteCurrent(tstate); |
1880 | 0 | } |
1881 | | |
1882 | | |
1883 | | // Unlinks and removes all thread states from `tstate->interp`, with the |
1884 | | // exception of the one passed as an argument. However, it does not delete |
1885 | | // these thread states. Instead, it returns the removed thread states as a |
1886 | | // linked list. |
1887 | | // |
1888 | | // Note that if there is a current thread state, it *must* be the one |
1889 | | // passed as argument. Also, this won't touch any interpreters other |
1890 | | // than the current one, since we don't know which thread state should |
1891 | | // be kept in those other interpreters. |
1892 | | PyThreadState * |
1893 | | _PyThreadState_RemoveExcept(PyThreadState *tstate) |
1894 | 0 | { |
1895 | 0 | assert(tstate != NULL); |
1896 | 0 | PyInterpreterState *interp = tstate->interp; |
1897 | 0 | _PyRuntimeState *runtime = interp->runtime; |
1898 | |
1899 | | #ifdef Py_GIL_DISABLED |
1900 | | assert(runtime->stoptheworld.world_stopped); |
1901 | | #endif |
1902 | |
1903 | 0 | HEAD_LOCK(runtime); |
1904 | | /* Remove all thread states, except tstate, from the linked list of |
1905 | | thread states. */ |
1906 | 0 | PyThreadState *list = interp->threads.head; |
1907 | 0 | if (list == tstate) { |
1908 | 0 | list = tstate->next; |
1909 | 0 | } |
1910 | 0 | if (tstate->prev) { |
1911 | 0 | tstate->prev->next = tstate->next; |
1912 | 0 | } |
1913 | 0 | if (tstate->next) { |
1914 | 0 | tstate->next->prev = tstate->prev; |
1915 | 0 | } |
1916 | 0 | tstate->prev = tstate->next = NULL; |
1917 | 0 | interp->threads.head = tstate; |
1918 | 0 | HEAD_UNLOCK(runtime); |
1919 | |
1920 | 0 | return list; |
1921 | 0 | } |
1922 | | |
1923 | | // Deletes the thread states in the linked list `list`. |
1924 | | // |
1925 | | // This is intended to be used in conjunction with _PyThreadState_RemoveExcept. |
1926 | | // |
1927 | | // If `is_after_fork` is true, the thread states are immediately freed. |
1928 | | // Otherwise, they are decref'd because they may still be referenced by an |
1929 | | // OS thread. |
1930 | | void |
1931 | | _PyThreadState_DeleteList(PyThreadState *list, int is_after_fork) |
1932 | 0 | { |
1933 | | // The world can't be stopped because PyThreadState_Clear() can |
1934 | | // call destructors. |
1935 | 0 | assert(!_PyRuntime.stoptheworld.world_stopped); |
1936 | |
1937 | 0 | PyThreadState *p, *next; |
1938 | 0 | for (p = list; p; p = next) { |
1939 | 0 | next = p->next; |
1940 | 0 | PyThreadState_Clear(p); |
1941 | 0 | if (is_after_fork) { |
1942 | 0 | free_threadstate((_PyThreadStateImpl *)p); |
1943 | 0 | } |
1944 | 0 | else { |
1945 | 0 | decref_threadstate((_PyThreadStateImpl *)p); |
1946 | 0 | } |
1947 | 0 | } |
1948 | 0 | } |
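A condensed sketch of how the two helpers pair up on the after-fork path (the
caller name and the stop-the-world bracketing are assumed context, not code
from this file):

static void
prune_other_threads(PyThreadState *tstate)
{
    /* The world must be stopped while unlinking (asserted above in
       free-threaded builds). */
    PyThreadState *others = _PyThreadState_RemoveExcept(tstate);
    /* ... resume the world before clearing, since destructors may run ... */
    _PyThreadState_DeleteList(others, /*is_after_fork=*/1);
}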
1949 | | |
1950 | | |
1951 | | //---------- |
1952 | | // accessors |
1953 | | //---------- |
1954 | | |
1955 | | /* An extension mechanism to store arbitrary additional per-thread state. |
1956 | | PyThreadState_GetDict() returns a dictionary that can be used to hold such |
1957 | | state; the caller should pick a unique key and store its state there. If |
1958 | | PyThreadState_GetDict() returns NULL, an exception has *not* been raised |
1959 | | and the caller should assume no per-thread state is available. */ |
1960 | | |
1961 | | PyObject * |
1962 | | _PyThreadState_GetDict(PyThreadState *tstate) |
1963 | 8.33M | { |
1964 | 8.33M | assert(tstate != NULL); |
1965 | 8.33M | if (tstate->dict == NULL) { |
1966 | 1 | tstate->dict = PyDict_New(); |
1967 | 1 | if (tstate->dict == NULL) { |
1968 | 0 | _PyErr_Clear(tstate); |
1969 | 0 | } |
1970 | 1 | } |
1971 | 8.33M | return tstate->dict; |
1972 | 8.33M | } |
1973 | | |
1974 | | |
1975 | | PyObject * |
1976 | | PyThreadState_GetDict(void) |
1977 | 8.33M | { |
1978 | 8.33M | PyThreadState *tstate = current_fast_get(); |
1979 | 8.33M | if (tstate == NULL) { |
1980 | 0 | return NULL; |
1981 | 0 | } |
1982 | 8.33M | return _PyThreadState_GetDict(tstate); |
1983 | 8.33M | } |
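A sketch of the extension-side idiom described in the comment above (the key
and the helper name are hypothetical):

static int
set_per_thread_counter(long value)
{
    PyObject *dict = PyThreadState_GetDict();  /* borrowed reference */
    if (dict == NULL) {
        return -1;  /* no per-thread state available; no exception is set */
    }
    PyObject *v = PyLong_FromLong(value);
    if (v == NULL) {
        return -1;
    }
    int rc = PyDict_SetItemString(dict, "mymodule.counter", v);
    Py_DECREF(v);
    return rc;
}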
1984 | | |
1985 | | |
1986 | | PyInterpreterState * |
1987 | | PyThreadState_GetInterpreter(PyThreadState *tstate) |
1988 | 0 | { |
1989 | 0 | assert(tstate != NULL); |
1990 | 0 | return tstate->interp; |
1991 | 0 | } |
1992 | | |
1993 | | |
1994 | | PyFrameObject* |
1995 | | PyThreadState_GetFrame(PyThreadState *tstate) |
1996 | 6.72k | { |
1997 | 6.72k | assert(tstate != NULL); |
1998 | 6.72k | _PyInterpreterFrame *f = _PyThreadState_GetFrame(tstate); |
1999 | 6.72k | if (f == NULL) { |
2000 | 0 | return NULL; |
2001 | 0 | } |
2002 | 6.72k | PyFrameObject *frame = _PyFrame_GetFrameObject(f); |
2003 | 6.72k | if (frame == NULL) { |
2004 | 0 | PyErr_Clear(); |
2005 | 0 | } |
2006 | 6.72k | return (PyFrameObject*)Py_XNewRef(frame); |
2007 | 6.72k | } |
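The caller owns the returned frame reference. A short usage sketch, given some
valid tstate (what you inspect on the code object is up to the caller):

PyFrameObject *frame = PyThreadState_GetFrame(tstate);  /* new ref or NULL */
if (frame != NULL) {
    PyCodeObject *code = PyFrame_GetCode(frame);        /* new reference */
    /* ... inspect the code object ... */
    Py_DECREF(code);
    Py_DECREF(frame);
}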
2008 | | |
2009 | | |
2010 | | uint64_t |
2011 | | PyThreadState_GetID(PyThreadState *tstate) |
2012 | 0 | { |
2013 | 0 | assert(tstate != NULL); |
2014 | 0 | return tstate->id; |
2015 | 0 | } |
2016 | | |
2017 | | |
2018 | | static inline void |
2019 | | tstate_activate(PyThreadState *tstate) |
2020 | 35.6k | { |
2021 | 35.6k | assert(tstate != NULL); |
2022 | | // XXX assert(tstate_is_alive(tstate)); |
2023 | 35.6k | assert(tstate_is_bound(tstate)); |
2024 | 35.6k | assert(!tstate->_status.active); |
2025 | | |
2026 | 35.6k | assert(!tstate->_status.bound_gilstate || |
2027 | 35.6k | tstate == gilstate_get()); |
2028 | 35.6k | if (!tstate->_status.bound_gilstate) { |
2029 | 0 | bind_gilstate_tstate(tstate); |
2030 | 0 | } |
2031 | | |
2032 | 35.6k | tstate->_status.active = 1; |
2033 | 35.6k | } |
2034 | | |
2035 | | static inline void |
2036 | | tstate_deactivate(PyThreadState *tstate) |
2037 | 35.6k | { |
2038 | 35.6k | assert(tstate != NULL); |
2039 | | // XXX assert(tstate_is_alive(tstate)); |
2040 | 35.6k | assert(tstate_is_bound(tstate)); |
2041 | 35.6k | assert(tstate->_status.active); |
2042 | | |
2043 | | #ifdef Py_STATS |
2044 | | _PyStats_Detach((_PyThreadStateImpl *)tstate); |
2045 | | #endif |
2046 | | |
2047 | 35.6k | tstate->_status.active = 0; |
2048 | | |
2049 | | // We do not unbind the gilstate tstate here. |
2050 | | // It will still be used in PyGILState_Ensure(). |
2051 | 35.6k | } |
2052 | | |
2053 | | static int |
2054 | | tstate_try_attach(PyThreadState *tstate) |
2055 | 35.6k | { |
2056 | | #ifdef Py_GIL_DISABLED |
2057 | | int expected = _Py_THREAD_DETACHED; |
2058 | | return _Py_atomic_compare_exchange_int(&tstate->state, |
2059 | | &expected, |
2060 | | _Py_THREAD_ATTACHED); |
2061 | | #else |
2062 | 35.6k | assert(tstate->state == _Py_THREAD_DETACHED); |
2063 | 35.6k | tstate->state = _Py_THREAD_ATTACHED; |
2064 | 35.6k | return 1; |
2065 | 35.6k | #endif |
2066 | 35.6k | } |
2067 | | |
2068 | | static void |
2069 | | tstate_set_detached(PyThreadState *tstate, int detached_state) |
2070 | 35.6k | { |
2071 | 35.6k | assert(_Py_atomic_load_int_relaxed(&tstate->state) == _Py_THREAD_ATTACHED); |
2072 | | #ifdef Py_GIL_DISABLED |
2073 | | _Py_atomic_store_int(&tstate->state, detached_state); |
2074 | | #else |
2075 | 35.6k | tstate->state = detached_state; |
2076 | 35.6k | #endif |
2077 | 35.6k | } |
2078 | | |
2079 | | static void |
2080 | | tstate_wait_attach(PyThreadState *tstate) |
2081 | 0 | { |
2082 | 0 | do { |
2083 | 0 | int state = _Py_atomic_load_int_relaxed(&tstate->state); |
2084 | 0 | if (state == _Py_THREAD_SUSPENDED) { |
2085 | | // Wait until we're switched out of SUSPENDED to DETACHED. |
2086 | 0 | _PyParkingLot_Park(&tstate->state, &state, sizeof(tstate->state), |
2087 | 0 | /*timeout=*/-1, NULL, /*detach=*/0); |
2088 | 0 | } |
2089 | 0 | else if (state == _Py_THREAD_SHUTTING_DOWN) { |
2090 | | // We're shutting down, so we can't attach. |
2091 | 0 | _PyThreadState_HangThread(tstate); |
2092 | 0 | } |
2093 | 0 | else { |
2094 | 0 | assert(state == _Py_THREAD_DETACHED); |
2095 | 0 | } |
2096 | | // Once we're back in DETACHED we can re-attach |
2097 | 0 | } while (!tstate_try_attach(tstate)); |
2098 | 0 | } |
2099 | | |
2100 | | void |
2101 | | _PyThreadState_Attach(PyThreadState *tstate) |
2102 | 35.6k | { |
2103 | | #if defined(Py_DEBUG) |
2104 | | // This is called from PyEval_RestoreThread(). Similar |
2105 | | // to it, we need to ensure errno doesn't change. |
2106 | | int err = errno; |
2107 | | #endif |
2108 | | |
2109 | 35.6k | _Py_EnsureTstateNotNULL(tstate); |
2110 | 35.6k | if (current_fast_get() != NULL) { |
2111 | 0 | Py_FatalError("non-NULL old thread state"); |
2112 | 0 | } |
2113 | 35.6k | _PyThreadStateImpl *_tstate = (_PyThreadStateImpl *)tstate; |
2114 | 35.6k | if (_tstate->c_stack_hard_limit == 0) { |
2115 | 22 | _Py_InitializeRecursionLimits(tstate); |
2116 | 22 | } |
2117 | | |
2118 | 35.6k | while (1) { |
2119 | 35.6k | _PyEval_AcquireLock(tstate); |
2120 | | |
2121 | | // XXX assert(tstate_is_alive(tstate)); |
2122 | 35.6k | current_fast_set(&_PyRuntime, tstate); |
2123 | 35.6k | if (!tstate_try_attach(tstate)) { |
2124 | 0 | tstate_wait_attach(tstate); |
2125 | 0 | } |
2126 | 35.6k | tstate_activate(tstate); |
2127 | | |
2128 | | #ifdef Py_GIL_DISABLED |
2129 | | if (_PyEval_IsGILEnabled(tstate) && !tstate->holds_gil) { |
2130 | | // The GIL was enabled between our call to _PyEval_AcquireLock() |
2131 | | // and when we attached (the GIL can't go from enabled to disabled |
2132 | | // here because only a thread holding the GIL can disable |
2133 | | // it). Detach and try again. |
2134 | | tstate_set_detached(tstate, _Py_THREAD_DETACHED); |
2135 | | tstate_deactivate(tstate); |
2136 | | current_fast_clear(&_PyRuntime); |
2137 | | continue; |
2138 | | } |
2139 | | _Py_qsbr_attach(((_PyThreadStateImpl *)tstate)->qsbr); |
2140 | | #endif |
2141 | 35.6k | break; |
2142 | 35.6k | } |
2143 | | |
2144 | | // Resume previous critical section. This acquires the lock(s) from the |
2145 | | // top-most critical section. |
2146 | 35.6k | if (tstate->critical_section != 0) { |
2147 | 0 | _PyCriticalSection_Resume(tstate); |
2148 | 0 | } |
2149 | | |
2150 | | #ifdef Py_STATS |
2151 | | _PyStats_Attach((_PyThreadStateImpl *)tstate); |
2152 | | #endif |
2153 | | |
2154 | | #if defined(Py_DEBUG) |
2155 | | errno = err; |
2156 | | #endif |
2157 | 35.6k | } |
2158 | | |
2159 | | static void |
2160 | | detach_thread(PyThreadState *tstate, int detached_state) |
2161 | 35.6k | { |
2162 | | // XXX assert(tstate_is_alive(tstate) && tstate_is_bound(tstate)); |
2163 | 35.6k | assert(_Py_atomic_load_int_relaxed(&tstate->state) == _Py_THREAD_ATTACHED); |
2164 | 35.6k | assert(tstate == current_fast_get()); |
2165 | 35.6k | if (tstate->critical_section != 0) { |
2166 | 0 | _PyCriticalSection_SuspendAll(tstate); |
2167 | 0 | } |
2168 | | #ifdef Py_GIL_DISABLED |
2169 | | _Py_qsbr_detach(((_PyThreadStateImpl *)tstate)->qsbr); |
2170 | | #endif |
2171 | 35.6k | tstate_deactivate(tstate); |
2172 | 35.6k | tstate_set_detached(tstate, detached_state); |
2173 | 35.6k | current_fast_clear(&_PyRuntime); |
2174 | 35.6k | _PyEval_ReleaseLock(tstate->interp, tstate, 0); |
2175 | 35.6k | } |
2176 | | |
2177 | | void |
2178 | | _PyThreadState_Detach(PyThreadState *tstate) |
2179 | 35.6k | { |
2180 | 35.6k | detach_thread(tstate, _Py_THREAD_DETACHED); |
2181 | 35.6k | } |
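Attach and detach are the machinery underneath PyEval_RestoreThread() and
PyEval_SaveThread(), and hence the Py_BEGIN/END_ALLOW_THREADS macros. The
familiar extension-level pattern built on them (do_blocking_io() is a
placeholder):

PyThreadState *save = PyEval_SaveThread();  /* detach: releases the GIL */
do_blocking_io();                           /* no Python C API calls here */
PyEval_RestoreThread(save);                 /* attach: re-takes the GIL */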
2182 | | |
2183 | | void |
2184 | | _PyThreadState_Suspend(PyThreadState *tstate) |
2185 | 0 | { |
2186 | 0 | _PyRuntimeState *runtime = &_PyRuntime; |
2187 | |
2188 | 0 | assert(_Py_atomic_load_int_relaxed(&tstate->state) == _Py_THREAD_ATTACHED); |
2189 | |
2190 | 0 | struct _stoptheworld_state *stw = NULL; |
2191 | 0 | HEAD_LOCK(runtime); |
2192 | 0 | if (runtime->stoptheworld.requested) { |
2193 | 0 | stw = &runtime->stoptheworld; |
2194 | 0 | } |
2195 | 0 | else if (tstate->interp->stoptheworld.requested) { |
2196 | 0 | stw = &tstate->interp->stoptheworld; |
2197 | 0 | } |
2198 | 0 | HEAD_UNLOCK(runtime); |
2199 | |
2200 | 0 | if (stw == NULL) { |
2201 | | // Switch directly to "detached" if there is no active stop-the-world |
2202 | | // request. |
2203 | 0 | detach_thread(tstate, _Py_THREAD_DETACHED); |
2204 | 0 | return; |
2205 | 0 | } |
2206 | | |
2207 | | // Switch to "suspended" state. |
2208 | 0 | detach_thread(tstate, _Py_THREAD_SUSPENDED); |
2209 | | |
2210 | | // Decrease the count of remaining threads needing to park. |
2211 | 0 | HEAD_LOCK(runtime); |
2212 | 0 | decrement_stoptheworld_countdown(stw); |
2213 | 0 | HEAD_UNLOCK(runtime); |
2214 | 0 | } |
2215 | | |
2216 | | void |
2217 | | _PyThreadState_SetShuttingDown(PyThreadState *tstate) |
2218 | 0 | { |
2219 | 0 | _Py_atomic_store_int(&tstate->state, _Py_THREAD_SHUTTING_DOWN); |
2220 | | #ifdef Py_GIL_DISABLED |
2221 | | _PyParkingLot_UnparkAll(&tstate->state); |
2222 | | #endif |
2223 | 0 | } |
2224 | | |
2225 | | // Decrease the stop-the-world countdown of remaining threads that need to |
2226 | | // pause. If we are the final thread to pause, notify the requesting thread. |
2227 | | static void |
2228 | | decrement_stoptheworld_countdown(struct _stoptheworld_state *stw) |
2229 | 0 | { |
2230 | 0 | assert(stw->thread_countdown > 0); |
2231 | 0 | if (--stw->thread_countdown == 0) { |
2232 | 0 | _PyEvent_Notify(&stw->stop_event); |
2233 | 0 | } |
2234 | 0 | } |
2235 | | |
2236 | | #ifdef Py_GIL_DISABLED |
2237 | | // Interpreter for _Py_FOR_EACH_STW_INTERP(). For global stop-the-world events, |
2238 | | // we start with the first interpreter and then iterate over all interpreters. |
2239 | | // For per-interpreter stop-the-world events, we only operate on the one |
2240 | | // interpreter. |
2241 | | static PyInterpreterState * |
2242 | | interp_for_stop_the_world(struct _stoptheworld_state *stw) |
2243 | | { |
2244 | | return (stw->is_global |
2245 | | ? PyInterpreterState_Head() |
2246 | | : _Py_CONTAINER_OF(stw, PyInterpreterState, stoptheworld)); |
2247 | | } |
2248 | | |
2249 | | // Loops over threads for a stop-the-world event. |
2250 | | // For global: all threads in all interpreters |
2251 | | // For per-interpreter: all threads in the interpreter |
2252 | | #define _Py_FOR_EACH_STW_INTERP(stw, i) \ |
2253 | | for (PyInterpreterState *i = interp_for_stop_the_world((stw)); \ |
2254 | | i != NULL; i = ((stw->is_global) ? i->next : NULL)) |
2255 | | |
2256 | | |
2257 | | // Try to transition threads atomically from the "detached" state to the |
2258 | | // "gc stopped" state. Returns true if all threads are in the "gc stopped" state. |
2259 | | static bool |
2260 | | park_detached_threads(struct _stoptheworld_state *stw) |
2261 | | { |
2262 | | int num_parked = 0; |
2263 | | _Py_FOR_EACH_STW_INTERP(stw, i) { |
2264 | | _Py_FOR_EACH_TSTATE_UNLOCKED(i, t) { |
2265 | | int state = _Py_atomic_load_int_relaxed(&t->state); |
2266 | | if (state == _Py_THREAD_DETACHED) { |
2267 | | // Atomically transition to "suspended" if in "detached" state. |
2268 | | if (_Py_atomic_compare_exchange_int( |
2269 | | &t->state, &state, _Py_THREAD_SUSPENDED)) { |
2270 | | num_parked++; |
2271 | | } |
2272 | | } |
2273 | | else if (state == _Py_THREAD_ATTACHED && t != stw->requester) { |
2274 | | _Py_set_eval_breaker_bit(t, _PY_EVAL_PLEASE_STOP_BIT); |
2275 | | } |
2276 | | } |
2277 | | } |
2278 | | stw->thread_countdown -= num_parked; |
2279 | | assert(stw->thread_countdown >= 0); |
2280 | | return num_parked > 0 && stw->thread_countdown == 0; |
2281 | | } |
2282 | | |
2283 | | static void |
2284 | | stop_the_world(struct _stoptheworld_state *stw) |
2285 | | { |
2286 | | _PyRuntimeState *runtime = &_PyRuntime; |
2287 | | |
2288 | | // gh-137433: Acquire the rwmutex first to avoid deadlocks with daemon |
2289 | | // threads that may hang when blocked on lock acquisition. |
2290 | | if (stw->is_global) { |
2291 | | _PyRWMutex_Lock(&runtime->stoptheworld_mutex); |
2292 | | } |
2293 | | else { |
2294 | | _PyRWMutex_RLock(&runtime->stoptheworld_mutex); |
2295 | | } |
2296 | | PyMutex_Lock(&stw->mutex); |
2297 | | |
2298 | | HEAD_LOCK(runtime); |
2299 | | stw->requested = 1; |
2300 | | stw->thread_countdown = 0; |
2301 | | stw->stop_event = (PyEvent){0}; // zero-initialize (unset) |
2302 | | stw->requester = _PyThreadState_GET(); // may be NULL |
2303 | | FT_STAT_WORLD_STOP_INC(); |
2304 | | |
2305 | | _Py_FOR_EACH_STW_INTERP(stw, i) { |
2306 | | _Py_FOR_EACH_TSTATE_UNLOCKED(i, t) { |
2307 | | if (t != stw->requester) { |
2308 | | // Count all the other threads (we don't wait on ourself). |
2309 | | stw->thread_countdown++; |
2310 | | } |
2311 | | } |
2312 | | } |
2313 | | |
2314 | | if (stw->thread_countdown == 0) { |
2315 | | HEAD_UNLOCK(runtime); |
2316 | | stw->world_stopped = 1; |
2317 | | return; |
2318 | | } |
2319 | | |
2320 | | for (;;) { |
2321 | | // Switch threads that are detached to the GC stopped state |
2322 | | bool stopped_all_threads = park_detached_threads(stw); |
2323 | | HEAD_UNLOCK(runtime); |
2324 | | |
2325 | | if (stopped_all_threads) { |
2326 | | break; |
2327 | | } |
2328 | | |
2329 | | PyTime_t wait_ns = 1000*1000; // 1ms (arbitrary, may need tuning) |
2330 | | int detach = 0; |
2331 | | if (PyEvent_WaitTimed(&stw->stop_event, wait_ns, detach)) { |
2332 | | assert(stw->thread_countdown == 0); |
2333 | | break; |
2334 | | } |
2335 | | |
2336 | | HEAD_LOCK(runtime); |
2337 | | } |
2338 | | stw->world_stopped = 1; |
2339 | | } |
2340 | | |
2341 | | static void |
2342 | | start_the_world(struct _stoptheworld_state *stw) |
2343 | | { |
2344 | | _PyRuntimeState *runtime = &_PyRuntime; |
2345 | | assert(PyMutex_IsLocked(&stw->mutex)); |
2346 | | |
2347 | | HEAD_LOCK(runtime); |
2348 | | stw->requested = 0; |
2349 | | stw->world_stopped = 0; |
2350 | | // Switch threads back to the detached state. |
2351 | | _Py_FOR_EACH_STW_INTERP(stw, i) { |
2352 | | _Py_FOR_EACH_TSTATE_UNLOCKED(i, t) { |
2353 | | if (t != stw->requester) { |
2354 | | assert(_Py_atomic_load_int_relaxed(&t->state) == |
2355 | | _Py_THREAD_SUSPENDED); |
2356 | | _Py_atomic_store_int(&t->state, _Py_THREAD_DETACHED); |
2357 | | _PyParkingLot_UnparkAll(&t->state); |
2358 | | } |
2359 | | } |
2360 | | } |
2361 | | stw->requester = NULL; |
2362 | | HEAD_UNLOCK(runtime); |
2363 | | PyMutex_Unlock(&stw->mutex); |
2364 | | if (stw->is_global) { |
2365 | | _PyRWMutex_Unlock(&runtime->stoptheworld_mutex); |
2366 | | } |
2367 | | else { |
2368 | | _PyRWMutex_RUnlock(&runtime->stoptheworld_mutex); |
2369 | | } |
2370 | | } |
2371 | | #endif // Py_GIL_DISABLED |
2372 | | |
2373 | | void |
2374 | | _PyEval_StopTheWorldAll(_PyRuntimeState *runtime) |
2375 | 0 | { |
2376 | | #ifdef Py_GIL_DISABLED |
2377 | | stop_the_world(&runtime->stoptheworld); |
2378 | | #endif |
2379 | 0 | } |
2380 | | |
2381 | | void |
2382 | | _PyEval_StartTheWorldAll(_PyRuntimeState *runtime) |
2383 | 0 | { |
2384 | | #ifdef Py_GIL_DISABLED |
2385 | | start_the_world(&runtime->stoptheworld); |
2386 | | #endif |
2387 | 0 | } |
2388 | | |
2389 | | void |
2390 | | _PyEval_StopTheWorld(PyInterpreterState *interp) |
2391 | 4 | { |
2392 | | #ifdef Py_GIL_DISABLED |
2393 | | stop_the_world(&interp->stoptheworld); |
2394 | | #endif |
2395 | 4 | } |
2396 | | |
2397 | | void |
2398 | | _PyEval_StartTheWorld(PyInterpreterState *interp) |
2399 | 4 | { |
2400 | | #ifdef Py_GIL_DISABLED |
2401 | | start_the_world(&interp->stoptheworld); |
2402 | | #endif |
2403 | 4 | } |
2404 | | |
2405 | | //---------- |
2406 | | // other API |
2407 | | //---------- |
2408 | | |
2409 | | /* Asynchronously raise an exception in a thread. |
2410 | | Requested by Just van Rossum and Alex Martelli. |
2411 | | To prevent naive misuse, you must write your own extension |
2412 | | to call this, or use ctypes. Must be called with the GIL held. |
2413 | | Returns the number of tstates modified (normally 1, but 0 if `id` didn't |
2414 | | match any known thread id). Can be called with exc=NULL to clear an |
2415 | | existing async exception. This raises no exceptions. */ |
2416 | | |
2417 | | // XXX Move this to Python/ceval_gil.c? |
2418 | | // XXX Deprecate this. |
2419 | | int |
2420 | | PyThreadState_SetAsyncExc(unsigned long id, PyObject *exc) |
2421 | 0 | { |
2422 | 0 | PyInterpreterState *interp = _PyInterpreterState_GET(); |
2423 | | |
2424 | | /* Although the GIL is held, a few C API functions can be called |
2425 | | * without the GIL held, and in particular some that create and |
2426 | | * destroy thread and interpreter states. Those can mutate the |
2427 | | * list of thread states we're traversing, so to prevent that we lock |
2428 | | * head_mutex for the duration. |
2429 | | */ |
2430 | 0 | PyThreadState *tstate = NULL; |
2431 | 0 | _Py_FOR_EACH_TSTATE_BEGIN(interp, t) { |
2432 | 0 | if (t->thread_id == id) { |
2433 | 0 | tstate = t; |
2434 | 0 | break; |
2435 | 0 | } |
2436 | 0 | } |
2437 | 0 | _Py_FOR_EACH_TSTATE_END(interp); |
2438 | |
2439 | 0 | if (tstate != NULL) { |
2440 | | /* Tricky: we need to decref the current value |
2441 | | * (if any) in tstate->async_exc, but that can in turn |
2442 | | * allow arbitrary Python code to run, including |
2443 | | * perhaps calls to this function. To prevent |
2444 | | * deadlock, we need to release head_mutex before |
2445 | | * the decref. |
2446 | | */ |
2447 | 0 | Py_XINCREF(exc); |
2448 | 0 | PyObject *old_exc = _Py_atomic_exchange_ptr(&tstate->async_exc, exc); |
2449 | |
2450 | 0 | Py_XDECREF(old_exc); |
2451 | 0 | _Py_set_eval_breaker_bit(tstate, _PY_ASYNC_EXCEPTION_BIT); |
2452 | 0 | } |
2453 | |
2454 | 0 | return tstate != NULL; |
2455 | 0 | } |
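As the comment above says, callers reach this through their own extension (or
ctypes). A sketch of such a wrapper (the function and its module glue are
hypothetical):

static PyObject *
raise_in_thread(PyObject *Py_UNUSED(self), PyObject *args)
{
    unsigned long tid;
    PyObject *exc;
    if (!PyArg_ParseTuple(args, "kO", &tid, &exc)) {
        return NULL;
    }
    /* Passing NULL clears a pending async exception instead. */
    int n = PyThreadState_SetAsyncExc(tid, exc == Py_None ? NULL : exc);
    return PyLong_FromLong(n);  /* number of tstates modified: 0 or 1 */
}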
2456 | | |
2457 | | //--------------------------------- |
2458 | | // API for the current thread state |
2459 | | //--------------------------------- |
2460 | | |
2461 | | PyThreadState * |
2462 | | PyThreadState_GetUnchecked(void) |
2463 | 0 | { |
2464 | 0 | return current_fast_get(); |
2465 | 0 | } |
2466 | | |
2467 | | |
2468 | | PyThreadState * |
2469 | | PyThreadState_Get(void) |
2470 | 83.3M | { |
2471 | 83.3M | PyThreadState *tstate = current_fast_get(); |
2472 | 83.3M | _Py_EnsureTstateNotNULL(tstate); |
2473 | 83.3M | return tstate; |
2474 | 83.3M | } |
2475 | | |
2476 | | PyThreadState * |
2477 | | _PyThreadState_Swap(_PyRuntimeState *runtime, PyThreadState *newts) |
2478 | 0 | { |
2479 | 0 | PyThreadState *oldts = current_fast_get(); |
2480 | 0 | if (oldts != NULL) { |
2481 | 0 | _PyThreadState_Detach(oldts); |
2482 | 0 | } |
2483 | 0 | if (newts != NULL) { |
2484 | 0 | _PyThreadState_Attach(newts); |
2485 | 0 | } |
2486 | 0 | return oldts; |
2487 | 0 | } |
2488 | | |
2489 | | PyThreadState * |
2490 | | PyThreadState_Swap(PyThreadState *newts) |
2491 | 0 | { |
2492 | 0 | return _PyThreadState_Swap(&_PyRuntime, newts); |
2493 | 0 | } |
2494 | | |
2495 | | |
2496 | | void |
2497 | | _PyThreadState_Bind(PyThreadState *tstate) |
2498 | 22 | { |
2499 | | // gh-104690: If Python is being finalized and PyInterpreterState_Delete() |
2500 | | // was called, tstate becomes a dangling pointer. |
2501 | 22 | assert(_PyThreadState_CheckConsistency(tstate)); |
2502 | | |
2503 | 22 | bind_tstate(tstate); |
2504 | | // This makes sure there's a gilstate tstate bound |
2505 | | // as soon as possible. |
2506 | 22 | if (gilstate_get() == NULL) { |
2507 | 22 | bind_gilstate_tstate(tstate); |
2508 | 22 | } |
2509 | 22 | } |
2510 | | |
2511 | | #if defined(Py_GIL_DISABLED) && !defined(Py_LIMITED_API) |
2512 | | uintptr_t |
2513 | | _Py_GetThreadLocal_Addr(void) |
2514 | | { |
2515 | | // gh-112535: Use the address of the thread-local PyThreadState variable as |
2516 | | // a unique identifier for the current thread. Each thread has a unique |
2517 | | // _Py_tss_tstate variable with a unique address. |
2518 | | return (uintptr_t)&_Py_tss_tstate; |
2519 | | } |
2520 | | #endif |
2521 | | |
2522 | | /***********************************/ |
2523 | | /* routines for advanced debuggers */ |
2524 | | /***********************************/ |
2525 | | |
2526 | | // (requested by David Beazley) |
2527 | | // Don't use unless you know what you are doing! |
2528 | | |
2529 | | PyInterpreterState * |
2530 | | PyInterpreterState_Head(void) |
2531 | 0 | { |
2532 | 0 | return _PyRuntime.interpreters.head; |
2533 | 0 | } |
2534 | | |
2535 | | PyInterpreterState * |
2536 | | PyInterpreterState_Main(void) |
2537 | 0 | { |
2538 | 0 | return _PyInterpreterState_Main(); |
2539 | 0 | } |
2540 | | |
2541 | | PyInterpreterState * |
2542 | 0 | PyInterpreterState_Next(PyInterpreterState *interp) { |
2543 | 0 | return interp->next; |
2544 | 0 | } |
2545 | | |
2546 | | PyThreadState * |
2547 | 12.3k | PyInterpreterState_ThreadHead(PyInterpreterState *interp) { |
2548 | 12.3k | return interp->threads.head; |
2549 | 12.3k | } |
2550 | | |
2551 | | PyThreadState * |
2552 | 12.3k | PyThreadState_Next(PyThreadState *tstate) { |
2553 | 12.3k | return tstate->next; |
2554 | 12.3k | } |
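Combined with PyInterpreterState_Head()/PyInterpreterState_Next(), these
support a debugger-style walk over every thread state. Note that no locking is
shown, which is exactly why these functions carry the warning above:

for (PyInterpreterState *i = PyInterpreterState_Head(); i != NULL;
     i = PyInterpreterState_Next(i)) {
    for (PyThreadState *t = PyInterpreterState_ThreadHead(i); t != NULL;
         t = PyThreadState_Next(t)) {
        /* ... read-only inspection of t ... */
    }
}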
2555 | | |
2556 | | |
2557 | | /********************************************/ |
2558 | | /* reporting execution state of all threads */ |
2559 | | /********************************************/ |
2560 | | |
2561 | | /* The implementation of sys._current_frames(). This is intended to be |
2562 | | called with the GIL held, as it will be when called via |
2563 | | sys._current_frames(). It's possible it would work fine even without |
2564 | | the GIL held, but we haven't thought enough about that. |
2565 | | */ |
2566 | | PyObject * |
2567 | | _PyThread_CurrentFrames(void) |
2568 | 0 | { |
2569 | 0 | _PyRuntimeState *runtime = &_PyRuntime; |
2570 | 0 | PyThreadState *tstate = current_fast_get(); |
2571 | 0 | if (_PySys_Audit(tstate, "sys._current_frames", NULL) < 0) { |
2572 | 0 | return NULL; |
2573 | 0 | } |
2574 | | |
2575 | 0 | PyObject *result = PyDict_New(); |
2576 | 0 | if (result == NULL) { |
2577 | 0 | return NULL; |
2578 | 0 | } |
2579 | | |
2580 | | /* for i in all interpreters: |
2581 | | * for t in all of i's thread states: |
2582 | | * if t's frame isn't NULL, map t's id to its frame |
2583 | | * Because these lists can mutate even when the GIL is held, we |
2584 | | * need to grab head_mutex for the duration. |
2585 | | */ |
2586 | 0 | _PyEval_StopTheWorldAll(runtime); |
2587 | 0 | HEAD_LOCK(runtime); |
2588 | 0 | PyInterpreterState *i; |
2589 | 0 | for (i = runtime->interpreters.head; i != NULL; i = i->next) { |
2590 | 0 | _Py_FOR_EACH_TSTATE_UNLOCKED(i, t) { |
2591 | 0 | _PyInterpreterFrame *frame = t->current_frame; |
2592 | 0 | frame = _PyFrame_GetFirstComplete(frame); |
2593 | 0 | if (frame == NULL) { |
2594 | 0 | continue; |
2595 | 0 | } |
2596 | 0 | PyObject *id = PyLong_FromUnsignedLong(t->thread_id); |
2597 | 0 | if (id == NULL) { |
2598 | 0 | goto fail; |
2599 | 0 | } |
2600 | 0 | PyObject *frameobj = (PyObject *)_PyFrame_GetFrameObject(frame); |
2601 | 0 | if (frameobj == NULL) { |
2602 | 0 | Py_DECREF(id); |
2603 | 0 | goto fail; |
2604 | 0 | } |
2605 | 0 | int stat = PyDict_SetItem(result, id, frameobj); |
2606 | 0 | Py_DECREF(id); |
2607 | 0 | if (stat < 0) { |
2608 | 0 | goto fail; |
2609 | 0 | } |
2610 | 0 | } |
2611 | 0 | } |
2612 | 0 | goto done; |
2613 | | |
2614 | 0 | fail: |
2615 | 0 | Py_CLEAR(result); |
2616 | |
2617 | 0 | done: |
2618 | 0 | HEAD_UNLOCK(runtime); |
2619 | 0 | _PyEval_StartTheWorldAll(runtime); |
2620 | 0 | return result; |
2621 | 0 | } |
2622 | | |
2623 | | /* The implementation of sys._current_exceptions(). This is intended to be |
2624 | | called with the GIL held, as it will be when called via |
2625 | | sys._current_exceptions(). It's possible it would work fine even without |
2626 | | the GIL held, but we haven't thought enough about that. |
2627 | | */ |
2628 | | PyObject * |
2629 | | _PyThread_CurrentExceptions(void) |
2630 | 0 | { |
2631 | 0 | _PyRuntimeState *runtime = &_PyRuntime; |
2632 | 0 | PyThreadState *tstate = current_fast_get(); |
2633 | |
2634 | 0 | _Py_EnsureTstateNotNULL(tstate); |
2635 | |
2636 | 0 | if (_PySys_Audit(tstate, "sys._current_exceptions", NULL) < 0) { |
2637 | 0 | return NULL; |
2638 | 0 | } |
2639 | | |
2640 | 0 | PyObject *result = PyDict_New(); |
2641 | 0 | if (result == NULL) { |
2642 | 0 | return NULL; |
2643 | 0 | } |
2644 | | |
2645 | | /* for i in all interpreters: |
2646 | | * for t in all of i's thread states: |
2647 | | * if t's topmost exception isn't NULL, map t's id to its exception |
2648 | | * Because these lists can mutate even when the GIL is held, we |
2649 | | * need to grab head_mutex for the duration. |
2650 | | */ |
2651 | 0 | _PyEval_StopTheWorldAll(runtime); |
2652 | 0 | HEAD_LOCK(runtime); |
2653 | 0 | PyInterpreterState *i; |
2654 | 0 | for (i = runtime->interpreters.head; i != NULL; i = i->next) { |
2655 | 0 | _Py_FOR_EACH_TSTATE_UNLOCKED(i, t) { |
2656 | 0 | _PyErr_StackItem *err_info = _PyErr_GetTopmostException(t); |
2657 | 0 | if (err_info == NULL) { |
2658 | 0 | continue; |
2659 | 0 | } |
2660 | 0 | PyObject *id = PyLong_FromUnsignedLong(t->thread_id); |
2661 | 0 | if (id == NULL) { |
2662 | 0 | goto fail; |
2663 | 0 | } |
2664 | 0 | PyObject *exc = err_info->exc_value; |
2665 | 0 | assert(exc == NULL || |
2666 | 0 | exc == Py_None || |
2667 | 0 | PyExceptionInstance_Check(exc)); |
2668 | |
2669 | 0 | int stat = PyDict_SetItem(result, id, exc == NULL ? Py_None : exc); |
2670 | 0 | Py_DECREF(id); |
2671 | 0 | if (stat < 0) { |
2672 | 0 | goto fail; |
2673 | 0 | } |
2674 | 0 | } |
2675 | 0 | } |
2676 | 0 | goto done; |
2677 | | |
2678 | 0 | fail: |
2679 | 0 | Py_CLEAR(result); |
2680 | |
2681 | 0 | done: |
2682 | 0 | HEAD_UNLOCK(runtime); |
2683 | 0 | _PyEval_StartTheWorldAll(runtime); |
2684 | 0 | return result; |
2685 | 0 | } |
2686 | | |
2687 | | |
2688 | | /***********************************/ |
2689 | | /* Python "auto thread state" API. */ |
2690 | | /***********************************/ |
2691 | | |
2692 | | /* Internal initialization/finalization functions called by |
2693 | | Py_Initialize/Py_FinalizeEx |
2694 | | */ |
2695 | | PyStatus |
2696 | | _PyGILState_Init(PyInterpreterState *interp) |
2697 | 22 | { |
2698 | 22 | if (!_Py_IsMainInterpreter(interp)) { |
2699 | | /* Currently, PyGILState is shared by all interpreters. The main |
2700 | | * interpreter is responsible for initializing it. */ |
2701 | 0 | return _PyStatus_OK(); |
2702 | 0 | } |
2703 | 22 | _PyRuntimeState *runtime = interp->runtime; |
2704 | 22 | assert(gilstate_get() == NULL); |
2705 | 22 | assert(runtime->gilstate.autoInterpreterState == NULL); |
2706 | 22 | runtime->gilstate.autoInterpreterState = interp; |
2707 | 22 | return _PyStatus_OK(); |
2708 | 22 | } |
2709 | | |
2710 | | void |
2711 | | _PyGILState_Fini(PyInterpreterState *interp) |
2712 | 0 | { |
2713 | 0 | if (!_Py_IsMainInterpreter(interp)) { |
2714 | | /* Currently, PyGILState is shared by all interpreters. The main |
2715 | | * interpreter is responsible for initializing it. */ |
2716 | 0 | return; |
2717 | 0 | } |
2718 | 0 | interp->runtime->gilstate.autoInterpreterState = NULL; |
2719 | 0 | } |
2720 | | |
2721 | | |
2722 | | // XXX Drop this. |
2723 | | void |
2724 | | _PyGILState_SetTstate(PyThreadState *tstate) |
2725 | 22 | { |
2726 | | /* must init with valid states */ |
2727 | 22 | assert(tstate != NULL); |
2728 | 22 | assert(tstate->interp != NULL); |
2729 | | |
2730 | 22 | if (!_Py_IsMainInterpreter(tstate->interp)) { |
2731 | | /* Currently, PyGILState is shared by all interpreters. The main |
2732 | | * interpreter is responsible for initializing it. */ |
2733 | 0 | return; |
2734 | 0 | } |
2735 | | |
2736 | | #ifndef NDEBUG |
2737 | | _PyRuntimeState *runtime = tstate->interp->runtime; |
2738 | | |
2739 | | assert(runtime->gilstate.autoInterpreterState == tstate->interp); |
2740 | | assert(gilstate_get() == tstate); |
2741 | | assert(tstate->gilstate_counter == 1); |
2742 | | #endif |
2743 | 22 | } |
2744 | | |
2745 | | PyInterpreterState * |
2746 | | _PyGILState_GetInterpreterStateUnsafe(void) |
2747 | 0 | { |
2748 | 0 | return _PyRuntime.gilstate.autoInterpreterState; |
2749 | 0 | } |
2750 | | |
2751 | | /* The public functions */ |
2752 | | |
2753 | | PyThreadState * |
2754 | | PyGILState_GetThisThreadState(void) |
2755 | 0 | { |
2756 | 0 | return gilstate_get(); |
2757 | 0 | } |
2758 | | |
2759 | | int |
2760 | | PyGILState_Check(void) |
2761 | 0 | { |
2762 | 0 | _PyRuntimeState *runtime = &_PyRuntime; |
2763 | 0 | if (!runtime->gilstate.check_enabled) { |
2764 | 0 | return 1; |
2765 | 0 | } |
2766 | | |
2767 | 0 | PyThreadState *tstate = current_fast_get(); |
2768 | 0 | if (tstate == NULL) { |
2769 | 0 | return 0; |
2770 | 0 | } |
2771 | | |
2772 | 0 | PyThreadState *tcur = gilstate_get(); |
2773 | 0 | return (tstate == tcur); |
2774 | 0 | } |
2775 | | |
2776 | | PyGILState_STATE |
2777 | | PyGILState_Ensure(void) |
2778 | 0 | { |
2779 | 0 | _PyRuntimeState *runtime = &_PyRuntime; |
2780 | | |
2781 | | /* Note that we do not auto-init Python here - apart from |
2782 | | potential races with 2 threads auto-initializing, pep-311 |
2783 | | spells out other issues. Embedders are expected to have |
2784 | | called Py_Initialize(). */ |
2785 | | |
2786 | | /* Ensure that _PyEval_InitThreads() and _PyGILState_Init() have been |
2787 | | called by Py_Initialize() |
2788 | | |
2789 | | TODO: This isn't thread-safe. There's no protection here against |
2790 | | concurrent finalization of the interpreter; it's simply a guard |
2791 | | for *after* the interpreter has finalized. |
2792 | | */ |
2793 | 0 | if (!_PyEval_ThreadsInitialized() || runtime->gilstate.autoInterpreterState == NULL) { |
2794 | 0 | PyThread_hang_thread(); |
2795 | 0 | } |
2796 | | |
2797 | 0 | PyThreadState *tcur = gilstate_get(); |
2798 | 0 | int has_gil; |
2799 | 0 | if (tcur == NULL) { |
2800 | | /* Create a new Python thread state for this thread */ |
2801 | | // XXX Use PyInterpreterState_EnsureThreadState()? |
2802 | 0 | tcur = new_threadstate(runtime->gilstate.autoInterpreterState, |
2803 | 0 | _PyThreadState_WHENCE_GILSTATE); |
2804 | 0 | if (tcur == NULL) { |
2805 | 0 | Py_FatalError("Couldn't create thread-state for new thread"); |
2806 | 0 | } |
2807 | 0 | bind_tstate(tcur); |
2808 | 0 | bind_gilstate_tstate(tcur); |
2809 | | |
2810 | | /* This is our thread state! We'll need to delete it in the |
2811 | | matching call to PyGILState_Release(). */ |
2812 | 0 | assert(tcur->gilstate_counter == 1); |
2813 | 0 | tcur->gilstate_counter = 0; |
2814 | 0 | has_gil = 0; /* new thread state is never current */ |
2815 | 0 | } |
2816 | 0 | else { |
2817 | 0 | has_gil = holds_gil(tcur); |
2818 | 0 | } |
2819 | | |
2820 | 0 | if (!has_gil) { |
2821 | 0 | PyEval_RestoreThread(tcur); |
2822 | 0 | } |
2823 | | |
2824 | | /* Update our counter in the thread-state - no need for locks: |
2825 | | - tcur will remain valid as we hold the GIL. |
2826 | | - the counter is safe as we are the only thread "allowed" |
2827 | | to modify this value |
2828 | | */ |
2829 | 0 | ++tcur->gilstate_counter; |
2830 | |
2831 | 0 | return has_gil ? PyGILState_LOCKED : PyGILState_UNLOCKED; |
2832 | 0 | } |
2833 | | |
2834 | | void |
2835 | | PyGILState_Release(PyGILState_STATE oldstate) |
2836 | 0 | { |
2837 | 0 | PyThreadState *tstate = gilstate_get(); |
2838 | 0 | if (tstate == NULL) { |
2839 | 0 | Py_FatalError("auto-releasing thread-state, " |
2840 | 0 | "but no thread-state for this thread"); |
2841 | 0 | } |
2842 | | |
2843 | | /* We must hold the GIL and have our thread state current */ |
2844 | 0 | if (!holds_gil(tstate)) { |
2845 | 0 | _Py_FatalErrorFormat(__func__, |
2846 | 0 | "thread state %p must be current when releasing", |
2847 | 0 | tstate); |
2848 | 0 | } |
2849 | 0 | --tstate->gilstate_counter; |
2850 | 0 | assert(tstate->gilstate_counter >= 0); /* illegal counter value */ |
2851 | | |
2852 | | /* If we're going to destroy this thread-state, we must |
2853 | | * clear it while the GIL is held, as destructors may run. |
2854 | | */ |
2855 | 0 | if (tstate->gilstate_counter == 0) { |
2856 | | /* can't have been locked when we created it */ |
2857 | 0 | assert(oldstate == PyGILState_UNLOCKED); |
2858 | | // XXX Unbind tstate here. |
2859 | | // gh-119585: `PyThreadState_Clear()` may call destructors that |
2860 | | // themselves use PyGILState_Ensure and PyGILState_Release, so make |
2861 | | // sure that gilstate_counter is not zero when calling it. |
2862 | 0 | ++tstate->gilstate_counter; |
2863 | 0 | PyThreadState_Clear(tstate); |
2864 | 0 | --tstate->gilstate_counter; |
2865 | | /* Delete the thread-state. Note this releases the GIL too! |
2866 | | * It's vital that the GIL be held here, to avoid shutdown |
2867 | | * races; see bugs 225673 and 1061968 (that nasty bug has a |
2868 | | * habit of coming back). |
2869 | | */ |
2870 | 0 | assert(tstate->gilstate_counter == 0); |
2871 | 0 | assert(current_fast_get() == tstate); |
2872 | 0 | _PyThreadState_DeleteCurrent(tstate); |
2873 | 0 | } |
2874 | | /* Release the lock if necessary */ |
2875 | 0 | else if (oldstate == PyGILState_UNLOCKED) { |
2876 | 0 | PyEval_SaveThread(); |
2877 | 0 | } |
2878 | 0 | } |
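The canonical client of the Ensure/Release pair is a callback that can arrive
on any OS thread, whether or not Python created it. A sketch (on_event() and
its argument are hypothetical):

static void
on_event(void *arg)  /* may run on a thread Python has never seen */
{
    PyGILState_STATE gstate = PyGILState_Ensure();
    PyObject *res = PyObject_CallNoArgs((PyObject *)arg);
    if (res == NULL) {
        PyErr_WriteUnraisable((PyObject *)arg);
    }
    Py_XDECREF(res);
    PyGILState_Release(gstate);  /* may delete the auto-created tstate */
}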
2879 | | |
2880 | | |
2881 | | /*************/ |
2882 | | /* Other API */ |
2883 | | /*************/ |
2884 | | |
2885 | | _PyFrameEvalFunction |
2886 | | _PyInterpreterState_GetEvalFrameFunc(PyInterpreterState *interp) |
2887 | 0 | { |
2888 | 0 | if (interp->eval_frame == NULL) { |
2889 | 0 | return _PyEval_EvalFrameDefault; |
2890 | 0 | } |
2891 | 0 | return interp->eval_frame; |
2892 | 0 | } |
2893 | | |
2894 | | |
2895 | | void |
2896 | | _PyInterpreterState_SetEvalFrameFunc(PyInterpreterState *interp, |
2897 | | _PyFrameEvalFunction eval_frame) |
2898 | 0 | { |
2899 | 0 | if (eval_frame == _PyEval_EvalFrameDefault) { |
2900 | 0 | eval_frame = NULL; |
2901 | 0 | } |
2902 | 0 | if (eval_frame == interp->eval_frame) { |
2903 | 0 | return; |
2904 | 0 | } |
2905 | | #ifdef _Py_TIER2 |
2906 | | if (eval_frame != NULL) { |
2907 | | _Py_Executors_InvalidateAll(interp, 1); |
2908 | | } |
2909 | | #endif |
2910 | 0 | RARE_EVENT_INC(set_eval_frame_func); |
2911 | 0 | _PyEval_StopTheWorld(interp); |
2912 | 0 | interp->eval_frame = eval_frame; |
2913 | 0 | _PyEval_StartTheWorld(interp); |
2914 | 0 | } |
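This is the PEP 523 hook. A minimal pass-through evaluator, assuming the
current internal _PyFrameEvalFunction signature:

static PyObject *
counting_eval(PyThreadState *tstate, _PyInterpreterFrame *frame, int throwflag)
{
    /* e.g. count frame entries here, then defer to the default evaluator */
    return _PyEval_EvalFrameDefault(tstate, frame, throwflag);
}

/* Installed (and swapped back out) while the world is stopped, as above:
   _PyInterpreterState_SetEvalFrameFunc(interp, counting_eval); */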
2915 | | |
2916 | | |
2917 | | const PyConfig* |
2918 | | _PyInterpreterState_GetConfig(PyInterpreterState *interp) |
2919 | 77.1M | { |
2920 | 77.1M | return &interp->config; |
2921 | 77.1M | } |
2922 | | |
2923 | | |
2924 | | const PyConfig* |
2925 | | _Py_GetConfig(void) |
2926 | 56.2k | { |
2927 | 56.2k | PyThreadState *tstate = current_fast_get(); |
2928 | 56.2k | _Py_EnsureTstateNotNULL(tstate); |
2929 | 56.2k | return _PyInterpreterState_GetConfig(tstate->interp); |
2930 | 56.2k | } |
2931 | | |
2932 | | |
2933 | | int |
2934 | | _PyInterpreterState_HasFeature(PyInterpreterState *interp, unsigned long feature) |
2935 | 0 | { |
2936 | 0 | return ((interp->feature_flags & feature) != 0); |
2937 | 0 | } |
2938 | | |
2939 | | |
2940 | 162k | #define MINIMUM_OVERHEAD 1000 |
2941 | | |
2942 | | static PyObject ** |
2943 | | push_chunk(PyThreadState *tstate, int size) |
2944 | 162k | { |
2945 | 162k | int allocate_size = _PY_DATA_STACK_CHUNK_SIZE; |
2946 | 162k | while (allocate_size < (int)sizeof(PyObject*)*(size + MINIMUM_OVERHEAD)) { |
2947 | 0 | allocate_size *= 2; |
2948 | 0 | } |
2949 | 162k | _PyStackChunk *new = allocate_chunk(allocate_size, tstate->datastack_chunk); |
2950 | 162k | if (new == NULL) { |
2951 | 0 | return NULL; |
2952 | 0 | } |
2953 | 162k | if (tstate->datastack_chunk) { |
2954 | 162k | tstate->datastack_chunk->top = tstate->datastack_top - |
2955 | 162k | &tstate->datastack_chunk->data[0]; |
2956 | 162k | } |
2957 | 162k | tstate->datastack_chunk = new; |
2958 | 162k | tstate->datastack_limit = (PyObject **)(((char *)new) + allocate_size); |
2959 | | // When new is the "root" chunk (i.e. new->previous == NULL), we can keep |
2960 | | // _PyThreadState_PopFrame from freeing it later by "skipping" over the |
2961 | | // first element: |
2962 | 162k | PyObject **res = &new->data[new->previous == NULL]; |
2963 | 162k | tstate->datastack_top = res + size; |
2964 | 162k | return res; |
2965 | 162k | } |
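A worked instance of the sizing loop, assuming a 16 KiB _PY_DATA_STACK_CHUNK_SIZE
and 8-byte pointers: a frame of size 3000 needs (3000 + 1000) * 8 = 32000 bytes
once MINIMUM_OVERHEAD is added, so allocate_size doubles exactly once, from
16384 to 32768.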
2966 | | |
2967 | | _PyInterpreterFrame * |
2968 | | _PyThreadState_PushFrame(PyThreadState *tstate, size_t size) |
2969 | 169M | { |
2970 | 169M | assert(size < INT_MAX/sizeof(PyObject *)); |
2971 | 169M | if (_PyThreadState_HasStackSpace(tstate, (int)size)) { |
2972 | 169M | _PyInterpreterFrame *res = (_PyInterpreterFrame *)tstate->datastack_top; |
2973 | 169M | tstate->datastack_top += size; |
2974 | 169M | return res; |
2975 | 169M | } |
2976 | 162k | return (_PyInterpreterFrame *)push_chunk(tstate, (int)size); |
2977 | 169M | } |
2978 | | |
2979 | | void |
2980 | | _PyThreadState_PopFrame(PyThreadState *tstate, _PyInterpreterFrame * frame) |
2981 | 574M | { |
2982 | 574M | assert(tstate->datastack_chunk); |
2983 | 574M | PyObject **base = (PyObject **)frame; |
2984 | 574M | if (base == &tstate->datastack_chunk->data[0]) { |
2985 | 162k | _PyStackChunk *chunk = tstate->datastack_chunk; |
2986 | 162k | _PyStackChunk *previous = chunk->previous; |
2987 | | // push_chunk ensures that the root chunk is never popped: |
2988 | 162k | assert(previous); |
2989 | 162k | tstate->datastack_top = &previous->data[previous->top]; |
2990 | 162k | tstate->datastack_chunk = previous; |
2991 | 162k | _PyObject_VirtualFree(chunk, chunk->size); |
2992 | 162k | tstate->datastack_limit = (PyObject **)(((char *)previous) + previous->size); |
2993 | 162k | } |
2994 | 574M | else { |
2995 | 574M | assert(tstate->datastack_top); |
2996 | 574M | assert(tstate->datastack_top >= base); |
2997 | 574M | tstate->datastack_top = base; |
2998 | 574M | } |
2999 | 574M | } |
3000 | | |
3001 | | |
3002 | | #ifndef NDEBUG |
3003 | | // Check that a Python thread state is valid. In practice, this function is used |
3004 | | // on a Python debug build to check whether 'tstate' is a dangling pointer, i.e. |
3005 | | // whether the PyThreadState memory has been freed. |
3006 | | // |
3007 | | // Usage: |
3008 | | // |
3009 | | // assert(_PyThreadState_CheckConsistency(tstate)); |
3010 | | int |
3011 | | _PyThreadState_CheckConsistency(PyThreadState *tstate) |
3012 | | { |
3013 | | assert(!_PyMem_IsPtrFreed(tstate)); |
3014 | | assert(!_PyMem_IsPtrFreed(tstate->interp)); |
3015 | | return 1; |
3016 | | } |
3017 | | #endif |
3018 | | |
3019 | | |
3020 | | // Check if a Python thread must call _PyThreadState_HangThread() rather than |
3021 | | // taking the GIL or attaching to the interpreter, because Py_Finalize() has |
3022 | | // been called. |
3023 | | // |
3024 | | // When this function is called by a daemon thread after Py_Finalize() has been |
3025 | | // called, the GIL may no longer exist. |
3026 | | // |
3027 | | // tstate must be non-NULL. |
3028 | | int |
3029 | | _PyThreadState_MustExit(PyThreadState *tstate) |
3030 | 71.3k | { |
3031 | 71.3k | int state = _Py_atomic_load_int_relaxed(&tstate->state); |
3032 | 71.3k | return state == _Py_THREAD_SHUTTING_DOWN; |
3033 | 71.3k | } |
3034 | | |
3035 | | void |
3036 | | _PyThreadState_HangThread(PyThreadState *tstate) |
3037 | 0 | { |
3038 | 0 | _PyThreadStateImpl *tstate_impl = (_PyThreadStateImpl *)tstate; |
3039 | 0 | decref_threadstate(tstate_impl); |
3040 | 0 | PyThread_hang_thread(); |
3041 | 0 | } |
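 | | 
 | | /* Usage sketch (hypothetical helper, not in CPython): a thread about to
 | |    attach or take the GIL first checks for shutdown; if finalization has
 | |    marked it, it must hang instead of touching a GIL that may be gone. */
 | | static void
 | | attach_or_hang(PyThreadState *tstate)
 | | {
 | |     if (_PyThreadState_MustExit(tstate)) {
 | |         _PyThreadState_HangThread(tstate);   // never returns
 | |     }
 | |     /* ... safe to attach / take the GIL ... */
 | | }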
3042 | | |
3043 | | /********************/ |
3044 | | /* mimalloc support */ |
3045 | | /********************/ |
3046 | | |
3047 | | static void |
3048 | | tstate_mimalloc_bind(PyThreadState *tstate) |
3049 | 22 | { |
3050 | | #ifdef Py_GIL_DISABLED |
3051 | | struct _mimalloc_thread_state *mts = &((_PyThreadStateImpl*)tstate)->mimalloc; |
3052 | | |
3053 | | // Initialize the mimalloc thread state. This must be called from the |
3054 | | // same thread that will use the thread state. The "mem" heap doubles as |
3055 | | // the "backing" heap. |
3056 | | mi_tld_t *tld = &mts->tld; |
3057 | | _mi_tld_init(tld, &mts->heaps[_Py_MIMALLOC_HEAP_MEM]); |
3058 | | llist_init(&mts->page_list); |
3059 | | |
3060 | | // Exiting threads push any remaining in-use segments to the abandoned |
3061 | | // pool to be re-claimed later by other threads. We use per-interpreter |
3062 | | // pools to keep Python objects from different interpreters separate. |
3063 | | tld->segments.abandoned = &tstate->interp->mimalloc.abandoned_pool; |
3064 | | |
3065 | | // Don't fill in the first N bytes up to ob_type in debug builds. We may
3066 | | // access ob_tid and the refcount fields during lock-free dict and list
3067 | | // accesses, so they must remain valid for a while after deallocation.
3068 | | size_t base_offset = offsetof(PyObject, ob_type); |
3069 | | if (_PyMem_DebugEnabled()) { |
3070 | | // The debug allocator adds two words at the beginning of each block. |
3071 | | base_offset += 2 * sizeof(size_t); |
3072 | | } |
3073 | | size_t debug_offsets[_Py_MIMALLOC_HEAP_COUNT] = { |
3074 | | [_Py_MIMALLOC_HEAP_OBJECT] = base_offset, |
3075 | | [_Py_MIMALLOC_HEAP_GC] = base_offset, |
3076 | | [_Py_MIMALLOC_HEAP_GC_PRE] = base_offset + 2 * sizeof(PyObject *), |
3077 | | }; |
3078 | | |
3079 | | // Initialize each heap |
3080 | | for (uint8_t i = 0; i < _Py_MIMALLOC_HEAP_COUNT; i++) { |
3081 | | _mi_heap_init_ex(&mts->heaps[i], tld, _mi_arena_id_none(), false, i); |
3082 | | mts->heaps[i].debug_offset = (uint8_t)debug_offsets[i]; |
3083 | | } |
3084 | | |
3085 | | // Heaps that store Python objects should use QSBR to delay freeing |
3086 | | // mimalloc pages while there may be concurrent lock-free readers. |
3087 | | mts->heaps[_Py_MIMALLOC_HEAP_OBJECT].page_use_qsbr = true; |
3088 | | mts->heaps[_Py_MIMALLOC_HEAP_GC].page_use_qsbr = true; |
3089 | | mts->heaps[_Py_MIMALLOC_HEAP_GC_PRE].page_use_qsbr = true; |
3090 | | |
3091 | | // By default, object allocations use _Py_MIMALLOC_HEAP_OBJECT. |
3092 | | // _PyObject_GC_New() and similar functions temporarily override this to |
3093 | | // use one of the GC heaps. |
3094 | | mts->current_object_heap = &mts->heaps[_Py_MIMALLOC_HEAP_OBJECT]; |
3095 | | |
3096 | | _Py_atomic_store_int(&mts->initialized, 1); |
3097 | | #endif |
3098 | 22 | } |
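 | | 
 | | /* Minimal model (editorial sketch; the real fill logic lives in CPython's
 | |    patched mimalloc, not in this file): with a per-heap debug_offset, a
 | |    freed block is clobbered only past the offset, so ob_tid and the
 | |    refcount words stay readable for concurrent lock-free readers. */
 | | static void
 | | debug_fill_freed_block(void *block, size_t block_size, uint8_t debug_offset)
 | | {
 | |     // 0xDD is an arbitrary fill byte chosen for this sketch.
 | |     memset((char *)block + debug_offset, 0xDD, block_size - debug_offset);
 | | }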
3099 | | |
3100 | | void |
3101 | | _PyThreadState_ClearMimallocHeaps(PyThreadState *tstate) |
3102 | 0 | { |
3103 | | #ifdef Py_GIL_DISABLED |
3104 | | if (!tstate->_status.bound) { |
3105 | | // The mimalloc heaps are only initialized when the thread is bound. |
3106 | | return; |
3107 | | } |
3108 | | |
3109 | | _PyThreadStateImpl *tstate_impl = (_PyThreadStateImpl *)tstate; |
3110 | | for (Py_ssize_t i = 0; i < _Py_MIMALLOC_HEAP_COUNT; i++) { |
3111 | | // Abandon all segments in use by this thread. This pushes them to |
3112 | | // a shared pool to later be reclaimed by other threads. It's important |
3113 | | // to do this before the thread state is destroyed so that objects |
3114 | | // remain visible to the GC. |
3115 | | _mi_heap_collect_abandon(&tstate_impl->mimalloc.heaps[i]); |
3116 | | } |
3117 | | #endif |
3118 | 0 | } |
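 | | 
 | | /* Usage sketch (hypothetical teardown helper, not in CPython): abandon
 | |    the thread's mimalloc segments before the thread state itself goes
 | |    away, so objects remain visible to the GC and reclaimable elsewhere. */
 | | static void
 | | teardown_thread_state(PyThreadState *tstate)
 | | {
 | |     _PyThreadState_ClearMimallocHeaps(tstate);  // no-op if never bound
 | |     /* ... then clear/free the PyThreadState ... */
 | | }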
3119 | | |
3120 | | |
3121 | | int |
3122 | | _Py_IsMainThread(void) |
3123 | 65.5M | { |
3124 | 65.5M | unsigned long thread = PyThread_get_thread_ident(); |
3125 | 65.5M | return (thread == _PyRuntime.main_thread); |
3126 | 65.5M | } |
3127 | | |
3128 | | |
3129 | | PyInterpreterState * |
3130 | | _PyInterpreterState_Main(void) |
3131 | 65.4M | { |
3132 | 65.4M | return _PyRuntime.interpreters.main; |
3133 | 65.4M | } |
3134 | | |
3135 | | |
3136 | | int |
3137 | | _Py_IsMainInterpreterFinalizing(PyInterpreterState *interp) |
3138 | 0 | { |
3139 | | /* bpo-39877: Access _PyRuntime directly rather than using
3140 | | tstate->interp->runtime, to support calls from Python daemon threads.
3141 | | After Py_Finalize() has been called, tstate can be a dangling pointer
3142 | | into freed PyThreadState memory. */
3143 | 0 | return (_PyRuntimeState_GetFinalizing(&_PyRuntime) != NULL && |
3144 | 0 | interp == &_PyRuntime._main_interpreter); |
3145 | 0 | } |
3146 | | |
3147 | | |
3148 | | const PyConfig * |
3149 | | _Py_GetMainConfig(void) |
3150 | 0 | { |
3151 | 0 | PyInterpreterState *interp = _PyInterpreterState_Main(); |
3152 | 0 | if (interp == NULL) { |
3153 | 0 | return NULL; |
3154 | 0 | } |
3155 | 0 | return _PyInterpreterState_GetConfig(interp); |
3156 | 0 | } |
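 | | 
 | | /* Usage sketch (hypothetical caller, not in CPython): callers must handle
 | |    NULL, which is returned before the main interpreter exists.  `verbose`
 | |    is a real PyConfig field, used here only as an example. */
 | | static int
 | | main_config_is_verbose(void)
 | | {
 | |     const PyConfig *config = _Py_GetMainConfig();
 | |     return config != NULL && config->verbose;
 | | }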