/src/cpython/Python/pystate.c
Line | Count | Source |
1 | | |
2 | | /* Thread and interpreter state structures and their interfaces */ |
3 | | |
4 | | #include "Python.h" |
5 | | #include "pycore_abstract.h" // _PyIndex_Check() |
6 | | #include "pycore_audit.h" // _Py_AuditHookEntry |
7 | | #include "pycore_ceval.h" // _PyEval_AcquireLock() |
8 | | #include "pycore_codecs.h" // _PyCodec_Fini() |
9 | | #include "pycore_critical_section.h" // _PyCriticalSection_Resume() |
10 | | #include "pycore_dtoa.h" // _dtoa_state_INIT() |
11 | | #include "pycore_freelist.h" // _PyObject_ClearFreeLists() |
12 | | #include "pycore_initconfig.h" // _PyStatus_OK() |
13 | | #include "pycore_interpframe.h" // _PyThreadState_HasStackSpace() |
14 | | #include "pycore_object.h" // _PyType_InitCache() |
15 | | #include "pycore_obmalloc.h" // _PyMem_obmalloc_state_on_heap() |
16 | | #include "pycore_optimizer.h" // JIT_CLEANUP_THRESHOLD |
17 | | #include "pycore_parking_lot.h" // _PyParkingLot_AfterFork() |
18 | | #include "pycore_pyerrors.h" // _PyErr_Clear() |
19 | | #include "pycore_pylifecycle.h" // _PyAST_Fini() |
20 | | #include "pycore_pymem.h" // _PyMem_DebugEnabled() |
21 | | #include "pycore_runtime.h" // _PyRuntime |
22 | | #include "pycore_runtime_init.h" // _PyRuntimeState_INIT |
23 | | #include "pycore_stackref.h" // Py_STACKREF_DEBUG |
24 | | #include "pycore_stats.h" // FT_STAT_WORLD_STOP_INC() |
25 | | #include "pycore_time.h" // _PyTime_Init() |
26 | | #include "pycore_uop.h" // UOP_BUFFER_SIZE |
27 | | #include "pycore_uniqueid.h" // _PyObject_FinalizePerThreadRefcounts() |
28 | | |
29 | | |
30 | | /* -------------------------------------------------------------------------- |
31 | | CAUTION |
32 | | |
33 | | Always use PyMem_RawMalloc() and PyMem_RawFree() directly in this file. A |
34 | | number of these functions are advertised as safe to call when the GIL isn't |
35 | | held, and in a debug build Python redirects (e.g.) PyMem_NEW (etc) to Python's |
36 | | debugging obmalloc functions. Those aren't thread-safe (they rely on the GIL |
37 | | to avoid the expense of doing their own locking). |
38 | | -------------------------------------------------------------------------- */ |
39 | | |
40 | | #ifdef HAVE_DLOPEN |
41 | | # ifdef HAVE_DLFCN_H |
42 | | # include <dlfcn.h> |
43 | | # endif |
44 | | # if !HAVE_DECL_RTLD_LAZY |
45 | | # define RTLD_LAZY 1 |
46 | | # endif |
47 | | #endif |
48 | | |
49 | | |
50 | | /****************************************/ |
51 | | /* helpers for the current thread state */ |
52 | | /****************************************/ |
53 | | |
54 | | // API for the current thread state is further down. |
55 | | |
56 | | /* "current" means one of: |
57 | | - bound to the current OS thread |
58 | | - holds the GIL |
59 | | */ |
60 | | |
61 | | //------------------------------------------------- |
62 | | // a highly efficient lookup for the current thread |
63 | | //------------------------------------------------- |
64 | | |
65 | | /* |
66 | | The stored thread state is set by PyThreadState_Swap(). |
67 | | |
68 | | For each of these functions, the GIL must be held by the current thread. |
69 | | */ |
70 | | |
71 | | |
72 | | /* The attached thread state for the current thread. */ |
73 | | _Py_thread_local PyThreadState *_Py_tss_tstate = NULL; |
74 | | |
75 | | /* The "bound" thread state used by PyGILState_Ensure(), |
76 | | also known as a "gilstate." */ |
77 | | _Py_thread_local PyThreadState *_Py_tss_gilstate = NULL; |
78 | | |
79 | | /* The interpreter of the attached thread state, |
80 | | and is the same as tstate->interp. */ |
81 | | _Py_thread_local PyInterpreterState *_Py_tss_interp = NULL; |
82 | | |
83 | | static inline PyThreadState * |
84 | | current_fast_get(void) |
85 | 103M | { |
86 | 103M | return _Py_tss_tstate; |
87 | 103M | } |
88 | | |
89 | | static inline void |
90 | | current_fast_set(_PyRuntimeState *Py_UNUSED(runtime), PyThreadState *tstate) |
91 | 2.37M | { |
92 | 2.37M | assert(tstate != NULL); |
93 | 2.37M | _Py_tss_tstate = tstate; |
94 | 2.37M | assert(tstate->interp != NULL); |
95 | 2.37M | _Py_tss_interp = tstate->interp; |
96 | 2.37M | } |
97 | | |
98 | | static inline void |
99 | | current_fast_clear(_PyRuntimeState *Py_UNUSED(runtime)) |
100 | 2.37M | { |
101 | 2.37M | _Py_tss_tstate = NULL; |
102 | 2.37M | _Py_tss_interp = NULL; |
103 | 2.37M | } |
104 | | |
105 | | #define tstate_verify_not_active(tstate) \ |
106 | 0 | if (tstate == current_fast_get()) { \ |
107 | 0 | _Py_FatalErrorFormat(__func__, "tstate %p is still current", tstate); \ |
108 | 0 | } |
109 | | |
110 | | PyThreadState * |
111 | | _PyThreadState_GetCurrent(void) |
112 | 8.92M | { |
113 | 8.92M | return current_fast_get(); |
114 | 8.92M | } |
115 | | |
116 | | |
117 | | //--------------------------------------------- |
118 | | // The thread state used by PyGILState_Ensure() |
119 | | //--------------------------------------------- |
120 | | |
121 | | /* |
122 | | The stored thread state is set by bind_tstate() (AKA PyThreadState_Bind()). |
123 | | |
124 | | The GIL does not need to be held for these. |
125 | | */ |
126 | | |
127 | | static inline PyThreadState * |
128 | | gilstate_get(void) |
129 | 56 | { |
130 | 56 | return _Py_tss_gilstate; |
131 | 56 | } |
132 | | |
133 | | static inline void |
134 | | gilstate_set(PyThreadState *tstate) |
135 | 28 | { |
136 | 28 | assert(tstate != NULL); |
137 | 28 | _Py_tss_gilstate = tstate; |
138 | 28 | } |
139 | | |
140 | | static inline void |
141 | | gilstate_clear(void) |
142 | 0 | { |
143 | 0 | _Py_tss_gilstate = NULL; |
144 | 0 | } |
145 | | |
146 | | |
147 | | #ifndef NDEBUG |
148 | | static inline int tstate_is_alive(PyThreadState *tstate); |
149 | | |
150 | | static inline int |
151 | | tstate_is_bound(PyThreadState *tstate) |
152 | | { |
153 | | return tstate->_status.bound && !tstate->_status.unbound; |
154 | | } |
155 | | #endif // !NDEBUG |
156 | | |
157 | | static void bind_gilstate_tstate(PyThreadState *); |
158 | | static void unbind_gilstate_tstate(PyThreadState *); |
159 | | |
160 | | static void tstate_mimalloc_bind(PyThreadState *); |
161 | | |
162 | | static void |
163 | | bind_tstate(PyThreadState *tstate) |
164 | 28 | { |
165 | 28 | assert(tstate != NULL); |
166 | 28 | assert(tstate_is_alive(tstate) && !tstate->_status.bound); |
167 | 28 | assert(!tstate->_status.unbound); // just in case |
168 | 28 | assert(!tstate->_status.bound_gilstate); |
169 | 28 | assert(tstate != gilstate_get()); |
170 | 28 | assert(!tstate->_status.active); |
171 | 28 | assert(tstate->thread_id == 0); |
172 | 28 | assert(tstate->native_thread_id == 0); |
173 | | |
174 | | // Currently we don't necessarily store the thread state |
175 | | // in thread-local storage (e.g. per-interpreter). |
176 | | |
177 | 28 | tstate->thread_id = PyThread_get_thread_ident(); |
178 | 28 | #ifdef PY_HAVE_THREAD_NATIVE_ID |
179 | 28 | tstate->native_thread_id = PyThread_get_thread_native_id(); |
180 | 28 | #endif |
181 | | |
182 | | #ifdef Py_GIL_DISABLED |
183 | | // Initialize biased reference counting inter-thread queue. Note that this |
184 | | // needs to be initialized from the active thread. |
185 | | _Py_brc_init_thread(tstate); |
186 | | #endif |
187 | | |
188 | | // mimalloc state needs to be initialized from the active thread. |
189 | 28 | tstate_mimalloc_bind(tstate); |
190 | | |
191 | 28 | tstate->_status.bound = 1; |
192 | 28 | } |
193 | | |
194 | | static void |
195 | | unbind_tstate(PyThreadState *tstate) |
196 | 0 | { |
197 | 0 | assert(tstate != NULL); |
198 | 0 | assert(tstate_is_bound(tstate)); |
199 | 0 | #ifndef HAVE_PTHREAD_STUBS |
200 | 0 | assert(tstate->thread_id > 0); |
201 | 0 | #endif |
202 | 0 | #ifdef PY_HAVE_THREAD_NATIVE_ID |
203 | 0 | assert(tstate->native_thread_id > 0); |
204 | 0 | #endif |
205 | | |
206 | | // We leave thread_id and native_thread_id alone |
207 | | // since they can be useful for debugging. |
208 | | // Check the `_status` field to know if these values |
209 | | // are still valid. |
210 | | |
211 | | // We leave tstate->_status.bound set to 1 |
212 | | // to indicate it was previously bound. |
213 | 0 | tstate->_status.unbound = 1; |
214 | 0 | } |
215 | | |
216 | | |
217 | | /* Stick the thread state for this thread in thread specific storage. |
218 | | |
219 | | When a thread state is created for a thread by some mechanism |
220 | | other than PyGILState_Ensure(), it's important that the GILState |
221 | | machinery knows about it so it doesn't try to create another |
222 | | thread state for the thread. |
223 | | (This is a better fix for SF bug #1010677 than the first one attempted.) |
224 | | |
225 | | The only situation where you can legitimately have more than one |
226 | | thread state for an OS level thread is when there are multiple |
227 | | interpreters. |
228 | | |
229 | | Before 3.12, the PyGILState_*() APIs didn't work with multiple |
230 | | interpreters (see bpo-10915 and bpo-15751), so this function used |
231 | | to set TSS only once. Thus, the first thread state created for that |
232 | | given OS level thread would "win", which seemed reasonable behaviour. |
233 | | */ |
234 | | |
235 | | static void |
236 | | bind_gilstate_tstate(PyThreadState *tstate) |
237 | 28 | { |
238 | 28 | assert(tstate != NULL); |
239 | 28 | assert(tstate_is_alive(tstate)); |
240 | 28 | assert(tstate_is_bound(tstate)); |
241 | | // XXX assert(!tstate->_status.active); |
242 | 28 | assert(!tstate->_status.bound_gilstate); |
243 | | |
244 | 28 | PyThreadState *tcur = gilstate_get(); |
245 | 28 | assert(tstate != tcur); |
246 | | |
247 | 28 | if (tcur != NULL) { |
248 | 0 | tcur->_status.bound_gilstate = 0; |
249 | 0 | } |
250 | 28 | gilstate_set(tstate); |
251 | 28 | tstate->_status.bound_gilstate = 1; |
252 | 28 | } |
253 | | |
254 | | static void |
255 | | unbind_gilstate_tstate(PyThreadState *tstate) |
256 | 0 | { |
257 | 0 | assert(tstate != NULL); |
258 | | // XXX assert(tstate_is_alive(tstate)); |
259 | 0 | assert(tstate_is_bound(tstate)); |
260 | | // XXX assert(!tstate->_status.active); |
261 | 0 | assert(tstate->_status.bound_gilstate); |
262 | 0 | assert(tstate == gilstate_get()); |
263 | 0 | gilstate_clear(); |
264 | 0 | tstate->_status.bound_gilstate = 0; |
265 | 0 | } |
266 | | |
267 | | |
268 | | //---------------------------------------------- |
269 | | // the thread state that currently holds the GIL |
270 | | //---------------------------------------------- |
271 | | |
272 | | /* This is not exported, as it is not reliable! It can only |
273 | | ever be compared to the state for the *current* thread. |
274 | | * If not equal, then it doesn't matter that the actual |
275 | | value may change immediately after comparison, as it can't |
276 | | possibly change to the current thread's state. |
277 | | * If equal, then the current thread holds the lock, so the value can't |
278 | | change until we yield the lock. |
279 | | */ |
280 | | static int |
281 | | holds_gil(PyThreadState *tstate) |
282 | 0 | { |
283 | | // XXX Fall back to tstate->interp->runtime->ceval.gil.last_holder |
284 | | // (and tstate->interp->runtime->ceval.gil.locked). |
285 | 0 | assert(tstate != NULL); |
286 | | /* Must be the tstate for this thread */ |
287 | 0 | assert(tstate == gilstate_get()); |
288 | 0 | return tstate == current_fast_get(); |
289 | 0 | } |
290 | | |
291 | | |
292 | | /****************************/ |
293 | | /* the global runtime state */ |
294 | | /****************************/ |
295 | | |
296 | | //---------- |
297 | | // lifecycle |
298 | | //---------- |
299 | | |
300 | | /* Suppress deprecation warning for PyBytesObject.ob_shash */ |
301 | | _Py_COMP_DIAG_PUSH |
302 | | _Py_COMP_DIAG_IGNORE_DEPR_DECLS |
303 | | /* We use "initial" if the runtime gets re-used |
304 | | (e.g. Py_Finalize() followed by Py_Initialize()). |
305 | | Note that we initialize "initial" relative to _PyRuntime, |
306 | | to ensure pre-initialized pointers point to the active |
307 | | runtime state (and not "initial"). */ |
308 | | static const _PyRuntimeState initial = _PyRuntimeState_INIT(_PyRuntime, ""); |
309 | | _Py_COMP_DIAG_POP |
310 | | |
311 | | #define LOCKS_INIT(runtime) \ |
312 | 0 | { \ |
313 | 0 | &(runtime)->interpreters.mutex, \ |
314 | 0 | &(runtime)->xi.data_lookup.registry.mutex, \ |
315 | 0 | &(runtime)->unicode_state.ids.mutex, \ |
316 | 0 | &(runtime)->imports.extensions.mutex, \ |
317 | 0 | &(runtime)->ceval.pending_mainthread.mutex, \ |
318 | 0 | &(runtime)->atexit.mutex, \ |
319 | 0 | &(runtime)->audit_hooks.mutex, \ |
320 | 0 | &(runtime)->allocators.mutex, \ |
321 | 0 | &(runtime)->_main_interpreter.types.mutex, \ |
322 | 0 | &(runtime)->_main_interpreter.code_state.mutex, \ |
323 | 0 | } |
324 | | |
325 | | static void |
326 | | init_runtime(_PyRuntimeState *runtime, |
327 | | void *open_code_hook, void *open_code_userdata, |
328 | | _Py_AuditHookEntry *audit_hook_head, |
329 | | Py_ssize_t unicode_next_index) |
330 | 28 | { |
331 | 28 | assert(!runtime->preinitializing); |
332 | 28 | assert(!runtime->preinitialized); |
333 | 28 | assert(!runtime->core_initialized); |
334 | 28 | assert(!runtime->initialized); |
335 | 28 | assert(!runtime->_initialized); |
336 | | |
337 | 28 | runtime->open_code_hook = open_code_hook; |
338 | 28 | runtime->open_code_userdata = open_code_userdata; |
339 | 28 | runtime->audit_hooks.head = audit_hook_head; |
340 | | |
341 | 28 | PyPreConfig_InitPythonConfig(&runtime->preconfig); |
342 | | |
343 | | // Set it to the ID of the main thread of the main interpreter. |
344 | 28 | runtime->main_thread = PyThread_get_thread_ident(); |
345 | | |
346 | 28 | runtime->unicode_state.ids.next_index = unicode_next_index; |
347 | 28 | runtime->_initialized = 1; |
348 | 28 | } |
349 | | |
350 | | PyStatus |
351 | | _PyRuntimeState_Init(_PyRuntimeState *runtime) |
352 | 28 | { |
353 | | /* We preserve the hook across init, because there is |
354 | | currently no public API to set it between runtime |
355 | | initialization and interpreter initialization. */ |
356 | 28 | void *open_code_hook = runtime->open_code_hook; |
357 | 28 | void *open_code_userdata = runtime->open_code_userdata; |
358 | 28 | _Py_AuditHookEntry *audit_hook_head = runtime->audit_hooks.head; |
359 | | // bpo-42882: Preserve next_index value if Py_Initialize()/Py_Finalize() |
360 | | // is called multiple times. |
361 | 28 | Py_ssize_t unicode_next_index = runtime->unicode_state.ids.next_index; |
362 | | |
363 | 28 | if (runtime->_initialized) { |
364 | | // Py_Initialize() must be running again. |
365 | | // Reset to _PyRuntimeState_INIT. |
366 | 0 | memcpy(runtime, &initial, sizeof(*runtime)); |
367 | | // Preserve the cookie from the original runtime. |
368 | 0 | memcpy(runtime->debug_offsets.cookie, _Py_Debug_Cookie, 8); |
369 | 0 | assert(!runtime->_initialized); |
370 | 0 | } |
371 | | |
372 | 28 | PyStatus status = _PyTime_Init(&runtime->time); |
373 | 28 | if (_PyStatus_EXCEPTION(status)) { |
374 | 0 | return status; |
375 | 0 | } |
376 | | |
377 | 28 | init_runtime(runtime, open_code_hook, open_code_userdata, audit_hook_head, |
378 | 28 | unicode_next_index); |
379 | | |
380 | 28 | return _PyStatus_OK(); |
381 | 28 | } |
382 | | |
383 | | void |
384 | | _PyRuntimeState_Fini(_PyRuntimeState *runtime) |
385 | 0 | { |
386 | | #ifdef Py_REF_DEBUG |
387 | | /* The count is cleared by _Py_FinalizeRefTotal(). */ |
388 | | assert(runtime->object_state.interpreter_leaks == 0); |
389 | | #endif |
390 | 0 | gilstate_clear(); |
391 | 0 | } |
392 | | |
393 | | #ifdef HAVE_FORK |
394 | | /* This function is called from PyOS_AfterFork_Child to ensure that |
395 | | newly created child processes do not share locks with the parent. */ |
396 | | PyStatus |
397 | | _PyRuntimeState_ReInitThreads(_PyRuntimeState *runtime) |
398 | 0 | { |
399 | | // This was initially set in _PyRuntimeState_Init(). |
400 | 0 | runtime->main_thread = PyThread_get_thread_ident(); |
401 | | |
402 | | // Clears the parking lot. Any waiting threads are dead. This must be |
403 | | // called before releasing any locks that use the parking lot. |
404 | 0 | _PyParkingLot_AfterFork(); |
405 | | |
406 | | // Re-initialize global locks |
407 | 0 | PyMutex *locks[] = LOCKS_INIT(runtime); |
408 | 0 | for (size_t i = 0; i < Py_ARRAY_LENGTH(locks); i++) { |
409 | 0 | _PyMutex_at_fork_reinit(locks[i]); |
410 | 0 | } |
411 | | #ifdef Py_GIL_DISABLED |
412 | | for (PyInterpreterState *interp = runtime->interpreters.head; |
413 | | interp != NULL; interp = interp->next) |
414 | | { |
415 | | for (int i = 0; i < NUM_WEAKREF_LIST_LOCKS; i++) { |
416 | | _PyMutex_at_fork_reinit(&interp->weakref_locks[i]); |
417 | | } |
418 | | } |
419 | | #endif |
420 | |
|
421 | 0 | _PyTypes_AfterFork(); |
422 | |
|
423 | 0 | _PyThread_AfterFork(&runtime->threads); |
424 | |
|
425 | 0 | return _PyStatus_OK(); |
426 | 0 | } |
427 | | #endif |
428 | | |
429 | | |
430 | | /*************************************/ |
431 | | /* the per-interpreter runtime state */ |
432 | | /*************************************/ |
433 | | |
434 | | //---------- |
435 | | // lifecycle |
436 | | //---------- |
437 | | |
438 | | /* Calling this indicates that the runtime is ready to create interpreters. */ |
439 | | |
440 | | PyStatus |
441 | | _PyInterpreterState_Enable(_PyRuntimeState *runtime) |
442 | 28 | { |
443 | 28 | struct pyinterpreters *interpreters = &runtime->interpreters; |
444 | 28 | interpreters->next_id = 0; |
445 | 28 | return _PyStatus_OK(); |
446 | 28 | } |
447 | | |
448 | | static PyInterpreterState * |
449 | | alloc_interpreter(void) |
450 | 0 | { |
451 | | // Aligned allocation for PyInterpreterState. |
452 | | // the first word of the memory block is used to store |
453 | | // the original pointer to be used later to free the memory. |
454 | 0 | size_t alignment = _Alignof(PyInterpreterState); |
455 | 0 | size_t allocsize = sizeof(PyInterpreterState) + sizeof(void *) + alignment - 1; |
456 | 0 | void *mem = PyMem_RawCalloc(1, allocsize); |
457 | 0 | if (mem == NULL) { |
458 | 0 | return NULL; |
459 | 0 | } |
460 | 0 | void *ptr = _Py_ALIGN_UP((char *)mem + sizeof(void *), alignment); |
461 | 0 | ((void **)ptr)[-1] = mem; |
462 | 0 | assert(_Py_IS_ALIGNED(ptr, alignment)); |
463 | 0 | return ptr; |
464 | 0 | } |
465 | | |
466 | | static void |
467 | | free_interpreter(PyInterpreterState *interp) |
468 | 0 | { |
469 | | #ifdef Py_STATS |
470 | | if (interp->pystats_struct) { |
471 | | PyMem_RawFree(interp->pystats_struct); |
472 | | interp->pystats_struct = NULL; |
473 | | } |
474 | | #endif |
475 | | // The main interpreter is statically allocated so |
476 | | // should not be freed. |
477 | 0 | if (interp != &_PyRuntime._main_interpreter) { |
478 | 0 | if (_PyMem_obmalloc_state_on_heap(interp)) { |
479 | | // interpreter has its own obmalloc state, free it |
480 | 0 | PyMem_RawFree(interp->obmalloc); |
481 | 0 | interp->obmalloc = NULL; |
482 | 0 | } |
483 | 0 | assert(_Py_IS_ALIGNED(interp, _Alignof(PyInterpreterState))); |
484 | 0 | PyMem_RawFree(((void **)interp)[-1]); |
485 | 0 | } |
486 | 0 | } |
487 | | |
488 | | #ifndef NDEBUG |
489 | | static inline int check_interpreter_whence(long); |
490 | | #endif |
491 | | |
492 | | extern _Py_CODEUNIT * |
493 | | _Py_LazyJitTrampoline( |
494 | | struct _PyExecutorObject *exec, _PyInterpreterFrame *frame, _PyStackRef *stack_pointer, PyThreadState *tstate |
495 | | ); |
496 | | |
497 | | /* Get the interpreter state to a minimal consistent state. |
498 | | Further init happens in pylifecycle.c before it can be used. |
499 | | All fields not initialized here are expected to be zeroed out, |
500 | | e.g. by PyMem_RawCalloc() or memset(), or otherwise pre-initialized. |
501 | | The runtime state is not manipulated. Instead it is assumed that |
502 | | the interpreter is getting added to the runtime. |
503 | | |
504 | | Note that the main interpreter was statically initialized as part |
505 | | of the runtime and most state is already set properly. That leaves |
506 | | a small number of fields to initialize dynamically, as well as some |
507 | | that are initialized lazily. |
508 | | |
509 | | For subinterpreters we memcpy() the main interpreter in |
510 | | PyInterpreterState_New(), leaving it in the same mostly-initialized |
511 | | state. The only difference is that the interpreter has some |
512 | | self-referential state that is statically initialized to the |
513 | | main interpreter. We fix those fields here, in addition |
514 | | to the other dynamically initialized fields. |
515 | | */ |
516 | | static PyStatus |
517 | | init_interpreter(PyInterpreterState *interp, |
518 | | _PyRuntimeState *runtime, int64_t id, |
519 | | PyInterpreterState *next, |
520 | | long whence) |
521 | 28 | { |
522 | 28 | if (interp->_initialized) { |
523 | 0 | return _PyStatus_ERR("interpreter already initialized"); |
524 | 0 | } |
525 | | |
526 | 28 | assert(interp->_whence == _PyInterpreterState_WHENCE_NOTSET); |
527 | 28 | assert(check_interpreter_whence(whence) == 0); |
528 | 28 | interp->_whence = whence; |
529 | | |
530 | 28 | assert(runtime != NULL); |
531 | 28 | interp->runtime = runtime; |
532 | | |
533 | 28 | assert(id > 0 || (id == 0 && interp == runtime->interpreters.main)); |
534 | 28 | interp->id = id; |
535 | | |
536 | 28 | interp->id_refcount = 0; |
537 | | |
538 | 28 | assert(runtime->interpreters.head == interp); |
539 | 28 | assert(next != NULL || (interp == runtime->interpreters.main)); |
540 | 28 | interp->next = next; |
541 | | |
542 | 28 | interp->threads.preallocated = &interp->_initial_thread; |
543 | | |
544 | | // We would call _PyObject_InitState() at this point |
545 | | // if interp->feature_flags were already set. |
546 | | |
547 | 28 | _PyEval_InitState(interp); |
548 | 28 | _PyGC_InitState(&interp->gc); |
549 | 28 | PyConfig_InitPythonConfig(&interp->config); |
550 | 28 | _PyType_InitCache(interp); |
551 | | #ifdef Py_GIL_DISABLED |
552 | | _Py_brc_init_state(interp); |
553 | | #endif |
554 | | |
555 | 28 | llist_init(&interp->mem_free_queue.head); |
556 | 28 | llist_init(&interp->asyncio_tasks_head); |
557 | 28 | interp->asyncio_tasks_lock = (PyMutex){0}; |
558 | 476 | for (int i = 0; i < _PY_MONITORING_UNGROUPED_EVENTS; i++) { |
559 | 448 | interp->monitors.tools[i] = 0; |
560 | 448 | } |
561 | 252 | for (int t = 0; t < PY_MONITORING_TOOL_IDS; t++) { |
562 | 4.48k | for (int e = 0; e < _PY_MONITORING_EVENTS; e++) { |
563 | 4.25k | interp->monitoring_callables[t][e] = NULL; |
564 | | |
565 | 4.25k | } |
566 | 224 | interp->monitoring_tool_versions[t] = 0; |
567 | 224 | } |
568 | 28 | interp->_code_object_generation = 0; |
569 | 28 | interp->jit = false; |
570 | 28 | interp->compiling = false; |
571 | 28 | interp->executor_list_head = NULL; |
572 | 28 | interp->executor_deletion_list_head = NULL; |
573 | 28 | interp->executor_deletion_list_remaining_capacity = 0; |
574 | 28 | interp->executor_creation_counter = JIT_CLEANUP_THRESHOLD; |
575 | 28 | if (interp != &runtime->_main_interpreter) { |
576 | | /* Fix the self-referential, statically initialized fields. */ |
577 | 0 | interp->dtoa = (struct _dtoa_state)_dtoa_state_INIT(interp); |
578 | 0 | } |
579 | | #if !defined(Py_GIL_DISABLED) && defined(Py_STACKREF_DEBUG) |
580 | | interp->next_stackref = INITIAL_STACKREF_INDEX; |
581 | | _Py_hashtable_allocator_t alloc = { |
582 | | .malloc = malloc, |
583 | | .free = free, |
584 | | }; |
585 | | interp->open_stackrefs_table = _Py_hashtable_new_full( |
586 | | _Py_hashtable_hash_ptr, |
587 | | _Py_hashtable_compare_direct, |
588 | | NULL, |
589 | | NULL, |
590 | | &alloc |
591 | | ); |
592 | | # ifdef Py_STACKREF_CLOSE_DEBUG |
593 | | interp->closed_stackrefs_table = _Py_hashtable_new_full( |
594 | | _Py_hashtable_hash_ptr, |
595 | | _Py_hashtable_compare_direct, |
596 | | NULL, |
597 | | NULL, |
598 | | &alloc |
599 | | ); |
600 | | # endif |
601 | | _Py_stackref_associate(interp, Py_None, PyStackRef_None); |
602 | | _Py_stackref_associate(interp, Py_False, PyStackRef_False); |
603 | | _Py_stackref_associate(interp, Py_True, PyStackRef_True); |
604 | | #endif |
605 | | |
606 | 28 | interp->_initialized = 1; |
607 | 28 | return _PyStatus_OK(); |
608 | 28 | } |
609 | | |
610 | | |
611 | | PyStatus |
612 | | _PyInterpreterState_New(PyThreadState *tstate, PyInterpreterState **pinterp) |
613 | 28 | { |
614 | 28 | *pinterp = NULL; |
615 | | |
616 | | // Don't get runtime from tstate since tstate can be NULL |
617 | 28 | _PyRuntimeState *runtime = &_PyRuntime; |
618 | | |
619 | | // tstate is NULL when pycore_create_interpreter() calls |
620 | | // _PyInterpreterState_New() to create the main interpreter. |
621 | 28 | if (tstate != NULL) { |
622 | 0 | if (_PySys_Audit(tstate, "cpython.PyInterpreterState_New", NULL) < 0) { |
623 | 0 | return _PyStatus_ERR("sys.audit failed"); |
624 | 0 | } |
625 | 0 | } |
626 | | |
627 | | /* We completely serialize creation of multiple interpreters, since |
628 | | it simplifies things here and blocking concurrent calls isn't a problem. |
629 | | Regardless, we must fully block subinterpreter creation until |
630 | | after the main interpreter is created. */ |
631 | 28 | HEAD_LOCK(runtime); |
632 | | |
633 | 28 | struct pyinterpreters *interpreters = &runtime->interpreters; |
634 | 28 | int64_t id = interpreters->next_id; |
635 | 28 | interpreters->next_id += 1; |
636 | | |
637 | | // Allocate the interpreter and add it to the runtime state. |
638 | 28 | PyInterpreterState *interp; |
639 | 28 | PyStatus status; |
640 | 28 | PyInterpreterState *old_head = interpreters->head; |
641 | 28 | if (old_head == NULL) { |
642 | | // We are creating the main interpreter. |
643 | 28 | assert(interpreters->main == NULL); |
644 | 28 | assert(id == 0); |
645 | | |
646 | 28 | interp = &runtime->_main_interpreter; |
647 | 28 | assert(interp->id == 0); |
648 | 28 | assert(interp->next == NULL); |
649 | | |
650 | 28 | interpreters->main = interp; |
651 | 28 | } |
652 | 0 | else { |
653 | 0 | assert(interpreters->main != NULL); |
654 | 0 | assert(id != 0); |
655 | |
|
656 | 0 | interp = alloc_interpreter(); |
657 | 0 | if (interp == NULL) { |
658 | 0 | status = _PyStatus_NO_MEMORY(); |
659 | 0 | goto error; |
660 | 0 | } |
661 | | // Set to _PyInterpreterState_INIT. |
662 | 0 | memcpy(interp, &initial._main_interpreter, sizeof(*interp)); |
663 | |
|
664 | 0 | if (id < 0) { |
665 | | /* overflow or Py_Initialize() not called yet! */ |
666 | 0 | status = _PyStatus_ERR("failed to get an interpreter ID"); |
667 | 0 | goto error; |
668 | 0 | } |
669 | 0 | } |
670 | 28 | interpreters->head = interp; |
671 | | |
672 | 28 | long whence = _PyInterpreterState_WHENCE_UNKNOWN; |
673 | 28 | status = init_interpreter(interp, runtime, |
674 | 28 | id, old_head, whence); |
675 | 28 | if (_PyStatus_EXCEPTION(status)) { |
676 | 0 | goto error; |
677 | 0 | } |
678 | | |
679 | 28 | HEAD_UNLOCK(runtime); |
680 | | |
681 | 28 | assert(interp != NULL); |
682 | 28 | *pinterp = interp; |
683 | 28 | return _PyStatus_OK(); |
684 | | |
685 | 0 | error: |
686 | 0 | HEAD_UNLOCK(runtime); |
687 | |
|
688 | 0 | if (interp != NULL) { |
689 | 0 | free_interpreter(interp); |
690 | 0 | } |
691 | 0 | return status; |
692 | 28 | } |
693 | | |
694 | | |
695 | | PyInterpreterState * |
696 | | PyInterpreterState_New(void) |
697 | 0 | { |
698 | | // tstate can be NULL |
699 | 0 | PyThreadState *tstate = current_fast_get(); |
700 | |
|
701 | 0 | PyInterpreterState *interp; |
702 | 0 | PyStatus status = _PyInterpreterState_New(tstate, &interp); |
703 | 0 | if (_PyStatus_EXCEPTION(status)) { |
704 | 0 | Py_ExitStatusException(status); |
705 | 0 | } |
706 | 0 | assert(interp != NULL); |
707 | 0 | return interp; |
708 | 0 | } |
709 | | |
710 | | #if !defined(Py_GIL_DISABLED) && defined(Py_STACKREF_DEBUG) |
711 | | extern void |
712 | | _Py_stackref_report_leaks(PyInterpreterState *interp); |
713 | | #endif |
714 | | |
715 | | static void |
716 | | interpreter_clear(PyInterpreterState *interp, PyThreadState *tstate) |
717 | 0 | { |
718 | 0 | assert(interp != NULL); |
719 | 0 | assert(tstate != NULL); |
720 | 0 | _PyRuntimeState *runtime = interp->runtime; |
721 | | |
722 | | /* XXX Conditions we need to enforce: |
723 | | |
724 | | * the GIL must be held by the current thread |
725 | | * tstate must be the "current" thread state (current_fast_get()) |
726 | | * tstate->interp must be interp |
727 | | * for the main interpreter, tstate must be the main thread |
728 | | */ |
729 | | // XXX Ideally, we would not rely on any thread state in this function |
730 | | // (and we would drop the "tstate" argument). |
731 | |
|
732 | 0 | if (_PySys_Audit(tstate, "cpython.PyInterpreterState_Clear", NULL) < 0) { |
733 | 0 | _PyErr_Clear(tstate); |
734 | 0 | } |
735 | | |
736 | | // Clear the current/main thread state last. |
737 | 0 | _Py_FOR_EACH_TSTATE_BEGIN(interp, p) { |
738 | | // See https://github.com/python/cpython/issues/102126 |
739 | | // Must be called without HEAD_LOCK held as it can deadlock |
740 | | // if any finalizer tries to acquire that lock. |
741 | 0 | HEAD_UNLOCK(runtime); |
742 | 0 | PyThreadState_Clear(p); |
743 | 0 | HEAD_LOCK(runtime); |
744 | 0 | } |
745 | 0 | _Py_FOR_EACH_TSTATE_END(interp); |
746 | 0 | if (tstate->interp == interp) { |
747 | | /* We fix tstate->_status below when we for sure aren't using it |
748 | | (e.g. no longer need the GIL). */ |
749 | | // XXX Eliminate the need to do this. |
750 | 0 | tstate->_status.cleared = 0; |
751 | 0 | } |
752 | | |
753 | | /* It is possible that any of the objects below have a finalizer |
754 | | that runs Python code or otherwise relies on a thread state |
755 | | or even the interpreter state. For now we trust that isn't |
756 | | a problem. |
757 | | */ |
758 | | // XXX Make sure we properly deal with problematic finalizers. |
759 | |
|
760 | 0 | Py_CLEAR(interp->audit_hooks); |
761 | | |
762 | | // gh-140257: Threads have already been cleared, but daemon threads may |
763 | | // still access eval_breaker atomically via take_gil() right before they |
764 | | // hang. Use an atomic store to prevent data races during finalization. |
765 | 0 | interp->ceval.instrumentation_version = 0; |
766 | 0 | _Py_atomic_store_uintptr(&tstate->eval_breaker, 0); |
767 | |
|
768 | 0 | for (int i = 0; i < _PY_MONITORING_UNGROUPED_EVENTS; i++) { |
769 | 0 | interp->monitors.tools[i] = 0; |
770 | 0 | } |
771 | 0 | for (int t = 0; t < PY_MONITORING_TOOL_IDS; t++) { |
772 | 0 | for (int e = 0; e < _PY_MONITORING_EVENTS; e++) { |
773 | 0 | Py_CLEAR(interp->monitoring_callables[t][e]); |
774 | 0 | } |
775 | 0 | } |
776 | 0 | for (int t = 0; t < PY_MONITORING_TOOL_IDS; t++) { |
777 | 0 | Py_CLEAR(interp->monitoring_tool_names[t]); |
778 | 0 | } |
779 | 0 | interp->_code_object_generation = 0; |
780 | | #ifdef Py_GIL_DISABLED |
781 | | interp->tlbc_indices.tlbc_generation = 0; |
782 | | #endif |
783 | |
|
784 | 0 | PyConfig_Clear(&interp->config); |
785 | 0 | _PyCodec_Fini(interp); |
786 | |
|
787 | 0 | assert(interp->imports.modules == NULL); |
788 | 0 | assert(interp->imports.modules_by_index == NULL); |
789 | 0 | assert(interp->imports.importlib == NULL); |
790 | 0 | assert(interp->imports.import_func == NULL); |
791 | |
|
792 | 0 | Py_CLEAR(interp->sysdict_copy); |
793 | 0 | Py_CLEAR(interp->builtins_copy); |
794 | 0 | Py_CLEAR(interp->dict); |
795 | 0 | #ifdef HAVE_FORK |
796 | 0 | Py_CLEAR(interp->before_forkers); |
797 | 0 | Py_CLEAR(interp->after_forkers_parent); |
798 | 0 | Py_CLEAR(interp->after_forkers_child); |
799 | 0 | #endif |
800 | | |
801 | |
|
802 | | #ifdef _Py_TIER2 |
803 | | _Py_ClearExecutorDeletionList(interp); |
804 | | #endif |
805 | 0 | _PyAST_Fini(interp); |
806 | 0 | _PyAtExit_Fini(interp); |
807 | | |
808 | | // All Python types must be destroyed before the last GC collection. Python |
809 | | // types create a reference cycle to themselves in their |
810 | | // PyTypeObject.tp_mro member (the tuple contains the type). |
811 | | |
812 | | /* Last garbage collection on this interpreter */ |
813 | 0 | _PyGC_CollectNoFail(tstate); |
814 | 0 | _PyGC_Fini(interp); |
815 | | |
816 | | // Finalize warnings after last gc so that any finalizers can |
817 | | // access warnings state |
818 | 0 | _PyWarnings_Fini(interp); |
819 | 0 | struct _PyExecutorObject *cold = interp->cold_executor; |
820 | 0 | if (cold != NULL) { |
821 | 0 | interp->cold_executor = NULL; |
822 | 0 | assert(cold->vm_data.valid); |
823 | 0 | assert(cold->vm_data.warm); |
824 | 0 | _PyExecutor_Free(cold); |
825 | 0 | } |
826 | |
|
827 | 0 | struct _PyExecutorObject *cold_dynamic = interp->cold_dynamic_executor; |
828 | 0 | if (cold_dynamic != NULL) { |
829 | 0 | interp->cold_dynamic_executor = NULL; |
830 | 0 | assert(cold_dynamic->vm_data.valid); |
831 | 0 | assert(cold_dynamic->vm_data.warm); |
832 | 0 | _PyExecutor_Free(cold_dynamic); |
833 | 0 | } |
834 | | /* We don't clear sysdict and builtins until the end of this function. |
835 | | Because clearing other attributes can execute arbitrary Python code |
836 | | which requires sysdict and builtins. */ |
837 | 0 | PyDict_Clear(interp->sysdict); |
838 | 0 | PyDict_Clear(interp->builtins); |
839 | 0 | Py_CLEAR(interp->sysdict); |
840 | 0 | Py_CLEAR(interp->builtins); |
841 | |
|
842 | | #if !defined(Py_GIL_DISABLED) && defined(Py_STACKREF_DEBUG) |
843 | | # ifdef Py_STACKREF_CLOSE_DEBUG |
844 | | _Py_hashtable_destroy(interp->closed_stackrefs_table); |
845 | | interp->closed_stackrefs_table = NULL; |
846 | | # endif |
847 | | _Py_stackref_report_leaks(interp); |
848 | | _Py_hashtable_destroy(interp->open_stackrefs_table); |
849 | | interp->open_stackrefs_table = NULL; |
850 | | #endif |
851 | |
|
852 | 0 | if (tstate->interp == interp) { |
853 | | /* We are now safe to fix tstate->_status.cleared. */ |
854 | | // XXX Do this (much) earlier? |
855 | 0 | tstate->_status.cleared = 1; |
856 | 0 | } |
857 | |
|
858 | 0 | for (int i=0; i < DICT_MAX_WATCHERS; i++) { |
859 | 0 | interp->dict_state.watchers[i] = NULL; |
860 | 0 | } |
861 | |
|
862 | 0 | for (int i=0; i < TYPE_MAX_WATCHERS; i++) { |
863 | 0 | interp->type_watchers[i] = NULL; |
864 | 0 | } |
865 | |
|
866 | 0 | for (int i=0; i < FUNC_MAX_WATCHERS; i++) { |
867 | 0 | interp->func_watchers[i] = NULL; |
868 | 0 | } |
869 | 0 | interp->active_func_watchers = 0; |
870 | |
|
871 | 0 | for (int i=0; i < CODE_MAX_WATCHERS; i++) { |
872 | 0 | interp->code_watchers[i] = NULL; |
873 | 0 | } |
874 | 0 | interp->active_code_watchers = 0; |
875 | |
|
876 | 0 | for (int i=0; i < CONTEXT_MAX_WATCHERS; i++) { |
877 | 0 | interp->context_watchers[i] = NULL; |
878 | 0 | } |
879 | 0 | interp->active_context_watchers = 0; |
880 | | // XXX Once we have one allocator per interpreter (i.e. |
881 | | // per-interpreter GC) we must ensure that all of the interpreter's |
882 | | // objects have been cleaned up at the point. |
883 | | |
884 | | // We could clear interp->threads.freelist here |
885 | | // if it held more than just the initial thread state. |
886 | 0 | } |
887 | | |
888 | | |
889 | | void |
890 | | PyInterpreterState_Clear(PyInterpreterState *interp) |
891 | 0 | { |
892 | | // Use the current Python thread state to call audit hooks and to collect |
893 | | // garbage. It can be different than the current Python thread state |
894 | | // of 'interp'. |
895 | 0 | PyThreadState *current_tstate = current_fast_get(); |
896 | 0 | _PyImport_ClearCore(interp); |
897 | 0 | interpreter_clear(interp, current_tstate); |
898 | 0 | } |
899 | | |
900 | | |
901 | | void |
902 | | _PyInterpreterState_Clear(PyThreadState *tstate) |
903 | 0 | { |
904 | 0 | _PyImport_ClearCore(tstate->interp); |
905 | 0 | interpreter_clear(tstate->interp, tstate); |
906 | 0 | } |
907 | | |
908 | | |
909 | | static inline void tstate_deactivate(PyThreadState *tstate); |
910 | | static void tstate_set_detached(PyThreadState *tstate, int detached_state); |
911 | | static void zapthreads(PyInterpreterState *interp); |
912 | | |
/* Free the given interpreter's state.  The interpreter should already
   have been cleared (e.g. via PyInterpreterState_Clear()).

   This detaches the current thread state if it belongs to 'interp',
   deletes all remaining thread states, unlinks the interpreter from
   the runtime's interpreter list, and frees the allocation.  It is a
   fatal error if 'interp' is not in the runtime's list, if it still
   has threads, or if it is the main interpreter while subinterpreters
   remain. */
void
PyInterpreterState_Delete(PyInterpreterState *interp)
{
    _PyRuntimeState *runtime = interp->runtime;
    struct pyinterpreters *interpreters = &runtime->interpreters;

    // XXX Clearing the "current" thread state should happen before
    // we start finalizing the interpreter (or the current thread state).
    PyThreadState *tcur = current_fast_get();
    if (tcur != NULL && interp == tcur->interp) {
        /* Unset current thread. After this, many C API calls become crashy. */
        _PyThreadState_Detach(tcur);
    }

    // Delete every thread state still attached to this interpreter.
    zapthreads(interp);

    // XXX These two calls should be done at the end of clear_interpreter(),
    // but currently some objects get decref'ed after that.
#ifdef Py_REF_DEBUG
    _PyInterpreterState_FinalizeRefTotal(interp);
#endif
    _PyInterpreterState_FinalizeAllocatedBlocks(interp);

    HEAD_LOCK(runtime);
    // Find the link pointer that refers to 'interp' so it can be unlinked.
    PyInterpreterState **p;
    for (p = &interpreters->head; ; p = &(*p)->next) {
        if (*p == NULL) {
            Py_FatalError("NULL interpreter");
        }
        if (*p == interp) {
            break;
        }
    }
    if (interp->threads.head != NULL) {
        Py_FatalError("remaining threads");
    }
    // Unlink 'interp' from the runtime's interpreter list.
    *p = interp->next;

    if (interpreters->main == interp) {
        interpreters->main = NULL;
        if (interpreters->head != NULL) {
            Py_FatalError("remaining subinterpreters");
        }
    }
    HEAD_UNLOCK(runtime);

    _Py_qsbr_fini(interp);

    _PyObject_FiniState(interp);

    PyConfig_Clear(&interp->config);

    free_interpreter(interp);
}
967 | | |
968 | | |
#ifdef HAVE_FORK
/*
 * Delete all interpreter states except the main interpreter. If there
 * is a current interpreter state, it *must* be the main interpreter.
 *
 * Used after fork(): the child keeps only the main interpreter.
 */
PyStatus
_PyInterpreterState_DeleteExceptMain(_PyRuntimeState *runtime)
{
    struct pyinterpreters *interpreters = &runtime->interpreters;

    // Swap out the current thread state; it must belong to the main
    // interpreter (if one is set at all).
    PyThreadState *tstate = _PyThreadState_Swap(runtime, NULL);
    if (tstate != NULL && tstate->interp != interpreters->main) {
        return _PyStatus_ERR("not main interpreter");
    }

    HEAD_LOCK(runtime);
    PyInterpreterState *interp = interpreters->head;
    interpreters->head = NULL;
    while (interp != NULL) {
        if (interp == interpreters->main) {
            // Keep the main interpreter as the sole remaining list entry.
            interpreters->main->next = NULL;
            interpreters->head = interp;
            interp = interp->next;
            continue;
        }

        // XXX Won't this fail since PyInterpreterState_Clear() requires
        // the "current" tstate to be set?
        PyInterpreterState_Clear(interp);  // XXX must activate?
        zapthreads(interp);
        PyInterpreterState *prev_interp = interp;
        interp = interp->next;
        free_interpreter(prev_interp);
    }
    HEAD_UNLOCK(runtime);

    if (interpreters->head == NULL) {
        return _PyStatus_ERR("missing main interpreter");
    }
    // Restore the previously current thread state.
    _PyThreadState_Swap(runtime, tstate);
    return _PyStatus_OK();
}
#endif
1012 | | |
// Record 'tstate' as the thread running this interpreter's main program
// (NULL clears the marker).  Uses a relaxed atomic store; paired with
// the relaxed load in get_main_thread().
static inline void
set_main_thread(PyInterpreterState *interp, PyThreadState *tstate)
{
    _Py_atomic_store_ptr_relaxed(&interp->threads.main, tstate);
}
1018 | | |
// Return the thread state recorded as running this interpreter's main
// program, or NULL if there isn't one.
static inline PyThreadState *
get_main_thread(PyInterpreterState *interp)
{
    return _Py_atomic_load_ptr_relaxed(&interp->threads.main);
}
1024 | | |
// Set the InterpreterError raised when a thread tries to run an
// interpreter's main program while another thread already is.
void
_PyErr_SetInterpreterAlreadyRunning(void)
{
    PyErr_SetString(PyExc_InterpreterError, "interpreter already running");
}
1030 | | |
/* Mark the current thread as running the interpreter's main program.

   Return 0 on success.  Return -1 with an exception set if another
   thread is already marked as running main, or if the current thread
   state belongs to a different interpreter. */
int
_PyInterpreterState_SetRunningMain(PyInterpreterState *interp)
{
    if (get_main_thread(interp) != NULL) {
        _PyErr_SetInterpreterAlreadyRunning();
        return -1;
    }
    PyThreadState *tstate = current_fast_get();
    _Py_EnsureTstateNotNULL(tstate);
    if (tstate->interp != interp) {
        PyErr_SetString(PyExc_RuntimeError,
                        "current tstate has wrong interpreter");
        return -1;
    }
    set_main_thread(interp, tstate);

    return 0;
}
1049 | | |
/* Clear the "running main" marker.  Must be called by the same thread
   that set it (asserted in debug builds). */
void
_PyInterpreterState_SetNotRunningMain(PyInterpreterState *interp)
{
    assert(get_main_thread(interp) == current_fast_get());
    set_main_thread(interp, NULL);
}
1056 | | |
1057 | | int |
1058 | | _PyInterpreterState_IsRunningMain(PyInterpreterState *interp) |
1059 | 0 | { |
1060 | 0 | if (get_main_thread(interp) != NULL) { |
1061 | 0 | return 1; |
1062 | 0 | } |
1063 | | // Embedders might not know to call _PyInterpreterState_SetRunningMain(), |
1064 | | // so their main thread wouldn't show it is running the main interpreter's |
1065 | | // program. (Py_Main() doesn't have this problem.) For now this isn't |
1066 | | // critical. If it were, we would need to infer "running main" from other |
1067 | | // information, like if it's the main interpreter. We used to do that |
1068 | | // but the naive approach led to some inconsistencies that caused problems. |
1069 | 0 | return 0; |
1070 | 0 | } |
1071 | | |
1072 | | int |
1073 | | _PyThreadState_IsRunningMain(PyThreadState *tstate) |
1074 | 0 | { |
1075 | 0 | PyInterpreterState *interp = tstate->interp; |
1076 | | // See the note in _PyInterpreterState_IsRunningMain() about |
1077 | | // possible false negatives here for embedders. |
1078 | 0 | return get_main_thread(interp) == tstate; |
1079 | 0 | } |
1080 | | |
1081 | | void |
1082 | | _PyInterpreterState_ReinitRunningMain(PyThreadState *tstate) |
1083 | 0 | { |
1084 | 0 | PyInterpreterState *interp = tstate->interp; |
1085 | 0 | if (get_main_thread(interp) != tstate) { |
1086 | 0 | set_main_thread(interp, NULL); |
1087 | 0 | } |
1088 | 0 | } |
1089 | | |
1090 | | |
1091 | | //---------- |
1092 | | // accessors |
1093 | | //---------- |
1094 | | |
// Report whether the interpreter has finished initializing
// (its _ready flag has been set).
int
_PyInterpreterState_IsReady(PyInterpreterState *interp)
{
    return interp->_ready;
}
1100 | | |
1101 | | #ifndef NDEBUG |
1102 | | static inline int |
1103 | | check_interpreter_whence(long whence) |
1104 | | { |
1105 | | if(whence < 0) { |
1106 | | return -1; |
1107 | | } |
1108 | | if (whence > _PyInterpreterState_WHENCE_MAX) { |
1109 | | return -1; |
1110 | | } |
1111 | | return 0; |
1112 | | } |
1113 | | #endif |
1114 | | |
// Return how the interpreter was created (a WHENCE_* value).
long
_PyInterpreterState_GetWhence(PyInterpreterState *interp)
{
    assert(check_interpreter_whence(interp->_whence) == 0);
    return interp->_whence;
}
1121 | | |
// Update how the interpreter was created.  The value must already have
// been set once (i.e. it may not still be WHENCE_NOTSET).
void
_PyInterpreterState_SetWhence(PyInterpreterState *interp, long whence)
{
    assert(interp->_whence != _PyInterpreterState_WHENCE_NOTSET);
    assert(check_interpreter_whence(whence) == 0);
    interp->_whence = whence;
}
1129 | | |
1130 | | |
/* Return a strong reference to the __main__ module, or None if it is
   missing (or if sys.modules itself is gone).  Return NULL only if the
   lookup raised an exception. */
PyObject *
_Py_GetMainModule(PyThreadState *tstate)
{
    // We return None to indicate "not found" or "bogus".
    PyObject *modules = _PyImport_GetModulesRef(tstate->interp);
    if (modules == Py_None) {
        return modules;
    }
    PyObject *module = NULL;
    (void)PyMapping_GetOptionalItem(modules, &_Py_ID(__main__), &module);
    Py_DECREF(modules);
    if (module == NULL && !PyErr_Occurred()) {
        // Not found, and no error was raised: report it as None.
        Py_RETURN_NONE;
    }
    return module;
}
1147 | | |
/* Validate an object obtained as the __main__ module.

   Return 0 if it is a real module object.  Return -1 with an exception
   set (ModuleNotFoundError or ImportError) if it is NULL/None or is
   not a module instance. */
int
_Py_CheckMainModule(PyObject *module)
{
    if (module == NULL || module == Py_None) {
        if (!PyErr_Occurred()) {
            (void)_PyErr_SetModuleNotFoundError(&_Py_ID(__main__));
        }
        return -1;
    }
    if (!Py_IS_TYPE(module, &PyModule_Type)) {
        /* The __main__ module has been tampered with. */
        PyObject *msg = PyUnicode_FromString("invalid __main__ module");
        if (msg != NULL) {
            (void)PyErr_SetImportError(msg, &_Py_ID(__main__), NULL);
            Py_DECREF(msg);
        }
        return -1;
    }
    return 0;
}
1168 | | |
1169 | | |
1170 | | PyObject * |
1171 | | PyInterpreterState_GetDict(PyInterpreterState *interp) |
1172 | 32 | { |
1173 | 32 | if (interp->dict == NULL) { |
1174 | 10 | interp->dict = PyDict_New(); |
1175 | 10 | if (interp->dict == NULL) { |
1176 | 0 | PyErr_Clear(); |
1177 | 0 | } |
1178 | 10 | } |
1179 | | /* Returning NULL means no per-interpreter dict is available. */ |
1180 | 32 | return interp->dict; |
1181 | 32 | } |
1182 | | |
1183 | | |
1184 | | //---------- |
1185 | | // interp ID |
1186 | | //---------- |
1187 | | |
/* Convert a Python object to an interpreter ID.

   Return a non-negative int64_t on success.  Return -1 with an
   exception set (TypeError, ValueError, or OverflowError) if the
   object is not index-like, is negative, or is out of range. */
int64_t
_PyInterpreterState_ObjectToID(PyObject *idobj)
{
    if (!_PyIndex_Check(idobj)) {
        PyErr_Format(PyExc_TypeError,
                     "interpreter ID must be an int, got %.100s",
                     Py_TYPE(idobj)->tp_name);
        return -1;
    }

    // This may raise OverflowError.
    // For now, we don't worry about if LLONG_MAX < INT64_MAX.
    long long id = PyLong_AsLongLong(idobj);
    if (id == -1 && PyErr_Occurred()) {
        return -1;
    }

    if (id < 0) {
        PyErr_Format(PyExc_ValueError,
                     "interpreter ID must be a non-negative int, got %R",
                     idobj);
        return -1;
    }
#if LLONG_MAX > INT64_MAX
    // Only reachable on platforms where long long is wider than 64 bits.
    else if (id > INT64_MAX) {
        PyErr_SetString(PyExc_OverflowError, "int too big to convert");
        return -1;
    }
#endif
    else {
        return (int64_t)id;
    }
}
1221 | | |
1222 | | int64_t |
1223 | | PyInterpreterState_GetID(PyInterpreterState *interp) |
1224 | 0 | { |
1225 | 0 | if (interp == NULL) { |
1226 | 0 | PyErr_SetString(PyExc_RuntimeError, "no interpreter provided"); |
1227 | 0 | return -1; |
1228 | 0 | } |
1229 | 0 | return interp->id; |
1230 | 0 | } |
1231 | | |
1232 | | PyObject * |
1233 | | _PyInterpreterState_GetIDObject(PyInterpreterState *interp) |
1234 | 0 | { |
1235 | 0 | int64_t interpid = interp->id; |
1236 | 0 | if (interpid < 0) { |
1237 | 0 | return NULL; |
1238 | 0 | } |
1239 | 0 | assert(interpid < LLONG_MAX); |
1240 | 0 | return PyLong_FromLongLong(interpid); |
1241 | 0 | } |
1242 | | |
1243 | | |
1244 | | |
// Atomically increment the interpreter's external ID refcount
// (see _PyInterpreterState_IDDecref() for the matching decrement).
void
_PyInterpreterState_IDIncref(PyInterpreterState *interp)
{
    _Py_atomic_add_ssize(&interp->id_refcount, 1);
}
1250 | | |
1251 | | |
/* Atomically decrement the interpreter's external ID refcount.

   When the last reference is dropped and the interpreter was marked
   with _PyInterpreterState_RequireIDRef(), the interpreter is ended
   here via Py_EndInterpreter() on a freshly created thread state; the
   previously current thread state is restored afterwards. */
void
_PyInterpreterState_IDDecref(PyInterpreterState *interp)
{
    _PyRuntimeState *runtime = interp->runtime;

    // 'refcount' here is compared against 1, i.e. the value before the
    // decrement -- TODO confirm _Py_atomic_add_ssize() returns the old value.
    Py_ssize_t refcount = _Py_atomic_add_ssize(&interp->id_refcount, -1);

    if (refcount == 1 && interp->requires_idref) {
        PyThreadState *tstate =
            _PyThreadState_NewBound(interp, _PyThreadState_WHENCE_FINI);

        // XXX Possible GILState issues?
        PyThreadState *save_tstate = _PyThreadState_Swap(runtime, tstate);
        Py_EndInterpreter(tstate);
        _PyThreadState_Swap(runtime, save_tstate);
    }
}
1269 | | |
// Report whether dropping the last ID reference ends the interpreter
// (see _PyInterpreterState_IDDecref()).
int
_PyInterpreterState_RequiresIDRef(PyInterpreterState *interp)
{
    return interp->requires_idref;
}
1275 | | |
1276 | | void |
1277 | | _PyInterpreterState_RequireIDRef(PyInterpreterState *interp, int required) |
1278 | 0 | { |
1279 | 0 | interp->requires_idref = required ? 1 : 0; |
1280 | 0 | } |
1281 | | |
1282 | | |
1283 | | //----------------------------- |
1284 | | // look up an interpreter state |
1285 | | //----------------------------- |
1286 | | |
1287 | | /* Return the interpreter associated with the current OS thread. |
1288 | | |
1289 | | The GIL must be held. |
1290 | | */ |
1291 | | |
// Return the current interpreter state (never NULL); a missing
// interpreter is a fatal error rather than a NULL return.
PyInterpreterState*
PyInterpreterState_Get(void)
{
    _Py_AssertHoldsTstate();
    PyInterpreterState *interp = _Py_tss_interp;
    if (interp == NULL) {
        Py_FatalError("no current interpreter");
    }
    return interp;
}
1302 | | |
1303 | | |
1304 | | static PyInterpreterState * |
1305 | | interp_look_up_id(_PyRuntimeState *runtime, int64_t requested_id) |
1306 | 0 | { |
1307 | 0 | PyInterpreterState *interp = runtime->interpreters.head; |
1308 | 0 | while (interp != NULL) { |
1309 | 0 | int64_t id = interp->id; |
1310 | 0 | assert(id >= 0); |
1311 | 0 | if (requested_id == id) { |
1312 | 0 | return interp; |
1313 | 0 | } |
1314 | 0 | interp = PyInterpreterState_Next(interp); |
1315 | 0 | } |
1316 | 0 | return NULL; |
1317 | 0 | } |
1318 | | |
1319 | | /* Return the interpreter state with the given ID. |
1320 | | |
1321 | | Fail with RuntimeError if the interpreter is not found. */ |
1322 | | |
/* Return the interpreter state with the given ID, or NULL with
   InterpreterNotFoundError set if no such interpreter exists
   (negative IDs never match). */
PyInterpreterState *
_PyInterpreterState_LookUpID(int64_t requested_id)
{
    PyInterpreterState *interp = NULL;
    if (requested_id >= 0) {
        _PyRuntimeState *runtime = &_PyRuntime;
        HEAD_LOCK(runtime);
        interp = interp_look_up_id(runtime, requested_id);
        HEAD_UNLOCK(runtime);
    }
    if (interp == NULL && !PyErr_Occurred()) {
        PyErr_Format(PyExc_InterpreterNotFoundError,
                     "unrecognized interpreter ID %lld", requested_id);
    }
    return interp;
}
1339 | | |
1340 | | PyInterpreterState * |
1341 | | _PyInterpreterState_LookUpIDObject(PyObject *requested_id) |
1342 | 0 | { |
1343 | 0 | int64_t id = _PyInterpreterState_ObjectToID(requested_id); |
1344 | 0 | if (id < 0) { |
1345 | 0 | return NULL; |
1346 | 0 | } |
1347 | 0 | return _PyInterpreterState_LookUpID(id); |
1348 | 0 | } |
1349 | | |
1350 | | |
1351 | | /********************************/ |
1352 | | /* the per-thread runtime state */ |
1353 | | /********************************/ |
1354 | | |
1355 | | #ifndef NDEBUG |
1356 | | static inline int |
1357 | | tstate_is_alive(PyThreadState *tstate) |
1358 | | { |
1359 | | return (tstate->_status.initialized && |
1360 | | !tstate->_status.finalized && |
1361 | | !tstate->_status.cleared && |
1362 | | !tstate->_status.finalizing); |
1363 | | } |
1364 | | #endif |
1365 | | |
1366 | | |
1367 | | //---------- |
1368 | | // lifecycle |
1369 | | //---------- |
1370 | | |
1371 | | static _PyStackChunk* |
1372 | | allocate_chunk(int size_in_bytes, _PyStackChunk* previous) |
1373 | 234k | { |
1374 | 234k | assert(size_in_bytes % sizeof(PyObject **) == 0); |
1375 | 234k | _PyStackChunk *res = _PyObject_VirtualAlloc(size_in_bytes); |
1376 | 234k | if (res == NULL) { |
1377 | 0 | return NULL; |
1378 | 0 | } |
1379 | 234k | res->previous = previous; |
1380 | 234k | res->size = size_in_bytes; |
1381 | 234k | res->top = 0; |
1382 | 234k | return res; |
1383 | 234k | } |
1384 | | |
// Reset a thread state to its pristine initial contents by copying the
// statically initialized template over it.
static void
reset_threadstate(_PyThreadStateImpl *tstate)
{
    // Set to _PyThreadState_INIT directly?
    memcpy(tstate,
           &initial._main_interpreter._initial_thread,
           sizeof(*tstate));
}
1393 | | |
/* Allocate (or reuse) a pristine thread state for 'interp'.

   The interpreter's preallocated thread state is claimed (atomically)
   when available; otherwise a new one is heap-allocated with
   PyMem_RawCalloc() and reset from the static template.
   Returns NULL on allocation failure. */
static _PyThreadStateImpl *
alloc_threadstate(PyInterpreterState *interp)
{
    _PyThreadStateImpl *tstate;

    // Try the preallocated tstate first.
    tstate = _Py_atomic_exchange_ptr(&interp->threads.preallocated, NULL);

    // Fall back to the allocator.
    if (tstate == NULL) {
        tstate = PyMem_RawCalloc(1, sizeof(_PyThreadStateImpl));
        if (tstate == NULL) {
            return NULL;
        }
        reset_threadstate(tstate);
    }
    return tstate;
}
1412 | | |
/* Release a thread state obtained from alloc_threadstate().

   The interpreter's embedded initial thread state is never freed;
   instead it is reset and republished as the preallocated slot. */
static void
free_threadstate(_PyThreadStateImpl *tstate)
{
    PyInterpreterState *interp = tstate->base.interp;
#ifdef Py_STATS
    _PyStats_ThreadFini(tstate);
#endif
    // The initial thread state of the interpreter is allocated
    // as part of the interpreter state so should not be freed.
    if (tstate == &interp->_initial_thread) {
        // Make it available again.
        reset_threadstate(tstate);
        assert(interp->threads.preallocated == NULL);
        _Py_atomic_store_ptr(&interp->threads.preallocated, tstate);
    }
    else {
        PyMem_RawFree(tstate);
    }
}
1432 | | |
1433 | | static void |
1434 | | decref_threadstate(_PyThreadStateImpl *tstate) |
1435 | 0 | { |
1436 | 0 | if (_Py_atomic_add_ssize(&tstate->refcount, -1) == 1) { |
1437 | | // The last reference to the thread state is gone. |
1438 | 0 | free_threadstate(tstate); |
1439 | 0 | } |
1440 | 0 | } |
1441 | | |
1442 | | /* Get the thread state to a minimal consistent state. |
1443 | | Further init happens in pylifecycle.c before it can be used. |
1444 | | All fields not initialized here are expected to be zeroed out, |
1445 | | e.g. by PyMem_RawCalloc() or memset(), or otherwise pre-initialized. |
1446 | | The interpreter state is not manipulated. Instead it is assumed that |
1447 | | the thread is getting added to the interpreter. |
1448 | | */ |
1449 | | |
/* Initialize a freshly allocated (zeroed) thread state.

   '_tstate' must come from alloc_threadstate(); 'id' is the
   interpreter-local unique ID; 'whence' records how the thread state
   was created.  It is a fatal error to initialize a thread state
   twice.  The thread state is not yet linked into the interpreter
   (see add_threadstate()) nor bound to an OS thread. */
static void
init_threadstate(_PyThreadStateImpl *_tstate,
                 PyInterpreterState *interp, uint64_t id, int whence)
{
    PyThreadState *tstate = (PyThreadState *)_tstate;
    if (tstate->_status.initialized) {
        Py_FatalError("thread state already initialized");
    }

    assert(interp != NULL);
    tstate->interp = interp;
    // Seed the eval breaker with the interpreter's current
    // instrumentation version.
    tstate->eval_breaker =
        _Py_atomic_load_uintptr_relaxed(&interp->ceval.instrumentation_version);

    // next/prev are set in add_threadstate().
    assert(tstate->next == NULL);
    assert(tstate->prev == NULL);

    assert(tstate->_whence == _PyThreadState_WHENCE_NOTSET);
    assert(whence >= 0 && whence <= _PyThreadState_WHENCE_THREADING_DAEMON);
    tstate->_whence = whence;

    assert(id > 0);
    tstate->id = id;

    // thread_id and native_thread_id are set in bind_tstate().

    tstate->py_recursion_limit = interp->ceval.recursion_limit;
    tstate->py_recursion_remaining = interp->ceval.recursion_limit;
    tstate->exc_info = &tstate->exc_state;

    // PyGILState_Release must not try to delete this thread state.
    // This is cleared when PyGILState_Ensure() creates the thread state.
    tstate->gilstate_counter = 1;

    tstate->current_frame = NULL;
    tstate->datastack_chunk = NULL;
    tstate->datastack_top = NULL;
    tstate->datastack_limit = NULL;
    tstate->what_event = -1;
    tstate->current_executor = NULL;
    tstate->jit_exit = NULL;
    tstate->dict_global_version = 0;

    // C stack limits are computed later, when the tstate is bound to
    // an OS thread; start with sentinel values.
    _tstate->c_stack_soft_limit = UINTPTR_MAX;
    _tstate->c_stack_top = 0;
    _tstate->c_stack_hard_limit = 0;

    _tstate->c_stack_init_base = 0;
    _tstate->c_stack_init_top = 0;

    _tstate->asyncio_running_loop = NULL;
    _tstate->asyncio_running_task = NULL;

#ifdef _Py_TIER2
    _tstate->jit_tracer_state.code_buffer = NULL;
#endif
    tstate->delete_later = NULL;

    llist_init(&_tstate->mem_free_queue);
    llist_init(&_tstate->asyncio_tasks_head);
    if (interp->stoptheworld.requested || _PyRuntime.stoptheworld.requested) {
        // Start in the suspended state if there is an ongoing stop-the-world.
        tstate->state = _Py_THREAD_SUSPENDED;
    }

    tstate->_status.initialized = 1;
}
1518 | | |
/* Push 'tstate' onto the front of the interpreter's thread list.
   'next' is the current list head (may be NULL).  The caller must hold
   HEAD_LOCK() (see new_threadstate()). */
static void
add_threadstate(PyInterpreterState *interp, PyThreadState *tstate,
                PyThreadState *next)
{
    assert(interp->threads.head != tstate);
    if (next != NULL) {
        assert(next->prev == NULL || next->prev == tstate);
        next->prev = tstate;
    }
    tstate->next = next;
    assert(tstate->prev == NULL);
    interp->threads.head = tstate;
}
1532 | | |
/* Allocate, initialize, and register a new thread state for 'interp'.

   Returns NULL on allocation failure.  The thread state is linked into
   the interpreter's thread list but not yet bound to an OS thread. */
static PyThreadState *
new_threadstate(PyInterpreterState *interp, int whence)
{
    // Allocate the thread state.
    _PyThreadStateImpl *tstate = alloc_threadstate(interp);
    if (tstate == NULL) {
        return NULL;
    }

#ifdef Py_GIL_DISABLED
    // Reserve free-threading resources before taking the head lock.
    Py_ssize_t qsbr_idx = _Py_qsbr_reserve(interp);
    if (qsbr_idx < 0) {
        free_threadstate(tstate);
        return NULL;
    }
    int32_t tlbc_idx = _Py_ReserveTLBCIndex(interp);
    if (tlbc_idx < 0) {
        free_threadstate(tstate);
        return NULL;
    }
#endif
#ifdef Py_STATS
    // The PyStats structure is quite large and is allocated separately
    // from the tstate.
    if (!_PyStats_ThreadInit(interp, tstate)) {
        free_threadstate(tstate);
        return NULL;
    }
#endif

    /* We serialize concurrent creation to protect global state. */
    HEAD_LOCK(interp->runtime);

    // Initialize the new thread state.
    interp->threads.next_unique_id += 1;
    uint64_t id = interp->threads.next_unique_id;
    init_threadstate(tstate, interp, id, whence);

    // Add the new thread state to the interpreter.
    PyThreadState *old_head = interp->threads.head;
    add_threadstate(interp, (PyThreadState *)tstate, old_head);

    HEAD_UNLOCK(interp->runtime);

#ifdef Py_GIL_DISABLED
    // Must be called with lock unlocked to avoid lock ordering deadlocks.
    _Py_qsbr_register(tstate, interp, qsbr_idx);
    tstate->tlbc_index = tlbc_idx;
#endif

    return (PyThreadState *)tstate;
}
1584 | | |
/* Public API: create a new thread state for 'interp' and bind it to
   the current OS thread.  Returns NULL on failure. */
PyThreadState *
PyThreadState_New(PyInterpreterState *interp)
{
    return _PyThreadState_NewBound(interp, _PyThreadState_WHENCE_UNKNOWN);
}
1590 | | |
/* Create a new thread state and immediately bind it to the current OS
   thread.  Also binds the gilstate TSS entry if none is set yet.
   Returns NULL on failure. */
PyThreadState *
_PyThreadState_NewBound(PyInterpreterState *interp, int whence)
{
    PyThreadState *tstate = new_threadstate(interp, whence);
    if (tstate) {
        bind_tstate(tstate);
        // This makes sure there's a gilstate tstate bound
        // as soon as possible.
        if (gilstate_get() == NULL) {
            bind_gilstate_tstate(tstate);
        }
    }
    return tstate;
}
1605 | | |
// Create a new, unbound thread state for 'interp'.
// This must be followed by a call to _PyThreadState_Bind();
PyThreadState *
_PyThreadState_New(PyInterpreterState *interp, int whence)
{
    return new_threadstate(interp, whence);
}
1612 | | |
// We keep this for stable ABI compatibility.
// Legacy alias for _PyThreadState_New() with an "unknown" whence;
// kept only so the symbol remains exported.
PyAPI_FUNC(PyThreadState*)
_PyThreadState_Prealloc(PyInterpreterState *interp)
{
    return _PyThreadState_New(interp, _PyThreadState_WHENCE_UNKNOWN);
}
1619 | | |
// We keep this around for (accidental) stable ABI compatibility.
// Realistically, no extensions are using it.
// Calling it is always a fatal error.
PyAPI_FUNC(void)
_PyThreadState_Init(PyThreadState *tstate)
{
    Py_FatalError("_PyThreadState_Init() is for internal use only");
}
1627 | | |
1628 | | |
1629 | | static void |
1630 | | clear_datastack(PyThreadState *tstate) |
1631 | 0 | { |
1632 | 0 | _PyStackChunk *chunk = tstate->datastack_chunk; |
1633 | 0 | tstate->datastack_chunk = NULL; |
1634 | 0 | while (chunk != NULL) { |
1635 | 0 | _PyStackChunk *prev = chunk->previous; |
1636 | 0 | _PyObject_VirtualFree(chunk, chunk->size); |
1637 | 0 | chunk = prev; |
1638 | 0 | } |
1639 | 0 | } |
1640 | | |
1641 | | void |
1642 | | PyThreadState_Clear(PyThreadState *tstate) |
1643 | 0 | { |
1644 | 0 | assert(tstate->_status.initialized && !tstate->_status.cleared); |
1645 | 0 | assert(current_fast_get()->interp == tstate->interp); |
1646 | | // GH-126016: In the _interpreters module, KeyboardInterrupt exceptions |
1647 | | // during PyEval_EvalCode() are sent to finalization, which doesn't let us |
1648 | | // mark threads as "not running main". So, for now this assertion is |
1649 | | // disabled. |
1650 | | // XXX assert(!_PyThreadState_IsRunningMain(tstate)); |
1651 | | // XXX assert(!tstate->_status.bound || tstate->_status.unbound); |
1652 | 0 | tstate->_status.finalizing = 1; // just in case |
1653 | | |
1654 | | /* XXX Conditions we need to enforce: |
1655 | | |
1656 | | * the GIL must be held by the current thread |
1657 | | * current_fast_get()->interp must match tstate->interp |
1658 | | * for the main interpreter, current_fast_get() must be the main thread |
1659 | | */ |
1660 | |
|
1661 | 0 | int verbose = _PyInterpreterState_GetConfig(tstate->interp)->verbose; |
1662 | |
|
1663 | 0 | if (verbose && tstate->current_frame != NULL) { |
1664 | | /* bpo-20526: After the main thread calls |
1665 | | _PyInterpreterState_SetFinalizing() in Py_FinalizeEx() |
1666 | | (or in Py_EndInterpreter() for subinterpreters), |
1667 | | threads must exit when trying to take the GIL. |
1668 | | If a thread exit in the middle of _PyEval_EvalFrameDefault(), |
1669 | | tstate->frame is not reset to its previous value. |
1670 | | It is more likely with daemon threads, but it can happen |
1671 | | with regular threads if threading._shutdown() fails |
1672 | | (ex: interrupted by CTRL+C). */ |
1673 | 0 | fprintf(stderr, |
1674 | 0 | "PyThreadState_Clear: warning: thread still has a frame\n"); |
1675 | 0 | } |
1676 | |
|
1677 | 0 | if (verbose && tstate->current_exception != NULL) { |
1678 | 0 | fprintf(stderr, "PyThreadState_Clear: warning: thread has an exception set\n"); |
1679 | 0 | _PyErr_Print(tstate); |
1680 | 0 | } |
1681 | | |
1682 | | /* At this point tstate shouldn't be used any more, |
1683 | | neither to run Python code nor for other uses. |
1684 | | |
1685 | | This is tricky when current_fast_get() == tstate, in the same way |
1686 | | as noted in interpreter_clear() above. The below finalizers |
1687 | | can possibly run Python code or otherwise use the partially |
1688 | | cleared thread state. For now we trust that isn't a problem |
1689 | | in practice. |
1690 | | */ |
1691 | | // XXX Deal with the possibility of problematic finalizers. |
1692 | | |
1693 | | /* Don't clear tstate->pyframe: it is a borrowed reference */ |
1694 | |
|
1695 | 0 | Py_CLEAR(tstate->threading_local_key); |
1696 | 0 | Py_CLEAR(tstate->threading_local_sentinel); |
1697 | |
|
1698 | 0 | Py_CLEAR(((_PyThreadStateImpl *)tstate)->asyncio_running_loop); |
1699 | 0 | Py_CLEAR(((_PyThreadStateImpl *)tstate)->asyncio_running_task); |
1700 | | |
1701 | |
|
1702 | 0 | PyMutex_Lock(&tstate->interp->asyncio_tasks_lock); |
1703 | | // merge any lingering tasks from thread state to interpreter's |
1704 | | // tasks list |
1705 | 0 | llist_concat(&tstate->interp->asyncio_tasks_head, |
1706 | 0 | &((_PyThreadStateImpl *)tstate)->asyncio_tasks_head); |
1707 | 0 | PyMutex_Unlock(&tstate->interp->asyncio_tasks_lock); |
1708 | |
|
1709 | 0 | Py_CLEAR(tstate->dict); |
1710 | 0 | Py_CLEAR(tstate->async_exc); |
1711 | |
|
1712 | 0 | Py_CLEAR(tstate->current_exception); |
1713 | |
|
1714 | 0 | Py_CLEAR(tstate->exc_state.exc_value); |
1715 | | |
1716 | | /* The stack of exception states should contain just this thread. */ |
1717 | 0 | if (verbose && tstate->exc_info != &tstate->exc_state) { |
1718 | 0 | fprintf(stderr, |
1719 | 0 | "PyThreadState_Clear: warning: thread still has a generator\n"); |
1720 | 0 | } |
1721 | |
|
1722 | 0 | if (tstate->c_profilefunc != NULL) { |
1723 | 0 | FT_ATOMIC_ADD_SSIZE(tstate->interp->sys_profiling_threads, -1); |
1724 | 0 | tstate->c_profilefunc = NULL; |
1725 | 0 | } |
1726 | 0 | if (tstate->c_tracefunc != NULL) { |
1727 | 0 | FT_ATOMIC_ADD_SSIZE(tstate->interp->sys_tracing_threads, -1); |
1728 | 0 | tstate->c_tracefunc = NULL; |
1729 | 0 | } |
1730 | |
|
1731 | 0 | Py_CLEAR(tstate->c_profileobj); |
1732 | 0 | Py_CLEAR(tstate->c_traceobj); |
1733 | |
|
1734 | 0 | Py_CLEAR(tstate->async_gen_firstiter); |
1735 | 0 | Py_CLEAR(tstate->async_gen_finalizer); |
1736 | |
|
1737 | 0 | Py_CLEAR(tstate->context); |
1738 | |
|
1739 | | #ifdef Py_GIL_DISABLED |
1740 | | // Each thread should clear own freelists in free-threading builds. |
1741 | | struct _Py_freelists *freelists = _Py_freelists_GET(); |
1742 | | _PyObject_ClearFreeLists(freelists, 1); |
1743 | | |
1744 | | // Merge our thread-local refcounts into the type's own refcount and |
1745 | | // free our local refcount array. |
1746 | | _PyObject_FinalizePerThreadRefcounts((_PyThreadStateImpl *)tstate); |
1747 | | |
1748 | | // Remove ourself from the biased reference counting table of threads. |
1749 | | _Py_brc_remove_thread(tstate); |
1750 | | |
1751 | | // Release our thread-local copies of the bytecode for reuse by another |
1752 | | // thread |
1753 | | _Py_ClearTLBCIndex((_PyThreadStateImpl *)tstate); |
1754 | | #endif |
1755 | | |
1756 | | // Merge our queue of pointers to be freed into the interpreter queue. |
1757 | 0 | _PyMem_AbandonDelayed(tstate); |
1758 | |
|
1759 | 0 | _PyThreadState_ClearMimallocHeaps(tstate); |
1760 | |
|
1761 | 0 | tstate->_status.cleared = 1; |
1762 | | |
1763 | | // XXX Call _PyThreadStateSwap(runtime, NULL) here if "current". |
1764 | | // XXX Do it as early in the function as possible. |
1765 | 0 | } |
1766 | | |
1767 | | static void |
1768 | | decrement_stoptheworld_countdown(struct _stoptheworld_state *stw); |
1769 | | |
1770 | | /* Common code for PyThreadState_Delete() and PyThreadState_DeleteCurrent() */ |
static void
tstate_delete_common(PyThreadState *tstate, int release_gil)
{
    // The tstate must already have been cleared via PyThreadState_Clear(),
    // must not be attached to any OS thread, and must not be running the
    // interpreter's main program.
    assert(tstate->_status.cleared && !tstate->_status.finalized);
    tstate_verify_not_active(tstate);
    assert(!_PyThreadState_IsRunningMain(tstate));

    PyInterpreterState *interp = tstate->interp;
    if (interp == NULL) {
        Py_FatalError("NULL interpreter");
    }
    _PyRuntimeState *runtime = interp->runtime;

    // Unlink tstate from the interpreter's doubly-linked list of threads.
    HEAD_LOCK(runtime);
    if (tstate->prev) {
        tstate->prev->next = tstate->next;
    }
    else {
        interp->threads.head = tstate->next;
    }
    if (tstate->next) {
        tstate->next->prev = tstate->prev;
    }
    if (tstate->state != _Py_THREAD_SUSPENDED) {
        // Any ongoing stop-the-world request should not wait for us because
        // our thread is getting deleted.  (A suspended thread was already
        // subtracted from the countdown; see _PyThreadState_Suspend() and
        // park_detached_threads().)
        if (interp->stoptheworld.requested) {
            decrement_stoptheworld_countdown(&interp->stoptheworld);
        }
        if (runtime->stoptheworld.requested) {
            decrement_stoptheworld_countdown(&runtime->stoptheworld);
        }
    }

#if defined(Py_REF_DEBUG) && defined(Py_GIL_DISABLED)
    // Add our portion of the total refcount to the interpreter's total.
    _PyThreadStateImpl *tstate_impl = (_PyThreadStateImpl *)tstate;
    tstate->interp->object_state.reftotal += tstate_impl->reftotal;
    tstate_impl->reftotal = 0;
    assert(tstate_impl->refcounts.values == NULL);
#endif

#if _Py_TIER2
    // Release this thread's JIT trace code buffer, if one was allocated.
    _PyThreadStateImpl *_tstate = (_PyThreadStateImpl *)tstate;
    if (_tstate->jit_tracer_state.code_buffer != NULL) {
        _PyObject_VirtualFree(_tstate->jit_tracer_state.code_buffer, UOP_BUFFER_SIZE);
        _tstate->jit_tracer_state.code_buffer = NULL;
    }
#endif

    HEAD_UNLOCK(runtime);

    // XXX Unbind in PyThreadState_Clear(), or earlier
    // (and assert not-equal here)?
    if (tstate->_status.bound_gilstate) {
        unbind_gilstate_tstate(tstate);
    }
    if (tstate->_status.bound) {
        unbind_tstate(tstate);
    }

    // XXX Move to PyThreadState_Clear()?
    clear_datastack(tstate);

    if (release_gil) {
        _PyEval_ReleaseLock(tstate->interp, tstate, 1);
    }

#ifdef Py_GIL_DISABLED
    _Py_qsbr_unregister(tstate);
#endif

    tstate->_status.finalized = 1;
}
1845 | | |
1846 | | static void |
1847 | | zapthreads(PyInterpreterState *interp) |
1848 | 0 | { |
1849 | 0 | PyThreadState *tstate; |
1850 | | /* No need to lock the mutex here because this should only happen |
1851 | | when the threads are all really dead (XXX famous last words). |
1852 | | |
1853 | | Cannot use _Py_FOR_EACH_TSTATE_UNLOCKED because we are freeing |
1854 | | the thread states here. |
1855 | | */ |
1856 | 0 | while ((tstate = interp->threads.head) != NULL) { |
1857 | 0 | tstate_verify_not_active(tstate); |
1858 | 0 | tstate_delete_common(tstate, 0); |
1859 | 0 | free_threadstate((_PyThreadStateImpl *)tstate); |
1860 | 0 | } |
1861 | 0 | } |
1862 | | |
1863 | | |
1864 | | void |
1865 | | PyThreadState_Delete(PyThreadState *tstate) |
1866 | 0 | { |
1867 | 0 | _Py_EnsureTstateNotNULL(tstate); |
1868 | 0 | tstate_verify_not_active(tstate); |
1869 | 0 | tstate_delete_common(tstate, 0); |
1870 | 0 | free_threadstate((_PyThreadStateImpl *)tstate); |
1871 | 0 | } |
1872 | | |
1873 | | |
void
_PyThreadState_DeleteCurrent(PyThreadState *tstate)
{
    // Delete the given thread state (which must belong to the calling
    // thread) and release the GIL as part of the teardown.
    _Py_EnsureTstateNotNULL(tstate);
#ifdef Py_GIL_DISABLED
    _Py_qsbr_detach(((_PyThreadStateImpl *)tstate)->qsbr);
#endif
#ifdef Py_STATS
    _PyStats_Detach((_PyThreadStateImpl *)tstate);
#endif
    // Clear the "current thread state" before freeing the tstate so that
    // nothing can observe a dangling pointer through it.
    current_fast_clear(tstate->interp->runtime);
    tstate_delete_common(tstate, 1);  // release GIL as part of call
    free_threadstate((_PyThreadStateImpl *)tstate);
}
1888 | | |
void
PyThreadState_DeleteCurrent(void)
{
    /* Delete the calling thread's state and release the GIL. */
    _PyThreadState_DeleteCurrent(current_fast_get());
}
1895 | | |
1896 | | |
1897 | | // Unlinks and removes all thread states from `tstate->interp`, with the |
1898 | | // exception of the one passed as an argument. However, it does not delete |
1899 | | // these thread states. Instead, it returns the removed thread states as a |
1900 | | // linked list. |
1901 | | // |
1902 | | // Note that if there is a current thread state, it *must* be the one |
1903 | | // passed as argument. Also, this won't touch any interpreters other |
1904 | | // than the current one, since we don't know which thread state should |
1905 | | // be kept in those other interpreters. |
PyThreadState *
_PyThreadState_RemoveExcept(PyThreadState *tstate)
{
    assert(tstate != NULL);
    PyInterpreterState *interp = tstate->interp;
    _PyRuntimeState *runtime = interp->runtime;

#ifdef Py_GIL_DISABLED
    assert(runtime->stoptheworld.world_stopped);
#endif

    HEAD_LOCK(runtime);
    /* Remove all thread states, except tstate, from the linked list of
       thread states.  Only tstate's own links are severed: the removed
       threads stay chained together, and the head of that chain is
       returned to the caller. */
    PyThreadState *list = interp->threads.head;
    if (list == tstate) {
        list = tstate->next;
    }
    if (tstate->prev) {
        tstate->prev->next = tstate->next;
    }
    if (tstate->next) {
        tstate->next->prev = tstate->prev;
    }
    tstate->prev = tstate->next = NULL;
    // The interpreter's list is reduced to just the surviving tstate.
    interp->threads.head = tstate;
    HEAD_UNLOCK(runtime);

    return list;
}
1936 | | |
1937 | | // Deletes the thread states in the linked list `list`. |
1938 | | // |
1939 | | // This is intended to be used in conjunction with _PyThreadState_RemoveExcept. |
1940 | | // |
1941 | | // If `is_after_fork` is true, the thread states are immediately freed. |
1942 | | // Otherwise, they are decref'd because they may still be referenced by an |
1943 | | // OS thread. |
1944 | | void |
1945 | | _PyThreadState_DeleteList(PyThreadState *list, int is_after_fork) |
1946 | 0 | { |
1947 | | // The world can't be stopped because we PyThreadState_Clear() can |
1948 | | // call destructors. |
1949 | 0 | assert(!_PyRuntime.stoptheworld.world_stopped); |
1950 | |
|
1951 | 0 | PyThreadState *p, *next; |
1952 | 0 | for (p = list; p; p = next) { |
1953 | 0 | next = p->next; |
1954 | 0 | PyThreadState_Clear(p); |
1955 | 0 | if (is_after_fork) { |
1956 | 0 | free_threadstate((_PyThreadStateImpl *)p); |
1957 | 0 | } |
1958 | 0 | else { |
1959 | 0 | decref_threadstate((_PyThreadStateImpl *)p); |
1960 | 0 | } |
1961 | 0 | } |
1962 | 0 | } |
1963 | | |
1964 | | |
1965 | | //---------- |
1966 | | // accessors |
1967 | | //---------- |
1968 | | |
1969 | | /* An extension mechanism to store arbitrary additional per-thread state. |
1970 | | PyThreadState_GetDict() returns a dictionary that can be used to hold such |
1971 | | state; the caller should pick a unique key and store its state there. If |
1972 | | PyThreadState_GetDict() returns NULL, an exception has *not* been raised |
1973 | | and the caller should assume no per-thread state is available. */ |
1974 | | |
1975 | | PyObject * |
1976 | | _PyThreadState_GetDict(PyThreadState *tstate) |
1977 | 7.95M | { |
1978 | 7.95M | assert(tstate != NULL); |
1979 | 7.95M | if (tstate->dict == NULL) { |
1980 | 2 | tstate->dict = PyDict_New(); |
1981 | 2 | if (tstate->dict == NULL) { |
1982 | 0 | _PyErr_Clear(tstate); |
1983 | 0 | } |
1984 | 2 | } |
1985 | 7.95M | return tstate->dict; |
1986 | 7.95M | } |
1987 | | |
1988 | | |
1989 | | PyObject * |
1990 | | PyThreadState_GetDict(void) |
1991 | 7.95M | { |
1992 | 7.95M | PyThreadState *tstate = current_fast_get(); |
1993 | 7.95M | if (tstate == NULL) { |
1994 | 0 | return NULL; |
1995 | 0 | } |
1996 | 7.95M | return _PyThreadState_GetDict(tstate); |
1997 | 7.95M | } |
1998 | | |
1999 | | |
PyInterpreterState *
PyThreadState_GetInterpreter(PyThreadState *tstate)
{
    // Return the interpreter that owns the given thread state (borrowed).
    assert(tstate != NULL);
    return tstate->interp;
}
2006 | | |
2007 | | |
2008 | | PyFrameObject* |
2009 | | PyThreadState_GetFrame(PyThreadState *tstate) |
2010 | 579k | { |
2011 | 579k | assert(tstate != NULL); |
2012 | 579k | _PyInterpreterFrame *f = _PyThreadState_GetFrame(tstate); |
2013 | 579k | if (f == NULL) { |
2014 | 0 | return NULL; |
2015 | 0 | } |
2016 | 579k | PyFrameObject *frame = _PyFrame_GetFrameObject(f); |
2017 | 579k | if (frame == NULL) { |
2018 | 0 | PyErr_Clear(); |
2019 | 0 | } |
2020 | 579k | return (PyFrameObject*)Py_XNewRef(frame); |
2021 | 579k | } |
2022 | | |
2023 | | |
uint64_t
PyThreadState_GetID(PyThreadState *tstate)
{
    // Accessor for tstate->id (assigned when the thread state is created;
    // not visible in this chunk).
    assert(tstate != NULL);
    return tstate->id;
}
2030 | | |
2031 | | |
static inline void
tstate_activate(PyThreadState *tstate)
{
    // Mark tstate as the active thread state for the current OS thread,
    // also binding it for PyGILState_* use if that hasn't happened yet.
    assert(tstate != NULL);
    // XXX assert(tstate_is_alive(tstate));
    assert(tstate_is_bound(tstate));
    assert(!tstate->_status.active);

    // If already gilstate-bound, it must be bound to this very thread.
    assert(!tstate->_status.bound_gilstate ||
           tstate == gilstate_get());
    if (!tstate->_status.bound_gilstate) {
        bind_gilstate_tstate(tstate);
    }

    tstate->_status.active = 1;
}
2048 | | |
2049 | | static inline void |
2050 | | tstate_deactivate(PyThreadState *tstate) |
2051 | 2.37M | { |
2052 | 2.37M | assert(tstate != NULL); |
2053 | | // XXX assert(tstate_is_alive(tstate)); |
2054 | 2.37M | assert(tstate_is_bound(tstate)); |
2055 | 2.37M | assert(tstate->_status.active); |
2056 | | |
2057 | | #if Py_STATS |
2058 | | _PyStats_Detach((_PyThreadStateImpl *)tstate); |
2059 | | #endif |
2060 | | |
2061 | 2.37M | tstate->_status.active = 0; |
2062 | | |
2063 | | // We do not unbind the gilstate tstate here. |
2064 | | // It will still be used in PyGILState_Ensure(). |
2065 | 2.37M | } |
2066 | | |
static int
tstate_try_attach(PyThreadState *tstate)
{
    // Try to move tstate from DETACHED to ATTACHED; return 1 on success.
    // In free-threaded builds the CAS can fail (another thread may have
    // moved us to SUSPENDED or SHUTTING_DOWN — see tstate_wait_attach());
    // with the GIL, no one else can change our state, so it always succeeds.
#ifdef Py_GIL_DISABLED
    int expected = _Py_THREAD_DETACHED;
    return _Py_atomic_compare_exchange_int(&tstate->state,
                                           &expected,
                                           _Py_THREAD_ATTACHED);
#else
    assert(tstate->state == _Py_THREAD_DETACHED);
    tstate->state = _Py_THREAD_ATTACHED;
    return 1;
#endif
}
2081 | | |
static void
tstate_set_detached(PyThreadState *tstate, int detached_state)
{
    // Move an ATTACHED thread to `detached_state` — callers pass either
    // _Py_THREAD_DETACHED or _Py_THREAD_SUSPENDED (see detach_thread()).
    assert(_Py_atomic_load_int_relaxed(&tstate->state) == _Py_THREAD_ATTACHED);
#ifdef Py_GIL_DISABLED
    _Py_atomic_store_int(&tstate->state, detached_state);
#else
    tstate->state = detached_state;
#endif
}
2092 | | |
static void
tstate_wait_attach(PyThreadState *tstate)
{
    // Block until this thread can re-attach: a stop-the-world request may
    // have moved us to SUSPENDED, in which case we park until the requester
    // switches us back to DETACHED (see start_the_world()).
    do {
        int state = _Py_atomic_load_int_relaxed(&tstate->state);
        if (state == _Py_THREAD_SUSPENDED) {
            // Wait until we're switched out of SUSPENDED to DETACHED.
            _PyParkingLot_Park(&tstate->state, &state, sizeof(tstate->state),
                               /*timeout=*/-1, NULL, /*detach=*/0);
        }
        else if (state == _Py_THREAD_SHUTTING_DOWN) {
            // We're shutting down, so we can't attach.
            // (Presumably never returns — confirm _PyThreadState_HangThread().)
            _PyThreadState_HangThread(tstate);
        }
        else {
            assert(state == _Py_THREAD_DETACHED);
        }
        // Once we're back in DETACHED we can re-attach
    } while (!tstate_try_attach(tstate));
}
2113 | | |
void
_PyThreadState_Attach(PyThreadState *tstate)
{
    // Attach tstate to the calling OS thread: acquire the eval lock,
    // publish tstate as the current thread state and mark it active.
    // Fatal error if a thread state is already current on this thread.
#if defined(Py_DEBUG)
    // This is called from PyEval_RestoreThread(). Similar
    // to it, we need to ensure errno doesn't change.
    int err = errno;
#endif

    _Py_EnsureTstateNotNULL(tstate);
    if (current_fast_get() != NULL) {
        Py_FatalError("non-NULL old thread state");
    }
    _PyThreadStateImpl *_tstate = (_PyThreadStateImpl *)tstate;
    if (_tstate->c_stack_hard_limit == 0) {
        // C stack limits not computed yet for this thread state.
        _Py_InitializeRecursionLimits(tstate);
    }

    while (1) {
        _PyEval_AcquireLock(tstate);

        // XXX assert(tstate_is_alive(tstate));
        current_fast_set(&_PyRuntime, tstate);
        if (!tstate_try_attach(tstate)) {
            tstate_wait_attach(tstate);
        }
        tstate_activate(tstate);

#ifdef Py_GIL_DISABLED
        if (_PyEval_IsGILEnabled(tstate) && !tstate->holds_gil) {
            // The GIL was enabled between our call to _PyEval_AcquireLock()
            // and when we attached (the GIL can't go from enabled to disabled
            // here because only a thread holding the GIL can disable
            // it). Detach and try again.
            tstate_set_detached(tstate, _Py_THREAD_DETACHED);
            tstate_deactivate(tstate);
            current_fast_clear(&_PyRuntime);
            continue;
        }
        _Py_qsbr_attach(((_PyThreadStateImpl *)tstate)->qsbr);
#endif
        break;
    }

    // Resume previous critical section. This acquires the lock(s) from the
    // top-most critical section.
    if (tstate->critical_section != 0) {
        _PyCriticalSection_Resume(tstate);
    }

#ifdef Py_STATS
    _PyStats_Attach((_PyThreadStateImpl *)tstate);
#endif

#if defined(Py_DEBUG)
    errno = err;
#endif
}
2172 | | |
static void
detach_thread(PyThreadState *tstate, int detached_state)
{
    // Inverse of _PyThreadState_Attach(): suspend any active critical
    // sections, deactivate and detach tstate, clear the "current" thread
    // state, and finally release the eval lock.  The order matters:
    // tstate must stop being "current" before the lock is released.
    // XXX assert(tstate_is_alive(tstate) && tstate_is_bound(tstate));
    assert(_Py_atomic_load_int_relaxed(&tstate->state) == _Py_THREAD_ATTACHED);
    assert(tstate == current_fast_get());
    if (tstate->critical_section != 0) {
        _PyCriticalSection_SuspendAll(tstate);
    }
#ifdef Py_GIL_DISABLED
    _Py_qsbr_detach(((_PyThreadStateImpl *)tstate)->qsbr);
#endif
    tstate_deactivate(tstate);
    tstate_set_detached(tstate, detached_state);
    current_fast_clear(&_PyRuntime);
    _PyEval_ReleaseLock(tstate->interp, tstate, 0);
}
2190 | | |
void
_PyThreadState_Detach(PyThreadState *tstate)
{
    // Detach the current thread state and release the eval lock; the
    // thread can re-attach later via _PyThreadState_Attach().
    detach_thread(tstate, _Py_THREAD_DETACHED);
}
2196 | | |
void
_PyThreadState_Suspend(PyThreadState *tstate)
{
    // Detach the current thread state, entering the "suspended" state if a
    // stop-the-world request is in progress (so the requester stops
    // waiting for us), or the plain "detached" state otherwise.
    _PyRuntimeState *runtime = &_PyRuntime;

    assert(_Py_atomic_load_int_relaxed(&tstate->state) == _Py_THREAD_ATTACHED);

    // Determine, under HEAD_LOCK, which stop-the-world request (if any) is
    // pending; the global request takes precedence over the
    // per-interpreter one.
    struct _stoptheworld_state *stw = NULL;
    HEAD_LOCK(runtime);
    if (runtime->stoptheworld.requested) {
        stw = &runtime->stoptheworld;
    }
    else if (tstate->interp->stoptheworld.requested) {
        stw = &tstate->interp->stoptheworld;
    }
    HEAD_UNLOCK(runtime);

    if (stw == NULL) {
        // Switch directly to "detached" if there is no active stop-the-world
        // request.
        detach_thread(tstate, _Py_THREAD_DETACHED);
        return;
    }

    // Switch to "suspended" state.
    detach_thread(tstate, _Py_THREAD_SUSPENDED);

    // Decrease the count of remaining threads needing to park.
    HEAD_LOCK(runtime);
    decrement_stoptheworld_countdown(stw);
    HEAD_UNLOCK(runtime);
}
2229 | | |
void
_PyThreadState_SetShuttingDown(PyThreadState *tstate)
{
    // Mark tstate as shutting down so it can no longer re-attach, and wake
    // any thread parked on its state so it observes the transition (see
    // tstate_wait_attach()).
    _Py_atomic_store_int(&tstate->state, _Py_THREAD_SHUTTING_DOWN);
#ifdef Py_GIL_DISABLED
    _PyParkingLot_UnparkAll(&tstate->state);
#endif
}
2238 | | |
2239 | | // Decrease stop-the-world counter of remaining number of threads that need to |
2240 | | // pause. If we are the final thread to pause, notify the requesting thread. |
2241 | | static void |
2242 | | decrement_stoptheworld_countdown(struct _stoptheworld_state *stw) |
2243 | 0 | { |
2244 | 0 | assert(stw->thread_countdown > 0); |
2245 | 0 | if (--stw->thread_countdown == 0) { |
2246 | 0 | _PyEvent_Notify(&stw->stop_event); |
2247 | 0 | } |
2248 | 0 | } |
2249 | | |
2250 | | #ifdef Py_GIL_DISABLED |
2251 | | // Interpreter for _Py_FOR_EACH_STW_INTERP(). For global stop-the-world events, |
2252 | | // we start with the first interpreter and then iterate over all interpreters. |
2253 | | // For per-interpreter stop-the-world events, we only operate on the one |
2254 | | // interpreter. |
2255 | | static PyInterpreterState * |
2256 | | interp_for_stop_the_world(struct _stoptheworld_state *stw) |
2257 | | { |
2258 | | return (stw->is_global |
2259 | | ? PyInterpreterState_Head() |
2260 | | : _Py_CONTAINER_OF(stw, PyInterpreterState, stoptheworld)); |
2261 | | } |
2262 | | |
// Loops over threads for a stop-the-world event.
// For global: all threads in all interpreters
// For per-interpreter: all threads in the interpreter
// NOTE: expands to a for-statement header only; the caller supplies the body.
#define _Py_FOR_EACH_STW_INTERP(stw, i) \
    for (PyInterpreterState *i = interp_for_stop_the_world((stw)); \
         i != NULL; i = ((stw->is_global) ? i->next : NULL))
2269 | | |
2270 | | |
2271 | | // Try to transition threads atomically from the "detached" state to the |
// "gc stopped" state. Returns true if all threads are in the "gc stopped" state.
static bool
park_detached_threads(struct _stoptheworld_state *stw)
{
    int num_parked = 0;
    _Py_FOR_EACH_STW_INTERP(stw, i) {
        _Py_FOR_EACH_TSTATE_UNLOCKED(i, t) {
            int state = _Py_atomic_load_int_relaxed(&t->state);
            if (state == _Py_THREAD_DETACHED) {
                // Atomically transition to "suspended" if in "detached" state.
                if (_Py_atomic_compare_exchange_int(
                        &t->state, &state, _Py_THREAD_SUSPENDED)) {
                    num_parked++;
                }
            }
            else if (state == _Py_THREAD_ATTACHED && t != stw->requester) {
                // Ask attached threads (other than the requester) to stop
                // at their next eval-breaker check.
                _Py_set_eval_breaker_bit(t, _PY_EVAL_PLEASE_STOP_BIT);
            }
        }
    }
    // Threads we parked ourselves no longer need to be waited on.
    stw->thread_countdown -= num_parked;
    assert(stw->thread_countdown >= 0);
    return num_parked > 0 && stw->thread_countdown == 0;
}
2296 | | |
static void
stop_the_world(struct _stoptheworld_state *stw)
{
    // Bring every other thread (of one interpreter, or of all interpreters
    // when stw->is_global) into the "suspended" state.  Returns with
    // stw->mutex held and stw->world_stopped set; start_the_world() undoes
    // both.
    _PyRuntimeState *runtime = &_PyRuntime;

    // gh-137433: Acquire the rwmutex first to avoid deadlocks with daemon
    // threads that may hang when blocked on lock acquisition.
    if (stw->is_global) {
        _PyRWMutex_Lock(&runtime->stoptheworld_mutex);
    }
    else {
        _PyRWMutex_RLock(&runtime->stoptheworld_mutex);
    }
    PyMutex_Lock(&stw->mutex);

    HEAD_LOCK(runtime);
    stw->requested = 1;
    stw->thread_countdown = 0;
    stw->stop_event = (PyEvent){0}; // zero-initialize (unset)
    stw->requester = _PyThreadState_GET(); // may be NULL

    FT_STAT_WORLD_STOP_INC();

    // Count how many other threads must pause before the world is stopped.
    _Py_FOR_EACH_STW_INTERP(stw, i) {
        _Py_FOR_EACH_TSTATE_UNLOCKED(i, t) {
            if (t != stw->requester) {
                // Count all the other threads (we don't wait on ourself).
                stw->thread_countdown++;
            }
        }
    }

    if (stw->thread_countdown == 0) {
        // No other threads: the world is already stopped.
        HEAD_UNLOCK(runtime);
        stw->world_stopped = 1;
        return;
    }

    for (;;) {
        // Switch threads that are detached to the GC stopped state
        bool stopped_all_threads = park_detached_threads(stw);
        HEAD_UNLOCK(runtime);

        if (stopped_all_threads) {
            break;
        }

        PyTime_t wait_ns = 1000*1000; // 1ms (arbitrary, may need tuning)
        int detach = 0;
        if (PyEvent_WaitTimed(&stw->stop_event, wait_ns, detach)) {
            // The last thread to park signaled the event (see
            // decrement_stoptheworld_countdown()).
            assert(stw->thread_countdown == 0);
            break;
        }

        HEAD_LOCK(runtime);
    }
    stw->world_stopped = 1;
}
2354 | | |
static void
start_the_world(struct _stoptheworld_state *stw)
{
    // Undo stop_the_world(): flip every suspended thread back to
    // "detached", wake it, and release the locks the requester holds.
    _PyRuntimeState *runtime = &_PyRuntime;
    assert(PyMutex_IsLocked(&stw->mutex));

    HEAD_LOCK(runtime);
    stw->requested = 0;
    stw->world_stopped = 0;
    // Switch threads back to the detached state.
    _Py_FOR_EACH_STW_INTERP(stw, i) {
        _Py_FOR_EACH_TSTATE_UNLOCKED(i, t) {
            if (t != stw->requester) {
                assert(_Py_atomic_load_int_relaxed(&t->state) ==
                       _Py_THREAD_SUSPENDED);
                _Py_atomic_store_int(&t->state, _Py_THREAD_DETACHED);
                // Wake threads parked in tstate_wait_attach().
                _PyParkingLot_UnparkAll(&t->state);
            }
        }
    }
    stw->requester = NULL;
    HEAD_UNLOCK(runtime);
    PyMutex_Unlock(&stw->mutex);
    if (stw->is_global) {
        _PyRWMutex_Unlock(&runtime->stoptheworld_mutex);
    }
    else {
        _PyRWMutex_RUnlock(&runtime->stoptheworld_mutex);
    }
}
2385 | | #endif // Py_GIL_DISABLED |
2386 | | |
// Stop all threads in all interpreters.  A no-op in the default
// (with-GIL) build.
void
_PyEval_StopTheWorldAll(_PyRuntimeState *runtime)
{
#ifdef Py_GIL_DISABLED
    stop_the_world(&runtime->stoptheworld);
#endif
}
2394 | | |
// Resume all threads in all interpreters after _PyEval_StopTheWorldAll().
// A no-op in the default (with-GIL) build.
void
_PyEval_StartTheWorldAll(_PyRuntimeState *runtime)
{
#ifdef Py_GIL_DISABLED
    start_the_world(&runtime->stoptheworld);
#endif
}
2402 | | |
// Stop all threads of one interpreter.  A no-op in the default
// (with-GIL) build.
void
_PyEval_StopTheWorld(PyInterpreterState *interp)
{
#ifdef Py_GIL_DISABLED
    stop_the_world(&interp->stoptheworld);
#endif
}
2410 | | |
// Resume all threads of one interpreter after _PyEval_StopTheWorld().
// A no-op in the default (with-GIL) build.
void
_PyEval_StartTheWorld(PyInterpreterState *interp)
{
#ifdef Py_GIL_DISABLED
    start_the_world(&interp->stoptheworld);
#endif
}
2418 | | |
2419 | | //---------- |
2420 | | // other API |
2421 | | //---------- |
2422 | | |
2423 | | /* Asynchronously raise an exception in a thread. |
2424 | | Requested by Just van Rossum and Alex Martelli. |
2425 | | To prevent naive misuse, you must write your own extension |
2426 | | to call this, or use ctypes. Must be called with the GIL held. |
2427 | | Returns the number of tstates modified (normally 1, but 0 if `id` didn't |
2428 | | match any known thread id). Can be called with exc=NULL to clear an |
2429 | | existing async exception. This raises no exceptions. */ |
2430 | | |
2431 | | // XXX Move this to Python/ceval_gil.c? |
2432 | | // XXX Deprecate this. |
int
PyThreadState_SetAsyncExc(unsigned long id, PyObject *exc)
{
    PyInterpreterState *interp = _PyInterpreterState_GET();

    /* Although the GIL is held, a few C API functions can be called
     * without the GIL held, and in particular some that create and
     * destroy thread and interpreter states.  Those can mutate the
     * list of thread states we're traversing, so to prevent that we lock
     * head_mutex for the duration.
     */
    // Find the thread state whose OS thread id matches `id` (if any).
    PyThreadState *tstate = NULL;
    _Py_FOR_EACH_TSTATE_BEGIN(interp, t) {
        if (t->thread_id == id) {
            tstate = t;
            break;
        }
    }
    _Py_FOR_EACH_TSTATE_END(interp);

    if (tstate != NULL) {
        /* Tricky: we need to decref the current value
         * (if any) in tstate->async_exc, but that can in turn
         * allow arbitrary Python code to run, including
         * perhaps calls to this function.  To prevent
         * deadlock, we need to release head_mutex before
         * the decref.
         */
        Py_XINCREF(exc);
        PyObject *old_exc = _Py_atomic_exchange_ptr(&tstate->async_exc, exc);

        Py_XDECREF(old_exc);
        // Tell the eval loop to raise the pending async exception.
        _Py_set_eval_breaker_bit(tstate, _PY_ASYNC_EXCEPTION_BIT);
    }

    // 1 if a matching thread state was found and updated, else 0.
    return tstate != NULL;
}
2470 | | |
2471 | | //--------------------------------- |
2472 | | // API for the current thread state |
2473 | | //--------------------------------- |
2474 | | |
PyThreadState *
PyThreadState_GetUnchecked(void)
{
    // Like PyThreadState_Get(), but may return NULL instead of aborting
    // when there is no current thread state.
    return current_fast_get();
}
2480 | | |
2481 | | |
PyThreadState *
PyThreadState_Get(void)
{
    // Return the current thread state; fatal error if there is none
    // (see _Py_EnsureTstateNotNULL()).
    PyThreadState *tstate = current_fast_get();
    _Py_EnsureTstateNotNULL(tstate);
    return tstate;
}
2489 | | |
PyThreadState *
_PyThreadState_Swap(_PyRuntimeState *runtime, PyThreadState *newts)
{
    // Make `newts` the current thread state (either may be NULL) and
    // return the previously current one.  Detaching the old state releases
    // the eval lock, which attaching the new one re-acquires.
    PyThreadState *oldts = current_fast_get();
    if (oldts != NULL) {
        _PyThreadState_Detach(oldts);
    }
    if (newts != NULL) {
        _PyThreadState_Attach(newts);
    }
    return oldts;
}
2502 | | |
PyThreadState *
PyThreadState_Swap(PyThreadState *newts)
{
    // Public wrapper around _PyThreadState_Swap() for the global runtime.
    return _PyThreadState_Swap(&_PyRuntime, newts);
}
2508 | | |
2509 | | |
void
_PyThreadState_Bind(PyThreadState *tstate)
{
    // Bind tstate to the calling OS thread (without attaching it).
    // gh-104690: If Python is being finalized and PyInterpreterState_Delete()
    // was called, tstate becomes a dangling pointer.
    assert(_PyThreadState_CheckConsistency(tstate));

    bind_tstate(tstate);
    // This makes sure there's a gilstate tstate bound
    // as soon as possible.
    if (gilstate_get() == NULL) {
        bind_gilstate_tstate(tstate);
    }
}
2524 | | |
2525 | | #if defined(Py_GIL_DISABLED) && !defined(Py_LIMITED_API) |
uintptr_t
_Py_GetThreadLocal_Addr(void)
{
    // Return an integer token that is unique and stable per OS thread.
    // gh-112535: Use the address of the thread-local PyThreadState variable as
    // a unique identifier for the current thread. Each thread has a unique
    // _Py_tss_tstate variable with a unique address.
    return (uintptr_t)&_Py_tss_tstate;
}
2534 | | #endif |
2535 | | |
2536 | | /***********************************/ |
2537 | | /* routines for advanced debuggers */ |
2538 | | /***********************************/ |
2539 | | |
2540 | | // (requested by David Beazley) |
2541 | | // Don't use unless you know what you are doing! |
2542 | | |
PyInterpreterState *
PyInterpreterState_Head(void)
{
    // Return the first interpreter in the runtime's linked list.
    return _PyRuntime.interpreters.head;
}
2548 | | |
2549 | | PyInterpreterState * |
2550 | | PyInterpreterState_Main(void) |
2551 | 0 | { |
2552 | 0 | return _PyInterpreterState_Main(); |
2553 | 0 | } |
2554 | | |
2555 | | PyInterpreterState * |
2556 | 0 | PyInterpreterState_Next(PyInterpreterState *interp) { |
2557 | 0 | return interp->next; |
2558 | 0 | } |
2559 | | |
2560 | | PyThreadState * |
2561 | 17.8k | PyInterpreterState_ThreadHead(PyInterpreterState *interp) { |
2562 | 17.8k | return interp->threads.head; |
2563 | 17.8k | } |
2564 | | |
2565 | | PyThreadState * |
2566 | 17.8k | PyThreadState_Next(PyThreadState *tstate) { |
2567 | 17.8k | return tstate->next; |
2568 | 17.8k | } |
2569 | | |
2570 | | |
2571 | | /********************************************/ |
2572 | | /* reporting execution state of all threads */ |
2573 | | /********************************************/ |
2574 | | |
/* The implementation of sys._current_frames().  This is intended to be
   called with the GIL held, as it will be when called via
   sys._current_frames().  It's possible it would work fine even without
   the GIL held, but haven't thought enough about that.

   Returns a new dict mapping thread id -> topmost complete frame object,
   or NULL with an exception set on failure.
*/
PyObject *
_PyThread_CurrentFrames(void)
{
    _PyRuntimeState *runtime = &_PyRuntime;
    PyThreadState *tstate = current_fast_get();
    if (_PySys_Audit(tstate, "sys._current_frames", NULL) < 0) {
        return NULL;
    }

    PyObject *result = PyDict_New();
    if (result == NULL) {
        return NULL;
    }

    /* for i in all interpreters:
     *     for t in all of i's thread states:
     *          if t's frame isn't NULL, map t's id to its frame
     * Because these lists can mutate even when the GIL is held, we
     * need to grab head_mutex for the duration.
     */
    _PyEval_StopTheWorldAll(runtime);
    HEAD_LOCK(runtime);
    PyInterpreterState *i;
    for (i = runtime->interpreters.head; i != NULL; i = i->next) {
        _Py_FOR_EACH_TSTATE_UNLOCKED(i, t) {
            _PyInterpreterFrame *frame = t->current_frame;
            frame = _PyFrame_GetFirstComplete(frame);
            if (frame == NULL) {
                // Thread has no complete Python frame; leave it out.
                continue;
            }
            PyObject *id = PyLong_FromUnsignedLong(t->thread_id);
            if (id == NULL) {
                goto fail;
            }
            PyObject *frameobj = (PyObject *)_PyFrame_GetFrameObject(frame);
            if (frameobj == NULL) {
                Py_DECREF(id);
                goto fail;
            }
            // PyDict_SetItem takes its own references to id and frameobj.
            int stat = PyDict_SetItem(result, id, frameobj);
            Py_DECREF(id);
            if (stat < 0) {
                goto fail;
            }
        }
    }
    goto done;

fail:
    Py_CLEAR(result);

done:
    // Always release the head lock and restart the world, success or not.
    HEAD_UNLOCK(runtime);
    _PyEval_StartTheWorldAll(runtime);
    return result;
}
2636 | | |
/* The implementation of sys._current_exceptions().  This is intended to be
   called with the GIL held, as it will be when called via
   sys._current_exceptions().  It's possible it would work fine even without
   the GIL held, but haven't thought enough about that.

   Returns a new dict mapping thread id -> currently handled exception
   (Py_None when none), or NULL with an exception set on failure.
*/
PyObject *
_PyThread_CurrentExceptions(void)
{
    _PyRuntimeState *runtime = &_PyRuntime;
    PyThreadState *tstate = current_fast_get();

    _Py_EnsureTstateNotNULL(tstate);

    if (_PySys_Audit(tstate, "sys._current_exceptions", NULL) < 0) {
        return NULL;
    }

    PyObject *result = PyDict_New();
    if (result == NULL) {
        return NULL;
    }

    /* for i in all interpreters:
     *     for t in all of i's thread states:
     *          if t's frame isn't NULL, map t's id to its frame
     * Because these lists can mutate even when the GIL is held, we
     * need to grab head_mutex for the duration.
     */
    _PyEval_StopTheWorldAll(runtime);
    HEAD_LOCK(runtime);
    PyInterpreterState *i;
    for (i = runtime->interpreters.head; i != NULL; i = i->next) {
        _Py_FOR_EACH_TSTATE_UNLOCKED(i, t) {
            _PyErr_StackItem *err_info = _PyErr_GetTopmostException(t);
            if (err_info == NULL) {
                // Thread is not currently handling an exception.
                continue;
            }
            PyObject *id = PyLong_FromUnsignedLong(t->thread_id);
            if (id == NULL) {
                goto fail;
            }
            PyObject *exc = err_info->exc_value;
            assert(exc == NULL ||
                   exc == Py_None ||
                   PyExceptionInstance_Check(exc));

            // Map NULL to Py_None so the dict value is always a real object.
            int stat = PyDict_SetItem(result, id, exc == NULL ? Py_None : exc);
            Py_DECREF(id);
            if (stat < 0) {
                goto fail;
            }
        }
    }
    goto done;

fail:
    Py_CLEAR(result);

done:
    // Always release the head lock and restart the world, success or not.
    HEAD_UNLOCK(runtime);
    _PyEval_StartTheWorldAll(runtime);
    return result;
}
2700 | | |
2701 | | |
2702 | | /***********************************/ |
2703 | | /* Python "auto thread state" API. */ |
2704 | | /***********************************/ |
2705 | | |
2706 | | /* Internal initialization/finalization functions called by |
2707 | | Py_Initialize/Py_FinalizeEx |
2708 | | */ |
2709 | | PyStatus |
2710 | | _PyGILState_Init(PyInterpreterState *interp) |
2711 | 28 | { |
2712 | 28 | if (!_Py_IsMainInterpreter(interp)) { |
2713 | | /* Currently, PyGILState is shared by all interpreters. The main |
2714 | | * interpreter is responsible to initialize it. */ |
2715 | 0 | return _PyStatus_OK(); |
2716 | 0 | } |
2717 | 28 | _PyRuntimeState *runtime = interp->runtime; |
2718 | 28 | assert(gilstate_get() == NULL); |
2719 | 28 | assert(runtime->gilstate.autoInterpreterState == NULL); |
2720 | 28 | runtime->gilstate.autoInterpreterState = interp; |
2721 | 28 | return _PyStatus_OK(); |
2722 | 28 | } |
2723 | | |
2724 | | void |
2725 | | _PyGILState_Fini(PyInterpreterState *interp) |
2726 | 0 | { |
2727 | 0 | if (!_Py_IsMainInterpreter(interp)) { |
2728 | | /* Currently, PyGILState is shared by all interpreters. The main |
2729 | | * interpreter is responsible to initialize it. */ |
2730 | 0 | return; |
2731 | 0 | } |
2732 | 0 | interp->runtime->gilstate.autoInterpreterState = NULL; |
2733 | 0 | } |
2734 | | |
2735 | | |
// XXX Drop this.
/* Validate that `tstate` (the thread state created during runtime init)
   is correctly registered with the gilstate machinery.  In release
   builds this is effectively a no-op beyond the early return; all checks
   are asserts that compile away. */
void
_PyGILState_SetTstate(PyThreadState *tstate)
{
    /* must init with valid states */
    assert(tstate != NULL);
    assert(tstate->interp != NULL);

    if (!_Py_IsMainInterpreter(tstate->interp)) {
        /* Currently, PyGILState is shared by all interpreters. The main
         * interpreter is responsible to initialize it. */
        return;
    }

#ifndef NDEBUG
    _PyRuntimeState *runtime = tstate->interp->runtime;

    // Debug-only sanity checks: runtime init must already have bound this
    // tstate as the gilstate thread state exactly once.
    assert(runtime->gilstate.autoInterpreterState == tstate->interp);
    assert(gilstate_get() == tstate);
    assert(tstate->gilstate_counter == 1);
#endif
}
2758 | | |
2759 | | PyInterpreterState * |
2760 | | _PyGILState_GetInterpreterStateUnsafe(void) |
2761 | 0 | { |
2762 | 0 | return _PyRuntime.gilstate.autoInterpreterState; |
2763 | 0 | } |
2764 | | |
2765 | | /* The public functions */ |
2766 | | |
2767 | | PyThreadState * |
2768 | | PyGILState_GetThisThreadState(void) |
2769 | 0 | { |
2770 | 0 | return gilstate_get(); |
2771 | 0 | } |
2772 | | |
2773 | | int |
2774 | | PyGILState_Check(void) |
2775 | 0 | { |
2776 | 0 | _PyRuntimeState *runtime = &_PyRuntime; |
2777 | 0 | if (!runtime->gilstate.check_enabled) { |
2778 | 0 | return 1; |
2779 | 0 | } |
2780 | | |
2781 | 0 | PyThreadState *tstate = current_fast_get(); |
2782 | 0 | if (tstate == NULL) { |
2783 | 0 | return 0; |
2784 | 0 | } |
2785 | | |
2786 | 0 | PyThreadState *tcur = gilstate_get(); |
2787 | 0 | return (tstate == tcur); |
2788 | 0 | } |
2789 | | |
/* Acquire the GIL for the calling thread, creating a thread state on
   demand for threads Python has never seen.  Returns the state to pass
   back to PyGILState_Release(): LOCKED if the GIL was already held. */
PyGILState_STATE
PyGILState_Ensure(void)
{
    _PyRuntimeState *runtime = &_PyRuntime;

    /* Note that we do not auto-init Python here - apart from
       potential races with 2 threads auto-initializing, pep-311
       spells out other issues.  Embedders are expected to have
       called Py_Initialize(). */

    /* Ensure that _PyEval_InitThreads() and _PyGILState_Init() have been
       called by Py_Initialize()

       TODO: This isn't thread-safe. There's no protection here against
       concurrent finalization of the interpreter; it's simply a guard
       for *after* the interpreter has finalized.
    */
    if (!_PyEval_ThreadsInitialized() || runtime->gilstate.autoInterpreterState == NULL) {
        // Runtime is gone (or never existed): park this thread forever
        // rather than touch freed interpreter state.
        PyThread_hang_thread();
    }

    PyThreadState *tcur = gilstate_get();
    int has_gil;
    if (tcur == NULL) {
        /* Create a new Python thread state for this thread */
        // XXX Use PyInterpreterState_EnsureThreadState()?
        tcur = new_threadstate(runtime->gilstate.autoInterpreterState,
                               _PyThreadState_WHENCE_GILSTATE);
        if (tcur == NULL) {
            Py_FatalError("Couldn't create thread-state for new thread");
        }
        bind_tstate(tcur);
        bind_gilstate_tstate(tcur);

        /* This is our thread state!  We'll need to delete it in the
           matching call to PyGILState_Release(). */
        assert(tcur->gilstate_counter == 1);
        tcur->gilstate_counter = 0;
        has_gil = 0; /* new thread state is never current */
    }
    else {
        has_gil = holds_gil(tcur);
    }

    if (!has_gil) {
        PyEval_RestoreThread(tcur);
    }

    /* Update our counter in the thread-state - no need for locks:
       - tcur will remain valid as we hold the GIL.
       - the counter is safe as we are the only thread "allowed"
         to modify this value
    */
    ++tcur->gilstate_counter;

    return has_gil ? PyGILState_LOCKED : PyGILState_UNLOCKED;
}
2847 | | |
/* Undo one matching PyGILState_Ensure() call.  When the per-thread
   counter drops to zero the auto-created thread state is cleared and
   deleted; otherwise the GIL is released if Ensure() acquired it. */
void
PyGILState_Release(PyGILState_STATE oldstate)
{
    PyThreadState *tstate = gilstate_get();
    if (tstate == NULL) {
        Py_FatalError("auto-releasing thread-state, "
                      "but no thread-state for this thread");
    }

    /* We must hold the GIL and have our thread state current */
    if (!holds_gil(tstate)) {
        _Py_FatalErrorFormat(__func__,
                             "thread state %p must be current when releasing",
                             tstate);
    }
    --tstate->gilstate_counter;
    assert(tstate->gilstate_counter >= 0); /* illegal counter value */

    /* If we're going to destroy this thread-state, we must
     * clear it while the GIL is held, as destructors may run.
     */
    if (tstate->gilstate_counter == 0) {
        /* can't have been locked when we created it */
        assert(oldstate == PyGILState_UNLOCKED);
        // XXX Unbind tstate here.
        // gh-119585: `PyThreadState_Clear()` may call destructors that
        // themselves use PyGILState_Ensure and PyGILState_Release, so make
        // sure that gilstate_counter is not zero when calling it.
        ++tstate->gilstate_counter;
        PyThreadState_Clear(tstate);
        --tstate->gilstate_counter;
        /* Delete the thread-state.  Note this releases the GIL too!
         * It's vital that the GIL be held here, to avoid shutdown
         * races; see bugs 225673 and 1061968 (that nasty bug has a
         * habit of coming back).
         */
        assert(tstate->gilstate_counter == 0);
        assert(current_fast_get() == tstate);
        _PyThreadState_DeleteCurrent(tstate);
    }
    /* Release the lock if necessary */
    else if (oldstate == PyGILState_UNLOCKED) {
        PyEval_SaveThread();
    }
}
2893 | | |
2894 | | |
2895 | | /*************/ |
2896 | | /* Other API */ |
2897 | | /*************/ |
2898 | | |
2899 | | _PyFrameEvalFunction |
2900 | | _PyInterpreterState_GetEvalFrameFunc(PyInterpreterState *interp) |
2901 | 0 | { |
2902 | 0 | if (interp->eval_frame == NULL) { |
2903 | 0 | return _PyEval_EvalFrameDefault; |
2904 | 0 | } |
2905 | 0 | return interp->eval_frame; |
2906 | 0 | } |
2907 | | |
2908 | | |
/* Install (or reset) the interpreter's frame-evaluation hook (PEP 523).
   Passing _PyEval_EvalFrameDefault stores NULL, the "no hook" state.
   The world is stopped around the store so no running thread observes a
   half-updated value. */
void
_PyInterpreterState_SetEvalFrameFunc(PyInterpreterState *interp,
                                     _PyFrameEvalFunction eval_frame)
{
    if (eval_frame == _PyEval_EvalFrameDefault) {
        eval_frame = NULL;
    }
    if (eval_frame == interp->eval_frame) {
        // Nothing to do; avoid the stop-the-world pause.
        return;
    }
#ifdef _Py_TIER2
    if (eval_frame != NULL) {
        // A custom hook bypasses tier-2 code, so invalidate executors.
        _Py_Executors_InvalidateAll(interp, 1);
    }
#endif
    RARE_EVENT_INC(set_eval_frame_func);
    _PyEval_StopTheWorld(interp);
    interp->eval_frame = eval_frame;
    _PyEval_StartTheWorld(interp);
}
2929 | | |
2930 | | |
2931 | | const PyConfig* |
2932 | | _PyInterpreterState_GetConfig(PyInterpreterState *interp) |
2933 | 118M | { |
2934 | 118M | return &interp->config; |
2935 | 118M | } |
2936 | | |
2937 | | |
2938 | | const PyConfig* |
2939 | | _Py_GetConfig(void) |
2940 | 176k | { |
2941 | 176k | PyThreadState *tstate = current_fast_get(); |
2942 | 176k | _Py_EnsureTstateNotNULL(tstate); |
2943 | 176k | return _PyInterpreterState_GetConfig(tstate->interp); |
2944 | 176k | } |
2945 | | |
2946 | | |
2947 | | int |
2948 | | _PyInterpreterState_HasFeature(PyInterpreterState *interp, unsigned long feature) |
2949 | 0 | { |
2950 | 0 | return ((interp->feature_flags & feature) != 0); |
2951 | 0 | } |
2952 | | |
2953 | | |
#define MINIMUM_OVERHEAD 1000

/* Allocate a new data-stack chunk large enough for `size` object slots
   (plus MINIMUM_OVERHEAD slots of headroom), link it in front of the
   current chunk, and return a pointer to the reserved slots.  Returns
   NULL on allocation failure. */
static PyObject **
push_chunk(PyThreadState *tstate, int size)
{
    // Double the default chunk size until the request (with headroom) fits.
    int allocate_size = _PY_DATA_STACK_CHUNK_SIZE;
    while (allocate_size < (int)sizeof(PyObject*)*(size + MINIMUM_OVERHEAD)) {
        allocate_size *= 2;
    }
    _PyStackChunk *new = allocate_chunk(allocate_size, tstate->datastack_chunk);
    if (new == NULL) {
        return NULL;
    }
    if (tstate->datastack_chunk) {
        // Record the current top as an offset so the old chunk can be
        // resumed when this one is popped.
        tstate->datastack_chunk->top = tstate->datastack_top -
                                       &tstate->datastack_chunk->data[0];
    }
    tstate->datastack_chunk = new;
    tstate->datastack_limit = (PyObject **)(((char *)new) + allocate_size);
    // When new is the "root" chunk (i.e. new->previous == NULL), we can keep
    // _PyThreadState_PopFrame from freeing it later by "skipping" over the
    // first element:
    PyObject **res = &new->data[new->previous == NULL];
    tstate->datastack_top = res + size;
    return res;
}
2980 | | |
2981 | | _PyInterpreterFrame * |
2982 | | _PyThreadState_PushFrame(PyThreadState *tstate, size_t size) |
2983 | 212M | { |
2984 | 212M | assert(size < INT_MAX/sizeof(PyObject *)); |
2985 | 212M | if (_PyThreadState_HasStackSpace(tstate, (int)size)) { |
2986 | 212M | _PyInterpreterFrame *res = (_PyInterpreterFrame *)tstate->datastack_top; |
2987 | 212M | tstate->datastack_top += size; |
2988 | 212M | return res; |
2989 | 212M | } |
2990 | 234k | return (_PyInterpreterFrame *)push_chunk(tstate, (int)size); |
2991 | 212M | } |
2992 | | |
/* Release the data-stack space occupied by `frame`.  If the frame was
   the first allocation in its chunk, the whole chunk is freed and the
   previous chunk becomes current again; otherwise the top-of-stack
   pointer is simply rewound to the frame's base. */
void
_PyThreadState_PopFrame(PyThreadState *tstate, _PyInterpreterFrame * frame)
{
    assert(tstate->datastack_chunk);
    PyObject **base = (PyObject **)frame;
    if (base == &tstate->datastack_chunk->data[0]) {
        _PyStackChunk *chunk = tstate->datastack_chunk;
        _PyStackChunk *previous = chunk->previous;
        // push_chunk ensures that the root chunk is never popped:
        assert(previous);
        // Restore the previous chunk's saved top before freeing `chunk`.
        tstate->datastack_top = &previous->data[previous->top];
        tstate->datastack_chunk = previous;
        _PyObject_VirtualFree(chunk, chunk->size);
        tstate->datastack_limit = (PyObject **)(((char *)previous) + previous->size);
    }
    else {
        assert(tstate->datastack_top);
        assert(tstate->datastack_top >= base);
        tstate->datastack_top = base;
    }
}
3014 | | |
3015 | | |
#ifndef NDEBUG
// Check that a Python thread state valid. In practice, this function is used
// on a Python debug build to check if 'tstate' is a dangling pointer, if the
// PyThreadState memory has been freed.
//
// Always returns 1 (so it can sit inside an assert); failures trip the
// internal asserts below instead.
//
// Usage:
//
//   assert(_PyThreadState_CheckConsistency(tstate));
int
_PyThreadState_CheckConsistency(PyThreadState *tstate)
{
    // _PyMem_IsPtrFreed() detects the debug allocator's freed-memory and
    // uninitialized-memory fill patterns.
    assert(!_PyMem_IsPtrFreed(tstate));
    assert(!_PyMem_IsPtrFreed(tstate->interp));
    return 1;
}
#endif
3032 | | |
3033 | | |
3034 | | // Check if a Python thread must call _PyThreadState_HangThread(), rather than |
3035 | | // taking the GIL or attaching to the interpreter if Py_Finalize() has been |
3036 | | // called. |
3037 | | // |
3038 | | // When this function is called by a daemon thread after Py_Finalize() has been |
3039 | | // called, the GIL may no longer exist. |
3040 | | // |
3041 | | // tstate must be non-NULL. |
3042 | | int |
3043 | | _PyThreadState_MustExit(PyThreadState *tstate) |
3044 | 4.75M | { |
3045 | 4.75M | int state = _Py_atomic_load_int_relaxed(&tstate->state); |
3046 | 4.75M | return state == _Py_THREAD_SHUTTING_DOWN; |
3047 | 4.75M | } |
3048 | | |
3049 | | void |
3050 | | _PyThreadState_HangThread(PyThreadState *tstate) |
3051 | 0 | { |
3052 | 0 | _PyThreadStateImpl *tstate_impl = (_PyThreadStateImpl *)tstate; |
3053 | 0 | decref_threadstate(tstate_impl); |
3054 | 0 | PyThread_hang_thread(); |
3055 | 0 | } |
3056 | | |
3057 | | /********************/ |
3058 | | /* mimalloc support */ |
3059 | | /********************/ |
3060 | | |
/* Initialize the per-thread mimalloc state for `tstate`.  Must run on
   the OS thread that will use the state.  No-op on the GIL-enabled
   build. */
static void
tstate_mimalloc_bind(PyThreadState *tstate)
{
#ifdef Py_GIL_DISABLED
    struct _mimalloc_thread_state *mts = &((_PyThreadStateImpl*)tstate)->mimalloc;

    // Initialize the mimalloc thread state. This must be called from the
    // same thread that will use the thread state. The "mem" heap doubles as
    // the "backing" heap.
    mi_tld_t *tld = &mts->tld;
    _mi_tld_init(tld, &mts->heaps[_Py_MIMALLOC_HEAP_MEM]);
    llist_init(&mts->page_list);

    // Exiting threads push any remaining in-use segments to the abandoned
    // pool to be re-claimed later by other threads. We use per-interpreter
    // pools to keep Python objects from different interpreters separate.
    tld->segments.abandoned = &tstate->interp->mimalloc.abandoned_pool;

    // Don't fill in the first N bytes up to ob_type in debug builds. We may
    // access ob_tid and the refcount fields in the dict and list lock-less
    // accesses, so they must remain valid for a while after deallocation.
    size_t base_offset = offsetof(PyObject, ob_type);
    if (_PyMem_DebugEnabled()) {
        // The debug allocator adds two words at the beginning of each block.
        base_offset += 2 * sizeof(size_t);
    }
    size_t debug_offsets[_Py_MIMALLOC_HEAP_COUNT] = {
        [_Py_MIMALLOC_HEAP_OBJECT] = base_offset,
        [_Py_MIMALLOC_HEAP_GC] = base_offset,
        [_Py_MIMALLOC_HEAP_GC_PRE] = base_offset + 2 * sizeof(PyObject *),
    };

    // Initialize each heap
    for (uint8_t i = 0; i < _Py_MIMALLOC_HEAP_COUNT; i++) {
        _mi_heap_init_ex(&mts->heaps[i], tld, _mi_arena_id_none(), false, i);
        mts->heaps[i].debug_offset = (uint8_t)debug_offsets[i];
    }

    // Heaps that store Python objects should use QSBR to delay freeing
    // mimalloc pages while there may be concurrent lock-free readers.
    mts->heaps[_Py_MIMALLOC_HEAP_OBJECT].page_use_qsbr = true;
    mts->heaps[_Py_MIMALLOC_HEAP_GC].page_use_qsbr = true;
    mts->heaps[_Py_MIMALLOC_HEAP_GC_PRE].page_use_qsbr = true;

    // By default, object allocations use _Py_MIMALLOC_HEAP_OBJECT.
    // _PyObject_GC_New() and similar functions temporarily override this to
    // use one of the GC heaps.
    mts->current_object_heap = &mts->heaps[_Py_MIMALLOC_HEAP_OBJECT];

    _Py_atomic_store_int(&mts->initialized, 1);
#endif
}
3113 | | |
/* Abandon all mimalloc segments owned by `tstate` so other threads can
   reclaim them.  Called during thread-state teardown; no-op on the
   GIL-enabled build or when the thread was never bound. */
void
_PyThreadState_ClearMimallocHeaps(PyThreadState *tstate)
{
#ifdef Py_GIL_DISABLED
    if (!tstate->_status.bound) {
        // The mimalloc heaps are only initialized when the thread is bound.
        return;
    }

    _PyThreadStateImpl *tstate_impl = (_PyThreadStateImpl *)tstate;
    for (Py_ssize_t i = 0; i < _Py_MIMALLOC_HEAP_COUNT; i++) {
        // Abandon all segments in use by this thread. This pushes them to
        // a shared pool to later be reclaimed by other threads. It's important
        // to do this before the thread state is destroyed so that objects
        // remain visible to the GC.
        _mi_heap_collect_abandon(&tstate_impl->mimalloc.heaps[i]);
    }
#endif
}
3133 | | |
3134 | | |
3135 | | int |
3136 | | _Py_IsMainThread(void) |
3137 | 104M | { |
3138 | 104M | unsigned long thread = PyThread_get_thread_ident(); |
3139 | 104M | return (thread == _PyRuntime.main_thread); |
3140 | 104M | } |
3141 | | |
3142 | | |
3143 | | PyInterpreterState * |
3144 | | _PyInterpreterState_Main(void) |
3145 | 101M | { |
3146 | 101M | return _PyRuntime.interpreters.main; |
3147 | 101M | } |
3148 | | |
3149 | | |
3150 | | int |
3151 | | _Py_IsMainInterpreterFinalizing(PyInterpreterState *interp) |
3152 | 0 | { |
3153 | | /* bpo-39877: Access _PyRuntime directly rather than using |
3154 | | tstate->interp->runtime to support calls from Python daemon threads. |
3155 | | After Py_Finalize() has been called, tstate can be a dangling pointer: |
3156 | | point to PyThreadState freed memory. */ |
3157 | 0 | return (_PyRuntimeState_GetFinalizing(&_PyRuntime) != NULL && |
3158 | 0 | interp == &_PyRuntime._main_interpreter); |
3159 | 0 | } |
3160 | | |
3161 | | |
3162 | | const PyConfig * |
3163 | | _Py_GetMainConfig(void) |
3164 | 0 | { |
3165 | 0 | PyInterpreterState *interp = _PyInterpreterState_Main(); |
3166 | 0 | if (interp == NULL) { |
3167 | 0 | return NULL; |
3168 | 0 | } |
3169 | 0 | return _PyInterpreterState_GetConfig(interp); |
3170 | 0 | } |