/src/cpython/Python/pystate.c
Line | Count | Source |
1 | | |
2 | | /* Thread and interpreter state structures and their interfaces */ |
3 | | |
4 | | #include "Python.h" |
5 | | #include "pycore_abstract.h" // _PyIndex_Check() |
6 | | #include "pycore_audit.h" // _Py_AuditHookEntry |
7 | | #include "pycore_ceval.h" // _PyEval_AcquireLock() |
8 | | #include "pycore_codecs.h" // _PyCodec_Fini() |
9 | | #include "pycore_critical_section.h" // _PyCriticalSection_Resume() |
10 | | #include "pycore_dtoa.h" // _dtoa_state_INIT() |
11 | | #include "pycore_emscripten_trampoline.h" // _Py_EmscriptenTrampoline_Init() |
12 | | #include "pycore_freelist.h" // _PyObject_ClearFreeLists() |
13 | | #include "pycore_initconfig.h" // _PyStatus_OK() |
14 | | #include "pycore_interpframe.h" // _PyThreadState_HasStackSpace() |
15 | | #include "pycore_object.h" // _PyType_InitCache() |
16 | | #include "pycore_obmalloc.h" // _PyMem_obmalloc_state_on_heap() |
17 | | #include "pycore_optimizer.h" // JIT_CLEANUP_THRESHOLD |
18 | | #include "pycore_parking_lot.h" // _PyParkingLot_AfterFork() |
19 | | #include "pycore_pyerrors.h" // _PyErr_Clear() |
20 | | #include "pycore_pylifecycle.h" // _PyAST_Fini() |
21 | | #include "pycore_pymem.h" // _PyMem_DebugEnabled() |
22 | | #include "pycore_runtime.h" // _PyRuntime |
23 | | #include "pycore_runtime_init.h" // _PyRuntimeState_INIT |
24 | | #include "pycore_stackref.h" // Py_STACKREF_DEBUG |
25 | | #include "pycore_time.h" // _PyTime_Init() |
26 | | #include "pycore_uniqueid.h" // _PyObject_FinalizePerThreadRefcounts() |
27 | | |
28 | | |
29 | | /* -------------------------------------------------------------------------- |
30 | | CAUTION |
31 | | |
32 | | Always use PyMem_RawMalloc() and PyMem_RawFree() directly in this file. A |
33 | | number of these functions are advertised as safe to call when the GIL isn't |
34 | | held, and in a debug build Python redirects allocation macros such as PyMem_NEW to Python's |
35 | | debugging obmalloc functions. Those aren't thread-safe (they rely on the GIL |
36 | | to avoid the expense of doing their own locking). |
37 | | -------------------------------------------------------------------------- */ |
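
As a hedged aside (the helper below is invented for illustration, not part of pystate.c), the rule in this caution amounts to the following pattern:

```c
/* Minimal sketch of the allocation discipline described above: use the
 * raw allocator, which never routes through the GIL-protected debug
 * obmalloc wrappers, so it is safe to call without holding the GIL. */
#include <Python.h>

static void *
alloc_without_gil(size_t size)          /* hypothetical helper */
{
    void *p = PyMem_RawMalloc(size);    /* OK with or without the GIL */
    /* ... use p ... */
    return p;                           /* release with PyMem_RawFree() */
}
```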
38 | | |
39 | | #ifdef HAVE_DLOPEN |
40 | | # ifdef HAVE_DLFCN_H |
41 | | # include <dlfcn.h> |
42 | | # endif |
43 | | # if !HAVE_DECL_RTLD_LAZY |
44 | | # define RTLD_LAZY 1 |
45 | | # endif |
46 | | #endif |
47 | | |
48 | | |
49 | | /****************************************/ |
50 | | /* helpers for the current thread state */ |
51 | | /****************************************/ |
52 | | |
53 | | // API for the current thread state is further down. |
54 | | |
55 | | /* "current" means one of: |
56 | | - bound to the current OS thread |
57 | | - holds the GIL |
58 | | */ |
59 | | |
60 | | //------------------------------------------------- |
61 | | // a highly efficient lookup for the current thread |
62 | | //------------------------------------------------- |
63 | | |
64 | | /* |
65 | | The stored thread state is set by PyThreadState_Swap(). |
66 | | |
67 | | For each of these functions, the GIL must be held by the current thread. |
68 | | */ |
69 | | |
70 | | |
71 | | #ifdef HAVE_THREAD_LOCAL |
72 | | /* The attached thread state for the current thread. */ |
73 | | _Py_thread_local PyThreadState *_Py_tss_tstate = NULL; |
74 | | |
75 | | /* The "bound" thread state used by PyGILState_Ensure(), |
76 | | also known as a "gilstate." */ |
77 | | _Py_thread_local PyThreadState *_Py_tss_gilstate = NULL; |
78 | | #endif |
79 | | |
80 | | static inline PyThreadState * |
81 | | current_fast_get(void) |
82 | 102M | { |
83 | 102M | #ifdef HAVE_THREAD_LOCAL |
84 | 102M | return _Py_tss_tstate; |
85 | | #else |
86 | | // XXX Fall back to the PyThread_tss_*() API. |
87 | | # error "no supported thread-local variable storage classifier" |
88 | | #endif |
89 | 102M | } |
90 | | |
91 | | static inline void |
92 | | current_fast_set(_PyRuntimeState *Py_UNUSED(runtime), PyThreadState *tstate) |
93 | 31.1k | { |
94 | 31.1k | assert(tstate != NULL); |
95 | 31.1k | #ifdef HAVE_THREAD_LOCAL |
96 | 31.1k | _Py_tss_tstate = tstate; |
97 | | #else |
98 | | // XXX Fall back to the PyThread_tss_*() API. |
99 | | # error "no supported thread-local variable storage classifier" |
100 | | #endif |
101 | 31.1k | } |
102 | | |
103 | | static inline void |
104 | | current_fast_clear(_PyRuntimeState *Py_UNUSED(runtime)) |
105 | 31.1k | { |
106 | 31.1k | #ifdef HAVE_THREAD_LOCAL |
107 | 31.1k | _Py_tss_tstate = NULL; |
108 | | #else |
109 | | // XXX Fall back to the PyThread_tss_*() API. |
110 | | # error "no supported thread-local variable storage classifier" |
111 | | #endif |
112 | 31.1k | } |
113 | | |
114 | | #define tstate_verify_not_active(tstate) \ |
115 | 0 | if (tstate == current_fast_get()) { \ |
116 | 0 | _Py_FatalErrorFormat(__func__, "tstate %p is still current", tstate); \ |
117 | 0 | } |
118 | | |
119 | | PyThreadState * |
120 | | _PyThreadState_GetCurrent(void) |
121 | 7.87M | { |
122 | 7.87M | return current_fast_get(); |
123 | 7.87M | } |
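
For context, a sketch of how this fast path is typically reached from user code: the public accessors below resolve to the thread-local read above (nothing here is specific to this file).

```c
/* Sketch: public accessors that end up in current_fast_get().
 * The attached thread state must exist, i.e. this thread holds
 * the GIL (or is otherwise attached). */
PyThreadState *tstate = PyThreadState_Get();              /* never NULL */
PyInterpreterState *interp = PyThreadState_GetInterpreter(tstate);
```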
124 | | |
125 | | |
126 | | //--------------------------------------------- |
127 | | // The thread state used by PyGILState_Ensure() |
128 | | //--------------------------------------------- |
129 | | |
130 | | /* |
131 | | The stored thread state is set by bind_tstate() (AKA PyThreadState_Bind()). |
132 | | |
133 | | The GIL does not need to be held for these. |
134 | | */ |
135 | | |
136 | | static inline PyThreadState * |
137 | | gilstate_get(void) |
138 | 32 | { |
139 | 32 | return _Py_tss_gilstate; |
140 | 32 | } |
141 | | |
142 | | static inline void |
143 | | gilstate_set(PyThreadState *tstate) |
144 | 16 | { |
145 | 16 | assert(tstate != NULL); |
146 | 16 | _Py_tss_gilstate = tstate; |
147 | 16 | } |
148 | | |
149 | | static inline void |
150 | | gilstate_clear(void) |
151 | 0 | { |
152 | 0 | _Py_tss_gilstate = NULL; |
153 | 0 | } |
154 | | |
155 | | |
156 | | #ifndef NDEBUG |
157 | | static inline int tstate_is_alive(PyThreadState *tstate); |
158 | | |
159 | | static inline int |
160 | | tstate_is_bound(PyThreadState *tstate) |
161 | | { |
162 | | return tstate->_status.bound && !tstate->_status.unbound; |
163 | | } |
164 | | #endif // !NDEBUG |
165 | | |
166 | | static void bind_gilstate_tstate(PyThreadState *); |
167 | | static void unbind_gilstate_tstate(PyThreadState *); |
168 | | |
169 | | static void tstate_mimalloc_bind(PyThreadState *); |
170 | | |
171 | | static void |
172 | | bind_tstate(PyThreadState *tstate) |
173 | 16 | { |
174 | 16 | assert(tstate != NULL); |
175 | 16 | assert(tstate_is_alive(tstate) && !tstate->_status.bound); |
176 | 16 | assert(!tstate->_status.unbound); // just in case |
177 | 16 | assert(!tstate->_status.bound_gilstate); |
178 | 16 | assert(tstate != gilstate_get()); |
179 | 16 | assert(!tstate->_status.active); |
180 | 16 | assert(tstate->thread_id == 0); |
181 | 16 | assert(tstate->native_thread_id == 0); |
182 | | |
183 | | // Currently we don't necessarily store the thread state |
184 | | // in thread-local storage (e.g. per-interpreter). |
185 | | |
186 | 16 | tstate->thread_id = PyThread_get_thread_ident(); |
187 | 16 | #ifdef PY_HAVE_THREAD_NATIVE_ID |
188 | 16 | tstate->native_thread_id = PyThread_get_thread_native_id(); |
189 | 16 | #endif |
190 | | |
191 | | #ifdef Py_GIL_DISABLED |
192 | | // Initialize biased reference counting inter-thread queue. Note that this |
193 | | // needs to be initialized from the active thread. |
194 | | _Py_brc_init_thread(tstate); |
195 | | #endif |
196 | | |
197 | | // mimalloc state needs to be initialized from the active thread. |
198 | 16 | tstate_mimalloc_bind(tstate); |
199 | | |
200 | 16 | tstate->_status.bound = 1; |
201 | 16 | } |
202 | | |
203 | | static void |
204 | | unbind_tstate(PyThreadState *tstate) |
205 | 0 | { |
206 | 0 | assert(tstate != NULL); |
207 | 0 | assert(tstate_is_bound(tstate)); |
208 | 0 | #ifndef HAVE_PTHREAD_STUBS |
209 | 0 | assert(tstate->thread_id > 0); |
210 | 0 | #endif |
211 | 0 | #ifdef PY_HAVE_THREAD_NATIVE_ID |
212 | 0 | assert(tstate->native_thread_id > 0); |
213 | 0 | #endif |
214 | | |
215 | | // We leave thread_id and native_thread_id alone |
216 | | // since they can be useful for debugging. |
217 | | // Check the `_status` field to know if these values |
218 | | // are still valid. |
219 | | |
220 | | // We leave tstate->_status.bound set to 1 |
221 | | // to indicate it was previously bound. |
222 | 0 | tstate->_status.unbound = 1; |
223 | 0 | } |
224 | | |
225 | | |
226 | | /* Stick the thread state for this thread in thread specific storage. |
227 | | |
228 | | When a thread state is created for a thread by some mechanism |
229 | | other than PyGILState_Ensure(), it's important that the GILState |
230 | | machinery knows about it so it doesn't try to create another |
231 | | thread state for the thread. |
232 | | (This is a better fix for SF bug #1010677 than the first one attempted.) |
233 | | |
234 | | The only situation where you can legitimately have more than one |
235 | | thread state for an OS level thread is when there are multiple |
236 | | interpreters. |
237 | | |
238 | | Before 3.12, the PyGILState_*() APIs didn't work with multiple |
239 | | interpreters (see bpo-10915 and bpo-15751), so this function used |
240 | | to set TSS only once. Thus, the first thread state created for that |
241 | | given OS level thread would "win", which seemed reasonable behaviour. |
242 | | */ |
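
A sketch of the scenario this comment describes, from the embedder's side (assuming a thread the interpreter did not create):

```c
/* Sketch: attaching a foreign (non-Python) thread through the
 * gilstate machinery. PyGILState_Ensure() reuses the thread state
 * bound by bind_gilstate_tstate() if one exists, otherwise it
 * creates one for this OS thread. */
static void
call_python_from_foreign_thread(void)
{
    PyGILState_STATE gstate = PyGILState_Ensure();   /* attach + GIL */
    PyRun_SimpleString("print('hello from a C thread')");
    PyGILState_Release(gstate);   /* restore the previous state */
}
```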
243 | | |
244 | | static void |
245 | | bind_gilstate_tstate(PyThreadState *tstate) |
246 | 16 | { |
247 | 16 | assert(tstate != NULL); |
248 | 16 | assert(tstate_is_alive(tstate)); |
249 | 16 | assert(tstate_is_bound(tstate)); |
250 | | // XXX assert(!tstate->_status.active); |
251 | 16 | assert(!tstate->_status.bound_gilstate); |
252 | | |
253 | 16 | PyThreadState *tcur = gilstate_get(); |
254 | 16 | assert(tstate != tcur); |
255 | | |
256 | 16 | if (tcur != NULL) { |
257 | 0 | tcur->_status.bound_gilstate = 0; |
258 | 0 | } |
259 | 16 | gilstate_set(tstate); |
260 | 16 | tstate->_status.bound_gilstate = 1; |
261 | 16 | } |
262 | | |
263 | | static void |
264 | | unbind_gilstate_tstate(PyThreadState *tstate) |
265 | 0 | { |
266 | 0 | assert(tstate != NULL); |
267 | | // XXX assert(tstate_is_alive(tstate)); |
268 | 0 | assert(tstate_is_bound(tstate)); |
269 | | // XXX assert(!tstate->_status.active); |
270 | 0 | assert(tstate->_status.bound_gilstate); |
271 | 0 | assert(tstate == gilstate_get()); |
272 | 0 | gilstate_clear(); |
273 | 0 | tstate->_status.bound_gilstate = 0; |
274 | 0 | } |
275 | | |
276 | | |
277 | | //---------------------------------------------- |
278 | | // the thread state that currently holds the GIL |
279 | | //---------------------------------------------- |
280 | | |
281 | | /* This is not exported, as it is not reliable! It can only |
282 | | ever be compared to the state for the *current* thread. |
283 | | * If not equal, then it doesn't matter that the actual |
284 | | value may change immediately after comparison, as it can't |
285 | | possibly change to the current thread's state. |
286 | | * If equal, then the current thread holds the lock, so the value can't |
287 | | change until we yield the lock. |
288 | | */ |
289 | | static int |
290 | | holds_gil(PyThreadState *tstate) |
291 | 0 | { |
292 | | // XXX Fall back to tstate->interp->runtime->ceval.gil.last_holder |
293 | | // (and tstate->interp->runtime->ceval.gil.locked). |
294 | 0 | assert(tstate != NULL); |
295 | | /* Must be the tstate for this thread */ |
296 | 0 | assert(tstate == gilstate_get()); |
297 | 0 | return tstate == current_fast_get(); |
298 | 0 | } |
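
The public counterpart of this check is PyGILState_Check(), which answers the question for the calling thread; a minimal sketch:

```c
/* Sketch: the stable-API analogue of holds_gil() for this thread. */
if (PyGILState_Check()) {
    /* this thread holds the GIL; C API calls are safe here */
}
```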
299 | | |
300 | | |
301 | | /****************************/ |
302 | | /* the global runtime state */ |
303 | | /****************************/ |
304 | | |
305 | | //---------- |
306 | | // lifecycle |
307 | | //---------- |
308 | | |
309 | | /* Suppress deprecation warning for PyBytesObject.ob_shash */ |
310 | | _Py_COMP_DIAG_PUSH |
311 | | _Py_COMP_DIAG_IGNORE_DEPR_DECLS |
312 | | /* We use "initial" if the runtime gets re-used |
313 | | (e.g. Py_Finalize() followed by Py_Initialize()). |
314 | | Note that we initialize "initial" relative to _PyRuntime, |
315 | | to ensure pre-initialized pointers point to the active |
316 | | runtime state (and not "initial"). */ |
317 | | static const _PyRuntimeState initial = _PyRuntimeState_INIT(_PyRuntime, ""); |
318 | | _Py_COMP_DIAG_POP |
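
A sketch of the re-use cycle the "initial" snapshot exists for (plain embedding calls, nothing specific to this file):

```c
/* Sketch: each Py_Initialize() after a Py_Finalize() resets the
 * global runtime from the statically allocated "initial" copy
 * (see the memcpy() in _PyRuntimeState_Init() below). */
Py_Initialize();
Py_Finalize();
Py_Initialize();   /* runtime state copied back from "initial" */
Py_Finalize();
```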
319 | | |
320 | | #define LOCKS_INIT(runtime) \ |
321 | 0 | { \ |
322 | 0 | &(runtime)->interpreters.mutex, \ |
323 | 0 | &(runtime)->xi.data_lookup.registry.mutex, \ |
324 | 0 | &(runtime)->unicode_state.ids.mutex, \ |
325 | 0 | &(runtime)->imports.extensions.mutex, \ |
326 | 0 | &(runtime)->ceval.pending_mainthread.mutex, \ |
327 | 0 | &(runtime)->ceval.sys_trace_profile_mutex, \ |
328 | 0 | &(runtime)->atexit.mutex, \ |
329 | 0 | &(runtime)->audit_hooks.mutex, \ |
330 | 0 | &(runtime)->allocators.mutex, \ |
331 | 0 | &(runtime)->_main_interpreter.types.mutex, \ |
332 | 0 | &(runtime)->_main_interpreter.code_state.mutex, \ |
333 | 0 | } |
334 | | |
335 | | static void |
336 | | init_runtime(_PyRuntimeState *runtime, |
337 | | void *open_code_hook, void *open_code_userdata, |
338 | | _Py_AuditHookEntry *audit_hook_head, |
339 | | Py_ssize_t unicode_next_index) |
340 | 16 | { |
341 | 16 | assert(!runtime->preinitializing); |
342 | 16 | assert(!runtime->preinitialized); |
343 | 16 | assert(!runtime->core_initialized); |
344 | 16 | assert(!runtime->initialized); |
345 | 16 | assert(!runtime->_initialized); |
346 | | |
347 | 16 | runtime->open_code_hook = open_code_hook; |
348 | 16 | runtime->open_code_userdata = open_code_userdata; |
349 | 16 | runtime->audit_hooks.head = audit_hook_head; |
350 | | |
351 | 16 | PyPreConfig_InitPythonConfig(&runtime->preconfig); |
352 | | |
353 | | // Set it to the ID of the main thread of the main interpreter. |
354 | 16 | runtime->main_thread = PyThread_get_thread_ident(); |
355 | | |
356 | 16 | runtime->unicode_state.ids.next_index = unicode_next_index; |
357 | | |
358 | | #if defined(__EMSCRIPTEN__) && defined(PY_CALL_TRAMPOLINE) |
359 | | _Py_EmscriptenTrampoline_Init(runtime); |
360 | | #endif |
361 | | |
362 | 16 | runtime->_initialized = 1; |
363 | 16 | } |
364 | | |
365 | | PyStatus |
366 | | _PyRuntimeState_Init(_PyRuntimeState *runtime) |
367 | 16 | { |
368 | | /* We preserve the hook across init, because there is |
369 | | currently no public API to set it between runtime |
370 | | initialization and interpreter initialization. */ |
371 | 16 | void *open_code_hook = runtime->open_code_hook; |
372 | 16 | void *open_code_userdata = runtime->open_code_userdata; |
373 | 16 | _Py_AuditHookEntry *audit_hook_head = runtime->audit_hooks.head; |
374 | | // bpo-42882: Preserve next_index value if Py_Initialize()/Py_Finalize() |
375 | | // is called multiple times. |
376 | 16 | Py_ssize_t unicode_next_index = runtime->unicode_state.ids.next_index; |
377 | | |
378 | 16 | if (runtime->_initialized) { |
379 | | // Py_Initialize() must be running again. |
380 | | // Reset to _PyRuntimeState_INIT. |
381 | 0 | memcpy(runtime, &initial, sizeof(*runtime)); |
382 | | // Preserve the cookie from the original runtime. |
383 | 0 | memcpy(runtime->debug_offsets.cookie, _Py_Debug_Cookie, 8); |
384 | 0 | assert(!runtime->_initialized); |
385 | 0 | } |
386 | | |
387 | 16 | PyStatus status = _PyTime_Init(&runtime->time); |
388 | 16 | if (_PyStatus_EXCEPTION(status)) { |
389 | 0 | return status; |
390 | 0 | } |
391 | | |
392 | 16 | init_runtime(runtime, open_code_hook, open_code_userdata, audit_hook_head, |
393 | 16 | unicode_next_index); |
394 | | |
395 | 16 | return _PyStatus_OK(); |
396 | 16 | } |
397 | | |
398 | | void |
399 | | _PyRuntimeState_Fini(_PyRuntimeState *runtime) |
400 | 0 | { |
401 | | #ifdef Py_REF_DEBUG |
402 | | /* The count is cleared by _Py_FinalizeRefTotal(). */ |
403 | | assert(runtime->object_state.interpreter_leaks == 0); |
404 | | #endif |
405 | 0 | gilstate_clear(); |
406 | 0 | } |
407 | | |
408 | | #ifdef HAVE_FORK |
409 | | /* This function is called from PyOS_AfterFork_Child to ensure that |
410 | | newly created child processes do not share locks with the parent. */ |
411 | | PyStatus |
412 | | _PyRuntimeState_ReInitThreads(_PyRuntimeState *runtime) |
413 | 0 | { |
414 | | // This was initially set in _PyRuntimeState_Init(). |
415 | 0 | runtime->main_thread = PyThread_get_thread_ident(); |
416 | | |
417 | | // Clears the parking lot. Any waiting threads are dead. This must be |
418 | | // called before releasing any locks that use the parking lot. |
419 | 0 | _PyParkingLot_AfterFork(); |
420 | | |
421 | | // Re-initialize global locks |
422 | 0 | PyMutex *locks[] = LOCKS_INIT(runtime); |
423 | 0 | for (size_t i = 0; i < Py_ARRAY_LENGTH(locks); i++) { |
424 | 0 | _PyMutex_at_fork_reinit(locks[i]); |
425 | 0 | } |
426 | | #ifdef Py_GIL_DISABLED |
427 | | for (PyInterpreterState *interp = runtime->interpreters.head; |
428 | | interp != NULL; interp = interp->next) |
429 | | { |
430 | | for (int i = 0; i < NUM_WEAKREF_LIST_LOCKS; i++) { |
431 | | _PyMutex_at_fork_reinit(&interp->weakref_locks[i]); |
432 | | } |
433 | | } |
434 | | #endif |
435 | | |
436 | 0 | _PyTypes_AfterFork(); |
437 | | |
438 | 0 | _PyThread_AfterFork(&runtime->threads); |
439 | | |
440 | 0 | return _PyStatus_OK(); |
441 | 0 | } |
442 | | #endif |
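
For reference, a hedged sketch of the fork protocol that reaches this function: PyOS_AfterFork_Child() is the public child-side hook, and it re-initializes the runtime as described above.

```c
/* Sketch: the embedder-side sequence around fork(). The public hooks
 * bracket fork(); the child-side hook eventually runs
 * _PyRuntimeState_ReInitThreads() to re-initialize the global locks. */
#include <unistd.h>

static pid_t
fork_with_python(void)
{
    PyOS_BeforeFork();             /* take the runtime locks */
    pid_t pid = fork();
    if (pid == 0) {
        PyOS_AfterFork_Child();    /* child: reinit locks and threads */
    }
    else {
        PyOS_AfterFork_Parent();   /* parent: just release the locks */
    }
    return pid;
}
```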
443 | | |
444 | | |
445 | | /*************************************/ |
446 | | /* the per-interpreter runtime state */ |
447 | | /*************************************/ |
448 | | |
449 | | //---------- |
450 | | // lifecycle |
451 | | //---------- |
452 | | |
453 | | /* Calling this indicates that the runtime is ready to create interpreters. */ |
454 | | |
455 | | PyStatus |
456 | | _PyInterpreterState_Enable(_PyRuntimeState *runtime) |
457 | 16 | { |
458 | 16 | struct pyinterpreters *interpreters = &runtime->interpreters; |
459 | 16 | interpreters->next_id = 0; |
460 | 16 | return _PyStatus_OK(); |
461 | 16 | } |
462 | | |
463 | | static PyInterpreterState * |
464 | | alloc_interpreter(void) |
465 | 0 | { |
466 | 0 | size_t alignment = _Alignof(PyInterpreterState); |
467 | 0 | size_t allocsize = sizeof(PyInterpreterState) + alignment - 1; |
468 | 0 | void *mem = PyMem_RawCalloc(1, allocsize); |
469 | 0 | if (mem == NULL) { |
470 | 0 | return NULL; |
471 | 0 | } |
472 | 0 | PyInterpreterState *interp = _Py_ALIGN_UP(mem, alignment); |
473 | 0 | assert(_Py_IS_ALIGNED(interp, alignment)); |
474 | 0 | interp->_malloced = mem; |
475 | 0 | return interp; |
476 | 0 | } |
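
The over-allocate-and-align trick above generalizes; a sketch under the assumption that alignment is a power of two (as _Alignof values are):

```c
/* Sketch: allocate alignment-1 extra bytes, round the pointer up,
 * and keep the raw pointer so it can be handed back to the allocator
 * later (mirroring the interp->_malloced bookkeeping above). */
#include <stdint.h>

static void *
raw_aligned_calloc(size_t size, size_t alignment, void **raw_out)
{
    void *raw = PyMem_RawCalloc(1, size + alignment - 1);
    if (raw == NULL) {
        return NULL;
    }
    uintptr_t aligned =
        ((uintptr_t)raw + alignment - 1) & ~(uintptr_t)(alignment - 1);
    *raw_out = raw;          /* free this, never the aligned pointer */
    return (void *)aligned;
}
```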
477 | | |
478 | | static void |
479 | | free_interpreter(PyInterpreterState *interp) |
480 | 0 | { |
481 | | // The main interpreter is statically allocated so |
482 | | // should not be freed. |
483 | 0 | if (interp != &_PyRuntime._main_interpreter) { |
484 | 0 | if (_PyMem_obmalloc_state_on_heap(interp)) { |
485 | | // interpreter has its own obmalloc state, free it |
486 | 0 | PyMem_RawFree(interp->obmalloc); |
487 | 0 | interp->obmalloc = NULL; |
488 | 0 | } |
489 | 0 | assert(_Py_IS_ALIGNED(interp, _Alignof(PyInterpreterState))); |
490 | 0 | PyMem_RawFree(interp->_malloced); |
491 | 0 | } |
492 | 0 | } |
493 | | |
494 | | #ifndef NDEBUG |
495 | | static inline int check_interpreter_whence(long); |
496 | | #endif |
497 | | |
498 | | /* Get the interpreter state to a minimal consistent state. |
499 | | Further init happens in pylifecycle.c before it can be used. |
500 | | All fields not initialized here are expected to be zeroed out, |
501 | | e.g. by PyMem_RawCalloc() or memset(), or otherwise pre-initialized. |
502 | | The runtime state is not manipulated. Instead it is assumed that |
503 | | the interpreter is getting added to the runtime. |
504 | | |
505 | | Note that the main interpreter was statically initialized as part |
506 | | of the runtime and most state is already set properly. That leaves |
507 | | a small number of fields to initialize dynamically, as well as some |
508 | | that are initialized lazily. |
509 | | |
510 | | For subinterpreters we memcpy() the main interpreter in |
511 | | PyInterpreterState_New(), leaving it in the same mostly-initialized |
512 | | state. The only difference is that the interpreter has some |
513 | | self-referential state that is statically initialized to the |
514 | | main interpreter. We fix those fields here, in addition |
515 | | to the other dynamically initialized fields. |
516 | | */ |
517 | | static PyStatus |
518 | | init_interpreter(PyInterpreterState *interp, |
519 | | _PyRuntimeState *runtime, int64_t id, |
520 | | PyInterpreterState *next, |
521 | | long whence) |
522 | 16 | { |
523 | 16 | if (interp->_initialized) { |
524 | 0 | return _PyStatus_ERR("interpreter already initialized"); |
525 | 0 | } |
526 | | |
527 | 16 | assert(interp->_whence == _PyInterpreterState_WHENCE_NOTSET); |
528 | 16 | assert(check_interpreter_whence(whence) == 0); |
529 | 16 | interp->_whence = whence; |
530 | | |
531 | 16 | assert(runtime != NULL); |
532 | 16 | interp->runtime = runtime; |
533 | | |
534 | 16 | assert(id > 0 || (id == 0 && interp == runtime->interpreters.main)); |
535 | 16 | interp->id = id; |
536 | | |
537 | 16 | interp->id_refcount = 0; |
538 | | |
539 | 16 | assert(runtime->interpreters.head == interp); |
540 | 16 | assert(next != NULL || (interp == runtime->interpreters.main)); |
541 | 16 | interp->next = next; |
542 | | |
543 | 16 | interp->threads.preallocated = &interp->_initial_thread; |
544 | | |
545 | | // We would call _PyObject_InitState() at this point |
546 | | if interp->feature_flags were already set. |
547 | | |
548 | 16 | _PyEval_InitState(interp); |
549 | 16 | _PyGC_InitState(&interp->gc); |
550 | 16 | PyConfig_InitPythonConfig(&interp->config); |
551 | 16 | _PyType_InitCache(interp); |
552 | | #ifdef Py_GIL_DISABLED |
553 | | _Py_brc_init_state(interp); |
554 | | #endif |
555 | 16 | llist_init(&interp->mem_free_queue.head); |
556 | 16 | llist_init(&interp->asyncio_tasks_head); |
557 | 16 | interp->asyncio_tasks_lock = (PyMutex){0}; |
558 | 272 | for (int i = 0; i < _PY_MONITORING_UNGROUPED_EVENTS; i++) { |
559 | 256 | interp->monitors.tools[i] = 0; |
560 | 256 | } |
561 | 144 | for (int t = 0; t < PY_MONITORING_TOOL_IDS; t++) { |
562 | 2.56k | for (int e = 0; e < _PY_MONITORING_EVENTS; e++) { |
563 | 2.43k | interp->monitoring_callables[t][e] = NULL; |
564 | | |
565 | 2.43k | } |
566 | 128 | interp->monitoring_tool_versions[t] = 0; |
567 | 128 | } |
568 | 16 | interp->sys_profile_initialized = false; |
569 | 16 | interp->sys_trace_initialized = false; |
570 | 16 | interp->_code_object_generation = 0; |
571 | 16 | interp->jit = false; |
572 | 16 | interp->executor_list_head = NULL; |
573 | 16 | interp->executor_deletion_list_head = NULL; |
574 | 16 | interp->executor_deletion_list_remaining_capacity = 0; |
575 | 16 | interp->trace_run_counter = JIT_CLEANUP_THRESHOLD; |
576 | 16 | if (interp != &runtime->_main_interpreter) { |
577 | | /* Fix the self-referential, statically initialized fields. */ |
578 | 0 | interp->dtoa = (struct _dtoa_state)_dtoa_state_INIT(interp); |
579 | 0 | } |
580 | | #if !defined(Py_GIL_DISABLED) && defined(Py_STACKREF_DEBUG) |
581 | | interp->next_stackref = INITIAL_STACKREF_INDEX; |
582 | | _Py_hashtable_allocator_t alloc = { |
583 | | .malloc = malloc, |
584 | | .free = free, |
585 | | }; |
586 | | interp->open_stackrefs_table = _Py_hashtable_new_full( |
587 | | _Py_hashtable_hash_ptr, |
588 | | _Py_hashtable_compare_direct, |
589 | | NULL, |
590 | | NULL, |
591 | | &alloc |
592 | | ); |
593 | | # ifdef Py_STACKREF_CLOSE_DEBUG |
594 | | interp->closed_stackrefs_table = _Py_hashtable_new_full( |
595 | | _Py_hashtable_hash_ptr, |
596 | | _Py_hashtable_compare_direct, |
597 | | NULL, |
598 | | NULL, |
599 | | &alloc |
600 | | ); |
601 | | # endif |
602 | | _Py_stackref_associate(interp, Py_None, PyStackRef_None); |
603 | | _Py_stackref_associate(interp, Py_False, PyStackRef_False); |
604 | | _Py_stackref_associate(interp, Py_True, PyStackRef_True); |
605 | | #endif |
606 | | |
607 | 16 | interp->_initialized = 1; |
608 | 16 | return _PyStatus_OK(); |
609 | 16 | } |
610 | | |
611 | | |
612 | | PyStatus |
613 | | _PyInterpreterState_New(PyThreadState *tstate, PyInterpreterState **pinterp) |
614 | 16 | { |
615 | 16 | *pinterp = NULL; |
616 | | |
617 | | // Don't get runtime from tstate since tstate can be NULL |
618 | 16 | _PyRuntimeState *runtime = &_PyRuntime; |
619 | | |
620 | | // tstate is NULL when pycore_create_interpreter() calls |
621 | | // _PyInterpreterState_New() to create the main interpreter. |
622 | 16 | if (tstate != NULL) { |
623 | 0 | if (_PySys_Audit(tstate, "cpython.PyInterpreterState_New", NULL) < 0) { |
624 | 0 | return _PyStatus_ERR("sys.audit failed"); |
625 | 0 | } |
626 | 0 | } |
627 | | |
628 | | /* We completely serialize creation of multiple interpreters, since |
629 | | it simplifies things here and blocking concurrent calls isn't a problem. |
630 | | Regardless, we must fully block subinterpreter creation until |
631 | | after the main interpreter is created. */ |
632 | 16 | HEAD_LOCK(runtime); |
633 | | |
634 | 16 | struct pyinterpreters *interpreters = &runtime->interpreters; |
635 | 16 | int64_t id = interpreters->next_id; |
636 | 16 | interpreters->next_id += 1; |
637 | | |
638 | | // Allocate the interpreter and add it to the runtime state. |
639 | 16 | PyInterpreterState *interp; |
640 | 16 | PyStatus status; |
641 | 16 | PyInterpreterState *old_head = interpreters->head; |
642 | 16 | if (old_head == NULL) { |
643 | | // We are creating the main interpreter. |
644 | 16 | assert(interpreters->main == NULL); |
645 | 16 | assert(id == 0); |
646 | | |
647 | 16 | interp = &runtime->_main_interpreter; |
648 | 16 | assert(interp->id == 0); |
649 | 16 | assert(interp->next == NULL); |
650 | | |
651 | 16 | interpreters->main = interp; |
652 | 16 | } |
653 | 0 | else { |
654 | 0 | assert(interpreters->main != NULL); |
655 | 0 | assert(id != 0); |
656 | | |
657 | 0 | interp = alloc_interpreter(); |
658 | 0 | if (interp == NULL) { |
659 | 0 | status = _PyStatus_NO_MEMORY(); |
660 | 0 | goto error; |
661 | 0 | } |
662 | | // Set to _PyInterpreterState_INIT. |
663 | 0 | memcpy(interp, &initial._main_interpreter, sizeof(*interp)); |
664 | | |
665 | 0 | if (id < 0) { |
666 | | /* overflow or Py_Initialize() not called yet! */ |
667 | 0 | status = _PyStatus_ERR("failed to get an interpreter ID"); |
668 | 0 | goto error; |
669 | 0 | } |
670 | 0 | } |
671 | 16 | interpreters->head = interp; |
672 | | |
673 | 16 | long whence = _PyInterpreterState_WHENCE_UNKNOWN; |
674 | 16 | status = init_interpreter(interp, runtime, |
675 | 16 | id, old_head, whence); |
676 | 16 | if (_PyStatus_EXCEPTION(status)) { |
677 | 0 | goto error; |
678 | 0 | } |
679 | | |
680 | 16 | HEAD_UNLOCK(runtime); |
681 | | |
682 | 16 | assert(interp != NULL); |
683 | 16 | *pinterp = interp; |
684 | 16 | return _PyStatus_OK(); |
685 | | |
686 | 0 | error: |
687 | 0 | HEAD_UNLOCK(runtime); |
688 | | |
689 | 0 | if (interp != NULL) { |
690 | 0 | free_interpreter(interp); |
691 | 0 | } |
692 | 0 | return status; |
693 | 16 | } |
694 | | |
695 | | |
696 | | PyInterpreterState * |
697 | | PyInterpreterState_New(void) |
698 | 0 | { |
699 | | // tstate can be NULL |
700 | 0 | PyThreadState *tstate = current_fast_get(); |
701 | | |
702 | 0 | PyInterpreterState *interp; |
703 | 0 | PyStatus status = _PyInterpreterState_New(tstate, &interp); |
704 | 0 | if (_PyStatus_EXCEPTION(status)) { |
705 | 0 | Py_ExitStatusException(status); |
706 | 0 | } |
707 | 0 | assert(interp != NULL); |
708 | 0 | return interp; |
709 | 0 | } |
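
From the embedding API, the usual route into the machinery above is Py_NewInterpreter(); a hedged sketch:

```c
/* Sketch: creating and tearing down a subinterpreter. Py_NewInterpreter()
 * funnels into _PyInterpreterState_New() and also creates and activates
 * a thread state for the new interpreter. */
PyThreadState *save = PyThreadState_Get();
PyThreadState *sub = Py_NewInterpreter();   /* now the current tstate */
if (sub != NULL) {
    PyRun_SimpleString("import sys; print(sys.prefix)");
    Py_EndInterpreter(sub);                 /* destroys the interpreter */
}
PyThreadState_Swap(save);                   /* reactivate the old tstate */
```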
710 | | |
711 | | #if !defined(Py_GIL_DISABLED) && defined(Py_STACKREF_DEBUG) |
712 | | extern void |
713 | | _Py_stackref_report_leaks(PyInterpreterState *interp); |
714 | | #endif |
715 | | |
716 | | static void |
717 | | interpreter_clear(PyInterpreterState *interp, PyThreadState *tstate) |
718 | 0 | { |
719 | 0 | assert(interp != NULL); |
720 | 0 | assert(tstate != NULL); |
721 | 0 | _PyRuntimeState *runtime = interp->runtime; |
722 | | |
723 | | /* XXX Conditions we need to enforce: |
724 | | |
725 | | * the GIL must be held by the current thread |
726 | | * tstate must be the "current" thread state (current_fast_get()) |
727 | | * tstate->interp must be interp |
728 | | * for the main interpreter, tstate must be the main thread |
729 | | */ |
730 | | // XXX Ideally, we would not rely on any thread state in this function |
731 | | // (and we would drop the "tstate" argument). |
732 | | |
733 | 0 | if (_PySys_Audit(tstate, "cpython.PyInterpreterState_Clear", NULL) < 0) { |
734 | 0 | _PyErr_Clear(tstate); |
735 | 0 | } |
736 | | |
737 | | // Clear the current/main thread state last. |
738 | 0 | _Py_FOR_EACH_TSTATE_BEGIN(interp, p) { |
739 | | // See https://github.com/python/cpython/issues/102126 |
740 | | // Must be called without HEAD_LOCK held as it can deadlock |
741 | | // if any finalizer tries to acquire that lock. |
742 | 0 | HEAD_UNLOCK(runtime); |
743 | 0 | PyThreadState_Clear(p); |
744 | 0 | HEAD_LOCK(runtime); |
745 | 0 | } |
746 | 0 | _Py_FOR_EACH_TSTATE_END(interp); |
747 | 0 | if (tstate->interp == interp) { |
748 | | /* We fix tstate->_status below when we for sure aren't using it |
749 | | (e.g. no longer need the GIL). */ |
750 | | // XXX Eliminate the need to do this. |
751 | 0 | tstate->_status.cleared = 0; |
752 | 0 | } |
753 | | |
754 | | /* It is possible that any of the objects below have a finalizer |
755 | | that runs Python code or otherwise relies on a thread state |
756 | | or even the interpreter state. For now we trust that isn't |
757 | | a problem. |
758 | | */ |
759 | | // XXX Make sure we properly deal with problematic finalizers. |
760 | | |
761 | 0 | Py_CLEAR(interp->audit_hooks); |
762 | | |
763 | | // At this time, all the threads should be cleared so we don't need atomic |
764 | | // operations for instrumentation_version or eval_breaker. |
765 | 0 | interp->ceval.instrumentation_version = 0; |
766 | 0 | tstate->eval_breaker = 0; |
767 | | |
768 | 0 | for (int i = 0; i < _PY_MONITORING_UNGROUPED_EVENTS; i++) { |
769 | 0 | interp->monitors.tools[i] = 0; |
770 | 0 | } |
771 | 0 | for (int t = 0; t < PY_MONITORING_TOOL_IDS; t++) { |
772 | 0 | for (int e = 0; e < _PY_MONITORING_EVENTS; e++) { |
773 | 0 | Py_CLEAR(interp->monitoring_callables[t][e]); |
774 | 0 | } |
775 | 0 | } |
776 | 0 | interp->sys_profile_initialized = false; |
777 | 0 | interp->sys_trace_initialized = false; |
778 | 0 | for (int t = 0; t < PY_MONITORING_TOOL_IDS; t++) { |
779 | 0 | Py_CLEAR(interp->monitoring_tool_names[t]); |
780 | 0 | } |
781 | 0 | interp->_code_object_generation = 0; |
782 | | #ifdef Py_GIL_DISABLED |
783 | | interp->tlbc_indices.tlbc_generation = 0; |
784 | | #endif |
785 | | |
786 | 0 | PyConfig_Clear(&interp->config); |
787 | 0 | _PyCodec_Fini(interp); |
788 | |
|
789 | 0 | assert(interp->imports.modules == NULL); |
790 | 0 | assert(interp->imports.modules_by_index == NULL); |
791 | 0 | assert(interp->imports.importlib == NULL); |
792 | 0 | assert(interp->imports.import_func == NULL); |
793 | |
|
794 | 0 | Py_CLEAR(interp->sysdict_copy); |
795 | 0 | Py_CLEAR(interp->builtins_copy); |
796 | 0 | Py_CLEAR(interp->dict); |
797 | 0 | #ifdef HAVE_FORK |
798 | 0 | Py_CLEAR(interp->before_forkers); |
799 | 0 | Py_CLEAR(interp->after_forkers_parent); |
800 | 0 | Py_CLEAR(interp->after_forkers_child); |
801 | 0 | #endif |
802 | | |
803 | | |
804 | | #ifdef _Py_TIER2 |
805 | | _Py_ClearExecutorDeletionList(interp); |
806 | | #endif |
807 | 0 | _PyAST_Fini(interp); |
808 | 0 | _PyWarnings_Fini(interp); |
809 | 0 | _PyAtExit_Fini(interp); |
810 | | |
811 | | // All Python types must be destroyed before the last GC collection. Python |
812 | | // types create a reference cycle to themselves in their |
813 | | // PyTypeObject.tp_mro member (the tuple contains the type). |
814 | | |
815 | | /* Last garbage collection on this interpreter */ |
816 | 0 | _PyGC_CollectNoFail(tstate); |
817 | 0 | _PyGC_Fini(interp); |
818 | | |
819 | | /* We don't clear sysdict and builtins until the end of this function. |
820 | | Because clearing other attributes can execute arbitrary Python code |
821 | | which requires sysdict and builtins. */ |
822 | 0 | PyDict_Clear(interp->sysdict); |
823 | 0 | PyDict_Clear(interp->builtins); |
824 | 0 | Py_CLEAR(interp->sysdict); |
825 | 0 | Py_CLEAR(interp->builtins); |
826 | | |
827 | | #if !defined(Py_GIL_DISABLED) && defined(Py_STACKREF_DEBUG) |
828 | | # ifdef Py_STACKREF_CLOSE_DEBUG |
829 | | _Py_hashtable_destroy(interp->closed_stackrefs_table); |
830 | | interp->closed_stackrefs_table = NULL; |
831 | | # endif |
832 | | _Py_stackref_report_leaks(interp); |
833 | | _Py_hashtable_destroy(interp->open_stackrefs_table); |
834 | | interp->open_stackrefs_table = NULL; |
835 | | #endif |
836 | | |
837 | 0 | if (tstate->interp == interp) { |
838 | | /* We are now safe to fix tstate->_status.cleared. */ |
839 | | // XXX Do this (much) earlier? |
840 | 0 | tstate->_status.cleared = 1; |
841 | 0 | } |
842 | | |
843 | 0 | for (int i=0; i < DICT_MAX_WATCHERS; i++) { |
844 | 0 | interp->dict_state.watchers[i] = NULL; |
845 | 0 | } |
846 | | |
847 | 0 | for (int i=0; i < TYPE_MAX_WATCHERS; i++) { |
848 | 0 | interp->type_watchers[i] = NULL; |
849 | 0 | } |
850 | | |
851 | 0 | for (int i=0; i < FUNC_MAX_WATCHERS; i++) { |
852 | 0 | interp->func_watchers[i] = NULL; |
853 | 0 | } |
854 | 0 | interp->active_func_watchers = 0; |
855 | | |
856 | 0 | for (int i=0; i < CODE_MAX_WATCHERS; i++) { |
857 | 0 | interp->code_watchers[i] = NULL; |
858 | 0 | } |
859 | 0 | interp->active_code_watchers = 0; |
860 | | |
861 | 0 | for (int i=0; i < CONTEXT_MAX_WATCHERS; i++) { |
862 | 0 | interp->context_watchers[i] = NULL; |
863 | 0 | } |
864 | 0 | interp->active_context_watchers = 0; |
865 | | // XXX Once we have one allocator per interpreter (i.e. |
866 | | // per-interpreter GC) we must ensure that all of the interpreter's |
867 | | // objects have been cleaned up at that point. |
868 | | |
869 | | // We could clear interp->threads.freelist here |
870 | | // if it held more than just the initial thread state. |
871 | 0 | } |
872 | | |
873 | | |
874 | | void |
875 | | PyInterpreterState_Clear(PyInterpreterState *interp) |
876 | 0 | { |
877 | | // Use the current Python thread state to call audit hooks and to collect |
878 | | // garbage. It can be different than the current Python thread state |
879 | | // of 'interp'. |
880 | 0 | PyThreadState *current_tstate = current_fast_get(); |
881 | 0 | _PyImport_ClearCore(interp); |
882 | 0 | interpreter_clear(interp, current_tstate); |
883 | 0 | } |
884 | | |
885 | | |
886 | | void |
887 | | _PyInterpreterState_Clear(PyThreadState *tstate) |
888 | 0 | { |
889 | 0 | _PyImport_ClearCore(tstate->interp); |
890 | 0 | interpreter_clear(tstate->interp, tstate); |
891 | 0 | } |
892 | | |
893 | | |
894 | | static inline void tstate_deactivate(PyThreadState *tstate); |
895 | | static void tstate_set_detached(PyThreadState *tstate, int detached_state); |
896 | | static void zapthreads(PyInterpreterState *interp); |
897 | | |
898 | | void |
899 | | PyInterpreterState_Delete(PyInterpreterState *interp) |
900 | 0 | { |
901 | 0 | _PyRuntimeState *runtime = interp->runtime; |
902 | 0 | struct pyinterpreters *interpreters = &runtime->interpreters; |
903 | | |
904 | | // XXX Clearing the "current" thread state should happen before |
905 | | // we start finalizing the interpreter (or the current thread state). |
906 | 0 | PyThreadState *tcur = current_fast_get(); |
907 | 0 | if (tcur != NULL && interp == tcur->interp) { |
908 | | /* Unset current thread. After this, many C API calls become crashy. */ |
909 | 0 | _PyThreadState_Detach(tcur); |
910 | 0 | } |
911 | | |
912 | 0 | zapthreads(interp); |
913 | | |
914 | | // XXX These two calls should be done at the end of clear_interpreter(), |
915 | | // but currently some objects get decref'ed after that. |
916 | | #ifdef Py_REF_DEBUG |
917 | | _PyInterpreterState_FinalizeRefTotal(interp); |
918 | | #endif |
919 | 0 | _PyInterpreterState_FinalizeAllocatedBlocks(interp); |
920 | | |
921 | 0 | HEAD_LOCK(runtime); |
922 | 0 | PyInterpreterState **p; |
923 | 0 | for (p = &interpreters->head; ; p = &(*p)->next) { |
924 | 0 | if (*p == NULL) { |
925 | 0 | Py_FatalError("NULL interpreter"); |
926 | 0 | } |
927 | 0 | if (*p == interp) { |
928 | 0 | break; |
929 | 0 | } |
930 | 0 | } |
931 | 0 | if (interp->threads.head != NULL) { |
932 | 0 | Py_FatalError("remaining threads"); |
933 | 0 | } |
934 | 0 | *p = interp->next; |
935 | | |
936 | 0 | if (interpreters->main == interp) { |
937 | 0 | interpreters->main = NULL; |
938 | 0 | if (interpreters->head != NULL) { |
939 | 0 | Py_FatalError("remaining subinterpreters"); |
940 | 0 | } |
941 | 0 | } |
942 | 0 | HEAD_UNLOCK(runtime); |
943 | | |
944 | 0 | _Py_qsbr_fini(interp); |
945 | | |
946 | 0 | _PyObject_FiniState(interp); |
947 | | |
948 | 0 | free_interpreter(interp); |
949 | 0 | } |
950 | | |
951 | | |
952 | | #ifdef HAVE_FORK |
953 | | /* |
954 | | * Delete all interpreter states except the main interpreter. If there |
955 | | * is a current interpreter state, it *must* be the main interpreter. |
956 | | */ |
957 | | PyStatus |
958 | | _PyInterpreterState_DeleteExceptMain(_PyRuntimeState *runtime) |
959 | 0 | { |
960 | 0 | struct pyinterpreters *interpreters = &runtime->interpreters; |
961 | | |
962 | 0 | PyThreadState *tstate = _PyThreadState_Swap(runtime, NULL); |
963 | 0 | if (tstate != NULL && tstate->interp != interpreters->main) { |
964 | 0 | return _PyStatus_ERR("not main interpreter"); |
965 | 0 | } |
966 | | |
967 | 0 | HEAD_LOCK(runtime); |
968 | 0 | PyInterpreterState *interp = interpreters->head; |
969 | 0 | interpreters->head = NULL; |
970 | 0 | while (interp != NULL) { |
971 | 0 | if (interp == interpreters->main) { |
972 | 0 | interpreters->main->next = NULL; |
973 | 0 | interpreters->head = interp; |
974 | 0 | interp = interp->next; |
975 | 0 | continue; |
976 | 0 | } |
977 | | |
978 | | // XXX Won't this fail since PyInterpreterState_Clear() requires |
979 | | // the "current" tstate to be set? |
980 | 0 | PyInterpreterState_Clear(interp); // XXX must activate? |
981 | 0 | zapthreads(interp); |
982 | 0 | PyInterpreterState *prev_interp = interp; |
983 | 0 | interp = interp->next; |
984 | 0 | free_interpreter(prev_interp); |
985 | 0 | } |
986 | 0 | HEAD_UNLOCK(runtime); |
987 | | |
988 | 0 | if (interpreters->head == NULL) { |
989 | 0 | return _PyStatus_ERR("missing main interpreter"); |
990 | 0 | } |
991 | 0 | _PyThreadState_Swap(runtime, tstate); |
992 | 0 | return _PyStatus_OK(); |
993 | 0 | } |
994 | | #endif |
995 | | |
996 | | static inline void |
997 | | set_main_thread(PyInterpreterState *interp, PyThreadState *tstate) |
998 | 0 | { |
999 | 0 | _Py_atomic_store_ptr_relaxed(&interp->threads.main, tstate); |
1000 | 0 | } |
1001 | | |
1002 | | static inline PyThreadState * |
1003 | | get_main_thread(PyInterpreterState *interp) |
1004 | 0 | { |
1005 | 0 | return _Py_atomic_load_ptr_relaxed(&interp->threads.main); |
1006 | 0 | } |
1007 | | |
1008 | | void |
1009 | | _PyErr_SetInterpreterAlreadyRunning(void) |
1010 | 0 | { |
1011 | 0 | PyErr_SetString(PyExc_InterpreterError, "interpreter already running"); |
1012 | 0 | } |
1013 | | |
1014 | | int |
1015 | | _PyInterpreterState_SetRunningMain(PyInterpreterState *interp) |
1016 | 0 | { |
1017 | 0 | if (get_main_thread(interp) != NULL) { |
1018 | 0 | _PyErr_SetInterpreterAlreadyRunning(); |
1019 | 0 | return -1; |
1020 | 0 | } |
1021 | 0 | PyThreadState *tstate = current_fast_get(); |
1022 | 0 | _Py_EnsureTstateNotNULL(tstate); |
1023 | 0 | if (tstate->interp != interp) { |
1024 | 0 | PyErr_SetString(PyExc_RuntimeError, |
1025 | 0 | "current tstate has wrong interpreter"); |
1026 | 0 | return -1; |
1027 | 0 | } |
1028 | 0 | set_main_thread(interp, tstate); |
1029 | | |
1030 | 0 | return 0; |
1031 | 0 | } |
1032 | | |
1033 | | void |
1034 | | _PyInterpreterState_SetNotRunningMain(PyInterpreterState *interp) |
1035 | 0 | { |
1036 | 0 | assert(get_main_thread(interp) == current_fast_get()); |
1037 | 0 | set_main_thread(interp, NULL); |
1038 | 0 | } |
1039 | | |
1040 | | int |
1041 | | _PyInterpreterState_IsRunningMain(PyInterpreterState *interp) |
1042 | 0 | { |
1043 | 0 | if (get_main_thread(interp) != NULL) { |
1044 | 0 | return 1; |
1045 | 0 | } |
1046 | | // Embedders might not know to call _PyInterpreterState_SetRunningMain(), |
1047 | | // so their main thread wouldn't show it is running the main interpreter's |
1048 | | // program. (Py_Main() doesn't have this problem.) For now this isn't |
1049 | | // critical. If it were, we would need to infer "running main" from other |
1050 | | // information, like if it's the main interpreter. We used to do that |
1051 | | // but the naive approach led to some inconsistencies that caused problems. |
1052 | 0 | return 0; |
1053 | 0 | } |
1054 | | |
1055 | | int |
1056 | | _PyThreadState_IsRunningMain(PyThreadState *tstate) |
1057 | 0 | { |
1058 | 0 | PyInterpreterState *interp = tstate->interp; |
1059 | | // See the note in _PyInterpreterState_IsRunningMain() about |
1060 | | // possible false negatives here for embedders. |
1061 | 0 | return get_main_thread(interp) == tstate; |
1062 | 0 | } |
1063 | | |
1064 | | void |
1065 | | _PyInterpreterState_ReinitRunningMain(PyThreadState *tstate) |
1066 | 0 | { |
1067 | 0 | PyInterpreterState *interp = tstate->interp; |
1068 | 0 | if (get_main_thread(interp) != tstate) { |
1069 | 0 | set_main_thread(interp, NULL); |
1070 | 0 | } |
1071 | 0 | } |
1072 | | |
1073 | | |
1074 | | //---------- |
1075 | | // accessors |
1076 | | //---------- |
1077 | | |
1078 | | int |
1079 | | _PyInterpreterState_IsReady(PyInterpreterState *interp) |
1080 | 0 | { |
1081 | 0 | return interp->_ready; |
1082 | 0 | } |
1083 | | |
1084 | | #ifndef NDEBUG |
1085 | | static inline int |
1086 | | check_interpreter_whence(long whence) |
1087 | | { |
1088 | | if (whence < 0) { |
1089 | | return -1; |
1090 | | } |
1091 | | if (whence > _PyInterpreterState_WHENCE_MAX) { |
1092 | | return -1; |
1093 | | } |
1094 | | return 0; |
1095 | | } |
1096 | | #endif |
1097 | | |
1098 | | long |
1099 | | _PyInterpreterState_GetWhence(PyInterpreterState *interp) |
1100 | 0 | { |
1101 | 0 | assert(check_interpreter_whence(interp->_whence) == 0); |
1102 | 0 | return interp->_whence; |
1103 | 0 | } |
1104 | | |
1105 | | void |
1106 | | _PyInterpreterState_SetWhence(PyInterpreterState *interp, long whence) |
1107 | 16 | { |
1108 | 16 | assert(interp->_whence != _PyInterpreterState_WHENCE_NOTSET); |
1109 | 16 | assert(check_interpreter_whence(whence) == 0); |
1110 | 16 | interp->_whence = whence; |
1111 | 16 | } |
1112 | | |
1113 | | |
1114 | | PyObject * |
1115 | | _Py_GetMainModule(PyThreadState *tstate) |
1116 | 0 | { |
1117 | | // We return None to indicate "not found" or "bogus". |
1118 | 0 | PyObject *modules = _PyImport_GetModulesRef(tstate->interp); |
1119 | 0 | if (modules == Py_None) { |
1120 | 0 | return modules; |
1121 | 0 | } |
1122 | 0 | PyObject *module = NULL; |
1123 | 0 | (void)PyMapping_GetOptionalItem(modules, &_Py_ID(__main__), &module); |
1124 | 0 | Py_DECREF(modules); |
1125 | 0 | if (module == NULL && !PyErr_Occurred()) { |
1126 | 0 | Py_RETURN_NONE; |
1127 | 0 | } |
1128 | 0 | return module; |
1129 | 0 | } |
1130 | | |
1131 | | int |
1132 | | _Py_CheckMainModule(PyObject *module) |
1133 | 0 | { |
1134 | 0 | if (module == NULL || module == Py_None) { |
1135 | 0 | if (!PyErr_Occurred()) { |
1136 | 0 | (void)_PyErr_SetModuleNotFoundError(&_Py_ID(__main__)); |
1137 | 0 | } |
1138 | 0 | return -1; |
1139 | 0 | } |
1140 | 0 | if (!Py_IS_TYPE(module, &PyModule_Type)) { |
1141 | | /* The __main__ module has been tampered with. */ |
1142 | 0 | PyObject *msg = PyUnicode_FromString("invalid __main__ module"); |
1143 | 0 | if (msg != NULL) { |
1144 | 0 | (void)PyErr_SetImportError(msg, &_Py_ID(__main__), NULL); |
1145 | 0 | Py_DECREF(msg); |
1146 | 0 | } |
1147 | 0 | return -1; |
1148 | 0 | } |
1149 | 0 | return 0; |
1150 | 0 | } |
1151 | | |
1152 | | |
1153 | | PyObject * |
1154 | | PyInterpreterState_GetDict(PyInterpreterState *interp) |
1155 | 12 | { |
1156 | 12 | if (interp->dict == NULL) { |
1157 | 6 | interp->dict = PyDict_New(); |
1158 | 6 | if (interp->dict == NULL) { |
1159 | 0 | PyErr_Clear(); |
1160 | 0 | } |
1161 | 6 | } |
1162 | | /* Returning NULL means no per-interpreter dict is available. */ |
1163 | 12 | return interp->dict; |
1164 | 12 | } |
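
A sketch of typical use of the per-interpreter dict (the key name and helper are invented for this example):

```c
/* Sketch: stashing per-interpreter state for an extension. Note the
 * quirk documented above: a NULL return means no per-interpreter dict
 * is available, and no exception is set. */
static int
stash_counter(void)
{
    PyObject *dict = PyInterpreterState_GetDict(PyInterpreterState_Get());
    if (dict == NULL) {
        return -1;                        /* no per-interpreter dict */
    }
    PyObject *zero = PyLong_FromLong(0);
    if (zero == NULL) {
        return -1;
    }
    int rc = PyDict_SetItemString(dict, "myext.counter", zero);
    Py_DECREF(zero);
    return rc;
}
```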
1165 | | |
1166 | | |
1167 | | //---------- |
1168 | | // interp ID |
1169 | | //---------- |
1170 | | |
1171 | | int64_t |
1172 | | _PyInterpreterState_ObjectToID(PyObject *idobj) |
1173 | 0 | { |
1174 | 0 | if (!_PyIndex_Check(idobj)) { |
1175 | 0 | PyErr_Format(PyExc_TypeError, |
1176 | 0 | "interpreter ID must be an int, got %.100s", |
1177 | 0 | Py_TYPE(idobj)->tp_name); |
1178 | 0 | return -1; |
1179 | 0 | } |
1180 | | |
1181 | | // This may raise OverflowError. |
1182 | | // For now, we don't worry about if LLONG_MAX < INT64_MAX. |
1183 | 0 | long long id = PyLong_AsLongLong(idobj); |
1184 | 0 | if (id == -1 && PyErr_Occurred()) { |
1185 | 0 | return -1; |
1186 | 0 | } |
1187 | | |
1188 | 0 | if (id < 0) { |
1189 | 0 | PyErr_Format(PyExc_ValueError, |
1190 | 0 | "interpreter ID must be a non-negative int, got %R", |
1191 | 0 | idobj); |
1192 | 0 | return -1; |
1193 | 0 | } |
1194 | | #if LLONG_MAX > INT64_MAX |
1195 | | else if (id > INT64_MAX) { |
1196 | | PyErr_SetString(PyExc_OverflowError, "int too big to convert"); |
1197 | | return -1; |
1198 | | } |
1199 | | #endif |
1200 | 0 | else { |
1201 | 0 | return (int64_t)id; |
1202 | 0 | } |
1203 | 0 | } |
1204 | | |
1205 | | int64_t |
1206 | | PyInterpreterState_GetID(PyInterpreterState *interp) |
1207 | 0 | { |
1208 | 0 | if (interp == NULL) { |
1209 | 0 | PyErr_SetString(PyExc_RuntimeError, "no interpreter provided"); |
1210 | 0 | return -1; |
1211 | 0 | } |
1212 | 0 | return interp->id; |
1213 | 0 | } |
1214 | | |
1215 | | PyObject * |
1216 | | _PyInterpreterState_GetIDObject(PyInterpreterState *interp) |
1217 | 0 | { |
1218 | 0 | int64_t interpid = interp->id; |
1219 | 0 | if (interpid < 0) { |
1220 | 0 | return NULL; |
1221 | 0 | } |
1222 | 0 | assert(interpid < LLONG_MAX); |
1223 | 0 | return PyLong_FromLongLong(interpid); |
1224 | 0 | } |
1225 | | |
1226 | | |
1227 | | |
1228 | | void |
1229 | | _PyInterpreterState_IDIncref(PyInterpreterState *interp) |
1230 | 0 | { |
1231 | 0 | _Py_atomic_add_ssize(&interp->id_refcount, 1); |
1232 | 0 | } |
1233 | | |
1234 | | |
1235 | | void |
1236 | | _PyInterpreterState_IDDecref(PyInterpreterState *interp) |
1237 | 0 | { |
1238 | 0 | _PyRuntimeState *runtime = interp->runtime; |
1239 | | |
1240 | 0 | Py_ssize_t refcount = _Py_atomic_add_ssize(&interp->id_refcount, -1); |
1241 | | |
1242 | 0 | if (refcount == 1 && interp->requires_idref) { |
1243 | 0 | PyThreadState *tstate = |
1244 | 0 | _PyThreadState_NewBound(interp, _PyThreadState_WHENCE_FINI); |
1245 | | |
1246 | | // XXX Possible GILState issues? |
1247 | 0 | PyThreadState *save_tstate = _PyThreadState_Swap(runtime, tstate); |
1248 | 0 | Py_EndInterpreter(tstate); |
1249 | 0 | _PyThreadState_Swap(runtime, save_tstate); |
1250 | 0 | } |
1251 | 0 | } |
1252 | | |
1253 | | int |
1254 | | _PyInterpreterState_RequiresIDRef(PyInterpreterState *interp) |
1255 | 0 | { |
1256 | 0 | return interp->requires_idref; |
1257 | 0 | } |
1258 | | |
1259 | | void |
1260 | | _PyInterpreterState_RequireIDRef(PyInterpreterState *interp, int required) |
1261 | 0 | { |
1262 | 0 | interp->requires_idref = required ? 1 : 0; |
1263 | 0 | } |
1264 | | |
1265 | | |
1266 | | //----------------------------- |
1267 | | // look up an interpreter state |
1268 | | //----------------------------- |
1269 | | |
1270 | | /* Return the interpreter associated with the current OS thread. |
1271 | | |
1272 | | The GIL must be held. |
1273 | | */ |
1274 | | |
1275 | | PyInterpreterState* |
1276 | | PyInterpreterState_Get(void) |
1277 | 641k | { |
1278 | 641k | PyThreadState *tstate = current_fast_get(); |
1279 | 641k | _Py_EnsureTstateNotNULL(tstate); |
1280 | 641k | PyInterpreterState *interp = tstate->interp; |
1281 | 641k | if (interp == NULL) { |
1282 | 0 | Py_FatalError("no current interpreter"); |
1283 | 0 | } |
1284 | 641k | return interp; |
1285 | 641k | } |
1286 | | |
1287 | | |
1288 | | static PyInterpreterState * |
1289 | | interp_look_up_id(_PyRuntimeState *runtime, int64_t requested_id) |
1290 | 0 | { |
1291 | 0 | PyInterpreterState *interp = runtime->interpreters.head; |
1292 | 0 | while (interp != NULL) { |
1293 | 0 | int64_t id = interp->id; |
1294 | 0 | assert(id >= 0); |
1295 | 0 | if (requested_id == id) { |
1296 | 0 | return interp; |
1297 | 0 | } |
1298 | 0 | interp = PyInterpreterState_Next(interp); |
1299 | 0 | } |
1300 | 0 | return NULL; |
1301 | 0 | } |
1302 | | |
1303 | | /* Return the interpreter state with the given ID. |
1304 | | |
1305 | | Fail with RuntimeError if the interpreter is not found. */ |
1306 | | |
1307 | | PyInterpreterState * |
1308 | | _PyInterpreterState_LookUpID(int64_t requested_id) |
1309 | 0 | { |
1310 | 0 | PyInterpreterState *interp = NULL; |
1311 | 0 | if (requested_id >= 0) { |
1312 | 0 | _PyRuntimeState *runtime = &_PyRuntime; |
1313 | 0 | HEAD_LOCK(runtime); |
1314 | 0 | interp = interp_look_up_id(runtime, requested_id); |
1315 | 0 | HEAD_UNLOCK(runtime); |
1316 | 0 | } |
1317 | 0 | if (interp == NULL && !PyErr_Occurred()) { |
1318 | 0 | PyErr_Format(PyExc_InterpreterNotFoundError, |
1319 | 0 | "unrecognized interpreter ID %lld", requested_id); |
1320 | 0 | } |
1321 | 0 | return interp; |
1322 | 0 | } |
1323 | | |
1324 | | PyInterpreterState * |
1325 | | _PyInterpreterState_LookUpIDObject(PyObject *requested_id) |
1326 | 0 | { |
1327 | 0 | int64_t id = _PyInterpreterState_ObjectToID(requested_id); |
1328 | 0 | if (id < 0) { |
1329 | 0 | return NULL; |
1330 | 0 | } |
1331 | 0 | return _PyInterpreterState_LookUpID(id); |
1332 | 0 | } |
1333 | | |
1334 | | |
1335 | | /********************************/ |
1336 | | /* the per-thread runtime state */ |
1337 | | /********************************/ |
1338 | | |
1339 | | #ifndef NDEBUG |
1340 | | static inline int |
1341 | | tstate_is_alive(PyThreadState *tstate) |
1342 | | { |
1343 | | return (tstate->_status.initialized && |
1344 | | !tstate->_status.finalized && |
1345 | | !tstate->_status.cleared && |
1346 | | !tstate->_status.finalizing); |
1347 | | } |
1348 | | #endif |
1349 | | |
1350 | | |
1351 | | //---------- |
1352 | | // lifecycle |
1353 | | //---------- |
1354 | | |
1355 | | static _PyStackChunk* |
1356 | | allocate_chunk(int size_in_bytes, _PyStackChunk* previous) |
1357 | 158k | { |
1358 | 158k | assert(size_in_bytes % sizeof(PyObject **) == 0); |
1359 | 158k | _PyStackChunk *res = _PyObject_VirtualAlloc(size_in_bytes); |
1360 | 158k | if (res == NULL) { |
1361 | 0 | return NULL; |
1362 | 0 | } |
1363 | 158k | res->previous = previous; |
1364 | 158k | res->size = size_in_bytes; |
1365 | 158k | res->top = 0; |
1366 | 158k | return res; |
1367 | 158k | } |
1368 | | |
1369 | | static void |
1370 | | reset_threadstate(_PyThreadStateImpl *tstate) |
1371 | 0 | { |
1372 | | // Set to _PyThreadState_INIT directly? |
1373 | 0 | memcpy(tstate, |
1374 | 0 | &initial._main_interpreter._initial_thread, |
1375 | 0 | sizeof(*tstate)); |
1376 | 0 | } |
1377 | | |
1378 | | static _PyThreadStateImpl * |
1379 | | alloc_threadstate(PyInterpreterState *interp) |
1380 | 16 | { |
1381 | 16 | _PyThreadStateImpl *tstate; |
1382 | | |
1383 | | // Try the preallocated tstate first. |
1384 | 16 | tstate = _Py_atomic_exchange_ptr(&interp->threads.preallocated, NULL); |
1385 | | |
1386 | | // Fall back to the allocator. |
1387 | 16 | if (tstate == NULL) { |
1388 | 0 | tstate = PyMem_RawCalloc(1, sizeof(_PyThreadStateImpl)); |
1389 | 0 | if (tstate == NULL) { |
1390 | 0 | return NULL; |
1391 | 0 | } |
1392 | 0 | reset_threadstate(tstate); |
1393 | 0 | } |
1394 | 16 | return tstate; |
1395 | 16 | } |
1396 | | |
1397 | | static void |
1398 | | free_threadstate(_PyThreadStateImpl *tstate) |
1399 | 0 | { |
1400 | 0 | PyInterpreterState *interp = tstate->base.interp; |
1401 | | // The initial thread state of the interpreter is allocated |
1402 | | // as part of the interpreter state so should not be freed. |
1403 | 0 | if (tstate == &interp->_initial_thread) { |
1404 | | // Make it available again. |
1405 | 0 | reset_threadstate(tstate); |
1406 | 0 | assert(interp->threads.preallocated == NULL); |
1407 | 0 | _Py_atomic_store_ptr(&interp->threads.preallocated, tstate); |
1408 | 0 | } |
1409 | 0 | else { |
1410 | 0 | PyMem_RawFree(tstate); |
1411 | 0 | } |
1412 | 0 | } |
1413 | | |
1414 | | static void |
1415 | | decref_threadstate(_PyThreadStateImpl *tstate) |
1416 | 0 | { |
1417 | 0 | if (_Py_atomic_add_ssize(&tstate->refcount, -1) == 1) { |
1418 | | // The last reference to the thread state is gone. |
1419 | 0 | free_threadstate(tstate); |
1420 | 0 | } |
1421 | 0 | } |
1422 | | |
1423 | | /* Get the thread state to a minimal consistent state. |
1424 | | Further init happens in pylifecycle.c before it can be used. |
1425 | | All fields not initialized here are expected to be zeroed out, |
1426 | | e.g. by PyMem_RawCalloc() or memset(), or otherwise pre-initialized. |
1427 | | The interpreter state is not manipulated. Instead it is assumed that |
1428 | | the thread is getting added to the interpreter. |
1429 | | */ |
1430 | | |
1431 | | static void |
1432 | | init_threadstate(_PyThreadStateImpl *_tstate, |
1433 | | PyInterpreterState *interp, uint64_t id, int whence) |
1434 | 16 | { |
1435 | 16 | PyThreadState *tstate = (PyThreadState *)_tstate; |
1436 | 16 | if (tstate->_status.initialized) { |
1437 | 0 | Py_FatalError("thread state already initialized"); |
1438 | 0 | } |
1439 | | |
1440 | 16 | assert(interp != NULL); |
1441 | 16 | tstate->interp = interp; |
1442 | 16 | tstate->eval_breaker = |
1443 | 16 | _Py_atomic_load_uintptr_relaxed(&interp->ceval.instrumentation_version); |
1444 | | |
1445 | | // next/prev are set in add_threadstate(). |
1446 | 16 | assert(tstate->next == NULL); |
1447 | 16 | assert(tstate->prev == NULL); |
1448 | | |
1449 | 16 | assert(tstate->_whence == _PyThreadState_WHENCE_NOTSET); |
1450 | 16 | assert(whence >= 0 && whence <= _PyThreadState_WHENCE_EXEC); |
1451 | 16 | tstate->_whence = whence; |
1452 | | |
1453 | 16 | assert(id > 0); |
1454 | 16 | tstate->id = id; |
1455 | | |
1456 | | // thread_id and native_thread_id are set in bind_tstate(). |
1457 | | |
1458 | 16 | tstate->py_recursion_limit = interp->ceval.recursion_limit; |
1459 | 16 | tstate->py_recursion_remaining = interp->ceval.recursion_limit; |
1460 | 16 | tstate->exc_info = &tstate->exc_state; |
1461 | | |
1462 | | // PyGILState_Release must not try to delete this thread state. |
1463 | | // This is cleared when PyGILState_Ensure() creates the thread state. |
1464 | 16 | tstate->gilstate_counter = 1; |
1465 | | |
1466 | 16 | tstate->current_frame = NULL; |
1467 | 16 | tstate->datastack_chunk = NULL; |
1468 | 16 | tstate->datastack_top = NULL; |
1469 | 16 | tstate->datastack_limit = NULL; |
1470 | 16 | tstate->what_event = -1; |
1471 | 16 | tstate->current_executor = NULL; |
1472 | 16 | tstate->dict_global_version = 0; |
1473 | | |
1474 | 16 | _tstate->c_stack_soft_limit = UINTPTR_MAX; |
1475 | 16 | _tstate->c_stack_top = 0; |
1476 | 16 | _tstate->c_stack_hard_limit = 0; |
1477 | | |
1478 | 16 | _tstate->asyncio_running_loop = NULL; |
1479 | 16 | _tstate->asyncio_running_task = NULL; |
1480 | | |
1481 | 16 | tstate->delete_later = NULL; |
1482 | | |
1483 | 16 | llist_init(&_tstate->mem_free_queue); |
1484 | 16 | llist_init(&_tstate->asyncio_tasks_head); |
1485 | 16 | if (interp->stoptheworld.requested || _PyRuntime.stoptheworld.requested) { |
1486 | | // Start in the suspended state if there is an ongoing stop-the-world. |
1487 | 0 | tstate->state = _Py_THREAD_SUSPENDED; |
1488 | 0 | } |
1489 | | |
1490 | 16 | tstate->_status.initialized = 1; |
1491 | 16 | } |
1492 | | |
1493 | | static void |
1494 | | add_threadstate(PyInterpreterState *interp, PyThreadState *tstate, |
1495 | | PyThreadState *next) |
1496 | 16 | { |
1497 | 16 | assert(interp->threads.head != tstate); |
1498 | 16 | if (next != NULL) { |
1499 | 0 | assert(next->prev == NULL || next->prev == tstate); |
1500 | 0 | next->prev = tstate; |
1501 | 0 | } |
1502 | 16 | tstate->next = next; |
1503 | 16 | assert(tstate->prev == NULL); |
1504 | 16 | interp->threads.head = tstate; |
1505 | 16 | } |
1506 | | |
1507 | | static PyThreadState * |
1508 | | new_threadstate(PyInterpreterState *interp, int whence) |
1509 | 16 | { |
1510 | | // Allocate the thread state. |
1511 | 16 | _PyThreadStateImpl *tstate = alloc_threadstate(interp); |
1512 | 16 | if (tstate == NULL) { |
1513 | 0 | return NULL; |
1514 | 0 | } |
1515 | | |
1516 | | #ifdef Py_GIL_DISABLED |
1517 | | Py_ssize_t qsbr_idx = _Py_qsbr_reserve(interp); |
1518 | | if (qsbr_idx < 0) { |
1519 | | free_threadstate(tstate); |
1520 | | return NULL; |
1521 | | } |
1522 | | int32_t tlbc_idx = _Py_ReserveTLBCIndex(interp); |
1523 | | if (tlbc_idx < 0) { |
1524 | | free_threadstate(tstate); |
1525 | | return NULL; |
1526 | | } |
1527 | | #endif |
1528 | | |
1529 | | /* We serialize concurrent creation to protect global state. */ |
1530 | 16 | HEAD_LOCK(interp->runtime); |
1531 | | |
1532 | | // Initialize the new thread state. |
1533 | 16 | interp->threads.next_unique_id += 1; |
1534 | 16 | uint64_t id = interp->threads.next_unique_id; |
1535 | 16 | init_threadstate(tstate, interp, id, whence); |
1536 | | |
1537 | | // Add the new thread state to the interpreter. |
1538 | 16 | PyThreadState *old_head = interp->threads.head; |
1539 | 16 | add_threadstate(interp, (PyThreadState *)tstate, old_head); |
1540 | | |
1541 | 16 | HEAD_UNLOCK(interp->runtime); |
1542 | | |
1543 | | #ifdef Py_GIL_DISABLED |
1544 | | // Must be called with the lock unlocked to avoid lock-ordering deadlocks.
1545 | | _Py_qsbr_register(tstate, interp, qsbr_idx); |
1546 | | tstate->tlbc_index = tlbc_idx; |
1547 | | #endif |
1548 | | |
1549 | 16 | return (PyThreadState *)tstate; |
1550 | 16 | } |
1551 | | |
1552 | | PyThreadState * |
1553 | | PyThreadState_New(PyInterpreterState *interp) |
1554 | 0 | { |
1555 | 0 | return _PyThreadState_NewBound(interp, _PyThreadState_WHENCE_UNKNOWN); |
1556 | 0 | } |
1557 | | |
1558 | | PyThreadState * |
1559 | | _PyThreadState_NewBound(PyInterpreterState *interp, int whence) |
1560 | 0 | { |
1561 | 0 | PyThreadState *tstate = new_threadstate(interp, whence); |
1562 | 0 | if (tstate) { |
1563 | 0 | bind_tstate(tstate); |
1564 | | // This makes sure there's a gilstate tstate bound |
1565 | | // as soon as possible. |
1566 | 0 | if (gilstate_get() == NULL) { |
1567 | 0 | bind_gilstate_tstate(tstate); |
1568 | 0 | } |
1569 | 0 | } |
1570 | 0 | return tstate; |
1571 | 0 | } |
1572 | | |
1573 | | // This must be followed by a call to _PyThreadState_Bind().
1574 | | PyThreadState * |
1575 | | _PyThreadState_New(PyInterpreterState *interp, int whence) |
1576 | 16 | { |
1577 | 16 | return new_threadstate(interp, whence); |
1578 | 16 | } |
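
Embedders normally reach new_threadstate() through the public wrappers above. As a non-authoritative sketch of the usual pattern for giving a foreign (non-Python-created) thread its own state, assuming an already-initialized runtime and a valid `interp`:

    /* Sketch only: public-API flow, error handling elided. */
    PyThreadState *ts = PyThreadState_New(interp);  /* allocate + bind */
    PyEval_RestoreThread(ts);   /* attach: acquire the GIL for this tstate */
    /* ... run Python code on this thread ... */
    PyEval_SaveThread();        /* detach again, releasing the GIL */
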
1579 | | |
1580 | | // We keep this for stable ABI compatibility.
1581 | | PyAPI_FUNC(PyThreadState*) |
1582 | | _PyThreadState_Prealloc(PyInterpreterState *interp) |
1583 | 0 | { |
1584 | 0 | return _PyThreadState_New(interp, _PyThreadState_WHENCE_UNKNOWN); |
1585 | 0 | } |
1586 | | |
1587 | | // We keep this around for (accidental) stable ABI compatibility. |
1588 | | // Realistically, no extensions are using it. |
1589 | | PyAPI_FUNC(void) |
1590 | | _PyThreadState_Init(PyThreadState *tstate) |
1591 | 0 | { |
1592 | 0 | Py_FatalError("_PyThreadState_Init() is for internal use only"); |
1593 | 0 | } |
1594 | | |
1595 | | |
1596 | | static void |
1597 | | clear_datastack(PyThreadState *tstate) |
1598 | 0 | { |
1599 | 0 | _PyStackChunk *chunk = tstate->datastack_chunk; |
1600 | 0 | tstate->datastack_chunk = NULL; |
1601 | 0 | while (chunk != NULL) { |
1602 | 0 | _PyStackChunk *prev = chunk->previous; |
1603 | 0 | _PyObject_VirtualFree(chunk, chunk->size); |
1604 | 0 | chunk = prev; |
1605 | 0 | } |
1606 | 0 | } |
1607 | | |
1608 | | void |
1609 | | PyThreadState_Clear(PyThreadState *tstate) |
1610 | 0 | { |
1611 | 0 | assert(tstate->_status.initialized && !tstate->_status.cleared); |
1612 | 0 | assert(current_fast_get()->interp == tstate->interp); |
1613 | 0 | assert(!_PyThreadState_IsRunningMain(tstate)); |
1614 | | // XXX assert(!tstate->_status.bound || tstate->_status.unbound); |
1615 | 0 | tstate->_status.finalizing = 1; // just in case |
1616 | | |
1617 | | /* XXX Conditions we need to enforce: |
1618 | | |
1619 | | * the GIL must be held by the current thread |
1620 | | * current_fast_get()->interp must match tstate->interp |
1621 | | * for the main interpreter, current_fast_get() must be the main thread |
1622 | | */ |
1623 | |
1624 | 0 | int verbose = _PyInterpreterState_GetConfig(tstate->interp)->verbose; |
1625 | |
1626 | 0 | if (verbose && tstate->current_frame != NULL) { |
1627 | | /* bpo-20526: After the main thread calls |
1628 | | _PyInterpreterState_SetFinalizing() in Py_FinalizeEx() |
1629 | | (or in Py_EndInterpreter() for subinterpreters), |
1630 | | threads must exit when trying to take the GIL. |
1631 | | If a thread exits in the middle of _PyEval_EvalFrameDefault(),
1632 | | tstate->frame is not reset to its previous value. |
1633 | | It is more likely with daemon threads, but it can happen |
1634 | | with regular threads if threading._shutdown() fails |
1635 | | (ex: interrupted by CTRL+C). */ |
1636 | 0 | fprintf(stderr, |
1637 | 0 | "PyThreadState_Clear: warning: thread still has a frame\n"); |
1638 | 0 | } |
1639 | |
1640 | 0 | if (verbose && tstate->current_exception != NULL) { |
1641 | 0 | fprintf(stderr, "PyThreadState_Clear: warning: thread has an exception set\n"); |
1642 | 0 | _PyErr_Print(tstate); |
1643 | 0 | } |
1644 | | |
1645 | | /* At this point tstate shouldn't be used any more, |
1646 | | neither to run Python code nor for other uses. |
1647 | | |
1648 | | This is tricky when current_fast_get() == tstate, in the same way |
1649 | | as noted in interpreter_clear() above. The below finalizers |
1650 | | can possibly run Python code or otherwise use the partially |
1651 | | cleared thread state. For now we trust that isn't a problem |
1652 | | in practice. |
1653 | | */ |
1654 | | // XXX Deal with the possibility of problematic finalizers. |
1655 | | |
1656 | | /* Don't clear tstate->pyframe: it is a borrowed reference */ |
1657 | |
1658 | 0 | Py_CLEAR(tstate->threading_local_key); |
1659 | 0 | Py_CLEAR(tstate->threading_local_sentinel); |
1660 | |
1661 | 0 | Py_CLEAR(((_PyThreadStateImpl *)tstate)->asyncio_running_loop); |
1662 | 0 | Py_CLEAR(((_PyThreadStateImpl *)tstate)->asyncio_running_task); |
1663 | | |
1664 | |
1665 | 0 | PyMutex_Lock(&tstate->interp->asyncio_tasks_lock); |
1666 | | // merge any lingering tasks from the thread state into the
1667 | | // interpreter's tasks list
1668 | 0 | llist_concat(&tstate->interp->asyncio_tasks_head, |
1669 | 0 | &((_PyThreadStateImpl *)tstate)->asyncio_tasks_head); |
1670 | 0 | PyMutex_Unlock(&tstate->interp->asyncio_tasks_lock); |
1671 | |
1672 | 0 | Py_CLEAR(tstate->dict); |
1673 | 0 | Py_CLEAR(tstate->async_exc); |
1674 | |
1675 | 0 | Py_CLEAR(tstate->current_exception); |
1676 | |
1677 | 0 | Py_CLEAR(tstate->exc_state.exc_value); |
1678 | | |
1679 | | /* The stack of exception states should contain just this thread. */ |
1680 | 0 | if (verbose && tstate->exc_info != &tstate->exc_state) { |
1681 | 0 | fprintf(stderr, |
1682 | 0 | "PyThreadState_Clear: warning: thread still has a generator\n"); |
1683 | 0 | } |
1684 | |
1685 | 0 | if (tstate->c_profilefunc != NULL) { |
1686 | 0 | tstate->interp->sys_profiling_threads--; |
1687 | 0 | tstate->c_profilefunc = NULL; |
1688 | 0 | } |
1689 | 0 | if (tstate->c_tracefunc != NULL) { |
1690 | 0 | tstate->interp->sys_tracing_threads--; |
1691 | 0 | tstate->c_tracefunc = NULL; |
1692 | 0 | } |
1693 | 0 | Py_CLEAR(tstate->c_profileobj); |
1694 | 0 | Py_CLEAR(tstate->c_traceobj); |
1695 | |
1696 | 0 | Py_CLEAR(tstate->async_gen_firstiter); |
1697 | 0 | Py_CLEAR(tstate->async_gen_finalizer); |
1698 | |
1699 | 0 | Py_CLEAR(tstate->context); |
1700 | |
1701 | | #ifdef Py_GIL_DISABLED |
1702 | | // Each thread should clear its own freelists in free-threading builds.
1703 | | struct _Py_freelists *freelists = _Py_freelists_GET(); |
1704 | | _PyObject_ClearFreeLists(freelists, 1); |
1705 | | |
1706 | | // Merge our thread-local refcounts into the type's own refcount and |
1707 | | // free our local refcount array. |
1708 | | _PyObject_FinalizePerThreadRefcounts((_PyThreadStateImpl *)tstate); |
1709 | | |
1710 | | // Remove ourselves from the biased reference counting table of threads.
1711 | | _Py_brc_remove_thread(tstate); |
1712 | | |
1713 | | // Release our thread-local copies of the bytecode for reuse by another |
1714 | | // thread |
1715 | | _Py_ClearTLBCIndex((_PyThreadStateImpl *)tstate); |
1716 | | #endif |
1717 | | |
1718 | | // Merge our queue of pointers to be freed into the interpreter queue. |
1719 | 0 | _PyMem_AbandonDelayed(tstate); |
1720 | |
1721 | 0 | _PyThreadState_ClearMimallocHeaps(tstate); |
1722 | |
1723 | 0 | tstate->_status.cleared = 1; |
1724 | | |
1725 | | // XXX Call _PyThreadStateSwap(runtime, NULL) here if "current". |
1726 | | // XXX Do it as early in the function as possible. |
1727 | 0 | } |
1728 | | |
1729 | | static void |
1730 | | decrement_stoptheworld_countdown(struct _stoptheworld_state *stw); |
1731 | | |
1732 | | /* Common code for PyThreadState_Delete() and PyThreadState_DeleteCurrent() */ |
1733 | | static void |
1734 | | tstate_delete_common(PyThreadState *tstate, int release_gil) |
1735 | 0 | { |
1736 | 0 | assert(tstate->_status.cleared && !tstate->_status.finalized); |
1737 | 0 | tstate_verify_not_active(tstate); |
1738 | 0 | assert(!_PyThreadState_IsRunningMain(tstate)); |
1739 | |
1740 | 0 | PyInterpreterState *interp = tstate->interp; |
1741 | 0 | if (interp == NULL) { |
1742 | 0 | Py_FatalError("NULL interpreter"); |
1743 | 0 | } |
1744 | 0 | _PyRuntimeState *runtime = interp->runtime; |
1745 | |
1746 | 0 | HEAD_LOCK(runtime); |
1747 | 0 | if (tstate->prev) { |
1748 | 0 | tstate->prev->next = tstate->next; |
1749 | 0 | } |
1750 | 0 | else { |
1751 | 0 | interp->threads.head = tstate->next; |
1752 | 0 | } |
1753 | 0 | if (tstate->next) { |
1754 | 0 | tstate->next->prev = tstate->prev; |
1755 | 0 | } |
1756 | 0 | if (tstate->state != _Py_THREAD_SUSPENDED) { |
1757 | | // Any ongoing stop-the-world request should not wait for us because |
1758 | | // our thread is getting deleted. |
1759 | 0 | if (interp->stoptheworld.requested) { |
1760 | 0 | decrement_stoptheworld_countdown(&interp->stoptheworld); |
1761 | 0 | } |
1762 | 0 | if (runtime->stoptheworld.requested) { |
1763 | 0 | decrement_stoptheworld_countdown(&runtime->stoptheworld); |
1764 | 0 | } |
1765 | 0 | } |
1766 | |
1767 | | #if defined(Py_REF_DEBUG) && defined(Py_GIL_DISABLED) |
1768 | | // Add our portion of the total refcount to the interpreter's total. |
1769 | | _PyThreadStateImpl *tstate_impl = (_PyThreadStateImpl *)tstate; |
1770 | | tstate->interp->object_state.reftotal += tstate_impl->reftotal; |
1771 | | tstate_impl->reftotal = 0; |
1772 | | assert(tstate_impl->refcounts.values == NULL); |
1773 | | #endif |
1774 | |
1775 | 0 | HEAD_UNLOCK(runtime); |
1776 | | |
1777 | | // XXX Unbind in PyThreadState_Clear(), or earlier |
1778 | | // (and assert not-equal here)? |
1779 | 0 | if (tstate->_status.bound_gilstate) { |
1780 | 0 | unbind_gilstate_tstate(tstate); |
1781 | 0 | } |
1782 | 0 | if (tstate->_status.bound) { |
1783 | 0 | unbind_tstate(tstate); |
1784 | 0 | } |
1785 | | |
1786 | | // XXX Move to PyThreadState_Clear()? |
1787 | 0 | clear_datastack(tstate); |
1788 | |
1789 | 0 | if (release_gil) { |
1790 | 0 | _PyEval_ReleaseLock(tstate->interp, tstate, 1); |
1791 | 0 | } |
1792 | |
1793 | | #ifdef Py_GIL_DISABLED |
1794 | | _Py_qsbr_unregister(tstate); |
1795 | | #endif |
1796 | |
1797 | 0 | tstate->_status.finalized = 1; |
1798 | 0 | } |
1799 | | |
1800 | | static void |
1801 | | zapthreads(PyInterpreterState *interp) |
1802 | 0 | { |
1803 | 0 | PyThreadState *tstate; |
1804 | | /* No need to lock the mutex here because this should only happen |
1805 | | when the threads are all really dead (XXX famous last words). |
1806 | | |
1807 | | Cannot use _Py_FOR_EACH_TSTATE_UNLOCKED because we are freeing |
1808 | | the thread states here. |
1809 | | */ |
1810 | 0 | while ((tstate = interp->threads.head) != NULL) { |
1811 | 0 | tstate_verify_not_active(tstate); |
1812 | 0 | tstate_delete_common(tstate, 0); |
1813 | 0 | free_threadstate((_PyThreadStateImpl *)tstate); |
1814 | 0 | } |
1815 | 0 | } |
1816 | | |
1817 | | |
1818 | | void |
1819 | | PyThreadState_Delete(PyThreadState *tstate) |
1820 | 0 | { |
1821 | 0 | _Py_EnsureTstateNotNULL(tstate); |
1822 | 0 | tstate_verify_not_active(tstate); |
1823 | 0 | tstate_delete_common(tstate, 0); |
1824 | 0 | free_threadstate((_PyThreadStateImpl *)tstate); |
1825 | 0 | } |
1826 | | |
1827 | | |
1828 | | void |
1829 | | _PyThreadState_DeleteCurrent(PyThreadState *tstate) |
1830 | 0 | { |
1831 | 0 | _Py_EnsureTstateNotNULL(tstate); |
1832 | | #ifdef Py_GIL_DISABLED |
1833 | | _Py_qsbr_detach(((_PyThreadStateImpl *)tstate)->qsbr); |
1834 | | #endif |
1835 | 0 | current_fast_clear(tstate->interp->runtime); |
1836 | 0 | tstate_delete_common(tstate, 1); // release GIL as part of call |
1837 | 0 | free_threadstate((_PyThreadStateImpl *)tstate); |
1838 | 0 | } |
1839 | | |
1840 | | void |
1841 | | PyThreadState_DeleteCurrent(void) |
1842 | 0 | { |
1843 | 0 | PyThreadState *tstate = current_fast_get(); |
1844 | 0 | _PyThreadState_DeleteCurrent(tstate); |
1845 | 0 | } |
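
The delete functions pair with PyThreadState_Clear(): Clear drops references (and may run destructors, so the thread must still be attached), while DeleteCurrent unlinks, frees, and releases the GIL. A minimal teardown sketch for the `ts` from the earlier sketch:

    PyEval_RestoreThread(ts);       /* re-attach; Clear may run destructors */
    PyThreadState_Clear(ts);        /* drop owned references */
    PyThreadState_DeleteCurrent();  /* unlink + free, releases the GIL */
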
1846 | | |
1847 | | |
1848 | | // Unlinks and removes all thread states from `tstate->interp`, with the |
1849 | | // exception of the one passed as an argument. However, it does not delete |
1850 | | // these thread states. Instead, it returns the removed thread states as a |
1851 | | // linked list. |
1852 | | // |
1853 | | // Note that if there is a current thread state, it *must* be the one |
1854 | | // passed as argument. Also, this won't touch any interpreters other |
1855 | | // than the current one, since we don't know which thread state should |
1856 | | // be kept in those other interpreters. |
1857 | | PyThreadState * |
1858 | | _PyThreadState_RemoveExcept(PyThreadState *tstate) |
1859 | 0 | { |
1860 | 0 | assert(tstate != NULL); |
1861 | 0 | PyInterpreterState *interp = tstate->interp; |
1862 | 0 | _PyRuntimeState *runtime = interp->runtime; |
1863 | |
1864 | | #ifdef Py_GIL_DISABLED |
1865 | | assert(runtime->stoptheworld.world_stopped); |
1866 | | #endif |
1867 | |
1868 | 0 | HEAD_LOCK(runtime); |
1869 | | /* Remove all thread states, except tstate, from the linked list of |
1870 | | thread states. */ |
1871 | 0 | PyThreadState *list = interp->threads.head; |
1872 | 0 | if (list == tstate) { |
1873 | 0 | list = tstate->next; |
1874 | 0 | } |
1875 | 0 | if (tstate->prev) { |
1876 | 0 | tstate->prev->next = tstate->next; |
1877 | 0 | } |
1878 | 0 | if (tstate->next) { |
1879 | 0 | tstate->next->prev = tstate->prev; |
1880 | 0 | } |
1881 | 0 | tstate->prev = tstate->next = NULL; |
1882 | 0 | interp->threads.head = tstate; |
1883 | 0 | HEAD_UNLOCK(runtime); |
1884 | |
1885 | 0 | return list; |
1886 | 0 | } |
1887 | | |
1888 | | // Deletes the thread states in the linked list `list`. |
1889 | | // |
1890 | | // This is intended to be used in conjunction with _PyThreadState_RemoveExcept. |
1891 | | // |
1892 | | // If `is_after_fork` is true, the thread states are immediately freed. |
1893 | | // Otherwise, they are decref'd because they may still be referenced by an |
1894 | | // OS thread. |
1895 | | void |
1896 | | _PyThreadState_DeleteList(PyThreadState *list, int is_after_fork) |
1897 | 0 | { |
1898 | | // The world can't be stopped here because PyThreadState_Clear() can
1899 | | // call destructors.
1900 | 0 | assert(!_PyRuntime.stoptheworld.world_stopped); |
1901 | |
1902 | 0 | PyThreadState *p, *next; |
1903 | 0 | for (p = list; p; p = next) { |
1904 | 0 | next = p->next; |
1905 | 0 | PyThreadState_Clear(p); |
1906 | 0 | if (is_after_fork) { |
1907 | 0 | free_threadstate((_PyThreadStateImpl *)p); |
1908 | 0 | } |
1909 | 0 | else { |
1910 | 0 | decref_threadstate((_PyThreadStateImpl *)p); |
1911 | 0 | } |
1912 | 0 | } |
1913 | 0 | } |
1914 | | |
1915 | | |
1916 | | //---------- |
1917 | | // accessors |
1918 | | //---------- |
1919 | | |
1920 | | /* An extension mechanism to store arbitrary additional per-thread state. |
1921 | | PyThreadState_GetDict() returns a dictionary that can be used to hold such |
1922 | | state; the caller should pick a unique key and store its state there. If |
1923 | | PyThreadState_GetDict() returns NULL, an exception has *not* been raised |
1924 | | and the caller should assume no per-thread state is available. */ |
1925 | | |
1926 | | PyObject * |
1927 | | _PyThreadState_GetDict(PyThreadState *tstate) |
1928 | 7.83M | { |
1929 | 7.83M | assert(tstate != NULL); |
1930 | 7.83M | if (tstate->dict == NULL) { |
1931 | 1 | tstate->dict = PyDict_New(); |
1932 | 1 | if (tstate->dict == NULL) { |
1933 | 0 | _PyErr_Clear(tstate); |
1934 | 0 | } |
1935 | 1 | } |
1936 | 7.83M | return tstate->dict; |
1937 | 7.83M | } |
1938 | | |
1939 | | |
1940 | | PyObject * |
1941 | | PyThreadState_GetDict(void) |
1942 | 7.83M | { |
1943 | 7.83M | PyThreadState *tstate = current_fast_get(); |
1944 | 7.83M | if (tstate == NULL) { |
1945 | 0 | return NULL; |
1946 | 0 | } |
1947 | 7.83M | return _PyThreadState_GetDict(tstate); |
1948 | 7.83M | } |
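
A hedged usage sketch for the contract documented above: the returned dict is borrowed and shared by every extension running in the thread, so callers must namespace their key (the key and `value` below are hypothetical):

    PyObject *dict = PyThreadState_GetDict();
    if (dict == NULL) {
        /* no per-thread state available; note: no exception is set */
    }
    else if (PyDict_SetItemString(dict, "mymodule._state", value) < 0) {
        /* a real error: exception set by PyDict_SetItemString() */
    }
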
1949 | | |
1950 | | |
1951 | | PyInterpreterState * |
1952 | | PyThreadState_GetInterpreter(PyThreadState *tstate) |
1953 | 0 | { |
1954 | 0 | assert(tstate != NULL); |
1955 | 0 | return tstate->interp; |
1956 | 0 | } |
1957 | | |
1958 | | |
1959 | | PyFrameObject* |
1960 | | PyThreadState_GetFrame(PyThreadState *tstate) |
1961 | 2.35k | { |
1962 | 2.35k | assert(tstate != NULL); |
1963 | 2.35k | _PyInterpreterFrame *f = _PyThreadState_GetFrame(tstate); |
1964 | 2.35k | if (f == NULL) { |
1965 | 0 | return NULL; |
1966 | 0 | } |
1967 | 2.35k | PyFrameObject *frame = _PyFrame_GetFrameObject(f); |
1968 | 2.35k | if (frame == NULL) { |
1969 | 0 | PyErr_Clear(); |
1970 | 0 | } |
1971 | 2.35k | return (PyFrameObject*)Py_XNewRef(frame); |
1972 | 2.35k | } |
1973 | | |
1974 | | |
1975 | | uint64_t |
1976 | | PyThreadState_GetID(PyThreadState *tstate) |
1977 | 0 | { |
1978 | 0 | assert(tstate != NULL); |
1979 | 0 | return tstate->id; |
1980 | 0 | } |
1981 | | |
1982 | | |
1983 | | static inline void |
1984 | | tstate_activate(PyThreadState *tstate) |
1985 | 31.1k | { |
1986 | 31.1k | assert(tstate != NULL); |
1987 | | // XXX assert(tstate_is_alive(tstate)); |
1988 | 31.1k | assert(tstate_is_bound(tstate)); |
1989 | 31.1k | assert(!tstate->_status.active); |
1990 | | |
1991 | 31.1k | assert(!tstate->_status.bound_gilstate || |
1992 | 31.1k | tstate == gilstate_get()); |
1993 | 31.1k | if (!tstate->_status.bound_gilstate) { |
1994 | 0 | bind_gilstate_tstate(tstate); |
1995 | 0 | } |
1996 | | |
1997 | 31.1k | tstate->_status.active = 1; |
1998 | 31.1k | } |
1999 | | |
2000 | | static inline void |
2001 | | tstate_deactivate(PyThreadState *tstate) |
2002 | 31.1k | { |
2003 | 31.1k | assert(tstate != NULL); |
2004 | | // XXX assert(tstate_is_alive(tstate)); |
2005 | 31.1k | assert(tstate_is_bound(tstate)); |
2006 | 31.1k | assert(tstate->_status.active); |
2007 | | |
2008 | 31.1k | tstate->_status.active = 0; |
2009 | | |
2010 | | // We do not unbind the gilstate tstate here. |
2011 | | // It will still be used in PyGILState_Ensure(). |
2012 | 31.1k | } |
2013 | | |
2014 | | static int |
2015 | | tstate_try_attach(PyThreadState *tstate) |
2016 | 31.1k | { |
2017 | | #ifdef Py_GIL_DISABLED |
2018 | | int expected = _Py_THREAD_DETACHED; |
2019 | | return _Py_atomic_compare_exchange_int(&tstate->state, |
2020 | | &expected, |
2021 | | _Py_THREAD_ATTACHED); |
2022 | | #else |
2023 | 31.1k | assert(tstate->state == _Py_THREAD_DETACHED); |
2024 | 31.1k | tstate->state = _Py_THREAD_ATTACHED; |
2025 | 31.1k | return 1; |
2026 | 31.1k | #endif |
2027 | 31.1k | } |
2028 | | |
2029 | | static void |
2030 | | tstate_set_detached(PyThreadState *tstate, int detached_state) |
2031 | 31.1k | { |
2032 | 31.1k | assert(_Py_atomic_load_int_relaxed(&tstate->state) == _Py_THREAD_ATTACHED); |
2033 | | #ifdef Py_GIL_DISABLED |
2034 | | _Py_atomic_store_int(&tstate->state, detached_state); |
2035 | | #else |
2036 | 31.1k | tstate->state = detached_state; |
2037 | 31.1k | #endif |
2038 | 31.1k | } |
2039 | | |
2040 | | static void |
2041 | | tstate_wait_attach(PyThreadState *tstate) |
2042 | 0 | { |
2043 | 0 | do { |
2044 | 0 | int state = _Py_atomic_load_int_relaxed(&tstate->state); |
2045 | 0 | if (state == _Py_THREAD_SUSPENDED) { |
2046 | | // Wait until we're switched out of SUSPENDED to DETACHED. |
2047 | 0 | _PyParkingLot_Park(&tstate->state, &state, sizeof(tstate->state), |
2048 | 0 | /*timeout=*/-1, NULL, /*detach=*/0); |
2049 | 0 | } |
2050 | 0 | else if (state == _Py_THREAD_SHUTTING_DOWN) { |
2051 | | // We're shutting down, so we can't attach. |
2052 | 0 | _PyThreadState_HangThread(tstate); |
2053 | 0 | } |
2054 | 0 | else { |
2055 | 0 | assert(state == _Py_THREAD_DETACHED); |
2056 | 0 | } |
2057 | | // Once we're back in DETACHED we can re-attach |
2058 | 0 | } while (!tstate_try_attach(tstate)); |
2059 | 0 | } |
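
Read together, tstate_try_attach(), tstate_set_detached() and tstate_wait_attach() implement a small per-thread state machine. An informal sketch of the transitions, reconstructed from this code and the stop-the-world logic below:

    _Py_THREAD_DETACHED  --tstate_try_attach()-->    _Py_THREAD_ATTACHED
    _Py_THREAD_ATTACHED  --tstate_set_detached()-->  _Py_THREAD_DETACHED
    _Py_THREAD_ATTACHED  --_PyThreadState_Suspend--> _Py_THREAD_SUSPENDED
    _Py_THREAD_SUSPENDED --start_the_world()-->      _Py_THREAD_DETACHED
    (any)                --finalization-->           _Py_THREAD_SHUTTING_DOWN,
                                                     after which the thread hangs
                                                     instead of re-attaching
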
2060 | | |
2061 | | void |
2062 | | _PyThreadState_Attach(PyThreadState *tstate) |
2063 | 31.1k | { |
2064 | | #if defined(Py_DEBUG) |
2065 | | // This is called from PyEval_RestoreThread(). As there,
2066 | | // we need to ensure errno doesn't change.
2067 | | int err = errno; |
2068 | | #endif |
2069 | | |
2070 | 31.1k | _Py_EnsureTstateNotNULL(tstate); |
2071 | 31.1k | if (current_fast_get() != NULL) { |
2072 | 0 | Py_FatalError("non-NULL old thread state"); |
2073 | 0 | } |
2074 | 31.1k | _PyThreadStateImpl *_tstate = (_PyThreadStateImpl *)tstate; |
2075 | 31.1k | if (_tstate->c_stack_hard_limit == 0) { |
2076 | 16 | _Py_InitializeRecursionLimits(tstate); |
2077 | 16 | } |
2078 | | |
2079 | 31.1k | while (1) { |
2080 | 31.1k | _PyEval_AcquireLock(tstate); |
2081 | | |
2082 | | // XXX assert(tstate_is_alive(tstate)); |
2083 | 31.1k | current_fast_set(&_PyRuntime, tstate); |
2084 | 31.1k | if (!tstate_try_attach(tstate)) { |
2085 | 0 | tstate_wait_attach(tstate); |
2086 | 0 | } |
2087 | 31.1k | tstate_activate(tstate); |
2088 | | |
2089 | | #ifdef Py_GIL_DISABLED |
2090 | | if (_PyEval_IsGILEnabled(tstate) && !tstate->holds_gil) { |
2091 | | // The GIL was enabled between our call to _PyEval_AcquireLock() |
2092 | | // and when we attached (the GIL can't go from enabled to disabled |
2093 | | // here because only a thread holding the GIL can disable |
2094 | | // it). Detach and try again. |
2095 | | tstate_set_detached(tstate, _Py_THREAD_DETACHED); |
2096 | | tstate_deactivate(tstate); |
2097 | | current_fast_clear(&_PyRuntime); |
2098 | | continue; |
2099 | | } |
2100 | | _Py_qsbr_attach(((_PyThreadStateImpl *)tstate)->qsbr); |
2101 | | #endif |
2102 | 31.1k | break; |
2103 | 31.1k | } |
2104 | | |
2105 | | // Resume previous critical section. This acquires the lock(s) from the |
2106 | | // top-most critical section. |
2107 | 31.1k | if (tstate->critical_section != 0) { |
2108 | 0 | _PyCriticalSection_Resume(tstate); |
2109 | 0 | } |
2110 | | |
2111 | | #if defined(Py_DEBUG) |
2112 | | errno = err; |
2113 | | #endif |
2114 | 31.1k | } |
2115 | | |
2116 | | static void |
2117 | | detach_thread(PyThreadState *tstate, int detached_state) |
2118 | 31.1k | { |
2119 | | // XXX assert(tstate_is_alive(tstate) && tstate_is_bound(tstate)); |
2120 | 31.1k | assert(_Py_atomic_load_int_relaxed(&tstate->state) == _Py_THREAD_ATTACHED); |
2121 | 31.1k | assert(tstate == current_fast_get()); |
2122 | 31.1k | if (tstate->critical_section != 0) { |
2123 | 0 | _PyCriticalSection_SuspendAll(tstate); |
2124 | 0 | } |
2125 | | #ifdef Py_GIL_DISABLED |
2126 | | _Py_qsbr_detach(((_PyThreadStateImpl *)tstate)->qsbr); |
2127 | | #endif |
2128 | 31.1k | tstate_deactivate(tstate); |
2129 | 31.1k | tstate_set_detached(tstate, detached_state); |
2130 | 31.1k | current_fast_clear(&_PyRuntime); |
2131 | 31.1k | _PyEval_ReleaseLock(tstate->interp, tstate, 0); |
2132 | 31.1k | } |
2133 | | |
2134 | | void |
2135 | | _PyThreadState_Detach(PyThreadState *tstate) |
2136 | 31.1k | { |
2137 | 31.1k | detach_thread(tstate, _Py_THREAD_DETACHED); |
2138 | 31.1k | } |
2139 | | |
2140 | | void |
2141 | | _PyThreadState_Suspend(PyThreadState *tstate) |
2142 | 0 | { |
2143 | 0 | _PyRuntimeState *runtime = &_PyRuntime; |
2144 | |
2145 | 0 | assert(_Py_atomic_load_int_relaxed(&tstate->state) == _Py_THREAD_ATTACHED); |
2146 | |
2147 | 0 | struct _stoptheworld_state *stw = NULL; |
2148 | 0 | HEAD_LOCK(runtime); |
2149 | 0 | if (runtime->stoptheworld.requested) { |
2150 | 0 | stw = &runtime->stoptheworld; |
2151 | 0 | } |
2152 | 0 | else if (tstate->interp->stoptheworld.requested) { |
2153 | 0 | stw = &tstate->interp->stoptheworld; |
2154 | 0 | } |
2155 | 0 | HEAD_UNLOCK(runtime); |
2156 | |
2157 | 0 | if (stw == NULL) { |
2158 | | // Switch directly to "detached" if there is no active stop-the-world |
2159 | | // request. |
2160 | 0 | detach_thread(tstate, _Py_THREAD_DETACHED); |
2161 | 0 | return; |
2162 | 0 | } |
2163 | | |
2164 | | // Switch to "suspended" state. |
2165 | 0 | detach_thread(tstate, _Py_THREAD_SUSPENDED); |
2166 | | |
2167 | | // Decrease the count of remaining threads needing to park. |
2168 | 0 | HEAD_LOCK(runtime); |
2169 | 0 | decrement_stoptheworld_countdown(stw); |
2170 | 0 | HEAD_UNLOCK(runtime); |
2171 | 0 | } |
2172 | | |
2173 | | void |
2174 | | _PyThreadState_SetShuttingDown(PyThreadState *tstate) |
2175 | 0 | { |
2176 | 0 | _Py_atomic_store_int(&tstate->state, _Py_THREAD_SHUTTING_DOWN); |
2177 | | #ifdef Py_GIL_DISABLED |
2178 | | _PyParkingLot_UnparkAll(&tstate->state); |
2179 | | #endif |
2180 | 0 | } |
2181 | | |
2182 | | // Decrease the stop-the-world countdown of threads that still need to
2183 | | // pause. If we are the final thread to pause, notify the requesting thread.
2184 | | static void |
2185 | | decrement_stoptheworld_countdown(struct _stoptheworld_state *stw) |
2186 | 0 | { |
2187 | 0 | assert(stw->thread_countdown > 0); |
2188 | 0 | if (--stw->thread_countdown == 0) { |
2189 | 0 | _PyEvent_Notify(&stw->stop_event); |
2190 | 0 | } |
2191 | 0 | } |
2192 | | |
2193 | | #ifdef Py_GIL_DISABLED |
2194 | | // Interpreter for _Py_FOR_EACH_STW_INTERP(). For global stop-the-world events, |
2195 | | // we start with the first interpreter and then iterate over all interpreters. |
2196 | | // For per-interpreter stop-the-world events, we only operate on the one |
2197 | | // interpreter. |
2198 | | static PyInterpreterState * |
2199 | | interp_for_stop_the_world(struct _stoptheworld_state *stw) |
2200 | | { |
2201 | | return (stw->is_global |
2202 | | ? PyInterpreterState_Head() |
2203 | | : _Py_CONTAINER_OF(stw, PyInterpreterState, stoptheworld)); |
2204 | | } |
2205 | | |
2206 | | // Loops over threads for a stop-the-world event. |
2207 | | // For global: all threads in all interpreters |
2208 | | // For per-interpreter: all threads in the interpreter |
2209 | | #define _Py_FOR_EACH_STW_INTERP(stw, i) \ |
2210 | | for (PyInterpreterState *i = interp_for_stop_the_world((stw)); \ |
2211 | | i != NULL; i = ((stw->is_global) ? i->next : NULL)) |
2212 | | |
2213 | | |
2214 | | // Try to transition threads atomically from the "detached" state to the
2215 | | // "gc stopped" state. Returns true if all threads are now in "gc stopped".
2216 | | static bool |
2217 | | park_detached_threads(struct _stoptheworld_state *stw) |
2218 | | { |
2219 | | int num_parked = 0; |
2220 | | _Py_FOR_EACH_STW_INTERP(stw, i) { |
2221 | | _Py_FOR_EACH_TSTATE_UNLOCKED(i, t) { |
2222 | | int state = _Py_atomic_load_int_relaxed(&t->state); |
2223 | | if (state == _Py_THREAD_DETACHED) { |
2224 | | // Atomically transition to "suspended" if in "detached" state. |
2225 | | if (_Py_atomic_compare_exchange_int( |
2226 | | &t->state, &state, _Py_THREAD_SUSPENDED)) { |
2227 | | num_parked++; |
2228 | | } |
2229 | | } |
2230 | | else if (state == _Py_THREAD_ATTACHED && t != stw->requester) { |
2231 | | _Py_set_eval_breaker_bit(t, _PY_EVAL_PLEASE_STOP_BIT); |
2232 | | } |
2233 | | } |
2234 | | } |
2235 | | stw->thread_countdown -= num_parked; |
2236 | | assert(stw->thread_countdown >= 0); |
2237 | | return num_parked > 0 && stw->thread_countdown == 0; |
2238 | | } |
2239 | | |
2240 | | static void |
2241 | | stop_the_world(struct _stoptheworld_state *stw) |
2242 | | { |
2243 | | _PyRuntimeState *runtime = &_PyRuntime; |
2244 | | |
2245 | | PyMutex_Lock(&stw->mutex); |
2246 | | if (stw->is_global) { |
2247 | | _PyRWMutex_Lock(&runtime->stoptheworld_mutex); |
2248 | | } |
2249 | | else { |
2250 | | _PyRWMutex_RLock(&runtime->stoptheworld_mutex); |
2251 | | } |
2252 | | |
2253 | | HEAD_LOCK(runtime); |
2254 | | stw->requested = 1; |
2255 | | stw->thread_countdown = 0; |
2256 | | stw->stop_event = (PyEvent){0}; // zero-initialize (unset) |
2257 | | stw->requester = _PyThreadState_GET(); // may be NULL |
2258 | | |
2259 | | _Py_FOR_EACH_STW_INTERP(stw, i) { |
2260 | | _Py_FOR_EACH_TSTATE_UNLOCKED(i, t) { |
2261 | | if (t != stw->requester) { |
2262 | | // Count all the other threads (we don't wait on ourself). |
2263 | | stw->thread_countdown++; |
2264 | | } |
2265 | | } |
2266 | | } |
2267 | | |
2268 | | if (stw->thread_countdown == 0) { |
2269 | | HEAD_UNLOCK(runtime); |
2270 | | stw->world_stopped = 1; |
2271 | | return; |
2272 | | } |
2273 | | |
2274 | | for (;;) { |
2275 | | // Switch threads that are detached to the GC stopped state |
2276 | | bool stopped_all_threads = park_detached_threads(stw); |
2277 | | HEAD_UNLOCK(runtime); |
2278 | | |
2279 | | if (stopped_all_threads) { |
2280 | | break; |
2281 | | } |
2282 | | |
2283 | | PyTime_t wait_ns = 1000*1000; // 1ms (arbitrary, may need tuning) |
2284 | | int detach = 0; |
2285 | | if (PyEvent_WaitTimed(&stw->stop_event, wait_ns, detach)) { |
2286 | | assert(stw->thread_countdown == 0); |
2287 | | break; |
2288 | | } |
2289 | | |
2290 | | HEAD_LOCK(runtime); |
2291 | | } |
2292 | | stw->world_stopped = 1; |
2293 | | } |
2294 | | |
2295 | | static void |
2296 | | start_the_world(struct _stoptheworld_state *stw) |
2297 | | { |
2298 | | _PyRuntimeState *runtime = &_PyRuntime; |
2299 | | assert(PyMutex_IsLocked(&stw->mutex)); |
2300 | | |
2301 | | HEAD_LOCK(runtime); |
2302 | | stw->requested = 0; |
2303 | | stw->world_stopped = 0; |
2304 | | // Switch threads back to the detached state. |
2305 | | _Py_FOR_EACH_STW_INTERP(stw, i) { |
2306 | | _Py_FOR_EACH_TSTATE_UNLOCKED(i, t) { |
2307 | | if (t != stw->requester) { |
2308 | | assert(_Py_atomic_load_int_relaxed(&t->state) == |
2309 | | _Py_THREAD_SUSPENDED); |
2310 | | _Py_atomic_store_int(&t->state, _Py_THREAD_DETACHED); |
2311 | | _PyParkingLot_UnparkAll(&t->state); |
2312 | | } |
2313 | | } |
2314 | | } |
2315 | | stw->requester = NULL; |
2316 | | HEAD_UNLOCK(runtime); |
2317 | | if (stw->is_global) { |
2318 | | _PyRWMutex_Unlock(&runtime->stoptheworld_mutex); |
2319 | | } |
2320 | | else { |
2321 | | _PyRWMutex_RUnlock(&runtime->stoptheworld_mutex); |
2322 | | } |
2323 | | PyMutex_Unlock(&stw->mutex); |
2324 | | } |
2325 | | #endif // Py_GIL_DISABLED |
2326 | | |
2327 | | void |
2328 | | _PyEval_StopTheWorldAll(_PyRuntimeState *runtime) |
2329 | 0 | { |
2330 | | #ifdef Py_GIL_DISABLED |
2331 | | stop_the_world(&runtime->stoptheworld); |
2332 | | #endif |
2333 | 0 | } |
2334 | | |
2335 | | void |
2336 | | _PyEval_StartTheWorldAll(_PyRuntimeState *runtime) |
2337 | 0 | { |
2338 | | #ifdef Py_GIL_DISABLED |
2339 | | start_the_world(&runtime->stoptheworld); |
2340 | | #endif |
2341 | 0 | } |
2342 | | |
2343 | | void |
2344 | | _PyEval_StopTheWorld(PyInterpreterState *interp) |
2345 | 0 | { |
2346 | | #ifdef Py_GIL_DISABLED |
2347 | | stop_the_world(&interp->stoptheworld); |
2348 | | #endif |
2349 | 0 | } |
2350 | | |
2351 | | void |
2352 | | _PyEval_StartTheWorld(PyInterpreterState *interp) |
2353 | 0 | { |
2354 | | #ifdef Py_GIL_DISABLED |
2355 | | start_the_world(&interp->stoptheworld); |
2356 | | #endif |
2357 | 0 | } |
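
A sketch of the canonical pairing for these wrappers; it mirrors the real use in _PyInterpreterState_SetEvalFrameFunc() further down, and both calls are no-ops on GIL-enabled builds:

    _PyEval_StopTheWorld(interp);
    /* ... mutate state that lock-free readers could otherwise observe ... */
    _PyEval_StartTheWorld(interp);
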
2358 | | |
2359 | | //---------- |
2360 | | // other API |
2361 | | //---------- |
2362 | | |
2363 | | /* Asynchronously raise an exception in a thread. |
2364 | | Requested by Just van Rossum and Alex Martelli. |
2365 | | To prevent naive misuse, you must write your own extension |
2366 | | to call this, or use ctypes. Must be called with the GIL held. |
2367 | | Returns the number of tstates modified (normally 1, but 0 if `id` didn't |
2368 | | match any known thread id). Can be called with exc=NULL to clear an |
2369 | | existing async exception. This raises no exceptions. */ |
2370 | | |
2371 | | // XXX Move this to Python/ceval_gil.c? |
2372 | | // XXX Deprecate this. |
2373 | | int |
2374 | | PyThreadState_SetAsyncExc(unsigned long id, PyObject *exc) |
2375 | 0 | { |
2376 | 0 | PyInterpreterState *interp = _PyInterpreterState_GET(); |
2377 | | |
2378 | | /* Although the GIL is held, a few C API functions can be called |
2379 | | * without the GIL held, and in particular some that create and |
2380 | | * destroy thread and interpreter states. Those can mutate the |
2381 | | * list of thread states we're traversing, so to prevent that we lock |
2382 | | * head_mutex for the duration. |
2383 | | */ |
2384 | 0 | PyThreadState *tstate = NULL; |
2385 | 0 | _Py_FOR_EACH_TSTATE_BEGIN(interp, t) { |
2386 | 0 | if (t->thread_id == id) { |
2387 | 0 | tstate = t; |
2388 | 0 | break; |
2389 | 0 | } |
2390 | 0 | } |
2391 | 0 | _Py_FOR_EACH_TSTATE_END(interp); |
2392 | |
2393 | 0 | if (tstate != NULL) { |
2394 | | /* Tricky: we need to decref the current value |
2395 | | * (if any) in tstate->async_exc, but that can in turn |
2396 | | * allow arbitrary Python code to run, including |
2397 | | * perhaps calls to this function. To prevent |
2398 | | * deadlock, we need to release head_mutex before |
2399 | | * the decref. |
2400 | | */ |
2401 | 0 | Py_XINCREF(exc); |
2402 | 0 | PyObject *old_exc = _Py_atomic_exchange_ptr(&tstate->async_exc, exc); |
2403 | |
2404 | 0 | Py_XDECREF(old_exc); |
2405 | 0 | _Py_set_eval_breaker_bit(tstate, _PY_ASYNC_EXCEPTION_BIT); |
2406 | 0 | } |
2407 | |
2408 | 0 | return tstate != NULL; |
2409 | 0 | } |
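
A sketch of calling this from an extension with the GIL held, per the comment above; `target_id` is assumed to be the value of threading.get_ident() in the target thread:

    int hit = PyThreadState_SetAsyncExc(target_id, PyExc_KeyboardInterrupt);
    if (hit == 0) {
        /* no thread state matched target_id */
    }
    /* Calling again with exc=NULL clears a still-pending async exception. */
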
2410 | | |
2411 | | //--------------------------------- |
2412 | | // API for the current thread state |
2413 | | //--------------------------------- |
2414 | | |
2415 | | PyThreadState * |
2416 | | PyThreadState_GetUnchecked(void) |
2417 | 0 | { |
2418 | 0 | return current_fast_get(); |
2419 | 0 | } |
2420 | | |
2421 | | |
2422 | | PyThreadState * |
2423 | | PyThreadState_Get(void) |
2424 | 86.4M | { |
2425 | 86.4M | PyThreadState *tstate = current_fast_get(); |
2426 | 86.4M | _Py_EnsureTstateNotNULL(tstate); |
2427 | 86.4M | return tstate; |
2428 | 86.4M | } |
2429 | | |
2430 | | PyThreadState * |
2431 | | _PyThreadState_Swap(_PyRuntimeState *runtime, PyThreadState *newts) |
2432 | 0 | { |
2433 | 0 | PyThreadState *oldts = current_fast_get(); |
2434 | 0 | if (oldts != NULL) { |
2435 | 0 | _PyThreadState_Detach(oldts); |
2436 | 0 | } |
2437 | 0 | if (newts != NULL) { |
2438 | 0 | _PyThreadState_Attach(newts); |
2439 | 0 | } |
2440 | 0 | return oldts; |
2441 | 0 | } |
2442 | | |
2443 | | PyThreadState * |
2444 | | PyThreadState_Swap(PyThreadState *newts) |
2445 | 0 | { |
2446 | 0 | return _PyThreadState_Swap(&_PyRuntime, newts); |
2447 | 0 | } |
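
A sketch of the usual swap pattern, e.g. to briefly run under a sub-interpreter's thread state; `sub_ts` is assumed to be a valid thread state bound to this OS thread:

    PyThreadState *prev = PyThreadState_Swap(sub_ts);  /* detach prev, attach sub_ts */
    /* ... run code under sub_ts ... */
    PyThreadState_Swap(prev);                          /* restore the old state */
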
2448 | | |
2449 | | |
2450 | | void |
2451 | | _PyThreadState_Bind(PyThreadState *tstate) |
2452 | 16 | { |
2453 | | // gh-104690: If Python is being finalized and PyInterpreterState_Delete() |
2454 | | // was called, tstate becomes a dangling pointer. |
2455 | 16 | assert(_PyThreadState_CheckConsistency(tstate)); |
2456 | | |
2457 | 16 | bind_tstate(tstate); |
2458 | | // This makes sure there's a gilstate tstate bound |
2459 | | // as soon as possible. |
2460 | 16 | if (gilstate_get() == NULL) { |
2461 | 16 | bind_gilstate_tstate(tstate); |
2462 | 16 | } |
2463 | 16 | } |
2464 | | |
2465 | | #if defined(Py_GIL_DISABLED) && !defined(Py_LIMITED_API) |
2466 | | uintptr_t |
2467 | | _Py_GetThreadLocal_Addr(void) |
2468 | | { |
2469 | | #ifdef HAVE_THREAD_LOCAL |
2470 | | // gh-112535: Use the address of the thread-local PyThreadState variable as |
2471 | | // a unique identifier for the current thread. Each thread has a unique |
2472 | | // _Py_tss_tstate variable with a unique address. |
2473 | | return (uintptr_t)&_Py_tss_tstate; |
2474 | | #else |
2475 | | # error "no supported thread-local variable storage classifier" |
2476 | | #endif |
2477 | | } |
2478 | | #endif |
2479 | | |
2480 | | /***********************************/ |
2481 | | /* routines for advanced debuggers */ |
2482 | | /***********************************/ |
2483 | | |
2484 | | // (requested by David Beazley) |
2485 | | // Don't use unless you know what you are doing! |
2486 | | |
2487 | | PyInterpreterState * |
2488 | | PyInterpreterState_Head(void) |
2489 | 0 | { |
2490 | 0 | return _PyRuntime.interpreters.head; |
2491 | 0 | } |
2492 | | |
2493 | | PyInterpreterState * |
2494 | | PyInterpreterState_Main(void) |
2495 | 0 | { |
2496 | 0 | return _PyInterpreterState_Main(); |
2497 | 0 | } |
2498 | | |
2499 | | PyInterpreterState * |
2500 | 0 | PyInterpreterState_Next(PyInterpreterState *interp) { |
2501 | 0 | return interp->next; |
2502 | 0 | } |
2503 | | |
2504 | | PyThreadState * |
2505 | 64.1k | PyInterpreterState_ThreadHead(PyInterpreterState *interp) { |
2506 | 64.1k | return interp->threads.head; |
2507 | 64.1k | } |
2508 | | |
2509 | | PyThreadState * |
2510 | 64.1k | PyThreadState_Next(PyThreadState *tstate) { |
2511 | 64.1k | return tstate->next; |
2512 | 64.1k | } |
2513 | | |
2514 | | |
2515 | | /********************************************/ |
2516 | | /* reporting execution state of all threads */ |
2517 | | /********************************************/ |
2518 | | |
2519 | | /* The implementation of sys._current_frames(). This is intended to be |
2520 | | called with the GIL held, as it will be when called via |
2521 | | sys._current_frames(). It's possible it would work fine even without |
2522 | | the GIL held, but we haven't thought enough about that.
2523 | | */ |
2524 | | PyObject * |
2525 | | _PyThread_CurrentFrames(void) |
2526 | 0 | { |
2527 | 0 | _PyRuntimeState *runtime = &_PyRuntime; |
2528 | 0 | PyThreadState *tstate = current_fast_get(); |
2529 | 0 | if (_PySys_Audit(tstate, "sys._current_frames", NULL) < 0) { |
2530 | 0 | return NULL; |
2531 | 0 | } |
2532 | | |
2533 | 0 | PyObject *result = PyDict_New(); |
2534 | 0 | if (result == NULL) { |
2535 | 0 | return NULL; |
2536 | 0 | } |
2537 | | |
2538 | | /* for i in all interpreters: |
2539 | | * for t in all of i's thread states: |
2540 | | * if t's frame isn't NULL, map t's id to its frame |
2541 | | * Because these lists can mutate even when the GIL is held, we |
2542 | | * need to grab head_mutex for the duration. |
2543 | | */ |
2544 | 0 | _PyEval_StopTheWorldAll(runtime); |
2545 | 0 | HEAD_LOCK(runtime); |
2546 | 0 | PyInterpreterState *i; |
2547 | 0 | for (i = runtime->interpreters.head; i != NULL; i = i->next) { |
2548 | 0 | _Py_FOR_EACH_TSTATE_UNLOCKED(i, t) { |
2549 | 0 | _PyInterpreterFrame *frame = t->current_frame; |
2550 | 0 | frame = _PyFrame_GetFirstComplete(frame); |
2551 | 0 | if (frame == NULL) { |
2552 | 0 | continue; |
2553 | 0 | } |
2554 | 0 | PyObject *id = PyLong_FromUnsignedLong(t->thread_id); |
2555 | 0 | if (id == NULL) { |
2556 | 0 | goto fail; |
2557 | 0 | } |
2558 | 0 | PyObject *frameobj = (PyObject *)_PyFrame_GetFrameObject(frame); |
2559 | 0 | if (frameobj == NULL) { |
2560 | 0 | Py_DECREF(id); |
2561 | 0 | goto fail; |
2562 | 0 | } |
2563 | 0 | int stat = PyDict_SetItem(result, id, frameobj); |
2564 | 0 | Py_DECREF(id); |
2565 | 0 | if (stat < 0) { |
2566 | 0 | goto fail; |
2567 | 0 | } |
2568 | 0 | } |
2569 | 0 | } |
2570 | 0 | goto done; |
2571 | | |
2572 | 0 | fail: |
2573 | 0 | Py_CLEAR(result); |
2574 | |
2575 | 0 | done: |
2576 | 0 | HEAD_UNLOCK(runtime); |
2577 | 0 | _PyEval_StartTheWorldAll(runtime); |
2578 | 0 | return result; |
2579 | 0 | } |
2580 | | |
2581 | | /* The implementation of sys._current_exceptions(). This is intended to be |
2582 | | called with the GIL held, as it will be when called via |
2583 | | sys._current_exceptions(). It's possible it would work fine even without |
2584 | | the GIL held, but we haven't thought enough about that.
2585 | | */ |
2586 | | PyObject * |
2587 | | _PyThread_CurrentExceptions(void) |
2588 | 0 | { |
2589 | 0 | _PyRuntimeState *runtime = &_PyRuntime; |
2590 | 0 | PyThreadState *tstate = current_fast_get(); |
2591 | |
2592 | 0 | _Py_EnsureTstateNotNULL(tstate); |
2593 | |
2594 | 0 | if (_PySys_Audit(tstate, "sys._current_exceptions", NULL) < 0) { |
2595 | 0 | return NULL; |
2596 | 0 | } |
2597 | | |
2598 | 0 | PyObject *result = PyDict_New(); |
2599 | 0 | if (result == NULL) { |
2600 | 0 | return NULL; |
2601 | 0 | } |
2602 | | |
2603 | | /* for i in all interpreters: |
2604 | | * for t in all of i's thread states: |
2605 | | * if t's topmost exception isn't NULL, map t's id to that exception
2606 | | * Because these lists can mutate even when the GIL is held, we |
2607 | | * need to grab head_mutex for the duration. |
2608 | | */ |
2609 | 0 | _PyEval_StopTheWorldAll(runtime); |
2610 | 0 | HEAD_LOCK(runtime); |
2611 | 0 | PyInterpreterState *i; |
2612 | 0 | for (i = runtime->interpreters.head; i != NULL; i = i->next) { |
2613 | 0 | _Py_FOR_EACH_TSTATE_UNLOCKED(i, t) { |
2614 | 0 | _PyErr_StackItem *err_info = _PyErr_GetTopmostException(t); |
2615 | 0 | if (err_info == NULL) { |
2616 | 0 | continue; |
2617 | 0 | } |
2618 | 0 | PyObject *id = PyLong_FromUnsignedLong(t->thread_id); |
2619 | 0 | if (id == NULL) { |
2620 | 0 | goto fail; |
2621 | 0 | } |
2622 | 0 | PyObject *exc = err_info->exc_value; |
2623 | 0 | assert(exc == NULL || |
2624 | 0 | exc == Py_None || |
2625 | 0 | PyExceptionInstance_Check(exc)); |
2626 | |
2627 | 0 | int stat = PyDict_SetItem(result, id, exc == NULL ? Py_None : exc); |
2628 | 0 | Py_DECREF(id); |
2629 | 0 | if (stat < 0) { |
2630 | 0 | goto fail; |
2631 | 0 | } |
2632 | 0 | } |
2633 | 0 | } |
2634 | 0 | goto done; |
2635 | | |
2636 | 0 | fail: |
2637 | 0 | Py_CLEAR(result); |
2638 | |
2639 | 0 | done: |
2640 | 0 | HEAD_UNLOCK(runtime); |
2641 | 0 | _PyEval_StartTheWorldAll(runtime); |
2642 | 0 | return result; |
2643 | 0 | } |
2644 | | |
2645 | | |
2646 | | /***********************************/ |
2647 | | /* Python "auto thread state" API. */ |
2648 | | /***********************************/ |
2649 | | |
2650 | | /* Internal initialization/finalization functions called by |
2651 | | Py_Initialize/Py_FinalizeEx |
2652 | | */ |
2653 | | PyStatus |
2654 | | _PyGILState_Init(PyInterpreterState *interp) |
2655 | 16 | { |
2656 | 16 | if (!_Py_IsMainInterpreter(interp)) { |
2657 | | /* Currently, PyGILState is shared by all interpreters. The main |
2658 | | * interpreter is responsible for initializing it. */
2659 | 0 | return _PyStatus_OK(); |
2660 | 0 | } |
2661 | 16 | _PyRuntimeState *runtime = interp->runtime; |
2662 | 16 | assert(gilstate_get() == NULL); |
2663 | 16 | assert(runtime->gilstate.autoInterpreterState == NULL); |
2664 | 16 | runtime->gilstate.autoInterpreterState = interp; |
2665 | 16 | return _PyStatus_OK(); |
2666 | 16 | } |
2667 | | |
2668 | | void |
2669 | | _PyGILState_Fini(PyInterpreterState *interp) |
2670 | 0 | { |
2671 | 0 | if (!_Py_IsMainInterpreter(interp)) { |
2672 | | /* Currently, PyGILState is shared by all interpreters. The main |
2673 | | * interpreter is responsible for initializing it. */
2674 | 0 | return; |
2675 | 0 | } |
2676 | 0 | interp->runtime->gilstate.autoInterpreterState = NULL; |
2677 | 0 | } |
2678 | | |
2679 | | |
2680 | | // XXX Drop this. |
2681 | | void |
2682 | | _PyGILState_SetTstate(PyThreadState *tstate) |
2683 | 16 | { |
2684 | | /* must init with valid states */ |
2685 | 16 | assert(tstate != NULL); |
2686 | 16 | assert(tstate->interp != NULL); |
2687 | | |
2688 | 16 | if (!_Py_IsMainInterpreter(tstate->interp)) { |
2689 | | /* Currently, PyGILState is shared by all interpreters. The main |
2690 | | * interpreter is responsible for initializing it. */
2691 | 0 | return; |
2692 | 0 | } |
2693 | | |
2694 | | #ifndef NDEBUG |
2695 | | _PyRuntimeState *runtime = tstate->interp->runtime; |
2696 | | |
2697 | | assert(runtime->gilstate.autoInterpreterState == tstate->interp); |
2698 | | assert(gilstate_get() == tstate); |
2699 | | assert(tstate->gilstate_counter == 1); |
2700 | | #endif |
2701 | 16 | } |
2702 | | |
2703 | | PyInterpreterState * |
2704 | | _PyGILState_GetInterpreterStateUnsafe(void) |
2705 | 0 | { |
2706 | 0 | return _PyRuntime.gilstate.autoInterpreterState; |
2707 | 0 | } |
2708 | | |
2709 | | /* The public functions */ |
2710 | | |
2711 | | PyThreadState * |
2712 | | PyGILState_GetThisThreadState(void) |
2713 | 0 | { |
2714 | 0 | return gilstate_get(); |
2715 | 0 | } |
2716 | | |
2717 | | int |
2718 | | PyGILState_Check(void) |
2719 | 0 | { |
2720 | 0 | _PyRuntimeState *runtime = &_PyRuntime; |
2721 | 0 | if (!runtime->gilstate.check_enabled) { |
2722 | 0 | return 1; |
2723 | 0 | } |
2724 | | |
2725 | 0 | PyThreadState *tstate = current_fast_get(); |
2726 | 0 | if (tstate == NULL) { |
2727 | 0 | return 0; |
2728 | 0 | } |
2729 | | |
2730 | 0 | PyThreadState *tcur = gilstate_get(); |
2731 | 0 | return (tstate == tcur); |
2732 | 0 | } |
2733 | | |
2734 | | PyGILState_STATE |
2735 | | PyGILState_Ensure(void) |
2736 | 0 | { |
2737 | 0 | _PyRuntimeState *runtime = &_PyRuntime; |
2738 | | |
2739 | | /* Note that we do not auto-init Python here - apart from |
2740 | | potential races with 2 threads auto-initializing, pep-311 |
2741 | | spells out other issues. Embedders are expected to have |
2742 | | called Py_Initialize(). */ |
2743 | | |
2744 | | /* Ensure that _PyEval_InitThreads() and _PyGILState_Init() have been |
2745 | | called by Py_Initialize() |
2746 | | |
2747 | | TODO: This isn't thread-safe. There's no protection here against |
2748 | | concurrent finalization of the interpreter; it's simply a guard |
2749 | | for *after* the interpreter has finalized. |
2750 | | */ |
2751 | 0 | if (!_PyEval_ThreadsInitialized() || runtime->gilstate.autoInterpreterState == NULL) { |
2752 | 0 | PyThread_hang_thread(); |
2753 | 0 | } |
2754 | | |
2755 | 0 | PyThreadState *tcur = gilstate_get(); |
2756 | 0 | int has_gil; |
2757 | 0 | if (tcur == NULL) { |
2758 | | /* Create a new Python thread state for this thread */ |
2759 | | // XXX Use PyInterpreterState_EnsureThreadState()? |
2760 | 0 | tcur = new_threadstate(runtime->gilstate.autoInterpreterState, |
2761 | 0 | _PyThreadState_WHENCE_GILSTATE); |
2762 | 0 | if (tcur == NULL) { |
2763 | 0 | Py_FatalError("Couldn't create thread-state for new thread"); |
2764 | 0 | } |
2765 | 0 | bind_tstate(tcur); |
2766 | 0 | bind_gilstate_tstate(tcur); |
2767 | | |
2768 | | /* This is our thread state! We'll need to delete it in the |
2769 | | matching call to PyGILState_Release(). */ |
2770 | 0 | assert(tcur->gilstate_counter == 1); |
2771 | 0 | tcur->gilstate_counter = 0; |
2772 | 0 | has_gil = 0; /* new thread state is never current */ |
2773 | 0 | } |
2774 | 0 | else { |
2775 | 0 | has_gil = holds_gil(tcur); |
2776 | 0 | } |
2777 | | |
2778 | 0 | if (!has_gil) { |
2779 | 0 | PyEval_RestoreThread(tcur); |
2780 | 0 | } |
2781 | | |
2782 | | /* Update our counter in the thread-state - no need for locks: |
2783 | | - tcur will remain valid as we hold the GIL. |
2784 | | - the counter is safe as we are the only thread "allowed" |
2785 | | to modify this value |
2786 | | */ |
2787 | 0 | ++tcur->gilstate_counter; |
2788 | |
2789 | 0 | return has_gil ? PyGILState_LOCKED : PyGILState_UNLOCKED; |
2790 | 0 | } |
2791 | | |
2792 | | void |
2793 | | PyGILState_Release(PyGILState_STATE oldstate) |
2794 | 0 | { |
2795 | 0 | PyThreadState *tstate = gilstate_get(); |
2796 | 0 | if (tstate == NULL) { |
2797 | 0 | Py_FatalError("auto-releasing thread-state, " |
2798 | 0 | "but no thread-state for this thread"); |
2799 | 0 | } |
2800 | | |
2801 | | /* We must hold the GIL and have our thread state current */ |
2802 | 0 | if (!holds_gil(tstate)) { |
2803 | 0 | _Py_FatalErrorFormat(__func__, |
2804 | 0 | "thread state %p must be current when releasing", |
2805 | 0 | tstate); |
2806 | 0 | } |
2807 | 0 | --tstate->gilstate_counter; |
2808 | 0 | assert(tstate->gilstate_counter >= 0); /* illegal counter value */ |
2809 | | |
2810 | | /* If we're going to destroy this thread-state, we must |
2811 | | * clear it while the GIL is held, as destructors may run. |
2812 | | */ |
2813 | 0 | if (tstate->gilstate_counter == 0) { |
2814 | | /* can't have been locked when we created it */ |
2815 | 0 | assert(oldstate == PyGILState_UNLOCKED); |
2816 | | // XXX Unbind tstate here. |
2817 | | // gh-119585: `PyThreadState_Clear()` may call destructors that |
2818 | | // themselves use PyGILState_Ensure and PyGILState_Release, so make |
2819 | | // sure that gilstate_counter is not zero when calling it. |
2820 | 0 | ++tstate->gilstate_counter; |
2821 | 0 | PyThreadState_Clear(tstate); |
2822 | 0 | --tstate->gilstate_counter; |
2823 | | /* Delete the thread-state. Note this releases the GIL too! |
2824 | | * It's vital that the GIL be held here, to avoid shutdown |
2825 | | * races; see bugs 225673 and 1061968 (that nasty bug has a |
2826 | | * habit of coming back). |
2827 | | */ |
2828 | 0 | assert(tstate->gilstate_counter == 0); |
2829 | 0 | assert(current_fast_get() == tstate); |
2830 | 0 | _PyThreadState_DeleteCurrent(tstate); |
2831 | 0 | } |
2832 | | /* Release the lock if necessary */ |
2833 | 0 | else if (oldstate == PyGILState_UNLOCKED) { |
2834 | 0 | PyEval_SaveThread(); |
2835 | 0 | } |
2836 | 0 | } |
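
A sketch of the intended Ensure/Release pattern for a callback arriving on an arbitrary native thread; per the code above, Release deletes a thread state it created once gilstate_counter returns to zero (`callable` is an assumed PyObject *):

    static void
    on_native_event(PyObject *callable)
    {
        PyGILState_STATE st = PyGILState_Ensure();   /* may create a tstate */
        PyObject *res = PyObject_CallNoArgs(callable);
        if (res == NULL) {
            PyErr_Print();
        }
        Py_XDECREF(res);
        PyGILState_Release(st);
    }
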
2837 | | |
2838 | | |
2839 | | /*************/ |
2840 | | /* Other API */ |
2841 | | /*************/ |
2842 | | |
2843 | | _PyFrameEvalFunction |
2844 | | _PyInterpreterState_GetEvalFrameFunc(PyInterpreterState *interp) |
2845 | 0 | { |
2846 | 0 | if (interp->eval_frame == NULL) { |
2847 | 0 | return _PyEval_EvalFrameDefault; |
2848 | 0 | } |
2849 | 0 | return interp->eval_frame; |
2850 | 0 | } |
2851 | | |
2852 | | |
2853 | | void |
2854 | | _PyInterpreterState_SetEvalFrameFunc(PyInterpreterState *interp, |
2855 | | _PyFrameEvalFunction eval_frame) |
2856 | 0 | { |
2857 | 0 | if (eval_frame == _PyEval_EvalFrameDefault) { |
2858 | 0 | eval_frame = NULL; |
2859 | 0 | } |
2860 | 0 | if (eval_frame == interp->eval_frame) { |
2861 | 0 | return; |
2862 | 0 | } |
2863 | | #ifdef _Py_TIER2 |
2864 | | if (eval_frame != NULL) { |
2865 | | _Py_Executors_InvalidateAll(interp, 1); |
2866 | | } |
2867 | | #endif |
2868 | 0 | RARE_EVENT_INC(set_eval_frame_func); |
2869 | 0 | _PyEval_StopTheWorld(interp); |
2870 | 0 | interp->eval_frame = eval_frame; |
2871 | 0 | _PyEval_StartTheWorld(interp); |
2872 | 0 | } |
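
A sketch of a pass-through PEP 523 hook against this internal API; the signature shown matches the _PyFrameEvalFunction used here but is version-specific. Note from the code above that installing any non-default hook invalidates all tier-2 executors:

    static PyObject *
    trace_eval(PyThreadState *tstate, _PyInterpreterFrame *frame, int throwflag)
    {
        /* ... observe `frame` here before delegating ... */
        return _PyEval_EvalFrameDefault(tstate, frame, throwflag);
    }
    /* installed via: _PyInterpreterState_SetEvalFrameFunc(interp, trace_eval); */
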
2873 | | |
2874 | | |
2875 | | const PyConfig* |
2876 | | _PyInterpreterState_GetConfig(PyInterpreterState *interp) |
2877 | 123M | { |
2878 | 123M | return &interp->config; |
2879 | 123M | } |
2880 | | |
2881 | | |
2882 | | const PyConfig* |
2883 | | _Py_GetConfig(void) |
2884 | 39.3k | { |
2885 | 39.3k | PyThreadState *tstate = current_fast_get(); |
2886 | 39.3k | _Py_EnsureTstateNotNULL(tstate); |
2887 | 39.3k | return _PyInterpreterState_GetConfig(tstate->interp); |
2888 | 39.3k | } |
2889 | | |
2890 | | |
2891 | | int |
2892 | | _PyInterpreterState_HasFeature(PyInterpreterState *interp, unsigned long feature) |
2893 | 0 | { |
2894 | 0 | return ((interp->feature_flags & feature) != 0); |
2895 | 0 | } |
2896 | | |
2897 | | |
2898 | 158k | #define MINIMUM_OVERHEAD 1000 |
2899 | | |
2900 | | static PyObject ** |
2901 | | push_chunk(PyThreadState *tstate, int size) |
2902 | 158k | { |
2903 | 158k | int allocate_size = _PY_DATA_STACK_CHUNK_SIZE; |
2904 | 158k | while (allocate_size < (int)sizeof(PyObject*)*(size + MINIMUM_OVERHEAD)) { |
2905 | 0 | allocate_size *= 2; |
2906 | 0 | } |
2907 | 158k | _PyStackChunk *new = allocate_chunk(allocate_size, tstate->datastack_chunk); |
2908 | 158k | if (new == NULL) { |
2909 | 0 | return NULL; |
2910 | 0 | } |
2911 | 158k | if (tstate->datastack_chunk) { |
2912 | 158k | tstate->datastack_chunk->top = tstate->datastack_top - |
2913 | 158k | &tstate->datastack_chunk->data[0]; |
2914 | 158k | } |
2915 | 158k | tstate->datastack_chunk = new; |
2916 | 158k | tstate->datastack_limit = (PyObject **)(((char *)new) + allocate_size); |
2917 | | // When new is the "root" chunk (i.e. new->previous == NULL), we can keep |
2918 | | // _PyThreadState_PopFrame from freeing it later by "skipping" over the |
2919 | | // first element: |
2920 | 158k | PyObject **res = &new->data[new->previous == NULL]; |
2921 | 158k | tstate->datastack_top = res + size; |
2922 | 158k | return res; |
2923 | 158k | } |
2924 | | |
2925 | | _PyInterpreterFrame * |
2926 | | _PyThreadState_PushFrame(PyThreadState *tstate, size_t size) |
2927 | 160M | { |
2928 | 160M | assert(size < INT_MAX/sizeof(PyObject *)); |
2929 | 160M | if (_PyThreadState_HasStackSpace(tstate, (int)size)) { |
2930 | 160M | _PyInterpreterFrame *res = (_PyInterpreterFrame *)tstate->datastack_top; |
2931 | 160M | tstate->datastack_top += size; |
2932 | 160M | return res; |
2933 | 160M | } |
2934 | 158k | return (_PyInterpreterFrame *)push_chunk(tstate, (int)size); |
2935 | 160M | } |
2936 | | |
2937 | | void |
2938 | | _PyThreadState_PopFrame(PyThreadState *tstate, _PyInterpreterFrame * frame) |
2939 | 526M | { |
2940 | 526M | assert(tstate->datastack_chunk); |
2941 | 526M | PyObject **base = (PyObject **)frame; |
2942 | 526M | if (base == &tstate->datastack_chunk->data[0]) { |
2943 | 158k | _PyStackChunk *chunk = tstate->datastack_chunk; |
2944 | 158k | _PyStackChunk *previous = chunk->previous; |
2945 | | // push_chunk ensures that the root chunk is never popped: |
2946 | 158k | assert(previous); |
2947 | 158k | tstate->datastack_top = &previous->data[previous->top]; |
2948 | 158k | tstate->datastack_chunk = previous; |
2949 | 158k | _PyObject_VirtualFree(chunk, chunk->size); |
2950 | 158k | tstate->datastack_limit = (PyObject **)(((char *)previous) + previous->size); |
2951 | 158k | } |
2952 | 526M | else { |
2953 | 526M | assert(tstate->datastack_top); |
2954 | 526M | assert(tstate->datastack_top >= base); |
2955 | 526M | tstate->datastack_top = base; |
2956 | 526M | } |
2957 | 526M | } |
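
The sizing rule in push_chunk() is easiest to check in isolation: the default chunk size is doubled until it also covers the requested slots plus MINIMUM_OVERHEAD. A self-contained restatement (the 16 KiB default is an assumption; the real value is _PY_DATA_STACK_CHUNK_SIZE):

    static int
    chunk_size_for(int size)
    {
        int allocate_size = 16 * 1024;   /* assumed default chunk size */
        while (allocate_size < (int)(sizeof(PyObject *) * (size + 1000))) {
            allocate_size *= 2;
        }
        /* e.g. on 64-bit, size == 3000 needs 32000 bytes -> returns 32 KiB */
        return allocate_size;
    }
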
2958 | | |
2959 | | |
2960 | | #ifndef NDEBUG |
2961 | | // Check that a Python thread state is valid. In practice, this function is
2962 | | // used in Python debug builds to check whether 'tstate' is a dangling
2963 | | // pointer, i.e. whether the PyThreadState memory has been freed.
2964 | | // |
2965 | | // Usage: |
2966 | | // |
2967 | | // assert(_PyThreadState_CheckConsistency(tstate)); |
2968 | | int |
2969 | | _PyThreadState_CheckConsistency(PyThreadState *tstate) |
2970 | | { |
2971 | | assert(!_PyMem_IsPtrFreed(tstate)); |
2972 | | assert(!_PyMem_IsPtrFreed(tstate->interp)); |
2973 | | return 1; |
2974 | | } |
2975 | | #endif |
2976 | | |
2977 | | |
2978 | | // Check if a Python thread must call _PyThreadState_HangThread(), rather
2979 | | // than take the GIL or attach to the interpreter, once Py_Finalize() has
2980 | | // been called.
2981 | | // |
2982 | | // When this function is called by a daemon thread after Py_Finalize() has been |
2983 | | // called, the GIL may no longer exist. |
2984 | | // |
2985 | | // tstate must be non-NULL. |
2986 | | int |
2987 | | _PyThreadState_MustExit(PyThreadState *tstate) |
2988 | 62.2k | { |
2989 | 62.2k | int state = _Py_atomic_load_int_relaxed(&tstate->state); |
2990 | 62.2k | return state == _Py_THREAD_SHUTTING_DOWN; |
2991 | 62.2k | } |
2992 | | |
2993 | | void |
2994 | | _PyThreadState_HangThread(PyThreadState *tstate) |
2995 | 0 | { |
2996 | 0 | _PyThreadStateImpl *tstate_impl = (_PyThreadStateImpl *)tstate; |
2997 | 0 | decref_threadstate(tstate_impl); |
2998 | 0 | PyThread_hang_thread(); |
2999 | 0 | } |
3000 | | |
3001 | | /********************/ |
3002 | | /* mimalloc support */ |
3003 | | /********************/ |
3004 | | |
3005 | | static void |
3006 | | tstate_mimalloc_bind(PyThreadState *tstate) |
3007 | 16 | { |
3008 | | #ifdef Py_GIL_DISABLED |
3009 | | struct _mimalloc_thread_state *mts = &((_PyThreadStateImpl*)tstate)->mimalloc; |
3010 | | |
3011 | | // Initialize the mimalloc thread state. This must be called from the |
3012 | | // same thread that will use the thread state. The "mem" heap doubles as |
3013 | | // the "backing" heap. |
3014 | | mi_tld_t *tld = &mts->tld; |
3015 | | _mi_tld_init(tld, &mts->heaps[_Py_MIMALLOC_HEAP_MEM]); |
3016 | | llist_init(&mts->page_list); |
3017 | | |
3018 | | // Exiting threads push any remaining in-use segments to the abandoned |
3019 | | // pool to be re-claimed later by other threads. We use per-interpreter |
3020 | | // pools to keep Python objects from different interpreters separate. |
3021 | | tld->segments.abandoned = &tstate->interp->mimalloc.abandoned_pool; |
3022 | | |
3023 | | // Don't fill the first N bytes up to ob_type in debug builds. The lock-less
3024 | | // dict and list paths may still read ob_tid and the refcount fields, so those
3025 | | // words must remain valid for a while after deallocation.
3026 | | size_t base_offset = offsetof(PyObject, ob_type); |
3027 | | if (_PyMem_DebugEnabled()) { |
3028 | | // The debug allocator adds two words at the beginning of each block. |
3029 | | base_offset += 2 * sizeof(size_t); |
3030 | | } |
3031 | | size_t debug_offsets[_Py_MIMALLOC_HEAP_COUNT] = { |
3032 | | [_Py_MIMALLOC_HEAP_OBJECT] = base_offset, |
3033 | | [_Py_MIMALLOC_HEAP_GC] = base_offset, |
3034 | | [_Py_MIMALLOC_HEAP_GC_PRE] = base_offset + 2 * sizeof(PyObject *), |
3035 | | }; |
3036 | | |
3037 | | // Initialize each heap |
3038 | | for (uint8_t i = 0; i < _Py_MIMALLOC_HEAP_COUNT; i++) { |
3039 | | _mi_heap_init_ex(&mts->heaps[i], tld, _mi_arena_id_none(), false, i); |
3040 | | mts->heaps[i].debug_offset = (uint8_t)debug_offsets[i]; |
3041 | | } |
3042 | | |
3043 | | // Heaps that store Python objects should use QSBR to delay freeing |
3044 | | // mimalloc pages while there may be concurrent lock-free readers. |
3045 | | mts->heaps[_Py_MIMALLOC_HEAP_OBJECT].page_use_qsbr = true; |
3046 | | mts->heaps[_Py_MIMALLOC_HEAP_GC].page_use_qsbr = true; |
3047 | | mts->heaps[_Py_MIMALLOC_HEAP_GC_PRE].page_use_qsbr = true; |
3048 | | |
3049 | | // By default, object allocations use _Py_MIMALLOC_HEAP_OBJECT. |
3050 | | // _PyObject_GC_New() and similar functions temporarily override this to |
3051 | | // use one of the GC heaps. |
3052 | | mts->current_object_heap = &mts->heaps[_Py_MIMALLOC_HEAP_OBJECT]; |
3053 | | |
3054 | | _Py_atomic_store_int(&mts->initialized, 1); |
3055 | | #endif |
3056 | 16 | } |
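The debug_offsets arithmetic above is the subtle part: mimalloc's debug fill must leave the leading header words intact, because lock-free readers can still inspect ob_tid and the refcount fields shortly after deallocation. The sketch below recomputes the offsets for a modeled 64-bit header; the struct is an assumption standing in for CPython's real free-threaded PyObject layout, which lives in object.h and may differ.

    #include <stdio.h>
    #include <stddef.h>
    #include <stdint.h>

    /* Model of the free-threaded object header, for illustration only. */
    typedef struct {
        uintptr_t ob_tid;        /* owning thread id, read lock-free */
        uint16_t  ob_flags;
        uint8_t   ob_mutex;
        uint8_t   ob_gc_bits;
        uint32_t  ob_ref_local;  /* local refcount, read lock-free */
        intptr_t  ob_ref_shared; /* shared refcount, read lock-free */
        void     *ob_type;
    } model_object;

    int
    main(void)
    {
        /* Everything before ob_type must survive the debug fill. */
        size_t base_offset = offsetof(model_object, ob_type);
        int debug_allocator = 1;                 /* assume _PyMem_DebugEnabled() */
        if (debug_allocator) {
            base_offset += 2 * sizeof(size_t);   /* debug allocator prefix */
        }
        /* The GC_PRE heap additionally protects two leading GC link words. */
        size_t gc_pre_offset = base_offset + 2 * sizeof(void *);
        printf("object/gc heaps skip %zu bytes; gc_pre skips %zu\n",
               base_offset, gc_pre_offset);
        return 0;
    }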
3057 | | |
3058 | | void |
3059 | | _PyThreadState_ClearMimallocHeaps(PyThreadState *tstate) |
3060 | 0 | { |
3061 | | #ifdef Py_GIL_DISABLED |
3062 | | if (!tstate->_status.bound) { |
3063 | | // The mimalloc heaps are only initialized when the thread is bound. |
3064 | | return; |
3065 | | } |
3066 | | |
3067 | | _PyThreadStateImpl *tstate_impl = (_PyThreadStateImpl *)tstate; |
3068 | | for (Py_ssize_t i = 0; i < _Py_MIMALLOC_HEAP_COUNT; i++) { |
3069 | | // Abandon all segments in use by this thread. This pushes them to |
3070 | | // a shared pool to later be reclaimed by other threads. It's important |
3071 | | // to do this before the thread state is destroyed so that objects |
3072 | | // remain visible to the GC. |
3073 | | _mi_heap_collect_abandon(&tstate_impl->mimalloc.heaps[i]); |
3074 | | } |
3075 | | #endif |
3076 | 0 | } |
3077 | | |
3078 | | |
3079 | | int |
3080 | | _Py_IsMainThread(void) |
3081 | 114M | { |
3082 | 114M | unsigned long thread = PyThread_get_thread_ident(); |
3083 | 114M | return (thread == _PyRuntime.main_thread); |
3084 | 114M | } |
3085 | | |
3086 | | |
3087 | | PyInterpreterState * |
3088 | | _PyInterpreterState_Main(void) |
3089 | 114M | { |
3090 | 114M | return _PyRuntime.interpreters.main; |
3091 | 114M | } |
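These two lookups are often combined: code that must only run on the main thread of the main interpreter (signal handling is the classic case) can test both in a single guard. A hedged sketch follows; the helper name is hypothetical.

    /* Hypothetical guard, modeled on how CPython restricts signal
     * handling to the main thread of the main interpreter. */
    static int
    can_handle_signals(PyInterpreterState *interp)
    {
        return _Py_IsMainThread() && interp == _PyInterpreterState_Main();
    }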
3092 | | |
3093 | | |
3094 | | int |
3095 | | _Py_IsMainInterpreterFinalizing(PyInterpreterState *interp) |
3096 | 0 | { |
3097 | | /* bpo-39877: Access _PyRuntime directly rather than using |
3098 | | tstate->interp->runtime to support calls from Python daemon threads. |
3099 | | After Py_Finalize() has been called, tstate can be a dangling pointer
3100 | | to freed PyThreadState memory. */
3101 | 0 | return (_PyRuntimeState_GetFinalizing(&_PyRuntime) != NULL && |
3102 | 0 | interp == &_PyRuntime._main_interpreter); |
3103 | 0 | } |
3104 | | |
3105 | | |
3106 | | const PyConfig * |
3107 | | _Py_GetMainConfig(void) |
3108 | 0 | { |
3109 | 0 | PyInterpreterState *interp = _PyInterpreterState_Main(); |
3110 | 0 | if (interp == NULL) { |
3111 | 0 | return NULL; |
3112 | 0 | } |
3113 | 0 | return _PyInterpreterState_GetConfig(interp); |
3114 | 0 | } |
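Callers must tolerate the NULL result, which occurs before the runtime is initialized. A hedged usage sketch (the helper is hypothetical; PyConfig.verbose is a real field of the public config struct):

    /* Hypothetical helper: read a flag from the main config, falling
     * back to a default when no main interpreter exists yet. */
    static int
    main_verbose_flag(void)
    {
        const PyConfig *config = _Py_GetMainConfig();
        if (config == NULL) {
            return 0;   /* runtime not initialized */
        }
        return config->verbose;
    }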