Coverage Report

Created: 2026-01-17 06:16

next uncovered line (L), next uncovered region (R), next uncovered branch (B)
/src/cpython/Python/pystate.c
Line
Count
Source
1
2
/* Thread and interpreter state structures and their interfaces */
3
4
#include "Python.h"
5
#include "pycore_abstract.h"      // _PyIndex_Check()
6
#include "pycore_audit.h"         // _Py_AuditHookEntry
7
#include "pycore_backoff.h"       // JUMP_BACKWARD_INITIAL_VALUE, SIDE_EXIT_INITIAL_VALUE
8
#include "pycore_ceval.h"         // _PyEval_AcquireLock()
9
#include "pycore_codecs.h"        // _PyCodec_Fini()
10
#include "pycore_critical_section.h" // _PyCriticalSection_Resume()
11
#include "pycore_dtoa.h"          // _dtoa_state_INIT()
12
#include "pycore_freelist.h"      // _PyObject_ClearFreeLists()
13
#include "pycore_initconfig.h"    // _PyStatus_OK()
14
#include "pycore_interpframe.h"   // _PyThreadState_HasStackSpace()
15
#include "pycore_object.h"        // _PyType_InitCache()
16
#include "pycore_obmalloc.h"      // _PyMem_obmalloc_state_on_heap()
17
#include "pycore_optimizer.h"     // JIT_CLEANUP_THRESHOLD
18
#include "pycore_parking_lot.h"   // _PyParkingLot_AfterFork()
19
#include "pycore_pyerrors.h"      // _PyErr_Clear()
20
#include "pycore_pylifecycle.h"   // _PyAST_Fini()
21
#include "pycore_pymem.h"         // _PyMem_DebugEnabled()
22
#include "pycore_runtime.h"       // _PyRuntime
23
#include "pycore_runtime_init.h"  // _PyRuntimeState_INIT
24
#include "pycore_stackref.h"      // Py_STACKREF_DEBUG
25
#include "pycore_stats.h"         // FT_STAT_WORLD_STOP_INC()
26
#include "pycore_time.h"          // _PyTime_Init()
27
#include "pycore_uop.h"           // UOP_BUFFER_SIZE
28
#include "pycore_uniqueid.h"      // _PyObject_FinalizePerThreadRefcounts()
29
30
31
/* --------------------------------------------------------------------------
32
CAUTION
33
34
Always use PyMem_RawMalloc() and PyMem_RawFree() directly in this file.  A
35
number of these functions are advertised as safe to call when the GIL isn't
36
held, and in a debug build Python redirects (e.g.) PyMem_NEW (etc) to Python's
37
debugging obmalloc functions.  Those aren't thread-safe (they rely on the GIL
38
to avoid the expense of doing their own locking).
39
-------------------------------------------------------------------------- */
40
41
#ifdef HAVE_DLOPEN
42
#  ifdef HAVE_DLFCN_H
43
#    include <dlfcn.h>
44
#  endif
45
#  if !HAVE_DECL_RTLD_LAZY
46
#    define RTLD_LAZY 1
47
#  endif
48
#endif
49
50
51
/****************************************/
52
/* helpers for the current thread state */
53
/****************************************/
54
55
// API for the current thread state is further down.
56
57
/* "current" means one of:
58
   - bound to the current OS thread
59
   - holds the GIL
60
 */
61
62
//-------------------------------------------------
63
// a highly efficient lookup for the current thread
64
//-------------------------------------------------
65
66
/*
67
   The stored thread state is set by PyThreadState_Swap().
68
69
   For each of these functions, the GIL must be held by the current thread.
70
 */
71
72
73
/* The attached thread state for the current thread. */
74
_Py_thread_local PyThreadState *_Py_tss_tstate = NULL;
75
76
/* The "bound" thread state used by PyGILState_Ensure(),
77
   also known as a "gilstate." */
78
_Py_thread_local PyThreadState *_Py_tss_gilstate = NULL;
79
80
/* The interpreter of the attached thread state,
81
   and is same as tstate->interp. */
82
_Py_thread_local PyInterpreterState *_Py_tss_interp = NULL;
83
84
static inline PyThreadState *
85
current_fast_get(void)
86
105M
{
87
105M
    return _Py_tss_tstate;
88
105M
}
89
90
static inline void
91
current_fast_set(_PyRuntimeState *Py_UNUSED(runtime), PyThreadState *tstate)
92
3.12M
{
93
3.12M
    assert(tstate != NULL);
94
3.12M
    _Py_tss_tstate = tstate;
95
3.12M
    assert(tstate->interp != NULL);
96
3.12M
    _Py_tss_interp = tstate->interp;
97
3.12M
}
98
99
static inline void
100
current_fast_clear(_PyRuntimeState *Py_UNUSED(runtime))
101
3.12M
{
102
3.12M
    _Py_tss_tstate = NULL;
103
3.12M
    _Py_tss_interp = NULL;
104
3.12M
}
105
106
#define tstate_verify_not_active(tstate) \
107
0
    if (tstate == current_fast_get()) { \
108
0
        _Py_FatalErrorFormat(__func__, "tstate %p is still current", tstate); \
109
0
    }
110
111
PyThreadState *
112
_PyThreadState_GetCurrent(void)
113
8.82M
{
114
8.82M
    return current_fast_get();
115
8.82M
}
116
117
118
//---------------------------------------------
119
// The thread state used by PyGILState_Ensure()
120
//---------------------------------------------
121
122
/*
123
   The stored thread state is set by bind_tstate() (AKA PyThreadState_Bind().
124
125
   The GIL does no need to be held for these.
126
  */
127
128
static inline PyThreadState *
129
gilstate_get(void)
130
56
{
131
56
    return _Py_tss_gilstate;
132
56
}
133
134
static inline void
135
gilstate_set(PyThreadState *tstate)
136
28
{
137
28
    assert(tstate != NULL);
138
28
    _Py_tss_gilstate = tstate;
139
28
}
140
141
static inline void
142
gilstate_clear(void)
143
0
{
144
0
    _Py_tss_gilstate = NULL;
145
0
}
146
147
148
#ifndef NDEBUG
149
static inline int tstate_is_alive(PyThreadState *tstate);
150
151
static inline int
152
tstate_is_bound(PyThreadState *tstate)
153
{
154
    return tstate->_status.bound && !tstate->_status.unbound;
155
}
156
#endif  // !NDEBUG
157
158
static void bind_gilstate_tstate(PyThreadState *);
159
static void unbind_gilstate_tstate(PyThreadState *);
160
161
static void tstate_mimalloc_bind(PyThreadState *);
162
163
static void
164
bind_tstate(PyThreadState *tstate)
165
28
{
166
28
    assert(tstate != NULL);
167
28
    assert(tstate_is_alive(tstate) && !tstate->_status.bound);
168
28
    assert(!tstate->_status.unbound);  // just in case
169
28
    assert(!tstate->_status.bound_gilstate);
170
28
    assert(tstate != gilstate_get());
171
28
    assert(!tstate->_status.active);
172
28
    assert(tstate->thread_id == 0);
173
28
    assert(tstate->native_thread_id == 0);
174
175
    // Currently we don't necessarily store the thread state
176
    // in thread-local storage (e.g. per-interpreter).
177
178
28
    tstate->thread_id = PyThread_get_thread_ident();
179
28
#ifdef PY_HAVE_THREAD_NATIVE_ID
180
28
    tstate->native_thread_id = PyThread_get_thread_native_id();
181
28
#endif
182
183
#ifdef Py_GIL_DISABLED
184
    // Initialize biased reference counting inter-thread queue. Note that this
185
    // needs to be initialized from the active thread.
186
    _Py_brc_init_thread(tstate);
187
#endif
188
189
    // mimalloc state needs to be initialized from the active thread.
190
28
    tstate_mimalloc_bind(tstate);
191
192
28
    tstate->_status.bound = 1;
193
28
}
194
195
static void
196
unbind_tstate(PyThreadState *tstate)
197
0
{
198
0
    assert(tstate != NULL);
199
0
    assert(tstate_is_bound(tstate));
200
0
#ifndef HAVE_PTHREAD_STUBS
201
0
    assert(tstate->thread_id > 0);
202
0
#endif
203
0
#ifdef PY_HAVE_THREAD_NATIVE_ID
204
0
    assert(tstate->native_thread_id > 0);
205
0
#endif
206
207
    // We leave thread_id and native_thread_id alone
208
    // since they can be useful for debugging.
209
    // Check the `_status` field to know if these values
210
    // are still valid.
211
212
    // We leave tstate->_status.bound set to 1
213
    // to indicate it was previously bound.
214
0
    tstate->_status.unbound = 1;
215
0
}
216
217
218
/* Stick the thread state for this thread in thread specific storage.
219
220
   When a thread state is created for a thread by some mechanism
221
   other than PyGILState_Ensure(), it's important that the GILState
222
   machinery knows about it so it doesn't try to create another
223
   thread state for the thread.
224
   (This is a better fix for SF bug #1010677 than the first one attempted.)
225
226
   The only situation where you can legitimately have more than one
227
   thread state for an OS level thread is when there are multiple
228
   interpreters.
229
230
   Before 3.12, the PyGILState_*() APIs didn't work with multiple
231
   interpreters (see bpo-10915 and bpo-15751), so this function used
232
   to set TSS only once.  Thus, the first thread state created for that
233
   given OS level thread would "win", which seemed reasonable behaviour.
234
*/
235
236
static void
237
bind_gilstate_tstate(PyThreadState *tstate)
238
28
{
239
28
    assert(tstate != NULL);
240
28
    assert(tstate_is_alive(tstate));
241
28
    assert(tstate_is_bound(tstate));
242
    // XXX assert(!tstate->_status.active);
243
28
    assert(!tstate->_status.bound_gilstate);
244
245
28
    PyThreadState *tcur = gilstate_get();
246
28
    assert(tstate != tcur);
247
248
28
    if (tcur != NULL) {
249
0
        tcur->_status.bound_gilstate = 0;
250
0
    }
251
28
    gilstate_set(tstate);
252
28
    tstate->_status.bound_gilstate = 1;
253
28
}
254
255
static void
256
unbind_gilstate_tstate(PyThreadState *tstate)
257
0
{
258
0
    assert(tstate != NULL);
259
    // XXX assert(tstate_is_alive(tstate));
260
0
    assert(tstate_is_bound(tstate));
261
    // XXX assert(!tstate->_status.active);
262
0
    assert(tstate->_status.bound_gilstate);
263
0
    assert(tstate == gilstate_get());
264
0
    gilstate_clear();
265
0
    tstate->_status.bound_gilstate = 0;
266
0
}
267
268
269
//----------------------------------------------
270
// the thread state that currently holds the GIL
271
//----------------------------------------------
272
273
/* This is not exported, as it is not reliable!  It can only
274
   ever be compared to the state for the *current* thread.
275
   * If not equal, then it doesn't matter that the actual
276
     value may change immediately after comparison, as it can't
277
     possibly change to the current thread's state.
278
   * If equal, then the current thread holds the lock, so the value can't
279
     change until we yield the lock.
280
*/
281
static int
282
holds_gil(PyThreadState *tstate)
283
0
{
284
    // XXX Fall back to tstate->interp->runtime->ceval.gil.last_holder
285
    // (and tstate->interp->runtime->ceval.gil.locked).
286
0
    assert(tstate != NULL);
287
    /* Must be the tstate for this thread */
288
0
    assert(tstate == gilstate_get());
289
0
    return tstate == current_fast_get();
290
0
}
291
292
293
/****************************/
294
/* the global runtime state */
295
/****************************/
296
297
//----------
298
// lifecycle
299
//----------
300
301
/* Suppress deprecation warning for PyBytesObject.ob_shash */
302
_Py_COMP_DIAG_PUSH
303
_Py_COMP_DIAG_IGNORE_DEPR_DECLS
304
/* We use "initial" if the runtime gets re-used
305
   (e.g. Py_Finalize() followed by Py_Initialize().
306
   Note that we initialize "initial" relative to _PyRuntime,
307
   to ensure pre-initialized pointers point to the active
308
   runtime state (and not "initial"). */
309
static const _PyRuntimeState initial = _PyRuntimeState_INIT(_PyRuntime, "");
310
_Py_COMP_DIAG_POP
311
312
#define LOCKS_INIT(runtime) \
313
0
    { \
314
0
        &(runtime)->interpreters.mutex, \
315
0
        &(runtime)->xi.data_lookup.registry.mutex, \
316
0
        &(runtime)->unicode_state.ids.mutex, \
317
0
        &(runtime)->imports.extensions.mutex, \
318
0
        &(runtime)->ceval.pending_mainthread.mutex, \
319
0
        &(runtime)->atexit.mutex, \
320
0
        &(runtime)->audit_hooks.mutex, \
321
0
        &(runtime)->allocators.mutex, \
322
0
        &(runtime)->_main_interpreter.types.mutex, \
323
0
        &(runtime)->_main_interpreter.code_state.mutex, \
324
0
    }
325
326
static void
327
init_runtime(_PyRuntimeState *runtime,
328
             void *open_code_hook, void *open_code_userdata,
329
             _Py_AuditHookEntry *audit_hook_head,
330
             Py_ssize_t unicode_next_index)
331
28
{
332
28
    assert(!runtime->preinitializing);
333
28
    assert(!runtime->preinitialized);
334
28
    assert(!runtime->core_initialized);
335
28
    assert(!runtime->initialized);
336
28
    assert(!runtime->_initialized);
337
338
28
    runtime->open_code_hook = open_code_hook;
339
28
    runtime->open_code_userdata = open_code_userdata;
340
28
    runtime->audit_hooks.head = audit_hook_head;
341
342
28
    PyPreConfig_InitPythonConfig(&runtime->preconfig);
343
344
    // Set it to the ID of the main thread of the main interpreter.
345
28
    runtime->main_thread = PyThread_get_thread_ident();
346
347
28
    runtime->unicode_state.ids.next_index = unicode_next_index;
348
28
    runtime->_initialized = 1;
349
28
}
350
351
PyStatus
352
_PyRuntimeState_Init(_PyRuntimeState *runtime)
353
28
{
354
    /* We preserve the hook across init, because there is
355
       currently no public API to set it between runtime
356
       initialization and interpreter initialization. */
357
28
    void *open_code_hook = runtime->open_code_hook;
358
28
    void *open_code_userdata = runtime->open_code_userdata;
359
28
    _Py_AuditHookEntry *audit_hook_head = runtime->audit_hooks.head;
360
    // bpo-42882: Preserve next_index value if Py_Initialize()/Py_Finalize()
361
    // is called multiple times.
362
28
    Py_ssize_t unicode_next_index = runtime->unicode_state.ids.next_index;
363
364
28
    if (runtime->_initialized) {
365
        // Py_Initialize() must be running again.
366
        // Reset to _PyRuntimeState_INIT.
367
0
        memcpy(runtime, &initial, sizeof(*runtime));
368
        // Preserve the cookie from the original runtime.
369
0
        memcpy(runtime->debug_offsets.cookie, _Py_Debug_Cookie, 8);
370
0
        assert(!runtime->_initialized);
371
0
    }
372
373
28
    PyStatus status = _PyTime_Init(&runtime->time);
374
28
    if (_PyStatus_EXCEPTION(status)) {
375
0
        return status;
376
0
    }
377
378
28
    init_runtime(runtime, open_code_hook, open_code_userdata, audit_hook_head,
379
28
                 unicode_next_index);
380
381
28
    return _PyStatus_OK();
382
28
}
383
384
void
385
_PyRuntimeState_Fini(_PyRuntimeState *runtime)
386
0
{
387
#ifdef Py_REF_DEBUG
388
    /* The count is cleared by _Py_FinalizeRefTotal(). */
389
    assert(runtime->object_state.interpreter_leaks == 0);
390
#endif
391
0
    gilstate_clear();
392
0
}
393
394
#ifdef HAVE_FORK
395
/* This function is called from PyOS_AfterFork_Child to ensure that
396
   newly created child processes do not share locks with the parent. */
397
PyStatus
398
_PyRuntimeState_ReInitThreads(_PyRuntimeState *runtime)
399
0
{
400
    // This was initially set in _PyRuntimeState_Init().
401
0
    runtime->main_thread = PyThread_get_thread_ident();
402
403
    // Clears the parking lot. Any waiting threads are dead. This must be
404
    // called before releasing any locks that use the parking lot.
405
0
    _PyParkingLot_AfterFork();
406
407
    // Re-initialize global locks
408
0
    PyMutex *locks[] = LOCKS_INIT(runtime);
409
0
    for (size_t i = 0; i < Py_ARRAY_LENGTH(locks); i++) {
410
0
        _PyMutex_at_fork_reinit(locks[i]);
411
0
    }
412
#ifdef Py_GIL_DISABLED
413
    for (PyInterpreterState *interp = runtime->interpreters.head;
414
         interp != NULL; interp = interp->next)
415
    {
416
        for (int i = 0; i < NUM_WEAKREF_LIST_LOCKS; i++) {
417
            _PyMutex_at_fork_reinit(&interp->weakref_locks[i]);
418
        }
419
    }
420
#endif
421
422
0
    _PyTypes_AfterFork();
423
424
0
    _PyThread_AfterFork(&runtime->threads);
425
426
0
    return _PyStatus_OK();
427
0
}
428
#endif
429
430
431
/*************************************/
432
/* the per-interpreter runtime state */
433
/*************************************/
434
435
//----------
436
// lifecycle
437
//----------
438
439
/* Calling this indicates that the runtime is ready to create interpreters. */
440
441
PyStatus
442
_PyInterpreterState_Enable(_PyRuntimeState *runtime)
443
28
{
444
28
    struct pyinterpreters *interpreters = &runtime->interpreters;
445
28
    interpreters->next_id = 0;
446
28
    return _PyStatus_OK();
447
28
}
448
449
static PyInterpreterState *
450
alloc_interpreter(void)
451
0
{
452
    // Aligned allocation for PyInterpreterState.
453
    // the first word of the memory block is used to store
454
    // the original pointer to be used later to free the memory.
455
0
    size_t alignment = _Alignof(PyInterpreterState);
456
0
    size_t allocsize = sizeof(PyInterpreterState) + sizeof(void *) + alignment - 1;
457
0
    void *mem = PyMem_RawCalloc(1, allocsize);
458
0
    if (mem == NULL) {
459
0
        return NULL;
460
0
    }
461
0
    void *ptr = _Py_ALIGN_UP((char *)mem + sizeof(void *), alignment);
462
0
    ((void **)ptr)[-1] = mem;
463
0
    assert(_Py_IS_ALIGNED(ptr, alignment));
464
0
    return ptr;
465
0
}
466
467
static void
468
free_interpreter(PyInterpreterState *interp)
469
0
{
470
#ifdef Py_STATS
471
    if (interp->pystats_struct) {
472
        PyMem_RawFree(interp->pystats_struct);
473
        interp->pystats_struct = NULL;
474
    }
475
#endif
476
    // The main interpreter is statically allocated so
477
    // should not be freed.
478
0
    if (interp != &_PyRuntime._main_interpreter) {
479
0
        if (_PyMem_obmalloc_state_on_heap(interp)) {
480
            // interpreter has its own obmalloc state, free it
481
0
            PyMem_RawFree(interp->obmalloc);
482
0
            interp->obmalloc = NULL;
483
0
        }
484
0
        assert(_Py_IS_ALIGNED(interp, _Alignof(PyInterpreterState)));
485
0
        PyMem_RawFree(((void **)interp)[-1]);
486
0
    }
487
0
}
488
489
#ifndef NDEBUG
490
static inline int check_interpreter_whence(long);
491
#endif
492
493
extern _Py_CODEUNIT *
494
_Py_LazyJitShim(
495
    struct _PyExecutorObject *exec, _PyInterpreterFrame *frame, _PyStackRef *stack_pointer, PyThreadState *tstate
496
);
497
498
/* Get the interpreter state to a minimal consistent state.
499
   Further init happens in pylifecycle.c before it can be used.
500
   All fields not initialized here are expected to be zeroed out,
501
   e.g. by PyMem_RawCalloc() or memset(), or otherwise pre-initialized.
502
   The runtime state is not manipulated.  Instead it is assumed that
503
   the interpreter is getting added to the runtime.
504
505
   Note that the main interpreter was statically initialized as part
506
   of the runtime and most state is already set properly.  That leaves
507
   a small number of fields to initialize dynamically, as well as some
508
   that are initialized lazily.
509
510
   For subinterpreters we memcpy() the main interpreter in
511
   PyInterpreterState_New(), leaving it in the same mostly-initialized
512
   state.  The only difference is that the interpreter has some
513
   self-referential state that is statically initializexd to the
514
   main interpreter.  We fix those fields here, in addition
515
   to the other dynamically initialized fields.
516
  */
517
518
static inline bool
519
is_env_enabled(const char *env_name)
520
56
{
521
56
    char *env = Py_GETENV(env_name);
522
56
    return env && *env != '\0' && *env != '0';
523
56
}
524
525
static inline void
526
init_policy(uint16_t *target, const char *env_name, uint16_t default_value,
527
            long min_value, long max_value)
528
112
{
529
112
    *target = default_value;
530
112
    char *env = Py_GETENV(env_name);
531
112
    if (env && *env != '\0') {
532
0
        long value = atol(env);
533
0
        if (value >= min_value && value <= max_value) {
534
0
            *target = (uint16_t)value;
535
0
        }
536
0
    }
537
112
}
538
539
static PyStatus
540
init_interpreter(PyInterpreterState *interp,
541
                 _PyRuntimeState *runtime, int64_t id,
542
                 PyInterpreterState *next,
543
                 long whence)
544
28
{
545
28
    if (interp->_initialized) {
546
0
        return _PyStatus_ERR("interpreter already initialized");
547
0
    }
548
549
28
    assert(interp->_whence == _PyInterpreterState_WHENCE_NOTSET);
550
28
    assert(check_interpreter_whence(whence) == 0);
551
28
    interp->_whence = whence;
552
553
28
    assert(runtime != NULL);
554
28
    interp->runtime = runtime;
555
556
28
    assert(id > 0 || (id == 0 && interp == runtime->interpreters.main));
557
28
    interp->id = id;
558
559
28
    interp->id_refcount = 0;
560
561
28
    assert(runtime->interpreters.head == interp);
562
28
    assert(next != NULL || (interp == runtime->interpreters.main));
563
28
    interp->next = next;
564
565
28
    interp->threads.preallocated = &interp->_initial_thread;
566
567
    // We would call _PyObject_InitState() at this point
568
    // if interp->feature_flags were alredy set.
569
570
28
    _PyEval_InitState(interp);
571
28
    _PyGC_InitState(&interp->gc);
572
28
    PyConfig_InitPythonConfig(&interp->config);
573
28
    _PyType_InitCache(interp);
574
#ifdef Py_GIL_DISABLED
575
    _Py_brc_init_state(interp);
576
#endif
577
578
28
    llist_init(&interp->mem_free_queue.head);
579
28
    llist_init(&interp->asyncio_tasks_head);
580
28
    interp->asyncio_tasks_lock = (PyMutex){0};
581
476
    for (int i = 0; i < _PY_MONITORING_UNGROUPED_EVENTS; i++) {
582
448
        interp->monitors.tools[i] = 0;
583
448
    }
584
252
    for (int t = 0; t < PY_MONITORING_TOOL_IDS; t++) {
585
4.48k
        for (int e = 0; e < _PY_MONITORING_EVENTS; e++) {
586
4.25k
            interp->monitoring_callables[t][e] = NULL;
587
588
4.25k
        }
589
224
        interp->monitoring_tool_versions[t] = 0;
590
224
    }
591
28
    interp->_code_object_generation = 0;
592
28
    interp->jit = false;
593
28
    interp->compiling = false;
594
28
    interp->executor_list_head = NULL;
595
28
    interp->executor_deletion_list_head = NULL;
596
28
    interp->executor_creation_counter = JIT_CLEANUP_THRESHOLD;
597
598
    // Initialize optimization configuration from environment variables
599
    // PYTHON_JIT_STRESS sets aggressive defaults for testing, but can be overridden
600
28
    uint16_t jump_default = JUMP_BACKWARD_INITIAL_VALUE;
601
28
    uint16_t side_exit_default = SIDE_EXIT_INITIAL_VALUE;
602
603
28
    if (is_env_enabled("PYTHON_JIT_STRESS")) {
604
0
        jump_default = 63;
605
0
        side_exit_default = 63;
606
0
    }
607
608
28
    init_policy(&interp->opt_config.jump_backward_initial_value,
609
28
                "PYTHON_JIT_JUMP_BACKWARD_INITIAL_VALUE",
610
28
                jump_default, 1, MAX_VALUE);
611
28
    init_policy(&interp->opt_config.jump_backward_initial_backoff,
612
28
                "PYTHON_JIT_JUMP_BACKWARD_INITIAL_BACKOFF",
613
28
                JUMP_BACKWARD_INITIAL_BACKOFF, 0, MAX_BACKOFF);
614
28
    init_policy(&interp->opt_config.side_exit_initial_value,
615
28
                "PYTHON_JIT_SIDE_EXIT_INITIAL_VALUE",
616
28
                side_exit_default, 1, MAX_VALUE);
617
28
    init_policy(&interp->opt_config.side_exit_initial_backoff,
618
28
                "PYTHON_JIT_SIDE_EXIT_INITIAL_BACKOFF",
619
28
                SIDE_EXIT_INITIAL_BACKOFF, 0, MAX_BACKOFF);
620
621
28
    interp->opt_config.specialization_enabled = !is_env_enabled("PYTHON_SPECIALIZATION_OFF");
622
28
    if (interp != &runtime->_main_interpreter) {
623
        /* Fix the self-referential, statically initialized fields. */
624
0
        interp->dtoa = (struct _dtoa_state)_dtoa_state_INIT(interp);
625
0
    }
626
#if !defined(Py_GIL_DISABLED) && defined(Py_STACKREF_DEBUG)
627
    interp->next_stackref = INITIAL_STACKREF_INDEX;
628
    _Py_hashtable_allocator_t alloc = {
629
        .malloc = malloc,
630
        .free = free,
631
    };
632
    interp->open_stackrefs_table = _Py_hashtable_new_full(
633
        _Py_hashtable_hash_ptr,
634
        _Py_hashtable_compare_direct,
635
        NULL,
636
        NULL,
637
        &alloc
638
    );
639
#  ifdef Py_STACKREF_CLOSE_DEBUG
640
    interp->closed_stackrefs_table = _Py_hashtable_new_full(
641
        _Py_hashtable_hash_ptr,
642
        _Py_hashtable_compare_direct,
643
        NULL,
644
        NULL,
645
        &alloc
646
    );
647
#  endif
648
    _Py_stackref_associate(interp, Py_None, PyStackRef_None);
649
    _Py_stackref_associate(interp, Py_False, PyStackRef_False);
650
    _Py_stackref_associate(interp, Py_True, PyStackRef_True);
651
#endif
652
653
28
    interp->_initialized = 1;
654
28
    return _PyStatus_OK();
655
28
}
656
657
658
PyStatus
659
_PyInterpreterState_New(PyThreadState *tstate, PyInterpreterState **pinterp)
660
28
{
661
28
    *pinterp = NULL;
662
663
    // Don't get runtime from tstate since tstate can be NULL
664
28
    _PyRuntimeState *runtime = &_PyRuntime;
665
666
    // tstate is NULL when pycore_create_interpreter() calls
667
    // _PyInterpreterState_New() to create the main interpreter.
668
28
    if (tstate != NULL) {
669
0
        if (_PySys_Audit(tstate, "cpython.PyInterpreterState_New", NULL) < 0) {
670
0
            return _PyStatus_ERR("sys.audit failed");
671
0
        }
672
0
    }
673
674
    /* We completely serialize creation of multiple interpreters, since
675
       it simplifies things here and blocking concurrent calls isn't a problem.
676
       Regardless, we must fully block subinterpreter creation until
677
       after the main interpreter is created. */
678
28
    HEAD_LOCK(runtime);
679
680
28
    struct pyinterpreters *interpreters = &runtime->interpreters;
681
28
    int64_t id = interpreters->next_id;
682
28
    interpreters->next_id += 1;
683
684
    // Allocate the interpreter and add it to the runtime state.
685
28
    PyInterpreterState *interp;
686
28
    PyStatus status;
687
28
    PyInterpreterState *old_head = interpreters->head;
688
28
    if (old_head == NULL) {
689
        // We are creating the main interpreter.
690
28
        assert(interpreters->main == NULL);
691
28
        assert(id == 0);
692
693
28
        interp = &runtime->_main_interpreter;
694
28
        assert(interp->id == 0);
695
28
        assert(interp->next == NULL);
696
697
28
        interpreters->main = interp;
698
28
    }
699
0
    else {
700
0
        assert(interpreters->main != NULL);
701
0
        assert(id != 0);
702
703
0
        interp = alloc_interpreter();
704
0
        if (interp == NULL) {
705
0
            status = _PyStatus_NO_MEMORY();
706
0
            goto error;
707
0
        }
708
        // Set to _PyInterpreterState_INIT.
709
0
        memcpy(interp, &initial._main_interpreter, sizeof(*interp));
710
711
0
        if (id < 0) {
712
            /* overflow or Py_Initialize() not called yet! */
713
0
            status = _PyStatus_ERR("failed to get an interpreter ID");
714
0
            goto error;
715
0
        }
716
0
    }
717
28
    interpreters->head = interp;
718
719
28
    long whence = _PyInterpreterState_WHENCE_UNKNOWN;
720
28
    status = init_interpreter(interp, runtime,
721
28
                              id, old_head, whence);
722
28
    if (_PyStatus_EXCEPTION(status)) {
723
0
        goto error;
724
0
    }
725
726
28
    HEAD_UNLOCK(runtime);
727
728
28
    assert(interp != NULL);
729
28
    *pinterp = interp;
730
28
    return _PyStatus_OK();
731
732
0
error:
733
0
    HEAD_UNLOCK(runtime);
734
735
0
    if (interp != NULL) {
736
0
        free_interpreter(interp);
737
0
    }
738
0
    return status;
739
28
}
740
741
742
PyInterpreterState *
743
PyInterpreterState_New(void)
744
0
{
745
    // tstate can be NULL
746
0
    PyThreadState *tstate = current_fast_get();
747
748
0
    PyInterpreterState *interp;
749
0
    PyStatus status = _PyInterpreterState_New(tstate, &interp);
750
0
    if (_PyStatus_EXCEPTION(status)) {
751
0
        Py_ExitStatusException(status);
752
0
    }
753
0
    assert(interp != NULL);
754
0
    return interp;
755
0
}
756
757
#if !defined(Py_GIL_DISABLED) && defined(Py_STACKREF_DEBUG)
758
extern void
759
_Py_stackref_report_leaks(PyInterpreterState *interp);
760
#endif
761
762
static void
763
interpreter_clear(PyInterpreterState *interp, PyThreadState *tstate)
764
0
{
765
0
    assert(interp != NULL);
766
0
    assert(tstate != NULL);
767
0
    _PyRuntimeState *runtime = interp->runtime;
768
769
    /* XXX Conditions we need to enforce:
770
771
       * the GIL must be held by the current thread
772
       * tstate must be the "current" thread state (current_fast_get())
773
       * tstate->interp must be interp
774
       * for the main interpreter, tstate must be the main thread
775
     */
776
    // XXX Ideally, we would not rely on any thread state in this function
777
    // (and we would drop the "tstate" argument).
778
779
0
    if (_PySys_Audit(tstate, "cpython.PyInterpreterState_Clear", NULL) < 0) {
780
0
        _PyErr_Clear(tstate);
781
0
    }
782
783
    // Clear the current/main thread state last.
784
0
    _Py_FOR_EACH_TSTATE_BEGIN(interp, p) {
785
        // See https://github.com/python/cpython/issues/102126
786
        // Must be called without HEAD_LOCK held as it can deadlock
787
        // if any finalizer tries to acquire that lock.
788
0
        HEAD_UNLOCK(runtime);
789
0
        PyThreadState_Clear(p);
790
0
        HEAD_LOCK(runtime);
791
0
    }
792
0
    _Py_FOR_EACH_TSTATE_END(interp);
793
0
    if (tstate->interp == interp) {
794
        /* We fix tstate->_status below when we for sure aren't using it
795
           (e.g. no longer need the GIL). */
796
        // XXX Eliminate the need to do this.
797
0
        tstate->_status.cleared = 0;
798
0
    }
799
800
    /* It is possible that any of the objects below have a finalizer
801
       that runs Python code or otherwise relies on a thread state
802
       or even the interpreter state.  For now we trust that isn't
803
       a problem.
804
     */
805
    // XXX Make sure we properly deal with problematic finalizers.
806
807
0
    Py_CLEAR(interp->audit_hooks);
808
809
    // gh-140257: Threads have already been cleared, but daemon threads may
810
    // still access eval_breaker atomically via take_gil() right before they
811
    // hang. Use an atomic store to prevent data races during finalization.
812
0
    interp->ceval.instrumentation_version = 0;
813
0
    _Py_atomic_store_uintptr(&tstate->eval_breaker, 0);
814
815
0
    for (int i = 0; i < _PY_MONITORING_UNGROUPED_EVENTS; i++) {
816
0
        interp->monitors.tools[i] = 0;
817
0
    }
818
0
    for (int t = 0; t < PY_MONITORING_TOOL_IDS; t++) {
819
0
        for (int e = 0; e < _PY_MONITORING_EVENTS; e++) {
820
0
            Py_CLEAR(interp->monitoring_callables[t][e]);
821
0
        }
822
0
    }
823
0
    for (int t = 0; t < PY_MONITORING_TOOL_IDS; t++) {
824
0
        Py_CLEAR(interp->monitoring_tool_names[t]);
825
0
    }
826
0
    interp->_code_object_generation = 0;
827
#ifdef Py_GIL_DISABLED
828
    interp->tlbc_indices.tlbc_generation = 0;
829
#endif
830
831
0
    PyConfig_Clear(&interp->config);
832
0
    _PyCodec_Fini(interp);
833
834
0
    assert(interp->imports.modules == NULL);
835
0
    assert(interp->imports.modules_by_index == NULL);
836
0
    assert(interp->imports.importlib == NULL);
837
0
    assert(interp->imports.import_func == NULL);
838
839
0
    Py_CLEAR(interp->sysdict_copy);
840
0
    Py_CLEAR(interp->builtins_copy);
841
0
    Py_CLEAR(interp->dict);
842
0
#ifdef HAVE_FORK
843
0
    Py_CLEAR(interp->before_forkers);
844
0
    Py_CLEAR(interp->after_forkers_parent);
845
0
    Py_CLEAR(interp->after_forkers_child);
846
0
#endif
847
848
849
#ifdef _Py_TIER2
850
    _Py_ClearExecutorDeletionList(interp);
851
#endif
852
0
    _PyAST_Fini(interp);
853
0
    _PyAtExit_Fini(interp);
854
855
    // All Python types must be destroyed before the last GC collection. Python
856
    // types create a reference cycle to themselves in their in their
857
    // PyTypeObject.tp_mro member (the tuple contains the type).
858
859
    /* Last garbage collection on this interpreter */
860
0
    _PyGC_CollectNoFail(tstate);
861
0
    _PyGC_Fini(interp);
862
863
    // Finalize warnings after last gc so that any finalizers can
864
    // access warnings state
865
0
    _PyWarnings_Fini(interp);
866
0
    struct _PyExecutorObject *cold = interp->cold_executor;
867
0
    if (cold != NULL) {
868
0
        interp->cold_executor = NULL;
869
0
        assert(cold->vm_data.valid);
870
0
        assert(!cold->vm_data.cold);
871
0
        _PyExecutor_Free(cold);
872
0
    }
873
874
0
    struct _PyExecutorObject *cold_dynamic = interp->cold_dynamic_executor;
875
0
    if (cold_dynamic != NULL) {
876
0
        interp->cold_dynamic_executor = NULL;
877
0
        assert(cold_dynamic->vm_data.valid);
878
0
        assert(!cold_dynamic->vm_data.cold);
879
0
        _PyExecutor_Free(cold_dynamic);
880
0
    }
881
    /* We don't clear sysdict and builtins until the end of this function.
882
       Because clearing other attributes can execute arbitrary Python code
883
       which requires sysdict and builtins. */
884
0
    PyDict_Clear(interp->sysdict);
885
0
    PyDict_Clear(interp->builtins);
886
0
    Py_CLEAR(interp->sysdict);
887
0
    Py_CLEAR(interp->builtins);
888
889
#if !defined(Py_GIL_DISABLED) && defined(Py_STACKREF_DEBUG)
890
#  ifdef Py_STACKREF_CLOSE_DEBUG
891
    _Py_hashtable_destroy(interp->closed_stackrefs_table);
892
    interp->closed_stackrefs_table = NULL;
893
#  endif
894
    _Py_stackref_report_leaks(interp);
895
    _Py_hashtable_destroy(interp->open_stackrefs_table);
896
    interp->open_stackrefs_table = NULL;
897
#endif
898
899
0
    if (tstate->interp == interp) {
900
        /* We are now safe to fix tstate->_status.cleared. */
901
        // XXX Do this (much) earlier?
902
0
        tstate->_status.cleared = 1;
903
0
    }
904
905
0
    for (int i=0; i < DICT_MAX_WATCHERS; i++) {
906
0
        interp->dict_state.watchers[i] = NULL;
907
0
    }
908
909
0
    for (int i=0; i < TYPE_MAX_WATCHERS; i++) {
910
0
        interp->type_watchers[i] = NULL;
911
0
    }
912
913
0
    for (int i=0; i < FUNC_MAX_WATCHERS; i++) {
914
0
        interp->func_watchers[i] = NULL;
915
0
    }
916
0
    interp->active_func_watchers = 0;
917
918
0
    for (int i=0; i < CODE_MAX_WATCHERS; i++) {
919
0
        interp->code_watchers[i] = NULL;
920
0
    }
921
0
    interp->active_code_watchers = 0;
922
923
0
    for (int i=0; i < CONTEXT_MAX_WATCHERS; i++) {
924
0
        interp->context_watchers[i] = NULL;
925
0
    }
926
0
    interp->active_context_watchers = 0;
927
    // XXX Once we have one allocator per interpreter (i.e.
928
    // per-interpreter GC) we must ensure that all of the interpreter's
929
    // objects have been cleaned up at the point.
930
931
    // We could clear interp->threads.freelist here
932
    // if it held more than just the initial thread state.
933
0
}
934
935
936
void
937
PyInterpreterState_Clear(PyInterpreterState *interp)
938
0
{
939
    // Use the current Python thread state to call audit hooks and to collect
940
    // garbage. It can be different than the current Python thread state
941
    // of 'interp'.
942
0
    PyThreadState *current_tstate = current_fast_get();
943
0
    _PyImport_ClearCore(interp);
944
0
    interpreter_clear(interp, current_tstate);
945
0
}
946
947
948
void
949
_PyInterpreterState_Clear(PyThreadState *tstate)
950
0
{
951
0
    _PyImport_ClearCore(tstate->interp);
952
0
    interpreter_clear(tstate->interp, tstate);
953
0
}
954
955
956
static inline void tstate_deactivate(PyThreadState *tstate);
957
static void tstate_set_detached(PyThreadState *tstate, int detached_state);
958
static void zapthreads(PyInterpreterState *interp);
959
960
void
961
PyInterpreterState_Delete(PyInterpreterState *interp)
962
0
{
963
0
    _PyRuntimeState *runtime = interp->runtime;
964
0
    struct pyinterpreters *interpreters = &runtime->interpreters;
965
966
    // XXX Clearing the "current" thread state should happen before
967
    // we start finalizing the interpreter (or the current thread state).
968
0
    PyThreadState *tcur = current_fast_get();
969
0
    if (tcur != NULL && interp == tcur->interp) {
970
        /* Unset current thread.  After this, many C API calls become crashy. */
971
0
        _PyThreadState_Detach(tcur);
972
0
    }
973
974
0
    zapthreads(interp);
975
976
    // XXX These two calls should be done at the end of clear_interpreter(),
977
    // but currently some objects get decref'ed after that.
978
#ifdef Py_REF_DEBUG
979
    _PyInterpreterState_FinalizeRefTotal(interp);
980
#endif
981
0
    _PyInterpreterState_FinalizeAllocatedBlocks(interp);
982
983
0
    HEAD_LOCK(runtime);
984
0
    PyInterpreterState **p;
985
0
    for (p = &interpreters->head; ; p = &(*p)->next) {
986
0
        if (*p == NULL) {
987
0
            Py_FatalError("NULL interpreter");
988
0
        }
989
0
        if (*p == interp) {
990
0
            break;
991
0
        }
992
0
    }
993
0
    if (interp->threads.head != NULL) {
994
0
        Py_FatalError("remaining threads");
995
0
    }
996
0
    *p = interp->next;
997
998
0
    if (interpreters->main == interp) {
999
0
        interpreters->main = NULL;
1000
0
        if (interpreters->head != NULL) {
1001
0
            Py_FatalError("remaining subinterpreters");
1002
0
        }
1003
0
    }
1004
0
    HEAD_UNLOCK(runtime);
1005
1006
0
    _Py_qsbr_fini(interp);
1007
1008
0
    _PyObject_FiniState(interp);
1009
1010
0
    PyConfig_Clear(&interp->config);
1011
1012
0
    free_interpreter(interp);
1013
0
}
1014
1015
1016
#ifdef HAVE_FORK
1017
/*
1018
 * Delete all interpreter states except the main interpreter.  If there
1019
 * is a current interpreter state, it *must* be the main interpreter.
1020
 */
1021
PyStatus
1022
_PyInterpreterState_DeleteExceptMain(_PyRuntimeState *runtime)
1023
0
{
1024
0
    struct pyinterpreters *interpreters = &runtime->interpreters;
1025
1026
0
    PyThreadState *tstate = _PyThreadState_Swap(runtime, NULL);
1027
0
    if (tstate != NULL && tstate->interp != interpreters->main) {
1028
0
        return _PyStatus_ERR("not main interpreter");
1029
0
    }
1030
1031
0
    HEAD_LOCK(runtime);
1032
0
    PyInterpreterState *interp = interpreters->head;
1033
0
    interpreters->head = NULL;
1034
0
    while (interp != NULL) {
1035
0
        if (interp == interpreters->main) {
1036
0
            interpreters->main->next = NULL;
1037
0
            interpreters->head = interp;
1038
0
            interp = interp->next;
1039
0
            continue;
1040
0
        }
1041
1042
        // XXX Won't this fail since PyInterpreterState_Clear() requires
1043
        // the "current" tstate to be set?
1044
0
        PyInterpreterState_Clear(interp);  // XXX must activate?
1045
0
        zapthreads(interp);
1046
0
        PyInterpreterState *prev_interp = interp;
1047
0
        interp = interp->next;
1048
0
        free_interpreter(prev_interp);
1049
0
    }
1050
0
    HEAD_UNLOCK(runtime);
1051
1052
0
    if (interpreters->head == NULL) {
1053
0
        return _PyStatus_ERR("missing main interpreter");
1054
0
    }
1055
0
    _PyThreadState_Swap(runtime, tstate);
1056
0
    return _PyStatus_OK();
1057
0
}
1058
#endif
1059
1060
static inline void
1061
set_main_thread(PyInterpreterState *interp, PyThreadState *tstate)
1062
0
{
1063
0
    _Py_atomic_store_ptr_relaxed(&interp->threads.main, tstate);
1064
0
}
1065
1066
static inline PyThreadState *
1067
get_main_thread(PyInterpreterState *interp)
1068
0
{
1069
0
    return _Py_atomic_load_ptr_relaxed(&interp->threads.main);
1070
0
}
1071
1072
void
1073
_PyErr_SetInterpreterAlreadyRunning(void)
1074
0
{
1075
0
    PyErr_SetString(PyExc_InterpreterError, "interpreter already running");
1076
0
}
1077
1078
int
1079
_PyInterpreterState_SetRunningMain(PyInterpreterState *interp)
1080
0
{
1081
0
    if (get_main_thread(interp) != NULL) {
1082
0
        _PyErr_SetInterpreterAlreadyRunning();
1083
0
        return -1;
1084
0
    }
1085
0
    PyThreadState *tstate = current_fast_get();
1086
0
    _Py_EnsureTstateNotNULL(tstate);
1087
0
    if (tstate->interp != interp) {
1088
0
        PyErr_SetString(PyExc_RuntimeError,
1089
0
                        "current tstate has wrong interpreter");
1090
0
        return -1;
1091
0
    }
1092
0
    set_main_thread(interp, tstate);
1093
1094
0
    return 0;
1095
0
}
1096
1097
void
1098
_PyInterpreterState_SetNotRunningMain(PyInterpreterState *interp)
1099
0
{
1100
0
    assert(get_main_thread(interp) == current_fast_get());
1101
0
    set_main_thread(interp, NULL);
1102
0
}
1103
1104
int
1105
_PyInterpreterState_IsRunningMain(PyInterpreterState *interp)
1106
0
{
1107
0
    if (get_main_thread(interp) != NULL) {
1108
0
        return 1;
1109
0
    }
1110
    // Embedders might not know to call _PyInterpreterState_SetRunningMain(),
1111
    // so their main thread wouldn't show it is running the main interpreter's
1112
    // program.  (Py_Main() doesn't have this problem.)  For now this isn't
1113
    // critical.  If it were, we would need to infer "running main" from other
1114
    // information, like if it's the main interpreter.  We used to do that
1115
    // but the naive approach led to some inconsistencies that caused problems.
1116
0
    return 0;
1117
0
}
1118
1119
int
1120
_PyThreadState_IsRunningMain(PyThreadState *tstate)
1121
0
{
1122
0
    PyInterpreterState *interp = tstate->interp;
1123
    // See the note in _PyInterpreterState_IsRunningMain() about
1124
    // possible false negatives here for embedders.
1125
0
    return get_main_thread(interp) == tstate;
1126
0
}
1127
1128
void
1129
_PyInterpreterState_ReinitRunningMain(PyThreadState *tstate)
1130
0
{
1131
0
    PyInterpreterState *interp = tstate->interp;
1132
0
    if (get_main_thread(interp) != tstate) {
1133
0
        set_main_thread(interp, NULL);
1134
0
    }
1135
0
}
1136
1137
1138
//----------
1139
// accessors
1140
//----------
1141
1142
int
1143
_PyInterpreterState_IsReady(PyInterpreterState *interp)
1144
0
{
1145
0
    return interp->_ready;
1146
0
}
1147
1148
#ifndef NDEBUG
1149
static inline int
1150
check_interpreter_whence(long whence)
1151
{
1152
    if(whence < 0) {
1153
        return -1;
1154
    }
1155
    if (whence > _PyInterpreterState_WHENCE_MAX) {
1156
        return -1;
1157
    }
1158
    return 0;
1159
}
1160
#endif
1161
1162
long
1163
_PyInterpreterState_GetWhence(PyInterpreterState *interp)
1164
0
{
1165
0
    assert(check_interpreter_whence(interp->_whence) == 0);
1166
0
    return interp->_whence;
1167
0
}
1168
1169
void
1170
_PyInterpreterState_SetWhence(PyInterpreterState *interp, long whence)
1171
28
{
1172
28
    assert(interp->_whence != _PyInterpreterState_WHENCE_NOTSET);
1173
28
    assert(check_interpreter_whence(whence) == 0);
1174
28
    interp->_whence = whence;
1175
28
}
1176
1177
1178
PyObject *
1179
_Py_GetMainModule(PyThreadState *tstate)
1180
0
{
1181
    // We return None to indicate "not found" or "bogus".
1182
0
    PyObject *modules = _PyImport_GetModulesRef(tstate->interp);
1183
0
    if (modules == Py_None) {
1184
0
        return modules;
1185
0
    }
1186
0
    PyObject *module = NULL;
1187
0
    (void)PyMapping_GetOptionalItem(modules, &_Py_ID(__main__), &module);
1188
0
    Py_DECREF(modules);
1189
0
    if (module == NULL && !PyErr_Occurred()) {
1190
0
        Py_RETURN_NONE;
1191
0
    }
1192
0
    return module;
1193
0
}
1194
1195
int
1196
_Py_CheckMainModule(PyObject *module)
1197
0
{
1198
0
    if (module == NULL || module == Py_None) {
1199
0
        if (!PyErr_Occurred()) {
1200
0
            (void)_PyErr_SetModuleNotFoundError(&_Py_ID(__main__));
1201
0
        }
1202
0
        return -1;
1203
0
    }
1204
0
    if (!Py_IS_TYPE(module, &PyModule_Type)) {
1205
        /* The __main__ module has been tampered with. */
1206
0
        PyObject *msg = PyUnicode_FromString("invalid __main__ module");
1207
0
        if (msg != NULL) {
1208
0
            (void)PyErr_SetImportError(msg, &_Py_ID(__main__), NULL);
1209
0
            Py_DECREF(msg);
1210
0
        }
1211
0
        return -1;
1212
0
    }
1213
0
    return 0;
1214
0
}
1215
1216
1217
PyObject *
1218
PyInterpreterState_GetDict(PyInterpreterState *interp)
1219
38
{
1220
38
    if (interp->dict == NULL) {
1221
10
        interp->dict = PyDict_New();
1222
10
        if (interp->dict == NULL) {
1223
0
            PyErr_Clear();
1224
0
        }
1225
10
    }
1226
    /* Returning NULL means no per-interpreter dict is available. */
1227
38
    return interp->dict;
1228
38
}
1229
1230
1231
//----------
1232
// interp ID
1233
//----------
1234
1235
int64_t
1236
_PyInterpreterState_ObjectToID(PyObject *idobj)
1237
0
{
1238
0
    if (!_PyIndex_Check(idobj)) {
1239
0
        PyErr_Format(PyExc_TypeError,
1240
0
                     "interpreter ID must be an int, got %.100s",
1241
0
                     Py_TYPE(idobj)->tp_name);
1242
0
        return -1;
1243
0
    }
1244
1245
    // This may raise OverflowError.
1246
    // For now, we don't worry about if LLONG_MAX < INT64_MAX.
1247
0
    long long id = PyLong_AsLongLong(idobj);
1248
0
    if (id == -1 && PyErr_Occurred()) {
1249
0
        return -1;
1250
0
    }
1251
1252
0
    if (id < 0) {
1253
0
        PyErr_Format(PyExc_ValueError,
1254
0
                     "interpreter ID must be a non-negative int, got %R",
1255
0
                     idobj);
1256
0
        return -1;
1257
0
    }
1258
#if LLONG_MAX > INT64_MAX
1259
    else if (id > INT64_MAX) {
1260
        PyErr_SetString(PyExc_OverflowError, "int too big to convert");
1261
        return -1;
1262
    }
1263
#endif
1264
0
    else {
1265
0
        return (int64_t)id;
1266
0
    }
1267
0
}
1268
1269
int64_t
1270
PyInterpreterState_GetID(PyInterpreterState *interp)
1271
0
{
1272
0
    if (interp == NULL) {
1273
0
        PyErr_SetString(PyExc_RuntimeError, "no interpreter provided");
1274
0
        return -1;
1275
0
    }
1276
0
    return interp->id;
1277
0
}
1278
1279
PyObject *
1280
_PyInterpreterState_GetIDObject(PyInterpreterState *interp)
1281
0
{
1282
0
    int64_t interpid = interp->id;
1283
0
    if (interpid < 0) {
1284
0
        return NULL;
1285
0
    }
1286
0
    assert(interpid < LLONG_MAX);
1287
0
    return PyLong_FromLongLong(interpid);
1288
0
}
1289
1290
1291
1292
void
1293
_PyInterpreterState_IDIncref(PyInterpreterState *interp)
1294
0
{
1295
0
    _Py_atomic_add_ssize(&interp->id_refcount, 1);
1296
0
}
1297
1298
1299
void
1300
_PyInterpreterState_IDDecref(PyInterpreterState *interp)
1301
0
{
1302
0
    _PyRuntimeState *runtime = interp->runtime;
1303
1304
0
    Py_ssize_t refcount = _Py_atomic_add_ssize(&interp->id_refcount, -1);
1305
1306
0
    if (refcount == 1 && interp->requires_idref) {
1307
0
        PyThreadState *tstate =
1308
0
            _PyThreadState_NewBound(interp, _PyThreadState_WHENCE_FINI);
1309
1310
        // XXX Possible GILState issues?
1311
0
        PyThreadState *save_tstate = _PyThreadState_Swap(runtime, tstate);
1312
0
        Py_EndInterpreter(tstate);
1313
0
        _PyThreadState_Swap(runtime, save_tstate);
1314
0
    }
1315
0
}
1316
1317
int
1318
_PyInterpreterState_RequiresIDRef(PyInterpreterState *interp)
1319
0
{
1320
0
    return interp->requires_idref;
1321
0
}
1322
1323
void
1324
_PyInterpreterState_RequireIDRef(PyInterpreterState *interp, int required)
1325
0
{
1326
0
    interp->requires_idref = required ? 1 : 0;
1327
0
}
1328
1329
1330
//-----------------------------
1331
// look up an interpreter state
1332
//-----------------------------
1333
1334
/* Return the interpreter associated with the current OS thread.
1335
1336
   The GIL must be held.
1337
  */
1338
1339
PyInterpreterState*
1340
PyInterpreterState_Get(void)
1341
79
{
1342
79
    _Py_AssertHoldsTstate();
1343
79
    PyInterpreterState *interp = _Py_tss_interp;
1344
79
    if (interp == NULL) {
1345
0
        Py_FatalError("no current interpreter");
1346
0
    }
1347
79
    return interp;
1348
79
}
1349
1350
1351
static PyInterpreterState *
1352
interp_look_up_id(_PyRuntimeState *runtime, int64_t requested_id)
1353
0
{
1354
0
    PyInterpreterState *interp = runtime->interpreters.head;
1355
0
    while (interp != NULL) {
1356
0
        int64_t id = interp->id;
1357
0
        assert(id >= 0);
1358
0
        if (requested_id == id) {
1359
0
            return interp;
1360
0
        }
1361
0
        interp = PyInterpreterState_Next(interp);
1362
0
    }
1363
0
    return NULL;
1364
0
}
1365
1366
/* Return the interpreter state with the given ID.
1367
1368
   Fail with RuntimeError if the interpreter is not found. */
1369
1370
PyInterpreterState *
1371
_PyInterpreterState_LookUpID(int64_t requested_id)
1372
0
{
1373
0
    PyInterpreterState *interp = NULL;
1374
0
    if (requested_id >= 0) {
1375
0
        _PyRuntimeState *runtime = &_PyRuntime;
1376
0
        HEAD_LOCK(runtime);
1377
0
        interp = interp_look_up_id(runtime, requested_id);
1378
0
        HEAD_UNLOCK(runtime);
1379
0
    }
1380
0
    if (interp == NULL && !PyErr_Occurred()) {
1381
0
        PyErr_Format(PyExc_InterpreterNotFoundError,
1382
0
                     "unrecognized interpreter ID %lld", requested_id);
1383
0
    }
1384
0
    return interp;
1385
0
}
1386
1387
PyInterpreterState *
1388
_PyInterpreterState_LookUpIDObject(PyObject *requested_id)
1389
0
{
1390
0
    int64_t id = _PyInterpreterState_ObjectToID(requested_id);
1391
0
    if (id < 0) {
1392
0
        return NULL;
1393
0
    }
1394
0
    return _PyInterpreterState_LookUpID(id);
1395
0
}
1396
1397
1398
/********************************/
1399
/* the per-thread runtime state */
1400
/********************************/
1401
1402
#ifndef NDEBUG
1403
static inline int
1404
tstate_is_alive(PyThreadState *tstate)
1405
{
1406
    return (tstate->_status.initialized &&
1407
            !tstate->_status.finalized &&
1408
            !tstate->_status.cleared &&
1409
            !tstate->_status.finalizing);
1410
}
1411
#endif
1412
1413
1414
//----------
1415
// lifecycle
1416
//----------
1417
1418
static _PyStackChunk*
1419
allocate_chunk(int size_in_bytes, _PyStackChunk* previous)
1420
315k
{
1421
315k
    assert(size_in_bytes % sizeof(PyObject **) == 0);
1422
315k
    _PyStackChunk *res = _PyObject_VirtualAlloc(size_in_bytes);
1423
315k
    if (res == NULL) {
1424
0
        return NULL;
1425
0
    }
1426
315k
    res->previous = previous;
1427
315k
    res->size = size_in_bytes;
1428
315k
    res->top = 0;
1429
315k
    return res;
1430
315k
}
1431
1432
static void
1433
reset_threadstate(_PyThreadStateImpl *tstate)
1434
0
{
1435
    // Set to _PyThreadState_INIT directly?
1436
0
    memcpy(tstate,
1437
0
           &initial._main_interpreter._initial_thread,
1438
0
           sizeof(*tstate));
1439
0
}
1440
1441
static _PyThreadStateImpl *
1442
alloc_threadstate(PyInterpreterState *interp)
1443
28
{
1444
28
    _PyThreadStateImpl *tstate;
1445
1446
    // Try the preallocated tstate first.
1447
28
    tstate = _Py_atomic_exchange_ptr(&interp->threads.preallocated, NULL);
1448
1449
    // Fall back to the allocator.
1450
28
    if (tstate == NULL) {
1451
0
        tstate = PyMem_RawCalloc(1, sizeof(_PyThreadStateImpl));
1452
0
        if (tstate == NULL) {
1453
0
            return NULL;
1454
0
        }
1455
0
        reset_threadstate(tstate);
1456
0
    }
1457
28
    return tstate;
1458
28
}
1459
1460
static void
1461
free_threadstate(_PyThreadStateImpl *tstate)
1462
0
{
1463
0
    PyInterpreterState *interp = tstate->base.interp;
1464
#ifdef Py_STATS
1465
    _PyStats_ThreadFini(tstate);
1466
#endif
1467
    // The initial thread state of the interpreter is allocated
1468
    // as part of the interpreter state so should not be freed.
1469
0
    if (tstate == &interp->_initial_thread) {
1470
        // Make it available again.
1471
0
        reset_threadstate(tstate);
1472
0
        assert(interp->threads.preallocated == NULL);
1473
0
        _Py_atomic_store_ptr(&interp->threads.preallocated, tstate);
1474
0
    }
1475
0
    else {
1476
0
        PyMem_RawFree(tstate);
1477
0
    }
1478
0
}
1479
1480
static void
1481
decref_threadstate(_PyThreadStateImpl *tstate)
1482
0
{
1483
0
    if (_Py_atomic_add_ssize(&tstate->refcount, -1) == 1) {
1484
        // The last reference to the thread state is gone.
1485
0
        free_threadstate(tstate);
1486
0
    }
1487
0
}
1488
1489
/* Get the thread state to a minimal consistent state.
1490
   Further init happens in pylifecycle.c before it can be used.
1491
   All fields not initialized here are expected to be zeroed out,
1492
   e.g. by PyMem_RawCalloc() or memset(), or otherwise pre-initialized.
1493
   The interpreter state is not manipulated.  Instead it is assumed that
1494
   the thread is getting added to the interpreter.
1495
  */
1496
1497
static void
1498
init_threadstate(_PyThreadStateImpl *_tstate,
1499
                 PyInterpreterState *interp, uint64_t id, int whence)
1500
28
{
1501
28
    PyThreadState *tstate = (PyThreadState *)_tstate;
1502
28
    if (tstate->_status.initialized) {
1503
0
        Py_FatalError("thread state already initialized");
1504
0
    }
1505
1506
28
    assert(interp != NULL);
1507
28
    tstate->interp = interp;
1508
28
    tstate->eval_breaker =
1509
28
        _Py_atomic_load_uintptr_relaxed(&interp->ceval.instrumentation_version);
1510
1511
    // next/prev are set in add_threadstate().
1512
28
    assert(tstate->next == NULL);
1513
28
    assert(tstate->prev == NULL);
1514
1515
28
    assert(tstate->_whence == _PyThreadState_WHENCE_NOTSET);
1516
28
    assert(whence >= 0 && whence <= _PyThreadState_WHENCE_THREADING_DAEMON);
1517
28
    tstate->_whence = whence;
1518
1519
28
    assert(id > 0);
1520
28
    tstate->id = id;
1521
1522
    // thread_id and native_thread_id are set in bind_tstate().
1523
1524
28
    tstate->py_recursion_limit = interp->ceval.recursion_limit;
1525
28
    tstate->py_recursion_remaining = interp->ceval.recursion_limit;
1526
28
    tstate->exc_info = &tstate->exc_state;
1527
1528
    // PyGILState_Release must not try to delete this thread state.
1529
    // This is cleared when PyGILState_Ensure() creates the thread state.
1530
28
    tstate->gilstate_counter = 1;
1531
1532
    // Initialize the embedded base frame - sentinel at the bottom of the frame stack
1533
28
    _tstate->base_frame.previous = NULL;
1534
28
    _tstate->base_frame.f_executable = PyStackRef_None;
1535
28
    _tstate->base_frame.f_funcobj = PyStackRef_NULL;
1536
28
    _tstate->base_frame.f_globals = NULL;
1537
28
    _tstate->base_frame.f_builtins = NULL;
1538
28
    _tstate->base_frame.f_locals = NULL;
1539
28
    _tstate->base_frame.frame_obj = NULL;
1540
28
    _tstate->base_frame.instr_ptr = NULL;
1541
28
    _tstate->base_frame.stackpointer = _tstate->base_frame.localsplus;
1542
28
    _tstate->base_frame.return_offset = 0;
1543
28
    _tstate->base_frame.owner = FRAME_OWNED_BY_INTERPRETER;
1544
28
    _tstate->base_frame.visited = 0;
1545
#ifdef Py_DEBUG
1546
    _tstate->base_frame.lltrace = 0;
1547
#endif
1548
#ifdef Py_GIL_DISABLED
1549
    _tstate->base_frame.tlbc_index = 0;
1550
#endif
1551
28
    _tstate->base_frame.localsplus[0] = PyStackRef_NULL;
1552
1553
    // current_frame starts pointing to the base frame
1554
28
    tstate->current_frame = &_tstate->base_frame;
1555
    // base_frame pointer for profilers to validate stack unwinding
1556
28
    tstate->base_frame = &_tstate->base_frame;
1557
28
    tstate->datastack_chunk = NULL;
1558
28
    tstate->datastack_top = NULL;
1559
28
    tstate->datastack_limit = NULL;
1560
28
    tstate->what_event = -1;
1561
28
    tstate->current_executor = NULL;
1562
28
    tstate->jit_exit = NULL;
1563
28
    tstate->dict_global_version = 0;
1564
1565
28
    _tstate->c_stack_soft_limit = UINTPTR_MAX;
1566
28
    _tstate->c_stack_top = 0;
1567
28
    _tstate->c_stack_hard_limit = 0;
1568
1569
28
    _tstate->c_stack_init_base = 0;
1570
28
    _tstate->c_stack_init_top = 0;
1571
1572
28
    _tstate->asyncio_running_loop = NULL;
1573
28
    _tstate->asyncio_running_task = NULL;
1574
1575
#ifdef _Py_TIER2
1576
    _tstate->jit_tracer_state = NULL;
1577
#endif
1578
28
    tstate->delete_later = NULL;
1579
1580
28
    llist_init(&_tstate->mem_free_queue);
1581
28
    llist_init(&_tstate->asyncio_tasks_head);
1582
28
    if (interp->stoptheworld.requested || _PyRuntime.stoptheworld.requested) {
1583
        // Start in the suspended state if there is an ongoing stop-the-world.
1584
0
        tstate->state = _Py_THREAD_SUSPENDED;
1585
0
    }
1586
1587
28
    tstate->_status.initialized = 1;
1588
28
}
1589
1590
static void
1591
add_threadstate(PyInterpreterState *interp, PyThreadState *tstate,
1592
                PyThreadState *next)
1593
28
{
1594
28
    assert(interp->threads.head != tstate);
1595
28
    if (next != NULL) {
1596
0
        assert(next->prev == NULL || next->prev == tstate);
1597
0
        next->prev = tstate;
1598
0
    }
1599
28
    tstate->next = next;
1600
28
    assert(tstate->prev == NULL);
1601
28
    interp->threads.head = tstate;
1602
28
}
1603
1604
static PyThreadState *
1605
new_threadstate(PyInterpreterState *interp, int whence)
1606
28
{
1607
    // Allocate the thread state.
1608
28
    _PyThreadStateImpl *tstate = alloc_threadstate(interp);
1609
28
    if (tstate == NULL) {
1610
0
        return NULL;
1611
0
    }
1612
1613
#ifdef Py_GIL_DISABLED
1614
    Py_ssize_t qsbr_idx = _Py_qsbr_reserve(interp);
1615
    if (qsbr_idx < 0) {
1616
        free_threadstate(tstate);
1617
        return NULL;
1618
    }
1619
    int32_t tlbc_idx = _Py_ReserveTLBCIndex(interp);
1620
    if (tlbc_idx < 0) {
1621
        free_threadstate(tstate);
1622
        return NULL;
1623
    }
1624
#endif
1625
#ifdef Py_STATS
1626
    // The PyStats structure is quite large and is allocated separated from tstate.
1627
    if (!_PyStats_ThreadInit(interp, tstate)) {
1628
        free_threadstate(tstate);
1629
        return NULL;
1630
    }
1631
#endif
1632
1633
    /* We serialize concurrent creation to protect global state. */
1634
28
    HEAD_LOCK(interp->runtime);
1635
1636
    // Initialize the new thread state.
1637
28
    interp->threads.next_unique_id += 1;
1638
28
    uint64_t id = interp->threads.next_unique_id;
1639
28
    init_threadstate(tstate, interp, id, whence);
1640
1641
    // Add the new thread state to the interpreter.
1642
28
    PyThreadState *old_head = interp->threads.head;
1643
28
    add_threadstate(interp, (PyThreadState *)tstate, old_head);
1644
1645
28
    HEAD_UNLOCK(interp->runtime);
1646
1647
#ifdef Py_GIL_DISABLED
1648
    // Must be called with lock unlocked to avoid lock ordering deadlocks.
1649
    _Py_qsbr_register(tstate, interp, qsbr_idx);
1650
    tstate->tlbc_index = tlbc_idx;
1651
#endif
1652
1653
28
    return (PyThreadState *)tstate;
1654
28
}
1655
1656
PyThreadState *
1657
PyThreadState_New(PyInterpreterState *interp)
1658
0
{
1659
0
    return _PyThreadState_NewBound(interp, _PyThreadState_WHENCE_UNKNOWN);
1660
0
}
1661
1662
PyThreadState *
1663
_PyThreadState_NewBound(PyInterpreterState *interp, int whence)
1664
0
{
1665
0
    PyThreadState *tstate = new_threadstate(interp, whence);
1666
0
    if (tstate) {
1667
0
        bind_tstate(tstate);
1668
        // This makes sure there's a gilstate tstate bound
1669
        // as soon as possible.
1670
0
        if (gilstate_get() == NULL) {
1671
0
            bind_gilstate_tstate(tstate);
1672
0
        }
1673
0
    }
1674
0
    return tstate;
1675
0
}
1676
1677
// This must be followed by a call to _PyThreadState_Bind();
1678
PyThreadState *
1679
_PyThreadState_New(PyInterpreterState *interp, int whence)
1680
28
{
1681
28
    return new_threadstate(interp, whence);
1682
28
}
1683
1684
// We keep this for stable ABI compabibility.
1685
PyAPI_FUNC(PyThreadState*)
1686
_PyThreadState_Prealloc(PyInterpreterState *interp)
1687
0
{
1688
0
    return _PyThreadState_New(interp, _PyThreadState_WHENCE_UNKNOWN);
1689
0
}
1690
1691
// We keep this around for (accidental) stable ABI compatibility.
1692
// Realistically, no extensions are using it.
1693
PyAPI_FUNC(void)
1694
_PyThreadState_Init(PyThreadState *tstate)
1695
0
{
1696
0
    Py_FatalError("_PyThreadState_Init() is for internal use only");
1697
0
}
1698
1699
1700
static void
1701
clear_datastack(PyThreadState *tstate)
1702
0
{
1703
0
    _PyStackChunk *chunk = tstate->datastack_chunk;
1704
0
    tstate->datastack_chunk = NULL;
1705
0
    while (chunk != NULL) {
1706
0
        _PyStackChunk *prev = chunk->previous;
1707
0
        _PyObject_VirtualFree(chunk, chunk->size);
1708
0
        chunk = prev;
1709
0
    }
1710
0
}
1711
1712
void
1713
PyThreadState_Clear(PyThreadState *tstate)
1714
0
{
1715
0
    assert(tstate->_status.initialized && !tstate->_status.cleared);
1716
0
    assert(current_fast_get()->interp == tstate->interp);
1717
    // GH-126016: In the _interpreters module, KeyboardInterrupt exceptions
1718
    // during PyEval_EvalCode() are sent to finalization, which doesn't let us
1719
    // mark threads as "not running main". So, for now this assertion is
1720
    // disabled.
1721
    // XXX assert(!_PyThreadState_IsRunningMain(tstate));
1722
    // XXX assert(!tstate->_status.bound || tstate->_status.unbound);
1723
0
    tstate->_status.finalizing = 1;  // just in case
1724
1725
    /* XXX Conditions we need to enforce:
1726
1727
       * the GIL must be held by the current thread
1728
       * current_fast_get()->interp must match tstate->interp
1729
       * for the main interpreter, current_fast_get() must be the main thread
1730
     */
1731
1732
0
    int verbose = _PyInterpreterState_GetConfig(tstate->interp)->verbose;
1733
1734
0
    if (verbose && tstate->current_frame != tstate->base_frame) {
1735
        /* bpo-20526: After the main thread calls
1736
           _PyInterpreterState_SetFinalizing() in Py_FinalizeEx()
1737
           (or in Py_EndInterpreter() for subinterpreters),
1738
           threads must exit when trying to take the GIL.
1739
           If a thread exit in the middle of _PyEval_EvalFrameDefault(),
1740
           tstate->frame is not reset to its previous value.
1741
           It is more likely with daemon threads, but it can happen
1742
           with regular threads if threading._shutdown() fails
1743
           (ex: interrupted by CTRL+C). */
1744
0
        fprintf(stderr,
1745
0
          "PyThreadState_Clear: warning: thread still has a frame\n");
1746
0
    }
1747
1748
0
    if (verbose && tstate->current_exception != NULL) {
1749
0
        fprintf(stderr, "PyThreadState_Clear: warning: thread has an exception set\n");
1750
0
        _PyErr_Print(tstate);
1751
0
    }
1752
1753
    /* At this point tstate shouldn't be used any more,
1754
       neither to run Python code nor for other uses.
1755
1756
       This is tricky when current_fast_get() == tstate, in the same way
1757
       as noted in interpreter_clear() above.  The below finalizers
1758
       can possibly run Python code or otherwise use the partially
1759
       cleared thread state.  For now we trust that isn't a problem
1760
       in practice.
1761
     */
1762
    // XXX Deal with the possibility of problematic finalizers.
1763
1764
    /* Don't clear tstate->pyframe: it is a borrowed reference */
1765
1766
0
    Py_CLEAR(tstate->threading_local_key);
1767
0
    Py_CLEAR(tstate->threading_local_sentinel);
1768
1769
0
    Py_CLEAR(((_PyThreadStateImpl *)tstate)->asyncio_running_loop);
1770
0
    Py_CLEAR(((_PyThreadStateImpl *)tstate)->asyncio_running_task);
1771
1772
1773
0
    PyMutex_Lock(&tstate->interp->asyncio_tasks_lock);
1774
    // merge any lingering tasks from thread state to interpreter's
1775
    // tasks list
1776
0
    llist_concat(&tstate->interp->asyncio_tasks_head,
1777
0
                 &((_PyThreadStateImpl *)tstate)->asyncio_tasks_head);
1778
0
    PyMutex_Unlock(&tstate->interp->asyncio_tasks_lock);
1779
1780
0
    Py_CLEAR(tstate->dict);
1781
0
    Py_CLEAR(tstate->async_exc);
1782
1783
0
    Py_CLEAR(tstate->current_exception);
1784
1785
0
    Py_CLEAR(tstate->exc_state.exc_value);
1786
1787
    /* The stack of exception states should contain just this thread. */
1788
0
    if (verbose && tstate->exc_info != &tstate->exc_state) {
1789
0
        fprintf(stderr,
1790
0
          "PyThreadState_Clear: warning: thread still has a generator\n");
1791
0
    }
1792
1793
0
    if (tstate->c_profilefunc != NULL) {
1794
0
        FT_ATOMIC_ADD_SSIZE(tstate->interp->sys_profiling_threads, -1);
1795
0
        tstate->c_profilefunc = NULL;
1796
0
    }
1797
0
    if (tstate->c_tracefunc != NULL) {
1798
0
        FT_ATOMIC_ADD_SSIZE(tstate->interp->sys_tracing_threads, -1);
1799
0
        tstate->c_tracefunc = NULL;
1800
0
    }
1801
1802
0
    Py_CLEAR(tstate->c_profileobj);
1803
0
    Py_CLEAR(tstate->c_traceobj);
1804
1805
0
    Py_CLEAR(tstate->async_gen_firstiter);
1806
0
    Py_CLEAR(tstate->async_gen_finalizer);
1807
1808
0
    Py_CLEAR(tstate->context);
1809
1810
#ifdef Py_GIL_DISABLED
1811
    // Each thread should clear own freelists in free-threading builds.
1812
    struct _Py_freelists *freelists = _Py_freelists_GET();
1813
    _PyObject_ClearFreeLists(freelists, 1);
1814
1815
    // Flush the thread's local GC allocation count to the global count
1816
    // before the thread state is cleared, otherwise the count is lost.
1817
    _PyThreadStateImpl *tstate_impl = (_PyThreadStateImpl *)tstate;
1818
    _Py_atomic_add_int(&tstate->interp->gc.young.count,
1819
                       (int)tstate_impl->gc.alloc_count);
1820
    tstate_impl->gc.alloc_count = 0;
1821
1822
    // Merge our thread-local refcounts into the type's own refcount and
1823
    // free our local refcount array.
1824
    _PyObject_FinalizePerThreadRefcounts(tstate_impl);
1825
1826
    // Remove ourself from the biased reference counting table of threads.
1827
    _Py_brc_remove_thread(tstate);
1828
1829
    // Release our thread-local copies of the bytecode for reuse by another
1830
    // thread
1831
    _Py_ClearTLBCIndex(tstate_impl);
1832
#endif
1833
1834
    // Merge our queue of pointers to be freed into the interpreter queue.
1835
0
    _PyMem_AbandonDelayed(tstate);
1836
1837
0
    _PyThreadState_ClearMimallocHeaps(tstate);
1838
1839
0
    tstate->_status.cleared = 1;
1840
1841
    // XXX Call _PyThreadStateSwap(runtime, NULL) here if "current".
1842
    // XXX Do it as early in the function as possible.
1843
0
}
1844
1845
static void
1846
decrement_stoptheworld_countdown(struct _stoptheworld_state *stw);
1847
1848
/* Common code for PyThreadState_Delete() and PyThreadState_DeleteCurrent() */
1849
static void
1850
tstate_delete_common(PyThreadState *tstate, int release_gil)
1851
0
{
1852
0
    assert(tstate->_status.cleared && !tstate->_status.finalized);
1853
0
    tstate_verify_not_active(tstate);
1854
0
    assert(!_PyThreadState_IsRunningMain(tstate));
1855
1856
0
    PyInterpreterState *interp = tstate->interp;
1857
0
    if (interp == NULL) {
1858
0
        Py_FatalError("NULL interpreter");
1859
0
    }
1860
0
    _PyRuntimeState *runtime = interp->runtime;
1861
1862
0
    HEAD_LOCK(runtime);
1863
0
    if (tstate->prev) {
1864
0
        tstate->prev->next = tstate->next;
1865
0
    }
1866
0
    else {
1867
0
        interp->threads.head = tstate->next;
1868
0
    }
1869
0
    if (tstate->next) {
1870
0
        tstate->next->prev = tstate->prev;
1871
0
    }
1872
0
    if (tstate->state != _Py_THREAD_SUSPENDED) {
1873
        // Any ongoing stop-the-world request should not wait for us because
1874
        // our thread is getting deleted.
1875
0
        if (interp->stoptheworld.requested) {
1876
0
            decrement_stoptheworld_countdown(&interp->stoptheworld);
1877
0
        }
1878
0
        if (runtime->stoptheworld.requested) {
1879
0
            decrement_stoptheworld_countdown(&runtime->stoptheworld);
1880
0
        }
1881
0
    }
1882
1883
#if defined(Py_REF_DEBUG) && defined(Py_GIL_DISABLED)
1884
    // Add our portion of the total refcount to the interpreter's total.
1885
    _PyThreadStateImpl *tstate_impl = (_PyThreadStateImpl *)tstate;
1886
    tstate->interp->object_state.reftotal += tstate_impl->reftotal;
1887
    tstate_impl->reftotal = 0;
1888
    assert(tstate_impl->refcounts.values == NULL);
1889
#endif
1890
1891
#if _Py_TIER2
1892
    _PyJit_TracerFree((_PyThreadStateImpl *)tstate);
1893
#endif
1894
1895
0
    HEAD_UNLOCK(runtime);
1896
1897
    // XXX Unbind in PyThreadState_Clear(), or earlier
1898
    // (and assert not-equal here)?
1899
0
    if (tstate->_status.bound_gilstate) {
1900
0
        unbind_gilstate_tstate(tstate);
1901
0
    }
1902
0
    if (tstate->_status.bound) {
1903
0
        unbind_tstate(tstate);
1904
0
    }
1905
1906
    // XXX Move to PyThreadState_Clear()?
1907
0
    clear_datastack(tstate);
1908
1909
0
    if (release_gil) {
1910
0
        _PyEval_ReleaseLock(tstate->interp, tstate, 1);
1911
0
    }
1912
1913
#ifdef Py_GIL_DISABLED
1914
    _Py_qsbr_unregister(tstate);
1915
#endif
1916
1917
0
    tstate->_status.finalized = 1;
1918
0
}
1919
1920
static void
1921
zapthreads(PyInterpreterState *interp)
1922
0
{
1923
0
    PyThreadState *tstate;
1924
    /* No need to lock the mutex here because this should only happen
1925
       when the threads are all really dead (XXX famous last words).
1926
1927
       Cannot use _Py_FOR_EACH_TSTATE_UNLOCKED because we are freeing
1928
       the thread states here.
1929
    */
1930
0
    while ((tstate = interp->threads.head) != NULL) {
1931
0
        tstate_verify_not_active(tstate);
1932
0
        tstate_delete_common(tstate, 0);
1933
0
        free_threadstate((_PyThreadStateImpl *)tstate);
1934
0
    }
1935
0
}
1936
1937
1938
void
1939
PyThreadState_Delete(PyThreadState *tstate)
1940
0
{
1941
0
    _Py_EnsureTstateNotNULL(tstate);
1942
0
    tstate_verify_not_active(tstate);
1943
0
    tstate_delete_common(tstate, 0);
1944
0
    free_threadstate((_PyThreadStateImpl *)tstate);
1945
0
}
1946
1947
1948
void
1949
_PyThreadState_DeleteCurrent(PyThreadState *tstate)
1950
0
{
1951
0
    _Py_EnsureTstateNotNULL(tstate);
1952
#ifdef Py_GIL_DISABLED
1953
    _Py_qsbr_detach(((_PyThreadStateImpl *)tstate)->qsbr);
1954
#endif
1955
#ifdef Py_STATS
1956
    _PyStats_Detach((_PyThreadStateImpl *)tstate);
1957
#endif
1958
0
    current_fast_clear(tstate->interp->runtime);
1959
0
    tstate_delete_common(tstate, 1);  // release GIL as part of call
1960
0
    free_threadstate((_PyThreadStateImpl *)tstate);
1961
0
}
1962
1963
void
1964
PyThreadState_DeleteCurrent(void)
1965
0
{
1966
0
    PyThreadState *tstate = current_fast_get();
1967
0
    _PyThreadState_DeleteCurrent(tstate);
1968
0
}
1969
1970
1971
// Unlinks and removes all thread states from `tstate->interp`, with the
1972
// exception of the one passed as an argument. However, it does not delete
1973
// these thread states. Instead, it returns the removed thread states as a
1974
// linked list.
1975
//
1976
// Note that if there is a current thread state, it *must* be the one
1977
// passed as argument.  Also, this won't touch any interpreters other
1978
// than the current one, since we don't know which thread state should
1979
// be kept in those other interpreters.
1980
PyThreadState *
1981
_PyThreadState_RemoveExcept(PyThreadState *tstate)
1982
0
{
1983
0
    assert(tstate != NULL);
1984
0
    PyInterpreterState *interp = tstate->interp;
1985
0
    _PyRuntimeState *runtime = interp->runtime;
1986
1987
#ifdef Py_GIL_DISABLED
1988
    assert(runtime->stoptheworld.world_stopped);
1989
#endif
1990
1991
0
    HEAD_LOCK(runtime);
1992
    /* Remove all thread states, except tstate, from the linked list of
1993
       thread states. */
1994
0
    PyThreadState *list = interp->threads.head;
1995
0
    if (list == tstate) {
1996
0
        list = tstate->next;
1997
0
    }
1998
0
    if (tstate->prev) {
1999
0
        tstate->prev->next = tstate->next;
2000
0
    }
2001
0
    if (tstate->next) {
2002
0
        tstate->next->prev = tstate->prev;
2003
0
    }
2004
0
    tstate->prev = tstate->next = NULL;
2005
0
    interp->threads.head = tstate;
2006
0
    HEAD_UNLOCK(runtime);
2007
2008
0
    return list;
2009
0
}
2010
2011
// Deletes the thread states in the linked list `list`.
2012
//
2013
// This is intended to be used in conjunction with _PyThreadState_RemoveExcept.
2014
//
2015
// If `is_after_fork` is true, the thread states are immediately freed.
2016
// Otherwise, they are decref'd because they may still be referenced by an
2017
// OS thread.
2018
void
2019
_PyThreadState_DeleteList(PyThreadState *list, int is_after_fork)
2020
0
{
2021
    // The world can't be stopped because we PyThreadState_Clear() can
2022
    // call destructors.
2023
0
    assert(!_PyRuntime.stoptheworld.world_stopped);
2024
2025
0
    PyThreadState *p, *next;
2026
0
    for (p = list; p; p = next) {
2027
0
        next = p->next;
2028
0
        PyThreadState_Clear(p);
2029
0
        if (is_after_fork) {
2030
0
            free_threadstate((_PyThreadStateImpl *)p);
2031
0
        }
2032
0
        else {
2033
0
            decref_threadstate((_PyThreadStateImpl *)p);
2034
0
        }
2035
0
    }
2036
0
}
2037
2038
2039
//----------
2040
// accessors
2041
//----------
2042
2043
/* An extension mechanism to store arbitrary additional per-thread state.
2044
   PyThreadState_GetDict() returns a dictionary that can be used to hold such
2045
   state; the caller should pick a unique key and store its state there.  If
2046
   PyThreadState_GetDict() returns NULL, an exception has *not* been raised
2047
   and the caller should assume no per-thread state is available. */
2048
2049
PyObject *
2050
_PyThreadState_GetDict(PyThreadState *tstate)
2051
8.52M
{
2052
8.52M
    assert(tstate != NULL);
2053
8.52M
    if (tstate->dict == NULL) {
2054
2
        tstate->dict = PyDict_New();
2055
2
        if (tstate->dict == NULL) {
2056
0
            _PyErr_Clear(tstate);
2057
0
        }
2058
2
    }
2059
8.52M
    return tstate->dict;
2060
8.52M
}
2061
2062
2063
PyObject *
2064
PyThreadState_GetDict(void)
2065
8.52M
{
2066
8.52M
    PyThreadState *tstate = current_fast_get();
2067
8.52M
    if (tstate == NULL) {
2068
0
        return NULL;
2069
0
    }
2070
8.52M
    return _PyThreadState_GetDict(tstate);
2071
8.52M
}
2072
2073
2074
PyInterpreterState *
2075
PyThreadState_GetInterpreter(PyThreadState *tstate)
2076
0
{
2077
0
    assert(tstate != NULL);
2078
0
    return tstate->interp;
2079
0
}
2080
2081
2082
PyFrameObject*
2083
PyThreadState_GetFrame(PyThreadState *tstate)
2084
1.03M
{
2085
1.03M
    assert(tstate != NULL);
2086
1.03M
    _PyInterpreterFrame *f = _PyThreadState_GetFrame(tstate);
2087
1.03M
    if (f == NULL) {
2088
0
        return NULL;
2089
0
    }
2090
1.03M
    PyFrameObject *frame = _PyFrame_GetFrameObject(f);
2091
1.03M
    if (frame == NULL) {
2092
0
        PyErr_Clear();
2093
0
    }
2094
1.03M
    return (PyFrameObject*)Py_XNewRef(frame);
2095
1.03M
}
2096
2097
2098
uint64_t
2099
PyThreadState_GetID(PyThreadState *tstate)
2100
0
{
2101
0
    assert(tstate != NULL);
2102
0
    return tstate->id;
2103
0
}
2104
2105
2106
static inline void
2107
tstate_activate(PyThreadState *tstate)
2108
3.12M
{
2109
3.12M
    assert(tstate != NULL);
2110
    // XXX assert(tstate_is_alive(tstate));
2111
3.12M
    assert(tstate_is_bound(tstate));
2112
3.12M
    assert(!tstate->_status.active);
2113
2114
3.12M
    assert(!tstate->_status.bound_gilstate ||
2115
3.12M
           tstate == gilstate_get());
2116
3.12M
    if (!tstate->_status.bound_gilstate) {
2117
0
        bind_gilstate_tstate(tstate);
2118
0
    }
2119
2120
3.12M
    tstate->_status.active = 1;
2121
3.12M
}
2122
2123
static inline void
2124
tstate_deactivate(PyThreadState *tstate)
2125
3.12M
{
2126
3.12M
    assert(tstate != NULL);
2127
    // XXX assert(tstate_is_alive(tstate));
2128
3.12M
    assert(tstate_is_bound(tstate));
2129
3.12M
    assert(tstate->_status.active);
2130
2131
#if Py_STATS
2132
    _PyStats_Detach((_PyThreadStateImpl *)tstate);
2133
#endif
2134
2135
3.12M
    tstate->_status.active = 0;
2136
2137
    // We do not unbind the gilstate tstate here.
2138
    // It will still be used in PyGILState_Ensure().
2139
3.12M
}
2140
2141
static int
2142
tstate_try_attach(PyThreadState *tstate)
2143
3.12M
{
2144
#ifdef Py_GIL_DISABLED
2145
    int expected = _Py_THREAD_DETACHED;
2146
    return _Py_atomic_compare_exchange_int(&tstate->state,
2147
                                           &expected,
2148
                                           _Py_THREAD_ATTACHED);
2149
#else
2150
3.12M
    assert(tstate->state == _Py_THREAD_DETACHED);
2151
3.12M
    tstate->state = _Py_THREAD_ATTACHED;
2152
3.12M
    return 1;
2153
3.12M
#endif
2154
3.12M
}
2155
2156
static void
2157
tstate_set_detached(PyThreadState *tstate, int detached_state)
2158
3.12M
{
2159
3.12M
    assert(_Py_atomic_load_int_relaxed(&tstate->state) == _Py_THREAD_ATTACHED);
2160
#ifdef Py_GIL_DISABLED
2161
    _Py_atomic_store_int(&tstate->state, detached_state);
2162
#else
2163
3.12M
    tstate->state = detached_state;
2164
3.12M
#endif
2165
3.12M
}
2166
2167
static void
2168
tstate_wait_attach(PyThreadState *tstate)
2169
0
{
2170
0
    do {
2171
0
        int state = _Py_atomic_load_int_relaxed(&tstate->state);
2172
0
        if (state == _Py_THREAD_SUSPENDED) {
2173
            // Wait until we're switched out of SUSPENDED to DETACHED.
2174
0
            _PyParkingLot_Park(&tstate->state, &state, sizeof(tstate->state),
2175
0
                               /*timeout=*/-1, NULL, /*detach=*/0);
2176
0
        }
2177
0
        else if (state == _Py_THREAD_SHUTTING_DOWN) {
2178
            // We're shutting down, so we can't attach.
2179
0
            _PyThreadState_HangThread(tstate);
2180
0
        }
2181
0
        else {
2182
0
            assert(state == _Py_THREAD_DETACHED);
2183
0
        }
2184
        // Once we're back in DETACHED we can re-attach
2185
0
    } while (!tstate_try_attach(tstate));
2186
0
}
2187
2188
void
2189
_PyThreadState_Attach(PyThreadState *tstate)
2190
3.12M
{
2191
#if defined(Py_DEBUG)
2192
    // This is called from PyEval_RestoreThread(). Similar
2193
    // to it, we need to ensure errno doesn't change.
2194
    int err = errno;
2195
#endif
2196
2197
3.12M
    _Py_EnsureTstateNotNULL(tstate);
2198
3.12M
    if (current_fast_get() != NULL) {
2199
0
        Py_FatalError("non-NULL old thread state");
2200
0
    }
2201
3.12M
    _PyThreadStateImpl *_tstate = (_PyThreadStateImpl *)tstate;
2202
3.12M
    if (_tstate->c_stack_hard_limit == 0) {
2203
28
        _Py_InitializeRecursionLimits(tstate);
2204
28
    }
2205
2206
3.12M
    while (1) {
2207
3.12M
        _PyEval_AcquireLock(tstate);
2208
2209
        // XXX assert(tstate_is_alive(tstate));
2210
3.12M
        current_fast_set(&_PyRuntime, tstate);
2211
3.12M
        if (!tstate_try_attach(tstate)) {
2212
0
            tstate_wait_attach(tstate);
2213
0
        }
2214
3.12M
        tstate_activate(tstate);
2215
2216
#ifdef Py_GIL_DISABLED
2217
        if (_PyEval_IsGILEnabled(tstate) && !tstate->holds_gil) {
2218
            // The GIL was enabled between our call to _PyEval_AcquireLock()
2219
            // and when we attached (the GIL can't go from enabled to disabled
2220
            // here because only a thread holding the GIL can disable
2221
            // it). Detach and try again.
2222
            tstate_set_detached(tstate, _Py_THREAD_DETACHED);
2223
            tstate_deactivate(tstate);
2224
            current_fast_clear(&_PyRuntime);
2225
            continue;
2226
        }
2227
        _Py_qsbr_attach(((_PyThreadStateImpl *)tstate)->qsbr);
2228
#endif
2229
3.12M
        break;
2230
3.12M
    }
2231
2232
    // Resume previous critical section. This acquires the lock(s) from the
2233
    // top-most critical section.
2234
3.12M
    if (tstate->critical_section != 0) {
2235
0
        _PyCriticalSection_Resume(tstate);
2236
0
    }
2237
2238
#ifdef Py_STATS
2239
    _PyStats_Attach((_PyThreadStateImpl *)tstate);
2240
#endif
2241
2242
#if defined(Py_DEBUG)
2243
    errno = err;
2244
#endif
2245
3.12M
}
2246
2247
static void
2248
detach_thread(PyThreadState *tstate, int detached_state)
2249
3.12M
{
2250
    // XXX assert(tstate_is_alive(tstate) && tstate_is_bound(tstate));
2251
3.12M
    assert(_Py_atomic_load_int_relaxed(&tstate->state) == _Py_THREAD_ATTACHED);
2252
3.12M
    assert(tstate == current_fast_get());
2253
3.12M
    if (tstate->critical_section != 0) {
2254
0
        _PyCriticalSection_SuspendAll(tstate);
2255
0
    }
2256
#ifdef Py_GIL_DISABLED
2257
    _Py_qsbr_detach(((_PyThreadStateImpl *)tstate)->qsbr);
2258
#endif
2259
3.12M
    tstate_deactivate(tstate);
2260
3.12M
    tstate_set_detached(tstate, detached_state);
2261
3.12M
    current_fast_clear(&_PyRuntime);
2262
3.12M
    _PyEval_ReleaseLock(tstate->interp, tstate, 0);
2263
3.12M
}
2264
2265
void
2266
_PyThreadState_Detach(PyThreadState *tstate)
2267
3.12M
{
2268
3.12M
    detach_thread(tstate, _Py_THREAD_DETACHED);
2269
3.12M
}
2270
2271
void
2272
_PyThreadState_Suspend(PyThreadState *tstate)
2273
0
{
2274
0
    _PyRuntimeState *runtime = &_PyRuntime;
2275
2276
0
    assert(_Py_atomic_load_int_relaxed(&tstate->state) == _Py_THREAD_ATTACHED);
2277
2278
0
    struct _stoptheworld_state *stw = NULL;
2279
0
    HEAD_LOCK(runtime);
2280
0
    if (runtime->stoptheworld.requested) {
2281
0
        stw = &runtime->stoptheworld;
2282
0
    }
2283
0
    else if (tstate->interp->stoptheworld.requested) {
2284
0
        stw = &tstate->interp->stoptheworld;
2285
0
    }
2286
0
    HEAD_UNLOCK(runtime);
2287
2288
0
    if (stw == NULL) {
2289
        // Switch directly to "detached" if there is no active stop-the-world
2290
        // request.
2291
0
        detach_thread(tstate, _Py_THREAD_DETACHED);
2292
0
        return;
2293
0
    }
2294
2295
    // Switch to "suspended" state.
2296
0
    detach_thread(tstate, _Py_THREAD_SUSPENDED);
2297
2298
    // Decrease the count of remaining threads needing to park.
2299
0
    HEAD_LOCK(runtime);
2300
0
    decrement_stoptheworld_countdown(stw);
2301
0
    HEAD_UNLOCK(runtime);
2302
0
}
2303
2304
void
2305
_PyThreadState_SetShuttingDown(PyThreadState *tstate)
2306
0
{
2307
0
    _Py_atomic_store_int(&tstate->state, _Py_THREAD_SHUTTING_DOWN);
2308
#ifdef Py_GIL_DISABLED
2309
    _PyParkingLot_UnparkAll(&tstate->state);
2310
#endif
2311
0
}
2312
2313
// Decrease stop-the-world counter of remaining number of threads that need to
2314
// pause. If we are the final thread to pause, notify the requesting thread.
2315
static void
2316
decrement_stoptheworld_countdown(struct _stoptheworld_state *stw)
2317
0
{
2318
0
    assert(stw->thread_countdown > 0);
2319
0
    if (--stw->thread_countdown == 0) {
2320
0
        _PyEvent_Notify(&stw->stop_event);
2321
0
    }
2322
0
}
2323
2324
#ifdef Py_GIL_DISABLED
2325
// Interpreter for _Py_FOR_EACH_STW_INTERP(). For global stop-the-world events,
2326
// we start with the first interpreter and then iterate over all interpreters.
2327
// For per-interpreter stop-the-world events, we only operate on the one
2328
// interpreter.
2329
static PyInterpreterState *
2330
interp_for_stop_the_world(struct _stoptheworld_state *stw)
2331
{
2332
    return (stw->is_global
2333
        ? PyInterpreterState_Head()
2334
        : _Py_CONTAINER_OF(stw, PyInterpreterState, stoptheworld));
2335
}
2336
2337
// Loops over threads for a stop-the-world event.
2338
// For global: all threads in all interpreters
2339
// For per-interpreter: all threads in the interpreter
2340
#define _Py_FOR_EACH_STW_INTERP(stw, i)                                     \
2341
    for (PyInterpreterState *i = interp_for_stop_the_world((stw));          \
2342
            i != NULL; i = ((stw->is_global) ? i->next : NULL))
2343
2344
2345
// Try to transition threads atomically from the "detached" state to the
2346
// "gc stopped" state. Returns true if all threads are in the "gc stopped"
2347
static bool
2348
park_detached_threads(struct _stoptheworld_state *stw)
2349
{
2350
    int num_parked = 0;
2351
    _Py_FOR_EACH_STW_INTERP(stw, i) {
2352
        _Py_FOR_EACH_TSTATE_UNLOCKED(i, t) {
2353
            int state = _Py_atomic_load_int_relaxed(&t->state);
2354
            if (state == _Py_THREAD_DETACHED) {
2355
                // Atomically transition to "suspended" if in "detached" state.
2356
                if (_Py_atomic_compare_exchange_int(
2357
                                &t->state, &state, _Py_THREAD_SUSPENDED)) {
2358
                    num_parked++;
2359
                }
2360
            }
2361
            else if (state == _Py_THREAD_ATTACHED && t != stw->requester) {
2362
                _Py_set_eval_breaker_bit(t, _PY_EVAL_PLEASE_STOP_BIT);
2363
            }
2364
        }
2365
    }
2366
    stw->thread_countdown -= num_parked;
2367
    assert(stw->thread_countdown >= 0);
2368
    return num_parked > 0 && stw->thread_countdown == 0;
2369
}
2370
2371
static void
2372
stop_the_world(struct _stoptheworld_state *stw)
2373
{
2374
    _PyRuntimeState *runtime = &_PyRuntime;
2375
2376
    // gh-137433: Acquire the rwmutex first to avoid deadlocks with daemon
2377
    // threads that may hang when blocked on lock acquisition.
2378
    if (stw->is_global) {
2379
        _PyRWMutex_Lock(&runtime->stoptheworld_mutex);
2380
    }
2381
    else {
2382
        _PyRWMutex_RLock(&runtime->stoptheworld_mutex);
2383
    }
2384
    PyMutex_Lock(&stw->mutex);
2385
2386
    HEAD_LOCK(runtime);
2387
    stw->requested = 1;
2388
    stw->thread_countdown = 0;
2389
    stw->stop_event = (PyEvent){0};  // zero-initialize (unset)
2390
    stw->requester = _PyThreadState_GET();  // may be NULL
2391
    FT_STAT_WORLD_STOP_INC();
2392
2393
    _Py_FOR_EACH_STW_INTERP(stw, i) {
2394
        _Py_FOR_EACH_TSTATE_UNLOCKED(i, t) {
2395
            if (t != stw->requester) {
2396
                // Count all the other threads (we don't wait on ourself).
2397
                stw->thread_countdown++;
2398
            }
2399
        }
2400
    }
2401
2402
    if (stw->thread_countdown == 0) {
2403
        HEAD_UNLOCK(runtime);
2404
        stw->world_stopped = 1;
2405
        return;
2406
    }
2407
2408
    for (;;) {
2409
        // Switch threads that are detached to the GC stopped state
2410
        bool stopped_all_threads = park_detached_threads(stw);
2411
        HEAD_UNLOCK(runtime);
2412
2413
        if (stopped_all_threads) {
2414
            break;
2415
        }
2416
2417
        PyTime_t wait_ns = 1000*1000;  // 1ms (arbitrary, may need tuning)
2418
        int detach = 0;
2419
        if (PyEvent_WaitTimed(&stw->stop_event, wait_ns, detach)) {
2420
            assert(stw->thread_countdown == 0);
2421
            break;
2422
        }
2423
2424
        HEAD_LOCK(runtime);
2425
    }
2426
    stw->world_stopped = 1;
2427
}
2428
2429
static void
2430
start_the_world(struct _stoptheworld_state *stw)
2431
{
2432
    _PyRuntimeState *runtime = &_PyRuntime;
2433
    assert(PyMutex_IsLocked(&stw->mutex));
2434
2435
    HEAD_LOCK(runtime);
2436
    stw->requested = 0;
2437
    stw->world_stopped = 0;
2438
    // Switch threads back to the detached state.
2439
    _Py_FOR_EACH_STW_INTERP(stw, i) {
2440
        _Py_FOR_EACH_TSTATE_UNLOCKED(i, t) {
2441
            if (t != stw->requester) {
2442
                assert(_Py_atomic_load_int_relaxed(&t->state) ==
2443
                       _Py_THREAD_SUSPENDED);
2444
                _Py_atomic_store_int(&t->state, _Py_THREAD_DETACHED);
2445
                _PyParkingLot_UnparkAll(&t->state);
2446
            }
2447
        }
2448
    }
2449
    stw->requester = NULL;
2450
    HEAD_UNLOCK(runtime);
2451
    PyMutex_Unlock(&stw->mutex);
2452
    if (stw->is_global) {
2453
        _PyRWMutex_Unlock(&runtime->stoptheworld_mutex);
2454
    }
2455
    else {
2456
        _PyRWMutex_RUnlock(&runtime->stoptheworld_mutex);
2457
    }
2458
}
2459
#endif  // Py_GIL_DISABLED
2460
2461
void
2462
_PyEval_StopTheWorldAll(_PyRuntimeState *runtime)
2463
0
{
2464
#ifdef Py_GIL_DISABLED
2465
    stop_the_world(&runtime->stoptheworld);
2466
#endif
2467
0
}
2468
2469
void
2470
_PyEval_StartTheWorldAll(_PyRuntimeState *runtime)
2471
0
{
2472
#ifdef Py_GIL_DISABLED
2473
    start_the_world(&runtime->stoptheworld);
2474
#endif
2475
0
}
2476
2477
void
2478
_PyEval_StopTheWorld(PyInterpreterState *interp)
2479
4
{
2480
#ifdef Py_GIL_DISABLED
2481
    stop_the_world(&interp->stoptheworld);
2482
#endif
2483
4
}
2484
2485
void
2486
_PyEval_StartTheWorld(PyInterpreterState *interp)
2487
4
{
2488
#ifdef Py_GIL_DISABLED
2489
    start_the_world(&interp->stoptheworld);
2490
#endif
2491
4
}
2492
2493
//----------
2494
// other API
2495
//----------
2496
2497
/* Asynchronously raise an exception in a thread.
2498
   Requested by Just van Rossum and Alex Martelli.
2499
   To prevent naive misuse, you must write your own extension
2500
   to call this, or use ctypes.  Must be called with the GIL held.
2501
   Returns the number of tstates modified (normally 1, but 0 if `id` didn't
2502
   match any known thread id).  Can be called with exc=NULL to clear an
2503
   existing async exception.  This raises no exceptions. */
2504
2505
// XXX Move this to Python/ceval_gil.c?
2506
// XXX Deprecate this.
2507
int
2508
PyThreadState_SetAsyncExc(unsigned long id, PyObject *exc)
2509
0
{
2510
0
    PyInterpreterState *interp = _PyInterpreterState_GET();
2511
2512
    /* Although the GIL is held, a few C API functions can be called
2513
     * without the GIL held, and in particular some that create and
2514
     * destroy thread and interpreter states.  Those can mutate the
2515
     * list of thread states we're traversing, so to prevent that we lock
2516
     * head_mutex for the duration.
2517
     */
2518
0
    PyThreadState *tstate = NULL;
2519
0
    _Py_FOR_EACH_TSTATE_BEGIN(interp, t) {
2520
0
        if (t->thread_id == id) {
2521
0
            tstate = t;
2522
0
            break;
2523
0
        }
2524
0
    }
2525
0
    _Py_FOR_EACH_TSTATE_END(interp);
2526
2527
0
    if (tstate != NULL) {
2528
        /* Tricky:  we need to decref the current value
2529
         * (if any) in tstate->async_exc, but that can in turn
2530
         * allow arbitrary Python code to run, including
2531
         * perhaps calls to this function.  To prevent
2532
         * deadlock, we need to release head_mutex before
2533
         * the decref.
2534
         */
2535
0
        Py_XINCREF(exc);
2536
0
        PyObject *old_exc = _Py_atomic_exchange_ptr(&tstate->async_exc, exc);
2537
2538
0
        Py_XDECREF(old_exc);
2539
0
        _Py_set_eval_breaker_bit(tstate, _PY_ASYNC_EXCEPTION_BIT);
2540
0
    }
2541
2542
0
    return tstate != NULL;
2543
0
}
2544
2545
//---------------------------------
2546
// API for the current thread state
2547
//---------------------------------
2548
2549
PyThreadState *
2550
PyThreadState_GetUnchecked(void)
2551
0
{
2552
0
    return current_fast_get();
2553
0
}
2554
2555
2556
PyThreadState *
2557
PyThreadState_Get(void)
2558
84.9M
{
2559
84.9M
    PyThreadState *tstate = current_fast_get();
2560
84.9M
    _Py_EnsureTstateNotNULL(tstate);
2561
84.9M
    return tstate;
2562
84.9M
}
2563
2564
PyThreadState *
2565
_PyThreadState_Swap(_PyRuntimeState *runtime, PyThreadState *newts)
2566
0
{
2567
0
    PyThreadState *oldts = current_fast_get();
2568
0
    if (oldts != NULL) {
2569
0
        _PyThreadState_Detach(oldts);
2570
0
    }
2571
0
    if (newts != NULL) {
2572
0
        _PyThreadState_Attach(newts);
2573
0
    }
2574
0
    return oldts;
2575
0
}
2576
2577
PyThreadState *
2578
PyThreadState_Swap(PyThreadState *newts)
2579
0
{
2580
0
    return _PyThreadState_Swap(&_PyRuntime, newts);
2581
0
}
2582
2583
2584
void
2585
_PyThreadState_Bind(PyThreadState *tstate)
2586
28
{
2587
    // gh-104690: If Python is being finalized and PyInterpreterState_Delete()
2588
    // was called, tstate becomes a dangling pointer.
2589
28
    assert(_PyThreadState_CheckConsistency(tstate));
2590
2591
28
    bind_tstate(tstate);
2592
    // This makes sure there's a gilstate tstate bound
2593
    // as soon as possible.
2594
28
    if (gilstate_get() == NULL) {
2595
28
        bind_gilstate_tstate(tstate);
2596
28
    }
2597
28
}
2598
2599
#if defined(Py_GIL_DISABLED) && !defined(Py_LIMITED_API)
2600
uintptr_t
2601
_Py_GetThreadLocal_Addr(void)
2602
{
2603
    // gh-112535: Use the address of the thread-local PyThreadState variable as
2604
    // a unique identifier for the current thread. Each thread has a unique
2605
    // _Py_tss_tstate variable with a unique address.
2606
    return (uintptr_t)&_Py_tss_tstate;
2607
}
2608
#endif
2609
2610
/***********************************/
2611
/* routines for advanced debuggers */
2612
/***********************************/
2613
2614
// (requested by David Beazley)
2615
// Don't use unless you know what you are doing!
2616
2617
PyInterpreterState *
2618
PyInterpreterState_Head(void)
2619
0
{
2620
0
    return _PyRuntime.interpreters.head;
2621
0
}
2622
2623
PyInterpreterState *
2624
PyInterpreterState_Main(void)
2625
0
{
2626
0
    return _PyInterpreterState_Main();
2627
0
}
2628
2629
PyInterpreterState *
2630
0
PyInterpreterState_Next(PyInterpreterState *interp) {
2631
0
    return interp->next;
2632
0
}
2633
2634
PyThreadState *
2635
20.4k
PyInterpreterState_ThreadHead(PyInterpreterState *interp) {
2636
20.4k
    return interp->threads.head;
2637
20.4k
}
2638
2639
PyThreadState *
2640
20.4k
PyThreadState_Next(PyThreadState *tstate) {
2641
20.4k
    return tstate->next;
2642
20.4k
}
2643
2644
2645
/********************************************/
2646
/* reporting execution state of all threads */
2647
/********************************************/
2648
2649
/* The implementation of sys._current_frames().  This is intended to be
2650
   called with the GIL held, as it will be when called via
2651
   sys._current_frames().  It's possible it would work fine even without
2652
   the GIL held, but haven't thought enough about that.
2653
*/
2654
PyObject *
2655
_PyThread_CurrentFrames(void)
2656
0
{
2657
0
    _PyRuntimeState *runtime = &_PyRuntime;
2658
0
    PyThreadState *tstate = current_fast_get();
2659
0
    if (_PySys_Audit(tstate, "sys._current_frames", NULL) < 0) {
2660
0
        return NULL;
2661
0
    }
2662
2663
0
    PyObject *result = PyDict_New();
2664
0
    if (result == NULL) {
2665
0
        return NULL;
2666
0
    }
2667
2668
    /* for i in all interpreters:
2669
     *     for t in all of i's thread states:
2670
     *          if t's frame isn't NULL, map t's id to its frame
2671
     * Because these lists can mutate even when the GIL is held, we
2672
     * need to grab head_mutex for the duration.
2673
     */
2674
0
    _PyEval_StopTheWorldAll(runtime);
2675
0
    HEAD_LOCK(runtime);
2676
0
    PyInterpreterState *i;
2677
0
    for (i = runtime->interpreters.head; i != NULL; i = i->next) {
2678
0
        _Py_FOR_EACH_TSTATE_UNLOCKED(i, t) {
2679
0
            _PyInterpreterFrame *frame = t->current_frame;
2680
0
            frame = _PyFrame_GetFirstComplete(frame);
2681
0
            if (frame == NULL) {
2682
0
                continue;
2683
0
            }
2684
0
            PyObject *id = PyLong_FromUnsignedLong(t->thread_id);
2685
0
            if (id == NULL) {
2686
0
                goto fail;
2687
0
            }
2688
0
            PyObject *frameobj = (PyObject *)_PyFrame_GetFrameObject(frame);
2689
0
            if (frameobj == NULL) {
2690
0
                Py_DECREF(id);
2691
0
                goto fail;
2692
0
            }
2693
0
            int stat = PyDict_SetItem(result, id, frameobj);
2694
0
            Py_DECREF(id);
2695
0
            if (stat < 0) {
2696
0
                goto fail;
2697
0
            }
2698
0
        }
2699
0
    }
2700
0
    goto done;
2701
2702
0
fail:
2703
0
    Py_CLEAR(result);
2704
2705
0
done:
2706
0
    HEAD_UNLOCK(runtime);
2707
0
    _PyEval_StartTheWorldAll(runtime);
2708
0
    return result;
2709
0
}
2710
2711
/* The implementation of sys._current_exceptions().  This is intended to be
2712
   called with the GIL held, as it will be when called via
2713
   sys._current_exceptions().  It's possible it would work fine even without
2714
   the GIL held, but haven't thought enough about that.
2715
*/
2716
PyObject *
2717
_PyThread_CurrentExceptions(void)
2718
0
{
2719
0
    _PyRuntimeState *runtime = &_PyRuntime;
2720
0
    PyThreadState *tstate = current_fast_get();
2721
2722
0
    _Py_EnsureTstateNotNULL(tstate);
2723
2724
0
    if (_PySys_Audit(tstate, "sys._current_exceptions", NULL) < 0) {
2725
0
        return NULL;
2726
0
    }
2727
2728
0
    PyObject *result = PyDict_New();
2729
0
    if (result == NULL) {
2730
0
        return NULL;
2731
0
    }
2732
2733
    /* for i in all interpreters:
2734
     *     for t in all of i's thread states:
2735
     *          if t's frame isn't NULL, map t's id to its frame
2736
     * Because these lists can mutate even when the GIL is held, we
2737
     * need to grab head_mutex for the duration.
2738
     */
2739
0
    _PyEval_StopTheWorldAll(runtime);
2740
0
    HEAD_LOCK(runtime);
2741
0
    PyInterpreterState *i;
2742
0
    for (i = runtime->interpreters.head; i != NULL; i = i->next) {
2743
0
        _Py_FOR_EACH_TSTATE_UNLOCKED(i, t) {
2744
0
            _PyErr_StackItem *err_info = _PyErr_GetTopmostException(t);
2745
0
            if (err_info == NULL) {
2746
0
                continue;
2747
0
            }
2748
0
            PyObject *id = PyLong_FromUnsignedLong(t->thread_id);
2749
0
            if (id == NULL) {
2750
0
                goto fail;
2751
0
            }
2752
0
            PyObject *exc = err_info->exc_value;
2753
0
            assert(exc == NULL ||
2754
0
                   exc == Py_None ||
2755
0
                   PyExceptionInstance_Check(exc));
2756
2757
0
            int stat = PyDict_SetItem(result, id, exc == NULL ? Py_None : exc);
2758
0
            Py_DECREF(id);
2759
0
            if (stat < 0) {
2760
0
                goto fail;
2761
0
            }
2762
0
        }
2763
0
    }
2764
0
    goto done;
2765
2766
0
fail:
2767
0
    Py_CLEAR(result);
2768
2769
0
done:
2770
0
    HEAD_UNLOCK(runtime);
2771
0
    _PyEval_StartTheWorldAll(runtime);
2772
0
    return result;
2773
0
}
2774
2775
2776
/***********************************/
2777
/* Python "auto thread state" API. */
2778
/***********************************/
2779
2780
/* Internal initialization/finalization functions called by
2781
   Py_Initialize/Py_FinalizeEx
2782
*/
2783
PyStatus
2784
_PyGILState_Init(PyInterpreterState *interp)
2785
28
{
2786
28
    if (!_Py_IsMainInterpreter(interp)) {
2787
        /* Currently, PyGILState is shared by all interpreters. The main
2788
         * interpreter is responsible to initialize it. */
2789
0
        return _PyStatus_OK();
2790
0
    }
2791
28
    _PyRuntimeState *runtime = interp->runtime;
2792
28
    assert(gilstate_get() == NULL);
2793
28
    assert(runtime->gilstate.autoInterpreterState == NULL);
2794
28
    runtime->gilstate.autoInterpreterState = interp;
2795
28
    return _PyStatus_OK();
2796
28
}
2797
2798
void
2799
_PyGILState_Fini(PyInterpreterState *interp)
2800
0
{
2801
0
    if (!_Py_IsMainInterpreter(interp)) {
2802
        /* Currently, PyGILState is shared by all interpreters. The main
2803
         * interpreter is responsible to initialize it. */
2804
0
        return;
2805
0
    }
2806
0
    interp->runtime->gilstate.autoInterpreterState = NULL;
2807
0
}
2808
2809
2810
// XXX Drop this.
2811
void
2812
_PyGILState_SetTstate(PyThreadState *tstate)
2813
28
{
2814
    /* must init with valid states */
2815
28
    assert(tstate != NULL);
2816
28
    assert(tstate->interp != NULL);
2817
2818
28
    if (!_Py_IsMainInterpreter(tstate->interp)) {
2819
        /* Currently, PyGILState is shared by all interpreters. The main
2820
         * interpreter is responsible to initialize it. */
2821
0
        return;
2822
0
    }
2823
2824
#ifndef NDEBUG
2825
    _PyRuntimeState *runtime = tstate->interp->runtime;
2826
2827
    assert(runtime->gilstate.autoInterpreterState == tstate->interp);
2828
    assert(gilstate_get() == tstate);
2829
    assert(tstate->gilstate_counter == 1);
2830
#endif
2831
28
}
2832
2833
PyInterpreterState *
2834
_PyGILState_GetInterpreterStateUnsafe(void)
2835
0
{
2836
0
    return _PyRuntime.gilstate.autoInterpreterState;
2837
0
}
2838
2839
/* The public functions */
2840
2841
PyThreadState *
2842
PyGILState_GetThisThreadState(void)
2843
0
{
2844
0
    return gilstate_get();
2845
0
}
2846
2847
int
2848
PyGILState_Check(void)
2849
0
{
2850
0
    _PyRuntimeState *runtime = &_PyRuntime;
2851
0
    if (!_Py_atomic_load_int_relaxed(&runtime->gilstate.check_enabled)) {
2852
0
        return 1;
2853
0
    }
2854
2855
0
    PyThreadState *tstate = current_fast_get();
2856
0
    if (tstate == NULL) {
2857
0
        return 0;
2858
0
    }
2859
2860
0
    PyThreadState *tcur = gilstate_get();
2861
0
    return (tstate == tcur);
2862
0
}
2863
2864
PyGILState_STATE
2865
PyGILState_Ensure(void)
2866
0
{
2867
0
    _PyRuntimeState *runtime = &_PyRuntime;
2868
2869
    /* Note that we do not auto-init Python here - apart from
2870
       potential races with 2 threads auto-initializing, pep-311
2871
       spells out other issues.  Embedders are expected to have
2872
       called Py_Initialize(). */
2873
2874
    /* Ensure that _PyEval_InitThreads() and _PyGILState_Init() have been
2875
       called by Py_Initialize()
2876
2877
       TODO: This isn't thread-safe. There's no protection here against
2878
       concurrent finalization of the interpreter; it's simply a guard
2879
       for *after* the interpreter has finalized.
2880
     */
2881
0
    if (!_PyEval_ThreadsInitialized() || runtime->gilstate.autoInterpreterState == NULL) {
2882
0
        PyThread_hang_thread();
2883
0
    }
2884
2885
0
    PyThreadState *tcur = gilstate_get();
2886
0
    int has_gil;
2887
0
    if (tcur == NULL) {
2888
        /* Create a new Python thread state for this thread */
2889
        // XXX Use PyInterpreterState_EnsureThreadState()?
2890
0
        tcur = new_threadstate(runtime->gilstate.autoInterpreterState,
2891
0
                               _PyThreadState_WHENCE_GILSTATE);
2892
0
        if (tcur == NULL) {
2893
0
            Py_FatalError("Couldn't create thread-state for new thread");
2894
0
        }
2895
0
        bind_tstate(tcur);
2896
0
        bind_gilstate_tstate(tcur);
2897
2898
        /* This is our thread state!  We'll need to delete it in the
2899
           matching call to PyGILState_Release(). */
2900
0
        assert(tcur->gilstate_counter == 1);
2901
0
        tcur->gilstate_counter = 0;
2902
0
        has_gil = 0; /* new thread state is never current */
2903
0
    }
2904
0
    else {
2905
0
        has_gil = holds_gil(tcur);
2906
0
    }
2907
2908
0
    if (!has_gil) {
2909
0
        PyEval_RestoreThread(tcur);
2910
0
    }
2911
2912
    /* Update our counter in the thread-state - no need for locks:
2913
       - tcur will remain valid as we hold the GIL.
2914
       - the counter is safe as we are the only thread "allowed"
2915
         to modify this value
2916
    */
2917
0
    ++tcur->gilstate_counter;
2918
2919
0
    return has_gil ? PyGILState_LOCKED : PyGILState_UNLOCKED;
2920
0
}
2921
2922
void
2923
PyGILState_Release(PyGILState_STATE oldstate)
2924
0
{
2925
0
    PyThreadState *tstate = gilstate_get();
2926
0
    if (tstate == NULL) {
2927
0
        Py_FatalError("auto-releasing thread-state, "
2928
0
                      "but no thread-state for this thread");
2929
0
    }
2930
2931
    /* We must hold the GIL and have our thread state current */
2932
0
    if (!holds_gil(tstate)) {
2933
0
        _Py_FatalErrorFormat(__func__,
2934
0
                             "thread state %p must be current when releasing",
2935
0
                             tstate);
2936
0
    }
2937
0
    --tstate->gilstate_counter;
2938
0
    assert(tstate->gilstate_counter >= 0); /* illegal counter value */
2939
2940
    /* If we're going to destroy this thread-state, we must
2941
     * clear it while the GIL is held, as destructors may run.
2942
     */
2943
0
    if (tstate->gilstate_counter == 0) {
2944
        /* can't have been locked when we created it */
2945
0
        assert(oldstate == PyGILState_UNLOCKED);
2946
        // XXX Unbind tstate here.
2947
        // gh-119585: `PyThreadState_Clear()` may call destructors that
2948
        // themselves use PyGILState_Ensure and PyGILState_Release, so make
2949
        // sure that gilstate_counter is not zero when calling it.
2950
0
        ++tstate->gilstate_counter;
2951
0
        PyThreadState_Clear(tstate);
2952
0
        --tstate->gilstate_counter;
2953
        /* Delete the thread-state.  Note this releases the GIL too!
2954
         * It's vital that the GIL be held here, to avoid shutdown
2955
         * races; see bugs 225673 and 1061968 (that nasty bug has a
2956
         * habit of coming back).
2957
         */
2958
0
        assert(tstate->gilstate_counter == 0);
2959
0
        assert(current_fast_get() == tstate);
2960
0
        _PyThreadState_DeleteCurrent(tstate);
2961
0
    }
2962
    /* Release the lock if necessary */
2963
0
    else if (oldstate == PyGILState_UNLOCKED) {
2964
0
        PyEval_SaveThread();
2965
0
    }
2966
0
}
2967
2968
2969
/*************/
2970
/* Other API */
2971
/*************/
2972
2973
_PyFrameEvalFunction
2974
_PyInterpreterState_GetEvalFrameFunc(PyInterpreterState *interp)
2975
0
{
2976
0
    if (interp->eval_frame == NULL) {
2977
0
        return _PyEval_EvalFrameDefault;
2978
0
    }
2979
0
    return interp->eval_frame;
2980
0
}
2981
2982
2983
void
2984
_PyInterpreterState_SetEvalFrameFunc(PyInterpreterState *interp,
2985
                                     _PyFrameEvalFunction eval_frame)
2986
0
{
2987
0
    if (eval_frame == _PyEval_EvalFrameDefault) {
2988
0
        eval_frame = NULL;
2989
0
    }
2990
0
    if (eval_frame == interp->eval_frame) {
2991
0
        return;
2992
0
    }
2993
#ifdef _Py_TIER2
2994
    if (eval_frame != NULL) {
2995
        _Py_Executors_InvalidateAll(interp, 1);
2996
    }
2997
#endif
2998
0
    RARE_EVENT_INC(set_eval_frame_func);
2999
0
    _PyEval_StopTheWorld(interp);
3000
0
    interp->eval_frame = eval_frame;
3001
0
    _PyEval_StartTheWorld(interp);
3002
0
}
3003
3004
3005
const PyConfig*
3006
_PyInterpreterState_GetConfig(PyInterpreterState *interp)
3007
142M
{
3008
142M
    return &interp->config;
3009
142M
}
3010
3011
3012
const PyConfig*
3013
_Py_GetConfig(void)
3014
163k
{
3015
163k
    PyThreadState *tstate = current_fast_get();
3016
163k
    _Py_EnsureTstateNotNULL(tstate);
3017
163k
    return _PyInterpreterState_GetConfig(tstate->interp);
3018
163k
}
3019
3020
3021
int
3022
_PyInterpreterState_HasFeature(PyInterpreterState *interp, unsigned long feature)
3023
0
{
3024
0
    return ((interp->feature_flags & feature) != 0);
3025
0
}
3026
3027
3028
315k
#define MINIMUM_OVERHEAD 1000
3029
3030
static PyObject **
3031
push_chunk(PyThreadState *tstate, int size)
3032
315k
{
3033
315k
    int allocate_size = _PY_DATA_STACK_CHUNK_SIZE;
3034
315k
    while (allocate_size < (int)sizeof(PyObject*)*(size + MINIMUM_OVERHEAD)) {
3035
0
        allocate_size *= 2;
3036
0
    }
3037
315k
    _PyStackChunk *new = allocate_chunk(allocate_size, tstate->datastack_chunk);
3038
315k
    if (new == NULL) {
3039
0
        return NULL;
3040
0
    }
3041
315k
    if (tstate->datastack_chunk) {
3042
315k
        tstate->datastack_chunk->top = tstate->datastack_top -
3043
315k
                                       &tstate->datastack_chunk->data[0];
3044
315k
    }
3045
315k
    tstate->datastack_chunk = new;
3046
315k
    tstate->datastack_limit = (PyObject **)(((char *)new) + allocate_size);
3047
    // When new is the "root" chunk (i.e. new->previous == NULL), we can keep
3048
    // _PyThreadState_PopFrame from freeing it later by "skipping" over the
3049
    // first element:
3050
315k
    PyObject **res = &new->data[new->previous == NULL];
3051
315k
    tstate->datastack_top = res + size;
3052
315k
    return res;
3053
315k
}
3054
3055
_PyInterpreterFrame *
3056
_PyThreadState_PushFrame(PyThreadState *tstate, size_t size)
3057
258M
{
3058
258M
    assert(size < INT_MAX/sizeof(PyObject *));
3059
258M
    if (_PyThreadState_HasStackSpace(tstate, (int)size)) {
3060
258M
        _PyInterpreterFrame *res = (_PyInterpreterFrame *)tstate->datastack_top;
3061
258M
        tstate->datastack_top += size;
3062
258M
        return res;
3063
258M
    }
3064
315k
    return (_PyInterpreterFrame *)push_chunk(tstate, (int)size);
3065
258M
}
3066
3067
void
3068
_PyThreadState_PopFrame(PyThreadState *tstate, _PyInterpreterFrame * frame)
3069
1.16G
{
3070
1.16G
    assert(tstate->datastack_chunk);
3071
1.16G
    PyObject **base = (PyObject **)frame;
3072
1.16G
    if (base == &tstate->datastack_chunk->data[0]) {
3073
315k
        _PyStackChunk *chunk = tstate->datastack_chunk;
3074
315k
        _PyStackChunk *previous = chunk->previous;
3075
        // push_chunk ensures that the root chunk is never popped:
3076
315k
        assert(previous);
3077
315k
        tstate->datastack_top = &previous->data[previous->top];
3078
315k
        tstate->datastack_chunk = previous;
3079
315k
        _PyObject_VirtualFree(chunk, chunk->size);
3080
315k
        tstate->datastack_limit = (PyObject **)(((char *)previous) + previous->size);
3081
315k
    }
3082
1.16G
    else {
3083
1.16G
        assert(tstate->datastack_top);
3084
1.16G
        assert(tstate->datastack_top >= base);
3085
1.16G
        tstate->datastack_top = base;
3086
1.16G
    }
3087
1.16G
}
3088
3089
3090
#ifndef NDEBUG
3091
// Check that a Python thread state valid. In practice, this function is used
3092
// on a Python debug build to check if 'tstate' is a dangling pointer, if the
3093
// PyThreadState memory has been freed.
3094
//
3095
// Usage:
3096
//
3097
//     assert(_PyThreadState_CheckConsistency(tstate));
3098
int
3099
_PyThreadState_CheckConsistency(PyThreadState *tstate)
3100
{
3101
    assert(!_PyMem_IsPtrFreed(tstate));
3102
    assert(!_PyMem_IsPtrFreed(tstate->interp));
3103
    return 1;
3104
}
3105
#endif
3106
3107
3108
// Check if a Python thread must call _PyThreadState_HangThread(), rather than
3109
// taking the GIL or attaching to the interpreter if Py_Finalize() has been
3110
// called.
3111
//
3112
// When this function is called by a daemon thread after Py_Finalize() has been
3113
// called, the GIL may no longer exist.
3114
//
3115
// tstate must be non-NULL.
3116
int
3117
_PyThreadState_MustExit(PyThreadState *tstate)
3118
6.24M
{
3119
6.24M
    int state = _Py_atomic_load_int_relaxed(&tstate->state);
3120
6.24M
    return state == _Py_THREAD_SHUTTING_DOWN;
3121
6.24M
}
3122
3123
void
3124
_PyThreadState_HangThread(PyThreadState *tstate)
3125
0
{
3126
0
    _PyThreadStateImpl *tstate_impl = (_PyThreadStateImpl *)tstate;
3127
0
    decref_threadstate(tstate_impl);
3128
0
    PyThread_hang_thread();
3129
0
}
3130
3131
/********************/
3132
/* mimalloc support */
3133
/********************/
3134
3135
static void
3136
tstate_mimalloc_bind(PyThreadState *tstate)
3137
28
{
3138
#ifdef Py_GIL_DISABLED
3139
    struct _mimalloc_thread_state *mts = &((_PyThreadStateImpl*)tstate)->mimalloc;
3140
3141
    // Initialize the mimalloc thread state. This must be called from the
3142
    // same thread that will use the thread state. The "mem" heap doubles as
3143
    // the "backing" heap.
3144
    mi_tld_t *tld = &mts->tld;
3145
    _mi_tld_init(tld, &mts->heaps[_Py_MIMALLOC_HEAP_MEM]);
3146
    llist_init(&mts->page_list);
3147
3148
    // Exiting threads push any remaining in-use segments to the abandoned
3149
    // pool to be re-claimed later by other threads. We use per-interpreter
3150
    // pools to keep Python objects from different interpreters separate.
3151
    tld->segments.abandoned = &tstate->interp->mimalloc.abandoned_pool;
3152
3153
    // Don't fill in the first N bytes up to ob_type in debug builds. We may
3154
    // access ob_tid and the refcount fields in the dict and list lock-less
3155
    // accesses, so they must remain valid for a while after deallocation.
3156
    size_t base_offset = offsetof(PyObject, ob_type);
3157
    if (_PyMem_DebugEnabled()) {
3158
        // The debug allocator adds two words at the beginning of each block.
3159
        base_offset += 2 * sizeof(size_t);
3160
    }
3161
    size_t debug_offsets[_Py_MIMALLOC_HEAP_COUNT] = {
3162
        [_Py_MIMALLOC_HEAP_OBJECT] = base_offset,
3163
        [_Py_MIMALLOC_HEAP_GC] = base_offset,
3164
        [_Py_MIMALLOC_HEAP_GC_PRE] = base_offset + 2 * sizeof(PyObject *),
3165
    };
3166
3167
    // Initialize each heap
3168
    for (uint8_t i = 0; i < _Py_MIMALLOC_HEAP_COUNT; i++) {
3169
        _mi_heap_init_ex(&mts->heaps[i], tld, _mi_arena_id_none(), false, i);
3170
        mts->heaps[i].debug_offset = (uint8_t)debug_offsets[i];
3171
    }
3172
3173
    // Heaps that store Python objects should use QSBR to delay freeing
3174
    // mimalloc pages while there may be concurrent lock-free readers.
3175
    mts->heaps[_Py_MIMALLOC_HEAP_OBJECT].page_use_qsbr = true;
3176
    mts->heaps[_Py_MIMALLOC_HEAP_GC].page_use_qsbr = true;
3177
    mts->heaps[_Py_MIMALLOC_HEAP_GC_PRE].page_use_qsbr = true;
3178
3179
    // By default, object allocations use _Py_MIMALLOC_HEAP_OBJECT.
3180
    // _PyObject_GC_New() and similar functions temporarily override this to
3181
    // use one of the GC heaps.
3182
    mts->current_object_heap = &mts->heaps[_Py_MIMALLOC_HEAP_OBJECT];
3183
3184
    _Py_atomic_store_int(&mts->initialized, 1);
3185
#endif
3186
28
}
3187
3188
void
3189
_PyThreadState_ClearMimallocHeaps(PyThreadState *tstate)
3190
0
{
3191
#ifdef Py_GIL_DISABLED
3192
    if (!tstate->_status.bound) {
3193
        // The mimalloc heaps are only initialized when the thread is bound.
3194
        return;
3195
    }
3196
3197
    _PyThreadStateImpl *tstate_impl = (_PyThreadStateImpl *)tstate;
3198
    for (Py_ssize_t i = 0; i < _Py_MIMALLOC_HEAP_COUNT; i++) {
3199
        // Abandon all segments in use by this thread. This pushes them to
3200
        // a shared pool to later be reclaimed by other threads. It's important
3201
        // to do this before the thread state is destroyed so that objects
3202
        // remain visible to the GC.
3203
        _mi_heap_collect_abandon(&tstate_impl->mimalloc.heaps[i]);
3204
    }
3205
#endif
3206
0
}
3207
3208
3209
int
3210
_Py_IsMainThread(void)
3211
111M
{
3212
111M
    unsigned long thread = PyThread_get_thread_ident();
3213
111M
    return (thread == _PyRuntime.main_thread);
3214
111M
}
3215
3216
3217
PyInterpreterState *
3218
_PyInterpreterState_Main(void)
3219
108M
{
3220
108M
    return _PyRuntime.interpreters.main;
3221
108M
}
3222
3223
3224
int
3225
_Py_IsMainInterpreterFinalizing(PyInterpreterState *interp)
3226
0
{
3227
    /* bpo-39877: Access _PyRuntime directly rather than using
3228
       tstate->interp->runtime to support calls from Python daemon threads.
3229
       After Py_Finalize() has been called, tstate can be a dangling pointer:
3230
       point to PyThreadState freed memory. */
3231
0
    return (_PyRuntimeState_GetFinalizing(&_PyRuntime) != NULL &&
3232
0
            interp == &_PyRuntime._main_interpreter);
3233
0
}
3234
3235
3236
const PyConfig *
3237
_Py_GetMainConfig(void)
3238
0
{
3239
0
    PyInterpreterState *interp = _PyInterpreterState_Main();
3240
0
    if (interp == NULL) {
3241
0
        return NULL;
3242
0
    }
3243
0
    return _PyInterpreterState_GetConfig(interp);
3244
0
}