Coverage Report

Created: 2026-01-09 06:57

next uncovered line (L), next uncovered region (R), next uncovered branch (B)
/src/cpython3/Python/pystate.c
Line
Count
Source
1
2
/* Thread and interpreter state structures and their interfaces */
3
4
#include "Python.h"
5
#include "pycore_abstract.h"      // _PyIndex_Check()
6
#include "pycore_audit.h"         // _Py_AuditHookEntry
7
#include "pycore_backoff.h"       // JUMP_BACKWARD_INITIAL_VALUE, SIDE_EXIT_INITIAL_VALUE
8
#include "pycore_ceval.h"         // _PyEval_AcquireLock()
9
#include "pycore_codecs.h"        // _PyCodec_Fini()
10
#include "pycore_critical_section.h" // _PyCriticalSection_Resume()
11
#include "pycore_dtoa.h"          // _dtoa_state_INIT()
12
#include "pycore_freelist.h"      // _PyObject_ClearFreeLists()
13
#include "pycore_initconfig.h"    // _PyStatus_OK()
14
#include "pycore_interpframe.h"   // _PyThreadState_HasStackSpace()
15
#include "pycore_object.h"        // _PyType_InitCache()
16
#include "pycore_obmalloc.h"      // _PyMem_obmalloc_state_on_heap()
17
#include "pycore_optimizer.h"     // JIT_CLEANUP_THRESHOLD
18
#include "pycore_parking_lot.h"   // _PyParkingLot_AfterFork()
19
#include "pycore_pyerrors.h"      // _PyErr_Clear()
20
#include "pycore_pylifecycle.h"   // _PyAST_Fini()
21
#include "pycore_pymem.h"         // _PyMem_DebugEnabled()
22
#include "pycore_runtime.h"       // _PyRuntime
23
#include "pycore_runtime_init.h"  // _PyRuntimeState_INIT
24
#include "pycore_stackref.h"      // Py_STACKREF_DEBUG
25
#include "pycore_stats.h"         // FT_STAT_WORLD_STOP_INC()
26
#include "pycore_time.h"          // _PyTime_Init()
27
#include "pycore_uop.h"           // UOP_BUFFER_SIZE
28
#include "pycore_uniqueid.h"      // _PyObject_FinalizePerThreadRefcounts()
29
30
31
/* --------------------------------------------------------------------------
32
CAUTION
33
34
Always use PyMem_RawMalloc() and PyMem_RawFree() directly in this file.  A
35
number of these functions are advertised as safe to call when the GIL isn't
36
held, and in a debug build Python redirects (e.g.) PyMem_NEW (etc) to Python's
37
debugging obmalloc functions.  Those aren't thread-safe (they rely on the GIL
38
to avoid the expense of doing their own locking).
39
-------------------------------------------------------------------------- */
40
41
#ifdef HAVE_DLOPEN
42
#  ifdef HAVE_DLFCN_H
43
#    include <dlfcn.h>
44
#  endif
45
#  if !HAVE_DECL_RTLD_LAZY
46
#    define RTLD_LAZY 1
47
#  endif
48
#endif
49
50
51
/****************************************/
52
/* helpers for the current thread state */
53
/****************************************/
54
55
// API for the current thread state is further down.
56
57
/* "current" means one of:
58
   - bound to the current OS thread
59
   - holds the GIL
60
 */
61
62
//-------------------------------------------------
63
// a highly efficient lookup for the current thread
64
//-------------------------------------------------
65
66
/*
67
   The stored thread state is set by PyThreadState_Swap().
68
69
   For each of these functions, the GIL must be held by the current thread.
70
 */
71
72
73
/* The attached thread state for the current thread. */
74
_Py_thread_local PyThreadState *_Py_tss_tstate = NULL;
75
76
/* The "bound" thread state used by PyGILState_Ensure(),
77
   also known as a "gilstate." */
78
_Py_thread_local PyThreadState *_Py_tss_gilstate = NULL;
79
80
/* The interpreter of the attached thread state,
81
   and is same as tstate->interp. */
82
_Py_thread_local PyInterpreterState *_Py_tss_interp = NULL;
83
84
static inline PyThreadState *
85
current_fast_get(void)
86
160M
{
87
160M
    return _Py_tss_tstate;
88
160M
}
89
90
static inline void
91
current_fast_set(_PyRuntimeState *Py_UNUSED(runtime), PyThreadState *tstate)
92
293k
{
93
293k
    assert(tstate != NULL);
94
293k
    _Py_tss_tstate = tstate;
95
293k
    assert(tstate->interp != NULL);
96
293k
    _Py_tss_interp = tstate->interp;
97
293k
}
98
99
static inline void
100
current_fast_clear(_PyRuntimeState *Py_UNUSED(runtime))
101
293k
{
102
293k
    _Py_tss_tstate = NULL;
103
293k
    _Py_tss_interp = NULL;
104
293k
}
105
106
#define tstate_verify_not_active(tstate) \
107
0
    if (tstate == current_fast_get()) { \
108
0
        _Py_FatalErrorFormat(__func__, "tstate %p is still current", tstate); \
109
0
    }
110
111
PyThreadState *
112
_PyThreadState_GetCurrent(void)
113
15.2M
{
114
15.2M
    return current_fast_get();
115
15.2M
}
116
117
118
//---------------------------------------------
119
// The thread state used by PyGILState_Ensure()
120
//---------------------------------------------
121
122
/*
123
   The stored thread state is set by bind_tstate() (AKA PyThreadState_Bind().
124
125
   The GIL does no need to be held for these.
126
  */
127
128
static inline PyThreadState *
129
gilstate_get(void)
130
293k
{
131
293k
    return _Py_tss_gilstate;
132
293k
}
133
134
static inline void
135
gilstate_set(PyThreadState *tstate)
136
22
{
137
22
    assert(tstate != NULL);
138
22
    _Py_tss_gilstate = tstate;
139
22
}
140
141
static inline void
142
gilstate_clear(void)
143
0
{
144
0
    _Py_tss_gilstate = NULL;
145
0
}
146
147
148
#ifndef NDEBUG
149
static inline int tstate_is_alive(PyThreadState *tstate);
150
151
static inline int
152
tstate_is_bound(PyThreadState *tstate)
153
587k
{
154
587k
    return tstate->_status.bound && !tstate->_status.unbound;
155
587k
}
156
#endif  // !NDEBUG
157
158
static void bind_gilstate_tstate(PyThreadState *);
159
static void unbind_gilstate_tstate(PyThreadState *);
160
161
static void tstate_mimalloc_bind(PyThreadState *);
162
163
static void
164
bind_tstate(PyThreadState *tstate)
165
22
{
166
22
    assert(tstate != NULL);
167
22
    assert(tstate_is_alive(tstate) && !tstate->_status.bound);
168
22
    assert(!tstate->_status.unbound);  // just in case
169
22
    assert(!tstate->_status.bound_gilstate);
170
22
    assert(tstate != gilstate_get());
171
22
    assert(!tstate->_status.active);
172
22
    assert(tstate->thread_id == 0);
173
22
    assert(tstate->native_thread_id == 0);
174
175
    // Currently we don't necessarily store the thread state
176
    // in thread-local storage (e.g. per-interpreter).
177
178
22
    tstate->thread_id = PyThread_get_thread_ident();
179
22
#ifdef PY_HAVE_THREAD_NATIVE_ID
180
22
    tstate->native_thread_id = PyThread_get_thread_native_id();
181
22
#endif
182
183
#ifdef Py_GIL_DISABLED
184
    // Initialize biased reference counting inter-thread queue. Note that this
185
    // needs to be initialized from the active thread.
186
    _Py_brc_init_thread(tstate);
187
#endif
188
189
    // mimalloc state needs to be initialized from the active thread.
190
22
    tstate_mimalloc_bind(tstate);
191
192
22
    tstate->_status.bound = 1;
193
22
}
194
195
static void
196
unbind_tstate(PyThreadState *tstate)
197
0
{
198
0
    assert(tstate != NULL);
199
0
    assert(tstate_is_bound(tstate));
200
0
#ifndef HAVE_PTHREAD_STUBS
201
0
    assert(tstate->thread_id > 0);
202
0
#endif
203
0
#ifdef PY_HAVE_THREAD_NATIVE_ID
204
0
    assert(tstate->native_thread_id > 0);
205
0
#endif
206
207
    // We leave thread_id and native_thread_id alone
208
    // since they can be useful for debugging.
209
    // Check the `_status` field to know if these values
210
    // are still valid.
211
212
    // We leave tstate->_status.bound set to 1
213
    // to indicate it was previously bound.
214
0
    tstate->_status.unbound = 1;
215
0
}
216
217
218
/* Stick the thread state for this thread in thread specific storage.
219
220
   When a thread state is created for a thread by some mechanism
221
   other than PyGILState_Ensure(), it's important that the GILState
222
   machinery knows about it so it doesn't try to create another
223
   thread state for the thread.
224
   (This is a better fix for SF bug #1010677 than the first one attempted.)
225
226
   The only situation where you can legitimately have more than one
227
   thread state for an OS level thread is when there are multiple
228
   interpreters.
229
230
   Before 3.12, the PyGILState_*() APIs didn't work with multiple
231
   interpreters (see bpo-10915 and bpo-15751), so this function used
232
   to set TSS only once.  Thus, the first thread state created for that
233
   given OS level thread would "win", which seemed reasonable behaviour.
234
*/
235
236
static void
237
bind_gilstate_tstate(PyThreadState *tstate)
238
22
{
239
22
    assert(tstate != NULL);
240
22
    assert(tstate_is_alive(tstate));
241
22
    assert(tstate_is_bound(tstate));
242
    // XXX assert(!tstate->_status.active);
243
22
    assert(!tstate->_status.bound_gilstate);
244
245
22
    PyThreadState *tcur = gilstate_get();
246
22
    assert(tstate != tcur);
247
248
22
    if (tcur != NULL) {
249
0
        tcur->_status.bound_gilstate = 0;
250
0
    }
251
22
    gilstate_set(tstate);
252
22
    tstate->_status.bound_gilstate = 1;
253
22
}
254
255
static void
256
unbind_gilstate_tstate(PyThreadState *tstate)
257
0
{
258
0
    assert(tstate != NULL);
259
    // XXX assert(tstate_is_alive(tstate));
260
0
    assert(tstate_is_bound(tstate));
261
    // XXX assert(!tstate->_status.active);
262
0
    assert(tstate->_status.bound_gilstate);
263
0
    assert(tstate == gilstate_get());
264
0
    gilstate_clear();
265
0
    tstate->_status.bound_gilstate = 0;
266
0
}
267
268
269
//----------------------------------------------
270
// the thread state that currently holds the GIL
271
//----------------------------------------------
272
273
/* This is not exported, as it is not reliable!  It can only
274
   ever be compared to the state for the *current* thread.
275
   * If not equal, then it doesn't matter that the actual
276
     value may change immediately after comparison, as it can't
277
     possibly change to the current thread's state.
278
   * If equal, then the current thread holds the lock, so the value can't
279
     change until we yield the lock.
280
*/
281
static int
282
holds_gil(PyThreadState *tstate)
283
0
{
284
    // XXX Fall back to tstate->interp->runtime->ceval.gil.last_holder
285
    // (and tstate->interp->runtime->ceval.gil.locked).
286
0
    assert(tstate != NULL);
287
    /* Must be the tstate for this thread */
288
0
    assert(tstate == gilstate_get());
289
0
    return tstate == current_fast_get();
290
0
}
291
292
293
/****************************/
294
/* the global runtime state */
295
/****************************/
296
297
//----------
298
// lifecycle
299
//----------
300
301
/* Suppress deprecation warning for PyBytesObject.ob_shash */
302
_Py_COMP_DIAG_PUSH
303
_Py_COMP_DIAG_IGNORE_DEPR_DECLS
304
/* We use "initial" if the runtime gets re-used
305
   (e.g. Py_Finalize() followed by Py_Initialize().
306
   Note that we initialize "initial" relative to _PyRuntime,
307
   to ensure pre-initialized pointers point to the active
308
   runtime state (and not "initial"). */
309
static const _PyRuntimeState initial = _PyRuntimeState_INIT(_PyRuntime, "");
310
_Py_COMP_DIAG_POP
311
312
#define LOCKS_INIT(runtime) \
313
0
    { \
314
0
        &(runtime)->interpreters.mutex, \
315
0
        &(runtime)->xi.data_lookup.registry.mutex, \
316
0
        &(runtime)->unicode_state.ids.mutex, \
317
0
        &(runtime)->imports.extensions.mutex, \
318
0
        &(runtime)->ceval.pending_mainthread.mutex, \
319
0
        &(runtime)->atexit.mutex, \
320
0
        &(runtime)->audit_hooks.mutex, \
321
0
        &(runtime)->allocators.mutex, \
322
0
        &(runtime)->_main_interpreter.types.mutex, \
323
0
        &(runtime)->_main_interpreter.code_state.mutex, \
324
0
    }
325
326
static void
327
init_runtime(_PyRuntimeState *runtime,
328
             void *open_code_hook, void *open_code_userdata,
329
             _Py_AuditHookEntry *audit_hook_head,
330
             Py_ssize_t unicode_next_index)
331
22
{
332
22
    assert(!runtime->preinitializing);
333
22
    assert(!runtime->preinitialized);
334
22
    assert(!runtime->core_initialized);
335
22
    assert(!runtime->initialized);
336
22
    assert(!runtime->_initialized);
337
338
22
    runtime->open_code_hook = open_code_hook;
339
22
    runtime->open_code_userdata = open_code_userdata;
340
22
    runtime->audit_hooks.head = audit_hook_head;
341
342
22
    PyPreConfig_InitPythonConfig(&runtime->preconfig);
343
344
    // Set it to the ID of the main thread of the main interpreter.
345
22
    runtime->main_thread = PyThread_get_thread_ident();
346
347
22
    runtime->unicode_state.ids.next_index = unicode_next_index;
348
22
    runtime->_initialized = 1;
349
22
}
350
351
PyStatus
352
_PyRuntimeState_Init(_PyRuntimeState *runtime)
353
22
{
354
    /* We preserve the hook across init, because there is
355
       currently no public API to set it between runtime
356
       initialization and interpreter initialization. */
357
22
    void *open_code_hook = runtime->open_code_hook;
358
22
    void *open_code_userdata = runtime->open_code_userdata;
359
22
    _Py_AuditHookEntry *audit_hook_head = runtime->audit_hooks.head;
360
    // bpo-42882: Preserve next_index value if Py_Initialize()/Py_Finalize()
361
    // is called multiple times.
362
22
    Py_ssize_t unicode_next_index = runtime->unicode_state.ids.next_index;
363
364
22
    if (runtime->_initialized) {
365
        // Py_Initialize() must be running again.
366
        // Reset to _PyRuntimeState_INIT.
367
0
        memcpy(runtime, &initial, sizeof(*runtime));
368
        // Preserve the cookie from the original runtime.
369
0
        memcpy(runtime->debug_offsets.cookie, _Py_Debug_Cookie, 8);
370
0
        assert(!runtime->_initialized);
371
0
    }
372
373
22
    PyStatus status = _PyTime_Init(&runtime->time);
374
22
    if (_PyStatus_EXCEPTION(status)) {
375
0
        return status;
376
0
    }
377
378
22
    init_runtime(runtime, open_code_hook, open_code_userdata, audit_hook_head,
379
22
                 unicode_next_index);
380
381
22
    return _PyStatus_OK();
382
22
}
383
384
void
385
_PyRuntimeState_Fini(_PyRuntimeState *runtime)
386
0
{
387
#ifdef Py_REF_DEBUG
388
    /* The count is cleared by _Py_FinalizeRefTotal(). */
389
    assert(runtime->object_state.interpreter_leaks == 0);
390
#endif
391
0
    gilstate_clear();
392
0
}
393
394
#ifdef HAVE_FORK
395
/* This function is called from PyOS_AfterFork_Child to ensure that
396
   newly created child processes do not share locks with the parent. */
397
PyStatus
398
_PyRuntimeState_ReInitThreads(_PyRuntimeState *runtime)
399
0
{
400
    // This was initially set in _PyRuntimeState_Init().
401
0
    runtime->main_thread = PyThread_get_thread_ident();
402
403
    // Clears the parking lot. Any waiting threads are dead. This must be
404
    // called before releasing any locks that use the parking lot.
405
0
    _PyParkingLot_AfterFork();
406
407
    // Re-initialize global locks
408
0
    PyMutex *locks[] = LOCKS_INIT(runtime);
409
0
    for (size_t i = 0; i < Py_ARRAY_LENGTH(locks); i++) {
410
0
        _PyMutex_at_fork_reinit(locks[i]);
411
0
    }
412
#ifdef Py_GIL_DISABLED
413
    for (PyInterpreterState *interp = runtime->interpreters.head;
414
         interp != NULL; interp = interp->next)
415
    {
416
        for (int i = 0; i < NUM_WEAKREF_LIST_LOCKS; i++) {
417
            _PyMutex_at_fork_reinit(&interp->weakref_locks[i]);
418
        }
419
    }
420
#endif
421
422
0
    _PyTypes_AfterFork();
423
424
0
    _PyThread_AfterFork(&runtime->threads);
425
426
0
    return _PyStatus_OK();
427
0
}
428
#endif
429
430
431
/*************************************/
432
/* the per-interpreter runtime state */
433
/*************************************/
434
435
//----------
436
// lifecycle
437
//----------
438
439
/* Calling this indicates that the runtime is ready to create interpreters. */
440
441
PyStatus
442
_PyInterpreterState_Enable(_PyRuntimeState *runtime)
443
22
{
444
22
    struct pyinterpreters *interpreters = &runtime->interpreters;
445
22
    interpreters->next_id = 0;
446
22
    return _PyStatus_OK();
447
22
}
448
449
static PyInterpreterState *
450
alloc_interpreter(void)
451
0
{
452
    // Aligned allocation for PyInterpreterState.
453
    // the first word of the memory block is used to store
454
    // the original pointer to be used later to free the memory.
455
0
    size_t alignment = _Alignof(PyInterpreterState);
456
0
    size_t allocsize = sizeof(PyInterpreterState) + sizeof(void *) + alignment - 1;
457
0
    void *mem = PyMem_RawCalloc(1, allocsize);
458
0
    if (mem == NULL) {
459
0
        return NULL;
460
0
    }
461
0
    void *ptr = _Py_ALIGN_UP((char *)mem + sizeof(void *), alignment);
462
0
    ((void **)ptr)[-1] = mem;
463
0
    assert(_Py_IS_ALIGNED(ptr, alignment));
464
0
    return ptr;
465
0
}
466
467
static void
468
free_interpreter(PyInterpreterState *interp)
469
0
{
470
#ifdef Py_STATS
471
    if (interp->pystats_struct) {
472
        PyMem_RawFree(interp->pystats_struct);
473
        interp->pystats_struct = NULL;
474
    }
475
#endif
476
    // The main interpreter is statically allocated so
477
    // should not be freed.
478
0
    if (interp != &_PyRuntime._main_interpreter) {
479
0
        if (_PyMem_obmalloc_state_on_heap(interp)) {
480
            // interpreter has its own obmalloc state, free it
481
0
            PyMem_RawFree(interp->obmalloc);
482
0
            interp->obmalloc = NULL;
483
0
        }
484
0
        assert(_Py_IS_ALIGNED(interp, _Alignof(PyInterpreterState)));
485
0
        PyMem_RawFree(((void **)interp)[-1]);
486
0
    }
487
0
}
488
489
#ifndef NDEBUG
490
static inline int check_interpreter_whence(long);
491
#endif
492
493
extern _Py_CODEUNIT *
494
_Py_LazyJitShim(
495
    struct _PyExecutorObject *exec, _PyInterpreterFrame *frame, _PyStackRef *stack_pointer, PyThreadState *tstate
496
);
497
498
/* Get the interpreter state to a minimal consistent state.
499
   Further init happens in pylifecycle.c before it can be used.
500
   All fields not initialized here are expected to be zeroed out,
501
   e.g. by PyMem_RawCalloc() or memset(), or otherwise pre-initialized.
502
   The runtime state is not manipulated.  Instead it is assumed that
503
   the interpreter is getting added to the runtime.
504
505
   Note that the main interpreter was statically initialized as part
506
   of the runtime and most state is already set properly.  That leaves
507
   a small number of fields to initialize dynamically, as well as some
508
   that are initialized lazily.
509
510
   For subinterpreters we memcpy() the main interpreter in
511
   PyInterpreterState_New(), leaving it in the same mostly-initialized
512
   state.  The only difference is that the interpreter has some
513
   self-referential state that is statically initializexd to the
514
   main interpreter.  We fix those fields here, in addition
515
   to the other dynamically initialized fields.
516
  */
517
static PyStatus
518
init_interpreter(PyInterpreterState *interp,
519
                 _PyRuntimeState *runtime, int64_t id,
520
                 PyInterpreterState *next,
521
                 long whence)
522
22
{
523
22
    if (interp->_initialized) {
524
0
        return _PyStatus_ERR("interpreter already initialized");
525
0
    }
526
527
22
    assert(interp->_whence == _PyInterpreterState_WHENCE_NOTSET);
528
22
    assert(check_interpreter_whence(whence) == 0);
529
22
    interp->_whence = whence;
530
531
22
    assert(runtime != NULL);
532
22
    interp->runtime = runtime;
533
534
22
    assert(id > 0 || (id == 0 && interp == runtime->interpreters.main));
535
22
    interp->id = id;
536
537
22
    interp->id_refcount = 0;
538
539
22
    assert(runtime->interpreters.head == interp);
540
22
    assert(next != NULL || (interp == runtime->interpreters.main));
541
22
    interp->next = next;
542
543
22
    interp->threads.preallocated = &interp->_initial_thread;
544
545
    // We would call _PyObject_InitState() at this point
546
    // if interp->feature_flags were alredy set.
547
548
22
    _PyEval_InitState(interp);
549
22
    _PyGC_InitState(&interp->gc);
550
22
    PyConfig_InitPythonConfig(&interp->config);
551
22
    _PyType_InitCache(interp);
552
#ifdef Py_GIL_DISABLED
553
    _Py_brc_init_state(interp);
554
#endif
555
556
22
    llist_init(&interp->mem_free_queue.head);
557
22
    llist_init(&interp->asyncio_tasks_head);
558
22
    interp->asyncio_tasks_lock = (PyMutex){0};
559
374
    for (int i = 0; i < _PY_MONITORING_UNGROUPED_EVENTS; i++) {
560
352
        interp->monitors.tools[i] = 0;
561
352
    }
562
198
    for (int t = 0; t < PY_MONITORING_TOOL_IDS; t++) {
563
3.52k
        for (int e = 0; e < _PY_MONITORING_EVENTS; e++) {
564
3.34k
            interp->monitoring_callables[t][e] = NULL;
565
566
3.34k
        }
567
176
        interp->monitoring_tool_versions[t] = 0;
568
176
    }
569
22
    interp->_code_object_generation = 0;
570
22
    interp->jit = false;
571
22
    interp->compiling = false;
572
22
    interp->executor_list_head = NULL;
573
22
    interp->executor_deletion_list_head = NULL;
574
22
    interp->executor_creation_counter = JIT_CLEANUP_THRESHOLD;
575
22
    if (interp != &runtime->_main_interpreter) {
576
        /* Fix the self-referential, statically initialized fields. */
577
0
        interp->dtoa = (struct _dtoa_state)_dtoa_state_INIT(interp);
578
0
    }
579
#if !defined(Py_GIL_DISABLED) && defined(Py_STACKREF_DEBUG)
580
    interp->next_stackref = INITIAL_STACKREF_INDEX;
581
    _Py_hashtable_allocator_t alloc = {
582
        .malloc = malloc,
583
        .free = free,
584
    };
585
    interp->open_stackrefs_table = _Py_hashtable_new_full(
586
        _Py_hashtable_hash_ptr,
587
        _Py_hashtable_compare_direct,
588
        NULL,
589
        NULL,
590
        &alloc
591
    );
592
#  ifdef Py_STACKREF_CLOSE_DEBUG
593
    interp->closed_stackrefs_table = _Py_hashtable_new_full(
594
        _Py_hashtable_hash_ptr,
595
        _Py_hashtable_compare_direct,
596
        NULL,
597
        NULL,
598
        &alloc
599
    );
600
#  endif
601
    _Py_stackref_associate(interp, Py_None, PyStackRef_None);
602
    _Py_stackref_associate(interp, Py_False, PyStackRef_False);
603
    _Py_stackref_associate(interp, Py_True, PyStackRef_True);
604
#endif
605
606
22
    interp->_initialized = 1;
607
22
    return _PyStatus_OK();
608
22
}
609
610
611
PyStatus
612
_PyInterpreterState_New(PyThreadState *tstate, PyInterpreterState **pinterp)
613
22
{
614
22
    *pinterp = NULL;
615
616
    // Don't get runtime from tstate since tstate can be NULL
617
22
    _PyRuntimeState *runtime = &_PyRuntime;
618
619
    // tstate is NULL when pycore_create_interpreter() calls
620
    // _PyInterpreterState_New() to create the main interpreter.
621
22
    if (tstate != NULL) {
622
0
        if (_PySys_Audit(tstate, "cpython.PyInterpreterState_New", NULL) < 0) {
623
0
            return _PyStatus_ERR("sys.audit failed");
624
0
        }
625
0
    }
626
627
    /* We completely serialize creation of multiple interpreters, since
628
       it simplifies things here and blocking concurrent calls isn't a problem.
629
       Regardless, we must fully block subinterpreter creation until
630
       after the main interpreter is created. */
631
22
    HEAD_LOCK(runtime);
632
633
22
    struct pyinterpreters *interpreters = &runtime->interpreters;
634
22
    int64_t id = interpreters->next_id;
635
22
    interpreters->next_id += 1;
636
637
    // Allocate the interpreter and add it to the runtime state.
638
22
    PyInterpreterState *interp;
639
22
    PyStatus status;
640
22
    PyInterpreterState *old_head = interpreters->head;
641
22
    if (old_head == NULL) {
642
        // We are creating the main interpreter.
643
22
        assert(interpreters->main == NULL);
644
22
        assert(id == 0);
645
646
22
        interp = &runtime->_main_interpreter;
647
22
        assert(interp->id == 0);
648
22
        assert(interp->next == NULL);
649
650
22
        interpreters->main = interp;
651
22
    }
652
0
    else {
653
0
        assert(interpreters->main != NULL);
654
0
        assert(id != 0);
655
656
0
        interp = alloc_interpreter();
657
0
        if (interp == NULL) {
658
0
            status = _PyStatus_NO_MEMORY();
659
0
            goto error;
660
0
        }
661
        // Set to _PyInterpreterState_INIT.
662
0
        memcpy(interp, &initial._main_interpreter, sizeof(*interp));
663
664
0
        if (id < 0) {
665
            /* overflow or Py_Initialize() not called yet! */
666
0
            status = _PyStatus_ERR("failed to get an interpreter ID");
667
0
            goto error;
668
0
        }
669
0
    }
670
22
    interpreters->head = interp;
671
672
22
    long whence = _PyInterpreterState_WHENCE_UNKNOWN;
673
22
    status = init_interpreter(interp, runtime,
674
22
                              id, old_head, whence);
675
22
    if (_PyStatus_EXCEPTION(status)) {
676
0
        goto error;
677
0
    }
678
679
22
    HEAD_UNLOCK(runtime);
680
681
22
    assert(interp != NULL);
682
22
    *pinterp = interp;
683
22
    return _PyStatus_OK();
684
685
0
error:
686
0
    HEAD_UNLOCK(runtime);
687
688
0
    if (interp != NULL) {
689
0
        free_interpreter(interp);
690
0
    }
691
0
    return status;
692
22
}
693
694
695
PyInterpreterState *
696
PyInterpreterState_New(void)
697
0
{
698
    // tstate can be NULL
699
0
    PyThreadState *tstate = current_fast_get();
700
701
0
    PyInterpreterState *interp;
702
0
    PyStatus status = _PyInterpreterState_New(tstate, &interp);
703
0
    if (_PyStatus_EXCEPTION(status)) {
704
0
        Py_ExitStatusException(status);
705
0
    }
706
0
    assert(interp != NULL);
707
0
    return interp;
708
0
}
709
710
#if !defined(Py_GIL_DISABLED) && defined(Py_STACKREF_DEBUG)
711
extern void
712
_Py_stackref_report_leaks(PyInterpreterState *interp);
713
#endif
714
715
static void
716
interpreter_clear(PyInterpreterState *interp, PyThreadState *tstate)
717
0
{
718
0
    assert(interp != NULL);
719
0
    assert(tstate != NULL);
720
0
    _PyRuntimeState *runtime = interp->runtime;
721
722
    /* XXX Conditions we need to enforce:
723
724
       * the GIL must be held by the current thread
725
       * tstate must be the "current" thread state (current_fast_get())
726
       * tstate->interp must be interp
727
       * for the main interpreter, tstate must be the main thread
728
     */
729
    // XXX Ideally, we would not rely on any thread state in this function
730
    // (and we would drop the "tstate" argument).
731
732
0
    if (_PySys_Audit(tstate, "cpython.PyInterpreterState_Clear", NULL) < 0) {
733
0
        _PyErr_Clear(tstate);
734
0
    }
735
736
    // Clear the current/main thread state last.
737
0
    _Py_FOR_EACH_TSTATE_BEGIN(interp, p) {
738
        // See https://github.com/python/cpython/issues/102126
739
        // Must be called without HEAD_LOCK held as it can deadlock
740
        // if any finalizer tries to acquire that lock.
741
0
        HEAD_UNLOCK(runtime);
742
0
        PyThreadState_Clear(p);
743
0
        HEAD_LOCK(runtime);
744
0
    }
745
0
    _Py_FOR_EACH_TSTATE_END(interp);
746
0
    if (tstate->interp == interp) {
747
        /* We fix tstate->_status below when we for sure aren't using it
748
           (e.g. no longer need the GIL). */
749
        // XXX Eliminate the need to do this.
750
0
        tstate->_status.cleared = 0;
751
0
    }
752
753
    /* It is possible that any of the objects below have a finalizer
754
       that runs Python code or otherwise relies on a thread state
755
       or even the interpreter state.  For now we trust that isn't
756
       a problem.
757
     */
758
    // XXX Make sure we properly deal with problematic finalizers.
759
760
0
    Py_CLEAR(interp->audit_hooks);
761
762
    // gh-140257: Threads have already been cleared, but daemon threads may
763
    // still access eval_breaker atomically via take_gil() right before they
764
    // hang. Use an atomic store to prevent data races during finalization.
765
0
    interp->ceval.instrumentation_version = 0;
766
0
    _Py_atomic_store_uintptr(&tstate->eval_breaker, 0);
767
768
0
    for (int i = 0; i < _PY_MONITORING_UNGROUPED_EVENTS; i++) {
769
0
        interp->monitors.tools[i] = 0;
770
0
    }
771
0
    for (int t = 0; t < PY_MONITORING_TOOL_IDS; t++) {
772
0
        for (int e = 0; e < _PY_MONITORING_EVENTS; e++) {
773
0
            Py_CLEAR(interp->monitoring_callables[t][e]);
774
0
        }
775
0
    }
776
0
    for (int t = 0; t < PY_MONITORING_TOOL_IDS; t++) {
777
0
        Py_CLEAR(interp->monitoring_tool_names[t]);
778
0
    }
779
0
    interp->_code_object_generation = 0;
780
#ifdef Py_GIL_DISABLED
781
    interp->tlbc_indices.tlbc_generation = 0;
782
#endif
783
784
0
    PyConfig_Clear(&interp->config);
785
0
    _PyCodec_Fini(interp);
786
787
0
    assert(interp->imports.modules == NULL);
788
0
    assert(interp->imports.modules_by_index == NULL);
789
0
    assert(interp->imports.importlib == NULL);
790
0
    assert(interp->imports.import_func == NULL);
791
792
0
    Py_CLEAR(interp->sysdict_copy);
793
0
    Py_CLEAR(interp->builtins_copy);
794
0
    Py_CLEAR(interp->dict);
795
0
#ifdef HAVE_FORK
796
0
    Py_CLEAR(interp->before_forkers);
797
0
    Py_CLEAR(interp->after_forkers_parent);
798
0
    Py_CLEAR(interp->after_forkers_child);
799
0
#endif
800
801
802
#ifdef _Py_TIER2
803
    _Py_ClearExecutorDeletionList(interp);
804
#endif
805
0
    _PyAST_Fini(interp);
806
0
    _PyAtExit_Fini(interp);
807
808
    // All Python types must be destroyed before the last GC collection. Python
809
    // types create a reference cycle to themselves in their in their
810
    // PyTypeObject.tp_mro member (the tuple contains the type).
811
812
    /* Last garbage collection on this interpreter */
813
0
    _PyGC_CollectNoFail(tstate);
814
0
    _PyGC_Fini(interp);
815
816
    // Finalize warnings after last gc so that any finalizers can
817
    // access warnings state
818
0
    _PyWarnings_Fini(interp);
819
0
    struct _PyExecutorObject *cold = interp->cold_executor;
820
0
    if (cold != NULL) {
821
0
        interp->cold_executor = NULL;
822
0
        assert(cold->vm_data.valid);
823
0
        assert(cold->vm_data.warm);
824
0
        _PyExecutor_Free(cold);
825
0
    }
826
827
0
    struct _PyExecutorObject *cold_dynamic = interp->cold_dynamic_executor;
828
0
    if (cold_dynamic != NULL) {
829
0
        interp->cold_dynamic_executor = NULL;
830
0
        assert(cold_dynamic->vm_data.valid);
831
0
        assert(cold_dynamic->vm_data.warm);
832
0
        _PyExecutor_Free(cold_dynamic);
833
0
    }
834
    /* We don't clear sysdict and builtins until the end of this function.
835
       Because clearing other attributes can execute arbitrary Python code
836
       which requires sysdict and builtins. */
837
0
    PyDict_Clear(interp->sysdict);
838
0
    PyDict_Clear(interp->builtins);
839
0
    Py_CLEAR(interp->sysdict);
840
0
    Py_CLEAR(interp->builtins);
841
842
#if !defined(Py_GIL_DISABLED) && defined(Py_STACKREF_DEBUG)
843
#  ifdef Py_STACKREF_CLOSE_DEBUG
844
    _Py_hashtable_destroy(interp->closed_stackrefs_table);
845
    interp->closed_stackrefs_table = NULL;
846
#  endif
847
    _Py_stackref_report_leaks(interp);
848
    _Py_hashtable_destroy(interp->open_stackrefs_table);
849
    interp->open_stackrefs_table = NULL;
850
#endif
851
852
0
    if (tstate->interp == interp) {
853
        /* We are now safe to fix tstate->_status.cleared. */
854
        // XXX Do this (much) earlier?
855
0
        tstate->_status.cleared = 1;
856
0
    }
857
858
0
    for (int i=0; i < DICT_MAX_WATCHERS; i++) {
859
0
        interp->dict_state.watchers[i] = NULL;
860
0
    }
861
862
0
    for (int i=0; i < TYPE_MAX_WATCHERS; i++) {
863
0
        interp->type_watchers[i] = NULL;
864
0
    }
865
866
0
    for (int i=0; i < FUNC_MAX_WATCHERS; i++) {
867
0
        interp->func_watchers[i] = NULL;
868
0
    }
869
0
    interp->active_func_watchers = 0;
870
871
0
    for (int i=0; i < CODE_MAX_WATCHERS; i++) {
872
0
        interp->code_watchers[i] = NULL;
873
0
    }
874
0
    interp->active_code_watchers = 0;
875
876
0
    for (int i=0; i < CONTEXT_MAX_WATCHERS; i++) {
877
0
        interp->context_watchers[i] = NULL;
878
0
    }
879
0
    interp->active_context_watchers = 0;
880
    // XXX Once we have one allocator per interpreter (i.e.
881
    // per-interpreter GC) we must ensure that all of the interpreter's
882
    // objects have been cleaned up at the point.
883
884
    // We could clear interp->threads.freelist here
885
    // if it held more than just the initial thread state.
886
0
}
887
888
889
void
890
PyInterpreterState_Clear(PyInterpreterState *interp)
891
0
{
892
    // Use the current Python thread state to call audit hooks and to collect
893
    // garbage. It can be different than the current Python thread state
894
    // of 'interp'.
895
0
    PyThreadState *current_tstate = current_fast_get();
896
0
    _PyImport_ClearCore(interp);
897
0
    interpreter_clear(interp, current_tstate);
898
0
}
899
900
901
void
902
_PyInterpreterState_Clear(PyThreadState *tstate)
903
0
{
904
0
    _PyImport_ClearCore(tstate->interp);
905
0
    interpreter_clear(tstate->interp, tstate);
906
0
}
907
908
909
static inline void tstate_deactivate(PyThreadState *tstate);
910
static void tstate_set_detached(PyThreadState *tstate, int detached_state);
911
static void zapthreads(PyInterpreterState *interp);
912
913
void
914
PyInterpreterState_Delete(PyInterpreterState *interp)
915
0
{
916
0
    _PyRuntimeState *runtime = interp->runtime;
917
0
    struct pyinterpreters *interpreters = &runtime->interpreters;
918
919
    // XXX Clearing the "current" thread state should happen before
920
    // we start finalizing the interpreter (or the current thread state).
921
0
    PyThreadState *tcur = current_fast_get();
922
0
    if (tcur != NULL && interp == tcur->interp) {
923
        /* Unset current thread.  After this, many C API calls become crashy. */
924
0
        _PyThreadState_Detach(tcur);
925
0
    }
926
927
0
    zapthreads(interp);
928
929
    // XXX These two calls should be done at the end of clear_interpreter(),
930
    // but currently some objects get decref'ed after that.
931
#ifdef Py_REF_DEBUG
932
    _PyInterpreterState_FinalizeRefTotal(interp);
933
#endif
934
0
    _PyInterpreterState_FinalizeAllocatedBlocks(interp);
935
936
0
    HEAD_LOCK(runtime);
937
0
    PyInterpreterState **p;
938
0
    for (p = &interpreters->head; ; p = &(*p)->next) {
939
0
        if (*p == NULL) {
940
0
            Py_FatalError("NULL interpreter");
941
0
        }
942
0
        if (*p == interp) {
943
0
            break;
944
0
        }
945
0
    }
946
0
    if (interp->threads.head != NULL) {
947
0
        Py_FatalError("remaining threads");
948
0
    }
949
0
    *p = interp->next;
950
951
0
    if (interpreters->main == interp) {
952
0
        interpreters->main = NULL;
953
0
        if (interpreters->head != NULL) {
954
0
            Py_FatalError("remaining subinterpreters");
955
0
        }
956
0
    }
957
0
    HEAD_UNLOCK(runtime);
958
959
0
    _Py_qsbr_fini(interp);
960
961
0
    _PyObject_FiniState(interp);
962
963
0
    PyConfig_Clear(&interp->config);
964
965
0
    free_interpreter(interp);
966
0
}
967
968
969
#ifdef HAVE_FORK
970
/*
971
 * Delete all interpreter states except the main interpreter.  If there
972
 * is a current interpreter state, it *must* be the main interpreter.
973
 */
974
PyStatus
975
_PyInterpreterState_DeleteExceptMain(_PyRuntimeState *runtime)
976
0
{
977
0
    struct pyinterpreters *interpreters = &runtime->interpreters;
978
979
0
    PyThreadState *tstate = _PyThreadState_Swap(runtime, NULL);
980
0
    if (tstate != NULL && tstate->interp != interpreters->main) {
981
0
        return _PyStatus_ERR("not main interpreter");
982
0
    }
983
984
0
    HEAD_LOCK(runtime);
985
0
    PyInterpreterState *interp = interpreters->head;
986
0
    interpreters->head = NULL;
987
0
    while (interp != NULL) {
988
0
        if (interp == interpreters->main) {
989
0
            interpreters->main->next = NULL;
990
0
            interpreters->head = interp;
991
0
            interp = interp->next;
992
0
            continue;
993
0
        }
994
995
        // XXX Won't this fail since PyInterpreterState_Clear() requires
996
        // the "current" tstate to be set?
997
0
        PyInterpreterState_Clear(interp);  // XXX must activate?
998
0
        zapthreads(interp);
999
0
        PyInterpreterState *prev_interp = interp;
1000
0
        interp = interp->next;
1001
0
        free_interpreter(prev_interp);
1002
0
    }
1003
0
    HEAD_UNLOCK(runtime);
1004
1005
0
    if (interpreters->head == NULL) {
1006
0
        return _PyStatus_ERR("missing main interpreter");
1007
0
    }
1008
0
    _PyThreadState_Swap(runtime, tstate);
1009
0
    return _PyStatus_OK();
1010
0
}
1011
#endif
1012
1013
static inline void
1014
set_main_thread(PyInterpreterState *interp, PyThreadState *tstate)
1015
0
{
1016
0
    _Py_atomic_store_ptr_relaxed(&interp->threads.main, tstate);
1017
0
}
1018
1019
static inline PyThreadState *
1020
get_main_thread(PyInterpreterState *interp)
1021
0
{
1022
0
    return _Py_atomic_load_ptr_relaxed(&interp->threads.main);
1023
0
}
1024
1025
void
1026
_PyErr_SetInterpreterAlreadyRunning(void)
1027
0
{
1028
0
    PyErr_SetString(PyExc_InterpreterError, "interpreter already running");
1029
0
}
1030
1031
int
1032
_PyInterpreterState_SetRunningMain(PyInterpreterState *interp)
1033
0
{
1034
0
    if (get_main_thread(interp) != NULL) {
1035
0
        _PyErr_SetInterpreterAlreadyRunning();
1036
0
        return -1;
1037
0
    }
1038
0
    PyThreadState *tstate = current_fast_get();
1039
0
    _Py_EnsureTstateNotNULL(tstate);
1040
0
    if (tstate->interp != interp) {
1041
0
        PyErr_SetString(PyExc_RuntimeError,
1042
0
                        "current tstate has wrong interpreter");
1043
0
        return -1;
1044
0
    }
1045
0
    set_main_thread(interp, tstate);
1046
1047
0
    return 0;
1048
0
}
1049
1050
void
1051
_PyInterpreterState_SetNotRunningMain(PyInterpreterState *interp)
1052
0
{
1053
0
    assert(get_main_thread(interp) == current_fast_get());
1054
0
    set_main_thread(interp, NULL);
1055
0
}
1056
1057
int
1058
_PyInterpreterState_IsRunningMain(PyInterpreterState *interp)
1059
0
{
1060
0
    if (get_main_thread(interp) != NULL) {
1061
0
        return 1;
1062
0
    }
1063
    // Embedders might not know to call _PyInterpreterState_SetRunningMain(),
1064
    // so their main thread wouldn't show it is running the main interpreter's
1065
    // program.  (Py_Main() doesn't have this problem.)  For now this isn't
1066
    // critical.  If it were, we would need to infer "running main" from other
1067
    // information, like if it's the main interpreter.  We used to do that
1068
    // but the naive approach led to some inconsistencies that caused problems.
1069
0
    return 0;
1070
0
}
1071
1072
int
1073
_PyThreadState_IsRunningMain(PyThreadState *tstate)
1074
0
{
1075
0
    PyInterpreterState *interp = tstate->interp;
1076
    // See the note in _PyInterpreterState_IsRunningMain() about
1077
    // possible false negatives here for embedders.
1078
0
    return get_main_thread(interp) == tstate;
1079
0
}
1080
1081
void
1082
_PyInterpreterState_ReinitRunningMain(PyThreadState *tstate)
1083
0
{
1084
0
    PyInterpreterState *interp = tstate->interp;
1085
0
    if (get_main_thread(interp) != tstate) {
1086
0
        set_main_thread(interp, NULL);
1087
0
    }
1088
0
}
1089
1090
1091
//----------
1092
// accessors
1093
//----------
1094
1095
int
1096
_PyInterpreterState_IsReady(PyInterpreterState *interp)
1097
0
{
1098
0
    return interp->_ready;
1099
0
}
1100
1101
#ifndef NDEBUG
1102
static inline int
1103
check_interpreter_whence(long whence)
1104
44
{
1105
44
    if(whence < 0) {
1106
0
        return -1;
1107
0
    }
1108
44
    if (whence > _PyInterpreterState_WHENCE_MAX) {
1109
0
        return -1;
1110
0
    }
1111
44
    return 0;
1112
44
}
1113
#endif
1114
1115
long
1116
_PyInterpreterState_GetWhence(PyInterpreterState *interp)
1117
0
{
1118
0
    assert(check_interpreter_whence(interp->_whence) == 0);
1119
0
    return interp->_whence;
1120
0
}
1121
1122
void
1123
_PyInterpreterState_SetWhence(PyInterpreterState *interp, long whence)
1124
22
{
1125
22
    assert(interp->_whence != _PyInterpreterState_WHENCE_NOTSET);
1126
22
    assert(check_interpreter_whence(whence) == 0);
1127
22
    interp->_whence = whence;
1128
22
}
1129
1130
1131
PyObject *
1132
_Py_GetMainModule(PyThreadState *tstate)
1133
0
{
1134
    // We return None to indicate "not found" or "bogus".
1135
0
    PyObject *modules = _PyImport_GetModulesRef(tstate->interp);
1136
0
    if (modules == Py_None) {
1137
0
        return modules;
1138
0
    }
1139
0
    PyObject *module = NULL;
1140
0
    (void)PyMapping_GetOptionalItem(modules, &_Py_ID(__main__), &module);
1141
0
    Py_DECREF(modules);
1142
0
    if (module == NULL && !PyErr_Occurred()) {
1143
0
        Py_RETURN_NONE;
1144
0
    }
1145
0
    return module;
1146
0
}
1147
1148
int
1149
_Py_CheckMainModule(PyObject *module)
1150
0
{
1151
0
    if (module == NULL || module == Py_None) {
1152
0
        if (!PyErr_Occurred()) {
1153
0
            (void)_PyErr_SetModuleNotFoundError(&_Py_ID(__main__));
1154
0
        }
1155
0
        return -1;
1156
0
    }
1157
0
    if (!Py_IS_TYPE(module, &PyModule_Type)) {
1158
        /* The __main__ module has been tampered with. */
1159
0
        PyObject *msg = PyUnicode_FromString("invalid __main__ module");
1160
0
        if (msg != NULL) {
1161
0
            (void)PyErr_SetImportError(msg, &_Py_ID(__main__), NULL);
1162
0
            Py_DECREF(msg);
1163
0
        }
1164
0
        return -1;
1165
0
    }
1166
0
    return 0;
1167
0
}
1168
1169
1170
PyObject *
1171
PyInterpreterState_GetDict(PyInterpreterState *interp)
1172
0
{
1173
0
    if (interp->dict == NULL) {
1174
0
        interp->dict = PyDict_New();
1175
0
        if (interp->dict == NULL) {
1176
0
            PyErr_Clear();
1177
0
        }
1178
0
    }
1179
    /* Returning NULL means no per-interpreter dict is available. */
1180
0
    return interp->dict;
1181
0
}
1182
1183
1184
//----------
1185
// interp ID
1186
//----------
1187
1188
int64_t
1189
_PyInterpreterState_ObjectToID(PyObject *idobj)
1190
0
{
1191
0
    if (!_PyIndex_Check(idobj)) {
1192
0
        PyErr_Format(PyExc_TypeError,
1193
0
                     "interpreter ID must be an int, got %.100s",
1194
0
                     Py_TYPE(idobj)->tp_name);
1195
0
        return -1;
1196
0
    }
1197
1198
    // This may raise OverflowError.
1199
    // For now, we don't worry about if LLONG_MAX < INT64_MAX.
1200
0
    long long id = PyLong_AsLongLong(idobj);
1201
0
    if (id == -1 && PyErr_Occurred()) {
1202
0
        return -1;
1203
0
    }
1204
1205
0
    if (id < 0) {
1206
0
        PyErr_Format(PyExc_ValueError,
1207
0
                     "interpreter ID must be a non-negative int, got %R",
1208
0
                     idobj);
1209
0
        return -1;
1210
0
    }
1211
#if LLONG_MAX > INT64_MAX
1212
    else if (id > INT64_MAX) {
1213
        PyErr_SetString(PyExc_OverflowError, "int too big to convert");
1214
        return -1;
1215
    }
1216
#endif
1217
0
    else {
1218
0
        return (int64_t)id;
1219
0
    }
1220
0
}
1221
1222
int64_t
1223
PyInterpreterState_GetID(PyInterpreterState *interp)
1224
0
{
1225
0
    if (interp == NULL) {
1226
0
        PyErr_SetString(PyExc_RuntimeError, "no interpreter provided");
1227
0
        return -1;
1228
0
    }
1229
0
    return interp->id;
1230
0
}
1231
1232
PyObject *
1233
_PyInterpreterState_GetIDObject(PyInterpreterState *interp)
1234
0
{
1235
0
    int64_t interpid = interp->id;
1236
0
    if (interpid < 0) {
1237
0
        return NULL;
1238
0
    }
1239
0
    assert(interpid < LLONG_MAX);
1240
0
    return PyLong_FromLongLong(interpid);
1241
0
}
1242
1243
1244
1245
void
1246
_PyInterpreterState_IDIncref(PyInterpreterState *interp)
1247
0
{
1248
0
    _Py_atomic_add_ssize(&interp->id_refcount, 1);
1249
0
}
1250
1251
1252
void
1253
_PyInterpreterState_IDDecref(PyInterpreterState *interp)
1254
0
{
1255
0
    _PyRuntimeState *runtime = interp->runtime;
1256
1257
0
    Py_ssize_t refcount = _Py_atomic_add_ssize(&interp->id_refcount, -1);
1258
1259
0
    if (refcount == 1 && interp->requires_idref) {
1260
0
        PyThreadState *tstate =
1261
0
            _PyThreadState_NewBound(interp, _PyThreadState_WHENCE_FINI);
1262
1263
        // XXX Possible GILState issues?
1264
0
        PyThreadState *save_tstate = _PyThreadState_Swap(runtime, tstate);
1265
0
        Py_EndInterpreter(tstate);
1266
0
        _PyThreadState_Swap(runtime, save_tstate);
1267
0
    }
1268
0
}
1269
1270
int
1271
_PyInterpreterState_RequiresIDRef(PyInterpreterState *interp)
1272
0
{
1273
0
    return interp->requires_idref;
1274
0
}
1275
1276
void
1277
_PyInterpreterState_RequireIDRef(PyInterpreterState *interp, int required)
1278
0
{
1279
0
    interp->requires_idref = required ? 1 : 0;
1280
0
}
1281
1282
1283
//-----------------------------
1284
// look up an interpreter state
1285
//-----------------------------
1286
1287
/* Return the interpreter associated with the current OS thread.
1288
1289
   The GIL must be held.
1290
  */
1291
1292
PyInterpreterState*
1293
PyInterpreterState_Get(void)
1294
0
{
1295
0
    _Py_AssertHoldsTstate();
1296
0
    PyInterpreterState *interp = _Py_tss_interp;
1297
0
    if (interp == NULL) {
1298
0
        Py_FatalError("no current interpreter");
1299
0
    }
1300
0
    return interp;
1301
0
}
1302
1303
1304
static PyInterpreterState *
1305
interp_look_up_id(_PyRuntimeState *runtime, int64_t requested_id)
1306
0
{
1307
0
    PyInterpreterState *interp = runtime->interpreters.head;
1308
0
    while (interp != NULL) {
1309
0
        int64_t id = interp->id;
1310
0
        assert(id >= 0);
1311
0
        if (requested_id == id) {
1312
0
            return interp;
1313
0
        }
1314
0
        interp = PyInterpreterState_Next(interp);
1315
0
    }
1316
0
    return NULL;
1317
0
}
1318
1319
/* Return the interpreter state with the given ID.
1320
1321
   Fail with RuntimeError if the interpreter is not found. */
1322
1323
PyInterpreterState *
1324
_PyInterpreterState_LookUpID(int64_t requested_id)
1325
0
{
1326
0
    PyInterpreterState *interp = NULL;
1327
0
    if (requested_id >= 0) {
1328
0
        _PyRuntimeState *runtime = &_PyRuntime;
1329
0
        HEAD_LOCK(runtime);
1330
0
        interp = interp_look_up_id(runtime, requested_id);
1331
0
        HEAD_UNLOCK(runtime);
1332
0
    }
1333
0
    if (interp == NULL && !PyErr_Occurred()) {
1334
0
        PyErr_Format(PyExc_InterpreterNotFoundError,
1335
0
                     "unrecognized interpreter ID %lld", requested_id);
1336
0
    }
1337
0
    return interp;
1338
0
}
1339
1340
PyInterpreterState *
1341
_PyInterpreterState_LookUpIDObject(PyObject *requested_id)
1342
0
{
1343
0
    int64_t id = _PyInterpreterState_ObjectToID(requested_id);
1344
0
    if (id < 0) {
1345
0
        return NULL;
1346
0
    }
1347
0
    return _PyInterpreterState_LookUpID(id);
1348
0
}
1349
1350
1351
/********************************/
1352
/* the per-thread runtime state */
1353
/********************************/
1354
1355
#ifndef NDEBUG
1356
static inline int
1357
tstate_is_alive(PyThreadState *tstate)
1358
44
{
1359
44
    return (tstate->_status.initialized &&
1360
44
            !tstate->_status.finalized &&
1361
44
            !tstate->_status.cleared &&
1362
44
            !tstate->_status.finalizing);
1363
44
}
1364
#endif
1365
1366
1367
//----------
1368
// lifecycle
1369
//----------
1370
1371
static _PyStackChunk*
1372
allocate_chunk(int size_in_bytes, _PyStackChunk* previous)
1373
386k
{
1374
386k
    assert(size_in_bytes % sizeof(PyObject **) == 0);
1375
386k
    _PyStackChunk *res = _PyObject_VirtualAlloc(size_in_bytes);
1376
386k
    if (res == NULL) {
1377
0
        return NULL;
1378
0
    }
1379
386k
    res->previous = previous;
1380
386k
    res->size = size_in_bytes;
1381
386k
    res->top = 0;
1382
386k
    return res;
1383
386k
}
1384
1385
static void
1386
reset_threadstate(_PyThreadStateImpl *tstate)
1387
0
{
1388
    // Set to _PyThreadState_INIT directly?
1389
0
    memcpy(tstate,
1390
0
           &initial._main_interpreter._initial_thread,
1391
0
           sizeof(*tstate));
1392
0
}
1393
1394
static _PyThreadStateImpl *
1395
alloc_threadstate(PyInterpreterState *interp)
1396
22
{
1397
22
    _PyThreadStateImpl *tstate;
1398
1399
    // Try the preallocated tstate first.
1400
22
    tstate = _Py_atomic_exchange_ptr(&interp->threads.preallocated, NULL);
1401
1402
    // Fall back to the allocator.
1403
22
    if (tstate == NULL) {
1404
0
        tstate = PyMem_RawCalloc(1, sizeof(_PyThreadStateImpl));
1405
0
        if (tstate == NULL) {
1406
0
            return NULL;
1407
0
        }
1408
0
        reset_threadstate(tstate);
1409
0
    }
1410
22
    return tstate;
1411
22
}
1412
1413
static void
1414
free_threadstate(_PyThreadStateImpl *tstate)
1415
0
{
1416
0
    PyInterpreterState *interp = tstate->base.interp;
1417
#ifdef Py_STATS
1418
    _PyStats_ThreadFini(tstate);
1419
#endif
1420
    // The initial thread state of the interpreter is allocated
1421
    // as part of the interpreter state so should not be freed.
1422
0
    if (tstate == &interp->_initial_thread) {
1423
        // Make it available again.
1424
0
        reset_threadstate(tstate);
1425
0
        assert(interp->threads.preallocated == NULL);
1426
0
        _Py_atomic_store_ptr(&interp->threads.preallocated, tstate);
1427
0
    }
1428
0
    else {
1429
0
        PyMem_RawFree(tstate);
1430
0
    }
1431
0
}
1432
1433
static void
1434
decref_threadstate(_PyThreadStateImpl *tstate)
1435
0
{
1436
0
    if (_Py_atomic_add_ssize(&tstate->refcount, -1) == 1) {
1437
        // The last reference to the thread state is gone.
1438
0
        free_threadstate(tstate);
1439
0
    }
1440
0
}
1441
1442
static inline void
1443
init_policy(uint16_t *target, const char *env_name, uint16_t default_value,
1444
                long min_value, long max_value)
1445
44
{
1446
44
    *target = default_value;
1447
44
    char *env = Py_GETENV(env_name);
1448
44
    if (env && *env != '\0') {
1449
0
        long value = atol(env);
1450
0
        if (value >= min_value && value <= max_value) {
1451
0
            *target = (uint16_t)value;
1452
0
        }
1453
0
    }
1454
44
}
1455
1456
/* Get the thread state to a minimal consistent state.
1457
   Further init happens in pylifecycle.c before it can be used.
1458
   All fields not initialized here are expected to be zeroed out,
1459
   e.g. by PyMem_RawCalloc() or memset(), or otherwise pre-initialized.
1460
   The interpreter state is not manipulated.  Instead it is assumed that
1461
   the thread is getting added to the interpreter.
1462
  */
1463
1464
static void
1465
init_threadstate(_PyThreadStateImpl *_tstate,
1466
                 PyInterpreterState *interp, uint64_t id, int whence)
1467
22
{
1468
22
    PyThreadState *tstate = (PyThreadState *)_tstate;
1469
22
    if (tstate->_status.initialized) {
1470
0
        Py_FatalError("thread state already initialized");
1471
0
    }
1472
1473
22
    assert(interp != NULL);
1474
22
    tstate->interp = interp;
1475
22
    tstate->eval_breaker =
1476
22
        _Py_atomic_load_uintptr_relaxed(&interp->ceval.instrumentation_version);
1477
1478
    // next/prev are set in add_threadstate().
1479
22
    assert(tstate->next == NULL);
1480
22
    assert(tstate->prev == NULL);
1481
1482
22
    assert(tstate->_whence == _PyThreadState_WHENCE_NOTSET);
1483
22
    assert(whence >= 0 && whence <= _PyThreadState_WHENCE_THREADING_DAEMON);
1484
22
    tstate->_whence = whence;
1485
1486
22
    assert(id > 0);
1487
22
    tstate->id = id;
1488
1489
    // thread_id and native_thread_id are set in bind_tstate().
1490
1491
22
    tstate->py_recursion_limit = interp->ceval.recursion_limit;
1492
22
    tstate->py_recursion_remaining = interp->ceval.recursion_limit;
1493
22
    tstate->exc_info = &tstate->exc_state;
1494
1495
    // PyGILState_Release must not try to delete this thread state.
1496
    // This is cleared when PyGILState_Ensure() creates the thread state.
1497
22
    tstate->gilstate_counter = 1;
1498
1499
    // Initialize the embedded base frame - sentinel at the bottom of the frame stack
1500
22
    _tstate->base_frame.previous = NULL;
1501
22
    _tstate->base_frame.f_executable = PyStackRef_None;
1502
22
    _tstate->base_frame.f_funcobj = PyStackRef_NULL;
1503
22
    _tstate->base_frame.f_globals = NULL;
1504
22
    _tstate->base_frame.f_builtins = NULL;
1505
22
    _tstate->base_frame.f_locals = NULL;
1506
22
    _tstate->base_frame.frame_obj = NULL;
1507
22
    _tstate->base_frame.instr_ptr = NULL;
1508
22
    _tstate->base_frame.stackpointer = _tstate->base_frame.localsplus;
1509
22
    _tstate->base_frame.return_offset = 0;
1510
22
    _tstate->base_frame.owner = FRAME_OWNED_BY_INTERPRETER;
1511
22
    _tstate->base_frame.visited = 0;
1512
#ifdef Py_DEBUG
1513
    _tstate->base_frame.lltrace = 0;
1514
#endif
1515
#ifdef Py_GIL_DISABLED
1516
    _tstate->base_frame.tlbc_index = 0;
1517
#endif
1518
22
    _tstate->base_frame.localsplus[0] = PyStackRef_NULL;
1519
1520
    // current_frame starts pointing to the base frame
1521
22
    tstate->current_frame = &_tstate->base_frame;
1522
    // base_frame pointer for profilers to validate stack unwinding
1523
22
    tstate->base_frame = &_tstate->base_frame;
1524
22
    tstate->datastack_chunk = NULL;
1525
22
    tstate->datastack_top = NULL;
1526
22
    tstate->datastack_limit = NULL;
1527
22
    tstate->what_event = -1;
1528
22
    tstate->current_executor = NULL;
1529
22
    tstate->jit_exit = NULL;
1530
22
    tstate->dict_global_version = 0;
1531
1532
22
    _tstate->c_stack_soft_limit = UINTPTR_MAX;
1533
22
    _tstate->c_stack_top = 0;
1534
22
    _tstate->c_stack_hard_limit = 0;
1535
1536
22
    _tstate->c_stack_init_base = 0;
1537
22
    _tstate->c_stack_init_top = 0;
1538
1539
22
    _tstate->asyncio_running_loop = NULL;
1540
22
    _tstate->asyncio_running_task = NULL;
1541
    // Initialize interpreter policy from environment variables
1542
22
    init_policy(&_tstate->policy.interp.jump_backward_initial_value,
1543
22
                "PYTHON_JIT_JUMP_BACKWARD_INITIAL_VALUE",
1544
22
                JUMP_BACKWARD_INITIAL_VALUE, 1, MAX_VALUE);
1545
22
    init_policy(&_tstate->policy.interp.jump_backward_initial_backoff,
1546
22
                "PYTHON_JIT_JUMP_BACKWARD_INITIAL_BACKOFF",
1547
22
                JUMP_BACKWARD_INITIAL_BACKOFF, 0, MAX_BACKOFF);
1548
#ifdef _Py_TIER2
1549
    // Initialize JIT policy from environment variables
1550
    init_policy(&_tstate->policy.jit.side_exit_initial_value,
1551
                "PYTHON_JIT_SIDE_EXIT_INITIAL_VALUE",
1552
                SIDE_EXIT_INITIAL_VALUE, 1, MAX_VALUE);
1553
    init_policy(&_tstate->policy.jit.side_exit_initial_backoff,
1554
                "PYTHON_JIT_SIDE_EXIT_INITIAL_BACKOFF",
1555
                SIDE_EXIT_INITIAL_BACKOFF, 0, MAX_BACKOFF);
1556
#endif
1557
22
    tstate->delete_later = NULL;
1558
1559
22
    llist_init(&_tstate->mem_free_queue);
1560
22
    llist_init(&_tstate->asyncio_tasks_head);
1561
22
    if (interp->stoptheworld.requested || _PyRuntime.stoptheworld.requested) {
1562
        // Start in the suspended state if there is an ongoing stop-the-world.
1563
0
        tstate->state = _Py_THREAD_SUSPENDED;
1564
0
    }
1565
1566
22
    tstate->_status.initialized = 1;
1567
22
}
1568
1569
static void
1570
add_threadstate(PyInterpreterState *interp, PyThreadState *tstate,
1571
                PyThreadState *next)
1572
22
{
1573
22
    assert(interp->threads.head != tstate);
1574
22
    if (next != NULL) {
1575
0
        assert(next->prev == NULL || next->prev == tstate);
1576
0
        next->prev = tstate;
1577
0
    }
1578
22
    tstate->next = next;
1579
22
    assert(tstate->prev == NULL);
1580
22
    interp->threads.head = tstate;
1581
22
}
1582
1583
static PyThreadState *
1584
new_threadstate(PyInterpreterState *interp, int whence)
1585
22
{
1586
    // Allocate the thread state.
1587
22
    _PyThreadStateImpl *tstate = alloc_threadstate(interp);
1588
22
    if (tstate == NULL) {
1589
0
        return NULL;
1590
0
    }
1591
1592
#ifdef Py_GIL_DISABLED
1593
    Py_ssize_t qsbr_idx = _Py_qsbr_reserve(interp);
1594
    if (qsbr_idx < 0) {
1595
        free_threadstate(tstate);
1596
        return NULL;
1597
    }
1598
    int32_t tlbc_idx = _Py_ReserveTLBCIndex(interp);
1599
    if (tlbc_idx < 0) {
1600
        free_threadstate(tstate);
1601
        return NULL;
1602
    }
1603
#endif
1604
#ifdef Py_STATS
1605
    // The PyStats structure is quite large and is allocated separated from tstate.
1606
    if (!_PyStats_ThreadInit(interp, tstate)) {
1607
        free_threadstate(tstate);
1608
        return NULL;
1609
    }
1610
#endif
1611
1612
    /* We serialize concurrent creation to protect global state. */
1613
22
    HEAD_LOCK(interp->runtime);
1614
1615
    // Initialize the new thread state.
1616
22
    interp->threads.next_unique_id += 1;
1617
22
    uint64_t id = interp->threads.next_unique_id;
1618
22
    init_threadstate(tstate, interp, id, whence);
1619
1620
    // Add the new thread state to the interpreter.
1621
22
    PyThreadState *old_head = interp->threads.head;
1622
22
    add_threadstate(interp, (PyThreadState *)tstate, old_head);
1623
1624
22
    HEAD_UNLOCK(interp->runtime);
1625
1626
#ifdef Py_GIL_DISABLED
1627
    // Must be called with lock unlocked to avoid lock ordering deadlocks.
1628
    _Py_qsbr_register(tstate, interp, qsbr_idx);
1629
    tstate->tlbc_index = tlbc_idx;
1630
#endif
1631
1632
22
    return (PyThreadState *)tstate;
1633
22
}
1634
1635
PyThreadState *
1636
PyThreadState_New(PyInterpreterState *interp)
1637
0
{
1638
0
    return _PyThreadState_NewBound(interp, _PyThreadState_WHENCE_UNKNOWN);
1639
0
}
1640
1641
PyThreadState *
1642
_PyThreadState_NewBound(PyInterpreterState *interp, int whence)
1643
0
{
1644
0
    PyThreadState *tstate = new_threadstate(interp, whence);
1645
0
    if (tstate) {
1646
0
        bind_tstate(tstate);
1647
        // This makes sure there's a gilstate tstate bound
1648
        // as soon as possible.
1649
0
        if (gilstate_get() == NULL) {
1650
0
            bind_gilstate_tstate(tstate);
1651
0
        }
1652
0
    }
1653
0
    return tstate;
1654
0
}
1655
1656
// This must be followed by a call to _PyThreadState_Bind();
1657
PyThreadState *
1658
_PyThreadState_New(PyInterpreterState *interp, int whence)
1659
22
{
1660
22
    return new_threadstate(interp, whence);
1661
22
}
1662
1663
// We keep this for stable ABI compabibility.
1664
PyAPI_FUNC(PyThreadState*)
1665
_PyThreadState_Prealloc(PyInterpreterState *interp)
1666
0
{
1667
0
    return _PyThreadState_New(interp, _PyThreadState_WHENCE_UNKNOWN);
1668
0
}
1669
1670
// We keep this around for (accidental) stable ABI compatibility.
1671
// Realistically, no extensions are using it.
1672
PyAPI_FUNC(void)
1673
_PyThreadState_Init(PyThreadState *tstate)
1674
0
{
1675
0
    Py_FatalError("_PyThreadState_Init() is for internal use only");
1676
0
}
1677
1678
1679
static void
1680
clear_datastack(PyThreadState *tstate)
1681
0
{
1682
0
    _PyStackChunk *chunk = tstate->datastack_chunk;
1683
0
    tstate->datastack_chunk = NULL;
1684
0
    while (chunk != NULL) {
1685
0
        _PyStackChunk *prev = chunk->previous;
1686
0
        _PyObject_VirtualFree(chunk, chunk->size);
1687
0
        chunk = prev;
1688
0
    }
1689
0
}
1690
1691
void
1692
PyThreadState_Clear(PyThreadState *tstate)
1693
0
{
1694
0
    assert(tstate->_status.initialized && !tstate->_status.cleared);
1695
0
    assert(current_fast_get()->interp == tstate->interp);
1696
    // GH-126016: In the _interpreters module, KeyboardInterrupt exceptions
1697
    // during PyEval_EvalCode() are sent to finalization, which doesn't let us
1698
    // mark threads as "not running main". So, for now this assertion is
1699
    // disabled.
1700
    // XXX assert(!_PyThreadState_IsRunningMain(tstate));
1701
    // XXX assert(!tstate->_status.bound || tstate->_status.unbound);
1702
0
    tstate->_status.finalizing = 1;  // just in case
1703
1704
    /* XXX Conditions we need to enforce:
1705
1706
       * the GIL must be held by the current thread
1707
       * current_fast_get()->interp must match tstate->interp
1708
       * for the main interpreter, current_fast_get() must be the main thread
1709
     */
1710
1711
0
    int verbose = _PyInterpreterState_GetConfig(tstate->interp)->verbose;
1712
1713
0
    if (verbose && tstate->current_frame != tstate->base_frame) {
1714
        /* bpo-20526: After the main thread calls
1715
           _PyInterpreterState_SetFinalizing() in Py_FinalizeEx()
1716
           (or in Py_EndInterpreter() for subinterpreters),
1717
           threads must exit when trying to take the GIL.
1718
           If a thread exit in the middle of _PyEval_EvalFrameDefault(),
1719
           tstate->frame is not reset to its previous value.
1720
           It is more likely with daemon threads, but it can happen
1721
           with regular threads if threading._shutdown() fails
1722
           (ex: interrupted by CTRL+C). */
1723
0
        fprintf(stderr,
1724
0
          "PyThreadState_Clear: warning: thread still has a frame\n");
1725
0
    }
1726
1727
0
    if (verbose && tstate->current_exception != NULL) {
1728
0
        fprintf(stderr, "PyThreadState_Clear: warning: thread has an exception set\n");
1729
0
        _PyErr_Print(tstate);
1730
0
    }
1731
1732
    /* At this point tstate shouldn't be used any more,
1733
       neither to run Python code nor for other uses.
1734
1735
       This is tricky when current_fast_get() == tstate, in the same way
1736
       as noted in interpreter_clear() above.  The below finalizers
1737
       can possibly run Python code or otherwise use the partially
1738
       cleared thread state.  For now we trust that isn't a problem
1739
       in practice.
1740
     */
1741
    // XXX Deal with the possibility of problematic finalizers.
1742
1743
    /* Don't clear tstate->pyframe: it is a borrowed reference */
1744
1745
0
    Py_CLEAR(tstate->threading_local_key);
1746
0
    Py_CLEAR(tstate->threading_local_sentinel);
1747
1748
0
    Py_CLEAR(((_PyThreadStateImpl *)tstate)->asyncio_running_loop);
1749
0
    Py_CLEAR(((_PyThreadStateImpl *)tstate)->asyncio_running_task);
1750
1751
1752
0
    PyMutex_Lock(&tstate->interp->asyncio_tasks_lock);
1753
    // merge any lingering tasks from thread state to interpreter's
1754
    // tasks list
1755
0
    llist_concat(&tstate->interp->asyncio_tasks_head,
1756
0
                 &((_PyThreadStateImpl *)tstate)->asyncio_tasks_head);
1757
0
    PyMutex_Unlock(&tstate->interp->asyncio_tasks_lock);
1758
1759
0
    Py_CLEAR(tstate->dict);
1760
0
    Py_CLEAR(tstate->async_exc);
1761
1762
0
    Py_CLEAR(tstate->current_exception);
1763
1764
0
    Py_CLEAR(tstate->exc_state.exc_value);
1765
1766
    /* The stack of exception states should contain just this thread. */
1767
0
    if (verbose && tstate->exc_info != &tstate->exc_state) {
1768
0
        fprintf(stderr,
1769
0
          "PyThreadState_Clear: warning: thread still has a generator\n");
1770
0
    }
1771
1772
0
    if (tstate->c_profilefunc != NULL) {
1773
0
        FT_ATOMIC_ADD_SSIZE(tstate->interp->sys_profiling_threads, -1);
1774
0
        tstate->c_profilefunc = NULL;
1775
0
    }
1776
0
    if (tstate->c_tracefunc != NULL) {
1777
0
        FT_ATOMIC_ADD_SSIZE(tstate->interp->sys_tracing_threads, -1);
1778
0
        tstate->c_tracefunc = NULL;
1779
0
    }
1780
1781
0
    Py_CLEAR(tstate->c_profileobj);
1782
0
    Py_CLEAR(tstate->c_traceobj);
1783
1784
0
    Py_CLEAR(tstate->async_gen_firstiter);
1785
0
    Py_CLEAR(tstate->async_gen_finalizer);
1786
1787
0
    Py_CLEAR(tstate->context);
1788
1789
#ifdef Py_GIL_DISABLED
1790
    // Each thread should clear own freelists in free-threading builds.
1791
    struct _Py_freelists *freelists = _Py_freelists_GET();
1792
    _PyObject_ClearFreeLists(freelists, 1);
1793
1794
    // Flush the thread's local GC allocation count to the global count
1795
    // before the thread state is cleared, otherwise the count is lost.
1796
    _PyThreadStateImpl *tstate_impl = (_PyThreadStateImpl *)tstate;
1797
    _Py_atomic_add_int(&tstate->interp->gc.young.count,
1798
                       (int)tstate_impl->gc.alloc_count);
1799
    tstate_impl->gc.alloc_count = 0;
1800
1801
    // Merge our thread-local refcounts into the type's own refcount and
1802
    // free our local refcount array.
1803
    _PyObject_FinalizePerThreadRefcounts(tstate_impl);
1804
1805
    // Remove ourself from the biased reference counting table of threads.
1806
    _Py_brc_remove_thread(tstate);
1807
1808
    // Release our thread-local copies of the bytecode for reuse by another
1809
    // thread
1810
    _Py_ClearTLBCIndex(tstate_impl);
1811
#endif
1812
1813
    // Merge our queue of pointers to be freed into the interpreter queue.
1814
0
    _PyMem_AbandonDelayed(tstate);
1815
1816
0
    _PyThreadState_ClearMimallocHeaps(tstate);
1817
1818
0
    tstate->_status.cleared = 1;
1819
1820
    // XXX Call _PyThreadStateSwap(runtime, NULL) here if "current".
1821
    // XXX Do it as early in the function as possible.
1822
0
}
1823
1824
static void
1825
decrement_stoptheworld_countdown(struct _stoptheworld_state *stw);
1826
1827
/* Common code for PyThreadState_Delete() and PyThreadState_DeleteCurrent() */
1828
static void
1829
tstate_delete_common(PyThreadState *tstate, int release_gil)
1830
0
{
1831
0
    assert(tstate->_status.cleared && !tstate->_status.finalized);
1832
0
    tstate_verify_not_active(tstate);
1833
0
    assert(!_PyThreadState_IsRunningMain(tstate));
1834
1835
0
    PyInterpreterState *interp = tstate->interp;
1836
0
    if (interp == NULL) {
1837
0
        Py_FatalError("NULL interpreter");
1838
0
    }
1839
0
    _PyRuntimeState *runtime = interp->runtime;
1840
1841
0
    HEAD_LOCK(runtime);
1842
0
    if (tstate->prev) {
1843
0
        tstate->prev->next = tstate->next;
1844
0
    }
1845
0
    else {
1846
0
        interp->threads.head = tstate->next;
1847
0
    }
1848
0
    if (tstate->next) {
1849
0
        tstate->next->prev = tstate->prev;
1850
0
    }
1851
0
    if (tstate->state != _Py_THREAD_SUSPENDED) {
1852
        // Any ongoing stop-the-world request should not wait for us because
1853
        // our thread is getting deleted.
1854
0
        if (interp->stoptheworld.requested) {
1855
0
            decrement_stoptheworld_countdown(&interp->stoptheworld);
1856
0
        }
1857
0
        if (runtime->stoptheworld.requested) {
1858
0
            decrement_stoptheworld_countdown(&runtime->stoptheworld);
1859
0
        }
1860
0
    }
1861
1862
#if defined(Py_REF_DEBUG) && defined(Py_GIL_DISABLED)
1863
    // Add our portion of the total refcount to the interpreter's total.
1864
    _PyThreadStateImpl *tstate_impl = (_PyThreadStateImpl *)tstate;
1865
    tstate->interp->object_state.reftotal += tstate_impl->reftotal;
1866
    tstate_impl->reftotal = 0;
1867
    assert(tstate_impl->refcounts.values == NULL);
1868
#endif
1869
1870
0
    HEAD_UNLOCK(runtime);
1871
1872
    // XXX Unbind in PyThreadState_Clear(), or earlier
1873
    // (and assert not-equal here)?
1874
0
    if (tstate->_status.bound_gilstate) {
1875
0
        unbind_gilstate_tstate(tstate);
1876
0
    }
1877
0
    if (tstate->_status.bound) {
1878
0
        unbind_tstate(tstate);
1879
0
    }
1880
1881
    // XXX Move to PyThreadState_Clear()?
1882
0
    clear_datastack(tstate);
1883
1884
0
    if (release_gil) {
1885
0
        _PyEval_ReleaseLock(tstate->interp, tstate, 1);
1886
0
    }
1887
1888
#ifdef Py_GIL_DISABLED
1889
    _Py_qsbr_unregister(tstate);
1890
#endif
1891
1892
0
    tstate->_status.finalized = 1;
1893
0
}
1894
1895
static void
1896
zapthreads(PyInterpreterState *interp)
1897
0
{
1898
0
    PyThreadState *tstate;
1899
    /* No need to lock the mutex here because this should only happen
1900
       when the threads are all really dead (XXX famous last words).
1901
1902
       Cannot use _Py_FOR_EACH_TSTATE_UNLOCKED because we are freeing
1903
       the thread states here.
1904
    */
1905
0
    while ((tstate = interp->threads.head) != NULL) {
1906
0
        tstate_verify_not_active(tstate);
1907
0
        tstate_delete_common(tstate, 0);
1908
0
        free_threadstate((_PyThreadStateImpl *)tstate);
1909
0
    }
1910
0
}
1911
1912
1913
void
1914
PyThreadState_Delete(PyThreadState *tstate)
1915
0
{
1916
0
    _Py_EnsureTstateNotNULL(tstate);
1917
0
    tstate_verify_not_active(tstate);
1918
0
    tstate_delete_common(tstate, 0);
1919
0
    free_threadstate((_PyThreadStateImpl *)tstate);
1920
0
}
1921
1922
1923
void
1924
_PyThreadState_DeleteCurrent(PyThreadState *tstate)
1925
0
{
1926
0
    _Py_EnsureTstateNotNULL(tstate);
1927
#ifdef Py_GIL_DISABLED
1928
    _Py_qsbr_detach(((_PyThreadStateImpl *)tstate)->qsbr);
1929
#endif
1930
#ifdef Py_STATS
1931
    _PyStats_Detach((_PyThreadStateImpl *)tstate);
1932
#endif
1933
0
    current_fast_clear(tstate->interp->runtime);
1934
0
    tstate_delete_common(tstate, 1);  // release GIL as part of call
1935
0
    free_threadstate((_PyThreadStateImpl *)tstate);
1936
0
}
1937
1938
void
1939
PyThreadState_DeleteCurrent(void)
1940
0
{
1941
0
    PyThreadState *tstate = current_fast_get();
1942
0
    _PyThreadState_DeleteCurrent(tstate);
1943
0
}
1944
1945
1946
// Unlinks and removes all thread states from `tstate->interp`, with the
1947
// exception of the one passed as an argument. However, it does not delete
1948
// these thread states. Instead, it returns the removed thread states as a
1949
// linked list.
1950
//
1951
// Note that if there is a current thread state, it *must* be the one
1952
// passed as argument.  Also, this won't touch any interpreters other
1953
// than the current one, since we don't know which thread state should
1954
// be kept in those other interpreters.
1955
PyThreadState *
1956
_PyThreadState_RemoveExcept(PyThreadState *tstate)
1957
0
{
1958
0
    assert(tstate != NULL);
1959
0
    PyInterpreterState *interp = tstate->interp;
1960
0
    _PyRuntimeState *runtime = interp->runtime;
1961
1962
#ifdef Py_GIL_DISABLED
1963
    assert(runtime->stoptheworld.world_stopped);
1964
#endif
1965
1966
0
    HEAD_LOCK(runtime);
1967
    /* Remove all thread states, except tstate, from the linked list of
1968
       thread states. */
1969
0
    PyThreadState *list = interp->threads.head;
1970
0
    if (list == tstate) {
1971
0
        list = tstate->next;
1972
0
    }
1973
0
    if (tstate->prev) {
1974
0
        tstate->prev->next = tstate->next;
1975
0
    }
1976
0
    if (tstate->next) {
1977
0
        tstate->next->prev = tstate->prev;
1978
0
    }
1979
0
    tstate->prev = tstate->next = NULL;
1980
0
    interp->threads.head = tstate;
1981
0
    HEAD_UNLOCK(runtime);
1982
1983
0
    return list;
1984
0
}
1985
1986
// Deletes the thread states in the linked list `list`.
1987
//
1988
// This is intended to be used in conjunction with _PyThreadState_RemoveExcept.
1989
//
1990
// If `is_after_fork` is true, the thread states are immediately freed.
1991
// Otherwise, they are decref'd because they may still be referenced by an
1992
// OS thread.
1993
void
1994
_PyThreadState_DeleteList(PyThreadState *list, int is_after_fork)
1995
0
{
1996
    // The world can't be stopped because we PyThreadState_Clear() can
1997
    // call destructors.
1998
0
    assert(!_PyRuntime.stoptheworld.world_stopped);
1999
2000
0
    PyThreadState *p, *next;
2001
0
    for (p = list; p; p = next) {
2002
0
        next = p->next;
2003
0
        PyThreadState_Clear(p);
2004
0
        if (is_after_fork) {
2005
0
            free_threadstate((_PyThreadStateImpl *)p);
2006
0
        }
2007
0
        else {
2008
0
            decref_threadstate((_PyThreadStateImpl *)p);
2009
0
        }
2010
0
    }
2011
0
}
2012
2013
2014
//----------
2015
// accessors
2016
//----------
2017
2018
/* An extension mechanism to store arbitrary additional per-thread state.
2019
   PyThreadState_GetDict() returns a dictionary that can be used to hold such
2020
   state; the caller should pick a unique key and store its state there.  If
2021
   PyThreadState_GetDict() returns NULL, an exception has *not* been raised
2022
   and the caller should assume no per-thread state is available. */
2023
2024
PyObject *
2025
_PyThreadState_GetDict(PyThreadState *tstate)
2026
84
{
2027
84
    assert(tstate != NULL);
2028
84
    if (tstate->dict == NULL) {
2029
1
        tstate->dict = PyDict_New();
2030
1
        if (tstate->dict == NULL) {
2031
0
            _PyErr_Clear(tstate);
2032
0
        }
2033
1
    }
2034
84
    return tstate->dict;
2035
84
}
2036
2037
2038
PyObject *
2039
PyThreadState_GetDict(void)
2040
84
{
2041
84
    PyThreadState *tstate = current_fast_get();
2042
84
    if (tstate == NULL) {
2043
0
        return NULL;
2044
0
    }
2045
84
    return _PyThreadState_GetDict(tstate);
2046
84
}
2047
2048
2049
PyInterpreterState *
2050
PyThreadState_GetInterpreter(PyThreadState *tstate)
2051
0
{
2052
0
    assert(tstate != NULL);
2053
0
    return tstate->interp;
2054
0
}
2055
2056
2057
PyFrameObject*
2058
PyThreadState_GetFrame(PyThreadState *tstate)
2059
1.79M
{
2060
1.79M
    assert(tstate != NULL);
2061
1.79M
    _PyInterpreterFrame *f = _PyThreadState_GetFrame(tstate);
2062
1.79M
    if (f == NULL) {
2063
0
        return NULL;
2064
0
    }
2065
1.79M
    PyFrameObject *frame = _PyFrame_GetFrameObject(f);
2066
1.79M
    if (frame == NULL) {
2067
0
        PyErr_Clear();
2068
0
    }
2069
1.79M
    return (PyFrameObject*)Py_XNewRef(frame);
2070
1.79M
}
2071
2072
2073
uint64_t
2074
PyThreadState_GetID(PyThreadState *tstate)
2075
0
{
2076
0
    assert(tstate != NULL);
2077
0
    return tstate->id;
2078
0
}
2079
2080
2081
static inline void
2082
tstate_activate(PyThreadState *tstate)
2083
293k
{
2084
293k
    assert(tstate != NULL);
2085
    // XXX assert(tstate_is_alive(tstate));
2086
293k
    assert(tstate_is_bound(tstate));
2087
293k
    assert(!tstate->_status.active);
2088
2089
293k
    assert(!tstate->_status.bound_gilstate ||
2090
293k
           tstate == gilstate_get());
2091
293k
    if (!tstate->_status.bound_gilstate) {
2092
0
        bind_gilstate_tstate(tstate);
2093
0
    }
2094
2095
293k
    tstate->_status.active = 1;
2096
293k
}
2097
2098
static inline void
2099
tstate_deactivate(PyThreadState *tstate)
2100
293k
{
2101
293k
    assert(tstate != NULL);
2102
    // XXX assert(tstate_is_alive(tstate));
2103
293k
    assert(tstate_is_bound(tstate));
2104
293k
    assert(tstate->_status.active);
2105
2106
#if Py_STATS
2107
    _PyStats_Detach((_PyThreadStateImpl *)tstate);
2108
#endif
2109
2110
293k
    tstate->_status.active = 0;
2111
2112
    // We do not unbind the gilstate tstate here.
2113
    // It will still be used in PyGILState_Ensure().
2114
293k
}
2115
2116
static int
2117
tstate_try_attach(PyThreadState *tstate)
2118
293k
{
2119
#ifdef Py_GIL_DISABLED
2120
    int expected = _Py_THREAD_DETACHED;
2121
    return _Py_atomic_compare_exchange_int(&tstate->state,
2122
                                           &expected,
2123
                                           _Py_THREAD_ATTACHED);
2124
#else
2125
293k
    assert(tstate->state == _Py_THREAD_DETACHED);
2126
293k
    tstate->state = _Py_THREAD_ATTACHED;
2127
293k
    return 1;
2128
293k
#endif
2129
293k
}
2130
2131
static void
2132
tstate_set_detached(PyThreadState *tstate, int detached_state)
2133
293k
{
2134
293k
    assert(_Py_atomic_load_int_relaxed(&tstate->state) == _Py_THREAD_ATTACHED);
2135
#ifdef Py_GIL_DISABLED
2136
    _Py_atomic_store_int(&tstate->state, detached_state);
2137
#else
2138
293k
    tstate->state = detached_state;
2139
293k
#endif
2140
293k
}
2141
2142
static void
2143
tstate_wait_attach(PyThreadState *tstate)
2144
0
{
2145
0
    do {
2146
0
        int state = _Py_atomic_load_int_relaxed(&tstate->state);
2147
0
        if (state == _Py_THREAD_SUSPENDED) {
2148
            // Wait until we're switched out of SUSPENDED to DETACHED.
2149
0
            _PyParkingLot_Park(&tstate->state, &state, sizeof(tstate->state),
2150
0
                               /*timeout=*/-1, NULL, /*detach=*/0);
2151
0
        }
2152
0
        else if (state == _Py_THREAD_SHUTTING_DOWN) {
2153
            // We're shutting down, so we can't attach.
2154
0
            _PyThreadState_HangThread(tstate);
2155
0
        }
2156
0
        else {
2157
0
            assert(state == _Py_THREAD_DETACHED);
2158
0
        }
2159
        // Once we're back in DETACHED we can re-attach
2160
0
    } while (!tstate_try_attach(tstate));
2161
0
}
2162
2163
void
2164
_PyThreadState_Attach(PyThreadState *tstate)
2165
293k
{
2166
#if defined(Py_DEBUG)
2167
    // This is called from PyEval_RestoreThread(). Similar
2168
    // to it, we need to ensure errno doesn't change.
2169
    int err = errno;
2170
#endif
2171
2172
293k
    _Py_EnsureTstateNotNULL(tstate);
2173
293k
    if (current_fast_get() != NULL) {
2174
0
        Py_FatalError("non-NULL old thread state");
2175
0
    }
2176
293k
    _PyThreadStateImpl *_tstate = (_PyThreadStateImpl *)tstate;
2177
293k
    if (_tstate->c_stack_hard_limit == 0) {
2178
22
        _Py_InitializeRecursionLimits(tstate);
2179
22
    }
2180
2181
293k
    while (1) {
2182
293k
        _PyEval_AcquireLock(tstate);
2183
2184
        // XXX assert(tstate_is_alive(tstate));
2185
293k
        current_fast_set(&_PyRuntime, tstate);
2186
293k
        if (!tstate_try_attach(tstate)) {
2187
0
            tstate_wait_attach(tstate);
2188
0
        }
2189
293k
        tstate_activate(tstate);
2190
2191
#ifdef Py_GIL_DISABLED
2192
        if (_PyEval_IsGILEnabled(tstate) && !tstate->holds_gil) {
2193
            // The GIL was enabled between our call to _PyEval_AcquireLock()
2194
            // and when we attached (the GIL can't go from enabled to disabled
2195
            // here because only a thread holding the GIL can disable
2196
            // it). Detach and try again.
2197
            tstate_set_detached(tstate, _Py_THREAD_DETACHED);
2198
            tstate_deactivate(tstate);
2199
            current_fast_clear(&_PyRuntime);
2200
            continue;
2201
        }
2202
        _Py_qsbr_attach(((_PyThreadStateImpl *)tstate)->qsbr);
2203
#endif
2204
293k
        break;
2205
293k
    }
2206
2207
    // Resume previous critical section. This acquires the lock(s) from the
2208
    // top-most critical section.
2209
293k
    if (tstate->critical_section != 0) {
2210
0
        _PyCriticalSection_Resume(tstate);
2211
0
    }
2212
2213
#ifdef Py_STATS
2214
    _PyStats_Attach((_PyThreadStateImpl *)tstate);
2215
#endif
2216
2217
#if defined(Py_DEBUG)
2218
    errno = err;
2219
#endif
2220
293k
}
2221
2222
static void
2223
detach_thread(PyThreadState *tstate, int detached_state)
2224
293k
{
2225
    // XXX assert(tstate_is_alive(tstate) && tstate_is_bound(tstate));
2226
293k
    assert(_Py_atomic_load_int_relaxed(&tstate->state) == _Py_THREAD_ATTACHED);
2227
293k
    assert(tstate == current_fast_get());
2228
293k
    if (tstate->critical_section != 0) {
2229
0
        _PyCriticalSection_SuspendAll(tstate);
2230
0
    }
2231
#ifdef Py_GIL_DISABLED
2232
    _Py_qsbr_detach(((_PyThreadStateImpl *)tstate)->qsbr);
2233
#endif
2234
293k
    tstate_deactivate(tstate);
2235
293k
    tstate_set_detached(tstate, detached_state);
2236
293k
    current_fast_clear(&_PyRuntime);
2237
293k
    _PyEval_ReleaseLock(tstate->interp, tstate, 0);
2238
293k
}
2239
2240
void
2241
_PyThreadState_Detach(PyThreadState *tstate)
2242
293k
{
2243
293k
    detach_thread(tstate, _Py_THREAD_DETACHED);
2244
293k
}
2245
2246
void
2247
_PyThreadState_Suspend(PyThreadState *tstate)
2248
0
{
2249
0
    _PyRuntimeState *runtime = &_PyRuntime;
2250
2251
0
    assert(_Py_atomic_load_int_relaxed(&tstate->state) == _Py_THREAD_ATTACHED);
2252
2253
0
    struct _stoptheworld_state *stw = NULL;
2254
0
    HEAD_LOCK(runtime);
2255
0
    if (runtime->stoptheworld.requested) {
2256
0
        stw = &runtime->stoptheworld;
2257
0
    }
2258
0
    else if (tstate->interp->stoptheworld.requested) {
2259
0
        stw = &tstate->interp->stoptheworld;
2260
0
    }
2261
0
    HEAD_UNLOCK(runtime);
2262
2263
0
    if (stw == NULL) {
2264
        // Switch directly to "detached" if there is no active stop-the-world
2265
        // request.
2266
0
        detach_thread(tstate, _Py_THREAD_DETACHED);
2267
0
        return;
2268
0
    }
2269
2270
    // Switch to "suspended" state.
2271
0
    detach_thread(tstate, _Py_THREAD_SUSPENDED);
2272
2273
    // Decrease the count of remaining threads needing to park.
2274
0
    HEAD_LOCK(runtime);
2275
0
    decrement_stoptheworld_countdown(stw);
2276
0
    HEAD_UNLOCK(runtime);
2277
0
}
2278
2279
void
2280
_PyThreadState_SetShuttingDown(PyThreadState *tstate)
2281
0
{
2282
0
    _Py_atomic_store_int(&tstate->state, _Py_THREAD_SHUTTING_DOWN);
2283
#ifdef Py_GIL_DISABLED
2284
    _PyParkingLot_UnparkAll(&tstate->state);
2285
#endif
2286
0
}
2287
2288
// Decrease stop-the-world counter of remaining number of threads that need to
2289
// pause. If we are the final thread to pause, notify the requesting thread.
2290
static void
2291
decrement_stoptheworld_countdown(struct _stoptheworld_state *stw)
2292
0
{
2293
0
    assert(stw->thread_countdown > 0);
2294
0
    if (--stw->thread_countdown == 0) {
2295
0
        _PyEvent_Notify(&stw->stop_event);
2296
0
    }
2297
0
}
2298
2299
#ifdef Py_GIL_DISABLED
2300
// Interpreter for _Py_FOR_EACH_STW_INTERP(). For global stop-the-world events,
2301
// we start with the first interpreter and then iterate over all interpreters.
2302
// For per-interpreter stop-the-world events, we only operate on the one
2303
// interpreter.
2304
static PyInterpreterState *
2305
interp_for_stop_the_world(struct _stoptheworld_state *stw)
2306
{
2307
    return (stw->is_global
2308
        ? PyInterpreterState_Head()
2309
        : _Py_CONTAINER_OF(stw, PyInterpreterState, stoptheworld));
2310
}
2311
2312
// Loops over threads for a stop-the-world event.
2313
// For global: all threads in all interpreters
2314
// For per-interpreter: all threads in the interpreter
2315
#define _Py_FOR_EACH_STW_INTERP(stw, i)                                     \
2316
    for (PyInterpreterState *i = interp_for_stop_the_world((stw));          \
2317
            i != NULL; i = ((stw->is_global) ? i->next : NULL))
2318
2319
2320
// Try to transition threads atomically from the "detached" state to the
2321
// "gc stopped" state. Returns true if all threads are in the "gc stopped"
2322
static bool
2323
park_detached_threads(struct _stoptheworld_state *stw)
2324
{
2325
    int num_parked = 0;
2326
    _Py_FOR_EACH_STW_INTERP(stw, i) {
2327
        _Py_FOR_EACH_TSTATE_UNLOCKED(i, t) {
2328
            int state = _Py_atomic_load_int_relaxed(&t->state);
2329
            if (state == _Py_THREAD_DETACHED) {
2330
                // Atomically transition to "suspended" if in "detached" state.
2331
                if (_Py_atomic_compare_exchange_int(
2332
                                &t->state, &state, _Py_THREAD_SUSPENDED)) {
2333
                    num_parked++;
2334
                }
2335
            }
2336
            else if (state == _Py_THREAD_ATTACHED && t != stw->requester) {
2337
                _Py_set_eval_breaker_bit(t, _PY_EVAL_PLEASE_STOP_BIT);
2338
            }
2339
        }
2340
    }
2341
    stw->thread_countdown -= num_parked;
2342
    assert(stw->thread_countdown >= 0);
2343
    return num_parked > 0 && stw->thread_countdown == 0;
2344
}
2345
2346
static void
2347
stop_the_world(struct _stoptheworld_state *stw)
2348
{
2349
    _PyRuntimeState *runtime = &_PyRuntime;
2350
2351
    // gh-137433: Acquire the rwmutex first to avoid deadlocks with daemon
2352
    // threads that may hang when blocked on lock acquisition.
2353
    if (stw->is_global) {
2354
        _PyRWMutex_Lock(&runtime->stoptheworld_mutex);
2355
    }
2356
    else {
2357
        _PyRWMutex_RLock(&runtime->stoptheworld_mutex);
2358
    }
2359
    PyMutex_Lock(&stw->mutex);
2360
2361
    HEAD_LOCK(runtime);
2362
    stw->requested = 1;
2363
    stw->thread_countdown = 0;
2364
    stw->stop_event = (PyEvent){0};  // zero-initialize (unset)
2365
    stw->requester = _PyThreadState_GET();  // may be NULL
2366
    FT_STAT_WORLD_STOP_INC();
2367
2368
    _Py_FOR_EACH_STW_INTERP(stw, i) {
2369
        _Py_FOR_EACH_TSTATE_UNLOCKED(i, t) {
2370
            if (t != stw->requester) {
2371
                // Count all the other threads (we don't wait on ourself).
2372
                stw->thread_countdown++;
2373
            }
2374
        }
2375
    }
2376
2377
    if (stw->thread_countdown == 0) {
2378
        HEAD_UNLOCK(runtime);
2379
        stw->world_stopped = 1;
2380
        return;
2381
    }
2382
2383
    for (;;) {
2384
        // Switch threads that are detached to the GC stopped state
2385
        bool stopped_all_threads = park_detached_threads(stw);
2386
        HEAD_UNLOCK(runtime);
2387
2388
        if (stopped_all_threads) {
2389
            break;
2390
        }
2391
2392
        PyTime_t wait_ns = 1000*1000;  // 1ms (arbitrary, may need tuning)
2393
        int detach = 0;
2394
        if (PyEvent_WaitTimed(&stw->stop_event, wait_ns, detach)) {
2395
            assert(stw->thread_countdown == 0);
2396
            break;
2397
        }
2398
2399
        HEAD_LOCK(runtime);
2400
    }
2401
    stw->world_stopped = 1;
2402
}
2403
2404
static void
2405
start_the_world(struct _stoptheworld_state *stw)
2406
{
2407
    _PyRuntimeState *runtime = &_PyRuntime;
2408
    assert(PyMutex_IsLocked(&stw->mutex));
2409
2410
    HEAD_LOCK(runtime);
2411
    stw->requested = 0;
2412
    stw->world_stopped = 0;
2413
    // Switch threads back to the detached state.
2414
    _Py_FOR_EACH_STW_INTERP(stw, i) {
2415
        _Py_FOR_EACH_TSTATE_UNLOCKED(i, t) {
2416
            if (t != stw->requester) {
2417
                assert(_Py_atomic_load_int_relaxed(&t->state) ==
2418
                       _Py_THREAD_SUSPENDED);
2419
                _Py_atomic_store_int(&t->state, _Py_THREAD_DETACHED);
2420
                _PyParkingLot_UnparkAll(&t->state);
2421
            }
2422
        }
2423
    }
2424
    stw->requester = NULL;
2425
    HEAD_UNLOCK(runtime);
2426
    PyMutex_Unlock(&stw->mutex);
2427
    if (stw->is_global) {
2428
        _PyRWMutex_Unlock(&runtime->stoptheworld_mutex);
2429
    }
2430
    else {
2431
        _PyRWMutex_RUnlock(&runtime->stoptheworld_mutex);
2432
    }
2433
}
2434
#endif  // Py_GIL_DISABLED
2435
2436
void
2437
_PyEval_StopTheWorldAll(_PyRuntimeState *runtime)
2438
0
{
2439
#ifdef Py_GIL_DISABLED
2440
    stop_the_world(&runtime->stoptheworld);
2441
#endif
2442
0
}
2443
2444
void
2445
_PyEval_StartTheWorldAll(_PyRuntimeState *runtime)
2446
0
{
2447
#ifdef Py_GIL_DISABLED
2448
    start_the_world(&runtime->stoptheworld);
2449
#endif
2450
0
}
2451
2452
void
2453
_PyEval_StopTheWorld(PyInterpreterState *interp)
2454
0
{
2455
#ifdef Py_GIL_DISABLED
2456
    stop_the_world(&interp->stoptheworld);
2457
#endif
2458
0
}
2459
2460
void
2461
_PyEval_StartTheWorld(PyInterpreterState *interp)
2462
0
{
2463
#ifdef Py_GIL_DISABLED
2464
    start_the_world(&interp->stoptheworld);
2465
#endif
2466
0
}
2467
2468
//----------
2469
// other API
2470
//----------
2471
2472
/* Asynchronously raise an exception in a thread.
2473
   Requested by Just van Rossum and Alex Martelli.
2474
   To prevent naive misuse, you must write your own extension
2475
   to call this, or use ctypes.  Must be called with the GIL held.
2476
   Returns the number of tstates modified (normally 1, but 0 if `id` didn't
2477
   match any known thread id).  Can be called with exc=NULL to clear an
2478
   existing async exception.  This raises no exceptions. */
2479
2480
// XXX Move this to Python/ceval_gil.c?
2481
// XXX Deprecate this.
2482
int
2483
PyThreadState_SetAsyncExc(unsigned long id, PyObject *exc)
2484
0
{
2485
0
    PyInterpreterState *interp = _PyInterpreterState_GET();
2486
2487
    /* Although the GIL is held, a few C API functions can be called
2488
     * without the GIL held, and in particular some that create and
2489
     * destroy thread and interpreter states.  Those can mutate the
2490
     * list of thread states we're traversing, so to prevent that we lock
2491
     * head_mutex for the duration.
2492
     */
2493
0
    PyThreadState *tstate = NULL;
2494
0
    _Py_FOR_EACH_TSTATE_BEGIN(interp, t) {
2495
0
        if (t->thread_id == id) {
2496
0
            tstate = t;
2497
0
            break;
2498
0
        }
2499
0
    }
2500
0
    _Py_FOR_EACH_TSTATE_END(interp);
2501
2502
0
    if (tstate != NULL) {
2503
        /* Tricky:  we need to decref the current value
2504
         * (if any) in tstate->async_exc, but that can in turn
2505
         * allow arbitrary Python code to run, including
2506
         * perhaps calls to this function.  To prevent
2507
         * deadlock, we need to release head_mutex before
2508
         * the decref.
2509
         */
2510
0
        Py_XINCREF(exc);
2511
0
        PyObject *old_exc = _Py_atomic_exchange_ptr(&tstate->async_exc, exc);
2512
2513
0
        Py_XDECREF(old_exc);
2514
0
        _Py_set_eval_breaker_bit(tstate, _PY_ASYNC_EXCEPTION_BIT);
2515
0
    }
2516
2517
0
    return tstate != NULL;
2518
0
}
2519
2520
//---------------------------------
2521
// API for the current thread state
2522
//---------------------------------
2523
2524
PyThreadState *
2525
PyThreadState_GetUnchecked(void)
2526
0
{
2527
0
    return current_fast_get();
2528
0
}
2529
2530
2531
PyThreadState *
2532
PyThreadState_Get(void)
2533
144M
{
2534
144M
    PyThreadState *tstate = current_fast_get();
2535
144M
    _Py_EnsureTstateNotNULL(tstate);
2536
144M
    return tstate;
2537
144M
}
2538
2539
PyThreadState *
2540
_PyThreadState_Swap(_PyRuntimeState *runtime, PyThreadState *newts)
2541
0
{
2542
0
    PyThreadState *oldts = current_fast_get();
2543
0
    if (oldts != NULL) {
2544
0
        _PyThreadState_Detach(oldts);
2545
0
    }
2546
0
    if (newts != NULL) {
2547
0
        _PyThreadState_Attach(newts);
2548
0
    }
2549
0
    return oldts;
2550
0
}
2551
2552
PyThreadState *
2553
PyThreadState_Swap(PyThreadState *newts)
2554
0
{
2555
0
    return _PyThreadState_Swap(&_PyRuntime, newts);
2556
0
}
2557
2558
2559
void
2560
_PyThreadState_Bind(PyThreadState *tstate)
2561
22
{
2562
    // gh-104690: If Python is being finalized and PyInterpreterState_Delete()
2563
    // was called, tstate becomes a dangling pointer.
2564
22
    assert(_PyThreadState_CheckConsistency(tstate));
2565
2566
22
    bind_tstate(tstate);
2567
    // This makes sure there's a gilstate tstate bound
2568
    // as soon as possible.
2569
22
    if (gilstate_get() == NULL) {
2570
22
        bind_gilstate_tstate(tstate);
2571
22
    }
2572
22
}
2573
2574
#if defined(Py_GIL_DISABLED) && !defined(Py_LIMITED_API)
2575
uintptr_t
2576
_Py_GetThreadLocal_Addr(void)
2577
{
2578
    // gh-112535: Use the address of the thread-local PyThreadState variable as
2579
    // a unique identifier for the current thread. Each thread has a unique
2580
    // _Py_tss_tstate variable with a unique address.
2581
    return (uintptr_t)&_Py_tss_tstate;
2582
}
2583
#endif
2584
2585
/***********************************/
2586
/* routines for advanced debuggers */
2587
/***********************************/
2588
2589
// (requested by David Beazley)
2590
// Don't use unless you know what you are doing!
2591
2592
PyInterpreterState *
2593
PyInterpreterState_Head(void)
2594
0
{
2595
0
    return _PyRuntime.interpreters.head;
2596
0
}
2597
2598
PyInterpreterState *
2599
PyInterpreterState_Main(void)
2600
0
{
2601
0
    return _PyInterpreterState_Main();
2602
0
}
2603
2604
PyInterpreterState *
2605
0
PyInterpreterState_Next(PyInterpreterState *interp) {
2606
0
    return interp->next;
2607
0
}
2608
2609
PyThreadState *
2610
14.6k
PyInterpreterState_ThreadHead(PyInterpreterState *interp) {
2611
14.6k
    return interp->threads.head;
2612
14.6k
}
2613
2614
PyThreadState *
2615
14.6k
PyThreadState_Next(PyThreadState *tstate) {
2616
14.6k
    return tstate->next;
2617
14.6k
}
2618
2619
2620
/********************************************/
2621
/* reporting execution state of all threads */
2622
/********************************************/
2623
2624
/* The implementation of sys._current_frames().  This is intended to be
2625
   called with the GIL held, as it will be when called via
2626
   sys._current_frames().  It's possible it would work fine even without
2627
   the GIL held, but haven't thought enough about that.
2628
*/
2629
PyObject *
2630
_PyThread_CurrentFrames(void)
2631
0
{
2632
0
    _PyRuntimeState *runtime = &_PyRuntime;
2633
0
    PyThreadState *tstate = current_fast_get();
2634
0
    if (_PySys_Audit(tstate, "sys._current_frames", NULL) < 0) {
2635
0
        return NULL;
2636
0
    }
2637
2638
0
    PyObject *result = PyDict_New();
2639
0
    if (result == NULL) {
2640
0
        return NULL;
2641
0
    }
2642
2643
    /* for i in all interpreters:
2644
     *     for t in all of i's thread states:
2645
     *          if t's frame isn't NULL, map t's id to its frame
2646
     * Because these lists can mutate even when the GIL is held, we
2647
     * need to grab head_mutex for the duration.
2648
     */
2649
0
    _PyEval_StopTheWorldAll(runtime);
2650
0
    HEAD_LOCK(runtime);
2651
0
    PyInterpreterState *i;
2652
0
    for (i = runtime->interpreters.head; i != NULL; i = i->next) {
2653
0
        _Py_FOR_EACH_TSTATE_UNLOCKED(i, t) {
2654
0
            _PyInterpreterFrame *frame = t->current_frame;
2655
0
            frame = _PyFrame_GetFirstComplete(frame);
2656
0
            if (frame == NULL) {
2657
0
                continue;
2658
0
            }
2659
0
            PyObject *id = PyLong_FromUnsignedLong(t->thread_id);
2660
0
            if (id == NULL) {
2661
0
                goto fail;
2662
0
            }
2663
0
            PyObject *frameobj = (PyObject *)_PyFrame_GetFrameObject(frame);
2664
0
            if (frameobj == NULL) {
2665
0
                Py_DECREF(id);
2666
0
                goto fail;
2667
0
            }
2668
0
            int stat = PyDict_SetItem(result, id, frameobj);
2669
0
            Py_DECREF(id);
2670
0
            if (stat < 0) {
2671
0
                goto fail;
2672
0
            }
2673
0
        }
2674
0
    }
2675
0
    goto done;
2676
2677
0
fail:
2678
0
    Py_CLEAR(result);
2679
2680
0
done:
2681
0
    HEAD_UNLOCK(runtime);
2682
0
    _PyEval_StartTheWorldAll(runtime);
2683
0
    return result;
2684
0
}
2685
2686
/* The implementation of sys._current_exceptions().  This is intended to be
2687
   called with the GIL held, as it will be when called via
2688
   sys._current_exceptions().  It's possible it would work fine even without
2689
   the GIL held, but haven't thought enough about that.
2690
*/
2691
PyObject *
2692
_PyThread_CurrentExceptions(void)
2693
0
{
2694
0
    _PyRuntimeState *runtime = &_PyRuntime;
2695
0
    PyThreadState *tstate = current_fast_get();
2696
2697
0
    _Py_EnsureTstateNotNULL(tstate);
2698
2699
0
    if (_PySys_Audit(tstate, "sys._current_exceptions", NULL) < 0) {
2700
0
        return NULL;
2701
0
    }
2702
2703
0
    PyObject *result = PyDict_New();
2704
0
    if (result == NULL) {
2705
0
        return NULL;
2706
0
    }
2707
2708
    /* for i in all interpreters:
2709
     *     for t in all of i's thread states:
2710
     *          if t's frame isn't NULL, map t's id to its frame
2711
     * Because these lists can mutate even when the GIL is held, we
2712
     * need to grab head_mutex for the duration.
2713
     */
2714
0
    _PyEval_StopTheWorldAll(runtime);
2715
0
    HEAD_LOCK(runtime);
2716
0
    PyInterpreterState *i;
2717
0
    for (i = runtime->interpreters.head; i != NULL; i = i->next) {
2718
0
        _Py_FOR_EACH_TSTATE_UNLOCKED(i, t) {
2719
0
            _PyErr_StackItem *err_info = _PyErr_GetTopmostException(t);
2720
0
            if (err_info == NULL) {
2721
0
                continue;
2722
0
            }
2723
0
            PyObject *id = PyLong_FromUnsignedLong(t->thread_id);
2724
0
            if (id == NULL) {
2725
0
                goto fail;
2726
0
            }
2727
0
            PyObject *exc = err_info->exc_value;
2728
0
            assert(exc == NULL ||
2729
0
                   exc == Py_None ||
2730
0
                   PyExceptionInstance_Check(exc));
2731
2732
0
            int stat = PyDict_SetItem(result, id, exc == NULL ? Py_None : exc);
2733
0
            Py_DECREF(id);
2734
0
            if (stat < 0) {
2735
0
                goto fail;
2736
0
            }
2737
0
        }
2738
0
    }
2739
0
    goto done;
2740
2741
0
fail:
2742
0
    Py_CLEAR(result);
2743
2744
0
done:
2745
0
    HEAD_UNLOCK(runtime);
2746
0
    _PyEval_StartTheWorldAll(runtime);
2747
0
    return result;
2748
0
}
2749
2750
2751
/***********************************/
2752
/* Python "auto thread state" API. */
2753
/***********************************/
2754
2755
/* Internal initialization/finalization functions called by
2756
   Py_Initialize/Py_FinalizeEx
2757
*/
2758
PyStatus
2759
_PyGILState_Init(PyInterpreterState *interp)
2760
22
{
2761
22
    if (!_Py_IsMainInterpreter(interp)) {
2762
        /* Currently, PyGILState is shared by all interpreters. The main
2763
         * interpreter is responsible to initialize it. */
2764
0
        return _PyStatus_OK();
2765
0
    }
2766
22
    _PyRuntimeState *runtime = interp->runtime;
2767
22
    assert(gilstate_get() == NULL);
2768
22
    assert(runtime->gilstate.autoInterpreterState == NULL);
2769
22
    runtime->gilstate.autoInterpreterState = interp;
2770
22
    return _PyStatus_OK();
2771
22
}
2772
2773
void
2774
_PyGILState_Fini(PyInterpreterState *interp)
2775
0
{
2776
0
    if (!_Py_IsMainInterpreter(interp)) {
2777
        /* Currently, PyGILState is shared by all interpreters. The main
2778
         * interpreter is responsible to initialize it. */
2779
0
        return;
2780
0
    }
2781
0
    interp->runtime->gilstate.autoInterpreterState = NULL;
2782
0
}
2783
2784
2785
// XXX Drop this.
2786
void
2787
_PyGILState_SetTstate(PyThreadState *tstate)
2788
22
{
2789
    /* must init with valid states */
2790
22
    assert(tstate != NULL);
2791
22
    assert(tstate->interp != NULL);
2792
2793
22
    if (!_Py_IsMainInterpreter(tstate->interp)) {
2794
        /* Currently, PyGILState is shared by all interpreters. The main
2795
         * interpreter is responsible to initialize it. */
2796
0
        return;
2797
0
    }
2798
2799
22
#ifndef NDEBUG
2800
22
    _PyRuntimeState *runtime = tstate->interp->runtime;
2801
2802
22
    assert(runtime->gilstate.autoInterpreterState == tstate->interp);
2803
22
    assert(gilstate_get() == tstate);
2804
22
    assert(tstate->gilstate_counter == 1);
2805
22
#endif
2806
22
}
2807
2808
PyInterpreterState *
2809
_PyGILState_GetInterpreterStateUnsafe(void)
2810
0
{
2811
0
    return _PyRuntime.gilstate.autoInterpreterState;
2812
0
}
2813
2814
/* The public functions */
2815
2816
PyThreadState *
2817
PyGILState_GetThisThreadState(void)
2818
0
{
2819
0
    return gilstate_get();
2820
0
}
2821
2822
int
2823
PyGILState_Check(void)
2824
0
{
2825
0
    _PyRuntimeState *runtime = &_PyRuntime;
2826
0
    if (!_Py_atomic_load_int_relaxed(&runtime->gilstate.check_enabled)) {
2827
0
        return 1;
2828
0
    }
2829
2830
0
    PyThreadState *tstate = current_fast_get();
2831
0
    if (tstate == NULL) {
2832
0
        return 0;
2833
0
    }
2834
2835
0
    PyThreadState *tcur = gilstate_get();
2836
0
    return (tstate == tcur);
2837
0
}
2838
2839
PyGILState_STATE
2840
PyGILState_Ensure(void)
2841
0
{
2842
0
    _PyRuntimeState *runtime = &_PyRuntime;
2843
2844
    /* Note that we do not auto-init Python here - apart from
2845
       potential races with 2 threads auto-initializing, pep-311
2846
       spells out other issues.  Embedders are expected to have
2847
       called Py_Initialize(). */
2848
2849
    /* Ensure that _PyEval_InitThreads() and _PyGILState_Init() have been
2850
       called by Py_Initialize()
2851
2852
       TODO: This isn't thread-safe. There's no protection here against
2853
       concurrent finalization of the interpreter; it's simply a guard
2854
       for *after* the interpreter has finalized.
2855
     */
2856
0
    if (!_PyEval_ThreadsInitialized() || runtime->gilstate.autoInterpreterState == NULL) {
2857
0
        PyThread_hang_thread();
2858
0
    }
2859
2860
0
    PyThreadState *tcur = gilstate_get();
2861
0
    int has_gil;
2862
0
    if (tcur == NULL) {
2863
        /* Create a new Python thread state for this thread */
2864
        // XXX Use PyInterpreterState_EnsureThreadState()?
2865
0
        tcur = new_threadstate(runtime->gilstate.autoInterpreterState,
2866
0
                               _PyThreadState_WHENCE_GILSTATE);
2867
0
        if (tcur == NULL) {
2868
0
            Py_FatalError("Couldn't create thread-state for new thread");
2869
0
        }
2870
0
        bind_tstate(tcur);
2871
0
        bind_gilstate_tstate(tcur);
2872
2873
        /* This is our thread state!  We'll need to delete it in the
2874
           matching call to PyGILState_Release(). */
2875
0
        assert(tcur->gilstate_counter == 1);
2876
0
        tcur->gilstate_counter = 0;
2877
0
        has_gil = 0; /* new thread state is never current */
2878
0
    }
2879
0
    else {
2880
0
        has_gil = holds_gil(tcur);
2881
0
    }
2882
2883
0
    if (!has_gil) {
2884
0
        PyEval_RestoreThread(tcur);
2885
0
    }
2886
2887
    /* Update our counter in the thread-state - no need for locks:
2888
       - tcur will remain valid as we hold the GIL.
2889
       - the counter is safe as we are the only thread "allowed"
2890
         to modify this value
2891
    */
2892
0
    ++tcur->gilstate_counter;
2893
2894
0
    return has_gil ? PyGILState_LOCKED : PyGILState_UNLOCKED;
2895
0
}
2896
2897
void
2898
PyGILState_Release(PyGILState_STATE oldstate)
2899
0
{
2900
0
    PyThreadState *tstate = gilstate_get();
2901
0
    if (tstate == NULL) {
2902
0
        Py_FatalError("auto-releasing thread-state, "
2903
0
                      "but no thread-state for this thread");
2904
0
    }
2905
2906
    /* We must hold the GIL and have our thread state current */
2907
0
    if (!holds_gil(tstate)) {
2908
0
        _Py_FatalErrorFormat(__func__,
2909
0
                             "thread state %p must be current when releasing",
2910
0
                             tstate);
2911
0
    }
2912
0
    --tstate->gilstate_counter;
2913
0
    assert(tstate->gilstate_counter >= 0); /* illegal counter value */
2914
2915
    /* If we're going to destroy this thread-state, we must
2916
     * clear it while the GIL is held, as destructors may run.
2917
     */
2918
0
    if (tstate->gilstate_counter == 0) {
2919
        /* can't have been locked when we created it */
2920
0
        assert(oldstate == PyGILState_UNLOCKED);
2921
        // XXX Unbind tstate here.
2922
        // gh-119585: `PyThreadState_Clear()` may call destructors that
2923
        // themselves use PyGILState_Ensure and PyGILState_Release, so make
2924
        // sure that gilstate_counter is not zero when calling it.
2925
0
        ++tstate->gilstate_counter;
2926
0
        PyThreadState_Clear(tstate);
2927
0
        --tstate->gilstate_counter;
2928
        /* Delete the thread-state.  Note this releases the GIL too!
2929
         * It's vital that the GIL be held here, to avoid shutdown
2930
         * races; see bugs 225673 and 1061968 (that nasty bug has a
2931
         * habit of coming back).
2932
         */
2933
0
        assert(tstate->gilstate_counter == 0);
2934
0
        assert(current_fast_get() == tstate);
2935
0
        _PyThreadState_DeleteCurrent(tstate);
2936
0
    }
2937
    /* Release the lock if necessary */
2938
0
    else if (oldstate == PyGILState_UNLOCKED) {
2939
0
        PyEval_SaveThread();
2940
0
    }
2941
0
}
2942
2943
2944
/*************/
2945
/* Other API */
2946
/*************/
2947
2948
_PyFrameEvalFunction
2949
_PyInterpreterState_GetEvalFrameFunc(PyInterpreterState *interp)
2950
0
{
2951
0
    if (interp->eval_frame == NULL) {
2952
0
        return _PyEval_EvalFrameDefault;
2953
0
    }
2954
0
    return interp->eval_frame;
2955
0
}
2956
2957
2958
void
2959
_PyInterpreterState_SetEvalFrameFunc(PyInterpreterState *interp,
2960
                                     _PyFrameEvalFunction eval_frame)
2961
0
{
2962
0
    if (eval_frame == _PyEval_EvalFrameDefault) {
2963
0
        eval_frame = NULL;
2964
0
    }
2965
0
    if (eval_frame == interp->eval_frame) {
2966
0
        return;
2967
0
    }
2968
#ifdef _Py_TIER2
2969
    if (eval_frame != NULL) {
2970
        _Py_Executors_InvalidateAll(interp, 1);
2971
    }
2972
#endif
2973
0
    RARE_EVENT_INC(set_eval_frame_func);
2974
0
    _PyEval_StopTheWorld(interp);
2975
0
    interp->eval_frame = eval_frame;
2976
0
    _PyEval_StartTheWorld(interp);
2977
0
}
2978
2979
2980
const PyConfig*
2981
_PyInterpreterState_GetConfig(PyInterpreterState *interp)
2982
37.2M
{
2983
37.2M
    return &interp->config;
2984
37.2M
}
2985
2986
2987
const PyConfig*
2988
_Py_GetConfig(void)
2989
81.9k
{
2990
81.9k
    PyThreadState *tstate = current_fast_get();
2991
81.9k
    _Py_EnsureTstateNotNULL(tstate);
2992
81.9k
    return _PyInterpreterState_GetConfig(tstate->interp);
2993
81.9k
}
2994
2995
2996
int
2997
_PyInterpreterState_HasFeature(PyInterpreterState *interp, unsigned long feature)
2998
0
{
2999
0
    return ((interp->feature_flags & feature) != 0);
3000
0
}
3001
3002
3003
386k
#define MINIMUM_OVERHEAD 1000
3004
3005
static PyObject **
3006
push_chunk(PyThreadState *tstate, int size)
3007
386k
{
3008
386k
    int allocate_size = _PY_DATA_STACK_CHUNK_SIZE;
3009
386k
    while (allocate_size < (int)sizeof(PyObject*)*(size + MINIMUM_OVERHEAD)) {
3010
0
        allocate_size *= 2;
3011
0
    }
3012
386k
    _PyStackChunk *new = allocate_chunk(allocate_size, tstate->datastack_chunk);
3013
386k
    if (new == NULL) {
3014
0
        return NULL;
3015
0
    }
3016
386k
    if (tstate->datastack_chunk) {
3017
386k
        tstate->datastack_chunk->top = tstate->datastack_top -
3018
386k
                                       &tstate->datastack_chunk->data[0];
3019
386k
    }
3020
386k
    tstate->datastack_chunk = new;
3021
386k
    tstate->datastack_limit = (PyObject **)(((char *)new) + allocate_size);
3022
    // When new is the "root" chunk (i.e. new->previous == NULL), we can keep
3023
    // _PyThreadState_PopFrame from freeing it later by "skipping" over the
3024
    // first element:
3025
386k
    PyObject **res = &new->data[new->previous == NULL];
3026
386k
    tstate->datastack_top = res + size;
3027
386k
    return res;
3028
386k
}
3029
3030
_PyInterpreterFrame *
3031
_PyThreadState_PushFrame(PyThreadState *tstate, size_t size)
3032
34.4M
{
3033
34.4M
    assert(size < INT_MAX/sizeof(PyObject *));
3034
34.4M
    if (_PyThreadState_HasStackSpace(tstate, (int)size)) {
3035
34.0M
        _PyInterpreterFrame *res = (_PyInterpreterFrame *)tstate->datastack_top;
3036
34.0M
        tstate->datastack_top += size;
3037
34.0M
        return res;
3038
34.0M
    }
3039
386k
    return (_PyInterpreterFrame *)push_chunk(tstate, (int)size);
3040
34.4M
}
3041
3042
void
3043
_PyThreadState_PopFrame(PyThreadState *tstate, _PyInterpreterFrame * frame)
3044
162M
{
3045
162M
    assert(tstate->datastack_chunk);
3046
162M
    PyObject **base = (PyObject **)frame;
3047
162M
    if (base == &tstate->datastack_chunk->data[0]) {
3048
386k
        _PyStackChunk *chunk = tstate->datastack_chunk;
3049
386k
        _PyStackChunk *previous = chunk->previous;
3050
        // push_chunk ensures that the root chunk is never popped:
3051
386k
        assert(previous);
3052
386k
        tstate->datastack_top = &previous->data[previous->top];
3053
386k
        tstate->datastack_chunk = previous;
3054
386k
        _PyObject_VirtualFree(chunk, chunk->size);
3055
386k
        tstate->datastack_limit = (PyObject **)(((char *)previous) + previous->size);
3056
386k
    }
3057
161M
    else {
3058
161M
        assert(tstate->datastack_top);
3059
161M
        assert(tstate->datastack_top >= base);
3060
161M
        tstate->datastack_top = base;
3061
161M
    }
3062
162M
}
3063
3064
3065
#ifndef NDEBUG
3066
// Check that a Python thread state valid. In practice, this function is used
3067
// on a Python debug build to check if 'tstate' is a dangling pointer, if the
3068
// PyThreadState memory has been freed.
3069
//
3070
// Usage:
3071
//
3072
//     assert(_PyThreadState_CheckConsistency(tstate));
3073
int
3074
_PyThreadState_CheckConsistency(PyThreadState *tstate)
3075
587k
{
3076
587k
    assert(!_PyMem_IsPtrFreed(tstate));
3077
587k
    assert(!_PyMem_IsPtrFreed(tstate->interp));
3078
587k
    return 1;
3079
587k
}
3080
#endif
3081
3082
3083
// Check if a Python thread must call _PyThreadState_HangThread(), rather than
3084
// taking the GIL or attaching to the interpreter if Py_Finalize() has been
3085
// called.
3086
//
3087
// When this function is called by a daemon thread after Py_Finalize() has been
3088
// called, the GIL may no longer exist.
3089
//
3090
// tstate must be non-NULL.
3091
int
3092
_PyThreadState_MustExit(PyThreadState *tstate)
3093
587k
{
3094
587k
    int state = _Py_atomic_load_int_relaxed(&tstate->state);
3095
587k
    return state == _Py_THREAD_SHUTTING_DOWN;
3096
587k
}
3097
3098
void
3099
_PyThreadState_HangThread(PyThreadState *tstate)
3100
0
{
3101
0
    _PyThreadStateImpl *tstate_impl = (_PyThreadStateImpl *)tstate;
3102
0
    decref_threadstate(tstate_impl);
3103
0
    PyThread_hang_thread();
3104
0
}
3105
3106
/********************/
3107
/* mimalloc support */
3108
/********************/
3109
3110
static void
3111
tstate_mimalloc_bind(PyThreadState *tstate)
3112
22
{
3113
#ifdef Py_GIL_DISABLED
3114
    struct _mimalloc_thread_state *mts = &((_PyThreadStateImpl*)tstate)->mimalloc;
3115
3116
    // Initialize the mimalloc thread state. This must be called from the
3117
    // same thread that will use the thread state. The "mem" heap doubles as
3118
    // the "backing" heap.
3119
    mi_tld_t *tld = &mts->tld;
3120
    _mi_tld_init(tld, &mts->heaps[_Py_MIMALLOC_HEAP_MEM]);
3121
    llist_init(&mts->page_list);
3122
3123
    // Exiting threads push any remaining in-use segments to the abandoned
3124
    // pool to be re-claimed later by other threads. We use per-interpreter
3125
    // pools to keep Python objects from different interpreters separate.
3126
    tld->segments.abandoned = &tstate->interp->mimalloc.abandoned_pool;
3127
3128
    // Don't fill in the first N bytes up to ob_type in debug builds. We may
3129
    // access ob_tid and the refcount fields in the dict and list lock-less
3130
    // accesses, so they must remain valid for a while after deallocation.
3131
    size_t base_offset = offsetof(PyObject, ob_type);
3132
    if (_PyMem_DebugEnabled()) {
3133
        // The debug allocator adds two words at the beginning of each block.
3134
        base_offset += 2 * sizeof(size_t);
3135
    }
3136
    size_t debug_offsets[_Py_MIMALLOC_HEAP_COUNT] = {
3137
        [_Py_MIMALLOC_HEAP_OBJECT] = base_offset,
3138
        [_Py_MIMALLOC_HEAP_GC] = base_offset,
3139
        [_Py_MIMALLOC_HEAP_GC_PRE] = base_offset + 2 * sizeof(PyObject *),
3140
    };
3141
3142
    // Initialize each heap
3143
    for (uint8_t i = 0; i < _Py_MIMALLOC_HEAP_COUNT; i++) {
3144
        _mi_heap_init_ex(&mts->heaps[i], tld, _mi_arena_id_none(), false, i);
3145
        mts->heaps[i].debug_offset = (uint8_t)debug_offsets[i];
3146
    }
3147
3148
    // Heaps that store Python objects should use QSBR to delay freeing
3149
    // mimalloc pages while there may be concurrent lock-free readers.
3150
    mts->heaps[_Py_MIMALLOC_HEAP_OBJECT].page_use_qsbr = true;
3151
    mts->heaps[_Py_MIMALLOC_HEAP_GC].page_use_qsbr = true;
3152
    mts->heaps[_Py_MIMALLOC_HEAP_GC_PRE].page_use_qsbr = true;
3153
3154
    // By default, object allocations use _Py_MIMALLOC_HEAP_OBJECT.
3155
    // _PyObject_GC_New() and similar functions temporarily override this to
3156
    // use one of the GC heaps.
3157
    mts->current_object_heap = &mts->heaps[_Py_MIMALLOC_HEAP_OBJECT];
3158
3159
    _Py_atomic_store_int(&mts->initialized, 1);
3160
#endif
3161
22
}
3162
3163
void
3164
_PyThreadState_ClearMimallocHeaps(PyThreadState *tstate)
3165
0
{
3166
#ifdef Py_GIL_DISABLED
3167
    if (!tstate->_status.bound) {
3168
        // The mimalloc heaps are only initialized when the thread is bound.
3169
        return;
3170
    }
3171
3172
    _PyThreadStateImpl *tstate_impl = (_PyThreadStateImpl *)tstate;
3173
    for (Py_ssize_t i = 0; i < _Py_MIMALLOC_HEAP_COUNT; i++) {
3174
        // Abandon all segments in use by this thread. This pushes them to
3175
        // a shared pool to later be reclaimed by other threads. It's important
3176
        // to do this before the thread state is destroyed so that objects
3177
        // remain visible to the GC.
3178
        _mi_heap_collect_abandon(&tstate_impl->mimalloc.heaps[i]);
3179
    }
3180
#endif
3181
0
}
3182
3183
3184
int
3185
_Py_IsMainThread(void)
3186
33.2M
{
3187
33.2M
    unsigned long thread = PyThread_get_thread_ident();
3188
33.2M
    return (thread == _PyRuntime.main_thread);
3189
33.2M
}
3190
3191
3192
PyInterpreterState *
3193
_PyInterpreterState_Main(void)
3194
32.9M
{
3195
32.9M
    return _PyRuntime.interpreters.main;
3196
32.9M
}
3197
3198
3199
int
3200
_Py_IsMainInterpreterFinalizing(PyInterpreterState *interp)
3201
0
{
3202
    /* bpo-39877: Access _PyRuntime directly rather than using
3203
       tstate->interp->runtime to support calls from Python daemon threads.
3204
       After Py_Finalize() has been called, tstate can be a dangling pointer:
3205
       point to PyThreadState freed memory. */
3206
0
    return (_PyRuntimeState_GetFinalizing(&_PyRuntime) != NULL &&
3207
0
            interp == &_PyRuntime._main_interpreter);
3208
0
}
3209
3210
3211
const PyConfig *
3212
_Py_GetMainConfig(void)
3213
0
{
3214
0
    PyInterpreterState *interp = _PyInterpreterState_Main();
3215
0
    if (interp == NULL) {
3216
0
        return NULL;
3217
0
    }
3218
0
    return _PyInterpreterState_GetConfig(interp);
3219
0
}