Coverage Report

Created: 2025-10-10 06:33

next uncovered line (L), next uncovered region (R), next uncovered branch (B)
/src/cpython/Python/pystate.c
Line
Count
Source
1
2
/* Thread and interpreter state structures and their interfaces */
3
4
#include "Python.h"
5
#include "pycore_abstract.h"      // _PyIndex_Check()
6
#include "pycore_audit.h"         // _Py_AuditHookEntry
7
#include "pycore_ceval.h"         // _PyEval_AcquireLock()
8
#include "pycore_codecs.h"        // _PyCodec_Fini()
9
#include "pycore_critical_section.h" // _PyCriticalSection_Resume()
10
#include "pycore_dtoa.h"          // _dtoa_state_INIT()
11
#include "pycore_freelist.h"      // _PyObject_ClearFreeLists()
12
#include "pycore_initconfig.h"    // _PyStatus_OK()
13
#include "pycore_interpframe.h"   // _PyThreadState_HasStackSpace()
14
#include "pycore_object.h"        // _PyType_InitCache()
15
#include "pycore_obmalloc.h"      // _PyMem_obmalloc_state_on_heap()
16
#include "pycore_optimizer.h"     // JIT_CLEANUP_THRESHOLD
17
#include "pycore_parking_lot.h"   // _PyParkingLot_AfterFork()
18
#include "pycore_pyerrors.h"      // _PyErr_Clear()
19
#include "pycore_pylifecycle.h"   // _PyAST_Fini()
20
#include "pycore_pymem.h"         // _PyMem_DebugEnabled()
21
#include "pycore_runtime.h"       // _PyRuntime
22
#include "pycore_runtime_init.h"  // _PyRuntimeState_INIT
23
#include "pycore_stackref.h"      // Py_STACKREF_DEBUG
24
#include "pycore_time.h"          // _PyTime_Init()
25
#include "pycore_uop.h"           // UOP_BUFFER_SIZE
26
#include "pycore_uniqueid.h"      // _PyObject_FinalizePerThreadRefcounts()
27
28
29
/* --------------------------------------------------------------------------
30
CAUTION
31
32
Always use PyMem_RawMalloc() and PyMem_RawFree() directly in this file.  A
33
number of these functions are advertised as safe to call when the GIL isn't
34
held, and in a debug build Python redirects (e.g.) PyMem_NEW (etc) to Python's
35
debugging obmalloc functions.  Those aren't thread-safe (they rely on the GIL
36
to avoid the expense of doing their own locking).
37
-------------------------------------------------------------------------- */
38
39
#ifdef HAVE_DLOPEN
40
#  ifdef HAVE_DLFCN_H
41
#    include <dlfcn.h>
42
#  endif
43
#  if !HAVE_DECL_RTLD_LAZY
44
#    define RTLD_LAZY 1
45
#  endif
46
#endif
47
48
49
/****************************************/
50
/* helpers for the current thread state */
51
/****************************************/
52
53
// API for the current thread state is further down.
54
55
/* "current" means one of:
56
   - bound to the current OS thread
57
   - holds the GIL
58
 */
59
60
//-------------------------------------------------
61
// a highly efficient lookup for the current thread
62
//-------------------------------------------------
63
64
/*
65
   The stored thread state is set by PyThreadState_Swap().
66
67
   For each of these functions, the GIL must be held by the current thread.
68
 */
69
70
71
#ifdef HAVE_THREAD_LOCAL
72
/* The attached thread state for the current thread. */
73
_Py_thread_local PyThreadState *_Py_tss_tstate = NULL;
74
75
/* The "bound" thread state used by PyGILState_Ensure(),
76
   also known as a "gilstate." */
77
_Py_thread_local PyThreadState *_Py_tss_gilstate = NULL;
78
#endif
79
80
static inline PyThreadState *
81
current_fast_get(void)
82
102M
{
83
102M
#ifdef HAVE_THREAD_LOCAL
84
102M
    return _Py_tss_tstate;
85
#else
86
    // XXX Fall back to the PyThread_tss_*() API.
87
#  error "no supported thread-local variable storage classifier"
88
#endif
89
102M
}
90
91
static inline void
92
current_fast_set(_PyRuntimeState *Py_UNUSED(runtime), PyThreadState *tstate)
93
32.1k
{
94
32.1k
    assert(tstate != NULL);
95
32.1k
#ifdef HAVE_THREAD_LOCAL
96
32.1k
    _Py_tss_tstate = tstate;
97
#else
98
    // XXX Fall back to the PyThread_tss_*() API.
99
#  error "no supported thread-local variable storage classifier"
100
#endif
101
32.1k
}
102
103
static inline void
104
current_fast_clear(_PyRuntimeState *Py_UNUSED(runtime))
105
32.1k
{
106
32.1k
#ifdef HAVE_THREAD_LOCAL
107
32.1k
    _Py_tss_tstate = NULL;
108
#else
109
    // XXX Fall back to the PyThread_tss_*() API.
110
#  error "no supported thread-local variable storage classifier"
111
#endif
112
32.1k
}
113
114
#define tstate_verify_not_active(tstate) \
115
0
    if (tstate == current_fast_get()) { \
116
0
        _Py_FatalErrorFormat(__func__, "tstate %p is still current", tstate); \
117
0
    }
118
119
PyThreadState *
120
_PyThreadState_GetCurrent(void)
121
7.62M
{
122
7.62M
    return current_fast_get();
123
7.62M
}
124
125
126
//---------------------------------------------
127
// The thread state used by PyGILState_Ensure()
128
//---------------------------------------------
129
130
/*
131
   The stored thread state is set by bind_tstate() (AKA PyThreadState_Bind()).
132
133
   The GIL does not need to be held for these.
134
  */
135
136
static inline PyThreadState *
137
gilstate_get(void)
138
32
{
139
32
    return _Py_tss_gilstate;
140
32
}
141
142
static inline void
143
gilstate_set(PyThreadState *tstate)
144
16
{
145
16
    assert(tstate != NULL);
146
16
    _Py_tss_gilstate = tstate;
147
16
}
148
149
static inline void
150
gilstate_clear(void)
151
0
{
152
0
    _Py_tss_gilstate = NULL;
153
0
}
154
155
156
#ifndef NDEBUG
157
static inline int tstate_is_alive(PyThreadState *tstate);
158
159
static inline int
160
tstate_is_bound(PyThreadState *tstate)
161
{
162
    return tstate->_status.bound && !tstate->_status.unbound;
163
}
164
#endif  // !NDEBUG
165
166
static void bind_gilstate_tstate(PyThreadState *);
167
static void unbind_gilstate_tstate(PyThreadState *);
168
169
static void tstate_mimalloc_bind(PyThreadState *);
170
171
static void
172
bind_tstate(PyThreadState *tstate)
173
16
{
174
16
    assert(tstate != NULL);
175
16
    assert(tstate_is_alive(tstate) && !tstate->_status.bound);
176
16
    assert(!tstate->_status.unbound);  // just in case
177
16
    assert(!tstate->_status.bound_gilstate);
178
16
    assert(tstate != gilstate_get());
179
16
    assert(!tstate->_status.active);
180
16
    assert(tstate->thread_id == 0);
181
16
    assert(tstate->native_thread_id == 0);
182
183
    // Currently we don't necessarily store the thread state
184
    // in thread-local storage (e.g. per-interpreter).
185
186
16
    tstate->thread_id = PyThread_get_thread_ident();
187
16
#ifdef PY_HAVE_THREAD_NATIVE_ID
188
16
    tstate->native_thread_id = PyThread_get_thread_native_id();
189
16
#endif
190
191
#ifdef Py_GIL_DISABLED
192
    // Initialize biased reference counting inter-thread queue. Note that this
193
    // needs to be initialized from the active thread.
194
    _Py_brc_init_thread(tstate);
195
#endif
196
197
    // mimalloc state needs to be initialized from the active thread.
198
16
    tstate_mimalloc_bind(tstate);
199
200
16
    tstate->_status.bound = 1;
201
16
}
202
203
static void
204
unbind_tstate(PyThreadState *tstate)
205
0
{
206
0
    assert(tstate != NULL);
207
0
    assert(tstate_is_bound(tstate));
208
0
#ifndef HAVE_PTHREAD_STUBS
209
0
    assert(tstate->thread_id > 0);
210
0
#endif
211
0
#ifdef PY_HAVE_THREAD_NATIVE_ID
212
0
    assert(tstate->native_thread_id > 0);
213
0
#endif
214
215
    // We leave thread_id and native_thread_id alone
216
    // since they can be useful for debugging.
217
    // Check the `_status` field to know if these values
218
    // are still valid.
219
220
    // We leave tstate->_status.bound set to 1
221
    // to indicate it was previously bound.
222
0
    tstate->_status.unbound = 1;
223
0
}
224
225
226
/* Stick the thread state for this thread in thread specific storage.
227
228
   When a thread state is created for a thread by some mechanism
229
   other than PyGILState_Ensure(), it's important that the GILState
230
   machinery knows about it so it doesn't try to create another
231
   thread state for the thread.
232
   (This is a better fix for SF bug #1010677 than the first one attempted.)
233
234
   The only situation where you can legitimately have more than one
235
   thread state for an OS level thread is when there are multiple
236
   interpreters.
237
238
   Before 3.12, the PyGILState_*() APIs didn't work with multiple
239
   interpreters (see bpo-10915 and bpo-15751), so this function used
240
   to set TSS only once.  Thus, the first thread state created for that
241
   given OS level thread would "win", which seemed reasonable behaviour.
242
*/
243
244
static void
245
bind_gilstate_tstate(PyThreadState *tstate)
246
16
{
247
16
    assert(tstate != NULL);
248
16
    assert(tstate_is_alive(tstate));
249
16
    assert(tstate_is_bound(tstate));
250
    // XXX assert(!tstate->_status.active);
251
16
    assert(!tstate->_status.bound_gilstate);
252
253
16
    PyThreadState *tcur = gilstate_get();
254
16
    assert(tstate != tcur);
255
256
16
    if (tcur != NULL) {
257
0
        tcur->_status.bound_gilstate = 0;
258
0
    }
259
16
    gilstate_set(tstate);
260
16
    tstate->_status.bound_gilstate = 1;
261
16
}
262
263
static void
264
unbind_gilstate_tstate(PyThreadState *tstate)
265
0
{
266
0
    assert(tstate != NULL);
267
    // XXX assert(tstate_is_alive(tstate));
268
0
    assert(tstate_is_bound(tstate));
269
    // XXX assert(!tstate->_status.active);
270
0
    assert(tstate->_status.bound_gilstate);
271
0
    assert(tstate == gilstate_get());
272
0
    gilstate_clear();
273
0
    tstate->_status.bound_gilstate = 0;
274
0
}
275
276
277
//----------------------------------------------
278
// the thread state that currently holds the GIL
279
//----------------------------------------------
280
281
/* This is not exported, as it is not reliable!  It can only
282
   ever be compared to the state for the *current* thread.
283
   * If not equal, then it doesn't matter that the actual
284
     value may change immediately after comparison, as it can't
285
     possibly change to the current thread's state.
286
   * If equal, then the current thread holds the lock, so the value can't
287
     change until we yield the lock.
288
*/
289
static int
290
holds_gil(PyThreadState *tstate)
291
0
{
292
    // XXX Fall back to tstate->interp->runtime->ceval.gil.last_holder
293
    // (and tstate->interp->runtime->ceval.gil.locked).
294
0
    assert(tstate != NULL);
295
    /* Must be the tstate for this thread */
296
0
    assert(tstate == gilstate_get());
297
0
    return tstate == current_fast_get();
298
0
}
299
300
301
/****************************/
302
/* the global runtime state */
303
/****************************/
304
305
//----------
306
// lifecycle
307
//----------
308
309
/* Suppress deprecation warning for PyBytesObject.ob_shash */
310
_Py_COMP_DIAG_PUSH
311
_Py_COMP_DIAG_IGNORE_DEPR_DECLS
312
/* We use "initial" if the runtime gets re-used
313
   (e.g. Py_Finalize() followed by Py_Initialize()).
314
   Note that we initialize "initial" relative to _PyRuntime,
315
   to ensure pre-initialized pointers point to the active
316
   runtime state (and not "initial"). */
317
static const _PyRuntimeState initial = _PyRuntimeState_INIT(_PyRuntime, "");
318
_Py_COMP_DIAG_POP
319
320
#define LOCKS_INIT(runtime) \
321
0
    { \
322
0
        &(runtime)->interpreters.mutex, \
323
0
        &(runtime)->xi.data_lookup.registry.mutex, \
324
0
        &(runtime)->unicode_state.ids.mutex, \
325
0
        &(runtime)->imports.extensions.mutex, \
326
0
        &(runtime)->ceval.pending_mainthread.mutex, \
327
0
        &(runtime)->atexit.mutex, \
328
0
        &(runtime)->audit_hooks.mutex, \
329
0
        &(runtime)->allocators.mutex, \
330
0
        &(runtime)->_main_interpreter.types.mutex, \
331
0
        &(runtime)->_main_interpreter.code_state.mutex, \
332
0
    }
333
334
static void
335
init_runtime(_PyRuntimeState *runtime,
336
             void *open_code_hook, void *open_code_userdata,
337
             _Py_AuditHookEntry *audit_hook_head,
338
             Py_ssize_t unicode_next_index)
339
16
{
340
16
    assert(!runtime->preinitializing);
341
16
    assert(!runtime->preinitialized);
342
16
    assert(!runtime->core_initialized);
343
16
    assert(!runtime->initialized);
344
16
    assert(!runtime->_initialized);
345
346
16
    runtime->open_code_hook = open_code_hook;
347
16
    runtime->open_code_userdata = open_code_userdata;
348
16
    runtime->audit_hooks.head = audit_hook_head;
349
350
16
    PyPreConfig_InitPythonConfig(&runtime->preconfig);
351
352
    // Set it to the ID of the main thread of the main interpreter.
353
16
    runtime->main_thread = PyThread_get_thread_ident();
354
355
16
    runtime->unicode_state.ids.next_index = unicode_next_index;
356
16
    runtime->_initialized = 1;
357
16
}
358
359
PyStatus
360
_PyRuntimeState_Init(_PyRuntimeState *runtime)
361
16
{
362
    /* We preserve the hook across init, because there is
363
       currently no public API to set it between runtime
364
       initialization and interpreter initialization. */
365
16
    void *open_code_hook = runtime->open_code_hook;
366
16
    void *open_code_userdata = runtime->open_code_userdata;
367
16
    _Py_AuditHookEntry *audit_hook_head = runtime->audit_hooks.head;
368
    // bpo-42882: Preserve next_index value if Py_Initialize()/Py_Finalize()
369
    // is called multiple times.
370
16
    Py_ssize_t unicode_next_index = runtime->unicode_state.ids.next_index;
371
372
16
    if (runtime->_initialized) {
373
        // Py_Initialize() must be running again.
374
        // Reset to _PyRuntimeState_INIT.
375
0
        memcpy(runtime, &initial, sizeof(*runtime));
376
        // Preserve the cookie from the original runtime.
377
0
        memcpy(runtime->debug_offsets.cookie, _Py_Debug_Cookie, 8);
378
0
        assert(!runtime->_initialized);
379
0
    }
380
381
16
    PyStatus status = _PyTime_Init(&runtime->time);
382
16
    if (_PyStatus_EXCEPTION(status)) {
383
0
        return status;
384
0
    }
385
386
16
    init_runtime(runtime, open_code_hook, open_code_userdata, audit_hook_head,
387
16
                 unicode_next_index);
388
389
16
    return _PyStatus_OK();
390
16
}
391
392
void
393
_PyRuntimeState_Fini(_PyRuntimeState *runtime)
394
0
{
395
#ifdef Py_REF_DEBUG
396
    /* The count is cleared by _Py_FinalizeRefTotal(). */
397
    assert(runtime->object_state.interpreter_leaks == 0);
398
#endif
399
0
    gilstate_clear();
400
0
}
401
402
#ifdef HAVE_FORK
403
/* This function is called from PyOS_AfterFork_Child to ensure that
404
   newly created child processes do not share locks with the parent. */
405
PyStatus
406
_PyRuntimeState_ReInitThreads(_PyRuntimeState *runtime)
407
0
{
408
    // This was initially set in _PyRuntimeState_Init().
409
0
    runtime->main_thread = PyThread_get_thread_ident();
410
411
    // Clears the parking lot. Any waiting threads are dead. This must be
412
    // called before releasing any locks that use the parking lot.
413
0
    _PyParkingLot_AfterFork();
414
415
    // Re-initialize global locks
416
0
    PyMutex *locks[] = LOCKS_INIT(runtime);
417
0
    for (size_t i = 0; i < Py_ARRAY_LENGTH(locks); i++) {
418
0
        _PyMutex_at_fork_reinit(locks[i]);
419
0
    }
420
#ifdef Py_GIL_DISABLED
421
    for (PyInterpreterState *interp = runtime->interpreters.head;
422
         interp != NULL; interp = interp->next)
423
    {
424
        for (int i = 0; i < NUM_WEAKREF_LIST_LOCKS; i++) {
425
            _PyMutex_at_fork_reinit(&interp->weakref_locks[i]);
426
        }
427
    }
428
#endif
429
430
0
    _PyTypes_AfterFork();
431
432
0
    _PyThread_AfterFork(&runtime->threads);
433
434
0
    return _PyStatus_OK();
435
0
}
436
#endif
437
438
439
/*************************************/
440
/* the per-interpreter runtime state */
441
/*************************************/
442
443
//----------
444
// lifecycle
445
//----------
446
447
/* Calling this indicates that the runtime is ready to create interpreters. */
448
449
PyStatus
450
_PyInterpreterState_Enable(_PyRuntimeState *runtime)
451
16
{
452
16
    struct pyinterpreters *interpreters = &runtime->interpreters;
453
16
    interpreters->next_id = 0;
454
16
    return _PyStatus_OK();
455
16
}
456
457
static PyInterpreterState *
458
alloc_interpreter(void)
459
0
{
460
0
    size_t alignment = _Alignof(PyInterpreterState);
461
0
    size_t allocsize = sizeof(PyInterpreterState) + alignment - 1;
462
0
    void *mem = PyMem_RawCalloc(1, allocsize);
463
0
    if (mem == NULL) {
464
0
        return NULL;
465
0
    }
466
0
    PyInterpreterState *interp = _Py_ALIGN_UP(mem, alignment);
467
0
    assert(_Py_IS_ALIGNED(interp, alignment));
468
0
    interp->_malloced = mem;
469
0
    return interp;
470
0
}
471
472
static void
473
free_interpreter(PyInterpreterState *interp)
474
0
{
475
    // The main interpreter is statically allocated so
476
    // should not be freed.
477
0
    if (interp != &_PyRuntime._main_interpreter) {
478
0
        if (_PyMem_obmalloc_state_on_heap(interp)) {
479
            // interpreter has its own obmalloc state, free it
480
0
            PyMem_RawFree(interp->obmalloc);
481
0
            interp->obmalloc = NULL;
482
0
        }
483
0
        assert(_Py_IS_ALIGNED(interp, _Alignof(PyInterpreterState)));
484
0
        PyMem_RawFree(interp->_malloced);
485
0
    }
486
0
}
487
488
#ifndef NDEBUG
489
static inline int check_interpreter_whence(long);
490
#endif
491
492
extern _Py_CODEUNIT *
493
_Py_LazyJitTrampoline(
494
    struct _PyExecutorObject *exec, _PyInterpreterFrame *frame, _PyStackRef *stack_pointer, PyThreadState *tstate
495
);
496
497
/* Get the interpreter state to a minimal consistent state.
498
   Further init happens in pylifecycle.c before it can be used.
499
   All fields not initialized here are expected to be zeroed out,
500
   e.g. by PyMem_RawCalloc() or memset(), or otherwise pre-initialized.
501
   The runtime state is not manipulated.  Instead it is assumed that
502
   the interpreter is getting added to the runtime.
503
504
   Note that the main interpreter was statically initialized as part
505
   of the runtime and most state is already set properly.  That leaves
506
   a small number of fields to initialize dynamically, as well as some
507
   that are initialized lazily.
508
509
   For subinterpreters we memcpy() the main interpreter in
510
   PyInterpreterState_New(), leaving it in the same mostly-initialized
511
   state.  The only difference is that the interpreter has some
512
   self-referential state that is statically initialized to the
513
   main interpreter.  We fix those fields here, in addition
514
   to the other dynamically initialized fields.
515
  */
516
static PyStatus
517
init_interpreter(PyInterpreterState *interp,
518
                 _PyRuntimeState *runtime, int64_t id,
519
                 PyInterpreterState *next,
520
                 long whence)
521
16
{
522
16
    if (interp->_initialized) {
523
0
        return _PyStatus_ERR("interpreter already initialized");
524
0
    }
525
526
16
    assert(interp->_whence == _PyInterpreterState_WHENCE_NOTSET);
527
16
    assert(check_interpreter_whence(whence) == 0);
528
16
    interp->_whence = whence;
529
530
16
    assert(runtime != NULL);
531
16
    interp->runtime = runtime;
532
533
16
    assert(id > 0 || (id == 0 && interp == runtime->interpreters.main));
534
16
    interp->id = id;
535
536
16
    interp->id_refcount = 0;
537
538
16
    assert(runtime->interpreters.head == interp);
539
16
    assert(next != NULL || (interp == runtime->interpreters.main));
540
16
    interp->next = next;
541
542
16
    interp->threads.preallocated = &interp->_initial_thread;
543
544
    // We would call _PyObject_InitState() at this point
545
    // if interp->feature_flags were already set.
546
547
16
    _PyEval_InitState(interp);
548
16
    _PyGC_InitState(&interp->gc);
549
16
    PyConfig_InitPythonConfig(&interp->config);
550
16
    _PyType_InitCache(interp);
551
#ifdef Py_GIL_DISABLED
552
    _Py_brc_init_state(interp);
553
#endif
554
555
#ifdef _Py_TIER2
556
     // Ensure the buffer is to be set as NULL.
557
    interp->jit_uop_buffer = NULL;
558
#endif
559
16
    llist_init(&interp->mem_free_queue.head);
560
16
    llist_init(&interp->asyncio_tasks_head);
561
16
    interp->asyncio_tasks_lock = (PyMutex){0};
562
272
    for (int i = 0; i < _PY_MONITORING_UNGROUPED_EVENTS; i++) {
563
256
        interp->monitors.tools[i] = 0;
564
256
    }
565
144
    for (int t = 0; t < PY_MONITORING_TOOL_IDS; t++) {
566
2.56k
        for (int e = 0; e < _PY_MONITORING_EVENTS; e++) {
567
2.43k
            interp->monitoring_callables[t][e] = NULL;
568
569
2.43k
        }
570
128
        interp->monitoring_tool_versions[t] = 0;
571
128
    }
572
16
    interp->_code_object_generation = 0;
573
16
    interp->jit = false;
574
16
    interp->compiling = false;
575
16
    interp->executor_list_head = NULL;
576
16
    interp->executor_deletion_list_head = NULL;
577
16
    interp->executor_deletion_list_remaining_capacity = 0;
578
16
    interp->trace_run_counter = JIT_CLEANUP_THRESHOLD;
579
16
    if (interp != &runtime->_main_interpreter) {
580
        /* Fix the self-referential, statically initialized fields. */
581
0
        interp->dtoa = (struct _dtoa_state)_dtoa_state_INIT(interp);
582
0
    }
583
#if !defined(Py_GIL_DISABLED) && defined(Py_STACKREF_DEBUG)
584
    interp->next_stackref = INITIAL_STACKREF_INDEX;
585
    _Py_hashtable_allocator_t alloc = {
586
        .malloc = malloc,
587
        .free = free,
588
    };
589
    interp->open_stackrefs_table = _Py_hashtable_new_full(
590
        _Py_hashtable_hash_ptr,
591
        _Py_hashtable_compare_direct,
592
        NULL,
593
        NULL,
594
        &alloc
595
    );
596
#  ifdef Py_STACKREF_CLOSE_DEBUG
597
    interp->closed_stackrefs_table = _Py_hashtable_new_full(
598
        _Py_hashtable_hash_ptr,
599
        _Py_hashtable_compare_direct,
600
        NULL,
601
        NULL,
602
        &alloc
603
    );
604
#  endif
605
    _Py_stackref_associate(interp, Py_None, PyStackRef_None);
606
    _Py_stackref_associate(interp, Py_False, PyStackRef_False);
607
    _Py_stackref_associate(interp, Py_True, PyStackRef_True);
608
#endif
609
610
16
    interp->_initialized = 1;
611
16
    return _PyStatus_OK();
612
16
}
613
614
615
PyStatus
616
_PyInterpreterState_New(PyThreadState *tstate, PyInterpreterState **pinterp)
617
16
{
618
16
    *pinterp = NULL;
619
620
    // Don't get runtime from tstate since tstate can be NULL
621
16
    _PyRuntimeState *runtime = &_PyRuntime;
622
623
    // tstate is NULL when pycore_create_interpreter() calls
624
    // _PyInterpreterState_New() to create the main interpreter.
625
16
    if (tstate != NULL) {
626
0
        if (_PySys_Audit(tstate, "cpython.PyInterpreterState_New", NULL) < 0) {
627
0
            return _PyStatus_ERR("sys.audit failed");
628
0
        }
629
0
    }
630
631
    /* We completely serialize creation of multiple interpreters, since
632
       it simplifies things here and blocking concurrent calls isn't a problem.
633
       Regardless, we must fully block subinterpreter creation until
634
       after the main interpreter is created. */
635
16
    HEAD_LOCK(runtime);
636
637
16
    struct pyinterpreters *interpreters = &runtime->interpreters;
638
16
    int64_t id = interpreters->next_id;
639
16
    interpreters->next_id += 1;
640
641
    // Allocate the interpreter and add it to the runtime state.
642
16
    PyInterpreterState *interp;
643
16
    PyStatus status;
644
16
    PyInterpreterState *old_head = interpreters->head;
645
16
    if (old_head == NULL) {
646
        // We are creating the main interpreter.
647
16
        assert(interpreters->main == NULL);
648
16
        assert(id == 0);
649
650
16
        interp = &runtime->_main_interpreter;
651
16
        assert(interp->id == 0);
652
16
        assert(interp->next == NULL);
653
654
16
        interpreters->main = interp;
655
16
    }
656
0
    else {
657
0
        assert(interpreters->main != NULL);
658
0
        assert(id != 0);
659
660
0
        interp = alloc_interpreter();
661
0
        if (interp == NULL) {
662
0
            status = _PyStatus_NO_MEMORY();
663
0
            goto error;
664
0
        }
665
        // Set to _PyInterpreterState_INIT.
666
0
        memcpy(interp, &initial._main_interpreter, sizeof(*interp));
667
668
0
        if (id < 0) {
669
            /* overflow or Py_Initialize() not called yet! */
670
0
            status = _PyStatus_ERR("failed to get an interpreter ID");
671
0
            goto error;
672
0
        }
673
0
    }
674
16
    interpreters->head = interp;
675
676
16
    long whence = _PyInterpreterState_WHENCE_UNKNOWN;
677
16
    status = init_interpreter(interp, runtime,
678
16
                              id, old_head, whence);
679
16
    if (_PyStatus_EXCEPTION(status)) {
680
0
        goto error;
681
0
    }
682
683
16
    HEAD_UNLOCK(runtime);
684
685
16
    assert(interp != NULL);
686
16
    *pinterp = interp;
687
16
    return _PyStatus_OK();
688
689
0
error:
690
0
    HEAD_UNLOCK(runtime);
691
692
0
    if (interp != NULL) {
693
0
        free_interpreter(interp);
694
0
    }
695
0
    return status;
696
16
}
697
698
699
PyInterpreterState *
700
PyInterpreterState_New(void)
701
0
{
702
    // tstate can be NULL
703
0
    PyThreadState *tstate = current_fast_get();
704
705
0
    PyInterpreterState *interp;
706
0
    PyStatus status = _PyInterpreterState_New(tstate, &interp);
707
0
    if (_PyStatus_EXCEPTION(status)) {
708
0
        Py_ExitStatusException(status);
709
0
    }
710
0
    assert(interp != NULL);
711
0
    return interp;
712
0
}
713
714
#if !defined(Py_GIL_DISABLED) && defined(Py_STACKREF_DEBUG)
715
extern void
716
_Py_stackref_report_leaks(PyInterpreterState *interp);
717
#endif
718
719
static void
720
interpreter_clear(PyInterpreterState *interp, PyThreadState *tstate)
721
0
{
722
0
    assert(interp != NULL);
723
0
    assert(tstate != NULL);
724
0
    _PyRuntimeState *runtime = interp->runtime;
725
726
    /* XXX Conditions we need to enforce:
727
728
       * the GIL must be held by the current thread
729
       * tstate must be the "current" thread state (current_fast_get())
730
       * tstate->interp must be interp
731
       * for the main interpreter, tstate must be the main thread
732
     */
733
    // XXX Ideally, we would not rely on any thread state in this function
734
    // (and we would drop the "tstate" argument).
735
736
0
    if (_PySys_Audit(tstate, "cpython.PyInterpreterState_Clear", NULL) < 0) {
737
0
        _PyErr_Clear(tstate);
738
0
    }
739
740
    // Clear the current/main thread state last.
741
0
    _Py_FOR_EACH_TSTATE_BEGIN(interp, p) {
742
        // See https://github.com/python/cpython/issues/102126
743
        // Must be called without HEAD_LOCK held as it can deadlock
744
        // if any finalizer tries to acquire that lock.
745
0
        HEAD_UNLOCK(runtime);
746
0
        PyThreadState_Clear(p);
747
0
        HEAD_LOCK(runtime);
748
0
    }
749
0
    _Py_FOR_EACH_TSTATE_END(interp);
750
0
    if (tstate->interp == interp) {
751
        /* We fix tstate->_status below when we for sure aren't using it
752
           (e.g. no longer need the GIL). */
753
        // XXX Eliminate the need to do this.
754
0
        tstate->_status.cleared = 0;
755
0
    }
756
757
    /* It is possible that any of the objects below have a finalizer
758
       that runs Python code or otherwise relies on a thread state
759
       or even the interpreter state.  For now we trust that isn't
760
       a problem.
761
     */
762
    // XXX Make sure we properly deal with problematic finalizers.
763
764
0
    Py_CLEAR(interp->audit_hooks);
765
766
    // At this time, all the threads should be cleared so we don't need atomic
767
    // operations for instrumentation_version or eval_breaker.
768
0
    interp->ceval.instrumentation_version = 0;
769
0
    tstate->eval_breaker = 0;
770
771
0
    for (int i = 0; i < _PY_MONITORING_UNGROUPED_EVENTS; i++) {
772
0
        interp->monitors.tools[i] = 0;
773
0
    }
774
0
    for (int t = 0; t < PY_MONITORING_TOOL_IDS; t++) {
775
0
        for (int e = 0; e < _PY_MONITORING_EVENTS; e++) {
776
0
            Py_CLEAR(interp->monitoring_callables[t][e]);
777
0
        }
778
0
    }
779
0
    for (int t = 0; t < PY_MONITORING_TOOL_IDS; t++) {
780
0
        Py_CLEAR(interp->monitoring_tool_names[t]);
781
0
    }
782
0
    interp->_code_object_generation = 0;
783
#ifdef Py_GIL_DISABLED
784
    interp->tlbc_indices.tlbc_generation = 0;
785
#endif
786
787
0
    PyConfig_Clear(&interp->config);
788
0
    _PyCodec_Fini(interp);
789
790
0
    assert(interp->imports.modules == NULL);
791
0
    assert(interp->imports.modules_by_index == NULL);
792
0
    assert(interp->imports.importlib == NULL);
793
0
    assert(interp->imports.import_func == NULL);
794
795
0
    Py_CLEAR(interp->sysdict_copy);
796
0
    Py_CLEAR(interp->builtins_copy);
797
0
    Py_CLEAR(interp->dict);
798
0
#ifdef HAVE_FORK
799
0
    Py_CLEAR(interp->before_forkers);
800
0
    Py_CLEAR(interp->after_forkers_parent);
801
0
    Py_CLEAR(interp->after_forkers_child);
802
0
#endif
803
804
805
#ifdef _Py_TIER2
806
    _Py_ClearExecutorDeletionList(interp);
807
    if (interp->jit_uop_buffer != NULL) {
808
        _PyObject_VirtualFree(interp->jit_uop_buffer, UOP_BUFFER_SIZE);
809
        interp->jit_uop_buffer = NULL;
810
    }
811
#endif
812
0
    _PyAST_Fini(interp);
813
0
    _PyAtExit_Fini(interp);
814
815
    // All Python types must be destroyed before the last GC collection. Python
816
    // types create a reference cycle to themselves in their
817
    // PyTypeObject.tp_mro member (the tuple contains the type).
818
819
    /* Last garbage collection on this interpreter */
820
0
    _PyGC_CollectNoFail(tstate);
821
0
    _PyGC_Fini(interp);
822
823
    // Finalize warnings after last gc so that any finalizers can
824
    // access warnings state
825
0
    _PyWarnings_Fini(interp);
826
0
    struct _PyExecutorObject *cold = interp->cold_executor;
827
0
    if (cold != NULL) {
828
0
        interp->cold_executor = NULL;
829
0
        assert(cold->vm_data.valid);
830
0
        assert(cold->vm_data.warm);
831
0
        _PyExecutor_Free(cold);
832
0
    }
833
    /* We don't clear sysdict and builtins until the end of this function.
834
       Because clearing other attributes can execute arbitrary Python code
835
       which requires sysdict and builtins. */
836
0
    PyDict_Clear(interp->sysdict);
837
0
    PyDict_Clear(interp->builtins);
838
0
    Py_CLEAR(interp->sysdict);
839
0
    Py_CLEAR(interp->builtins);
840
841
#if !defined(Py_GIL_DISABLED) && defined(Py_STACKREF_DEBUG)
842
#  ifdef Py_STACKREF_CLOSE_DEBUG
843
    _Py_hashtable_destroy(interp->closed_stackrefs_table);
844
    interp->closed_stackrefs_table = NULL;
845
#  endif
846
    _Py_stackref_report_leaks(interp);
847
    _Py_hashtable_destroy(interp->open_stackrefs_table);
848
    interp->open_stackrefs_table = NULL;
849
#endif
850
851
0
    if (tstate->interp == interp) {
852
        /* We are now safe to fix tstate->_status.cleared. */
853
        // XXX Do this (much) earlier?
854
0
        tstate->_status.cleared = 1;
855
0
    }
856
857
0
    for (int i=0; i < DICT_MAX_WATCHERS; i++) {
858
0
        interp->dict_state.watchers[i] = NULL;
859
0
    }
860
861
0
    for (int i=0; i < TYPE_MAX_WATCHERS; i++) {
862
0
        interp->type_watchers[i] = NULL;
863
0
    }
864
865
0
    for (int i=0; i < FUNC_MAX_WATCHERS; i++) {
866
0
        interp->func_watchers[i] = NULL;
867
0
    }
868
0
    interp->active_func_watchers = 0;
869
870
0
    for (int i=0; i < CODE_MAX_WATCHERS; i++) {
871
0
        interp->code_watchers[i] = NULL;
872
0
    }
873
0
    interp->active_code_watchers = 0;
874
875
0
    for (int i=0; i < CONTEXT_MAX_WATCHERS; i++) {
876
0
        interp->context_watchers[i] = NULL;
877
0
    }
878
0
    interp->active_context_watchers = 0;
879
    // XXX Once we have one allocator per interpreter (i.e.
880
    // per-interpreter GC) we must ensure that all of the interpreter's
881
    // objects have been cleaned up at that point.
882
883
    // We could clear interp->threads.freelist here
884
    // if it held more than just the initial thread state.
885
0
}
886
887
888
void
889
PyInterpreterState_Clear(PyInterpreterState *interp)
890
0
{
891
    // Use the current Python thread state to call audit hooks and to collect
892
    // garbage. It can be different than the current Python thread state
893
    // of 'interp'.
894
0
    PyThreadState *current_tstate = current_fast_get();
895
0
    _PyImport_ClearCore(interp);
896
0
    interpreter_clear(interp, current_tstate);
897
0
}
898
899
900
void
901
_PyInterpreterState_Clear(PyThreadState *tstate)
902
0
{
903
0
    _PyImport_ClearCore(tstate->interp);
904
0
    interpreter_clear(tstate->interp, tstate);
905
0
}
906
907
908
static inline void tstate_deactivate(PyThreadState *tstate);
909
static void tstate_set_detached(PyThreadState *tstate, int detached_state);
910
static void zapthreads(PyInterpreterState *interp);
911
912
void
PyInterpreterState_Delete(PyInterpreterState *interp)
{
    /* Public API: unlink `interp` from the runtime's interpreter list and
       free it.  The interpreter must already be cleared and must have no
       remaining thread states (fatal error otherwise). */
    _PyRuntimeState *runtime = interp->runtime;
    struct pyinterpreters *interpreters = &runtime->interpreters;

    // XXX Clearing the "current" thread state should happen before
    // we start finalizing the interpreter (or the current thread state).
    PyThreadState *tcur = current_fast_get();
    if (tcur != NULL && interp == tcur->interp) {
        /* Unset current thread.  After this, many C API calls become crashy. */
        _PyThreadState_Detach(tcur);
    }

    zapthreads(interp);

    // XXX These two calls should be done at the end of clear_interpreter(),
    // but currently some objects get decref'ed after that.
#ifdef Py_REF_DEBUG
    _PyInterpreterState_FinalizeRefTotal(interp);
#endif
    _PyInterpreterState_FinalizeAllocatedBlocks(interp);

    HEAD_LOCK(runtime);
    /* Find the list link pointing at `interp`; not being in the list is a
       fatal inconsistency. */
    PyInterpreterState **p;
    for (p = &interpreters->head; ; p = &(*p)->next) {
        if (*p == NULL) {
            Py_FatalError("NULL interpreter");
        }
        if (*p == interp) {
            break;
        }
    }
    if (interp->threads.head != NULL) {
        Py_FatalError("remaining threads");
    }
    *p = interp->next;

    if (interpreters->main == interp) {
        interpreters->main = NULL;
        /* The main interpreter must be deleted last. */
        if (interpreters->head != NULL) {
            Py_FatalError("remaining subinterpreters");
        }
    }
    HEAD_UNLOCK(runtime);

    _Py_qsbr_fini(interp);

    _PyObject_FiniState(interp);

    free_interpreter(interp);
}
964
965
966
#ifdef HAVE_FORK
967
/*
968
 * Delete all interpreter states except the main interpreter.  If there
969
 * is a current interpreter state, it *must* be the main interpreter.
970
 */
971
PyStatus
972
_PyInterpreterState_DeleteExceptMain(_PyRuntimeState *runtime)
973
0
{
974
0
    struct pyinterpreters *interpreters = &runtime->interpreters;
975
976
0
    PyThreadState *tstate = _PyThreadState_Swap(runtime, NULL);
977
0
    if (tstate != NULL && tstate->interp != interpreters->main) {
978
0
        return _PyStatus_ERR("not main interpreter");
979
0
    }
980
981
0
    HEAD_LOCK(runtime);
982
0
    PyInterpreterState *interp = interpreters->head;
983
0
    interpreters->head = NULL;
984
0
    while (interp != NULL) {
985
0
        if (interp == interpreters->main) {
986
0
            interpreters->main->next = NULL;
987
0
            interpreters->head = interp;
988
0
            interp = interp->next;
989
0
            continue;
990
0
        }
991
992
        // XXX Won't this fail since PyInterpreterState_Clear() requires
993
        // the "current" tstate to be set?
994
0
        PyInterpreterState_Clear(interp);  // XXX must activate?
995
0
        zapthreads(interp);
996
0
        PyInterpreterState *prev_interp = interp;
997
0
        interp = interp->next;
998
0
        free_interpreter(prev_interp);
999
0
    }
1000
0
    HEAD_UNLOCK(runtime);
1001
1002
0
    if (interpreters->head == NULL) {
1003
0
        return _PyStatus_ERR("missing main interpreter");
1004
0
    }
1005
0
    _PyThreadState_Swap(runtime, tstate);
1006
0
    return _PyStatus_OK();
1007
0
}
1008
#endif
1009
1010
/* Record `tstate` as the thread running the interpreter's main program
   (NULL to clear).  Relaxed atomic store; readers use get_main_thread(). */
static inline void
set_main_thread(PyInterpreterState *interp, PyThreadState *tstate)
{
    _Py_atomic_store_ptr_relaxed(&interp->threads.main, tstate);
}
1015
1016
/* Return the thread state recorded as running the interpreter's main
   program, or NULL if none.  Relaxed atomic load. */
static inline PyThreadState *
get_main_thread(PyInterpreterState *interp)
{
    return _Py_atomic_load_ptr_relaxed(&interp->threads.main);
}
1021
1022
void
_PyErr_SetInterpreterAlreadyRunning(void)
{
    /* Helper: set the exception raised when a second attempt is made to
       mark an interpreter as running its main program. */
    PyErr_SetString(PyExc_InterpreterError, "interpreter already running");
}
1027
1028
int
_PyInterpreterState_SetRunningMain(PyInterpreterState *interp)
{
    /* Mark the current thread as the one running `interp`'s main program.
       Returns 0 on success; returns -1 with an exception set if another
       thread already holds that role or the current thread state belongs
       to a different interpreter. */
    if (get_main_thread(interp) != NULL) {
        _PyErr_SetInterpreterAlreadyRunning();
        return -1;
    }
    PyThreadState *tstate = current_fast_get();
    _Py_EnsureTstateNotNULL(tstate);
    if (tstate->interp != interp) {
        PyErr_SetString(PyExc_RuntimeError,
                        "current tstate has wrong interpreter");
        return -1;
    }
    set_main_thread(interp, tstate);

    return 0;
}
1046
1047
void
_PyInterpreterState_SetNotRunningMain(PyInterpreterState *interp)
{
    /* Clear the "running main" marker; only the thread that set it may
       clear it (asserted). */
    assert(get_main_thread(interp) == current_fast_get());
    set_main_thread(interp, NULL);
}
1053
1054
int
_PyInterpreterState_IsRunningMain(PyInterpreterState *interp)
{
    /* Return 1 if some thread is recorded as running `interp`'s main
       program, else 0.  May report a false negative for embedders (below). */
    if (get_main_thread(interp) != NULL) {
        return 1;
    }
    // Embedders might not know to call _PyInterpreterState_SetRunningMain(),
    // so their main thread wouldn't show it is running the main interpreter's
    // program.  (Py_Main() doesn't have this problem.)  For now this isn't
    // critical.  If it were, we would need to infer "running main" from other
    // information, like if it's the main interpreter.  We used to do that
    // but the naive approach led to some inconsistencies that caused problems.
    return 0;
}
1068
1069
int
_PyThreadState_IsRunningMain(PyThreadState *tstate)
{
    /* Return 1 if `tstate` is the thread recorded as running its
       interpreter's main program. */
    PyInterpreterState *interp = tstate->interp;
    // See the note in _PyInterpreterState_IsRunningMain() about
    // possible false negatives here for embedders.
    return get_main_thread(interp) == tstate;
}
1077
1078
void
1079
_PyInterpreterState_ReinitRunningMain(PyThreadState *tstate)
1080
0
{
1081
0
    PyInterpreterState *interp = tstate->interp;
1082
0
    if (get_main_thread(interp) != tstate) {
1083
0
        set_main_thread(interp, NULL);
1084
0
    }
1085
0
}
1086
1087
1088
//----------
1089
// accessors
1090
//----------
1091
1092
int
_PyInterpreterState_IsReady(PyInterpreterState *interp)
{
    /* Return non-zero once the interpreter has finished initialization
       (the `_ready` flag is set elsewhere during init). */
    return interp->_ready;
}
1097
1098
#ifndef NDEBUG
1099
static inline int
1100
check_interpreter_whence(long whence)
1101
{
1102
    if(whence < 0) {
1103
        return -1;
1104
    }
1105
    if (whence > _PyInterpreterState_WHENCE_MAX) {
1106
        return -1;
1107
    }
1108
    return 0;
1109
}
1110
#endif
1111
1112
long
_PyInterpreterState_GetWhence(PyInterpreterState *interp)
{
    /* Return the code describing how the interpreter was created. */
    assert(check_interpreter_whence(interp->_whence) == 0);
    return interp->_whence;
}
1118
1119
void
_PyInterpreterState_SetWhence(PyInterpreterState *interp, long whence)
{
    /* Record how the interpreter was created.  The interpreter must have
       been given an initial (non-NOTSET) value already, and the new value
       must be a valid whence code. */
    assert(interp->_whence != _PyInterpreterState_WHENCE_NOTSET);
    assert(check_interpreter_whence(whence) == 0);
    interp->_whence = whence;
}
1126
1127
1128
PyObject *
_Py_GetMainModule(PyThreadState *tstate)
{
    /* Return a new reference to the __main__ module, None if it cannot be
       found (or sys.modules is bogus), or NULL with an exception set on
       lookup error. */
    // We return None to indicate "not found" or "bogus".
    PyObject *modules = _PyImport_GetModulesRef(tstate->interp);
    if (modules == Py_None) {
        return modules;
    }
    PyObject *module = NULL;
    (void)PyMapping_GetOptionalItem(modules, &_Py_ID(__main__), &module);
    Py_DECREF(modules);
    if (module == NULL && !PyErr_Occurred()) {
        /* Key simply absent: report "not found" as None, not an error. */
        Py_RETURN_NONE;
    }
    return module;
}
1144
1145
int
_Py_CheckMainModule(PyObject *module)
{
    /* Validate an object obtained via _Py_GetMainModule(): it must be an
       actual module object.  Returns 0 if OK; otherwise returns -1 with
       ModuleNotFoundError or ImportError set. */
    if (module == NULL || module == Py_None) {
        if (!PyErr_Occurred()) {
            (void)_PyErr_SetModuleNotFoundError(&_Py_ID(__main__));
        }
        return -1;
    }
    if (!Py_IS_TYPE(module, &PyModule_Type)) {
        /* The __main__ module has been tampered with. */
        PyObject *msg = PyUnicode_FromString("invalid __main__ module");
        if (msg != NULL) {
            (void)PyErr_SetImportError(msg, &_Py_ID(__main__), NULL);
            Py_DECREF(msg);
        }
        return -1;
    }
    return 0;
}
1165
1166
1167
PyObject *
PyInterpreterState_GetDict(PyInterpreterState *interp)
{
    /* Return the per-interpreter dict (borrowed reference), creating it
       lazily on first use. */
    if (interp->dict == NULL) {
        interp->dict = PyDict_New();
        if (interp->dict == NULL) {
            /* Allocation failure is deliberately swallowed: the documented
               contract is to return NULL without an exception set. */
            PyErr_Clear();
        }
    }
    /* Returning NULL means no per-interpreter dict is available. */
    return interp->dict;
}
1179
1180
1181
//----------
1182
// interp ID
1183
//----------
1184
1185
int64_t
_PyInterpreterState_ObjectToID(PyObject *idobj)
{
    /* Convert a Python object to an interpreter ID (non-negative int64_t).
       Returns -1 with TypeError, ValueError, or OverflowError set on
       invalid input. */
    if (!_PyIndex_Check(idobj)) {
        PyErr_Format(PyExc_TypeError,
                     "interpreter ID must be an int, got %.100s",
                     Py_TYPE(idobj)->tp_name);
        return -1;
    }

    // This may raise OverflowError.
    // For now, we don't worry about if LLONG_MAX < INT64_MAX.
    long long id = PyLong_AsLongLong(idobj);
    if (id == -1 && PyErr_Occurred()) {
        return -1;
    }

    if (id < 0) {
        PyErr_Format(PyExc_ValueError,
                     "interpreter ID must be a non-negative int, got %R",
                     idobj);
        return -1;
    }
#if LLONG_MAX > INT64_MAX
    else if (id > INT64_MAX) {
        PyErr_SetString(PyExc_OverflowError, "int too big to convert");
        return -1;
    }
#endif
    else {
        return (int64_t)id;
    }
}
1218
1219
int64_t
1220
PyInterpreterState_GetID(PyInterpreterState *interp)
1221
0
{
1222
0
    if (interp == NULL) {
1223
0
        PyErr_SetString(PyExc_RuntimeError, "no interpreter provided");
1224
0
        return -1;
1225
0
    }
1226
0
    return interp->id;
1227
0
}
1228
1229
PyObject *
_PyInterpreterState_GetIDObject(PyInterpreterState *interp)
{
    /* Return the interpreter's ID as a Python int, or NULL if the
       interpreter has no valid (non-negative) ID assigned. */
    int64_t interpid = interp->id;
    if (interpid < 0) {
        return NULL;
    }
    assert(interpid < LLONG_MAX);
    return PyLong_FromLongLong(interpid);
}
1239
1240
1241
1242
void
_PyInterpreterState_IDIncref(PyInterpreterState *interp)
{
    /* Atomically bump the cross-interpreter ID refcount. */
    _Py_atomic_add_ssize(&interp->id_refcount, 1);
}
1247
1248
1249
void
_PyInterpreterState_IDDecref(PyInterpreterState *interp)
{
    /* Atomically drop the cross-interpreter ID refcount.  When the last
       reference is released and the interpreter opted in via
       requires_idref, finalize the interpreter from a temporary thread
       state. */
    _PyRuntimeState *runtime = interp->runtime;

    // _Py_atomic_add_ssize() returns the *previous* value, so 1 here
    // means the count just dropped to zero.
    Py_ssize_t refcount = _Py_atomic_add_ssize(&interp->id_refcount, -1);

    if (refcount == 1 && interp->requires_idref) {
        PyThreadState *tstate =
            _PyThreadState_NewBound(interp, _PyThreadState_WHENCE_FINI);

        // XXX Possible GILState issues?
        PyThreadState *save_tstate = _PyThreadState_Swap(runtime, tstate);
        Py_EndInterpreter(tstate);
        _PyThreadState_Swap(runtime, save_tstate);
    }
}
1266
1267
int
_PyInterpreterState_RequiresIDRef(PyInterpreterState *interp)
{
    /* Return non-zero if the interpreter is finalized when its ID
       refcount drops to zero (see _PyInterpreterState_IDDecref()). */
    return interp->requires_idref;
}
1272
1273
void
1274
_PyInterpreterState_RequireIDRef(PyInterpreterState *interp, int required)
1275
0
{
1276
0
    interp->requires_idref = required ? 1 : 0;
1277
0
}
1278
1279
1280
//-----------------------------
1281
// look up an interpreter state
1282
//-----------------------------
1283
1284
/* Return the interpreter associated with the current OS thread.
1285
1286
   The GIL must be held.
1287
  */
1288
1289
PyInterpreterState*
PyInterpreterState_Get(void)
{
    /* Return the interpreter of the current thread state.  Fatal error
       if there is no current thread state or no interpreter attached. */
    PyThreadState *tstate = current_fast_get();
    _Py_EnsureTstateNotNULL(tstate);
    PyInterpreterState *interp = tstate->interp;
    if (interp == NULL) {
        Py_FatalError("no current interpreter");
    }
    return interp;
}
1300
1301
1302
static PyInterpreterState *
1303
interp_look_up_id(_PyRuntimeState *runtime, int64_t requested_id)
1304
0
{
1305
0
    PyInterpreterState *interp = runtime->interpreters.head;
1306
0
    while (interp != NULL) {
1307
0
        int64_t id = interp->id;
1308
0
        assert(id >= 0);
1309
0
        if (requested_id == id) {
1310
0
            return interp;
1311
0
        }
1312
0
        interp = PyInterpreterState_Next(interp);
1313
0
    }
1314
0
    return NULL;
1315
0
}
1316
1317
/* Return the interpreter state with the given ID.
1318
1319
   Fail with RuntimeError if the interpreter is not found. */
1320
1321
PyInterpreterState *
_PyInterpreterState_LookUpID(int64_t requested_id)
{
    /* Return the interpreter with the given ID, or NULL with
       InterpreterNotFoundError set if no such interpreter exists.
       Negative IDs are never valid and skip the (locked) list scan. */
    PyInterpreterState *interp = NULL;
    if (requested_id >= 0) {
        _PyRuntimeState *runtime = &_PyRuntime;
        HEAD_LOCK(runtime);
        interp = interp_look_up_id(runtime, requested_id);
        HEAD_UNLOCK(runtime);
    }
    if (interp == NULL && !PyErr_Occurred()) {
        PyErr_Format(PyExc_InterpreterNotFoundError,
                     "unrecognized interpreter ID %lld", requested_id);
    }
    return interp;
}
1337
1338
PyInterpreterState *
_PyInterpreterState_LookUpIDObject(PyObject *requested_id)
{
    /* Like _PyInterpreterState_LookUpID(), but takes the ID as a Python
       object.  Returns NULL with an exception set on conversion failure
       or when no interpreter matches. */
    int64_t id = _PyInterpreterState_ObjectToID(requested_id);
    if (id < 0) {
        return NULL;
    }
    return _PyInterpreterState_LookUpID(id);
}
1347
1348
1349
/********************************/
1350
/* the per-thread runtime state */
1351
/********************************/
1352
1353
#ifndef NDEBUG
1354
static inline int
1355
tstate_is_alive(PyThreadState *tstate)
1356
{
1357
    return (tstate->_status.initialized &&
1358
            !tstate->_status.finalized &&
1359
            !tstate->_status.cleared &&
1360
            !tstate->_status.finalizing);
1361
}
1362
#endif
1363
1364
1365
//----------
1366
// lifecycle
1367
//----------
1368
1369
/* Allocate a new data-stack chunk of `size_in_bytes` (must be a multiple
   of the pointer size), linked to `previous`.  Returns NULL on allocation
   failure. */
static _PyStackChunk*
allocate_chunk(int size_in_bytes, _PyStackChunk* previous)
{
    assert(size_in_bytes % sizeof(PyObject **) == 0);
    _PyStackChunk *res = _PyObject_VirtualAlloc(size_in_bytes);
    if (res == NULL) {
        return NULL;
    }
    res->previous = previous;
    res->size = size_in_bytes;
    res->top = 0;
    return res;
}
1382
1383
/* Reset a thread state struct to its pristine preallocated contents by
   copying the static initial-thread template over it. */
static void
reset_threadstate(_PyThreadStateImpl *tstate)
{
    // Set to _PyThreadState_INIT directly?
    memcpy(tstate,
           &initial._main_interpreter._initial_thread,
           sizeof(*tstate));
}
1391
1392
/* Obtain storage for a new thread state: reuse the interpreter's
   preallocated slot if available, else heap-allocate and template-reset.
   Returns NULL on allocation failure. */
static _PyThreadStateImpl *
alloc_threadstate(PyInterpreterState *interp)
{
    _PyThreadStateImpl *tstate;

    // Try the preallocated tstate first.
    tstate = _Py_atomic_exchange_ptr(&interp->threads.preallocated, NULL);

    // Fall back to the allocator.
    if (tstate == NULL) {
        tstate = PyMem_RawCalloc(1, sizeof(_PyThreadStateImpl));
        if (tstate == NULL) {
            return NULL;
        }
        reset_threadstate(tstate);
    }
    return tstate;
}
1410
1411
/* Release thread state storage obtained via alloc_threadstate(): the
   interpreter's embedded initial thread state is recycled into the
   preallocated slot, anything else is freed. */
static void
free_threadstate(_PyThreadStateImpl *tstate)
{
    PyInterpreterState *interp = tstate->base.interp;
    // The initial thread state of the interpreter is allocated
    // as part of the interpreter state so should not be freed.
    if (tstate == &interp->_initial_thread) {
        // Make it available again.
        reset_threadstate(tstate);
        assert(interp->threads.preallocated == NULL);
        _Py_atomic_store_ptr(&interp->threads.preallocated, tstate);
    }
    else {
        PyMem_RawFree(tstate);
    }
}
1427
1428
/* Drop one reference to the thread state, freeing it when the count
   reaches zero.  (_Py_atomic_add_ssize returns the previous value, so 1
   means this was the last reference.) */
static void
decref_threadstate(_PyThreadStateImpl *tstate)
{
    if (_Py_atomic_add_ssize(&tstate->refcount, -1) == 1) {
        // The last reference to the thread state is gone.
        free_threadstate(tstate);
    }
}
1436
1437
/* Get the thread state to a minimal consistent state.
1438
   Further init happens in pylifecycle.c before it can be used.
1439
   All fields not initialized here are expected to be zeroed out,
1440
   e.g. by PyMem_RawCalloc() or memset(), or otherwise pre-initialized.
1441
   The interpreter state is not manipulated.  Instead it is assumed that
1442
   the thread is getting added to the interpreter.
1443
  */
1444
1445
static void
init_threadstate(_PyThreadStateImpl *_tstate,
                 PyInterpreterState *interp, uint64_t id, int whence)
{
    /* Field-by-field initialization of a freshly allocated (zeroed or
       template-reset) thread state.  Fatal error on double init. */
    PyThreadState *tstate = (PyThreadState *)_tstate;
    if (tstate->_status.initialized) {
        Py_FatalError("thread state already initialized");
    }

    assert(interp != NULL);
    tstate->interp = interp;
    tstate->eval_breaker =
        _Py_atomic_load_uintptr_relaxed(&interp->ceval.instrumentation_version);

    // next/prev are set in add_threadstate().
    assert(tstate->next == NULL);
    assert(tstate->prev == NULL);

    assert(tstate->_whence == _PyThreadState_WHENCE_NOTSET);
    assert(whence >= 0 && whence <= _PyThreadState_WHENCE_THREADING_DAEMON);
    tstate->_whence = whence;

    assert(id > 0);
    tstate->id = id;

    // thread_id and native_thread_id are set in bind_tstate().

    tstate->py_recursion_limit = interp->ceval.recursion_limit;
    tstate->py_recursion_remaining = interp->ceval.recursion_limit;
    tstate->exc_info = &tstate->exc_state;

    // PyGILState_Release must not try to delete this thread state.
    // This is cleared when PyGILState_Ensure() creates the thread state.
    tstate->gilstate_counter = 1;

    tstate->current_frame = NULL;
    tstate->datastack_chunk = NULL;
    tstate->datastack_top = NULL;
    tstate->datastack_limit = NULL;
    tstate->what_event = -1;
    tstate->current_executor = NULL;
    tstate->jit_exit = NULL;
    tstate->dict_global_version = 0;

    // C stack bounds are computed later; soft limit starts "unlimited".
    _tstate->c_stack_soft_limit = UINTPTR_MAX;
    _tstate->c_stack_top = 0;
    _tstate->c_stack_hard_limit = 0;

    _tstate->asyncio_running_loop = NULL;
    _tstate->asyncio_running_task = NULL;

    tstate->delete_later = NULL;

    llist_init(&_tstate->mem_free_queue);
    llist_init(&_tstate->asyncio_tasks_head);
    if (interp->stoptheworld.requested || _PyRuntime.stoptheworld.requested) {
        // Start in the suspended state if there is an ongoing stop-the-world.
        tstate->state = _Py_THREAD_SUSPENDED;
    }

    tstate->_status.initialized = 1;
}
1507
1508
/* Push `tstate` onto the front of the interpreter's thread list, ahead
   of `next` (the previous head, may be NULL).  Caller holds HEAD_LOCK. */
static void
add_threadstate(PyInterpreterState *interp, PyThreadState *tstate,
                PyThreadState *next)
{
    assert(interp->threads.head != tstate);
    if (next != NULL) {
        assert(next->prev == NULL || next->prev == tstate);
        next->prev = tstate;
    }
    tstate->next = next;
    assert(tstate->prev == NULL);
    interp->threads.head = tstate;
}
1521
1522
/* Allocate, initialize, and register a new thread state for `interp`.
   Returns NULL on allocation failure.  The returned state still needs
   binding (see _PyThreadState_Bind() / bind_tstate()). */
static PyThreadState *
new_threadstate(PyInterpreterState *interp, int whence)
{
    // Allocate the thread state.
    _PyThreadStateImpl *tstate = alloc_threadstate(interp);
    if (tstate == NULL) {
        return NULL;
    }

#ifdef Py_GIL_DISABLED
    Py_ssize_t qsbr_idx = _Py_qsbr_reserve(interp);
    if (qsbr_idx < 0) {
        free_threadstate(tstate);
        return NULL;
    }
    int32_t tlbc_idx = _Py_ReserveTLBCIndex(interp);
    if (tlbc_idx < 0) {
        free_threadstate(tstate);
        return NULL;
    }
#endif

    /* We serialize concurrent creation to protect global state. */
    HEAD_LOCK(interp->runtime);

    // Initialize the new thread state.
    interp->threads.next_unique_id += 1;
    uint64_t id = interp->threads.next_unique_id;
    init_threadstate(tstate, interp, id, whence);

    // Add the new thread state to the interpreter.
    PyThreadState *old_head = interp->threads.head;
    add_threadstate(interp, (PyThreadState *)tstate, old_head);

    HEAD_UNLOCK(interp->runtime);

#ifdef Py_GIL_DISABLED
    // Must be called with lock unlocked to avoid lock ordering deadlocks.
    _Py_qsbr_register(tstate, interp, qsbr_idx);
    tstate->tlbc_index = tlbc_idx;
#endif

    return (PyThreadState *)tstate;
}
1566
1567
PyThreadState *
PyThreadState_New(PyInterpreterState *interp)
{
    /* Public API: create and bind a new thread state with unknown origin. */
    return _PyThreadState_NewBound(interp, _PyThreadState_WHENCE_UNKNOWN);
}
1572
1573
PyThreadState *
_PyThreadState_NewBound(PyInterpreterState *interp, int whence)
{
    /* Create a new thread state and immediately bind it to the calling
       OS thread (and to gilstate, if not already bound).  Returns NULL
       on allocation failure. */
    PyThreadState *tstate = new_threadstate(interp, whence);
    if (tstate) {
        bind_tstate(tstate);
        // This makes sure there's a gilstate tstate bound
        // as soon as possible.
        if (gilstate_get() == NULL) {
            bind_gilstate_tstate(tstate);
        }
    }
    return tstate;
}
1587
1588
// This must be followed by a call to _PyThreadState_Bind();
1589
// This must be followed by a call to _PyThreadState_Bind();
PyThreadState *
_PyThreadState_New(PyInterpreterState *interp, int whence)
{
    /* Internal: create a thread state without binding it to an OS thread. */
    return new_threadstate(interp, whence);
}
1594
1595
// We keep this for stable ABI compabibility.
1596
// We keep this for stable ABI compabibility.
PyAPI_FUNC(PyThreadState*)
_PyThreadState_Prealloc(PyInterpreterState *interp)
{
    /* Legacy stable-ABI entry point; equivalent to _PyThreadState_New()
       with an unknown origin. */
    return _PyThreadState_New(interp, _PyThreadState_WHENCE_UNKNOWN);
}
1601
1602
// We keep this around for (accidental) stable ABI compatibility.
1603
// Realistically, no extensions are using it.
1604
// We keep this around for (accidental) stable ABI compatibility.
// Realistically, no extensions are using it.
PyAPI_FUNC(void)
_PyThreadState_Init(PyThreadState *tstate)
{
    /* Deliberately fatal: this legacy symbol must never be called. */
    Py_FatalError("_PyThreadState_Init() is for internal use only");
}
1609
1610
1611
static void
1612
clear_datastack(PyThreadState *tstate)
1613
0
{
1614
0
    _PyStackChunk *chunk = tstate->datastack_chunk;
1615
0
    tstate->datastack_chunk = NULL;
1616
0
    while (chunk != NULL) {
1617
0
        _PyStackChunk *prev = chunk->previous;
1618
0
        _PyObject_VirtualFree(chunk, chunk->size);
1619
0
        chunk = prev;
1620
0
    }
1621
0
}
1622
1623
void
PyThreadState_Clear(PyThreadState *tstate)
{
    /* Public API: release all Python objects and per-thread resources held
       by `tstate`.  The caller must hold the GIL / attach to the same
       interpreter; the state itself is freed later by
       PyThreadState_Delete() (or a variant). */
    assert(tstate->_status.initialized && !tstate->_status.cleared);
    assert(current_fast_get()->interp == tstate->interp);
    // GH-126016: In the _interpreters module, KeyboardInterrupt exceptions
    // during PyEval_EvalCode() are sent to finalization, which doesn't let us
    // mark threads as "not running main". So, for now this assertion is
    // disabled.
    // XXX assert(!_PyThreadState_IsRunningMain(tstate));
    // XXX assert(!tstate->_status.bound || tstate->_status.unbound);
    tstate->_status.finalizing = 1;  // just in case

    /* XXX Conditions we need to enforce:

       * the GIL must be held by the current thread
       * current_fast_get()->interp must match tstate->interp
       * for the main interpreter, current_fast_get() must be the main thread
     */

    int verbose = _PyInterpreterState_GetConfig(tstate->interp)->verbose;

    if (verbose && tstate->current_frame != NULL) {
        /* bpo-20526: After the main thread calls
           _PyInterpreterState_SetFinalizing() in Py_FinalizeEx()
           (or in Py_EndInterpreter() for subinterpreters),
           threads must exit when trying to take the GIL.
           If a thread exit in the middle of _PyEval_EvalFrameDefault(),
           tstate->frame is not reset to its previous value.
           It is more likely with daemon threads, but it can happen
           with regular threads if threading._shutdown() fails
           (ex: interrupted by CTRL+C). */
        fprintf(stderr,
          "PyThreadState_Clear: warning: thread still has a frame\n");
    }

    if (verbose && tstate->current_exception != NULL) {
        fprintf(stderr, "PyThreadState_Clear: warning: thread has an exception set\n");
        _PyErr_Print(tstate);
    }

    /* At this point tstate shouldn't be used any more,
       neither to run Python code nor for other uses.

       This is tricky when current_fast_get() == tstate, in the same way
       as noted in interpreter_clear() above.  The below finalizers
       can possibly run Python code or otherwise use the partially
       cleared thread state.  For now we trust that isn't a problem
       in practice.
     */
    // XXX Deal with the possibility of problematic finalizers.

    /* Don't clear tstate->pyframe: it is a borrowed reference */

    Py_CLEAR(tstate->threading_local_key);
    Py_CLEAR(tstate->threading_local_sentinel);

    Py_CLEAR(((_PyThreadStateImpl *)tstate)->asyncio_running_loop);
    Py_CLEAR(((_PyThreadStateImpl *)tstate)->asyncio_running_task);


    PyMutex_Lock(&tstate->interp->asyncio_tasks_lock);
    // merge any lingering tasks from thread state to interpreter's
    // tasks list
    llist_concat(&tstate->interp->asyncio_tasks_head,
                 &((_PyThreadStateImpl *)tstate)->asyncio_tasks_head);
    PyMutex_Unlock(&tstate->interp->asyncio_tasks_lock);

    Py_CLEAR(tstate->dict);
    Py_CLEAR(tstate->async_exc);

    Py_CLEAR(tstate->current_exception);

    Py_CLEAR(tstate->exc_state.exc_value);

    /* The stack of exception states should contain just this thread. */
    if (verbose && tstate->exc_info != &tstate->exc_state) {
        fprintf(stderr,
          "PyThreadState_Clear: warning: thread still has a generator\n");
    }

    /* Keep the interpreter's profiling/tracing thread counters in sync
       as this thread's hooks are dropped. */
    if (tstate->c_profilefunc != NULL) {
        FT_ATOMIC_ADD_SSIZE(tstate->interp->sys_profiling_threads, -1);
        tstate->c_profilefunc = NULL;
    }
    if (tstate->c_tracefunc != NULL) {
        FT_ATOMIC_ADD_SSIZE(tstate->interp->sys_tracing_threads, -1);
        tstate->c_tracefunc = NULL;
    }

    Py_CLEAR(tstate->c_profileobj);
    Py_CLEAR(tstate->c_traceobj);

    Py_CLEAR(tstate->async_gen_firstiter);
    Py_CLEAR(tstate->async_gen_finalizer);

    Py_CLEAR(tstate->context);

#ifdef Py_GIL_DISABLED
    // Each thread should clear own freelists in free-threading builds.
    struct _Py_freelists *freelists = _Py_freelists_GET();
    _PyObject_ClearFreeLists(freelists, 1);

    // Merge our thread-local refcounts into the type's own refcount and
    // free our local refcount array.
    _PyObject_FinalizePerThreadRefcounts((_PyThreadStateImpl *)tstate);

    // Remove ourself from the biased reference counting table of threads.
    _Py_brc_remove_thread(tstate);

    // Release our thread-local copies of the bytecode for reuse by another
    // thread
    _Py_ClearTLBCIndex((_PyThreadStateImpl *)tstate);
#endif

    // Merge our queue of pointers to be freed into the interpreter queue.
    _PyMem_AbandonDelayed(tstate);

    _PyThreadState_ClearMimallocHeaps(tstate);

    tstate->_status.cleared = 1;

    // XXX Call _PyThreadStateSwap(runtime, NULL) here if "current".
    // XXX Do it as early in the function as possible.
}
1748
1749
static void
1750
decrement_stoptheworld_countdown(struct _stoptheworld_state *stw);
1751
1752
/* Common code for PyThreadState_Delete() and PyThreadState_DeleteCurrent() */
1753
/* Common code for PyThreadState_Delete() and PyThreadState_DeleteCurrent() */
static void
tstate_delete_common(PyThreadState *tstate, int release_gil)
{
    /* Unlink a cleared thread state from its interpreter, unbind it, free
       its data stack, and (optionally) release the GIL.  The caller is
       responsible for freeing the tstate storage afterwards. */
    assert(tstate->_status.cleared && !tstate->_status.finalized);
    tstate_verify_not_active(tstate);
    assert(!_PyThreadState_IsRunningMain(tstate));

    PyInterpreterState *interp = tstate->interp;
    if (interp == NULL) {
        Py_FatalError("NULL interpreter");
    }
    _PyRuntimeState *runtime = interp->runtime;

    HEAD_LOCK(runtime);
    /* Unlink from the interpreter's doubly-linked thread list. */
    if (tstate->prev) {
        tstate->prev->next = tstate->next;
    }
    else {
        interp->threads.head = tstate->next;
    }
    if (tstate->next) {
        tstate->next->prev = tstate->prev;
    }
    if (tstate->state != _Py_THREAD_SUSPENDED) {
        // Any ongoing stop-the-world request should not wait for us because
        // our thread is getting deleted.
        if (interp->stoptheworld.requested) {
            decrement_stoptheworld_countdown(&interp->stoptheworld);
        }
        if (runtime->stoptheworld.requested) {
            decrement_stoptheworld_countdown(&runtime->stoptheworld);
        }
    }

#if defined(Py_REF_DEBUG) && defined(Py_GIL_DISABLED)
    // Add our portion of the total refcount to the interpreter's total.
    _PyThreadStateImpl *tstate_impl = (_PyThreadStateImpl *)tstate;
    tstate->interp->object_state.reftotal += tstate_impl->reftotal;
    tstate_impl->reftotal = 0;
    assert(tstate_impl->refcounts.values == NULL);
#endif

    HEAD_UNLOCK(runtime);

    // XXX Unbind in PyThreadState_Clear(), or earlier
    // (and assert not-equal here)?
    if (tstate->_status.bound_gilstate) {
        unbind_gilstate_tstate(tstate);
    }
    if (tstate->_status.bound) {
        unbind_tstate(tstate);
    }

    // XXX Move to PyThreadState_Clear()?
    clear_datastack(tstate);

    if (release_gil) {
        _PyEval_ReleaseLock(tstate->interp, tstate, 1);
    }

#ifdef Py_GIL_DISABLED
    _Py_qsbr_unregister(tstate);
#endif

    tstate->_status.finalized = 1;
}
1819
1820
// Delete and free every remaining thread state of `interp` by repeatedly
// deleting the list head until interp->threads.head is empty.
static void
zapthreads(PyInterpreterState *interp)
{
    PyThreadState *tstate;
    /* No need to lock the mutex here because this should only happen
       when the threads are all really dead (XXX famous last words).

       Cannot use _Py_FOR_EACH_TSTATE_UNLOCKED because we are freeing
       the thread states here.
    */
    while ((tstate = interp->threads.head) != NULL) {
        // tstate_delete_common() unlinks the state, advancing the head.
        tstate_verify_not_active(tstate);
        tstate_delete_common(tstate, 0);
        free_threadstate((_PyThreadStateImpl *)tstate);
    }
}
1836
1837
1838
// Public API: unlink and free `tstate`.  The state must not be the one
// currently attached (asserted); use PyThreadState_DeleteCurrent() for that.
void
PyThreadState_Delete(PyThreadState *tstate)
{
    _Py_EnsureTstateNotNULL(tstate);
    tstate_verify_not_active(tstate);
    tstate_delete_common(tstate, 0);  // 0: do not release the GIL
    free_threadstate((_PyThreadStateImpl *)tstate);
}
1846
1847
1848
// Delete the currently attached thread state; the GIL is released as part
// of tstate_delete_common(..., 1).
void
_PyThreadState_DeleteCurrent(PyThreadState *tstate)
{
    _Py_EnsureTstateNotNULL(tstate);
#ifdef Py_GIL_DISABLED
    _Py_qsbr_detach(((_PyThreadStateImpl *)tstate)->qsbr);
#endif
    current_fast_clear(tstate->interp->runtime);
    tstate_delete_common(tstate, 1);  // release GIL as part of call
    free_threadstate((_PyThreadStateImpl *)tstate);
}
1859
1860
// Public wrapper: delete the calling thread's own thread state.
void
PyThreadState_DeleteCurrent(void)
{
    PyThreadState *tstate = current_fast_get();
    _PyThreadState_DeleteCurrent(tstate);
}
1866
1867
1868
// Unlinks and removes all thread states from `tstate->interp`, with the
1869
// exception of the one passed as an argument. However, it does not delete
1870
// these thread states. Instead, it returns the removed thread states as a
1871
// linked list.
1872
//
1873
// Note that if there is a current thread state, it *must* be the one
1874
// passed as argument.  Also, this won't touch any interpreters other
1875
// than the current one, since we don't know which thread state should
1876
// be kept in those other interpreters.
1877
PyThreadState *
_PyThreadState_RemoveExcept(PyThreadState *tstate)
{
    assert(tstate != NULL);
    PyInterpreterState *interp = tstate->interp;
    _PyRuntimeState *runtime = interp->runtime;

#ifdef Py_GIL_DISABLED
    assert(runtime->stoptheworld.world_stopped);
#endif

    HEAD_LOCK(runtime);
    /* Remove all thread states, except tstate, from the linked list of
       thread states. */
    // `list` will be the returned chain: the old head, unless the old head
    // is `tstate` itself, in which case we start from its successor.
    PyThreadState *list = interp->threads.head;
    if (list == tstate) {
        list = tstate->next;
    }
    // Unlink `tstate` from wherever it sits in the list...
    if (tstate->prev) {
        tstate->prev->next = tstate->next;
    }
    if (tstate->next) {
        tstate->next->prev = tstate->prev;
    }
    tstate->prev = tstate->next = NULL;
    // ...and make it the sole member of the interpreter's list.
    interp->threads.head = tstate;
    HEAD_UNLOCK(runtime);

    return list;
}
1907
1908
// Deletes the thread states in the linked list `list`.
1909
//
1910
// This is intended to be used in conjunction with _PyThreadState_RemoveExcept.
1911
//
1912
// If `is_after_fork` is true, the thread states are immediately freed.
1913
// Otherwise, they are decref'd because they may still be referenced by an
1914
// OS thread.
1915
void
_PyThreadState_DeleteList(PyThreadState *list, int is_after_fork)
{
    // The world can't be stopped because PyThreadState_Clear() can
    // call destructors.
    assert(!_PyRuntime.stoptheworld.world_stopped);

    PyThreadState *p, *next;
    for (p = list; p; p = next) {
        // Save the link first: clearing/freeing `p` invalidates it.
        next = p->next;
        PyThreadState_Clear(p);
        if (is_after_fork) {
            // After fork the owning OS threads no longer exist: free now.
            free_threadstate((_PyThreadStateImpl *)p);
        }
        else {
            // An OS thread may still hold a reference: just drop ours.
            decref_threadstate((_PyThreadStateImpl *)p);
        }
    }
}
1934
1935
1936
//----------
1937
// accessors
1938
//----------
1939
1940
/* An extension mechanism to store arbitrary additional per-thread state.
1941
   PyThreadState_GetDict() returns a dictionary that can be used to hold such
1942
   state; the caller should pick a unique key and store its state there.  If
1943
   PyThreadState_GetDict() returns NULL, an exception has *not* been raised
1944
   and the caller should assume no per-thread state is available. */
1945
1946
PyObject *
1947
_PyThreadState_GetDict(PyThreadState *tstate)
1948
8.44M
{
1949
8.44M
    assert(tstate != NULL);
1950
8.44M
    if (tstate->dict == NULL) {
1951
1
        tstate->dict = PyDict_New();
1952
1
        if (tstate->dict == NULL) {
1953
0
            _PyErr_Clear(tstate);
1954
0
        }
1955
1
    }
1956
8.44M
    return tstate->dict;
1957
8.44M
}
1958
1959
1960
PyObject *
1961
PyThreadState_GetDict(void)
1962
8.44M
{
1963
8.44M
    PyThreadState *tstate = current_fast_get();
1964
8.44M
    if (tstate == NULL) {
1965
0
        return NULL;
1966
0
    }
1967
8.44M
    return _PyThreadState_GetDict(tstate);
1968
8.44M
}
1969
1970
1971
// Return the interpreter that owns `tstate` (borrowed pointer).
PyInterpreterState *
PyThreadState_GetInterpreter(PyThreadState *tstate)
{
    assert(tstate != NULL);
    return tstate->interp;
}
1977
1978
1979
PyFrameObject*
1980
PyThreadState_GetFrame(PyThreadState *tstate)
1981
2.19k
{
1982
2.19k
    assert(tstate != NULL);
1983
2.19k
    _PyInterpreterFrame *f = _PyThreadState_GetFrame(tstate);
1984
2.19k
    if (f == NULL) {
1985
0
        return NULL;
1986
0
    }
1987
2.19k
    PyFrameObject *frame = _PyFrame_GetFrameObject(f);
1988
2.19k
    if (frame == NULL) {
1989
0
        PyErr_Clear();
1990
0
    }
1991
2.19k
    return (PyFrameObject*)Py_XNewRef(frame);
1992
2.19k
}
1993
1994
1995
// Return the unique runtime-assigned id of `tstate`.
uint64_t
PyThreadState_GetID(PyThreadState *tstate)
{
    assert(tstate != NULL);
    return tstate->id;
}
2001
2002
2003
// Mark `tstate` as the active thread state for this OS thread, binding
// the gilstate TSS slot to it if it isn't bound yet.
static inline void
tstate_activate(PyThreadState *tstate)
{
    assert(tstate != NULL);
    // XXX assert(tstate_is_alive(tstate));
    assert(tstate_is_bound(tstate));
    assert(!tstate->_status.active);

    assert(!tstate->_status.bound_gilstate ||
           tstate == gilstate_get());
    if (!tstate->_status.bound_gilstate) {
        bind_gilstate_tstate(tstate);
    }

    tstate->_status.active = 1;
}
2019
2020
// Clear the "active" flag set by tstate_activate().  The gilstate binding
// is intentionally left in place.
static inline void
tstate_deactivate(PyThreadState *tstate)
{
    assert(tstate != NULL);
    // XXX assert(tstate_is_alive(tstate));
    assert(tstate_is_bound(tstate));
    assert(tstate->_status.active);

    tstate->_status.active = 0;

    // We do not unbind the gilstate tstate here.
    // It will still be used in PyGILState_Ensure().
}
2033
2034
// Try to switch `tstate` from DETACHED to ATTACHED; returns 1 on success.
// With the GIL this cannot fail; in free-threaded builds the CAS returns 0
// when the state is no longer DETACHED (see tstate_wait_attach()).
static int
tstate_try_attach(PyThreadState *tstate)
{
#ifdef Py_GIL_DISABLED
    int expected = _Py_THREAD_DETACHED;
    return _Py_atomic_compare_exchange_int(&tstate->state,
                                           &expected,
                                           _Py_THREAD_ATTACHED);
#else
    assert(tstate->state == _Py_THREAD_DETACHED);
    tstate->state = _Py_THREAD_ATTACHED;
    return 1;
#endif
}
2048
2049
// Move `tstate` out of ATTACHED into `detached_state` (either
// _Py_THREAD_DETACHED or _Py_THREAD_SUSPENDED).
static void
tstate_set_detached(PyThreadState *tstate, int detached_state)
{
    assert(_Py_atomic_load_int_relaxed(&tstate->state) == _Py_THREAD_ATTACHED);
#ifdef Py_GIL_DISABLED
    _Py_atomic_store_int(&tstate->state, detached_state);
#else
    tstate->state = detached_state;
#endif
}
2059
2060
// Wait until this thread can attach: parks while SUSPENDED (stop-the-world
// in progress) and hangs the thread permanently if the runtime is shutting
// down.  Loops until tstate_try_attach() succeeds.
static void
tstate_wait_attach(PyThreadState *tstate)
{
    do {
        int state = _Py_atomic_load_int_relaxed(&tstate->state);
        if (state == _Py_THREAD_SUSPENDED) {
            // Wait until we're switched out of SUSPENDED to DETACHED.
            _PyParkingLot_Park(&tstate->state, &state, sizeof(tstate->state),
                               /*timeout=*/-1, NULL, /*detach=*/0);
        }
        else if (state == _Py_THREAD_SHUTTING_DOWN) {
            // We're shutting down, so we can't attach.
            _PyThreadState_HangThread(tstate);
        }
        else {
            assert(state == _Py_THREAD_DETACHED);
        }
        // Once we're back in DETACHED we can re-attach
    } while (!tstate_try_attach(tstate));
}
2080
2081
// Attach `tstate` to the calling OS thread: acquire the GIL/eval lock,
// publish it as the current thread state, and resume any previously
// suspended critical sections.  Fatal error if a thread state is already
// attached to this thread.
void
_PyThreadState_Attach(PyThreadState *tstate)
{
#if defined(Py_DEBUG)
    // This is called from PyEval_RestoreThread(). Similar
    // to it, we need to ensure errno doesn't change.
    int err = errno;
#endif

    _Py_EnsureTstateNotNULL(tstate);
    if (current_fast_get() != NULL) {
        Py_FatalError("non-NULL old thread state");
    }
    _PyThreadStateImpl *_tstate = (_PyThreadStateImpl *)tstate;
    if (_tstate->c_stack_hard_limit == 0) {
        // First attach on this thread: compute C stack recursion limits.
        _Py_InitializeRecursionLimits(tstate);
    }

    while (1) {
        _PyEval_AcquireLock(tstate);

        // XXX assert(tstate_is_alive(tstate));
        current_fast_set(&_PyRuntime, tstate);
        if (!tstate_try_attach(tstate)) {
            tstate_wait_attach(tstate);
        }
        tstate_activate(tstate);

#ifdef Py_GIL_DISABLED
        if (_PyEval_IsGILEnabled(tstate) && !tstate->holds_gil) {
            // The GIL was enabled between our call to _PyEval_AcquireLock()
            // and when we attached (the GIL can't go from enabled to disabled
            // here because only a thread holding the GIL can disable
            // it). Detach and try again.
            tstate_set_detached(tstate, _Py_THREAD_DETACHED);
            tstate_deactivate(tstate);
            current_fast_clear(&_PyRuntime);
            continue;
        }
        _Py_qsbr_attach(((_PyThreadStateImpl *)tstate)->qsbr);
#endif
        break;
    }

    // Resume previous critical section. This acquires the lock(s) from the
    // top-most critical section.
    if (tstate->critical_section != 0) {
        _PyCriticalSection_Resume(tstate);
    }

#if defined(Py_DEBUG)
    errno = err;
#endif
}
2135
2136
// Common detach path: suspend held critical sections, deactivate the
// state, transition it to `detached_state`, clear the fast-path current
// tstate, and finally release the eval lock.  Order matters here.
static void
detach_thread(PyThreadState *tstate, int detached_state)
{
    // XXX assert(tstate_is_alive(tstate) && tstate_is_bound(tstate));
    assert(_Py_atomic_load_int_relaxed(&tstate->state) == _Py_THREAD_ATTACHED);
    assert(tstate == current_fast_get());
    if (tstate->critical_section != 0) {
        _PyCriticalSection_SuspendAll(tstate);
    }
#ifdef Py_GIL_DISABLED
    _Py_qsbr_detach(((_PyThreadStateImpl *)tstate)->qsbr);
#endif
    tstate_deactivate(tstate);
    tstate_set_detached(tstate, detached_state);
    current_fast_clear(&_PyRuntime);
    _PyEval_ReleaseLock(tstate->interp, tstate, 0);
}
2153
2154
// Detach the current thread state, leaving it in the DETACHED state.
void
_PyThreadState_Detach(PyThreadState *tstate)
{
    detach_thread(tstate, _Py_THREAD_DETACHED);
}
2159
2160
// Detach in response to a stop-the-world request: park this thread in the
// SUSPENDED state (and decrement the requester's countdown) if a request
// is pending, otherwise detach normally.
void
_PyThreadState_Suspend(PyThreadState *tstate)
{
    _PyRuntimeState *runtime = &_PyRuntime;

    assert(_Py_atomic_load_int_relaxed(&tstate->state) == _Py_THREAD_ATTACHED);

    // Global (runtime-wide) requests take priority over per-interpreter ones.
    struct _stoptheworld_state *stw = NULL;
    HEAD_LOCK(runtime);
    if (runtime->stoptheworld.requested) {
        stw = &runtime->stoptheworld;
    }
    else if (tstate->interp->stoptheworld.requested) {
        stw = &tstate->interp->stoptheworld;
    }
    HEAD_UNLOCK(runtime);

    if (stw == NULL) {
        // Switch directly to "detached" if there is no active stop-the-world
        // request.
        detach_thread(tstate, _Py_THREAD_DETACHED);
        return;
    }

    // Switch to "suspended" state.
    detach_thread(tstate, _Py_THREAD_SUSPENDED);

    // Decrease the count of remaining threads needing to park.
    HEAD_LOCK(runtime);
    decrement_stoptheworld_countdown(stw);
    HEAD_UNLOCK(runtime);
}
2192
2193
// Mark `tstate` as shutting down, waking any waiter parked on its state
// word (free-threaded builds) so it can observe the transition.
void
_PyThreadState_SetShuttingDown(PyThreadState *tstate)
{
    _Py_atomic_store_int(&tstate->state, _Py_THREAD_SHUTTING_DOWN);
#ifdef Py_GIL_DISABLED
    _PyParkingLot_UnparkAll(&tstate->state);
#endif
}
2201
2202
// Decrease stop-the-world counter of remaining number of threads that need to
2203
// pause. If we are the final thread to pause, notify the requesting thread.
2204
static void
decrement_stoptheworld_countdown(struct _stoptheworld_state *stw)
{
    assert(stw->thread_countdown > 0);
    if (--stw->thread_countdown == 0) {
        // We were the last thread to pause; wake the requester.
        _PyEvent_Notify(&stw->stop_event);
    }
}
2212
2213
#ifdef Py_GIL_DISABLED
2214
// Interpreter for _Py_FOR_EACH_STW_INTERP(). For global stop-the-world events,
2215
// we start with the first interpreter and then iterate over all interpreters.
2216
// For per-interpreter stop-the-world events, we only operate on the one
2217
// interpreter.
2218
static PyInterpreterState *
interp_for_stop_the_world(struct _stoptheworld_state *stw)
{
    // Global request: start at the first interpreter (the iteration macro
    // then follows ->next).  Per-interpreter: recover the owning interpreter
    // from the embedded stoptheworld member.
    return (stw->is_global
        ? PyInterpreterState_Head()
        : _Py_CONTAINER_OF(stw, PyInterpreterState, stoptheworld));
}
2225
2226
// Loops over threads for a stop-the-world event.
2227
// For global: all threads in all interpreters
2228
// For per-interpreter: all threads in the interpreter
2229
#define _Py_FOR_EACH_STW_INTERP(stw, i)                                     \
2230
    for (PyInterpreterState *i = interp_for_stop_the_world((stw));          \
2231
            i != NULL; i = ((stw->is_global) ? i->next : NULL))
2232
2233
2234
// Try to transition threads atomically from the "detached" state to the
2235
// "gc stopped" state. Returns true if all threads are now in the "gc stopped" state.
2236
static bool
park_detached_threads(struct _stoptheworld_state *stw)
{
    int num_parked = 0;
    _Py_FOR_EACH_STW_INTERP(stw, i) {
        _Py_FOR_EACH_TSTATE_UNLOCKED(i, t) {
            int state = _Py_atomic_load_int_relaxed(&t->state);
            if (state == _Py_THREAD_DETACHED) {
                // Atomically transition to "suspended" if in "detached" state.
                if (_Py_atomic_compare_exchange_int(
                                &t->state, &state, _Py_THREAD_SUSPENDED)) {
                    num_parked++;
                }
            }
            else if (state == _Py_THREAD_ATTACHED && t != stw->requester) {
                // Still running: ask it to stop via the eval breaker.
                _Py_set_eval_breaker_bit(t, _PY_EVAL_PLEASE_STOP_BIT);
            }
        }
    }
    stw->thread_countdown -= num_parked;
    assert(stw->thread_countdown >= 0);
    // True only if this call parked the final outstanding thread(s).
    return num_parked > 0 && stw->thread_countdown == 0;
}
2259
2260
// Pause every other thread covered by `stw` (all interpreters for a global
// request, one interpreter otherwise).  Returns with stw->mutex (and the
// runtime rwmutex) still held; start_the_world() releases them.
static void
stop_the_world(struct _stoptheworld_state *stw)
{
    _PyRuntimeState *runtime = &_PyRuntime;

    // gh-137433: Acquire the rwmutex first to avoid deadlocks with daemon
    // threads that may hang when blocked on lock acquisition.
    if (stw->is_global) {
        _PyRWMutex_Lock(&runtime->stoptheworld_mutex);
    }
    else {
        _PyRWMutex_RLock(&runtime->stoptheworld_mutex);
    }
    PyMutex_Lock(&stw->mutex);

    HEAD_LOCK(runtime);
    stw->requested = 1;
    stw->thread_countdown = 0;
    stw->stop_event = (PyEvent){0};  // zero-initialize (unset)
    stw->requester = _PyThreadState_GET();  // may be NULL

    _Py_FOR_EACH_STW_INTERP(stw, i) {
        _Py_FOR_EACH_TSTATE_UNLOCKED(i, t) {
            if (t != stw->requester) {
                // Count all the other threads (we don't wait on ourself).
                stw->thread_countdown++;
            }
        }
    }

    if (stw->thread_countdown == 0) {
        // Nothing to wait for: we're the only thread.
        HEAD_UNLOCK(runtime);
        stw->world_stopped = 1;
        return;
    }

    for (;;) {
        // Switch threads that are detached to the GC stopped state
        bool stopped_all_threads = park_detached_threads(stw);
        HEAD_UNLOCK(runtime);

        if (stopped_all_threads) {
            break;
        }

        PyTime_t wait_ns = 1000*1000;  // 1ms (arbitrary, may need tuning)
        int detach = 0;
        if (PyEvent_WaitTimed(&stw->stop_event, wait_ns, detach)) {
            // Woken by the last suspending thread's notify.
            assert(stw->thread_countdown == 0);
            break;
        }

        HEAD_LOCK(runtime);
    }
    stw->world_stopped = 1;
}
2316
2317
// Resume the world stopped by stop_the_world(): move every suspended
// thread back to DETACHED, wake it, and release the locks stop_the_world()
// left held.
static void
start_the_world(struct _stoptheworld_state *stw)
{
    _PyRuntimeState *runtime = &_PyRuntime;
    assert(PyMutex_IsLocked(&stw->mutex));

    HEAD_LOCK(runtime);
    stw->requested = 0;
    stw->world_stopped = 0;
    // Switch threads back to the detached state.
    _Py_FOR_EACH_STW_INTERP(stw, i) {
        _Py_FOR_EACH_TSTATE_UNLOCKED(i, t) {
            if (t != stw->requester) {
                assert(_Py_atomic_load_int_relaxed(&t->state) ==
                       _Py_THREAD_SUSPENDED);
                _Py_atomic_store_int(&t->state, _Py_THREAD_DETACHED);
                _PyParkingLot_UnparkAll(&t->state);
            }
        }
    }
    stw->requester = NULL;
    HEAD_UNLOCK(runtime);
    PyMutex_Unlock(&stw->mutex);
    if (stw->is_global) {
        _PyRWMutex_Unlock(&runtime->stoptheworld_mutex);
    }
    else {
        _PyRWMutex_RUnlock(&runtime->stoptheworld_mutex);
    }
}
2347
#endif  // Py_GIL_DISABLED
2348
2349
// Stop all threads in all interpreters (no-op in GIL builds, where the
// GIL already provides exclusion).
void
_PyEval_StopTheWorldAll(_PyRuntimeState *runtime)
{
#ifdef Py_GIL_DISABLED
    stop_the_world(&runtime->stoptheworld);
#endif
}
2356
2357
// Undo _PyEval_StopTheWorldAll() (no-op in GIL builds).
void
_PyEval_StartTheWorldAll(_PyRuntimeState *runtime)
{
#ifdef Py_GIL_DISABLED
    start_the_world(&runtime->stoptheworld);
#endif
}
2364
2365
// Stop all threads of a single interpreter (no-op in GIL builds).
void
_PyEval_StopTheWorld(PyInterpreterState *interp)
{
#ifdef Py_GIL_DISABLED
    stop_the_world(&interp->stoptheworld);
#endif
}
2372
2373
// Undo _PyEval_StopTheWorld() (no-op in GIL builds).
void
_PyEval_StartTheWorld(PyInterpreterState *interp)
{
#ifdef Py_GIL_DISABLED
    start_the_world(&interp->stoptheworld);
#endif
}
2380
2381
//----------
2382
// other API
2383
//----------
2384
2385
/* Asynchronously raise an exception in a thread.
2386
   Requested by Just van Rossum and Alex Martelli.
2387
   To prevent naive misuse, you must write your own extension
2388
   to call this, or use ctypes.  Must be called with the GIL held.
2389
   Returns the number of tstates modified (normally 1, but 0 if `id` didn't
2390
   match any known thread id).  Can be called with exc=NULL to clear an
2391
   existing async exception.  This raises no exceptions. */
2392
2393
// XXX Move this to Python/ceval_gil.c?
2394
// XXX Deprecate this.
2395
// Asynchronously raise `exc` in the thread whose id is `id` (exc == NULL
// clears a pending async exception).  Returns 1 if a matching thread was
// found, 0 otherwise.  Raises no exceptions itself.
int
PyThreadState_SetAsyncExc(unsigned long id, PyObject *exc)
{
    PyInterpreterState *interp = _PyInterpreterState_GET();

    /* Although the GIL is held, a few C API functions can be called
     * without the GIL held, and in particular some that create and
     * destroy thread and interpreter states.  Those can mutate the
     * list of thread states we're traversing, so to prevent that we lock
     * head_mutex for the duration.
     */
    PyThreadState *tstate = NULL;
    _Py_FOR_EACH_TSTATE_BEGIN(interp, t) {
        if (t->thread_id == id) {
            tstate = t;
            break;
        }
    }
    _Py_FOR_EACH_TSTATE_END(interp);

    if (tstate != NULL) {
        /* Tricky:  we need to decref the current value
         * (if any) in tstate->async_exc, but that can in turn
         * allow arbitrary Python code to run, including
         * perhaps calls to this function.  To prevent
         * deadlock, we need to release head_mutex before
         * the decref.
         */
        Py_XINCREF(exc);
        PyObject *old_exc = _Py_atomic_exchange_ptr(&tstate->async_exc, exc);

        Py_XDECREF(old_exc);
        // Tell the target thread to check for the pending async exception.
        _Py_set_eval_breaker_bit(tstate, _PY_ASYNC_EXCEPTION_BIT);
    }

    return tstate != NULL;
}
2432
2433
//---------------------------------
2434
// API for the current thread state
2435
//---------------------------------
2436
2437
// Return the current thread state, or NULL if none is attached; the
// caller must handle a NULL result.
PyThreadState *
PyThreadState_GetUnchecked(void)
{
    return current_fast_get();
}
2442
2443
2444
// Return the current thread state; _Py_EnsureTstateNotNULL() aborts if
// there is none.
PyThreadState *
PyThreadState_Get(void)
{
    PyThreadState *tstate = current_fast_get();
    _Py_EnsureTstateNotNULL(tstate);
    return tstate;
}
2451
2452
// Detach the current thread state (if any) and attach `newts` (if not
// NULL).  Returns the previously attached state, or NULL.
PyThreadState *
_PyThreadState_Swap(_PyRuntimeState *runtime, PyThreadState *newts)
{
    PyThreadState *oldts = current_fast_get();
    if (oldts != NULL) {
        _PyThreadState_Detach(oldts);
    }
    if (newts != NULL) {
        _PyThreadState_Attach(newts);
    }
    return oldts;
}
2464
2465
// Public wrapper for _PyThreadState_Swap() on the global runtime.
PyThreadState *
PyThreadState_Swap(PyThreadState *newts)
{
    return _PyThreadState_Swap(&_PyRuntime, newts);
}
2470
2471
2472
// Bind `tstate` to the calling OS thread, and make it the gilstate tstate
// if this thread doesn't have one yet.
void
_PyThreadState_Bind(PyThreadState *tstate)
{
    // gh-104690: If Python is being finalized and PyInterpreterState_Delete()
    // was called, tstate becomes a dangling pointer.
    assert(_PyThreadState_CheckConsistency(tstate));

    bind_tstate(tstate);
    // This makes sure there's a gilstate tstate bound
    // as soon as possible.
    if (gilstate_get() == NULL) {
        bind_gilstate_tstate(tstate);
    }
}
2486
2487
#if defined(Py_GIL_DISABLED) && !defined(Py_LIMITED_API)
2488
uintptr_t
_Py_GetThreadLocal_Addr(void)
{
#ifdef HAVE_THREAD_LOCAL
    // gh-112535: Use the address of the thread-local PyThreadState variable as
    // a unique identifier for the current thread. Each thread has a unique
    // _Py_tss_tstate variable with a unique address.
    return (uintptr_t)&_Py_tss_tstate;
#else
#  error "no supported thread-local variable storage classifier"
#endif
}
2500
#endif
2501
2502
/***********************************/
2503
/* routines for advanced debuggers */
2504
/***********************************/
2505
2506
// (requested by David Beazley)
2507
// Don't use unless you know what you are doing!
2508
2509
// Return the first interpreter in the runtime's linked list.
PyInterpreterState *
PyInterpreterState_Head(void)
{
    return _PyRuntime.interpreters.head;
}
2514
2515
// Return the main interpreter.
PyInterpreterState *
PyInterpreterState_Main(void)
{
    return _PyInterpreterState_Main();
}
2520
2521
// Return the next interpreter in the runtime's linked list (or NULL).
PyInterpreterState *
PyInterpreterState_Next(PyInterpreterState *interp) {
    return interp->next;
}
2525
2526
// Return the first thread state in `interp`'s linked list.
PyThreadState *
PyInterpreterState_ThreadHead(PyInterpreterState *interp) {
    return interp->threads.head;
}
2530
2531
// Return the next thread state in the interpreter's linked list (or NULL).
PyThreadState *
PyThreadState_Next(PyThreadState *tstate) {
    return tstate->next;
}
2535
2536
2537
/********************************************/
2538
/* reporting execution state of all threads */
2539
/********************************************/
2540
2541
/* The implementation of sys._current_frames().  This is intended to be
2542
   called with the GIL held, as it will be when called via
2543
   sys._current_frames().  It's possible it would work fine even without
2544
   the GIL held, but haven't thought enough about that.
2545
*/
2546
// Implementation of sys._current_frames(): return a new dict mapping each
// thread id to that thread's topmost complete frame object, or NULL with
// an exception set on failure.
PyObject *
_PyThread_CurrentFrames(void)
{
    _PyRuntimeState *runtime = &_PyRuntime;
    PyThreadState *tstate = current_fast_get();
    if (_PySys_Audit(tstate, "sys._current_frames", NULL) < 0) {
        return NULL;
    }

    PyObject *result = PyDict_New();
    if (result == NULL) {
        return NULL;
    }

    /* for i in all interpreters:
     *     for t in all of i's thread states:
     *          if t's frame isn't NULL, map t's id to its frame
     * Because these lists can mutate even when the GIL is held, we
     * need to grab head_mutex for the duration.
     */
    _PyEval_StopTheWorldAll(runtime);
    HEAD_LOCK(runtime);
    PyInterpreterState *i;
    for (i = runtime->interpreters.head; i != NULL; i = i->next) {
        _Py_FOR_EACH_TSTATE_UNLOCKED(i, t) {
            _PyInterpreterFrame *frame = t->current_frame;
            frame = _PyFrame_GetFirstComplete(frame);
            if (frame == NULL) {
                // Thread has no (complete) frame: skip it.
                continue;
            }
            PyObject *id = PyLong_FromUnsignedLong(t->thread_id);
            if (id == NULL) {
                goto fail;
            }
            PyObject *frameobj = (PyObject *)_PyFrame_GetFrameObject(frame);
            if (frameobj == NULL) {
                Py_DECREF(id);
                goto fail;
            }
            int stat = PyDict_SetItem(result, id, frameobj);
            Py_DECREF(id);
            if (stat < 0) {
                goto fail;
            }
        }
    }
    goto done;

fail:
    Py_CLEAR(result);

done:
    // Unlock/restart on both success and failure paths.
    HEAD_UNLOCK(runtime);
    _PyEval_StartTheWorldAll(runtime);
    return result;
}
2602
2603
/* The implementation of sys._current_exceptions().  This is intended to be
2604
   called with the GIL held, as it will be when called via
2605
   sys._current_exceptions().  It's possible it would work fine even without
2606
   the GIL held, but haven't thought enough about that.
2607
*/
2608
// Implementation of sys._current_exceptions(): return a new dict mapping
// each thread id to its topmost exception value (Py_None when the thread
// has none pending), or NULL with an exception set on failure.
PyObject *
_PyThread_CurrentExceptions(void)
{
    _PyRuntimeState *runtime = &_PyRuntime;
    PyThreadState *tstate = current_fast_get();

    _Py_EnsureTstateNotNULL(tstate);

    if (_PySys_Audit(tstate, "sys._current_exceptions", NULL) < 0) {
        return NULL;
    }

    PyObject *result = PyDict_New();
    if (result == NULL) {
        return NULL;
    }

    /* for i in all interpreters:
     *     for t in all of i's thread states:
     *          if t has a topmost exception entry, map t's id to its value
     * Because these lists can mutate even when the GIL is held, we
     * need to grab head_mutex for the duration.
     */
    _PyEval_StopTheWorldAll(runtime);
    HEAD_LOCK(runtime);
    PyInterpreterState *i;
    for (i = runtime->interpreters.head; i != NULL; i = i->next) {
        _Py_FOR_EACH_TSTATE_UNLOCKED(i, t) {
            _PyErr_StackItem *err_info = _PyErr_GetTopmostException(t);
            if (err_info == NULL) {
                continue;
            }
            PyObject *id = PyLong_FromUnsignedLong(t->thread_id);
            if (id == NULL) {
                goto fail;
            }
            PyObject *exc = err_info->exc_value;
            assert(exc == NULL ||
                   exc == Py_None ||
                   PyExceptionInstance_Check(exc));

            int stat = PyDict_SetItem(result, id, exc == NULL ? Py_None : exc);
            Py_DECREF(id);
            if (stat < 0) {
                goto fail;
            }
        }
    }
    goto done;

fail:
    Py_CLEAR(result);

done:
    // Unlock/restart on both success and failure paths.
    HEAD_UNLOCK(runtime);
    _PyEval_StartTheWorldAll(runtime);
    return result;
}
2666
2667
2668
/***********************************/
2669
/* Python "auto thread state" API. */
2670
/***********************************/
2671
2672
/* Internal initialization/finalization functions called by
2673
   Py_Initialize/Py_FinalizeEx
2674
*/
2675
// Record the main interpreter as the one PyGILState_Ensure() auto-creates
// thread states in.  Only the main interpreter performs this setup.
PyStatus
_PyGILState_Init(PyInterpreterState *interp)
{
    if (!_Py_IsMainInterpreter(interp)) {
        /* Currently, PyGILState is shared by all interpreters. The main
         * interpreter is responsible to initialize it. */
        return _PyStatus_OK();
    }
    _PyRuntimeState *runtime = interp->runtime;
    assert(gilstate_get() == NULL);
    assert(runtime->gilstate.autoInterpreterState == NULL);
    runtime->gilstate.autoInterpreterState = interp;
    return _PyStatus_OK();
}
2689
2690
// Clear the auto-interpreter set by _PyGILState_Init().
void
_PyGILState_Fini(PyInterpreterState *interp)
{
    if (!_Py_IsMainInterpreter(interp)) {
        /* Currently, PyGILState is shared by all interpreters. The main
         * interpreter is responsible to initialize (and finalize) it. */
        return;
    }
    interp->runtime->gilstate.autoInterpreterState = NULL;
}
2700
2701
2702
// XXX Drop this.
2703
// Sanity-check (in debug builds) that `tstate` is correctly set up as the
// main interpreter's initial gilstate tstate; no-op for other interpreters.
void
_PyGILState_SetTstate(PyThreadState *tstate)
{
    /* must init with valid states */
    assert(tstate != NULL);
    assert(tstate->interp != NULL);

    if (!_Py_IsMainInterpreter(tstate->interp)) {
        /* Currently, PyGILState is shared by all interpreters. The main
         * interpreter is responsible to initialize it. */
        return;
    }

#ifndef NDEBUG
    _PyRuntimeState *runtime = tstate->interp->runtime;

    assert(runtime->gilstate.autoInterpreterState == tstate->interp);
    assert(gilstate_get() == tstate);
    assert(tstate->gilstate_counter == 1);
#endif
}
2724
2725
PyInterpreterState *
2726
_PyGILState_GetInterpreterStateUnsafe(void)
2727
0
{
2728
0
    return _PyRuntime.gilstate.autoInterpreterState;
2729
0
}
2730
2731
/* The public functions */
2732
2733
/* Public API: return the thread state bound to the calling OS thread
 * through the gilstate machinery, or NULL if this thread has no such
 * binding. */
PyThreadState *
PyGILState_GetThisThreadState(void)
{
    PyThreadState *bound = gilstate_get();
    return bound;
}
2738
2739
int
2740
PyGILState_Check(void)
2741
0
{
2742
0
    _PyRuntimeState *runtime = &_PyRuntime;
2743
0
    if (!runtime->gilstate.check_enabled) {
2744
0
        return 1;
2745
0
    }
2746
2747
0
    PyThreadState *tstate = current_fast_get();
2748
0
    if (tstate == NULL) {
2749
0
        return 0;
2750
0
    }
2751
2752
0
    PyThreadState *tcur = gilstate_get();
2753
0
    return (tstate == tcur);
2754
0
}
2755
2756
/* Public API: make sure the calling thread has a thread state and is
 * holding the GIL, creating and binding a new thread state for this
 * thread if it has none.  Returns a token (LOCKED/UNLOCKED) recording
 * whether the GIL was already held; it must be passed to the matching
 * PyGILState_Release() call. */
PyGILState_STATE
PyGILState_Ensure(void)
{
    _PyRuntimeState *runtime = &_PyRuntime;

    /* Note that we do not auto-init Python here - apart from
       potential races with 2 threads auto-initializing, pep-311
       spells out other issues.  Embedders are expected to have
       called Py_Initialize(). */

    /* Ensure that _PyEval_InitThreads() and _PyGILState_Init() have been
       called by Py_Initialize()

       TODO: This isn't thread-safe. There's no protection here against
       concurrent finalization of the interpreter; it's simply a guard
       for *after* the interpreter has finalized.
     */
    if (!_PyEval_ThreadsInitialized() || runtime->gilstate.autoInterpreterState == NULL) {
        // Runtime is not (or no longer) usable: park this thread forever.
        PyThread_hang_thread();
    }

    PyThreadState *tcur = gilstate_get();
    int has_gil;
    if (tcur == NULL) {
        /* Create a new Python thread state for this thread */
        // XXX Use PyInterpreterState_EnsureThreadState()?
        tcur = new_threadstate(runtime->gilstate.autoInterpreterState,
                               _PyThreadState_WHENCE_GILSTATE);
        if (tcur == NULL) {
            Py_FatalError("Couldn't create thread-state for new thread");
        }
        bind_tstate(tcur);
        bind_gilstate_tstate(tcur);

        /* This is our thread state!  We'll need to delete it in the
           matching call to PyGILState_Release(). */
        // Counter starts at 0 here; the ++ below brings it to 1 so the
        // matching Release() with counter==0 will delete this tstate.
        assert(tcur->gilstate_counter == 1);
        tcur->gilstate_counter = 0;
        has_gil = 0; /* new thread state is never current */
    }
    else {
        has_gil = holds_gil(tcur);
    }

    if (!has_gil) {
        PyEval_RestoreThread(tcur);
    }

    /* Update our counter in the thread-state - no need for locks:
       - tcur will remain valid as we hold the GIL.
       - the counter is safe as we are the only thread "allowed"
         to modify this value
    */
    ++tcur->gilstate_counter;

    return has_gil ? PyGILState_LOCKED : PyGILState_UNLOCKED;
}
2813
2814
/* Public API: undo a matching PyGILState_Ensure() call.
 *
 * Decrements the thread state's gilstate counter; when it reaches
 * zero, the thread state created by Ensure() is cleared and deleted
 * (which also releases the GIL).  Otherwise, the GIL is released only
 * if 'oldstate' says it was not held before the matching Ensure(). */
void
PyGILState_Release(PyGILState_STATE oldstate)
{
    PyThreadState *tstate = gilstate_get();
    if (tstate == NULL) {
        Py_FatalError("auto-releasing thread-state, "
                      "but no thread-state for this thread");
    }

    /* We must hold the GIL and have our thread state current */
    if (!holds_gil(tstate)) {
        _Py_FatalErrorFormat(__func__,
                             "thread state %p must be current when releasing",
                             tstate);
    }
    --tstate->gilstate_counter;
    assert(tstate->gilstate_counter >= 0); /* illegal counter value */

    /* If we're going to destroy this thread-state, we must
     * clear it while the GIL is held, as destructors may run.
     */
    if (tstate->gilstate_counter == 0) {
        /* can't have been locked when we created it */
        assert(oldstate == PyGILState_UNLOCKED);
        // XXX Unbind tstate here.
        // gh-119585: `PyThreadState_Clear()` may call destructors that
        // themselves use PyGILState_Ensure and PyGILState_Release, so make
        // sure that gilstate_counter is not zero when calling it.
        ++tstate->gilstate_counter;
        PyThreadState_Clear(tstate);
        --tstate->gilstate_counter;
        /* Delete the thread-state.  Note this releases the GIL too!
         * It's vital that the GIL be held here, to avoid shutdown
         * races; see bugs 225673 and 1061968 (that nasty bug has a
         * habit of coming back).
         */
        assert(tstate->gilstate_counter == 0);
        assert(current_fast_get() == tstate);
        _PyThreadState_DeleteCurrent(tstate);
    }
    /* Release the lock if necessary */
    else if (oldstate == PyGILState_UNLOCKED) {
        PyEval_SaveThread();
    }
}
2859
2860
2861
/*************/
2862
/* Other API */
2863
/*************/
2864
2865
_PyFrameEvalFunction
2866
_PyInterpreterState_GetEvalFrameFunc(PyInterpreterState *interp)
2867
0
{
2868
0
    if (interp->eval_frame == NULL) {
2869
0
        return _PyEval_EvalFrameDefault;
2870
0
    }
2871
0
    return interp->eval_frame;
2872
0
}
2873
2874
2875
/* Install 'eval_frame' as the interpreter's frame evaluation function
 * (PEP 523 hook).  The default evaluator is canonicalized to NULL in
 * the eval_frame slot, and the swap is done with all other threads
 * stopped so no thread observes a half-updated state. */
void
_PyInterpreterState_SetEvalFrameFunc(PyInterpreterState *interp,
                                     _PyFrameEvalFunction eval_frame)
{
    if (eval_frame == _PyEval_EvalFrameDefault) {
        // NULL is the in-slot representation of the default evaluator.
        eval_frame = NULL;
    }
    if (eval_frame == interp->eval_frame) {
        // No change: skip the stop-the-world pause below.
        return;
    }
#ifdef _Py_TIER2
    if (eval_frame != NULL) {
        // NOTE(review): presumably tier-2 executors assume the default
        // evaluator, so installing a custom one invalidates them all.
        _Py_Executors_InvalidateAll(interp, 1);
    }
#endif
    RARE_EVENT_INC(set_eval_frame_func);
    _PyEval_StopTheWorld(interp);
    interp->eval_frame = eval_frame;
    _PyEval_StartTheWorld(interp);
}
2895
2896
2897
const PyConfig*
2898
_PyInterpreterState_GetConfig(PyInterpreterState *interp)
2899
73.8M
{
2900
73.8M
    return &interp->config;
2901
73.8M
}
2902
2903
2904
const PyConfig*
2905
_Py_GetConfig(void)
2906
46.3k
{
2907
46.3k
    PyThreadState *tstate = current_fast_get();
2908
46.3k
    _Py_EnsureTstateNotNULL(tstate);
2909
46.3k
    return _PyInterpreterState_GetConfig(tstate->interp);
2910
46.3k
}
2911
2912
2913
int
2914
_PyInterpreterState_HasFeature(PyInterpreterState *interp, unsigned long feature)
2915
0
{
2916
0
    return ((interp->feature_flags & feature) != 0);
2917
0
}
2918
2919
2920
158k
/* Slots of headroom a new chunk must provide beyond the immediate
   request, so that a run of small pushes doesn't allocate a chunk each. */
#define MINIMUM_OVERHEAD 1000

/* Allocate a new data-stack chunk large enough for 'size' object slots
 * (plus headroom), link it in front of the current chunk, and return a
 * pointer to the reserved slots.  Returns NULL if allocation fails. */
static PyObject **
push_chunk(PyThreadState *tstate, int size)
{
    // Double the default chunk size until the request (+ headroom) fits.
    int allocate_size = _PY_DATA_STACK_CHUNK_SIZE;
    while (allocate_size < (int)sizeof(PyObject*)*(size + MINIMUM_OVERHEAD)) {
        allocate_size *= 2;
    }
    _PyStackChunk *new = allocate_chunk(allocate_size, tstate->datastack_chunk);
    if (new == NULL) {
        return NULL;
    }
    if (tstate->datastack_chunk) {
        // Record the fill level of the outgoing chunk so
        // _PyThreadState_PopFrame can resume it from chunk->top.
        tstate->datastack_chunk->top = tstate->datastack_top -
                                       &tstate->datastack_chunk->data[0];
    }
    tstate->datastack_chunk = new;
    tstate->datastack_limit = (PyObject **)(((char *)new) + allocate_size);
    // When new is the "root" chunk (i.e. new->previous == NULL), we can keep
    // _PyThreadState_PopFrame from freeing it later by "skipping" over the
    // first element:
    PyObject **res = &new->data[new->previous == NULL];
    tstate->datastack_top = res + size;
    return res;
}
2946
2947
_PyInterpreterFrame *
2948
_PyThreadState_PushFrame(PyThreadState *tstate, size_t size)
2949
155M
{
2950
155M
    assert(size < INT_MAX/sizeof(PyObject *));
2951
155M
    if (_PyThreadState_HasStackSpace(tstate, (int)size)) {
2952
155M
        _PyInterpreterFrame *res = (_PyInterpreterFrame *)tstate->datastack_top;
2953
155M
        tstate->datastack_top += size;
2954
155M
        return res;
2955
155M
    }
2956
158k
    return (_PyInterpreterFrame *)push_chunk(tstate, (int)size);
2957
155M
}
2958
2959
/* Return the data-stack space used by 'frame' to the thread state.
 * If 'frame' sits at the very start of the current chunk's data, that
 * whole chunk is freed and the previous chunk becomes current again;
 * otherwise the top pointer is simply rewound to the frame's base. */
void
_PyThreadState_PopFrame(PyThreadState *tstate, _PyInterpreterFrame * frame)
{
    assert(tstate->datastack_chunk);
    PyObject **base = (PyObject **)frame;
    if (base == &tstate->datastack_chunk->data[0]) {
        _PyStackChunk *chunk = tstate->datastack_chunk;
        _PyStackChunk *previous = chunk->previous;
        // push_chunk ensures that the root chunk is never popped:
        assert(previous);
        // Resume the previous chunk where push_chunk recorded its fill
        // level (previous->top).
        tstate->datastack_top = &previous->data[previous->top];
        tstate->datastack_chunk = previous;
        _PyObject_VirtualFree(chunk, chunk->size);
        tstate->datastack_limit = (PyObject **)(((char *)previous) + previous->size);
    }
    else {
        assert(tstate->datastack_top);
        assert(tstate->datastack_top >= base);
        tstate->datastack_top = base;
    }
}
2980
2981
2982
#ifndef NDEBUG
// Check that a Python thread state valid. In practice, this function is used
// on a Python debug build to check if 'tstate' is a dangling pointer, if the
// PyThreadState memory has been freed.
//
// Usage:
//
//     assert(_PyThreadState_CheckConsistency(tstate));
int
_PyThreadState_CheckConsistency(PyThreadState *tstate)
{
    // Both pointers are checked against the debug allocator's
    // freed-memory marker (see _PyMem_IsPtrFreed), catching use of a
    // stale tstate after thread/interpreter teardown.
    assert(!_PyMem_IsPtrFreed(tstate));
    assert(!_PyMem_IsPtrFreed(tstate->interp));
    // Always returns 1 so it can be used inside assert().
    return 1;
}
#endif
2998
2999
3000
// Check if a Python thread must call _PyThreadState_HangThread(), rather than
3001
// taking the GIL or attaching to the interpreter if Py_Finalize() has been
3002
// called.
3003
//
3004
// When this function is called by a daemon thread after Py_Finalize() has been
3005
// called, the GIL may no longer exist.
3006
//
3007
// tstate must be non-NULL.
3008
int
3009
_PyThreadState_MustExit(PyThreadState *tstate)
3010
64.2k
{
3011
64.2k
    int state = _Py_atomic_load_int_relaxed(&tstate->state);
3012
64.2k
    return state == _Py_THREAD_SHUTTING_DOWN;
3013
64.2k
}
3014
3015
void
3016
_PyThreadState_HangThread(PyThreadState *tstate)
3017
0
{
3018
0
    _PyThreadStateImpl *tstate_impl = (_PyThreadStateImpl *)tstate;
3019
0
    decref_threadstate(tstate_impl);
3020
0
    PyThread_hang_thread();
3021
0
}
3022
3023
/********************/
3024
/* mimalloc support */
3025
/********************/
3026
3027
/* Initialize the per-thread mimalloc state for 'tstate'.  Only active
 * on free-threaded (Py_GIL_DISABLED) builds; elsewhere this is a
 * no-op.  Must be called from the thread that will use the state. */
static void
tstate_mimalloc_bind(PyThreadState *tstate)
{
#ifdef Py_GIL_DISABLED
    struct _mimalloc_thread_state *mts = &((_PyThreadStateImpl*)tstate)->mimalloc;

    // Initialize the mimalloc thread state. This must be called from the
    // same thread that will use the thread state. The "mem" heap doubles as
    // the "backing" heap.
    mi_tld_t *tld = &mts->tld;
    _mi_tld_init(tld, &mts->heaps[_Py_MIMALLOC_HEAP_MEM]);
    llist_init(&mts->page_list);

    // Exiting threads push any remaining in-use segments to the abandoned
    // pool to be re-claimed later by other threads. We use per-interpreter
    // pools to keep Python objects from different interpreters separate.
    tld->segments.abandoned = &tstate->interp->mimalloc.abandoned_pool;

    // Don't fill in the first N bytes up to ob_type in debug builds. We may
    // access ob_tid and the refcount fields in the dict and list lock-less
    // accesses, so they must remain valid for a while after deallocation.
    size_t base_offset = offsetof(PyObject, ob_type);
    if (_PyMem_DebugEnabled()) {
        // The debug allocator adds two words at the beginning of each block.
        base_offset += 2 * sizeof(size_t);
    }
    size_t debug_offsets[_Py_MIMALLOC_HEAP_COUNT] = {
        [_Py_MIMALLOC_HEAP_OBJECT] = base_offset,
        [_Py_MIMALLOC_HEAP_GC] = base_offset,
        [_Py_MIMALLOC_HEAP_GC_PRE] = base_offset + 2 * sizeof(PyObject *),
    };

    // Initialize each heap
    for (uint8_t i = 0; i < _Py_MIMALLOC_HEAP_COUNT; i++) {
        _mi_heap_init_ex(&mts->heaps[i], tld, _mi_arena_id_none(), false, i);
        mts->heaps[i].debug_offset = (uint8_t)debug_offsets[i];
    }

    // Heaps that store Python objects should use QSBR to delay freeing
    // mimalloc pages while there may be concurrent lock-free readers.
    mts->heaps[_Py_MIMALLOC_HEAP_OBJECT].page_use_qsbr = true;
    mts->heaps[_Py_MIMALLOC_HEAP_GC].page_use_qsbr = true;
    mts->heaps[_Py_MIMALLOC_HEAP_GC_PRE].page_use_qsbr = true;

    // By default, object allocations use _Py_MIMALLOC_HEAP_OBJECT.
    // _PyObject_GC_New() and similar functions temporarily override this to
    // use one of the GC heaps.
    mts->current_object_heap = &mts->heaps[_Py_MIMALLOC_HEAP_OBJECT];

    // Publish readiness last, after all heap setup above is visible.
    _Py_atomic_store_int(&mts->initialized, 1);
#endif
}
3079
3080
/* Abandon this thread's mimalloc heaps so their segments can be
 * reclaimed by other threads.  Only meaningful on free-threaded
 * (Py_GIL_DISABLED) builds; a no-op elsewhere and for thread states
 * that were never bound (heaps uninitialized). */
void
_PyThreadState_ClearMimallocHeaps(PyThreadState *tstate)
{
#ifdef Py_GIL_DISABLED
    if (!tstate->_status.bound) {
        // The mimalloc heaps are only initialized when the thread is bound.
        return;
    }

    _PyThreadStateImpl *tstate_impl = (_PyThreadStateImpl *)tstate;
    for (Py_ssize_t i = 0; i < _Py_MIMALLOC_HEAP_COUNT; i++) {
        // Abandon all segments in use by this thread. This pushes them to
        // a shared pool to later be reclaimed by other threads. It's important
        // to do this before the thread state is destroyed so that objects
        // remain visible to the GC.
        _mi_heap_collect_abandon(&tstate_impl->mimalloc.heaps[i]);
    }
#endif
}
3099
3100
3101
int
3102
_Py_IsMainThread(void)
3103
62.8M
{
3104
62.8M
    unsigned long thread = PyThread_get_thread_ident();
3105
62.8M
    return (thread == _PyRuntime.main_thread);
3106
62.8M
}
3107
3108
3109
PyInterpreterState *
3110
_PyInterpreterState_Main(void)
3111
62.7M
{
3112
62.7M
    return _PyRuntime.interpreters.main;
3113
62.7M
}
3114
3115
3116
int
3117
_Py_IsMainInterpreterFinalizing(PyInterpreterState *interp)
3118
0
{
3119
    /* bpo-39877: Access _PyRuntime directly rather than using
3120
       tstate->interp->runtime to support calls from Python daemon threads.
3121
       After Py_Finalize() has been called, tstate can be a dangling pointer:
3122
       point to PyThreadState freed memory. */
3123
0
    return (_PyRuntimeState_GetFinalizing(&_PyRuntime) != NULL &&
3124
0
            interp == &_PyRuntime._main_interpreter);
3125
0
}
3126
3127
3128
const PyConfig *
3129
_Py_GetMainConfig(void)
3130
0
{
3131
0
    PyInterpreterState *interp = _PyInterpreterState_Main();
3132
0
    if (interp == NULL) {
3133
0
        return NULL;
3134
0
    }
3135
0
    return _PyInterpreterState_GetConfig(interp);
3136
0
}