Coverage Report

Created: 2025-12-14 07:06

next uncovered line (L), next uncovered region (R), next uncovered branch (B)
/src/cpython/Python/pystate.c
Line
Count
Source
1
2
/* Thread and interpreter state structures and their interfaces */
3
4
#include "Python.h"
5
#include "pycore_abstract.h"      // _PyIndex_Check()
6
#include "pycore_audit.h"         // _Py_AuditHookEntry
7
#include "pycore_ceval.h"         // _PyEval_AcquireLock()
8
#include "pycore_codecs.h"        // _PyCodec_Fini()
9
#include "pycore_critical_section.h" // _PyCriticalSection_Resume()
10
#include "pycore_dtoa.h"          // _dtoa_state_INIT()
11
#include "pycore_freelist.h"      // _PyObject_ClearFreeLists()
12
#include "pycore_initconfig.h"    // _PyStatus_OK()
13
#include "pycore_interpframe.h"   // _PyThreadState_HasStackSpace()
14
#include "pycore_object.h"        // _PyType_InitCache()
15
#include "pycore_obmalloc.h"      // _PyMem_obmalloc_state_on_heap()
16
#include "pycore_optimizer.h"     // JIT_CLEANUP_THRESHOLD
17
#include "pycore_parking_lot.h"   // _PyParkingLot_AfterFork()
18
#include "pycore_pyerrors.h"      // _PyErr_Clear()
19
#include "pycore_pylifecycle.h"   // _PyAST_Fini()
20
#include "pycore_pymem.h"         // _PyMem_DebugEnabled()
21
#include "pycore_runtime.h"       // _PyRuntime
22
#include "pycore_runtime_init.h"  // _PyRuntimeState_INIT
23
#include "pycore_stackref.h"      // Py_STACKREF_DEBUG
24
#include "pycore_stats.h"         // FT_STAT_WORLD_STOP_INC()
25
#include "pycore_time.h"          // _PyTime_Init()
26
#include "pycore_uop.h"           // UOP_BUFFER_SIZE
27
#include "pycore_uniqueid.h"      // _PyObject_FinalizePerThreadRefcounts()
28
29
30
/* --------------------------------------------------------------------------
31
CAUTION
32
33
Always use PyMem_RawMalloc() and PyMem_RawFree() directly in this file.  A
34
number of these functions are advertised as safe to call when the GIL isn't
35
held, and in a debug build Python redirects (e.g.) PyMem_NEW (etc) to Python's
36
debugging obmalloc functions.  Those aren't thread-safe (they rely on the GIL
37
to avoid the expense of doing their own locking).
38
-------------------------------------------------------------------------- */
39
40
#ifdef HAVE_DLOPEN
41
#  ifdef HAVE_DLFCN_H
42
#    include <dlfcn.h>
43
#  endif
44
#  if !HAVE_DECL_RTLD_LAZY
45
#    define RTLD_LAZY 1
46
#  endif
47
#endif
48
49
50
/****************************************/
51
/* helpers for the current thread state */
52
/****************************************/
53
54
// API for the current thread state is further down.
55
56
/* "current" means one of:
57
   - bound to the current OS thread
58
   - holds the GIL
59
 */
60
61
//-------------------------------------------------
62
// a highly efficient lookup for the current thread
63
//-------------------------------------------------
64
65
/*
66
   The stored thread state is set by PyThreadState_Swap().
67
68
   For each of these functions, the GIL must be held by the current thread.
69
 */
70
71
72
/* The attached thread state for the current thread. */
73
_Py_thread_local PyThreadState *_Py_tss_tstate = NULL;
74
75
/* The "bound" thread state used by PyGILState_Ensure(),
76
   also known as a "gilstate." */
77
_Py_thread_local PyThreadState *_Py_tss_gilstate = NULL;
78
79
/* The interpreter of the attached thread state,
80
   and is same as tstate->interp. */
81
_Py_thread_local PyInterpreterState *_Py_tss_interp = NULL;
82
83
static inline PyThreadState *
84
current_fast_get(void)
85
106M
{
86
106M
    return _Py_tss_tstate;
87
106M
}
88
89
static inline void
90
current_fast_set(_PyRuntimeState *Py_UNUSED(runtime), PyThreadState *tstate)
91
2.99M
{
92
2.99M
    assert(tstate != NULL);
93
2.99M
    _Py_tss_tstate = tstate;
94
2.99M
    assert(tstate->interp != NULL);
95
2.99M
    _Py_tss_interp = tstate->interp;
96
2.99M
}
97
98
static inline void
99
current_fast_clear(_PyRuntimeState *Py_UNUSED(runtime))
100
2.99M
{
101
2.99M
    _Py_tss_tstate = NULL;
102
2.99M
    _Py_tss_interp = NULL;
103
2.99M
}
104
105
#define tstate_verify_not_active(tstate) \
106
0
    if (tstate == current_fast_get()) { \
107
0
        _Py_FatalErrorFormat(__func__, "tstate %p is still current", tstate); \
108
0
    }
109
110
PyThreadState *
111
_PyThreadState_GetCurrent(void)
112
9.59M
{
113
9.59M
    return current_fast_get();
114
9.59M
}
115
116
117
//---------------------------------------------
118
// The thread state used by PyGILState_Ensure()
119
//---------------------------------------------
120
121
/*
122
   The stored thread state is set by bind_tstate() (AKA PyThreadState_Bind().
123
124
   The GIL does no need to be held for these.
125
  */
126
127
static inline PyThreadState *
128
gilstate_get(void)
129
56
{
130
56
    return _Py_tss_gilstate;
131
56
}
132
133
static inline void
134
gilstate_set(PyThreadState *tstate)
135
28
{
136
28
    assert(tstate != NULL);
137
28
    _Py_tss_gilstate = tstate;
138
28
}
139
140
static inline void
141
gilstate_clear(void)
142
0
{
143
0
    _Py_tss_gilstate = NULL;
144
0
}
145
146
147
#ifndef NDEBUG
148
static inline int tstate_is_alive(PyThreadState *tstate);
149
150
static inline int
151
tstate_is_bound(PyThreadState *tstate)
152
{
153
    return tstate->_status.bound && !tstate->_status.unbound;
154
}
155
#endif  // !NDEBUG
156
157
static void bind_gilstate_tstate(PyThreadState *);
158
static void unbind_gilstate_tstate(PyThreadState *);
159
160
static void tstate_mimalloc_bind(PyThreadState *);
161
162
static void
163
bind_tstate(PyThreadState *tstate)
164
28
{
165
28
    assert(tstate != NULL);
166
28
    assert(tstate_is_alive(tstate) && !tstate->_status.bound);
167
28
    assert(!tstate->_status.unbound);  // just in case
168
28
    assert(!tstate->_status.bound_gilstate);
169
28
    assert(tstate != gilstate_get());
170
28
    assert(!tstate->_status.active);
171
28
    assert(tstate->thread_id == 0);
172
28
    assert(tstate->native_thread_id == 0);
173
174
    // Currently we don't necessarily store the thread state
175
    // in thread-local storage (e.g. per-interpreter).
176
177
28
    tstate->thread_id = PyThread_get_thread_ident();
178
28
#ifdef PY_HAVE_THREAD_NATIVE_ID
179
28
    tstate->native_thread_id = PyThread_get_thread_native_id();
180
28
#endif
181
182
#ifdef Py_GIL_DISABLED
183
    // Initialize biased reference counting inter-thread queue. Note that this
184
    // needs to be initialized from the active thread.
185
    _Py_brc_init_thread(tstate);
186
#endif
187
188
    // mimalloc state needs to be initialized from the active thread.
189
28
    tstate_mimalloc_bind(tstate);
190
191
28
    tstate->_status.bound = 1;
192
28
}
193
194
static void
195
unbind_tstate(PyThreadState *tstate)
196
0
{
197
0
    assert(tstate != NULL);
198
0
    assert(tstate_is_bound(tstate));
199
0
#ifndef HAVE_PTHREAD_STUBS
200
0
    assert(tstate->thread_id > 0);
201
0
#endif
202
0
#ifdef PY_HAVE_THREAD_NATIVE_ID
203
0
    assert(tstate->native_thread_id > 0);
204
0
#endif
205
206
    // We leave thread_id and native_thread_id alone
207
    // since they can be useful for debugging.
208
    // Check the `_status` field to know if these values
209
    // are still valid.
210
211
    // We leave tstate->_status.bound set to 1
212
    // to indicate it was previously bound.
213
0
    tstate->_status.unbound = 1;
214
0
}
215
216
217
/* Stick the thread state for this thread in thread specific storage.
218
219
   When a thread state is created for a thread by some mechanism
220
   other than PyGILState_Ensure(), it's important that the GILState
221
   machinery knows about it so it doesn't try to create another
222
   thread state for the thread.
223
   (This is a better fix for SF bug #1010677 than the first one attempted.)
224
225
   The only situation where you can legitimately have more than one
226
   thread state for an OS level thread is when there are multiple
227
   interpreters.
228
229
   Before 3.12, the PyGILState_*() APIs didn't work with multiple
230
   interpreters (see bpo-10915 and bpo-15751), so this function used
231
   to set TSS only once.  Thus, the first thread state created for that
232
   given OS level thread would "win", which seemed reasonable behaviour.
233
*/
234
235
static void
236
bind_gilstate_tstate(PyThreadState *tstate)
237
28
{
238
28
    assert(tstate != NULL);
239
28
    assert(tstate_is_alive(tstate));
240
28
    assert(tstate_is_bound(tstate));
241
    // XXX assert(!tstate->_status.active);
242
28
    assert(!tstate->_status.bound_gilstate);
243
244
28
    PyThreadState *tcur = gilstate_get();
245
28
    assert(tstate != tcur);
246
247
28
    if (tcur != NULL) {
248
0
        tcur->_status.bound_gilstate = 0;
249
0
    }
250
28
    gilstate_set(tstate);
251
28
    tstate->_status.bound_gilstate = 1;
252
28
}
253
254
static void
255
unbind_gilstate_tstate(PyThreadState *tstate)
256
0
{
257
0
    assert(tstate != NULL);
258
    // XXX assert(tstate_is_alive(tstate));
259
0
    assert(tstate_is_bound(tstate));
260
    // XXX assert(!tstate->_status.active);
261
0
    assert(tstate->_status.bound_gilstate);
262
0
    assert(tstate == gilstate_get());
263
0
    gilstate_clear();
264
0
    tstate->_status.bound_gilstate = 0;
265
0
}
266
267
268
//----------------------------------------------
269
// the thread state that currently holds the GIL
270
//----------------------------------------------
271
272
/* This is not exported, as it is not reliable!  It can only
273
   ever be compared to the state for the *current* thread.
274
   * If not equal, then it doesn't matter that the actual
275
     value may change immediately after comparison, as it can't
276
     possibly change to the current thread's state.
277
   * If equal, then the current thread holds the lock, so the value can't
278
     change until we yield the lock.
279
*/
280
static int
281
holds_gil(PyThreadState *tstate)
282
0
{
283
    // XXX Fall back to tstate->interp->runtime->ceval.gil.last_holder
284
    // (and tstate->interp->runtime->ceval.gil.locked).
285
0
    assert(tstate != NULL);
286
    /* Must be the tstate for this thread */
287
0
    assert(tstate == gilstate_get());
288
0
    return tstate == current_fast_get();
289
0
}
290
291
292
/****************************/
293
/* the global runtime state */
294
/****************************/
295
296
//----------
297
// lifecycle
298
//----------
299
300
/* Suppress deprecation warning for PyBytesObject.ob_shash */
301
_Py_COMP_DIAG_PUSH
302
_Py_COMP_DIAG_IGNORE_DEPR_DECLS
303
/* We use "initial" if the runtime gets re-used
304
   (e.g. Py_Finalize() followed by Py_Initialize().
305
   Note that we initialize "initial" relative to _PyRuntime,
306
   to ensure pre-initialized pointers point to the active
307
   runtime state (and not "initial"). */
308
static const _PyRuntimeState initial = _PyRuntimeState_INIT(_PyRuntime, "");
309
_Py_COMP_DIAG_POP
310
311
#define LOCKS_INIT(runtime) \
312
0
    { \
313
0
        &(runtime)->interpreters.mutex, \
314
0
        &(runtime)->xi.data_lookup.registry.mutex, \
315
0
        &(runtime)->unicode_state.ids.mutex, \
316
0
        &(runtime)->imports.extensions.mutex, \
317
0
        &(runtime)->ceval.pending_mainthread.mutex, \
318
0
        &(runtime)->atexit.mutex, \
319
0
        &(runtime)->audit_hooks.mutex, \
320
0
        &(runtime)->allocators.mutex, \
321
0
        &(runtime)->_main_interpreter.types.mutex, \
322
0
        &(runtime)->_main_interpreter.code_state.mutex, \
323
0
    }
324
325
static void
326
init_runtime(_PyRuntimeState *runtime,
327
             void *open_code_hook, void *open_code_userdata,
328
             _Py_AuditHookEntry *audit_hook_head,
329
             Py_ssize_t unicode_next_index)
330
28
{
331
28
    assert(!runtime->preinitializing);
332
28
    assert(!runtime->preinitialized);
333
28
    assert(!runtime->core_initialized);
334
28
    assert(!runtime->initialized);
335
28
    assert(!runtime->_initialized);
336
337
28
    runtime->open_code_hook = open_code_hook;
338
28
    runtime->open_code_userdata = open_code_userdata;
339
28
    runtime->audit_hooks.head = audit_hook_head;
340
341
28
    PyPreConfig_InitPythonConfig(&runtime->preconfig);
342
343
    // Set it to the ID of the main thread of the main interpreter.
344
28
    runtime->main_thread = PyThread_get_thread_ident();
345
346
28
    runtime->unicode_state.ids.next_index = unicode_next_index;
347
28
    runtime->_initialized = 1;
348
28
}
349
350
PyStatus
351
_PyRuntimeState_Init(_PyRuntimeState *runtime)
352
28
{
353
    /* We preserve the hook across init, because there is
354
       currently no public API to set it between runtime
355
       initialization and interpreter initialization. */
356
28
    void *open_code_hook = runtime->open_code_hook;
357
28
    void *open_code_userdata = runtime->open_code_userdata;
358
28
    _Py_AuditHookEntry *audit_hook_head = runtime->audit_hooks.head;
359
    // bpo-42882: Preserve next_index value if Py_Initialize()/Py_Finalize()
360
    // is called multiple times.
361
28
    Py_ssize_t unicode_next_index = runtime->unicode_state.ids.next_index;
362
363
28
    if (runtime->_initialized) {
364
        // Py_Initialize() must be running again.
365
        // Reset to _PyRuntimeState_INIT.
366
0
        memcpy(runtime, &initial, sizeof(*runtime));
367
        // Preserve the cookie from the original runtime.
368
0
        memcpy(runtime->debug_offsets.cookie, _Py_Debug_Cookie, 8);
369
0
        assert(!runtime->_initialized);
370
0
    }
371
372
28
    PyStatus status = _PyTime_Init(&runtime->time);
373
28
    if (_PyStatus_EXCEPTION(status)) {
374
0
        return status;
375
0
    }
376
377
28
    init_runtime(runtime, open_code_hook, open_code_userdata, audit_hook_head,
378
28
                 unicode_next_index);
379
380
28
    return _PyStatus_OK();
381
28
}
382
383
void
384
_PyRuntimeState_Fini(_PyRuntimeState *runtime)
385
0
{
386
#ifdef Py_REF_DEBUG
387
    /* The count is cleared by _Py_FinalizeRefTotal(). */
388
    assert(runtime->object_state.interpreter_leaks == 0);
389
#endif
390
0
    gilstate_clear();
391
0
}
392
393
#ifdef HAVE_FORK
394
/* This function is called from PyOS_AfterFork_Child to ensure that
395
   newly created child processes do not share locks with the parent. */
396
PyStatus
397
_PyRuntimeState_ReInitThreads(_PyRuntimeState *runtime)
398
0
{
399
    // This was initially set in _PyRuntimeState_Init().
400
0
    runtime->main_thread = PyThread_get_thread_ident();
401
402
    // Clears the parking lot. Any waiting threads are dead. This must be
403
    // called before releasing any locks that use the parking lot.
404
0
    _PyParkingLot_AfterFork();
405
406
    // Re-initialize global locks
407
0
    PyMutex *locks[] = LOCKS_INIT(runtime);
408
0
    for (size_t i = 0; i < Py_ARRAY_LENGTH(locks); i++) {
409
0
        _PyMutex_at_fork_reinit(locks[i]);
410
0
    }
411
#ifdef Py_GIL_DISABLED
412
    for (PyInterpreterState *interp = runtime->interpreters.head;
413
         interp != NULL; interp = interp->next)
414
    {
415
        for (int i = 0; i < NUM_WEAKREF_LIST_LOCKS; i++) {
416
            _PyMutex_at_fork_reinit(&interp->weakref_locks[i]);
417
        }
418
    }
419
#endif
420
421
0
    _PyTypes_AfterFork();
422
423
0
    _PyThread_AfterFork(&runtime->threads);
424
425
0
    return _PyStatus_OK();
426
0
}
427
#endif
428
429
430
/*************************************/
431
/* the per-interpreter runtime state */
432
/*************************************/
433
434
//----------
435
// lifecycle
436
//----------
437
438
/* Calling this indicates that the runtime is ready to create interpreters. */
439
440
PyStatus
441
_PyInterpreterState_Enable(_PyRuntimeState *runtime)
442
28
{
443
28
    struct pyinterpreters *interpreters = &runtime->interpreters;
444
28
    interpreters->next_id = 0;
445
28
    return _PyStatus_OK();
446
28
}
447
448
static PyInterpreterState *
449
alloc_interpreter(void)
450
0
{
451
    // Aligned allocation for PyInterpreterState.
452
    // the first word of the memory block is used to store
453
    // the original pointer to be used later to free the memory.
454
0
    size_t alignment = _Alignof(PyInterpreterState);
455
0
    size_t allocsize = sizeof(PyInterpreterState) + sizeof(void *) + alignment - 1;
456
0
    void *mem = PyMem_RawCalloc(1, allocsize);
457
0
    if (mem == NULL) {
458
0
        return NULL;
459
0
    }
460
0
    void *ptr = _Py_ALIGN_UP((char *)mem + sizeof(void *), alignment);
461
0
    ((void **)ptr)[-1] = mem;
462
0
    assert(_Py_IS_ALIGNED(ptr, alignment));
463
0
    return ptr;
464
0
}
465
466
static void
467
free_interpreter(PyInterpreterState *interp)
468
0
{
469
#ifdef Py_STATS
470
    if (interp->pystats_struct) {
471
        PyMem_RawFree(interp->pystats_struct);
472
        interp->pystats_struct = NULL;
473
    }
474
#endif
475
    // The main interpreter is statically allocated so
476
    // should not be freed.
477
0
    if (interp != &_PyRuntime._main_interpreter) {
478
0
        if (_PyMem_obmalloc_state_on_heap(interp)) {
479
            // interpreter has its own obmalloc state, free it
480
0
            PyMem_RawFree(interp->obmalloc);
481
0
            interp->obmalloc = NULL;
482
0
        }
483
0
        assert(_Py_IS_ALIGNED(interp, _Alignof(PyInterpreterState)));
484
0
        PyMem_RawFree(((void **)interp)[-1]);
485
0
    }
486
0
}
487
488
#ifndef NDEBUG
489
static inline int check_interpreter_whence(long);
490
#endif
491
492
extern _Py_CODEUNIT *
493
_Py_LazyJitTrampoline(
494
    struct _PyExecutorObject *exec, _PyInterpreterFrame *frame, _PyStackRef *stack_pointer, PyThreadState *tstate
495
);
496
497
/* Get the interpreter state to a minimal consistent state.
498
   Further init happens in pylifecycle.c before it can be used.
499
   All fields not initialized here are expected to be zeroed out,
500
   e.g. by PyMem_RawCalloc() or memset(), or otherwise pre-initialized.
501
   The runtime state is not manipulated.  Instead it is assumed that
502
   the interpreter is getting added to the runtime.
503
504
   Note that the main interpreter was statically initialized as part
505
   of the runtime and most state is already set properly.  That leaves
506
   a small number of fields to initialize dynamically, as well as some
507
   that are initialized lazily.
508
509
   For subinterpreters we memcpy() the main interpreter in
510
   PyInterpreterState_New(), leaving it in the same mostly-initialized
511
   state.  The only difference is that the interpreter has some
512
   self-referential state that is statically initializexd to the
513
   main interpreter.  We fix those fields here, in addition
514
   to the other dynamically initialized fields.
515
  */
516
static PyStatus
517
init_interpreter(PyInterpreterState *interp,
518
                 _PyRuntimeState *runtime, int64_t id,
519
                 PyInterpreterState *next,
520
                 long whence)
521
28
{
522
28
    if (interp->_initialized) {
523
0
        return _PyStatus_ERR("interpreter already initialized");
524
0
    }
525
526
28
    assert(interp->_whence == _PyInterpreterState_WHENCE_NOTSET);
527
28
    assert(check_interpreter_whence(whence) == 0);
528
28
    interp->_whence = whence;
529
530
28
    assert(runtime != NULL);
531
28
    interp->runtime = runtime;
532
533
28
    assert(id > 0 || (id == 0 && interp == runtime->interpreters.main));
534
28
    interp->id = id;
535
536
28
    interp->id_refcount = 0;
537
538
28
    assert(runtime->interpreters.head == interp);
539
28
    assert(next != NULL || (interp == runtime->interpreters.main));
540
28
    interp->next = next;
541
542
28
    interp->threads.preallocated = &interp->_initial_thread;
543
544
    // We would call _PyObject_InitState() at this point
545
    // if interp->feature_flags were alredy set.
546
547
28
    _PyEval_InitState(interp);
548
28
    _PyGC_InitState(&interp->gc);
549
28
    PyConfig_InitPythonConfig(&interp->config);
550
28
    _PyType_InitCache(interp);
551
#ifdef Py_GIL_DISABLED
552
    _Py_brc_init_state(interp);
553
#endif
554
555
28
    llist_init(&interp->mem_free_queue.head);
556
28
    llist_init(&interp->asyncio_tasks_head);
557
28
    interp->asyncio_tasks_lock = (PyMutex){0};
558
476
    for (int i = 0; i < _PY_MONITORING_UNGROUPED_EVENTS; i++) {
559
448
        interp->monitors.tools[i] = 0;
560
448
    }
561
252
    for (int t = 0; t < PY_MONITORING_TOOL_IDS; t++) {
562
4.48k
        for (int e = 0; e < _PY_MONITORING_EVENTS; e++) {
563
4.25k
            interp->monitoring_callables[t][e] = NULL;
564
565
4.25k
        }
566
224
        interp->monitoring_tool_versions[t] = 0;
567
224
    }
568
28
    interp->_code_object_generation = 0;
569
28
    interp->jit = false;
570
28
    interp->compiling = false;
571
28
    interp->executor_list_head = NULL;
572
28
    interp->executor_deletion_list_head = NULL;
573
28
    interp->executor_deletion_list_remaining_capacity = 0;
574
28
    interp->executor_creation_counter = JIT_CLEANUP_THRESHOLD;
575
28
    if (interp != &runtime->_main_interpreter) {
576
        /* Fix the self-referential, statically initialized fields. */
577
0
        interp->dtoa = (struct _dtoa_state)_dtoa_state_INIT(interp);
578
0
    }
579
#if !defined(Py_GIL_DISABLED) && defined(Py_STACKREF_DEBUG)
580
    interp->next_stackref = INITIAL_STACKREF_INDEX;
581
    _Py_hashtable_allocator_t alloc = {
582
        .malloc = malloc,
583
        .free = free,
584
    };
585
    interp->open_stackrefs_table = _Py_hashtable_new_full(
586
        _Py_hashtable_hash_ptr,
587
        _Py_hashtable_compare_direct,
588
        NULL,
589
        NULL,
590
        &alloc
591
    );
592
#  ifdef Py_STACKREF_CLOSE_DEBUG
593
    interp->closed_stackrefs_table = _Py_hashtable_new_full(
594
        _Py_hashtable_hash_ptr,
595
        _Py_hashtable_compare_direct,
596
        NULL,
597
        NULL,
598
        &alloc
599
    );
600
#  endif
601
    _Py_stackref_associate(interp, Py_None, PyStackRef_None);
602
    _Py_stackref_associate(interp, Py_False, PyStackRef_False);
603
    _Py_stackref_associate(interp, Py_True, PyStackRef_True);
604
#endif
605
606
28
    interp->_initialized = 1;
607
28
    return _PyStatus_OK();
608
28
}
609
610
611
PyStatus
612
_PyInterpreterState_New(PyThreadState *tstate, PyInterpreterState **pinterp)
613
28
{
614
28
    *pinterp = NULL;
615
616
    // Don't get runtime from tstate since tstate can be NULL
617
28
    _PyRuntimeState *runtime = &_PyRuntime;
618
619
    // tstate is NULL when pycore_create_interpreter() calls
620
    // _PyInterpreterState_New() to create the main interpreter.
621
28
    if (tstate != NULL) {
622
0
        if (_PySys_Audit(tstate, "cpython.PyInterpreterState_New", NULL) < 0) {
623
0
            return _PyStatus_ERR("sys.audit failed");
624
0
        }
625
0
    }
626
627
    /* We completely serialize creation of multiple interpreters, since
628
       it simplifies things here and blocking concurrent calls isn't a problem.
629
       Regardless, we must fully block subinterpreter creation until
630
       after the main interpreter is created. */
631
28
    HEAD_LOCK(runtime);
632
633
28
    struct pyinterpreters *interpreters = &runtime->interpreters;
634
28
    int64_t id = interpreters->next_id;
635
28
    interpreters->next_id += 1;
636
637
    // Allocate the interpreter and add it to the runtime state.
638
28
    PyInterpreterState *interp;
639
28
    PyStatus status;
640
28
    PyInterpreterState *old_head = interpreters->head;
641
28
    if (old_head == NULL) {
642
        // We are creating the main interpreter.
643
28
        assert(interpreters->main == NULL);
644
28
        assert(id == 0);
645
646
28
        interp = &runtime->_main_interpreter;
647
28
        assert(interp->id == 0);
648
28
        assert(interp->next == NULL);
649
650
28
        interpreters->main = interp;
651
28
    }
652
0
    else {
653
0
        assert(interpreters->main != NULL);
654
0
        assert(id != 0);
655
656
0
        interp = alloc_interpreter();
657
0
        if (interp == NULL) {
658
0
            status = _PyStatus_NO_MEMORY();
659
0
            goto error;
660
0
        }
661
        // Set to _PyInterpreterState_INIT.
662
0
        memcpy(interp, &initial._main_interpreter, sizeof(*interp));
663
664
0
        if (id < 0) {
665
            /* overflow or Py_Initialize() not called yet! */
666
0
            status = _PyStatus_ERR("failed to get an interpreter ID");
667
0
            goto error;
668
0
        }
669
0
    }
670
28
    interpreters->head = interp;
671
672
28
    long whence = _PyInterpreterState_WHENCE_UNKNOWN;
673
28
    status = init_interpreter(interp, runtime,
674
28
                              id, old_head, whence);
675
28
    if (_PyStatus_EXCEPTION(status)) {
676
0
        goto error;
677
0
    }
678
679
28
    HEAD_UNLOCK(runtime);
680
681
28
    assert(interp != NULL);
682
28
    *pinterp = interp;
683
28
    return _PyStatus_OK();
684
685
0
error:
686
0
    HEAD_UNLOCK(runtime);
687
688
0
    if (interp != NULL) {
689
0
        free_interpreter(interp);
690
0
    }
691
0
    return status;
692
28
}
693
694
695
PyInterpreterState *
696
PyInterpreterState_New(void)
697
0
{
698
    // tstate can be NULL
699
0
    PyThreadState *tstate = current_fast_get();
700
701
0
    PyInterpreterState *interp;
702
0
    PyStatus status = _PyInterpreterState_New(tstate, &interp);
703
0
    if (_PyStatus_EXCEPTION(status)) {
704
0
        Py_ExitStatusException(status);
705
0
    }
706
0
    assert(interp != NULL);
707
0
    return interp;
708
0
}
709
710
#if !defined(Py_GIL_DISABLED) && defined(Py_STACKREF_DEBUG)
711
extern void
712
_Py_stackref_report_leaks(PyInterpreterState *interp);
713
#endif
714
715
static void
716
interpreter_clear(PyInterpreterState *interp, PyThreadState *tstate)
717
0
{
718
0
    assert(interp != NULL);
719
0
    assert(tstate != NULL);
720
0
    _PyRuntimeState *runtime = interp->runtime;
721
722
    /* XXX Conditions we need to enforce:
723
724
       * the GIL must be held by the current thread
725
       * tstate must be the "current" thread state (current_fast_get())
726
       * tstate->interp must be interp
727
       * for the main interpreter, tstate must be the main thread
728
     */
729
    // XXX Ideally, we would not rely on any thread state in this function
730
    // (and we would drop the "tstate" argument).
731
732
0
    if (_PySys_Audit(tstate, "cpython.PyInterpreterState_Clear", NULL) < 0) {
733
0
        _PyErr_Clear(tstate);
734
0
    }
735
736
    // Clear the current/main thread state last.
737
0
    _Py_FOR_EACH_TSTATE_BEGIN(interp, p) {
738
        // See https://github.com/python/cpython/issues/102126
739
        // Must be called without HEAD_LOCK held as it can deadlock
740
        // if any finalizer tries to acquire that lock.
741
0
        HEAD_UNLOCK(runtime);
742
0
        PyThreadState_Clear(p);
743
0
        HEAD_LOCK(runtime);
744
0
    }
745
0
    _Py_FOR_EACH_TSTATE_END(interp);
746
0
    if (tstate->interp == interp) {
747
        /* We fix tstate->_status below when we for sure aren't using it
748
           (e.g. no longer need the GIL). */
749
        // XXX Eliminate the need to do this.
750
0
        tstate->_status.cleared = 0;
751
0
    }
752
753
    /* It is possible that any of the objects below have a finalizer
754
       that runs Python code or otherwise relies on a thread state
755
       or even the interpreter state.  For now we trust that isn't
756
       a problem.
757
     */
758
    // XXX Make sure we properly deal with problematic finalizers.
759
760
0
    Py_CLEAR(interp->audit_hooks);
761
762
    // gh-140257: Threads have already been cleared, but daemon threads may
763
    // still access eval_breaker atomically via take_gil() right before they
764
    // hang. Use an atomic store to prevent data races during finalization.
765
0
    interp->ceval.instrumentation_version = 0;
766
0
    _Py_atomic_store_uintptr(&tstate->eval_breaker, 0);
767
768
0
    for (int i = 0; i < _PY_MONITORING_UNGROUPED_EVENTS; i++) {
769
0
        interp->monitors.tools[i] = 0;
770
0
    }
771
0
    for (int t = 0; t < PY_MONITORING_TOOL_IDS; t++) {
772
0
        for (int e = 0; e < _PY_MONITORING_EVENTS; e++) {
773
0
            Py_CLEAR(interp->monitoring_callables[t][e]);
774
0
        }
775
0
    }
776
0
    for (int t = 0; t < PY_MONITORING_TOOL_IDS; t++) {
777
0
        Py_CLEAR(interp->monitoring_tool_names[t]);
778
0
    }
779
0
    interp->_code_object_generation = 0;
780
#ifdef Py_GIL_DISABLED
781
    interp->tlbc_indices.tlbc_generation = 0;
782
#endif
783
784
0
    PyConfig_Clear(&interp->config);
785
0
    _PyCodec_Fini(interp);
786
787
0
    assert(interp->imports.modules == NULL);
788
0
    assert(interp->imports.modules_by_index == NULL);
789
0
    assert(interp->imports.importlib == NULL);
790
0
    assert(interp->imports.import_func == NULL);
791
792
0
    Py_CLEAR(interp->sysdict_copy);
793
0
    Py_CLEAR(interp->builtins_copy);
794
0
    Py_CLEAR(interp->dict);
795
0
#ifdef HAVE_FORK
796
0
    Py_CLEAR(interp->before_forkers);
797
0
    Py_CLEAR(interp->after_forkers_parent);
798
0
    Py_CLEAR(interp->after_forkers_child);
799
0
#endif
800
801
802
#ifdef _Py_TIER2
803
    _Py_ClearExecutorDeletionList(interp);
804
#endif
805
0
    _PyAST_Fini(interp);
806
0
    _PyAtExit_Fini(interp);
807
808
    // All Python types must be destroyed before the last GC collection. Python
809
    // types create a reference cycle to themselves in their in their
810
    // PyTypeObject.tp_mro member (the tuple contains the type).
811
812
    /* Last garbage collection on this interpreter */
813
0
    _PyGC_CollectNoFail(tstate);
814
0
    _PyGC_Fini(interp);
815
816
    // Finalize warnings after last gc so that any finalizers can
817
    // access warnings state
818
0
    _PyWarnings_Fini(interp);
819
0
    struct _PyExecutorObject *cold = interp->cold_executor;
820
0
    if (cold != NULL) {
821
0
        interp->cold_executor = NULL;
822
0
        assert(cold->vm_data.valid);
823
0
        assert(cold->vm_data.warm);
824
0
        _PyExecutor_Free(cold);
825
0
    }
826
827
0
    struct _PyExecutorObject *cold_dynamic = interp->cold_dynamic_executor;
828
0
    if (cold_dynamic != NULL) {
829
0
        interp->cold_dynamic_executor = NULL;
830
0
        assert(cold_dynamic->vm_data.valid);
831
0
        assert(cold_dynamic->vm_data.warm);
832
0
        _PyExecutor_Free(cold_dynamic);
833
0
    }
834
    /* We don't clear sysdict and builtins until the end of this function.
835
       Because clearing other attributes can execute arbitrary Python code
836
       which requires sysdict and builtins. */
837
0
    PyDict_Clear(interp->sysdict);
838
0
    PyDict_Clear(interp->builtins);
839
0
    Py_CLEAR(interp->sysdict);
840
0
    Py_CLEAR(interp->builtins);
841
842
#if !defined(Py_GIL_DISABLED) && defined(Py_STACKREF_DEBUG)
843
#  ifdef Py_STACKREF_CLOSE_DEBUG
844
    _Py_hashtable_destroy(interp->closed_stackrefs_table);
845
    interp->closed_stackrefs_table = NULL;
846
#  endif
847
    _Py_stackref_report_leaks(interp);
848
    _Py_hashtable_destroy(interp->open_stackrefs_table);
849
    interp->open_stackrefs_table = NULL;
850
#endif
851
852
0
    if (tstate->interp == interp) {
853
        /* We are now safe to fix tstate->_status.cleared. */
854
        // XXX Do this (much) earlier?
855
0
        tstate->_status.cleared = 1;
856
0
    }
857
858
0
    for (int i=0; i < DICT_MAX_WATCHERS; i++) {
859
0
        interp->dict_state.watchers[i] = NULL;
860
0
    }
861
862
0
    for (int i=0; i < TYPE_MAX_WATCHERS; i++) {
863
0
        interp->type_watchers[i] = NULL;
864
0
    }
865
866
0
    for (int i=0; i < FUNC_MAX_WATCHERS; i++) {
867
0
        interp->func_watchers[i] = NULL;
868
0
    }
869
0
    interp->active_func_watchers = 0;
870
871
0
    for (int i=0; i < CODE_MAX_WATCHERS; i++) {
872
0
        interp->code_watchers[i] = NULL;
873
0
    }
874
0
    interp->active_code_watchers = 0;
875
876
0
    for (int i=0; i < CONTEXT_MAX_WATCHERS; i++) {
877
0
        interp->context_watchers[i] = NULL;
878
0
    }
879
0
    interp->active_context_watchers = 0;
880
    // XXX Once we have one allocator per interpreter (i.e.
881
    // per-interpreter GC) we must ensure that all of the interpreter's
882
    // objects have been cleaned up at the point.
883
884
    // We could clear interp->threads.freelist here
885
    // if it held more than just the initial thread state.
886
0
}
887
888
889
void
890
PyInterpreterState_Clear(PyInterpreterState *interp)
891
0
{
892
    // Use the current Python thread state to call audit hooks and to collect
893
    // garbage. It can be different than the current Python thread state
894
    // of 'interp'.
895
0
    PyThreadState *current_tstate = current_fast_get();
896
0
    _PyImport_ClearCore(interp);
897
0
    interpreter_clear(interp, current_tstate);
898
0
}
899
900
901
void
902
_PyInterpreterState_Clear(PyThreadState *tstate)
903
0
{
904
0
    _PyImport_ClearCore(tstate->interp);
905
0
    interpreter_clear(tstate->interp, tstate);
906
0
}
907
908
909
static inline void tstate_deactivate(PyThreadState *tstate);
910
static void tstate_set_detached(PyThreadState *tstate, int detached_state);
911
static void zapthreads(PyInterpreterState *interp);
912
913
void
914
PyInterpreterState_Delete(PyInterpreterState *interp)
915
0
{
916
0
    _PyRuntimeState *runtime = interp->runtime;
917
0
    struct pyinterpreters *interpreters = &runtime->interpreters;
918
919
    // XXX Clearing the "current" thread state should happen before
920
    // we start finalizing the interpreter (or the current thread state).
921
0
    PyThreadState *tcur = current_fast_get();
922
0
    if (tcur != NULL && interp == tcur->interp) {
923
        /* Unset current thread.  After this, many C API calls become crashy. */
924
0
        _PyThreadState_Detach(tcur);
925
0
    }
926
927
0
    zapthreads(interp);
928
929
    // XXX These two calls should be done at the end of clear_interpreter(),
930
    // but currently some objects get decref'ed after that.
931
#ifdef Py_REF_DEBUG
932
    _PyInterpreterState_FinalizeRefTotal(interp);
933
#endif
934
0
    _PyInterpreterState_FinalizeAllocatedBlocks(interp);
935
936
0
    HEAD_LOCK(runtime);
937
0
    PyInterpreterState **p;
938
0
    for (p = &interpreters->head; ; p = &(*p)->next) {
939
0
        if (*p == NULL) {
940
0
            Py_FatalError("NULL interpreter");
941
0
        }
942
0
        if (*p == interp) {
943
0
            break;
944
0
        }
945
0
    }
946
0
    if (interp->threads.head != NULL) {
947
0
        Py_FatalError("remaining threads");
948
0
    }
949
0
    *p = interp->next;
950
951
0
    if (interpreters->main == interp) {
952
0
        interpreters->main = NULL;
953
0
        if (interpreters->head != NULL) {
954
0
            Py_FatalError("remaining subinterpreters");
955
0
        }
956
0
    }
957
0
    HEAD_UNLOCK(runtime);
958
959
0
    _Py_qsbr_fini(interp);
960
961
0
    _PyObject_FiniState(interp);
962
963
0
    PyConfig_Clear(&interp->config);
964
965
0
    free_interpreter(interp);
966
0
}
967
968
969
#ifdef HAVE_FORK
970
/*
971
 * Delete all interpreter states except the main interpreter.  If there
972
 * is a current interpreter state, it *must* be the main interpreter.
973
 */
974
PyStatus
975
_PyInterpreterState_DeleteExceptMain(_PyRuntimeState *runtime)
976
0
{
977
0
    struct pyinterpreters *interpreters = &runtime->interpreters;
978
979
0
    PyThreadState *tstate = _PyThreadState_Swap(runtime, NULL);
980
0
    if (tstate != NULL && tstate->interp != interpreters->main) {
981
0
        return _PyStatus_ERR("not main interpreter");
982
0
    }
983
984
0
    HEAD_LOCK(runtime);
985
0
    PyInterpreterState *interp = interpreters->head;
986
0
    interpreters->head = NULL;
987
0
    while (interp != NULL) {
988
0
        if (interp == interpreters->main) {
989
0
            interpreters->main->next = NULL;
990
0
            interpreters->head = interp;
991
0
            interp = interp->next;
992
0
            continue;
993
0
        }
994
995
        // XXX Won't this fail since PyInterpreterState_Clear() requires
996
        // the "current" tstate to be set?
997
0
        PyInterpreterState_Clear(interp);  // XXX must activate?
998
0
        zapthreads(interp);
999
0
        PyInterpreterState *prev_interp = interp;
1000
0
        interp = interp->next;
1001
0
        free_interpreter(prev_interp);
1002
0
    }
1003
0
    HEAD_UNLOCK(runtime);
1004
1005
0
    if (interpreters->head == NULL) {
1006
0
        return _PyStatus_ERR("missing main interpreter");
1007
0
    }
1008
0
    _PyThreadState_Swap(runtime, tstate);
1009
0
    return _PyStatus_OK();
1010
0
}
1011
#endif
1012
1013
static inline void
1014
set_main_thread(PyInterpreterState *interp, PyThreadState *tstate)
1015
0
{
1016
0
    _Py_atomic_store_ptr_relaxed(&interp->threads.main, tstate);
1017
0
}
1018
1019
static inline PyThreadState *
1020
get_main_thread(PyInterpreterState *interp)
1021
0
{
1022
0
    return _Py_atomic_load_ptr_relaxed(&interp->threads.main);
1023
0
}
1024
1025
void
1026
_PyErr_SetInterpreterAlreadyRunning(void)
1027
0
{
1028
0
    PyErr_SetString(PyExc_InterpreterError, "interpreter already running");
1029
0
}
1030
1031
int
1032
_PyInterpreterState_SetRunningMain(PyInterpreterState *interp)
1033
0
{
1034
0
    if (get_main_thread(interp) != NULL) {
1035
0
        _PyErr_SetInterpreterAlreadyRunning();
1036
0
        return -1;
1037
0
    }
1038
0
    PyThreadState *tstate = current_fast_get();
1039
0
    _Py_EnsureTstateNotNULL(tstate);
1040
0
    if (tstate->interp != interp) {
1041
0
        PyErr_SetString(PyExc_RuntimeError,
1042
0
                        "current tstate has wrong interpreter");
1043
0
        return -1;
1044
0
    }
1045
0
    set_main_thread(interp, tstate);
1046
1047
0
    return 0;
1048
0
}
1049
1050
void
1051
_PyInterpreterState_SetNotRunningMain(PyInterpreterState *interp)
1052
0
{
1053
0
    assert(get_main_thread(interp) == current_fast_get());
1054
0
    set_main_thread(interp, NULL);
1055
0
}
1056
1057
int
1058
_PyInterpreterState_IsRunningMain(PyInterpreterState *interp)
1059
0
{
1060
0
    if (get_main_thread(interp) != NULL) {
1061
0
        return 1;
1062
0
    }
1063
    // Embedders might not know to call _PyInterpreterState_SetRunningMain(),
1064
    // so their main thread wouldn't show it is running the main interpreter's
1065
    // program.  (Py_Main() doesn't have this problem.)  For now this isn't
1066
    // critical.  If it were, we would need to infer "running main" from other
1067
    // information, like if it's the main interpreter.  We used to do that
1068
    // but the naive approach led to some inconsistencies that caused problems.
1069
0
    return 0;
1070
0
}
1071
1072
int
1073
_PyThreadState_IsRunningMain(PyThreadState *tstate)
1074
0
{
1075
0
    PyInterpreterState *interp = tstate->interp;
1076
    // See the note in _PyInterpreterState_IsRunningMain() about
1077
    // possible false negatives here for embedders.
1078
0
    return get_main_thread(interp) == tstate;
1079
0
}
1080
1081
void
1082
_PyInterpreterState_ReinitRunningMain(PyThreadState *tstate)
1083
0
{
1084
0
    PyInterpreterState *interp = tstate->interp;
1085
0
    if (get_main_thread(interp) != tstate) {
1086
0
        set_main_thread(interp, NULL);
1087
0
    }
1088
0
}
1089
1090
1091
//----------
1092
// accessors
1093
//----------
1094
1095
int
1096
_PyInterpreterState_IsReady(PyInterpreterState *interp)
1097
0
{
1098
0
    return interp->_ready;
1099
0
}
1100
1101
#ifndef NDEBUG
1102
static inline int
1103
check_interpreter_whence(long whence)
1104
{
1105
    if(whence < 0) {
1106
        return -1;
1107
    }
1108
    if (whence > _PyInterpreterState_WHENCE_MAX) {
1109
        return -1;
1110
    }
1111
    return 0;
1112
}
1113
#endif
1114
1115
long
1116
_PyInterpreterState_GetWhence(PyInterpreterState *interp)
1117
0
{
1118
0
    assert(check_interpreter_whence(interp->_whence) == 0);
1119
0
    return interp->_whence;
1120
0
}
1121
1122
void
1123
_PyInterpreterState_SetWhence(PyInterpreterState *interp, long whence)
1124
28
{
1125
28
    assert(interp->_whence != _PyInterpreterState_WHENCE_NOTSET);
1126
28
    assert(check_interpreter_whence(whence) == 0);
1127
28
    interp->_whence = whence;
1128
28
}
1129
1130
1131
PyObject *
1132
_Py_GetMainModule(PyThreadState *tstate)
1133
0
{
1134
    // We return None to indicate "not found" or "bogus".
1135
0
    PyObject *modules = _PyImport_GetModulesRef(tstate->interp);
1136
0
    if (modules == Py_None) {
1137
0
        return modules;
1138
0
    }
1139
0
    PyObject *module = NULL;
1140
0
    (void)PyMapping_GetOptionalItem(modules, &_Py_ID(__main__), &module);
1141
0
    Py_DECREF(modules);
1142
0
    if (module == NULL && !PyErr_Occurred()) {
1143
0
        Py_RETURN_NONE;
1144
0
    }
1145
0
    return module;
1146
0
}
1147
1148
int
1149
_Py_CheckMainModule(PyObject *module)
1150
0
{
1151
0
    if (module == NULL || module == Py_None) {
1152
0
        if (!PyErr_Occurred()) {
1153
0
            (void)_PyErr_SetModuleNotFoundError(&_Py_ID(__main__));
1154
0
        }
1155
0
        return -1;
1156
0
    }
1157
0
    if (!Py_IS_TYPE(module, &PyModule_Type)) {
1158
        /* The __main__ module has been tampered with. */
1159
0
        PyObject *msg = PyUnicode_FromString("invalid __main__ module");
1160
0
        if (msg != NULL) {
1161
0
            (void)PyErr_SetImportError(msg, &_Py_ID(__main__), NULL);
1162
0
            Py_DECREF(msg);
1163
0
        }
1164
0
        return -1;
1165
0
    }
1166
0
    return 0;
1167
0
}
1168
1169
1170
PyObject *
1171
PyInterpreterState_GetDict(PyInterpreterState *interp)
1172
36
{
1173
36
    if (interp->dict == NULL) {
1174
10
        interp->dict = PyDict_New();
1175
10
        if (interp->dict == NULL) {
1176
0
            PyErr_Clear();
1177
0
        }
1178
10
    }
1179
    /* Returning NULL means no per-interpreter dict is available. */
1180
36
    return interp->dict;
1181
36
}
1182
1183
1184
//----------
1185
// interp ID
1186
//----------
1187
1188
int64_t
1189
_PyInterpreterState_ObjectToID(PyObject *idobj)
1190
0
{
1191
0
    if (!_PyIndex_Check(idobj)) {
1192
0
        PyErr_Format(PyExc_TypeError,
1193
0
                     "interpreter ID must be an int, got %.100s",
1194
0
                     Py_TYPE(idobj)->tp_name);
1195
0
        return -1;
1196
0
    }
1197
1198
    // This may raise OverflowError.
1199
    // For now, we don't worry about if LLONG_MAX < INT64_MAX.
1200
0
    long long id = PyLong_AsLongLong(idobj);
1201
0
    if (id == -1 && PyErr_Occurred()) {
1202
0
        return -1;
1203
0
    }
1204
1205
0
    if (id < 0) {
1206
0
        PyErr_Format(PyExc_ValueError,
1207
0
                     "interpreter ID must be a non-negative int, got %R",
1208
0
                     idobj);
1209
0
        return -1;
1210
0
    }
1211
#if LLONG_MAX > INT64_MAX
1212
    else if (id > INT64_MAX) {
1213
        PyErr_SetString(PyExc_OverflowError, "int too big to convert");
1214
        return -1;
1215
    }
1216
#endif
1217
0
    else {
1218
0
        return (int64_t)id;
1219
0
    }
1220
0
}
1221
1222
int64_t
1223
PyInterpreterState_GetID(PyInterpreterState *interp)
1224
0
{
1225
0
    if (interp == NULL) {
1226
0
        PyErr_SetString(PyExc_RuntimeError, "no interpreter provided");
1227
0
        return -1;
1228
0
    }
1229
0
    return interp->id;
1230
0
}
1231
1232
PyObject *
1233
_PyInterpreterState_GetIDObject(PyInterpreterState *interp)
1234
0
{
1235
0
    int64_t interpid = interp->id;
1236
0
    if (interpid < 0) {
1237
0
        return NULL;
1238
0
    }
1239
0
    assert(interpid < LLONG_MAX);
1240
0
    return PyLong_FromLongLong(interpid);
1241
0
}
1242
1243
1244
1245
void
1246
_PyInterpreterState_IDIncref(PyInterpreterState *interp)
1247
0
{
1248
0
    _Py_atomic_add_ssize(&interp->id_refcount, 1);
1249
0
}
1250
1251
1252
void
1253
_PyInterpreterState_IDDecref(PyInterpreterState *interp)
1254
0
{
1255
0
    _PyRuntimeState *runtime = interp->runtime;
1256
1257
0
    Py_ssize_t refcount = _Py_atomic_add_ssize(&interp->id_refcount, -1);
1258
1259
0
    if (refcount == 1 && interp->requires_idref) {
1260
0
        PyThreadState *tstate =
1261
0
            _PyThreadState_NewBound(interp, _PyThreadState_WHENCE_FINI);
1262
1263
        // XXX Possible GILState issues?
1264
0
        PyThreadState *save_tstate = _PyThreadState_Swap(runtime, tstate);
1265
0
        Py_EndInterpreter(tstate);
1266
0
        _PyThreadState_Swap(runtime, save_tstate);
1267
0
    }
1268
0
}
1269
1270
int
1271
_PyInterpreterState_RequiresIDRef(PyInterpreterState *interp)
1272
0
{
1273
0
    return interp->requires_idref;
1274
0
}
1275
1276
void
1277
_PyInterpreterState_RequireIDRef(PyInterpreterState *interp, int required)
1278
0
{
1279
0
    interp->requires_idref = required ? 1 : 0;
1280
0
}
1281
1282
1283
//-----------------------------
1284
// look up an interpreter state
1285
//-----------------------------
1286
1287
/* Return the interpreter associated with the current OS thread.
1288
1289
   The GIL must be held.
1290
  */
1291
1292
PyInterpreterState*
1293
PyInterpreterState_Get(void)
1294
72
{
1295
72
    _Py_AssertHoldsTstate();
1296
72
    PyInterpreterState *interp = _Py_tss_interp;
1297
72
    if (interp == NULL) {
1298
0
        Py_FatalError("no current interpreter");
1299
0
    }
1300
72
    return interp;
1301
72
}
1302
1303
1304
static PyInterpreterState *
1305
interp_look_up_id(_PyRuntimeState *runtime, int64_t requested_id)
1306
0
{
1307
0
    PyInterpreterState *interp = runtime->interpreters.head;
1308
0
    while (interp != NULL) {
1309
0
        int64_t id = interp->id;
1310
0
        assert(id >= 0);
1311
0
        if (requested_id == id) {
1312
0
            return interp;
1313
0
        }
1314
0
        interp = PyInterpreterState_Next(interp);
1315
0
    }
1316
0
    return NULL;
1317
0
}
1318
1319
/* Return the interpreter state with the given ID.
1320
1321
   Fail with RuntimeError if the interpreter is not found. */
1322
1323
PyInterpreterState *
1324
_PyInterpreterState_LookUpID(int64_t requested_id)
1325
0
{
1326
0
    PyInterpreterState *interp = NULL;
1327
0
    if (requested_id >= 0) {
1328
0
        _PyRuntimeState *runtime = &_PyRuntime;
1329
0
        HEAD_LOCK(runtime);
1330
0
        interp = interp_look_up_id(runtime, requested_id);
1331
0
        HEAD_UNLOCK(runtime);
1332
0
    }
1333
0
    if (interp == NULL && !PyErr_Occurred()) {
1334
0
        PyErr_Format(PyExc_InterpreterNotFoundError,
1335
0
                     "unrecognized interpreter ID %lld", requested_id);
1336
0
    }
1337
0
    return interp;
1338
0
}
1339
1340
PyInterpreterState *
1341
_PyInterpreterState_LookUpIDObject(PyObject *requested_id)
1342
0
{
1343
0
    int64_t id = _PyInterpreterState_ObjectToID(requested_id);
1344
0
    if (id < 0) {
1345
0
        return NULL;
1346
0
    }
1347
0
    return _PyInterpreterState_LookUpID(id);
1348
0
}
1349
1350
1351
/********************************/
1352
/* the per-thread runtime state */
1353
/********************************/
1354
1355
#ifndef NDEBUG
1356
static inline int
1357
tstate_is_alive(PyThreadState *tstate)
1358
{
1359
    return (tstate->_status.initialized &&
1360
            !tstate->_status.finalized &&
1361
            !tstate->_status.cleared &&
1362
            !tstate->_status.finalizing);
1363
}
1364
#endif
1365
1366
1367
//----------
1368
// lifecycle
1369
//----------
1370
1371
static _PyStackChunk*
1372
allocate_chunk(int size_in_bytes, _PyStackChunk* previous)
1373
229k
{
1374
229k
    assert(size_in_bytes % sizeof(PyObject **) == 0);
1375
229k
    _PyStackChunk *res = _PyObject_VirtualAlloc(size_in_bytes);
1376
229k
    if (res == NULL) {
1377
0
        return NULL;
1378
0
    }
1379
229k
    res->previous = previous;
1380
229k
    res->size = size_in_bytes;
1381
229k
    res->top = 0;
1382
229k
    return res;
1383
229k
}
1384
1385
static void
1386
reset_threadstate(_PyThreadStateImpl *tstate)
1387
0
{
1388
    // Set to _PyThreadState_INIT directly?
1389
0
    memcpy(tstate,
1390
0
           &initial._main_interpreter._initial_thread,
1391
0
           sizeof(*tstate));
1392
0
}
1393
1394
static _PyThreadStateImpl *
1395
alloc_threadstate(PyInterpreterState *interp)
1396
28
{
1397
28
    _PyThreadStateImpl *tstate;
1398
1399
    // Try the preallocated tstate first.
1400
28
    tstate = _Py_atomic_exchange_ptr(&interp->threads.preallocated, NULL);
1401
1402
    // Fall back to the allocator.
1403
28
    if (tstate == NULL) {
1404
0
        tstate = PyMem_RawCalloc(1, sizeof(_PyThreadStateImpl));
1405
0
        if (tstate == NULL) {
1406
0
            return NULL;
1407
0
        }
1408
0
        reset_threadstate(tstate);
1409
0
    }
1410
28
    return tstate;
1411
28
}
1412
1413
static void
1414
free_threadstate(_PyThreadStateImpl *tstate)
1415
0
{
1416
0
    PyInterpreterState *interp = tstate->base.interp;
1417
#ifdef Py_STATS
1418
    _PyStats_ThreadFini(tstate);
1419
#endif
1420
    // The initial thread state of the interpreter is allocated
1421
    // as part of the interpreter state so should not be freed.
1422
0
    if (tstate == &interp->_initial_thread) {
1423
        // Make it available again.
1424
0
        reset_threadstate(tstate);
1425
0
        assert(interp->threads.preallocated == NULL);
1426
0
        _Py_atomic_store_ptr(&interp->threads.preallocated, tstate);
1427
0
    }
1428
0
    else {
1429
0
        PyMem_RawFree(tstate);
1430
0
    }
1431
0
}
1432
1433
static void
1434
decref_threadstate(_PyThreadStateImpl *tstate)
1435
0
{
1436
0
    if (_Py_atomic_add_ssize(&tstate->refcount, -1) == 1) {
1437
        // The last reference to the thread state is gone.
1438
0
        free_threadstate(tstate);
1439
0
    }
1440
0
}
1441
1442
/* Get the thread state to a minimal consistent state.
1443
   Further init happens in pylifecycle.c before it can be used.
1444
   All fields not initialized here are expected to be zeroed out,
1445
   e.g. by PyMem_RawCalloc() or memset(), or otherwise pre-initialized.
1446
   The interpreter state is not manipulated.  Instead it is assumed that
1447
   the thread is getting added to the interpreter.
1448
  */
1449
1450
static void
1451
init_threadstate(_PyThreadStateImpl *_tstate,
1452
                 PyInterpreterState *interp, uint64_t id, int whence)
1453
28
{
1454
28
    PyThreadState *tstate = (PyThreadState *)_tstate;
1455
28
    if (tstate->_status.initialized) {
1456
0
        Py_FatalError("thread state already initialized");
1457
0
    }
1458
1459
28
    assert(interp != NULL);
1460
28
    tstate->interp = interp;
1461
28
    tstate->eval_breaker =
1462
28
        _Py_atomic_load_uintptr_relaxed(&interp->ceval.instrumentation_version);
1463
1464
    // next/prev are set in add_threadstate().
1465
28
    assert(tstate->next == NULL);
1466
28
    assert(tstate->prev == NULL);
1467
1468
28
    assert(tstate->_whence == _PyThreadState_WHENCE_NOTSET);
1469
28
    assert(whence >= 0 && whence <= _PyThreadState_WHENCE_THREADING_DAEMON);
1470
28
    tstate->_whence = whence;
1471
1472
28
    assert(id > 0);
1473
28
    tstate->id = id;
1474
1475
    // thread_id and native_thread_id are set in bind_tstate().
1476
1477
28
    tstate->py_recursion_limit = interp->ceval.recursion_limit;
1478
28
    tstate->py_recursion_remaining = interp->ceval.recursion_limit;
1479
28
    tstate->exc_info = &tstate->exc_state;
1480
1481
    // PyGILState_Release must not try to delete this thread state.
1482
    // This is cleared when PyGILState_Ensure() creates the thread state.
1483
28
    tstate->gilstate_counter = 1;
1484
1485
    // Initialize the embedded base frame - sentinel at the bottom of the frame stack
1486
28
    _tstate->base_frame.previous = NULL;
1487
28
    _tstate->base_frame.f_executable = PyStackRef_None;
1488
28
    _tstate->base_frame.f_funcobj = PyStackRef_NULL;
1489
28
    _tstate->base_frame.f_globals = NULL;
1490
28
    _tstate->base_frame.f_builtins = NULL;
1491
28
    _tstate->base_frame.f_locals = NULL;
1492
28
    _tstate->base_frame.frame_obj = NULL;
1493
28
    _tstate->base_frame.instr_ptr = NULL;
1494
28
    _tstate->base_frame.stackpointer = _tstate->base_frame.localsplus;
1495
28
    _tstate->base_frame.return_offset = 0;
1496
28
    _tstate->base_frame.owner = FRAME_OWNED_BY_INTERPRETER;
1497
28
    _tstate->base_frame.visited = 0;
1498
#ifdef Py_DEBUG
1499
    _tstate->base_frame.lltrace = 0;
1500
#endif
1501
#ifdef Py_GIL_DISABLED
1502
    _tstate->base_frame.tlbc_index = 0;
1503
#endif
1504
28
    _tstate->base_frame.localsplus[0] = PyStackRef_NULL;
1505
1506
    // current_frame starts pointing to the base frame
1507
28
    tstate->current_frame = &_tstate->base_frame;
1508
    // base_frame pointer for profilers to validate stack unwinding
1509
28
    tstate->base_frame = &_tstate->base_frame;
1510
28
    tstate->datastack_chunk = NULL;
1511
28
    tstate->datastack_top = NULL;
1512
28
    tstate->datastack_limit = NULL;
1513
28
    tstate->what_event = -1;
1514
28
    tstate->current_executor = NULL;
1515
28
    tstate->jit_exit = NULL;
1516
28
    tstate->dict_global_version = 0;
1517
1518
28
    _tstate->c_stack_soft_limit = UINTPTR_MAX;
1519
28
    _tstate->c_stack_top = 0;
1520
28
    _tstate->c_stack_hard_limit = 0;
1521
1522
28
    _tstate->c_stack_init_base = 0;
1523
28
    _tstate->c_stack_init_top = 0;
1524
1525
28
    _tstate->asyncio_running_loop = NULL;
1526
28
    _tstate->asyncio_running_task = NULL;
1527
1528
#ifdef _Py_TIER2
1529
    _tstate->jit_tracer_state.code_buffer = NULL;
1530
#endif
1531
28
    tstate->delete_later = NULL;
1532
1533
28
    llist_init(&_tstate->mem_free_queue);
1534
28
    llist_init(&_tstate->asyncio_tasks_head);
1535
28
    if (interp->stoptheworld.requested || _PyRuntime.stoptheworld.requested) {
1536
        // Start in the suspended state if there is an ongoing stop-the-world.
1537
0
        tstate->state = _Py_THREAD_SUSPENDED;
1538
0
    }
1539
1540
28
    tstate->_status.initialized = 1;
1541
28
}
1542
1543
static void
1544
add_threadstate(PyInterpreterState *interp, PyThreadState *tstate,
1545
                PyThreadState *next)
1546
28
{
1547
28
    assert(interp->threads.head != tstate);
1548
28
    if (next != NULL) {
1549
0
        assert(next->prev == NULL || next->prev == tstate);
1550
0
        next->prev = tstate;
1551
0
    }
1552
28
    tstate->next = next;
1553
28
    assert(tstate->prev == NULL);
1554
28
    interp->threads.head = tstate;
1555
28
}
1556
1557
static PyThreadState *
1558
new_threadstate(PyInterpreterState *interp, int whence)
1559
28
{
1560
    // Allocate the thread state.
1561
28
    _PyThreadStateImpl *tstate = alloc_threadstate(interp);
1562
28
    if (tstate == NULL) {
1563
0
        return NULL;
1564
0
    }
1565
1566
#ifdef Py_GIL_DISABLED
1567
    Py_ssize_t qsbr_idx = _Py_qsbr_reserve(interp);
1568
    if (qsbr_idx < 0) {
1569
        free_threadstate(tstate);
1570
        return NULL;
1571
    }
1572
    int32_t tlbc_idx = _Py_ReserveTLBCIndex(interp);
1573
    if (tlbc_idx < 0) {
1574
        free_threadstate(tstate);
1575
        return NULL;
1576
    }
1577
#endif
1578
#ifdef Py_STATS
1579
    // The PyStats structure is quite large and is allocated separated from tstate.
1580
    if (!_PyStats_ThreadInit(interp, tstate)) {
1581
        free_threadstate(tstate);
1582
        return NULL;
1583
    }
1584
#endif
1585
1586
    /* We serialize concurrent creation to protect global state. */
1587
28
    HEAD_LOCK(interp->runtime);
1588
1589
    // Initialize the new thread state.
1590
28
    interp->threads.next_unique_id += 1;
1591
28
    uint64_t id = interp->threads.next_unique_id;
1592
28
    init_threadstate(tstate, interp, id, whence);
1593
1594
    // Add the new thread state to the interpreter.
1595
28
    PyThreadState *old_head = interp->threads.head;
1596
28
    add_threadstate(interp, (PyThreadState *)tstate, old_head);
1597
1598
28
    HEAD_UNLOCK(interp->runtime);
1599
1600
#ifdef Py_GIL_DISABLED
1601
    // Must be called with lock unlocked to avoid lock ordering deadlocks.
1602
    _Py_qsbr_register(tstate, interp, qsbr_idx);
1603
    tstate->tlbc_index = tlbc_idx;
1604
#endif
1605
1606
28
    return (PyThreadState *)tstate;
1607
28
}
1608
1609
PyThreadState *
1610
PyThreadState_New(PyInterpreterState *interp)
1611
0
{
1612
0
    return _PyThreadState_NewBound(interp, _PyThreadState_WHENCE_UNKNOWN);
1613
0
}
1614
1615
PyThreadState *
1616
_PyThreadState_NewBound(PyInterpreterState *interp, int whence)
1617
0
{
1618
0
    PyThreadState *tstate = new_threadstate(interp, whence);
1619
0
    if (tstate) {
1620
0
        bind_tstate(tstate);
1621
        // This makes sure there's a gilstate tstate bound
1622
        // as soon as possible.
1623
0
        if (gilstate_get() == NULL) {
1624
0
            bind_gilstate_tstate(tstate);
1625
0
        }
1626
0
    }
1627
0
    return tstate;
1628
0
}
1629
1630
// This must be followed by a call to _PyThreadState_Bind();
1631
PyThreadState *
1632
_PyThreadState_New(PyInterpreterState *interp, int whence)
1633
28
{
1634
28
    return new_threadstate(interp, whence);
1635
28
}
1636
1637
// We keep this for stable ABI compabibility.
1638
PyAPI_FUNC(PyThreadState*)
1639
_PyThreadState_Prealloc(PyInterpreterState *interp)
1640
0
{
1641
0
    return _PyThreadState_New(interp, _PyThreadState_WHENCE_UNKNOWN);
1642
0
}
1643
1644
// We keep this around for (accidental) stable ABI compatibility.
1645
// Realistically, no extensions are using it.
1646
PyAPI_FUNC(void)
1647
_PyThreadState_Init(PyThreadState *tstate)
1648
0
{
1649
0
    Py_FatalError("_PyThreadState_Init() is for internal use only");
1650
0
}
1651
1652
1653
static void
1654
clear_datastack(PyThreadState *tstate)
1655
0
{
1656
0
    _PyStackChunk *chunk = tstate->datastack_chunk;
1657
0
    tstate->datastack_chunk = NULL;
1658
0
    while (chunk != NULL) {
1659
0
        _PyStackChunk *prev = chunk->previous;
1660
0
        _PyObject_VirtualFree(chunk, chunk->size);
1661
0
        chunk = prev;
1662
0
    }
1663
0
}
1664
1665
void
1666
PyThreadState_Clear(PyThreadState *tstate)
1667
0
{
1668
0
    assert(tstate->_status.initialized && !tstate->_status.cleared);
1669
0
    assert(current_fast_get()->interp == tstate->interp);
1670
    // GH-126016: In the _interpreters module, KeyboardInterrupt exceptions
1671
    // during PyEval_EvalCode() are sent to finalization, which doesn't let us
1672
    // mark threads as "not running main". So, for now this assertion is
1673
    // disabled.
1674
    // XXX assert(!_PyThreadState_IsRunningMain(tstate));
1675
    // XXX assert(!tstate->_status.bound || tstate->_status.unbound);
1676
0
    tstate->_status.finalizing = 1;  // just in case
1677
1678
    /* XXX Conditions we need to enforce:
1679
1680
       * the GIL must be held by the current thread
1681
       * current_fast_get()->interp must match tstate->interp
1682
       * for the main interpreter, current_fast_get() must be the main thread
1683
     */
1684
1685
0
    int verbose = _PyInterpreterState_GetConfig(tstate->interp)->verbose;
1686
1687
0
    if (verbose && tstate->current_frame != tstate->base_frame) {
1688
        /* bpo-20526: After the main thread calls
1689
           _PyInterpreterState_SetFinalizing() in Py_FinalizeEx()
1690
           (or in Py_EndInterpreter() for subinterpreters),
1691
           threads must exit when trying to take the GIL.
1692
           If a thread exit in the middle of _PyEval_EvalFrameDefault(),
1693
           tstate->frame is not reset to its previous value.
1694
           It is more likely with daemon threads, but it can happen
1695
           with regular threads if threading._shutdown() fails
1696
           (ex: interrupted by CTRL+C). */
1697
0
        fprintf(stderr,
1698
0
          "PyThreadState_Clear: warning: thread still has a frame\n");
1699
0
    }
1700
1701
0
    if (verbose && tstate->current_exception != NULL) {
1702
0
        fprintf(stderr, "PyThreadState_Clear: warning: thread has an exception set\n");
1703
0
        _PyErr_Print(tstate);
1704
0
    }
1705
1706
    /* At this point tstate shouldn't be used any more,
1707
       neither to run Python code nor for other uses.
1708
1709
       This is tricky when current_fast_get() == tstate, in the same way
1710
       as noted in interpreter_clear() above.  The below finalizers
1711
       can possibly run Python code or otherwise use the partially
1712
       cleared thread state.  For now we trust that isn't a problem
1713
       in practice.
1714
     */
1715
    // XXX Deal with the possibility of problematic finalizers.
1716
1717
    /* Don't clear tstate->pyframe: it is a borrowed reference */
1718
1719
0
    Py_CLEAR(tstate->threading_local_key);
1720
0
    Py_CLEAR(tstate->threading_local_sentinel);
1721
1722
0
    Py_CLEAR(((_PyThreadStateImpl *)tstate)->asyncio_running_loop);
1723
0
    Py_CLEAR(((_PyThreadStateImpl *)tstate)->asyncio_running_task);
1724
1725
1726
0
    PyMutex_Lock(&tstate->interp->asyncio_tasks_lock);
1727
    // merge any lingering tasks from thread state to interpreter's
1728
    // tasks list
1729
0
    llist_concat(&tstate->interp->asyncio_tasks_head,
1730
0
                 &((_PyThreadStateImpl *)tstate)->asyncio_tasks_head);
1731
0
    PyMutex_Unlock(&tstate->interp->asyncio_tasks_lock);
1732
1733
0
    Py_CLEAR(tstate->dict);
1734
0
    Py_CLEAR(tstate->async_exc);
1735
1736
0
    Py_CLEAR(tstate->current_exception);
1737
1738
0
    Py_CLEAR(tstate->exc_state.exc_value);
1739
1740
    /* The stack of exception states should contain just this thread. */
1741
0
    if (verbose && tstate->exc_info != &tstate->exc_state) {
1742
0
        fprintf(stderr,
1743
0
          "PyThreadState_Clear: warning: thread still has a generator\n");
1744
0
    }
1745
1746
0
    if (tstate->c_profilefunc != NULL) {
1747
0
        FT_ATOMIC_ADD_SSIZE(tstate->interp->sys_profiling_threads, -1);
1748
0
        tstate->c_profilefunc = NULL;
1749
0
    }
1750
0
    if (tstate->c_tracefunc != NULL) {
1751
0
        FT_ATOMIC_ADD_SSIZE(tstate->interp->sys_tracing_threads, -1);
1752
0
        tstate->c_tracefunc = NULL;
1753
0
    }
1754
1755
0
    Py_CLEAR(tstate->c_profileobj);
1756
0
    Py_CLEAR(tstate->c_traceobj);
1757
1758
0
    Py_CLEAR(tstate->async_gen_firstiter);
1759
0
    Py_CLEAR(tstate->async_gen_finalizer);
1760
1761
0
    Py_CLEAR(tstate->context);
1762
1763
#ifdef Py_GIL_DISABLED
1764
    // Each thread should clear own freelists in free-threading builds.
1765
    struct _Py_freelists *freelists = _Py_freelists_GET();
1766
    _PyObject_ClearFreeLists(freelists, 1);
1767
1768
    // Flush the thread's local GC allocation count to the global count
1769
    // before the thread state is cleared, otherwise the count is lost.
1770
    _PyThreadStateImpl *tstate_impl = (_PyThreadStateImpl *)tstate;
1771
    _Py_atomic_add_int(&tstate->interp->gc.young.count,
1772
                       (int)tstate_impl->gc.alloc_count);
1773
    tstate_impl->gc.alloc_count = 0;
1774
1775
    // Merge our thread-local refcounts into the type's own refcount and
1776
    // free our local refcount array.
1777
    _PyObject_FinalizePerThreadRefcounts(tstate_impl);
1778
1779
    // Remove ourself from the biased reference counting table of threads.
1780
    _Py_brc_remove_thread(tstate);
1781
1782
    // Release our thread-local copies of the bytecode for reuse by another
1783
    // thread
1784
    _Py_ClearTLBCIndex(tstate_impl);
1785
#endif
1786
1787
    // Merge our queue of pointers to be freed into the interpreter queue.
1788
0
    _PyMem_AbandonDelayed(tstate);
1789
1790
0
    _PyThreadState_ClearMimallocHeaps(tstate);
1791
1792
0
    tstate->_status.cleared = 1;
1793
1794
    // XXX Call _PyThreadStateSwap(runtime, NULL) here if "current".
1795
    // XXX Do it as early in the function as possible.
1796
0
}
1797
1798
static void
1799
decrement_stoptheworld_countdown(struct _stoptheworld_state *stw);
1800
1801
/* Common code for PyThreadState_Delete() and PyThreadState_DeleteCurrent() */
1802
static void
1803
tstate_delete_common(PyThreadState *tstate, int release_gil)
1804
0
{
1805
0
    assert(tstate->_status.cleared && !tstate->_status.finalized);
1806
0
    tstate_verify_not_active(tstate);
1807
0
    assert(!_PyThreadState_IsRunningMain(tstate));
1808
1809
0
    PyInterpreterState *interp = tstate->interp;
1810
0
    if (interp == NULL) {
1811
0
        Py_FatalError("NULL interpreter");
1812
0
    }
1813
0
    _PyRuntimeState *runtime = interp->runtime;
1814
1815
0
    HEAD_LOCK(runtime);
1816
0
    if (tstate->prev) {
1817
0
        tstate->prev->next = tstate->next;
1818
0
    }
1819
0
    else {
1820
0
        interp->threads.head = tstate->next;
1821
0
    }
1822
0
    if (tstate->next) {
1823
0
        tstate->next->prev = tstate->prev;
1824
0
    }
1825
0
    if (tstate->state != _Py_THREAD_SUSPENDED) {
1826
        // Any ongoing stop-the-world request should not wait for us because
1827
        // our thread is getting deleted.
1828
0
        if (interp->stoptheworld.requested) {
1829
0
            decrement_stoptheworld_countdown(&interp->stoptheworld);
1830
0
        }
1831
0
        if (runtime->stoptheworld.requested) {
1832
0
            decrement_stoptheworld_countdown(&runtime->stoptheworld);
1833
0
        }
1834
0
    }
1835
1836
#if defined(Py_REF_DEBUG) && defined(Py_GIL_DISABLED)
1837
    // Add our portion of the total refcount to the interpreter's total.
1838
    _PyThreadStateImpl *tstate_impl = (_PyThreadStateImpl *)tstate;
1839
    tstate->interp->object_state.reftotal += tstate_impl->reftotal;
1840
    tstate_impl->reftotal = 0;
1841
    assert(tstate_impl->refcounts.values == NULL);
1842
#endif
1843
1844
#if _Py_TIER2
1845
    _PyThreadStateImpl *_tstate = (_PyThreadStateImpl *)tstate;
1846
    if (_tstate->jit_tracer_state.code_buffer != NULL) {
1847
        _PyObject_VirtualFree(_tstate->jit_tracer_state.code_buffer, UOP_BUFFER_SIZE);
1848
        _tstate->jit_tracer_state.code_buffer = NULL;
1849
    }
1850
#endif
1851
1852
0
    HEAD_UNLOCK(runtime);
1853
1854
    // XXX Unbind in PyThreadState_Clear(), or earlier
1855
    // (and assert not-equal here)?
1856
0
    if (tstate->_status.bound_gilstate) {
1857
0
        unbind_gilstate_tstate(tstate);
1858
0
    }
1859
0
    if (tstate->_status.bound) {
1860
0
        unbind_tstate(tstate);
1861
0
    }
1862
1863
    // XXX Move to PyThreadState_Clear()?
1864
0
    clear_datastack(tstate);
1865
1866
0
    if (release_gil) {
1867
0
        _PyEval_ReleaseLock(tstate->interp, tstate, 1);
1868
0
    }
1869
1870
#ifdef Py_GIL_DISABLED
1871
    _Py_qsbr_unregister(tstate);
1872
#endif
1873
1874
0
    tstate->_status.finalized = 1;
1875
0
}
1876
1877
static void
1878
zapthreads(PyInterpreterState *interp)
1879
0
{
1880
0
    PyThreadState *tstate;
1881
    /* No need to lock the mutex here because this should only happen
1882
       when the threads are all really dead (XXX famous last words).
1883
1884
       Cannot use _Py_FOR_EACH_TSTATE_UNLOCKED because we are freeing
1885
       the thread states here.
1886
    */
1887
0
    while ((tstate = interp->threads.head) != NULL) {
1888
0
        tstate_verify_not_active(tstate);
1889
0
        tstate_delete_common(tstate, 0);
1890
0
        free_threadstate((_PyThreadStateImpl *)tstate);
1891
0
    }
1892
0
}
1893
1894
1895
void
1896
PyThreadState_Delete(PyThreadState *tstate)
1897
0
{
1898
0
    _Py_EnsureTstateNotNULL(tstate);
1899
0
    tstate_verify_not_active(tstate);
1900
0
    tstate_delete_common(tstate, 0);
1901
0
    free_threadstate((_PyThreadStateImpl *)tstate);
1902
0
}
1903
1904
1905
void
1906
_PyThreadState_DeleteCurrent(PyThreadState *tstate)
1907
0
{
1908
0
    _Py_EnsureTstateNotNULL(tstate);
1909
#ifdef Py_GIL_DISABLED
1910
    _Py_qsbr_detach(((_PyThreadStateImpl *)tstate)->qsbr);
1911
#endif
1912
#ifdef Py_STATS
1913
    _PyStats_Detach((_PyThreadStateImpl *)tstate);
1914
#endif
1915
0
    current_fast_clear(tstate->interp->runtime);
1916
0
    tstate_delete_common(tstate, 1);  // release GIL as part of call
1917
0
    free_threadstate((_PyThreadStateImpl *)tstate);
1918
0
}
1919
1920
void
1921
PyThreadState_DeleteCurrent(void)
1922
0
{
1923
0
    PyThreadState *tstate = current_fast_get();
1924
0
    _PyThreadState_DeleteCurrent(tstate);
1925
0
}
1926
1927
1928
// Unlinks and removes all thread states from `tstate->interp`, with the
1929
// exception of the one passed as an argument. However, it does not delete
1930
// these thread states. Instead, it returns the removed thread states as a
1931
// linked list.
1932
//
1933
// Note that if there is a current thread state, it *must* be the one
1934
// passed as argument.  Also, this won't touch any interpreters other
1935
// than the current one, since we don't know which thread state should
1936
// be kept in those other interpreters.
1937
PyThreadState *
1938
_PyThreadState_RemoveExcept(PyThreadState *tstate)
1939
0
{
1940
0
    assert(tstate != NULL);
1941
0
    PyInterpreterState *interp = tstate->interp;
1942
0
    _PyRuntimeState *runtime = interp->runtime;
1943
1944
#ifdef Py_GIL_DISABLED
1945
    assert(runtime->stoptheworld.world_stopped);
1946
#endif
1947
1948
0
    HEAD_LOCK(runtime);
1949
    /* Remove all thread states, except tstate, from the linked list of
1950
       thread states. */
1951
0
    PyThreadState *list = interp->threads.head;
1952
0
    if (list == tstate) {
1953
0
        list = tstate->next;
1954
0
    }
1955
0
    if (tstate->prev) {
1956
0
        tstate->prev->next = tstate->next;
1957
0
    }
1958
0
    if (tstate->next) {
1959
0
        tstate->next->prev = tstate->prev;
1960
0
    }
1961
0
    tstate->prev = tstate->next = NULL;
1962
0
    interp->threads.head = tstate;
1963
0
    HEAD_UNLOCK(runtime);
1964
1965
0
    return list;
1966
0
}
1967
1968
// Deletes the thread states in the linked list `list`.
1969
//
1970
// This is intended to be used in conjunction with _PyThreadState_RemoveExcept.
1971
//
1972
// If `is_after_fork` is true, the thread states are immediately freed.
1973
// Otherwise, they are decref'd because they may still be referenced by an
1974
// OS thread.
1975
void
1976
_PyThreadState_DeleteList(PyThreadState *list, int is_after_fork)
1977
0
{
1978
    // The world can't be stopped because we PyThreadState_Clear() can
1979
    // call destructors.
1980
0
    assert(!_PyRuntime.stoptheworld.world_stopped);
1981
1982
0
    PyThreadState *p, *next;
1983
0
    for (p = list; p; p = next) {
1984
0
        next = p->next;
1985
0
        PyThreadState_Clear(p);
1986
0
        if (is_after_fork) {
1987
0
            free_threadstate((_PyThreadStateImpl *)p);
1988
0
        }
1989
0
        else {
1990
0
            decref_threadstate((_PyThreadStateImpl *)p);
1991
0
        }
1992
0
    }
1993
0
}
1994
1995
1996
//----------
1997
// accessors
1998
//----------
1999
2000
/* An extension mechanism to store arbitrary additional per-thread state.
2001
   PyThreadState_GetDict() returns a dictionary that can be used to hold such
2002
   state; the caller should pick a unique key and store its state there.  If
2003
   PyThreadState_GetDict() returns NULL, an exception has *not* been raised
2004
   and the caller should assume no per-thread state is available. */
2005
2006
PyObject *
2007
_PyThreadState_GetDict(PyThreadState *tstate)
2008
8.18M
{
2009
8.18M
    assert(tstate != NULL);
2010
8.18M
    if (tstate->dict == NULL) {
2011
2
        tstate->dict = PyDict_New();
2012
2
        if (tstate->dict == NULL) {
2013
0
            _PyErr_Clear(tstate);
2014
0
        }
2015
2
    }
2016
8.18M
    return tstate->dict;
2017
8.18M
}
2018
2019
2020
PyObject *
2021
PyThreadState_GetDict(void)
2022
8.18M
{
2023
8.18M
    PyThreadState *tstate = current_fast_get();
2024
8.18M
    if (tstate == NULL) {
2025
0
        return NULL;
2026
0
    }
2027
8.18M
    return _PyThreadState_GetDict(tstate);
2028
8.18M
}
2029
2030
2031
PyInterpreterState *
2032
PyThreadState_GetInterpreter(PyThreadState *tstate)
2033
0
{
2034
0
    assert(tstate != NULL);
2035
0
    return tstate->interp;
2036
0
}
2037
2038
2039
PyFrameObject*
2040
PyThreadState_GetFrame(PyThreadState *tstate)
2041
616k
{
2042
616k
    assert(tstate != NULL);
2043
616k
    _PyInterpreterFrame *f = _PyThreadState_GetFrame(tstate);
2044
616k
    if (f == NULL) {
2045
0
        return NULL;
2046
0
    }
2047
616k
    PyFrameObject *frame = _PyFrame_GetFrameObject(f);
2048
616k
    if (frame == NULL) {
2049
0
        PyErr_Clear();
2050
0
    }
2051
616k
    return (PyFrameObject*)Py_XNewRef(frame);
2052
616k
}
2053
2054
2055
uint64_t
2056
PyThreadState_GetID(PyThreadState *tstate)
2057
0
{
2058
0
    assert(tstate != NULL);
2059
0
    return tstate->id;
2060
0
}
2061
2062
2063
static inline void
2064
tstate_activate(PyThreadState *tstate)
2065
2.99M
{
2066
2.99M
    assert(tstate != NULL);
2067
    // XXX assert(tstate_is_alive(tstate));
2068
2.99M
    assert(tstate_is_bound(tstate));
2069
2.99M
    assert(!tstate->_status.active);
2070
2071
2.99M
    assert(!tstate->_status.bound_gilstate ||
2072
2.99M
           tstate == gilstate_get());
2073
2.99M
    if (!tstate->_status.bound_gilstate) {
2074
0
        bind_gilstate_tstate(tstate);
2075
0
    }
2076
2077
2.99M
    tstate->_status.active = 1;
2078
2.99M
}
2079
2080
static inline void
2081
tstate_deactivate(PyThreadState *tstate)
2082
2.99M
{
2083
2.99M
    assert(tstate != NULL);
2084
    // XXX assert(tstate_is_alive(tstate));
2085
2.99M
    assert(tstate_is_bound(tstate));
2086
2.99M
    assert(tstate->_status.active);
2087
2088
#if Py_STATS
2089
    _PyStats_Detach((_PyThreadStateImpl *)tstate);
2090
#endif
2091
2092
2.99M
    tstate->_status.active = 0;
2093
2094
    // We do not unbind the gilstate tstate here.
2095
    // It will still be used in PyGILState_Ensure().
2096
2.99M
}
2097
2098
static int
2099
tstate_try_attach(PyThreadState *tstate)
2100
2.99M
{
2101
#ifdef Py_GIL_DISABLED
2102
    int expected = _Py_THREAD_DETACHED;
2103
    return _Py_atomic_compare_exchange_int(&tstate->state,
2104
                                           &expected,
2105
                                           _Py_THREAD_ATTACHED);
2106
#else
2107
2.99M
    assert(tstate->state == _Py_THREAD_DETACHED);
2108
2.99M
    tstate->state = _Py_THREAD_ATTACHED;
2109
2.99M
    return 1;
2110
2.99M
#endif
2111
2.99M
}
2112
2113
static void
2114
tstate_set_detached(PyThreadState *tstate, int detached_state)
2115
2.99M
{
2116
2.99M
    assert(_Py_atomic_load_int_relaxed(&tstate->state) == _Py_THREAD_ATTACHED);
2117
#ifdef Py_GIL_DISABLED
2118
    _Py_atomic_store_int(&tstate->state, detached_state);
2119
#else
2120
2.99M
    tstate->state = detached_state;
2121
2.99M
#endif
2122
2.99M
}
2123
2124
static void
2125
tstate_wait_attach(PyThreadState *tstate)
2126
0
{
2127
0
    do {
2128
0
        int state = _Py_atomic_load_int_relaxed(&tstate->state);
2129
0
        if (state == _Py_THREAD_SUSPENDED) {
2130
            // Wait until we're switched out of SUSPENDED to DETACHED.
2131
0
            _PyParkingLot_Park(&tstate->state, &state, sizeof(tstate->state),
2132
0
                               /*timeout=*/-1, NULL, /*detach=*/0);
2133
0
        }
2134
0
        else if (state == _Py_THREAD_SHUTTING_DOWN) {
2135
            // We're shutting down, so we can't attach.
2136
0
            _PyThreadState_HangThread(tstate);
2137
0
        }
2138
0
        else {
2139
0
            assert(state == _Py_THREAD_DETACHED);
2140
0
        }
2141
        // Once we're back in DETACHED we can re-attach
2142
0
    } while (!tstate_try_attach(tstate));
2143
0
}
2144
2145
void
2146
_PyThreadState_Attach(PyThreadState *tstate)
2147
2.99M
{
2148
#if defined(Py_DEBUG)
2149
    // This is called from PyEval_RestoreThread(). Similar
2150
    // to it, we need to ensure errno doesn't change.
2151
    int err = errno;
2152
#endif
2153
2154
2.99M
    _Py_EnsureTstateNotNULL(tstate);
2155
2.99M
    if (current_fast_get() != NULL) {
2156
0
        Py_FatalError("non-NULL old thread state");
2157
0
    }
2158
2.99M
    _PyThreadStateImpl *_tstate = (_PyThreadStateImpl *)tstate;
2159
2.99M
    if (_tstate->c_stack_hard_limit == 0) {
2160
28
        _Py_InitializeRecursionLimits(tstate);
2161
28
    }
2162
2163
2.99M
    while (1) {
2164
2.99M
        _PyEval_AcquireLock(tstate);
2165
2166
        // XXX assert(tstate_is_alive(tstate));
2167
2.99M
        current_fast_set(&_PyRuntime, tstate);
2168
2.99M
        if (!tstate_try_attach(tstate)) {
2169
0
            tstate_wait_attach(tstate);
2170
0
        }
2171
2.99M
        tstate_activate(tstate);
2172
2173
#ifdef Py_GIL_DISABLED
2174
        if (_PyEval_IsGILEnabled(tstate) && !tstate->holds_gil) {
2175
            // The GIL was enabled between our call to _PyEval_AcquireLock()
2176
            // and when we attached (the GIL can't go from enabled to disabled
2177
            // here because only a thread holding the GIL can disable
2178
            // it). Detach and try again.
2179
            tstate_set_detached(tstate, _Py_THREAD_DETACHED);
2180
            tstate_deactivate(tstate);
2181
            current_fast_clear(&_PyRuntime);
2182
            continue;
2183
        }
2184
        _Py_qsbr_attach(((_PyThreadStateImpl *)tstate)->qsbr);
2185
#endif
2186
2.99M
        break;
2187
2.99M
    }
2188
2189
    // Resume previous critical section. This acquires the lock(s) from the
2190
    // top-most critical section.
2191
2.99M
    if (tstate->critical_section != 0) {
2192
0
        _PyCriticalSection_Resume(tstate);
2193
0
    }
2194
2195
#ifdef Py_STATS
2196
    _PyStats_Attach((_PyThreadStateImpl *)tstate);
2197
#endif
2198
2199
#if defined(Py_DEBUG)
2200
    errno = err;
2201
#endif
2202
2.99M
}
2203
2204
static void
2205
detach_thread(PyThreadState *tstate, int detached_state)
2206
2.99M
{
2207
    // XXX assert(tstate_is_alive(tstate) && tstate_is_bound(tstate));
2208
2.99M
    assert(_Py_atomic_load_int_relaxed(&tstate->state) == _Py_THREAD_ATTACHED);
2209
2.99M
    assert(tstate == current_fast_get());
2210
2.99M
    if (tstate->critical_section != 0) {
2211
0
        _PyCriticalSection_SuspendAll(tstate);
2212
0
    }
2213
#ifdef Py_GIL_DISABLED
2214
    _Py_qsbr_detach(((_PyThreadStateImpl *)tstate)->qsbr);
2215
#endif
2216
2.99M
    tstate_deactivate(tstate);
2217
2.99M
    tstate_set_detached(tstate, detached_state);
2218
2.99M
    current_fast_clear(&_PyRuntime);
2219
2.99M
    _PyEval_ReleaseLock(tstate->interp, tstate, 0);
2220
2.99M
}
2221
2222
void
2223
_PyThreadState_Detach(PyThreadState *tstate)
2224
2.99M
{
2225
2.99M
    detach_thread(tstate, _Py_THREAD_DETACHED);
2226
2.99M
}
2227
2228
void
2229
_PyThreadState_Suspend(PyThreadState *tstate)
2230
0
{
2231
0
    _PyRuntimeState *runtime = &_PyRuntime;
2232
2233
0
    assert(_Py_atomic_load_int_relaxed(&tstate->state) == _Py_THREAD_ATTACHED);
2234
2235
0
    struct _stoptheworld_state *stw = NULL;
2236
0
    HEAD_LOCK(runtime);
2237
0
    if (runtime->stoptheworld.requested) {
2238
0
        stw = &runtime->stoptheworld;
2239
0
    }
2240
0
    else if (tstate->interp->stoptheworld.requested) {
2241
0
        stw = &tstate->interp->stoptheworld;
2242
0
    }
2243
0
    HEAD_UNLOCK(runtime);
2244
2245
0
    if (stw == NULL) {
2246
        // Switch directly to "detached" if there is no active stop-the-world
2247
        // request.
2248
0
        detach_thread(tstate, _Py_THREAD_DETACHED);
2249
0
        return;
2250
0
    }
2251
2252
    // Switch to "suspended" state.
2253
0
    detach_thread(tstate, _Py_THREAD_SUSPENDED);
2254
2255
    // Decrease the count of remaining threads needing to park.
2256
0
    HEAD_LOCK(runtime);
2257
0
    decrement_stoptheworld_countdown(stw);
2258
0
    HEAD_UNLOCK(runtime);
2259
0
}
2260
2261
void
2262
_PyThreadState_SetShuttingDown(PyThreadState *tstate)
2263
0
{
2264
0
    _Py_atomic_store_int(&tstate->state, _Py_THREAD_SHUTTING_DOWN);
2265
#ifdef Py_GIL_DISABLED
2266
    _PyParkingLot_UnparkAll(&tstate->state);
2267
#endif
2268
0
}
2269
2270
// Decrease stop-the-world counter of remaining number of threads that need to
2271
// pause. If we are the final thread to pause, notify the requesting thread.
2272
static void
2273
decrement_stoptheworld_countdown(struct _stoptheworld_state *stw)
2274
0
{
2275
0
    assert(stw->thread_countdown > 0);
2276
0
    if (--stw->thread_countdown == 0) {
2277
0
        _PyEvent_Notify(&stw->stop_event);
2278
0
    }
2279
0
}
2280
2281
#ifdef Py_GIL_DISABLED
2282
// Interpreter for _Py_FOR_EACH_STW_INTERP(). For global stop-the-world events,
2283
// we start with the first interpreter and then iterate over all interpreters.
2284
// For per-interpreter stop-the-world events, we only operate on the one
2285
// interpreter.
2286
static PyInterpreterState *
2287
interp_for_stop_the_world(struct _stoptheworld_state *stw)
2288
{
2289
    return (stw->is_global
2290
        ? PyInterpreterState_Head()
2291
        : _Py_CONTAINER_OF(stw, PyInterpreterState, stoptheworld));
2292
}
2293
2294
// Loops over threads for a stop-the-world event.
2295
// For global: all threads in all interpreters
2296
// For per-interpreter: all threads in the interpreter
2297
#define _Py_FOR_EACH_STW_INTERP(stw, i)                                     \
2298
    for (PyInterpreterState *i = interp_for_stop_the_world((stw));          \
2299
            i != NULL; i = ((stw->is_global) ? i->next : NULL))
2300
2301
2302
// Try to transition threads atomically from the "detached" state to the
2303
// "gc stopped" state. Returns true if all threads are in the "gc stopped"
2304
static bool
2305
park_detached_threads(struct _stoptheworld_state *stw)
2306
{
2307
    int num_parked = 0;
2308
    _Py_FOR_EACH_STW_INTERP(stw, i) {
2309
        _Py_FOR_EACH_TSTATE_UNLOCKED(i, t) {
2310
            int state = _Py_atomic_load_int_relaxed(&t->state);
2311
            if (state == _Py_THREAD_DETACHED) {
2312
                // Atomically transition to "suspended" if in "detached" state.
2313
                if (_Py_atomic_compare_exchange_int(
2314
                                &t->state, &state, _Py_THREAD_SUSPENDED)) {
2315
                    num_parked++;
2316
                }
2317
            }
2318
            else if (state == _Py_THREAD_ATTACHED && t != stw->requester) {
2319
                _Py_set_eval_breaker_bit(t, _PY_EVAL_PLEASE_STOP_BIT);
2320
            }
2321
        }
2322
    }
2323
    stw->thread_countdown -= num_parked;
2324
    assert(stw->thread_countdown >= 0);
2325
    return num_parked > 0 && stw->thread_countdown == 0;
2326
}
2327
2328
static void
2329
stop_the_world(struct _stoptheworld_state *stw)
2330
{
2331
    _PyRuntimeState *runtime = &_PyRuntime;
2332
2333
    // gh-137433: Acquire the rwmutex first to avoid deadlocks with daemon
2334
    // threads that may hang when blocked on lock acquisition.
2335
    if (stw->is_global) {
2336
        _PyRWMutex_Lock(&runtime->stoptheworld_mutex);
2337
    }
2338
    else {
2339
        _PyRWMutex_RLock(&runtime->stoptheworld_mutex);
2340
    }
2341
    PyMutex_Lock(&stw->mutex);
2342
2343
    HEAD_LOCK(runtime);
2344
    stw->requested = 1;
2345
    stw->thread_countdown = 0;
2346
    stw->stop_event = (PyEvent){0};  // zero-initialize (unset)
2347
    stw->requester = _PyThreadState_GET();  // may be NULL
2348
    FT_STAT_WORLD_STOP_INC();
2349
2350
    _Py_FOR_EACH_STW_INTERP(stw, i) {
2351
        _Py_FOR_EACH_TSTATE_UNLOCKED(i, t) {
2352
            if (t != stw->requester) {
2353
                // Count all the other threads (we don't wait on ourself).
2354
                stw->thread_countdown++;
2355
            }
2356
        }
2357
    }
2358
2359
    if (stw->thread_countdown == 0) {
2360
        HEAD_UNLOCK(runtime);
2361
        stw->world_stopped = 1;
2362
        return;
2363
    }
2364
2365
    for (;;) {
2366
        // Switch threads that are detached to the GC stopped state
2367
        bool stopped_all_threads = park_detached_threads(stw);
2368
        HEAD_UNLOCK(runtime);
2369
2370
        if (stopped_all_threads) {
2371
            break;
2372
        }
2373
2374
        PyTime_t wait_ns = 1000*1000;  // 1ms (arbitrary, may need tuning)
2375
        int detach = 0;
2376
        if (PyEvent_WaitTimed(&stw->stop_event, wait_ns, detach)) {
2377
            assert(stw->thread_countdown == 0);
2378
            break;
2379
        }
2380
2381
        HEAD_LOCK(runtime);
2382
    }
2383
    stw->world_stopped = 1;
2384
}
2385
2386
static void
2387
start_the_world(struct _stoptheworld_state *stw)
2388
{
2389
    _PyRuntimeState *runtime = &_PyRuntime;
2390
    assert(PyMutex_IsLocked(&stw->mutex));
2391
2392
    HEAD_LOCK(runtime);
2393
    stw->requested = 0;
2394
    stw->world_stopped = 0;
2395
    // Switch threads back to the detached state.
2396
    _Py_FOR_EACH_STW_INTERP(stw, i) {
2397
        _Py_FOR_EACH_TSTATE_UNLOCKED(i, t) {
2398
            if (t != stw->requester) {
2399
                assert(_Py_atomic_load_int_relaxed(&t->state) ==
2400
                       _Py_THREAD_SUSPENDED);
2401
                _Py_atomic_store_int(&t->state, _Py_THREAD_DETACHED);
2402
                _PyParkingLot_UnparkAll(&t->state);
2403
            }
2404
        }
2405
    }
2406
    stw->requester = NULL;
2407
    HEAD_UNLOCK(runtime);
2408
    PyMutex_Unlock(&stw->mutex);
2409
    if (stw->is_global) {
2410
        _PyRWMutex_Unlock(&runtime->stoptheworld_mutex);
2411
    }
2412
    else {
2413
        _PyRWMutex_RUnlock(&runtime->stoptheworld_mutex);
2414
    }
2415
}
2416
#endif  // Py_GIL_DISABLED
2417
2418
void
2419
_PyEval_StopTheWorldAll(_PyRuntimeState *runtime)
2420
0
{
2421
#ifdef Py_GIL_DISABLED
2422
    stop_the_world(&runtime->stoptheworld);
2423
#endif
2424
0
}
2425
2426
void
2427
_PyEval_StartTheWorldAll(_PyRuntimeState *runtime)
2428
0
{
2429
#ifdef Py_GIL_DISABLED
2430
    start_the_world(&runtime->stoptheworld);
2431
#endif
2432
0
}
2433
2434
void
2435
_PyEval_StopTheWorld(PyInterpreterState *interp)
2436
4
{
2437
#ifdef Py_GIL_DISABLED
2438
    stop_the_world(&interp->stoptheworld);
2439
#endif
2440
4
}
2441
2442
void
2443
_PyEval_StartTheWorld(PyInterpreterState *interp)
2444
4
{
2445
#ifdef Py_GIL_DISABLED
2446
    start_the_world(&interp->stoptheworld);
2447
#endif
2448
4
}
2449
2450
//----------
2451
// other API
2452
//----------
2453
2454
/* Asynchronously raise an exception in a thread.
2455
   Requested by Just van Rossum and Alex Martelli.
2456
   To prevent naive misuse, you must write your own extension
2457
   to call this, or use ctypes.  Must be called with the GIL held.
2458
   Returns the number of tstates modified (normally 1, but 0 if `id` didn't
2459
   match any known thread id).  Can be called with exc=NULL to clear an
2460
   existing async exception.  This raises no exceptions. */
2461
2462
// XXX Move this to Python/ceval_gil.c?
2463
// XXX Deprecate this.
2464
int
2465
PyThreadState_SetAsyncExc(unsigned long id, PyObject *exc)
2466
0
{
2467
0
    PyInterpreterState *interp = _PyInterpreterState_GET();
2468
2469
    /* Although the GIL is held, a few C API functions can be called
2470
     * without the GIL held, and in particular some that create and
2471
     * destroy thread and interpreter states.  Those can mutate the
2472
     * list of thread states we're traversing, so to prevent that we lock
2473
     * head_mutex for the duration.
2474
     */
2475
0
    PyThreadState *tstate = NULL;
2476
0
    _Py_FOR_EACH_TSTATE_BEGIN(interp, t) {
2477
0
        if (t->thread_id == id) {
2478
0
            tstate = t;
2479
0
            break;
2480
0
        }
2481
0
    }
2482
0
    _Py_FOR_EACH_TSTATE_END(interp);
2483
2484
0
    if (tstate != NULL) {
2485
        /* Tricky:  we need to decref the current value
2486
         * (if any) in tstate->async_exc, but that can in turn
2487
         * allow arbitrary Python code to run, including
2488
         * perhaps calls to this function.  To prevent
2489
         * deadlock, we need to release head_mutex before
2490
         * the decref.
2491
         */
2492
0
        Py_XINCREF(exc);
2493
0
        PyObject *old_exc = _Py_atomic_exchange_ptr(&tstate->async_exc, exc);
2494
2495
0
        Py_XDECREF(old_exc);
2496
0
        _Py_set_eval_breaker_bit(tstate, _PY_ASYNC_EXCEPTION_BIT);
2497
0
    }
2498
2499
0
    return tstate != NULL;
2500
0
}
2501
2502
//---------------------------------
2503
// API for the current thread state
2504
//---------------------------------
2505
2506
PyThreadState *
2507
PyThreadState_GetUnchecked(void)
2508
0
{
2509
0
    return current_fast_get();
2510
0
}
2511
2512
2513
PyThreadState *
2514
PyThreadState_Get(void)
2515
85.5M
{
2516
85.5M
    PyThreadState *tstate = current_fast_get();
2517
85.5M
    _Py_EnsureTstateNotNULL(tstate);
2518
85.5M
    return tstate;
2519
85.5M
}
2520
2521
PyThreadState *
2522
_PyThreadState_Swap(_PyRuntimeState *runtime, PyThreadState *newts)
2523
0
{
2524
0
    PyThreadState *oldts = current_fast_get();
2525
0
    if (oldts != NULL) {
2526
0
        _PyThreadState_Detach(oldts);
2527
0
    }
2528
0
    if (newts != NULL) {
2529
0
        _PyThreadState_Attach(newts);
2530
0
    }
2531
0
    return oldts;
2532
0
}
2533
2534
PyThreadState *
2535
PyThreadState_Swap(PyThreadState *newts)
2536
0
{
2537
0
    return _PyThreadState_Swap(&_PyRuntime, newts);
2538
0
}
2539
2540
2541
void
2542
_PyThreadState_Bind(PyThreadState *tstate)
2543
28
{
2544
    // gh-104690: If Python is being finalized and PyInterpreterState_Delete()
2545
    // was called, tstate becomes a dangling pointer.
2546
28
    assert(_PyThreadState_CheckConsistency(tstate));
2547
2548
28
    bind_tstate(tstate);
2549
    // This makes sure there's a gilstate tstate bound
2550
    // as soon as possible.
2551
28
    if (gilstate_get() == NULL) {
2552
28
        bind_gilstate_tstate(tstate);
2553
28
    }
2554
28
}
2555
2556
#if defined(Py_GIL_DISABLED) && !defined(Py_LIMITED_API)
2557
uintptr_t
2558
_Py_GetThreadLocal_Addr(void)
2559
{
2560
    // gh-112535: Use the address of the thread-local PyThreadState variable as
2561
    // a unique identifier for the current thread. Each thread has a unique
2562
    // _Py_tss_tstate variable with a unique address.
2563
    return (uintptr_t)&_Py_tss_tstate;
2564
}
2565
#endif
2566
2567
/***********************************/
2568
/* routines for advanced debuggers */
2569
/***********************************/
2570
2571
// (requested by David Beazley)
2572
// Don't use unless you know what you are doing!
2573
2574
PyInterpreterState *
2575
PyInterpreterState_Head(void)
2576
0
{
2577
0
    return _PyRuntime.interpreters.head;
2578
0
}
2579
2580
PyInterpreterState *
2581
PyInterpreterState_Main(void)
2582
0
{
2583
0
    return _PyInterpreterState_Main();
2584
0
}
2585
2586
PyInterpreterState *
2587
0
PyInterpreterState_Next(PyInterpreterState *interp) {
2588
0
    return interp->next;
2589
0
}
2590
2591
PyThreadState *
2592
17.8k
PyInterpreterState_ThreadHead(PyInterpreterState *interp) {
2593
17.8k
    return interp->threads.head;
2594
17.8k
}
2595
2596
PyThreadState *
2597
17.8k
PyThreadState_Next(PyThreadState *tstate) {
2598
17.8k
    return tstate->next;
2599
17.8k
}
2600
2601
2602
/********************************************/
2603
/* reporting execution state of all threads */
2604
/********************************************/
2605
2606
/* The implementation of sys._current_frames().  This is intended to be
2607
   called with the GIL held, as it will be when called via
2608
   sys._current_frames().  It's possible it would work fine even without
2609
   the GIL held, but haven't thought enough about that.
2610
*/
2611
PyObject *
2612
_PyThread_CurrentFrames(void)
2613
0
{
2614
0
    _PyRuntimeState *runtime = &_PyRuntime;
2615
0
    PyThreadState *tstate = current_fast_get();
2616
0
    if (_PySys_Audit(tstate, "sys._current_frames", NULL) < 0) {
2617
0
        return NULL;
2618
0
    }
2619
2620
0
    PyObject *result = PyDict_New();
2621
0
    if (result == NULL) {
2622
0
        return NULL;
2623
0
    }
2624
2625
    /* for i in all interpreters:
2626
     *     for t in all of i's thread states:
2627
     *          if t's frame isn't NULL, map t's id to its frame
2628
     * Because these lists can mutate even when the GIL is held, we
2629
     * need to grab head_mutex for the duration.
2630
     */
2631
0
    _PyEval_StopTheWorldAll(runtime);
2632
0
    HEAD_LOCK(runtime);
2633
0
    PyInterpreterState *i;
2634
0
    for (i = runtime->interpreters.head; i != NULL; i = i->next) {
2635
0
        _Py_FOR_EACH_TSTATE_UNLOCKED(i, t) {
2636
0
            _PyInterpreterFrame *frame = t->current_frame;
2637
0
            frame = _PyFrame_GetFirstComplete(frame);
2638
0
            if (frame == NULL) {
2639
0
                continue;
2640
0
            }
2641
0
            PyObject *id = PyLong_FromUnsignedLong(t->thread_id);
2642
0
            if (id == NULL) {
2643
0
                goto fail;
2644
0
            }
2645
0
            PyObject *frameobj = (PyObject *)_PyFrame_GetFrameObject(frame);
2646
0
            if (frameobj == NULL) {
2647
0
                Py_DECREF(id);
2648
0
                goto fail;
2649
0
            }
2650
0
            int stat = PyDict_SetItem(result, id, frameobj);
2651
0
            Py_DECREF(id);
2652
0
            if (stat < 0) {
2653
0
                goto fail;
2654
0
            }
2655
0
        }
2656
0
    }
2657
0
    goto done;
2658
2659
0
fail:
2660
0
    Py_CLEAR(result);
2661
2662
0
done:
2663
0
    HEAD_UNLOCK(runtime);
2664
0
    _PyEval_StartTheWorldAll(runtime);
2665
0
    return result;
2666
0
}
2667
2668
/* The implementation of sys._current_exceptions().  This is intended to be
2669
   called with the GIL held, as it will be when called via
2670
   sys._current_exceptions().  It's possible it would work fine even without
2671
   the GIL held, but haven't thought enough about that.
2672
*/
2673
PyObject *
2674
_PyThread_CurrentExceptions(void)
2675
0
{
2676
0
    _PyRuntimeState *runtime = &_PyRuntime;
2677
0
    PyThreadState *tstate = current_fast_get();
2678
2679
0
    _Py_EnsureTstateNotNULL(tstate);
2680
2681
0
    if (_PySys_Audit(tstate, "sys._current_exceptions", NULL) < 0) {
2682
0
        return NULL;
2683
0
    }
2684
2685
0
    PyObject *result = PyDict_New();
2686
0
    if (result == NULL) {
2687
0
        return NULL;
2688
0
    }
2689
2690
    /* for i in all interpreters:
2691
     *     for t in all of i's thread states:
2692
     *          if t's frame isn't NULL, map t's id to its frame
2693
     * Because these lists can mutate even when the GIL is held, we
2694
     * need to grab head_mutex for the duration.
2695
     */
2696
0
    _PyEval_StopTheWorldAll(runtime);
2697
0
    HEAD_LOCK(runtime);
2698
0
    PyInterpreterState *i;
2699
0
    for (i = runtime->interpreters.head; i != NULL; i = i->next) {
2700
0
        _Py_FOR_EACH_TSTATE_UNLOCKED(i, t) {
2701
0
            _PyErr_StackItem *err_info = _PyErr_GetTopmostException(t);
2702
0
            if (err_info == NULL) {
2703
0
                continue;
2704
0
            }
2705
0
            PyObject *id = PyLong_FromUnsignedLong(t->thread_id);
2706
0
            if (id == NULL) {
2707
0
                goto fail;
2708
0
            }
2709
0
            PyObject *exc = err_info->exc_value;
2710
0
            assert(exc == NULL ||
2711
0
                   exc == Py_None ||
2712
0
                   PyExceptionInstance_Check(exc));
2713
2714
0
            int stat = PyDict_SetItem(result, id, exc == NULL ? Py_None : exc);
2715
0
            Py_DECREF(id);
2716
0
            if (stat < 0) {
2717
0
                goto fail;
2718
0
            }
2719
0
        }
2720
0
    }
2721
0
    goto done;
2722
2723
0
fail:
2724
0
    Py_CLEAR(result);
2725
2726
0
done:
2727
0
    HEAD_UNLOCK(runtime);
2728
0
    _PyEval_StartTheWorldAll(runtime);
2729
0
    return result;
2730
0
}
2731
2732
2733
/***********************************/
2734
/* Python "auto thread state" API. */
2735
/***********************************/
2736
2737
/* Internal initialization/finalization functions called by
2738
   Py_Initialize/Py_FinalizeEx
2739
*/
2740
PyStatus
2741
_PyGILState_Init(PyInterpreterState *interp)
2742
28
{
2743
28
    if (!_Py_IsMainInterpreter(interp)) {
2744
        /* Currently, PyGILState is shared by all interpreters. The main
2745
         * interpreter is responsible to initialize it. */
2746
0
        return _PyStatus_OK();
2747
0
    }
2748
28
    _PyRuntimeState *runtime = interp->runtime;
2749
28
    assert(gilstate_get() == NULL);
2750
28
    assert(runtime->gilstate.autoInterpreterState == NULL);
2751
28
    runtime->gilstate.autoInterpreterState = interp;
2752
28
    return _PyStatus_OK();
2753
28
}
2754
2755
void
2756
_PyGILState_Fini(PyInterpreterState *interp)
2757
0
{
2758
0
    if (!_Py_IsMainInterpreter(interp)) {
2759
        /* Currently, PyGILState is shared by all interpreters. The main
2760
         * interpreter is responsible to initialize it. */
2761
0
        return;
2762
0
    }
2763
0
    interp->runtime->gilstate.autoInterpreterState = NULL;
2764
0
}
2765
2766
2767
// XXX Drop this.
2768
void
2769
_PyGILState_SetTstate(PyThreadState *tstate)
2770
28
{
2771
    /* must init with valid states */
2772
28
    assert(tstate != NULL);
2773
28
    assert(tstate->interp != NULL);
2774
2775
28
    if (!_Py_IsMainInterpreter(tstate->interp)) {
2776
        /* Currently, PyGILState is shared by all interpreters. The main
2777
         * interpreter is responsible to initialize it. */
2778
0
        return;
2779
0
    }
2780
2781
#ifndef NDEBUG
2782
    _PyRuntimeState *runtime = tstate->interp->runtime;
2783
2784
    assert(runtime->gilstate.autoInterpreterState == tstate->interp);
2785
    assert(gilstate_get() == tstate);
2786
    assert(tstate->gilstate_counter == 1);
2787
#endif
2788
28
}
2789
2790
PyInterpreterState *
2791
_PyGILState_GetInterpreterStateUnsafe(void)
2792
0
{
2793
0
    return _PyRuntime.gilstate.autoInterpreterState;
2794
0
}
2795
2796
/* The public functions */
2797
2798
PyThreadState *
2799
PyGILState_GetThisThreadState(void)
2800
0
{
2801
0
    return gilstate_get();
2802
0
}
2803
2804
int
2805
PyGILState_Check(void)
2806
0
{
2807
0
    _PyRuntimeState *runtime = &_PyRuntime;
2808
0
    if (!runtime->gilstate.check_enabled) {
2809
0
        return 1;
2810
0
    }
2811
2812
0
    PyThreadState *tstate = current_fast_get();
2813
0
    if (tstate == NULL) {
2814
0
        return 0;
2815
0
    }
2816
2817
0
    PyThreadState *tcur = gilstate_get();
2818
0
    return (tstate == tcur);
2819
0
}
2820
2821
PyGILState_STATE
2822
PyGILState_Ensure(void)
2823
0
{
2824
0
    _PyRuntimeState *runtime = &_PyRuntime;
2825
2826
    /* Note that we do not auto-init Python here - apart from
2827
       potential races with 2 threads auto-initializing, pep-311
2828
       spells out other issues.  Embedders are expected to have
2829
       called Py_Initialize(). */
2830
2831
    /* Ensure that _PyEval_InitThreads() and _PyGILState_Init() have been
2832
       called by Py_Initialize()
2833
2834
       TODO: This isn't thread-safe. There's no protection here against
2835
       concurrent finalization of the interpreter; it's simply a guard
2836
       for *after* the interpreter has finalized.
2837
     */
2838
0
    if (!_PyEval_ThreadsInitialized() || runtime->gilstate.autoInterpreterState == NULL) {
2839
0
        PyThread_hang_thread();
2840
0
    }
2841
2842
0
    PyThreadState *tcur = gilstate_get();
2843
0
    int has_gil;
2844
0
    if (tcur == NULL) {
2845
        /* Create a new Python thread state for this thread */
2846
        // XXX Use PyInterpreterState_EnsureThreadState()?
2847
0
        tcur = new_threadstate(runtime->gilstate.autoInterpreterState,
2848
0
                               _PyThreadState_WHENCE_GILSTATE);
2849
0
        if (tcur == NULL) {
2850
0
            Py_FatalError("Couldn't create thread-state for new thread");
2851
0
        }
2852
0
        bind_tstate(tcur);
2853
0
        bind_gilstate_tstate(tcur);
2854
2855
        /* This is our thread state!  We'll need to delete it in the
2856
           matching call to PyGILState_Release(). */
2857
0
        assert(tcur->gilstate_counter == 1);
2858
0
        tcur->gilstate_counter = 0;
2859
0
        has_gil = 0; /* new thread state is never current */
2860
0
    }
2861
0
    else {
2862
0
        has_gil = holds_gil(tcur);
2863
0
    }
2864
2865
0
    if (!has_gil) {
2866
0
        PyEval_RestoreThread(tcur);
2867
0
    }
2868
2869
    /* Update our counter in the thread-state - no need for locks:
2870
       - tcur will remain valid as we hold the GIL.
2871
       - the counter is safe as we are the only thread "allowed"
2872
         to modify this value
2873
    */
2874
0
    ++tcur->gilstate_counter;
2875
2876
0
    return has_gil ? PyGILState_LOCKED : PyGILState_UNLOCKED;
2877
0
}
2878
2879
void
2880
PyGILState_Release(PyGILState_STATE oldstate)
2881
0
{
2882
0
    PyThreadState *tstate = gilstate_get();
2883
0
    if (tstate == NULL) {
2884
0
        Py_FatalError("auto-releasing thread-state, "
2885
0
                      "but no thread-state for this thread");
2886
0
    }
2887
2888
    /* We must hold the GIL and have our thread state current */
2889
0
    if (!holds_gil(tstate)) {
2890
0
        _Py_FatalErrorFormat(__func__,
2891
0
                             "thread state %p must be current when releasing",
2892
0
                             tstate);
2893
0
    }
2894
0
    --tstate->gilstate_counter;
2895
0
    assert(tstate->gilstate_counter >= 0); /* illegal counter value */
2896
2897
    /* If we're going to destroy this thread-state, we must
2898
     * clear it while the GIL is held, as destructors may run.
2899
     */
2900
0
    if (tstate->gilstate_counter == 0) {
2901
        /* can't have been locked when we created it */
2902
0
        assert(oldstate == PyGILState_UNLOCKED);
2903
        // XXX Unbind tstate here.
2904
        // gh-119585: `PyThreadState_Clear()` may call destructors that
2905
        // themselves use PyGILState_Ensure and PyGILState_Release, so make
2906
        // sure that gilstate_counter is not zero when calling it.
2907
0
        ++tstate->gilstate_counter;
2908
0
        PyThreadState_Clear(tstate);
2909
0
        --tstate->gilstate_counter;
2910
        /* Delete the thread-state.  Note this releases the GIL too!
2911
         * It's vital that the GIL be held here, to avoid shutdown
2912
         * races; see bugs 225673 and 1061968 (that nasty bug has a
2913
         * habit of coming back).
2914
         */
2915
0
        assert(tstate->gilstate_counter == 0);
2916
0
        assert(current_fast_get() == tstate);
2917
0
        _PyThreadState_DeleteCurrent(tstate);
2918
0
    }
2919
    /* Release the lock if necessary */
2920
0
    else if (oldstate == PyGILState_UNLOCKED) {
2921
0
        PyEval_SaveThread();
2922
0
    }
2923
0
}
2924
2925
2926
/*************/
2927
/* Other API */
2928
/*************/
2929
2930
_PyFrameEvalFunction
2931
_PyInterpreterState_GetEvalFrameFunc(PyInterpreterState *interp)
2932
0
{
2933
0
    if (interp->eval_frame == NULL) {
2934
0
        return _PyEval_EvalFrameDefault;
2935
0
    }
2936
0
    return interp->eval_frame;
2937
0
}
2938
2939
2940
void
2941
_PyInterpreterState_SetEvalFrameFunc(PyInterpreterState *interp,
2942
                                     _PyFrameEvalFunction eval_frame)
2943
0
{
2944
0
    if (eval_frame == _PyEval_EvalFrameDefault) {
2945
0
        eval_frame = NULL;
2946
0
    }
2947
0
    if (eval_frame == interp->eval_frame) {
2948
0
        return;
2949
0
    }
2950
#ifdef _Py_TIER2
2951
    if (eval_frame != NULL) {
2952
        _Py_Executors_InvalidateAll(interp, 1);
2953
    }
2954
#endif
2955
0
    RARE_EVENT_INC(set_eval_frame_func);
2956
0
    _PyEval_StopTheWorld(interp);
2957
0
    interp->eval_frame = eval_frame;
2958
0
    _PyEval_StartTheWorld(interp);
2959
0
}
2960
2961
2962
const PyConfig*
2963
_PyInterpreterState_GetConfig(PyInterpreterState *interp)
2964
118M
{
2965
118M
    return &interp->config;
2966
118M
}
2967
2968
2969
const PyConfig*
2970
_Py_GetConfig(void)
2971
162k
{
2972
162k
    PyThreadState *tstate = current_fast_get();
2973
162k
    _Py_EnsureTstateNotNULL(tstate);
2974
162k
    return _PyInterpreterState_GetConfig(tstate->interp);
2975
162k
}
2976
2977
2978
int
2979
_PyInterpreterState_HasFeature(PyInterpreterState *interp, unsigned long feature)
2980
0
{
2981
0
    return ((interp->feature_flags & feature) != 0);
2982
0
}
2983
2984
2985
229k
#define MINIMUM_OVERHEAD 1000
2986
2987
static PyObject **
2988
push_chunk(PyThreadState *tstate, int size)
2989
229k
{
2990
229k
    int allocate_size = _PY_DATA_STACK_CHUNK_SIZE;
2991
229k
    while (allocate_size < (int)sizeof(PyObject*)*(size + MINIMUM_OVERHEAD)) {
2992
0
        allocate_size *= 2;
2993
0
    }
2994
229k
    _PyStackChunk *new = allocate_chunk(allocate_size, tstate->datastack_chunk);
2995
229k
    if (new == NULL) {
2996
0
        return NULL;
2997
0
    }
2998
229k
    if (tstate->datastack_chunk) {
2999
229k
        tstate->datastack_chunk->top = tstate->datastack_top -
3000
229k
                                       &tstate->datastack_chunk->data[0];
3001
229k
    }
3002
229k
    tstate->datastack_chunk = new;
3003
229k
    tstate->datastack_limit = (PyObject **)(((char *)new) + allocate_size);
3004
    // When new is the "root" chunk (i.e. new->previous == NULL), we can keep
3005
    // _PyThreadState_PopFrame from freeing it later by "skipping" over the
3006
    // first element:
3007
229k
    PyObject **res = &new->data[new->previous == NULL];
3008
229k
    tstate->datastack_top = res + size;
3009
229k
    return res;
3010
229k
}
3011
3012
_PyInterpreterFrame *
3013
_PyThreadState_PushFrame(PyThreadState *tstate, size_t size)
3014
221M
{
3015
221M
    assert(size < INT_MAX/sizeof(PyObject *));
3016
221M
    if (_PyThreadState_HasStackSpace(tstate, (int)size)) {
3017
220M
        _PyInterpreterFrame *res = (_PyInterpreterFrame *)tstate->datastack_top;
3018
220M
        tstate->datastack_top += size;
3019
220M
        return res;
3020
220M
    }
3021
229k
    return (_PyInterpreterFrame *)push_chunk(tstate, (int)size);
3022
221M
}
3023
3024
void
3025
_PyThreadState_PopFrame(PyThreadState *tstate, _PyInterpreterFrame * frame)
3026
1.00G
{
3027
1.00G
    assert(tstate->datastack_chunk);
3028
1.00G
    PyObject **base = (PyObject **)frame;
3029
1.00G
    if (base == &tstate->datastack_chunk->data[0]) {
3030
229k
        _PyStackChunk *chunk = tstate->datastack_chunk;
3031
229k
        _PyStackChunk *previous = chunk->previous;
3032
        // push_chunk ensures that the root chunk is never popped:
3033
229k
        assert(previous);
3034
229k
        tstate->datastack_top = &previous->data[previous->top];
3035
229k
        tstate->datastack_chunk = previous;
3036
229k
        _PyObject_VirtualFree(chunk, chunk->size);
3037
229k
        tstate->datastack_limit = (PyObject **)(((char *)previous) + previous->size);
3038
229k
    }
3039
1.00G
    else {
3040
1.00G
        assert(tstate->datastack_top);
3041
1.00G
        assert(tstate->datastack_top >= base);
3042
1.00G
        tstate->datastack_top = base;
3043
1.00G
    }
3044
1.00G
}
3045
3046
3047
#ifndef NDEBUG
3048
// Check that a Python thread state valid. In practice, this function is used
3049
// on a Python debug build to check if 'tstate' is a dangling pointer, if the
3050
// PyThreadState memory has been freed.
3051
//
3052
// Usage:
3053
//
3054
//     assert(_PyThreadState_CheckConsistency(tstate));
3055
int
3056
_PyThreadState_CheckConsistency(PyThreadState *tstate)
3057
{
3058
    assert(!_PyMem_IsPtrFreed(tstate));
3059
    assert(!_PyMem_IsPtrFreed(tstate->interp));
3060
    return 1;
3061
}
3062
#endif
3063
3064
3065
// Check if a Python thread must call _PyThreadState_HangThread(), rather than
3066
// taking the GIL or attaching to the interpreter if Py_Finalize() has been
3067
// called.
3068
//
3069
// When this function is called by a daemon thread after Py_Finalize() has been
3070
// called, the GIL may no longer exist.
3071
//
3072
// tstate must be non-NULL.
3073
int
3074
_PyThreadState_MustExit(PyThreadState *tstate)
3075
5.98M
{
3076
5.98M
    int state = _Py_atomic_load_int_relaxed(&tstate->state);
3077
5.98M
    return state == _Py_THREAD_SHUTTING_DOWN;
3078
5.98M
}
3079
3080
void
3081
_PyThreadState_HangThread(PyThreadState *tstate)
3082
0
{
3083
0
    _PyThreadStateImpl *tstate_impl = (_PyThreadStateImpl *)tstate;
3084
0
    decref_threadstate(tstate_impl);
3085
0
    PyThread_hang_thread();
3086
0
}
3087
3088
/********************/
3089
/* mimalloc support */
3090
/********************/
3091
3092
static void
3093
tstate_mimalloc_bind(PyThreadState *tstate)
3094
28
{
3095
#ifdef Py_GIL_DISABLED
3096
    struct _mimalloc_thread_state *mts = &((_PyThreadStateImpl*)tstate)->mimalloc;
3097
3098
    // Initialize the mimalloc thread state. This must be called from the
3099
    // same thread that will use the thread state. The "mem" heap doubles as
3100
    // the "backing" heap.
3101
    mi_tld_t *tld = &mts->tld;
3102
    _mi_tld_init(tld, &mts->heaps[_Py_MIMALLOC_HEAP_MEM]);
3103
    llist_init(&mts->page_list);
3104
3105
    // Exiting threads push any remaining in-use segments to the abandoned
3106
    // pool to be re-claimed later by other threads. We use per-interpreter
3107
    // pools to keep Python objects from different interpreters separate.
3108
    tld->segments.abandoned = &tstate->interp->mimalloc.abandoned_pool;
3109
3110
    // Don't fill in the first N bytes up to ob_type in debug builds. We may
3111
    // access ob_tid and the refcount fields in the dict and list lock-less
3112
    // accesses, so they must remain valid for a while after deallocation.
3113
    size_t base_offset = offsetof(PyObject, ob_type);
3114
    if (_PyMem_DebugEnabled()) {
3115
        // The debug allocator adds two words at the beginning of each block.
3116
        base_offset += 2 * sizeof(size_t);
3117
    }
3118
    size_t debug_offsets[_Py_MIMALLOC_HEAP_COUNT] = {
3119
        [_Py_MIMALLOC_HEAP_OBJECT] = base_offset,
3120
        [_Py_MIMALLOC_HEAP_GC] = base_offset,
3121
        [_Py_MIMALLOC_HEAP_GC_PRE] = base_offset + 2 * sizeof(PyObject *),
3122
    };
3123
3124
    // Initialize each heap
3125
    for (uint8_t i = 0; i < _Py_MIMALLOC_HEAP_COUNT; i++) {
3126
        _mi_heap_init_ex(&mts->heaps[i], tld, _mi_arena_id_none(), false, i);
3127
        mts->heaps[i].debug_offset = (uint8_t)debug_offsets[i];
3128
    }
3129
3130
    // Heaps that store Python objects should use QSBR to delay freeing
3131
    // mimalloc pages while there may be concurrent lock-free readers.
3132
    mts->heaps[_Py_MIMALLOC_HEAP_OBJECT].page_use_qsbr = true;
3133
    mts->heaps[_Py_MIMALLOC_HEAP_GC].page_use_qsbr = true;
3134
    mts->heaps[_Py_MIMALLOC_HEAP_GC_PRE].page_use_qsbr = true;
3135
3136
    // By default, object allocations use _Py_MIMALLOC_HEAP_OBJECT.
3137
    // _PyObject_GC_New() and similar functions temporarily override this to
3138
    // use one of the GC heaps.
3139
    mts->current_object_heap = &mts->heaps[_Py_MIMALLOC_HEAP_OBJECT];
3140
3141
    _Py_atomic_store_int(&mts->initialized, 1);
3142
#endif
3143
28
}
3144
3145
void
3146
_PyThreadState_ClearMimallocHeaps(PyThreadState *tstate)
3147
0
{
3148
#ifdef Py_GIL_DISABLED
3149
    if (!tstate->_status.bound) {
3150
        // The mimalloc heaps are only initialized when the thread is bound.
3151
        return;
3152
    }
3153
3154
    _PyThreadStateImpl *tstate_impl = (_PyThreadStateImpl *)tstate;
3155
    for (Py_ssize_t i = 0; i < _Py_MIMALLOC_HEAP_COUNT; i++) {
3156
        // Abandon all segments in use by this thread. This pushes them to
3157
        // a shared pool to later be reclaimed by other threads. It's important
3158
        // to do this before the thread state is destroyed so that objects
3159
        // remain visible to the GC.
3160
        _mi_heap_collect_abandon(&tstate_impl->mimalloc.heaps[i]);
3161
    }
3162
#endif
3163
0
}
3164
3165
3166
int
3167
_Py_IsMainThread(void)
3168
104M
{
3169
104M
    unsigned long thread = PyThread_get_thread_ident();
3170
104M
    return (thread == _PyRuntime.main_thread);
3171
104M
}
3172
3173
3174
PyInterpreterState *
3175
_PyInterpreterState_Main(void)
3176
101M
{
3177
101M
    return _PyRuntime.interpreters.main;
3178
101M
}
3179
3180
3181
int
3182
_Py_IsMainInterpreterFinalizing(PyInterpreterState *interp)
3183
0
{
3184
    /* bpo-39877: Access _PyRuntime directly rather than using
3185
       tstate->interp->runtime to support calls from Python daemon threads.
3186
       After Py_Finalize() has been called, tstate can be a dangling pointer:
3187
       point to PyThreadState freed memory. */
3188
0
    return (_PyRuntimeState_GetFinalizing(&_PyRuntime) != NULL &&
3189
0
            interp == &_PyRuntime._main_interpreter);
3190
0
}
3191
3192
3193
const PyConfig *
3194
_Py_GetMainConfig(void)
3195
0
{
3196
0
    PyInterpreterState *interp = _PyInterpreterState_Main();
3197
0
    if (interp == NULL) {
3198
0
        return NULL;
3199
0
    }
3200
0
    return _PyInterpreterState_GetConfig(interp);
3201
0
}