Coverage Report

Created: 2026-01-10 07:03

next uncovered line (L), next uncovered region (R), next uncovered branch (B)
/src/cpython3/Python/pystate.c
Line
Count
Source
1
2
/* Thread and interpreter state structures and their interfaces */
3
4
#include "Python.h"
5
#include "pycore_abstract.h"      // _PyIndex_Check()
6
#include "pycore_audit.h"         // _Py_AuditHookEntry
7
#include "pycore_backoff.h"       // JUMP_BACKWARD_INITIAL_VALUE, SIDE_EXIT_INITIAL_VALUE
8
#include "pycore_ceval.h"         // _PyEval_AcquireLock()
9
#include "pycore_codecs.h"        // _PyCodec_Fini()
10
#include "pycore_critical_section.h" // _PyCriticalSection_Resume()
11
#include "pycore_dtoa.h"          // _dtoa_state_INIT()
12
#include "pycore_freelist.h"      // _PyObject_ClearFreeLists()
13
#include "pycore_initconfig.h"    // _PyStatus_OK()
14
#include "pycore_interpframe.h"   // _PyThreadState_HasStackSpace()
15
#include "pycore_object.h"        // _PyType_InitCache()
16
#include "pycore_obmalloc.h"      // _PyMem_obmalloc_state_on_heap()
17
#include "pycore_optimizer.h"     // JIT_CLEANUP_THRESHOLD
18
#include "pycore_parking_lot.h"   // _PyParkingLot_AfterFork()
19
#include "pycore_pyerrors.h"      // _PyErr_Clear()
20
#include "pycore_pylifecycle.h"   // _PyAST_Fini()
21
#include "pycore_pymem.h"         // _PyMem_DebugEnabled()
22
#include "pycore_runtime.h"       // _PyRuntime
23
#include "pycore_runtime_init.h"  // _PyRuntimeState_INIT
24
#include "pycore_stackref.h"      // Py_STACKREF_DEBUG
25
#include "pycore_stats.h"         // FT_STAT_WORLD_STOP_INC()
26
#include "pycore_time.h"          // _PyTime_Init()
27
#include "pycore_uop.h"           // UOP_BUFFER_SIZE
28
#include "pycore_uniqueid.h"      // _PyObject_FinalizePerThreadRefcounts()
29
30
31
/* --------------------------------------------------------------------------
32
CAUTION
33
34
Always use PyMem_RawMalloc() and PyMem_RawFree() directly in this file.  A
35
number of these functions are advertised as safe to call when the GIL isn't
36
held, and in a debug build Python redirects (e.g.) PyMem_NEW (etc) to Python's
37
debugging obmalloc functions.  Those aren't thread-safe (they rely on the GIL
38
to avoid the expense of doing their own locking).
39
-------------------------------------------------------------------------- */
40
41
#ifdef HAVE_DLOPEN
42
#  ifdef HAVE_DLFCN_H
43
#    include <dlfcn.h>
44
#  endif
45
#  if !HAVE_DECL_RTLD_LAZY
46
#    define RTLD_LAZY 1
47
#  endif
48
#endif
49
50
51
/****************************************/
52
/* helpers for the current thread state */
53
/****************************************/
54
55
// API for the current thread state is further down.
56
57
/* "current" means one of:
58
   - bound to the current OS thread
59
   - holds the GIL
60
 */
61
62
//-------------------------------------------------
63
// a highly efficient lookup for the current thread
64
//-------------------------------------------------
65
66
/*
67
   The stored thread state is set by PyThreadState_Swap().
68
69
   For each of these functions, the GIL must be held by the current thread.
70
 */
71
72
73
/* The attached thread state for the current thread. */
74
_Py_thread_local PyThreadState *_Py_tss_tstate = NULL;
75
76
/* The "bound" thread state used by PyGILState_Ensure(),
77
   also known as a "gilstate." */
78
_Py_thread_local PyThreadState *_Py_tss_gilstate = NULL;
79
80
/* The interpreter of the attached thread state,
81
   and is same as tstate->interp. */
82
_Py_thread_local PyInterpreterState *_Py_tss_interp = NULL;
83
84
static inline PyThreadState *
85
current_fast_get(void)
86
160M
{
87
160M
    return _Py_tss_tstate;
88
160M
}
89
90
static inline void
91
current_fast_set(_PyRuntimeState *Py_UNUSED(runtime), PyThreadState *tstate)
92
293k
{
93
293k
    assert(tstate != NULL);
94
293k
    _Py_tss_tstate = tstate;
95
293k
    assert(tstate->interp != NULL);
96
293k
    _Py_tss_interp = tstate->interp;
97
293k
}
98
99
static inline void
100
current_fast_clear(_PyRuntimeState *Py_UNUSED(runtime))
101
293k
{
102
293k
    _Py_tss_tstate = NULL;
103
293k
    _Py_tss_interp = NULL;
104
293k
}
105
106
#define tstate_verify_not_active(tstate) \
107
0
    if (tstate == current_fast_get()) { \
108
0
        _Py_FatalErrorFormat(__func__, "tstate %p is still current", tstate); \
109
0
    }
110
111
PyThreadState *
112
_PyThreadState_GetCurrent(void)
113
14.8M
{
114
14.8M
    return current_fast_get();
115
14.8M
}
116
117
118
//---------------------------------------------
119
// The thread state used by PyGILState_Ensure()
120
//---------------------------------------------
121
122
/*
123
   The stored thread state is set by bind_tstate() (AKA PyThreadState_Bind().
124
125
   The GIL does no need to be held for these.
126
  */
127
128
static inline PyThreadState *
129
gilstate_get(void)
130
293k
{
131
293k
    return _Py_tss_gilstate;
132
293k
}
133
134
static inline void
135
gilstate_set(PyThreadState *tstate)
136
22
{
137
22
    assert(tstate != NULL);
138
22
    _Py_tss_gilstate = tstate;
139
22
}
140
141
static inline void
142
gilstate_clear(void)
143
0
{
144
0
    _Py_tss_gilstate = NULL;
145
0
}
146
147
148
#ifndef NDEBUG
149
static inline int tstate_is_alive(PyThreadState *tstate);
150
151
static inline int
152
tstate_is_bound(PyThreadState *tstate)
153
587k
{
154
587k
    return tstate->_status.bound && !tstate->_status.unbound;
155
587k
}
156
#endif  // !NDEBUG
157
158
static void bind_gilstate_tstate(PyThreadState *);
159
static void unbind_gilstate_tstate(PyThreadState *);
160
161
static void tstate_mimalloc_bind(PyThreadState *);
162
163
static void
164
bind_tstate(PyThreadState *tstate)
165
22
{
166
22
    assert(tstate != NULL);
167
22
    assert(tstate_is_alive(tstate) && !tstate->_status.bound);
168
22
    assert(!tstate->_status.unbound);  // just in case
169
22
    assert(!tstate->_status.bound_gilstate);
170
22
    assert(tstate != gilstate_get());
171
22
    assert(!tstate->_status.active);
172
22
    assert(tstate->thread_id == 0);
173
22
    assert(tstate->native_thread_id == 0);
174
175
    // Currently we don't necessarily store the thread state
176
    // in thread-local storage (e.g. per-interpreter).
177
178
22
    tstate->thread_id = PyThread_get_thread_ident();
179
22
#ifdef PY_HAVE_THREAD_NATIVE_ID
180
22
    tstate->native_thread_id = PyThread_get_thread_native_id();
181
22
#endif
182
183
#ifdef Py_GIL_DISABLED
184
    // Initialize biased reference counting inter-thread queue. Note that this
185
    // needs to be initialized from the active thread.
186
    _Py_brc_init_thread(tstate);
187
#endif
188
189
    // mimalloc state needs to be initialized from the active thread.
190
22
    tstate_mimalloc_bind(tstate);
191
192
22
    tstate->_status.bound = 1;
193
22
}
194
195
static void
196
unbind_tstate(PyThreadState *tstate)
197
0
{
198
0
    assert(tstate != NULL);
199
0
    assert(tstate_is_bound(tstate));
200
0
#ifndef HAVE_PTHREAD_STUBS
201
0
    assert(tstate->thread_id > 0);
202
0
#endif
203
0
#ifdef PY_HAVE_THREAD_NATIVE_ID
204
0
    assert(tstate->native_thread_id > 0);
205
0
#endif
206
207
    // We leave thread_id and native_thread_id alone
208
    // since they can be useful for debugging.
209
    // Check the `_status` field to know if these values
210
    // are still valid.
211
212
    // We leave tstate->_status.bound set to 1
213
    // to indicate it was previously bound.
214
0
    tstate->_status.unbound = 1;
215
0
}
216
217
218
/* Stick the thread state for this thread in thread specific storage.
219
220
   When a thread state is created for a thread by some mechanism
221
   other than PyGILState_Ensure(), it's important that the GILState
222
   machinery knows about it so it doesn't try to create another
223
   thread state for the thread.
224
   (This is a better fix for SF bug #1010677 than the first one attempted.)
225
226
   The only situation where you can legitimately have more than one
227
   thread state for an OS level thread is when there are multiple
228
   interpreters.
229
230
   Before 3.12, the PyGILState_*() APIs didn't work with multiple
231
   interpreters (see bpo-10915 and bpo-15751), so this function used
232
   to set TSS only once.  Thus, the first thread state created for that
233
   given OS level thread would "win", which seemed reasonable behaviour.
234
*/
235
236
static void
237
bind_gilstate_tstate(PyThreadState *tstate)
238
22
{
239
22
    assert(tstate != NULL);
240
22
    assert(tstate_is_alive(tstate));
241
22
    assert(tstate_is_bound(tstate));
242
    // XXX assert(!tstate->_status.active);
243
22
    assert(!tstate->_status.bound_gilstate);
244
245
22
    PyThreadState *tcur = gilstate_get();
246
22
    assert(tstate != tcur);
247
248
22
    if (tcur != NULL) {
249
0
        tcur->_status.bound_gilstate = 0;
250
0
    }
251
22
    gilstate_set(tstate);
252
22
    tstate->_status.bound_gilstate = 1;
253
22
}
254
255
static void
256
unbind_gilstate_tstate(PyThreadState *tstate)
257
0
{
258
0
    assert(tstate != NULL);
259
    // XXX assert(tstate_is_alive(tstate));
260
0
    assert(tstate_is_bound(tstate));
261
    // XXX assert(!tstate->_status.active);
262
0
    assert(tstate->_status.bound_gilstate);
263
0
    assert(tstate == gilstate_get());
264
0
    gilstate_clear();
265
0
    tstate->_status.bound_gilstate = 0;
266
0
}
267
268
269
//----------------------------------------------
270
// the thread state that currently holds the GIL
271
//----------------------------------------------
272
273
/* This is not exported, as it is not reliable!  It can only
274
   ever be compared to the state for the *current* thread.
275
   * If not equal, then it doesn't matter that the actual
276
     value may change immediately after comparison, as it can't
277
     possibly change to the current thread's state.
278
   * If equal, then the current thread holds the lock, so the value can't
279
     change until we yield the lock.
280
*/
281
static int
282
holds_gil(PyThreadState *tstate)
283
0
{
284
    // XXX Fall back to tstate->interp->runtime->ceval.gil.last_holder
285
    // (and tstate->interp->runtime->ceval.gil.locked).
286
0
    assert(tstate != NULL);
287
    /* Must be the tstate for this thread */
288
0
    assert(tstate == gilstate_get());
289
0
    return tstate == current_fast_get();
290
0
}
291
292
293
/****************************/
294
/* the global runtime state */
295
/****************************/
296
297
//----------
298
// lifecycle
299
//----------
300
301
/* Suppress deprecation warning for PyBytesObject.ob_shash */
302
_Py_COMP_DIAG_PUSH
303
_Py_COMP_DIAG_IGNORE_DEPR_DECLS
304
/* We use "initial" if the runtime gets re-used
305
   (e.g. Py_Finalize() followed by Py_Initialize().
306
   Note that we initialize "initial" relative to _PyRuntime,
307
   to ensure pre-initialized pointers point to the active
308
   runtime state (and not "initial"). */
309
static const _PyRuntimeState initial = _PyRuntimeState_INIT(_PyRuntime, "");
310
_Py_COMP_DIAG_POP
311
312
#define LOCKS_INIT(runtime) \
313
0
    { \
314
0
        &(runtime)->interpreters.mutex, \
315
0
        &(runtime)->xi.data_lookup.registry.mutex, \
316
0
        &(runtime)->unicode_state.ids.mutex, \
317
0
        &(runtime)->imports.extensions.mutex, \
318
0
        &(runtime)->ceval.pending_mainthread.mutex, \
319
0
        &(runtime)->atexit.mutex, \
320
0
        &(runtime)->audit_hooks.mutex, \
321
0
        &(runtime)->allocators.mutex, \
322
0
        &(runtime)->_main_interpreter.types.mutex, \
323
0
        &(runtime)->_main_interpreter.code_state.mutex, \
324
0
    }
325
326
static void
327
init_runtime(_PyRuntimeState *runtime,
328
             void *open_code_hook, void *open_code_userdata,
329
             _Py_AuditHookEntry *audit_hook_head,
330
             Py_ssize_t unicode_next_index)
331
22
{
332
22
    assert(!runtime->preinitializing);
333
22
    assert(!runtime->preinitialized);
334
22
    assert(!runtime->core_initialized);
335
22
    assert(!runtime->initialized);
336
22
    assert(!runtime->_initialized);
337
338
22
    runtime->open_code_hook = open_code_hook;
339
22
    runtime->open_code_userdata = open_code_userdata;
340
22
    runtime->audit_hooks.head = audit_hook_head;
341
342
22
    PyPreConfig_InitPythonConfig(&runtime->preconfig);
343
344
    // Set it to the ID of the main thread of the main interpreter.
345
22
    runtime->main_thread = PyThread_get_thread_ident();
346
347
22
    runtime->unicode_state.ids.next_index = unicode_next_index;
348
22
    runtime->_initialized = 1;
349
22
}
350
351
PyStatus
352
_PyRuntimeState_Init(_PyRuntimeState *runtime)
353
22
{
354
    /* We preserve the hook across init, because there is
355
       currently no public API to set it between runtime
356
       initialization and interpreter initialization. */
357
22
    void *open_code_hook = runtime->open_code_hook;
358
22
    void *open_code_userdata = runtime->open_code_userdata;
359
22
    _Py_AuditHookEntry *audit_hook_head = runtime->audit_hooks.head;
360
    // bpo-42882: Preserve next_index value if Py_Initialize()/Py_Finalize()
361
    // is called multiple times.
362
22
    Py_ssize_t unicode_next_index = runtime->unicode_state.ids.next_index;
363
364
22
    if (runtime->_initialized) {
365
        // Py_Initialize() must be running again.
366
        // Reset to _PyRuntimeState_INIT.
367
0
        memcpy(runtime, &initial, sizeof(*runtime));
368
        // Preserve the cookie from the original runtime.
369
0
        memcpy(runtime->debug_offsets.cookie, _Py_Debug_Cookie, 8);
370
0
        assert(!runtime->_initialized);
371
0
    }
372
373
22
    PyStatus status = _PyTime_Init(&runtime->time);
374
22
    if (_PyStatus_EXCEPTION(status)) {
375
0
        return status;
376
0
    }
377
378
22
    init_runtime(runtime, open_code_hook, open_code_userdata, audit_hook_head,
379
22
                 unicode_next_index);
380
381
22
    return _PyStatus_OK();
382
22
}
383
384
void
385
_PyRuntimeState_Fini(_PyRuntimeState *runtime)
386
0
{
387
#ifdef Py_REF_DEBUG
388
    /* The count is cleared by _Py_FinalizeRefTotal(). */
389
    assert(runtime->object_state.interpreter_leaks == 0);
390
#endif
391
0
    gilstate_clear();
392
0
}
393
394
#ifdef HAVE_FORK
395
/* This function is called from PyOS_AfterFork_Child to ensure that
396
   newly created child processes do not share locks with the parent. */
397
PyStatus
398
_PyRuntimeState_ReInitThreads(_PyRuntimeState *runtime)
399
0
{
400
    // This was initially set in _PyRuntimeState_Init().
401
0
    runtime->main_thread = PyThread_get_thread_ident();
402
403
    // Clears the parking lot. Any waiting threads are dead. This must be
404
    // called before releasing any locks that use the parking lot.
405
0
    _PyParkingLot_AfterFork();
406
407
    // Re-initialize global locks
408
0
    PyMutex *locks[] = LOCKS_INIT(runtime);
409
0
    for (size_t i = 0; i < Py_ARRAY_LENGTH(locks); i++) {
410
0
        _PyMutex_at_fork_reinit(locks[i]);
411
0
    }
412
#ifdef Py_GIL_DISABLED
413
    for (PyInterpreterState *interp = runtime->interpreters.head;
414
         interp != NULL; interp = interp->next)
415
    {
416
        for (int i = 0; i < NUM_WEAKREF_LIST_LOCKS; i++) {
417
            _PyMutex_at_fork_reinit(&interp->weakref_locks[i]);
418
        }
419
    }
420
#endif
421
422
0
    _PyTypes_AfterFork();
423
424
0
    _PyThread_AfterFork(&runtime->threads);
425
426
0
    return _PyStatus_OK();
427
0
}
428
#endif
429
430
431
/*************************************/
432
/* the per-interpreter runtime state */
433
/*************************************/
434
435
//----------
436
// lifecycle
437
//----------
438
439
/* Calling this indicates that the runtime is ready to create interpreters. */
440
441
PyStatus
442
_PyInterpreterState_Enable(_PyRuntimeState *runtime)
443
22
{
444
22
    struct pyinterpreters *interpreters = &runtime->interpreters;
445
22
    interpreters->next_id = 0;
446
22
    return _PyStatus_OK();
447
22
}
448
449
static PyInterpreterState *
450
alloc_interpreter(void)
451
0
{
452
    // Aligned allocation for PyInterpreterState.
453
    // the first word of the memory block is used to store
454
    // the original pointer to be used later to free the memory.
455
0
    size_t alignment = _Alignof(PyInterpreterState);
456
0
    size_t allocsize = sizeof(PyInterpreterState) + sizeof(void *) + alignment - 1;
457
0
    void *mem = PyMem_RawCalloc(1, allocsize);
458
0
    if (mem == NULL) {
459
0
        return NULL;
460
0
    }
461
0
    void *ptr = _Py_ALIGN_UP((char *)mem + sizeof(void *), alignment);
462
0
    ((void **)ptr)[-1] = mem;
463
0
    assert(_Py_IS_ALIGNED(ptr, alignment));
464
0
    return ptr;
465
0
}
466
467
static void
468
free_interpreter(PyInterpreterState *interp)
469
0
{
470
#ifdef Py_STATS
471
    if (interp->pystats_struct) {
472
        PyMem_RawFree(interp->pystats_struct);
473
        interp->pystats_struct = NULL;
474
    }
475
#endif
476
    // The main interpreter is statically allocated so
477
    // should not be freed.
478
0
    if (interp != &_PyRuntime._main_interpreter) {
479
0
        if (_PyMem_obmalloc_state_on_heap(interp)) {
480
            // interpreter has its own obmalloc state, free it
481
0
            PyMem_RawFree(interp->obmalloc);
482
0
            interp->obmalloc = NULL;
483
0
        }
484
0
        assert(_Py_IS_ALIGNED(interp, _Alignof(PyInterpreterState)));
485
0
        PyMem_RawFree(((void **)interp)[-1]);
486
0
    }
487
0
}
488
489
#ifndef NDEBUG
490
static inline int check_interpreter_whence(long);
491
#endif
492
493
extern _Py_CODEUNIT *
494
_Py_LazyJitShim(
495
    struct _PyExecutorObject *exec, _PyInterpreterFrame *frame, _PyStackRef *stack_pointer, PyThreadState *tstate
496
);
497
498
/* Get the interpreter state to a minimal consistent state.
499
   Further init happens in pylifecycle.c before it can be used.
500
   All fields not initialized here are expected to be zeroed out,
501
   e.g. by PyMem_RawCalloc() or memset(), or otherwise pre-initialized.
502
   The runtime state is not manipulated.  Instead it is assumed that
503
   the interpreter is getting added to the runtime.
504
505
   Note that the main interpreter was statically initialized as part
506
   of the runtime and most state is already set properly.  That leaves
507
   a small number of fields to initialize dynamically, as well as some
508
   that are initialized lazily.
509
510
   For subinterpreters we memcpy() the main interpreter in
511
   PyInterpreterState_New(), leaving it in the same mostly-initialized
512
   state.  The only difference is that the interpreter has some
513
   self-referential state that is statically initializexd to the
514
   main interpreter.  We fix those fields here, in addition
515
   to the other dynamically initialized fields.
516
  */
517
static PyStatus
518
init_interpreter(PyInterpreterState *interp,
519
                 _PyRuntimeState *runtime, int64_t id,
520
                 PyInterpreterState *next,
521
                 long whence)
522
22
{
523
22
    if (interp->_initialized) {
524
0
        return _PyStatus_ERR("interpreter already initialized");
525
0
    }
526
527
22
    assert(interp->_whence == _PyInterpreterState_WHENCE_NOTSET);
528
22
    assert(check_interpreter_whence(whence) == 0);
529
22
    interp->_whence = whence;
530
531
22
    assert(runtime != NULL);
532
22
    interp->runtime = runtime;
533
534
22
    assert(id > 0 || (id == 0 && interp == runtime->interpreters.main));
535
22
    interp->id = id;
536
537
22
    interp->id_refcount = 0;
538
539
22
    assert(runtime->interpreters.head == interp);
540
22
    assert(next != NULL || (interp == runtime->interpreters.main));
541
22
    interp->next = next;
542
543
22
    interp->threads.preallocated = &interp->_initial_thread;
544
545
    // We would call _PyObject_InitState() at this point
546
    // if interp->feature_flags were alredy set.
547
548
22
    _PyEval_InitState(interp);
549
22
    _PyGC_InitState(&interp->gc);
550
22
    PyConfig_InitPythonConfig(&interp->config);
551
22
    _PyType_InitCache(interp);
552
#ifdef Py_GIL_DISABLED
553
    _Py_brc_init_state(interp);
554
#endif
555
556
22
    llist_init(&interp->mem_free_queue.head);
557
22
    llist_init(&interp->asyncio_tasks_head);
558
22
    interp->asyncio_tasks_lock = (PyMutex){0};
559
374
    for (int i = 0; i < _PY_MONITORING_UNGROUPED_EVENTS; i++) {
560
352
        interp->monitors.tools[i] = 0;
561
352
    }
562
198
    for (int t = 0; t < PY_MONITORING_TOOL_IDS; t++) {
563
3.52k
        for (int e = 0; e < _PY_MONITORING_EVENTS; e++) {
564
3.34k
            interp->monitoring_callables[t][e] = NULL;
565
566
3.34k
        }
567
176
        interp->monitoring_tool_versions[t] = 0;
568
176
    }
569
22
    interp->_code_object_generation = 0;
570
22
    interp->jit = false;
571
22
    interp->compiling = false;
572
22
    interp->executor_list_head = NULL;
573
22
    interp->executor_deletion_list_head = NULL;
574
22
    interp->executor_creation_counter = JIT_CLEANUP_THRESHOLD;
575
22
    if (interp != &runtime->_main_interpreter) {
576
        /* Fix the self-referential, statically initialized fields. */
577
0
        interp->dtoa = (struct _dtoa_state)_dtoa_state_INIT(interp);
578
0
    }
579
#if !defined(Py_GIL_DISABLED) && defined(Py_STACKREF_DEBUG)
580
    interp->next_stackref = INITIAL_STACKREF_INDEX;
581
    _Py_hashtable_allocator_t alloc = {
582
        .malloc = malloc,
583
        .free = free,
584
    };
585
    interp->open_stackrefs_table = _Py_hashtable_new_full(
586
        _Py_hashtable_hash_ptr,
587
        _Py_hashtable_compare_direct,
588
        NULL,
589
        NULL,
590
        &alloc
591
    );
592
#  ifdef Py_STACKREF_CLOSE_DEBUG
593
    interp->closed_stackrefs_table = _Py_hashtable_new_full(
594
        _Py_hashtable_hash_ptr,
595
        _Py_hashtable_compare_direct,
596
        NULL,
597
        NULL,
598
        &alloc
599
    );
600
#  endif
601
    _Py_stackref_associate(interp, Py_None, PyStackRef_None);
602
    _Py_stackref_associate(interp, Py_False, PyStackRef_False);
603
    _Py_stackref_associate(interp, Py_True, PyStackRef_True);
604
#endif
605
606
22
    interp->_initialized = 1;
607
22
    return _PyStatus_OK();
608
22
}
609
610
611
PyStatus
612
_PyInterpreterState_New(PyThreadState *tstate, PyInterpreterState **pinterp)
613
22
{
614
22
    *pinterp = NULL;
615
616
    // Don't get runtime from tstate since tstate can be NULL
617
22
    _PyRuntimeState *runtime = &_PyRuntime;
618
619
    // tstate is NULL when pycore_create_interpreter() calls
620
    // _PyInterpreterState_New() to create the main interpreter.
621
22
    if (tstate != NULL) {
622
0
        if (_PySys_Audit(tstate, "cpython.PyInterpreterState_New", NULL) < 0) {
623
0
            return _PyStatus_ERR("sys.audit failed");
624
0
        }
625
0
    }
626
627
    /* We completely serialize creation of multiple interpreters, since
628
       it simplifies things here and blocking concurrent calls isn't a problem.
629
       Regardless, we must fully block subinterpreter creation until
630
       after the main interpreter is created. */
631
22
    HEAD_LOCK(runtime);
632
633
22
    struct pyinterpreters *interpreters = &runtime->interpreters;
634
22
    int64_t id = interpreters->next_id;
635
22
    interpreters->next_id += 1;
636
637
    // Allocate the interpreter and add it to the runtime state.
638
22
    PyInterpreterState *interp;
639
22
    PyStatus status;
640
22
    PyInterpreterState *old_head = interpreters->head;
641
22
    if (old_head == NULL) {
642
        // We are creating the main interpreter.
643
22
        assert(interpreters->main == NULL);
644
22
        assert(id == 0);
645
646
22
        interp = &runtime->_main_interpreter;
647
22
        assert(interp->id == 0);
648
22
        assert(interp->next == NULL);
649
650
22
        interpreters->main = interp;
651
22
    }
652
0
    else {
653
0
        assert(interpreters->main != NULL);
654
0
        assert(id != 0);
655
656
0
        interp = alloc_interpreter();
657
0
        if (interp == NULL) {
658
0
            status = _PyStatus_NO_MEMORY();
659
0
            goto error;
660
0
        }
661
        // Set to _PyInterpreterState_INIT.
662
0
        memcpy(interp, &initial._main_interpreter, sizeof(*interp));
663
664
0
        if (id < 0) {
665
            /* overflow or Py_Initialize() not called yet! */
666
0
            status = _PyStatus_ERR("failed to get an interpreter ID");
667
0
            goto error;
668
0
        }
669
0
    }
670
22
    interpreters->head = interp;
671
672
22
    long whence = _PyInterpreterState_WHENCE_UNKNOWN;
673
22
    status = init_interpreter(interp, runtime,
674
22
                              id, old_head, whence);
675
22
    if (_PyStatus_EXCEPTION(status)) {
676
0
        goto error;
677
0
    }
678
679
22
    HEAD_UNLOCK(runtime);
680
681
22
    assert(interp != NULL);
682
22
    *pinterp = interp;
683
22
    return _PyStatus_OK();
684
685
0
error:
686
0
    HEAD_UNLOCK(runtime);
687
688
0
    if (interp != NULL) {
689
0
        free_interpreter(interp);
690
0
    }
691
0
    return status;
692
22
}
693
694
695
PyInterpreterState *
696
PyInterpreterState_New(void)
697
0
{
698
    // tstate can be NULL
699
0
    PyThreadState *tstate = current_fast_get();
700
701
0
    PyInterpreterState *interp;
702
0
    PyStatus status = _PyInterpreterState_New(tstate, &interp);
703
0
    if (_PyStatus_EXCEPTION(status)) {
704
0
        Py_ExitStatusException(status);
705
0
    }
706
0
    assert(interp != NULL);
707
0
    return interp;
708
0
}
709
710
#if !defined(Py_GIL_DISABLED) && defined(Py_STACKREF_DEBUG)
711
extern void
712
_Py_stackref_report_leaks(PyInterpreterState *interp);
713
#endif
714
715
static void
716
interpreter_clear(PyInterpreterState *interp, PyThreadState *tstate)
717
0
{
718
0
    assert(interp != NULL);
719
0
    assert(tstate != NULL);
720
0
    _PyRuntimeState *runtime = interp->runtime;
721
722
    /* XXX Conditions we need to enforce:
723
724
       * the GIL must be held by the current thread
725
       * tstate must be the "current" thread state (current_fast_get())
726
       * tstate->interp must be interp
727
       * for the main interpreter, tstate must be the main thread
728
     */
729
    // XXX Ideally, we would not rely on any thread state in this function
730
    // (and we would drop the "tstate" argument).
731
732
0
    if (_PySys_Audit(tstate, "cpython.PyInterpreterState_Clear", NULL) < 0) {
733
0
        _PyErr_Clear(tstate);
734
0
    }
735
736
    // Clear the current/main thread state last.
737
0
    _Py_FOR_EACH_TSTATE_BEGIN(interp, p) {
738
        // See https://github.com/python/cpython/issues/102126
739
        // Must be called without HEAD_LOCK held as it can deadlock
740
        // if any finalizer tries to acquire that lock.
741
0
        HEAD_UNLOCK(runtime);
742
0
        PyThreadState_Clear(p);
743
0
        HEAD_LOCK(runtime);
744
0
    }
745
0
    _Py_FOR_EACH_TSTATE_END(interp);
746
0
    if (tstate->interp == interp) {
747
        /* We fix tstate->_status below when we for sure aren't using it
748
           (e.g. no longer need the GIL). */
749
        // XXX Eliminate the need to do this.
750
0
        tstate->_status.cleared = 0;
751
0
    }
752
753
    /* It is possible that any of the objects below have a finalizer
754
       that runs Python code or otherwise relies on a thread state
755
       or even the interpreter state.  For now we trust that isn't
756
       a problem.
757
     */
758
    // XXX Make sure we properly deal with problematic finalizers.
759
760
0
    Py_CLEAR(interp->audit_hooks);
761
762
    // gh-140257: Threads have already been cleared, but daemon threads may
763
    // still access eval_breaker atomically via take_gil() right before they
764
    // hang. Use an atomic store to prevent data races during finalization.
765
0
    interp->ceval.instrumentation_version = 0;
766
0
    _Py_atomic_store_uintptr(&tstate->eval_breaker, 0);
767
768
0
    for (int i = 0; i < _PY_MONITORING_UNGROUPED_EVENTS; i++) {
769
0
        interp->monitors.tools[i] = 0;
770
0
    }
771
0
    for (int t = 0; t < PY_MONITORING_TOOL_IDS; t++) {
772
0
        for (int e = 0; e < _PY_MONITORING_EVENTS; e++) {
773
0
            Py_CLEAR(interp->monitoring_callables[t][e]);
774
0
        }
775
0
    }
776
0
    for (int t = 0; t < PY_MONITORING_TOOL_IDS; t++) {
777
0
        Py_CLEAR(interp->monitoring_tool_names[t]);
778
0
    }
779
0
    interp->_code_object_generation = 0;
780
#ifdef Py_GIL_DISABLED
781
    interp->tlbc_indices.tlbc_generation = 0;
782
#endif
783
784
0
    PyConfig_Clear(&interp->config);
785
0
    _PyCodec_Fini(interp);
786
787
0
    assert(interp->imports.modules == NULL);
788
0
    assert(interp->imports.modules_by_index == NULL);
789
0
    assert(interp->imports.importlib == NULL);
790
0
    assert(interp->imports.import_func == NULL);
791
792
0
    Py_CLEAR(interp->sysdict_copy);
793
0
    Py_CLEAR(interp->builtins_copy);
794
0
    Py_CLEAR(interp->dict);
795
0
#ifdef HAVE_FORK
796
0
    Py_CLEAR(interp->before_forkers);
797
0
    Py_CLEAR(interp->after_forkers_parent);
798
0
    Py_CLEAR(interp->after_forkers_child);
799
0
#endif
800
801
802
#ifdef _Py_TIER2
803
    _Py_ClearExecutorDeletionList(interp);
804
#endif
805
0
    _PyAST_Fini(interp);
806
0
    _PyAtExit_Fini(interp);
807
808
    // All Python types must be destroyed before the last GC collection. Python
809
    // types create a reference cycle to themselves in their in their
810
    // PyTypeObject.tp_mro member (the tuple contains the type).
811
812
    /* Last garbage collection on this interpreter */
813
0
    _PyGC_CollectNoFail(tstate);
814
0
    _PyGC_Fini(interp);
815
816
    // Finalize warnings after last gc so that any finalizers can
817
    // access warnings state
818
0
    _PyWarnings_Fini(interp);
819
0
    struct _PyExecutorObject *cold = interp->cold_executor;
820
0
    if (cold != NULL) {
821
0
        interp->cold_executor = NULL;
822
0
        assert(cold->vm_data.valid);
823
0
        assert(cold->vm_data.warm);
824
0
        _PyExecutor_Free(cold);
825
0
    }
826
827
0
    struct _PyExecutorObject *cold_dynamic = interp->cold_dynamic_executor;
828
0
    if (cold_dynamic != NULL) {
829
0
        interp->cold_dynamic_executor = NULL;
830
0
        assert(cold_dynamic->vm_data.valid);
831
0
        assert(cold_dynamic->vm_data.warm);
832
0
        _PyExecutor_Free(cold_dynamic);
833
0
    }
834
    /* We don't clear sysdict and builtins until the end of this function.
835
       Because clearing other attributes can execute arbitrary Python code
836
       which requires sysdict and builtins. */
837
0
    PyDict_Clear(interp->sysdict);
838
0
    PyDict_Clear(interp->builtins);
839
0
    Py_CLEAR(interp->sysdict);
840
0
    Py_CLEAR(interp->builtins);
841
842
#if !defined(Py_GIL_DISABLED) && defined(Py_STACKREF_DEBUG)
843
#  ifdef Py_STACKREF_CLOSE_DEBUG
844
    _Py_hashtable_destroy(interp->closed_stackrefs_table);
845
    interp->closed_stackrefs_table = NULL;
846
#  endif
847
    _Py_stackref_report_leaks(interp);
848
    _Py_hashtable_destroy(interp->open_stackrefs_table);
849
    interp->open_stackrefs_table = NULL;
850
#endif
851
852
0
    if (tstate->interp == interp) {
853
        /* We are now safe to fix tstate->_status.cleared. */
854
        // XXX Do this (much) earlier?
855
0
        tstate->_status.cleared = 1;
856
0
    }
857
858
0
    for (int i=0; i < DICT_MAX_WATCHERS; i++) {
859
0
        interp->dict_state.watchers[i] = NULL;
860
0
    }
861
862
0
    for (int i=0; i < TYPE_MAX_WATCHERS; i++) {
863
0
        interp->type_watchers[i] = NULL;
864
0
    }
865
866
0
    for (int i=0; i < FUNC_MAX_WATCHERS; i++) {
867
0
        interp->func_watchers[i] = NULL;
868
0
    }
869
0
    interp->active_func_watchers = 0;
870
871
0
    for (int i=0; i < CODE_MAX_WATCHERS; i++) {
872
0
        interp->code_watchers[i] = NULL;
873
0
    }
874
0
    interp->active_code_watchers = 0;
875
876
0
    for (int i=0; i < CONTEXT_MAX_WATCHERS; i++) {
877
0
        interp->context_watchers[i] = NULL;
878
0
    }
879
0
    interp->active_context_watchers = 0;
880
    // XXX Once we have one allocator per interpreter (i.e.
881
    // per-interpreter GC) we must ensure that all of the interpreter's
882
    // objects have been cleaned up at the point.
883
884
    // We could clear interp->threads.freelist here
885
    // if it held more than just the initial thread state.
886
0
}
887
888
889
void
890
PyInterpreterState_Clear(PyInterpreterState *interp)
891
0
{
892
    // Use the current Python thread state to call audit hooks and to collect
893
    // garbage. It can be different than the current Python thread state
894
    // of 'interp'.
895
0
    PyThreadState *current_tstate = current_fast_get();
896
0
    _PyImport_ClearCore(interp);
897
0
    interpreter_clear(interp, current_tstate);
898
0
}
899
900
901
void
902
_PyInterpreterState_Clear(PyThreadState *tstate)
903
0
{
904
0
    _PyImport_ClearCore(tstate->interp);
905
0
    interpreter_clear(tstate->interp, tstate);
906
0
}
907
908
909
static inline void tstate_deactivate(PyThreadState *tstate);
910
static void tstate_set_detached(PyThreadState *tstate, int detached_state);
911
static void zapthreads(PyInterpreterState *interp);
912
913
void
914
PyInterpreterState_Delete(PyInterpreterState *interp)
915
0
{
916
0
    _PyRuntimeState *runtime = interp->runtime;
917
0
    struct pyinterpreters *interpreters = &runtime->interpreters;
918
919
    // XXX Clearing the "current" thread state should happen before
920
    // we start finalizing the interpreter (or the current thread state).
921
0
    PyThreadState *tcur = current_fast_get();
922
0
    if (tcur != NULL && interp == tcur->interp) {
923
        /* Unset current thread.  After this, many C API calls become crashy. */
924
0
        _PyThreadState_Detach(tcur);
925
0
    }
926
927
0
    zapthreads(interp);
928
929
    // XXX These two calls should be done at the end of clear_interpreter(),
930
    // but currently some objects get decref'ed after that.
931
#ifdef Py_REF_DEBUG
932
    _PyInterpreterState_FinalizeRefTotal(interp);
933
#endif
934
0
    _PyInterpreterState_FinalizeAllocatedBlocks(interp);
935
936
0
    HEAD_LOCK(runtime);
937
0
    PyInterpreterState **p;
938
0
    for (p = &interpreters->head; ; p = &(*p)->next) {
939
0
        if (*p == NULL) {
940
0
            Py_FatalError("NULL interpreter");
941
0
        }
942
0
        if (*p == interp) {
943
0
            break;
944
0
        }
945
0
    }
946
0
    if (interp->threads.head != NULL) {
947
0
        Py_FatalError("remaining threads");
948
0
    }
949
0
    *p = interp->next;
950
951
0
    if (interpreters->main == interp) {
952
0
        interpreters->main = NULL;
953
0
        if (interpreters->head != NULL) {
954
0
            Py_FatalError("remaining subinterpreters");
955
0
        }
956
0
    }
957
0
    HEAD_UNLOCK(runtime);
958
959
0
    _Py_qsbr_fini(interp);
960
961
0
    _PyObject_FiniState(interp);
962
963
0
    PyConfig_Clear(&interp->config);
964
965
0
    free_interpreter(interp);
966
0
}
967
968
969
#ifdef HAVE_FORK
970
/*
971
 * Delete all interpreter states except the main interpreter.  If there
972
 * is a current interpreter state, it *must* be the main interpreter.
973
 */
974
PyStatus
975
_PyInterpreterState_DeleteExceptMain(_PyRuntimeState *runtime)
976
0
{
977
0
    struct pyinterpreters *interpreters = &runtime->interpreters;
978
979
0
    PyThreadState *tstate = _PyThreadState_Swap(runtime, NULL);
980
0
    if (tstate != NULL && tstate->interp != interpreters->main) {
981
0
        return _PyStatus_ERR("not main interpreter");
982
0
    }
983
984
0
    HEAD_LOCK(runtime);
985
0
    PyInterpreterState *interp = interpreters->head;
986
0
    interpreters->head = NULL;
987
0
    while (interp != NULL) {
988
0
        if (interp == interpreters->main) {
989
0
            interpreters->main->next = NULL;
990
0
            interpreters->head = interp;
991
0
            interp = interp->next;
992
0
            continue;
993
0
        }
994
995
        // XXX Won't this fail since PyInterpreterState_Clear() requires
996
        // the "current" tstate to be set?
997
0
        PyInterpreterState_Clear(interp);  // XXX must activate?
998
0
        zapthreads(interp);
999
0
        PyInterpreterState *prev_interp = interp;
1000
0
        interp = interp->next;
1001
0
        free_interpreter(prev_interp);
1002
0
    }
1003
0
    HEAD_UNLOCK(runtime);
1004
1005
0
    if (interpreters->head == NULL) {
1006
0
        return _PyStatus_ERR("missing main interpreter");
1007
0
    }
1008
0
    _PyThreadState_Swap(runtime, tstate);
1009
0
    return _PyStatus_OK();
1010
0
}
1011
#endif
1012
1013
static inline void
1014
set_main_thread(PyInterpreterState *interp, PyThreadState *tstate)
1015
0
{
1016
0
    _Py_atomic_store_ptr_relaxed(&interp->threads.main, tstate);
1017
0
}
1018
1019
static inline PyThreadState *
1020
get_main_thread(PyInterpreterState *interp)
1021
0
{
1022
0
    return _Py_atomic_load_ptr_relaxed(&interp->threads.main);
1023
0
}
1024
1025
void
1026
_PyErr_SetInterpreterAlreadyRunning(void)
1027
0
{
1028
0
    PyErr_SetString(PyExc_InterpreterError, "interpreter already running");
1029
0
}
1030
1031
int
1032
_PyInterpreterState_SetRunningMain(PyInterpreterState *interp)
1033
0
{
1034
0
    if (get_main_thread(interp) != NULL) {
1035
0
        _PyErr_SetInterpreterAlreadyRunning();
1036
0
        return -1;
1037
0
    }
1038
0
    PyThreadState *tstate = current_fast_get();
1039
0
    _Py_EnsureTstateNotNULL(tstate);
1040
0
    if (tstate->interp != interp) {
1041
0
        PyErr_SetString(PyExc_RuntimeError,
1042
0
                        "current tstate has wrong interpreter");
1043
0
        return -1;
1044
0
    }
1045
0
    set_main_thread(interp, tstate);
1046
1047
0
    return 0;
1048
0
}
1049
1050
void
1051
_PyInterpreterState_SetNotRunningMain(PyInterpreterState *interp)
1052
0
{
1053
0
    assert(get_main_thread(interp) == current_fast_get());
1054
0
    set_main_thread(interp, NULL);
1055
0
}
1056
1057
int
1058
_PyInterpreterState_IsRunningMain(PyInterpreterState *interp)
1059
0
{
1060
0
    if (get_main_thread(interp) != NULL) {
1061
0
        return 1;
1062
0
    }
1063
    // Embedders might not know to call _PyInterpreterState_SetRunningMain(),
1064
    // so their main thread wouldn't show it is running the main interpreter's
1065
    // program.  (Py_Main() doesn't have this problem.)  For now this isn't
1066
    // critical.  If it were, we would need to infer "running main" from other
1067
    // information, like if it's the main interpreter.  We used to do that
1068
    // but the naive approach led to some inconsistencies that caused problems.
1069
0
    return 0;
1070
0
}
1071
1072
int
1073
_PyThreadState_IsRunningMain(PyThreadState *tstate)
1074
0
{
1075
0
    PyInterpreterState *interp = tstate->interp;
1076
    // See the note in _PyInterpreterState_IsRunningMain() about
1077
    // possible false negatives here for embedders.
1078
0
    return get_main_thread(interp) == tstate;
1079
0
}
1080
1081
void
1082
_PyInterpreterState_ReinitRunningMain(PyThreadState *tstate)
1083
0
{
1084
0
    PyInterpreterState *interp = tstate->interp;
1085
0
    if (get_main_thread(interp) != tstate) {
1086
0
        set_main_thread(interp, NULL);
1087
0
    }
1088
0
}
1089
1090
1091
//----------
1092
// accessors
1093
//----------
1094
1095
int
1096
_PyInterpreterState_IsReady(PyInterpreterState *interp)
1097
0
{
1098
0
    return interp->_ready;
1099
0
}
1100
1101
#ifndef NDEBUG
1102
static inline int
1103
check_interpreter_whence(long whence)
1104
44
{
1105
44
    if(whence < 0) {
1106
0
        return -1;
1107
0
    }
1108
44
    if (whence > _PyInterpreterState_WHENCE_MAX) {
1109
0
        return -1;
1110
0
    }
1111
44
    return 0;
1112
44
}
1113
#endif
1114
1115
long
1116
_PyInterpreterState_GetWhence(PyInterpreterState *interp)
1117
0
{
1118
0
    assert(check_interpreter_whence(interp->_whence) == 0);
1119
0
    return interp->_whence;
1120
0
}
1121
1122
void
1123
_PyInterpreterState_SetWhence(PyInterpreterState *interp, long whence)
1124
22
{
1125
22
    assert(interp->_whence != _PyInterpreterState_WHENCE_NOTSET);
1126
22
    assert(check_interpreter_whence(whence) == 0);
1127
22
    interp->_whence = whence;
1128
22
}
1129
1130
1131
PyObject *
1132
_Py_GetMainModule(PyThreadState *tstate)
1133
0
{
1134
    // We return None to indicate "not found" or "bogus".
1135
0
    PyObject *modules = _PyImport_GetModulesRef(tstate->interp);
1136
0
    if (modules == Py_None) {
1137
0
        return modules;
1138
0
    }
1139
0
    PyObject *module = NULL;
1140
0
    (void)PyMapping_GetOptionalItem(modules, &_Py_ID(__main__), &module);
1141
0
    Py_DECREF(modules);
1142
0
    if (module == NULL && !PyErr_Occurred()) {
1143
0
        Py_RETURN_NONE;
1144
0
    }
1145
0
    return module;
1146
0
}
1147
1148
int
1149
_Py_CheckMainModule(PyObject *module)
1150
0
{
1151
0
    if (module == NULL || module == Py_None) {
1152
0
        if (!PyErr_Occurred()) {
1153
0
            (void)_PyErr_SetModuleNotFoundError(&_Py_ID(__main__));
1154
0
        }
1155
0
        return -1;
1156
0
    }
1157
0
    if (!Py_IS_TYPE(module, &PyModule_Type)) {
1158
        /* The __main__ module has been tampered with. */
1159
0
        PyObject *msg = PyUnicode_FromString("invalid __main__ module");
1160
0
        if (msg != NULL) {
1161
0
            (void)PyErr_SetImportError(msg, &_Py_ID(__main__), NULL);
1162
0
            Py_DECREF(msg);
1163
0
        }
1164
0
        return -1;
1165
0
    }
1166
0
    return 0;
1167
0
}
1168
1169
1170
PyObject *
1171
PyInterpreterState_GetDict(PyInterpreterState *interp)
1172
0
{
1173
0
    if (interp->dict == NULL) {
1174
0
        interp->dict = PyDict_New();
1175
0
        if (interp->dict == NULL) {
1176
0
            PyErr_Clear();
1177
0
        }
1178
0
    }
1179
    /* Returning NULL means no per-interpreter dict is available. */
1180
0
    return interp->dict;
1181
0
}
1182
1183
1184
//----------
1185
// interp ID
1186
//----------
1187
1188
int64_t
1189
_PyInterpreterState_ObjectToID(PyObject *idobj)
1190
0
{
1191
0
    if (!_PyIndex_Check(idobj)) {
1192
0
        PyErr_Format(PyExc_TypeError,
1193
0
                     "interpreter ID must be an int, got %.100s",
1194
0
                     Py_TYPE(idobj)->tp_name);
1195
0
        return -1;
1196
0
    }
1197
1198
    // This may raise OverflowError.
1199
    // For now, we don't worry about if LLONG_MAX < INT64_MAX.
1200
0
    long long id = PyLong_AsLongLong(idobj);
1201
0
    if (id == -1 && PyErr_Occurred()) {
1202
0
        return -1;
1203
0
    }
1204
1205
0
    if (id < 0) {
1206
0
        PyErr_Format(PyExc_ValueError,
1207
0
                     "interpreter ID must be a non-negative int, got %R",
1208
0
                     idobj);
1209
0
        return -1;
1210
0
    }
1211
#if LLONG_MAX > INT64_MAX
1212
    else if (id > INT64_MAX) {
1213
        PyErr_SetString(PyExc_OverflowError, "int too big to convert");
1214
        return -1;
1215
    }
1216
#endif
1217
0
    else {
1218
0
        return (int64_t)id;
1219
0
    }
1220
0
}
1221
1222
int64_t
1223
PyInterpreterState_GetID(PyInterpreterState *interp)
1224
0
{
1225
0
    if (interp == NULL) {
1226
0
        PyErr_SetString(PyExc_RuntimeError, "no interpreter provided");
1227
0
        return -1;
1228
0
    }
1229
0
    return interp->id;
1230
0
}
1231
1232
PyObject *
1233
_PyInterpreterState_GetIDObject(PyInterpreterState *interp)
1234
0
{
1235
0
    int64_t interpid = interp->id;
1236
0
    if (interpid < 0) {
1237
0
        return NULL;
1238
0
    }
1239
0
    assert(interpid < LLONG_MAX);
1240
0
    return PyLong_FromLongLong(interpid);
1241
0
}
1242
1243
1244
1245
void
1246
_PyInterpreterState_IDIncref(PyInterpreterState *interp)
1247
0
{
1248
0
    _Py_atomic_add_ssize(&interp->id_refcount, 1);
1249
0
}
1250
1251
1252
void
1253
_PyInterpreterState_IDDecref(PyInterpreterState *interp)
1254
0
{
1255
0
    _PyRuntimeState *runtime = interp->runtime;
1256
1257
0
    Py_ssize_t refcount = _Py_atomic_add_ssize(&interp->id_refcount, -1);
1258
1259
0
    if (refcount == 1 && interp->requires_idref) {
1260
0
        PyThreadState *tstate =
1261
0
            _PyThreadState_NewBound(interp, _PyThreadState_WHENCE_FINI);
1262
1263
        // XXX Possible GILState issues?
1264
0
        PyThreadState *save_tstate = _PyThreadState_Swap(runtime, tstate);
1265
0
        Py_EndInterpreter(tstate);
1266
0
        _PyThreadState_Swap(runtime, save_tstate);
1267
0
    }
1268
0
}
1269
1270
int
1271
_PyInterpreterState_RequiresIDRef(PyInterpreterState *interp)
1272
0
{
1273
0
    return interp->requires_idref;
1274
0
}
1275
1276
void
1277
_PyInterpreterState_RequireIDRef(PyInterpreterState *interp, int required)
1278
0
{
1279
0
    interp->requires_idref = required ? 1 : 0;
1280
0
}
1281
1282
1283
//-----------------------------
1284
// look up an interpreter state
1285
//-----------------------------
1286
1287
/* Return the interpreter associated with the current OS thread.
1288
1289
   The GIL must be held.
1290
  */
1291
1292
PyInterpreterState*
1293
PyInterpreterState_Get(void)
1294
0
{
1295
0
    _Py_AssertHoldsTstate();
1296
0
    PyInterpreterState *interp = _Py_tss_interp;
1297
0
    if (interp == NULL) {
1298
0
        Py_FatalError("no current interpreter");
1299
0
    }
1300
0
    return interp;
1301
0
}
1302
1303
1304
static PyInterpreterState *
1305
interp_look_up_id(_PyRuntimeState *runtime, int64_t requested_id)
1306
0
{
1307
0
    PyInterpreterState *interp = runtime->interpreters.head;
1308
0
    while (interp != NULL) {
1309
0
        int64_t id = interp->id;
1310
0
        assert(id >= 0);
1311
0
        if (requested_id == id) {
1312
0
            return interp;
1313
0
        }
1314
0
        interp = PyInterpreterState_Next(interp);
1315
0
    }
1316
0
    return NULL;
1317
0
}
1318
1319
/* Return the interpreter state with the given ID.
1320
1321
   Fail with RuntimeError if the interpreter is not found. */
1322
1323
PyInterpreterState *
1324
_PyInterpreterState_LookUpID(int64_t requested_id)
1325
0
{
1326
0
    PyInterpreterState *interp = NULL;
1327
0
    if (requested_id >= 0) {
1328
0
        _PyRuntimeState *runtime = &_PyRuntime;
1329
0
        HEAD_LOCK(runtime);
1330
0
        interp = interp_look_up_id(runtime, requested_id);
1331
0
        HEAD_UNLOCK(runtime);
1332
0
    }
1333
0
    if (interp == NULL && !PyErr_Occurred()) {
1334
0
        PyErr_Format(PyExc_InterpreterNotFoundError,
1335
0
                     "unrecognized interpreter ID %lld", requested_id);
1336
0
    }
1337
0
    return interp;
1338
0
}
1339
1340
PyInterpreterState *
1341
_PyInterpreterState_LookUpIDObject(PyObject *requested_id)
1342
0
{
1343
0
    int64_t id = _PyInterpreterState_ObjectToID(requested_id);
1344
0
    if (id < 0) {
1345
0
        return NULL;
1346
0
    }
1347
0
    return _PyInterpreterState_LookUpID(id);
1348
0
}
1349
1350
1351
/********************************/
1352
/* the per-thread runtime state */
1353
/********************************/
1354
1355
#ifndef NDEBUG
1356
static inline int
1357
tstate_is_alive(PyThreadState *tstate)
1358
44
{
1359
44
    return (tstate->_status.initialized &&
1360
44
            !tstate->_status.finalized &&
1361
44
            !tstate->_status.cleared &&
1362
44
            !tstate->_status.finalizing);
1363
44
}
1364
#endif
1365
1366
1367
//----------
1368
// lifecycle
1369
//----------
1370
1371
static _PyStackChunk*
1372
allocate_chunk(int size_in_bytes, _PyStackChunk* previous)
1373
386k
{
1374
386k
    assert(size_in_bytes % sizeof(PyObject **) == 0);
1375
386k
    _PyStackChunk *res = _PyObject_VirtualAlloc(size_in_bytes);
1376
386k
    if (res == NULL) {
1377
0
        return NULL;
1378
0
    }
1379
386k
    res->previous = previous;
1380
386k
    res->size = size_in_bytes;
1381
386k
    res->top = 0;
1382
386k
    return res;
1383
386k
}
1384
1385
static void
1386
reset_threadstate(_PyThreadStateImpl *tstate)
1387
0
{
1388
    // Set to _PyThreadState_INIT directly?
1389
0
    memcpy(tstate,
1390
0
           &initial._main_interpreter._initial_thread,
1391
0
           sizeof(*tstate));
1392
0
}
1393
1394
static _PyThreadStateImpl *
1395
alloc_threadstate(PyInterpreterState *interp)
1396
22
{
1397
22
    _PyThreadStateImpl *tstate;
1398
1399
    // Try the preallocated tstate first.
1400
22
    tstate = _Py_atomic_exchange_ptr(&interp->threads.preallocated, NULL);
1401
1402
    // Fall back to the allocator.
1403
22
    if (tstate == NULL) {
1404
0
        tstate = PyMem_RawCalloc(1, sizeof(_PyThreadStateImpl));
1405
0
        if (tstate == NULL) {
1406
0
            return NULL;
1407
0
        }
1408
0
        reset_threadstate(tstate);
1409
0
    }
1410
22
    return tstate;
1411
22
}
1412
1413
static void
1414
free_threadstate(_PyThreadStateImpl *tstate)
1415
0
{
1416
0
    PyInterpreterState *interp = tstate->base.interp;
1417
#ifdef Py_STATS
1418
    _PyStats_ThreadFini(tstate);
1419
#endif
1420
    // The initial thread state of the interpreter is allocated
1421
    // as part of the interpreter state so should not be freed.
1422
0
    if (tstate == &interp->_initial_thread) {
1423
        // Make it available again.
1424
0
        reset_threadstate(tstate);
1425
0
        assert(interp->threads.preallocated == NULL);
1426
0
        _Py_atomic_store_ptr(&interp->threads.preallocated, tstate);
1427
0
    }
1428
0
    else {
1429
0
        PyMem_RawFree(tstate);
1430
0
    }
1431
0
}
1432
1433
static void
1434
decref_threadstate(_PyThreadStateImpl *tstate)
1435
0
{
1436
0
    if (_Py_atomic_add_ssize(&tstate->refcount, -1) == 1) {
1437
        // The last reference to the thread state is gone.
1438
0
        free_threadstate(tstate);
1439
0
    }
1440
0
}
1441
1442
static inline void
1443
init_policy(uint16_t *target, const char *env_name, uint16_t default_value,
1444
                long min_value, long max_value)
1445
44
{
1446
44
    *target = default_value;
1447
44
    char *env = Py_GETENV(env_name);
1448
44
    if (env && *env != '\0') {
1449
0
        long value = atol(env);
1450
0
        if (value >= min_value && value <= max_value) {
1451
0
            *target = (uint16_t)value;
1452
0
        }
1453
0
    }
1454
44
}
1455
1456
/* Get the thread state to a minimal consistent state.
1457
   Further init happens in pylifecycle.c before it can be used.
1458
   All fields not initialized here are expected to be zeroed out,
1459
   e.g. by PyMem_RawCalloc() or memset(), or otherwise pre-initialized.
1460
   The interpreter state is not manipulated.  Instead it is assumed that
1461
   the thread is getting added to the interpreter.
1462
  */
1463
1464
static void
1465
init_threadstate(_PyThreadStateImpl *_tstate,
1466
                 PyInterpreterState *interp, uint64_t id, int whence)
1467
22
{
1468
22
    PyThreadState *tstate = (PyThreadState *)_tstate;
1469
22
    if (tstate->_status.initialized) {
1470
0
        Py_FatalError("thread state already initialized");
1471
0
    }
1472
1473
22
    assert(interp != NULL);
1474
22
    tstate->interp = interp;
1475
22
    tstate->eval_breaker =
1476
22
        _Py_atomic_load_uintptr_relaxed(&interp->ceval.instrumentation_version);
1477
1478
    // next/prev are set in add_threadstate().
1479
22
    assert(tstate->next == NULL);
1480
22
    assert(tstate->prev == NULL);
1481
1482
22
    assert(tstate->_whence == _PyThreadState_WHENCE_NOTSET);
1483
22
    assert(whence >= 0 && whence <= _PyThreadState_WHENCE_THREADING_DAEMON);
1484
22
    tstate->_whence = whence;
1485
1486
22
    assert(id > 0);
1487
22
    tstate->id = id;
1488
1489
    // thread_id and native_thread_id are set in bind_tstate().
1490
1491
22
    tstate->py_recursion_limit = interp->ceval.recursion_limit;
1492
22
    tstate->py_recursion_remaining = interp->ceval.recursion_limit;
1493
22
    tstate->exc_info = &tstate->exc_state;
1494
1495
    // PyGILState_Release must not try to delete this thread state.
1496
    // This is cleared when PyGILState_Ensure() creates the thread state.
1497
22
    tstate->gilstate_counter = 1;
1498
1499
    // Initialize the embedded base frame - sentinel at the bottom of the frame stack
1500
22
    _tstate->base_frame.previous = NULL;
1501
22
    _tstate->base_frame.f_executable = PyStackRef_None;
1502
22
    _tstate->base_frame.f_funcobj = PyStackRef_NULL;
1503
22
    _tstate->base_frame.f_globals = NULL;
1504
22
    _tstate->base_frame.f_builtins = NULL;
1505
22
    _tstate->base_frame.f_locals = NULL;
1506
22
    _tstate->base_frame.frame_obj = NULL;
1507
22
    _tstate->base_frame.instr_ptr = NULL;
1508
22
    _tstate->base_frame.stackpointer = _tstate->base_frame.localsplus;
1509
22
    _tstate->base_frame.return_offset = 0;
1510
22
    _tstate->base_frame.owner = FRAME_OWNED_BY_INTERPRETER;
1511
22
    _tstate->base_frame.visited = 0;
1512
#ifdef Py_DEBUG
1513
    _tstate->base_frame.lltrace = 0;
1514
#endif
1515
#ifdef Py_GIL_DISABLED
1516
    _tstate->base_frame.tlbc_index = 0;
1517
#endif
1518
22
    _tstate->base_frame.localsplus[0] = PyStackRef_NULL;
1519
1520
    // current_frame starts pointing to the base frame
1521
22
    tstate->current_frame = &_tstate->base_frame;
1522
    // base_frame pointer for profilers to validate stack unwinding
1523
22
    tstate->base_frame = &_tstate->base_frame;
1524
22
    tstate->datastack_chunk = NULL;
1525
22
    tstate->datastack_top = NULL;
1526
22
    tstate->datastack_limit = NULL;
1527
22
    tstate->what_event = -1;
1528
22
    tstate->current_executor = NULL;
1529
22
    tstate->jit_exit = NULL;
1530
22
    tstate->dict_global_version = 0;
1531
1532
22
    _tstate->c_stack_soft_limit = UINTPTR_MAX;
1533
22
    _tstate->c_stack_top = 0;
1534
22
    _tstate->c_stack_hard_limit = 0;
1535
1536
22
    _tstate->c_stack_init_base = 0;
1537
22
    _tstate->c_stack_init_top = 0;
1538
1539
22
    _tstate->asyncio_running_loop = NULL;
1540
22
    _tstate->asyncio_running_task = NULL;
1541
    // Initialize interpreter policy from environment variables
1542
22
    init_policy(&_tstate->policy.interp.jump_backward_initial_value,
1543
22
                "PYTHON_JIT_JUMP_BACKWARD_INITIAL_VALUE",
1544
22
                JUMP_BACKWARD_INITIAL_VALUE, 1, MAX_VALUE);
1545
22
    init_policy(&_tstate->policy.interp.jump_backward_initial_backoff,
1546
22
                "PYTHON_JIT_JUMP_BACKWARD_INITIAL_BACKOFF",
1547
22
                JUMP_BACKWARD_INITIAL_BACKOFF, 0, MAX_BACKOFF);
1548
#ifdef _Py_TIER2
1549
    // Initialize JIT policy from environment variables
1550
    init_policy(&_tstate->policy.jit.side_exit_initial_value,
1551
                "PYTHON_JIT_SIDE_EXIT_INITIAL_VALUE",
1552
                SIDE_EXIT_INITIAL_VALUE, 1, MAX_VALUE);
1553
    init_policy(&_tstate->policy.jit.side_exit_initial_backoff,
1554
                "PYTHON_JIT_SIDE_EXIT_INITIAL_BACKOFF",
1555
                SIDE_EXIT_INITIAL_BACKOFF, 0, MAX_BACKOFF);
1556
    _tstate->jit_tracer_state = NULL;
1557
#endif
1558
22
    tstate->delete_later = NULL;
1559
1560
22
    llist_init(&_tstate->mem_free_queue);
1561
22
    llist_init(&_tstate->asyncio_tasks_head);
1562
22
    if (interp->stoptheworld.requested || _PyRuntime.stoptheworld.requested) {
1563
        // Start in the suspended state if there is an ongoing stop-the-world.
1564
0
        tstate->state = _Py_THREAD_SUSPENDED;
1565
0
    }
1566
1567
22
    tstate->_status.initialized = 1;
1568
22
}
1569
1570
static void
1571
add_threadstate(PyInterpreterState *interp, PyThreadState *tstate,
1572
                PyThreadState *next)
1573
22
{
1574
22
    assert(interp->threads.head != tstate);
1575
22
    if (next != NULL) {
1576
0
        assert(next->prev == NULL || next->prev == tstate);
1577
0
        next->prev = tstate;
1578
0
    }
1579
22
    tstate->next = next;
1580
22
    assert(tstate->prev == NULL);
1581
22
    interp->threads.head = tstate;
1582
22
}
1583
1584
static PyThreadState *
1585
new_threadstate(PyInterpreterState *interp, int whence)
1586
22
{
1587
    // Allocate the thread state.
1588
22
    _PyThreadStateImpl *tstate = alloc_threadstate(interp);
1589
22
    if (tstate == NULL) {
1590
0
        return NULL;
1591
0
    }
1592
1593
#ifdef Py_GIL_DISABLED
1594
    Py_ssize_t qsbr_idx = _Py_qsbr_reserve(interp);
1595
    if (qsbr_idx < 0) {
1596
        free_threadstate(tstate);
1597
        return NULL;
1598
    }
1599
    int32_t tlbc_idx = _Py_ReserveTLBCIndex(interp);
1600
    if (tlbc_idx < 0) {
1601
        free_threadstate(tstate);
1602
        return NULL;
1603
    }
1604
#endif
1605
#ifdef Py_STATS
1606
    // The PyStats structure is quite large and is allocated separated from tstate.
1607
    if (!_PyStats_ThreadInit(interp, tstate)) {
1608
        free_threadstate(tstate);
1609
        return NULL;
1610
    }
1611
#endif
1612
1613
    /* We serialize concurrent creation to protect global state. */
1614
22
    HEAD_LOCK(interp->runtime);
1615
1616
    // Initialize the new thread state.
1617
22
    interp->threads.next_unique_id += 1;
1618
22
    uint64_t id = interp->threads.next_unique_id;
1619
22
    init_threadstate(tstate, interp, id, whence);
1620
1621
    // Add the new thread state to the interpreter.
1622
22
    PyThreadState *old_head = interp->threads.head;
1623
22
    add_threadstate(interp, (PyThreadState *)tstate, old_head);
1624
1625
22
    HEAD_UNLOCK(interp->runtime);
1626
1627
#ifdef Py_GIL_DISABLED
1628
    // Must be called with lock unlocked to avoid lock ordering deadlocks.
1629
    _Py_qsbr_register(tstate, interp, qsbr_idx);
1630
    tstate->tlbc_index = tlbc_idx;
1631
#endif
1632
1633
22
    return (PyThreadState *)tstate;
1634
22
}
1635
1636
PyThreadState *
1637
PyThreadState_New(PyInterpreterState *interp)
1638
0
{
1639
0
    return _PyThreadState_NewBound(interp, _PyThreadState_WHENCE_UNKNOWN);
1640
0
}
1641
1642
PyThreadState *
1643
_PyThreadState_NewBound(PyInterpreterState *interp, int whence)
1644
0
{
1645
0
    PyThreadState *tstate = new_threadstate(interp, whence);
1646
0
    if (tstate) {
1647
0
        bind_tstate(tstate);
1648
        // This makes sure there's a gilstate tstate bound
1649
        // as soon as possible.
1650
0
        if (gilstate_get() == NULL) {
1651
0
            bind_gilstate_tstate(tstate);
1652
0
        }
1653
0
    }
1654
0
    return tstate;
1655
0
}
1656
1657
// This must be followed by a call to _PyThreadState_Bind();
1658
PyThreadState *
1659
_PyThreadState_New(PyInterpreterState *interp, int whence)
1660
22
{
1661
22
    return new_threadstate(interp, whence);
1662
22
}
1663
1664
// We keep this for stable ABI compabibility.
1665
PyAPI_FUNC(PyThreadState*)
1666
_PyThreadState_Prealloc(PyInterpreterState *interp)
1667
0
{
1668
0
    return _PyThreadState_New(interp, _PyThreadState_WHENCE_UNKNOWN);
1669
0
}
1670
1671
// We keep this around for (accidental) stable ABI compatibility.
1672
// Realistically, no extensions are using it.
1673
PyAPI_FUNC(void)
1674
_PyThreadState_Init(PyThreadState *tstate)
1675
0
{
1676
0
    Py_FatalError("_PyThreadState_Init() is for internal use only");
1677
0
}
1678
1679
1680
static void
1681
clear_datastack(PyThreadState *tstate)
1682
0
{
1683
0
    _PyStackChunk *chunk = tstate->datastack_chunk;
1684
0
    tstate->datastack_chunk = NULL;
1685
0
    while (chunk != NULL) {
1686
0
        _PyStackChunk *prev = chunk->previous;
1687
0
        _PyObject_VirtualFree(chunk, chunk->size);
1688
0
        chunk = prev;
1689
0
    }
1690
0
}
1691
1692
void
1693
PyThreadState_Clear(PyThreadState *tstate)
1694
0
{
1695
0
    assert(tstate->_status.initialized && !tstate->_status.cleared);
1696
0
    assert(current_fast_get()->interp == tstate->interp);
1697
    // GH-126016: In the _interpreters module, KeyboardInterrupt exceptions
1698
    // during PyEval_EvalCode() are sent to finalization, which doesn't let us
1699
    // mark threads as "not running main". So, for now this assertion is
1700
    // disabled.
1701
    // XXX assert(!_PyThreadState_IsRunningMain(tstate));
1702
    // XXX assert(!tstate->_status.bound || tstate->_status.unbound);
1703
0
    tstate->_status.finalizing = 1;  // just in case
1704
1705
    /* XXX Conditions we need to enforce:
1706
1707
       * the GIL must be held by the current thread
1708
       * current_fast_get()->interp must match tstate->interp
1709
       * for the main interpreter, current_fast_get() must be the main thread
1710
     */
1711
1712
0
    int verbose = _PyInterpreterState_GetConfig(tstate->interp)->verbose;
1713
1714
0
    if (verbose && tstate->current_frame != tstate->base_frame) {
1715
        /* bpo-20526: After the main thread calls
1716
           _PyInterpreterState_SetFinalizing() in Py_FinalizeEx()
1717
           (or in Py_EndInterpreter() for subinterpreters),
1718
           threads must exit when trying to take the GIL.
1719
           If a thread exit in the middle of _PyEval_EvalFrameDefault(),
1720
           tstate->frame is not reset to its previous value.
1721
           It is more likely with daemon threads, but it can happen
1722
           with regular threads if threading._shutdown() fails
1723
           (ex: interrupted by CTRL+C). */
1724
0
        fprintf(stderr,
1725
0
          "PyThreadState_Clear: warning: thread still has a frame\n");
1726
0
    }
1727
1728
0
    if (verbose && tstate->current_exception != NULL) {
1729
0
        fprintf(stderr, "PyThreadState_Clear: warning: thread has an exception set\n");
1730
0
        _PyErr_Print(tstate);
1731
0
    }
1732
1733
    /* At this point tstate shouldn't be used any more,
1734
       neither to run Python code nor for other uses.
1735
1736
       This is tricky when current_fast_get() == tstate, in the same way
1737
       as noted in interpreter_clear() above.  The below finalizers
1738
       can possibly run Python code or otherwise use the partially
1739
       cleared thread state.  For now we trust that isn't a problem
1740
       in practice.
1741
     */
1742
    // XXX Deal with the possibility of problematic finalizers.
1743
1744
    /* Don't clear tstate->pyframe: it is a borrowed reference */
1745
1746
0
    Py_CLEAR(tstate->threading_local_key);
1747
0
    Py_CLEAR(tstate->threading_local_sentinel);
1748
1749
0
    Py_CLEAR(((_PyThreadStateImpl *)tstate)->asyncio_running_loop);
1750
0
    Py_CLEAR(((_PyThreadStateImpl *)tstate)->asyncio_running_task);
1751
1752
1753
0
    PyMutex_Lock(&tstate->interp->asyncio_tasks_lock);
1754
    // merge any lingering tasks from thread state to interpreter's
1755
    // tasks list
1756
0
    llist_concat(&tstate->interp->asyncio_tasks_head,
1757
0
                 &((_PyThreadStateImpl *)tstate)->asyncio_tasks_head);
1758
0
    PyMutex_Unlock(&tstate->interp->asyncio_tasks_lock);
1759
1760
0
    Py_CLEAR(tstate->dict);
1761
0
    Py_CLEAR(tstate->async_exc);
1762
1763
0
    Py_CLEAR(tstate->current_exception);
1764
1765
0
    Py_CLEAR(tstate->exc_state.exc_value);
1766
1767
    /* The stack of exception states should contain just this thread. */
1768
0
    if (verbose && tstate->exc_info != &tstate->exc_state) {
1769
0
        fprintf(stderr,
1770
0
          "PyThreadState_Clear: warning: thread still has a generator\n");
1771
0
    }
1772
1773
0
    if (tstate->c_profilefunc != NULL) {
1774
0
        FT_ATOMIC_ADD_SSIZE(tstate->interp->sys_profiling_threads, -1);
1775
0
        tstate->c_profilefunc = NULL;
1776
0
    }
1777
0
    if (tstate->c_tracefunc != NULL) {
1778
0
        FT_ATOMIC_ADD_SSIZE(tstate->interp->sys_tracing_threads, -1);
1779
0
        tstate->c_tracefunc = NULL;
1780
0
    }
1781
1782
0
    Py_CLEAR(tstate->c_profileobj);
1783
0
    Py_CLEAR(tstate->c_traceobj);
1784
1785
0
    Py_CLEAR(tstate->async_gen_firstiter);
1786
0
    Py_CLEAR(tstate->async_gen_finalizer);
1787
1788
0
    Py_CLEAR(tstate->context);
1789
1790
#ifdef Py_GIL_DISABLED
1791
    // Each thread should clear own freelists in free-threading builds.
1792
    struct _Py_freelists *freelists = _Py_freelists_GET();
1793
    _PyObject_ClearFreeLists(freelists, 1);
1794
1795
    // Flush the thread's local GC allocation count to the global count
1796
    // before the thread state is cleared, otherwise the count is lost.
1797
    _PyThreadStateImpl *tstate_impl = (_PyThreadStateImpl *)tstate;
1798
    _Py_atomic_add_int(&tstate->interp->gc.young.count,
1799
                       (int)tstate_impl->gc.alloc_count);
1800
    tstate_impl->gc.alloc_count = 0;
1801
1802
    // Merge our thread-local refcounts into the type's own refcount and
1803
    // free our local refcount array.
1804
    _PyObject_FinalizePerThreadRefcounts(tstate_impl);
1805
1806
    // Remove ourself from the biased reference counting table of threads.
1807
    _Py_brc_remove_thread(tstate);
1808
1809
    // Release our thread-local copies of the bytecode for reuse by another
1810
    // thread
1811
    _Py_ClearTLBCIndex(tstate_impl);
1812
#endif
1813
1814
    // Merge our queue of pointers to be freed into the interpreter queue.
1815
0
    _PyMem_AbandonDelayed(tstate);
1816
1817
0
    _PyThreadState_ClearMimallocHeaps(tstate);
1818
1819
0
    tstate->_status.cleared = 1;
1820
1821
    // XXX Call _PyThreadStateSwap(runtime, NULL) here if "current".
1822
    // XXX Do it as early in the function as possible.
1823
0
}
1824
1825
static void
1826
decrement_stoptheworld_countdown(struct _stoptheworld_state *stw);
1827
1828
/* Common code for PyThreadState_Delete() and PyThreadState_DeleteCurrent() */
1829
static void
1830
tstate_delete_common(PyThreadState *tstate, int release_gil)
1831
0
{
1832
0
    assert(tstate->_status.cleared && !tstate->_status.finalized);
1833
0
    tstate_verify_not_active(tstate);
1834
0
    assert(!_PyThreadState_IsRunningMain(tstate));
1835
1836
0
    PyInterpreterState *interp = tstate->interp;
1837
0
    if (interp == NULL) {
1838
0
        Py_FatalError("NULL interpreter");
1839
0
    }
1840
0
    _PyRuntimeState *runtime = interp->runtime;
1841
1842
0
    HEAD_LOCK(runtime);
1843
0
    if (tstate->prev) {
1844
0
        tstate->prev->next = tstate->next;
1845
0
    }
1846
0
    else {
1847
0
        interp->threads.head = tstate->next;
1848
0
    }
1849
0
    if (tstate->next) {
1850
0
        tstate->next->prev = tstate->prev;
1851
0
    }
1852
0
    if (tstate->state != _Py_THREAD_SUSPENDED) {
1853
        // Any ongoing stop-the-world request should not wait for us because
1854
        // our thread is getting deleted.
1855
0
        if (interp->stoptheworld.requested) {
1856
0
            decrement_stoptheworld_countdown(&interp->stoptheworld);
1857
0
        }
1858
0
        if (runtime->stoptheworld.requested) {
1859
0
            decrement_stoptheworld_countdown(&runtime->stoptheworld);
1860
0
        }
1861
0
    }
1862
1863
#if defined(Py_REF_DEBUG) && defined(Py_GIL_DISABLED)
1864
    // Add our portion of the total refcount to the interpreter's total.
1865
    _PyThreadStateImpl *tstate_impl = (_PyThreadStateImpl *)tstate;
1866
    tstate->interp->object_state.reftotal += tstate_impl->reftotal;
1867
    tstate_impl->reftotal = 0;
1868
    assert(tstate_impl->refcounts.values == NULL);
1869
#endif
1870
1871
#if _Py_TIER2
1872
    _PyJit_TracerFree((_PyThreadStateImpl *)tstate);
1873
#endif
1874
1875
0
    HEAD_UNLOCK(runtime);
1876
1877
    // XXX Unbind in PyThreadState_Clear(), or earlier
1878
    // (and assert not-equal here)?
1879
0
    if (tstate->_status.bound_gilstate) {
1880
0
        unbind_gilstate_tstate(tstate);
1881
0
    }
1882
0
    if (tstate->_status.bound) {
1883
0
        unbind_tstate(tstate);
1884
0
    }
1885
1886
    // XXX Move to PyThreadState_Clear()?
1887
0
    clear_datastack(tstate);
1888
1889
0
    if (release_gil) {
1890
0
        _PyEval_ReleaseLock(tstate->interp, tstate, 1);
1891
0
    }
1892
1893
#ifdef Py_GIL_DISABLED
1894
    _Py_qsbr_unregister(tstate);
1895
#endif
1896
1897
0
    tstate->_status.finalized = 1;
1898
0
}
1899
1900
static void
1901
zapthreads(PyInterpreterState *interp)
1902
0
{
1903
0
    PyThreadState *tstate;
1904
    /* No need to lock the mutex here because this should only happen
1905
       when the threads are all really dead (XXX famous last words).
1906
1907
       Cannot use _Py_FOR_EACH_TSTATE_UNLOCKED because we are freeing
1908
       the thread states here.
1909
    */
1910
0
    while ((tstate = interp->threads.head) != NULL) {
1911
0
        tstate_verify_not_active(tstate);
1912
0
        tstate_delete_common(tstate, 0);
1913
0
        free_threadstate((_PyThreadStateImpl *)tstate);
1914
0
    }
1915
0
}
1916
1917
1918
void
1919
PyThreadState_Delete(PyThreadState *tstate)
1920
0
{
1921
0
    _Py_EnsureTstateNotNULL(tstate);
1922
0
    tstate_verify_not_active(tstate);
1923
0
    tstate_delete_common(tstate, 0);
1924
0
    free_threadstate((_PyThreadStateImpl *)tstate);
1925
0
}
1926
1927
1928
void
1929
_PyThreadState_DeleteCurrent(PyThreadState *tstate)
1930
0
{
1931
0
    _Py_EnsureTstateNotNULL(tstate);
1932
#ifdef Py_GIL_DISABLED
1933
    _Py_qsbr_detach(((_PyThreadStateImpl *)tstate)->qsbr);
1934
#endif
1935
#ifdef Py_STATS
1936
    _PyStats_Detach((_PyThreadStateImpl *)tstate);
1937
#endif
1938
0
    current_fast_clear(tstate->interp->runtime);
1939
0
    tstate_delete_common(tstate, 1);  // release GIL as part of call
1940
0
    free_threadstate((_PyThreadStateImpl *)tstate);
1941
0
}
1942
1943
void
1944
PyThreadState_DeleteCurrent(void)
1945
0
{
1946
0
    PyThreadState *tstate = current_fast_get();
1947
0
    _PyThreadState_DeleteCurrent(tstate);
1948
0
}
1949
1950
1951
// Unlinks and removes all thread states from `tstate->interp`, with the
1952
// exception of the one passed as an argument. However, it does not delete
1953
// these thread states. Instead, it returns the removed thread states as a
1954
// linked list.
1955
//
1956
// Note that if there is a current thread state, it *must* be the one
1957
// passed as argument.  Also, this won't touch any interpreters other
1958
// than the current one, since we don't know which thread state should
1959
// be kept in those other interpreters.
1960
PyThreadState *
1961
_PyThreadState_RemoveExcept(PyThreadState *tstate)
1962
0
{
1963
0
    assert(tstate != NULL);
1964
0
    PyInterpreterState *interp = tstate->interp;
1965
0
    _PyRuntimeState *runtime = interp->runtime;
1966
1967
#ifdef Py_GIL_DISABLED
1968
    assert(runtime->stoptheworld.world_stopped);
1969
#endif
1970
1971
0
    HEAD_LOCK(runtime);
1972
    /* Remove all thread states, except tstate, from the linked list of
1973
       thread states. */
1974
0
    PyThreadState *list = interp->threads.head;
1975
0
    if (list == tstate) {
1976
0
        list = tstate->next;
1977
0
    }
1978
0
    if (tstate->prev) {
1979
0
        tstate->prev->next = tstate->next;
1980
0
    }
1981
0
    if (tstate->next) {
1982
0
        tstate->next->prev = tstate->prev;
1983
0
    }
1984
0
    tstate->prev = tstate->next = NULL;
1985
0
    interp->threads.head = tstate;
1986
0
    HEAD_UNLOCK(runtime);
1987
1988
0
    return list;
1989
0
}
1990
1991
// Deletes the thread states in the linked list `list`.
1992
//
1993
// This is intended to be used in conjunction with _PyThreadState_RemoveExcept.
1994
//
1995
// If `is_after_fork` is true, the thread states are immediately freed.
1996
// Otherwise, they are decref'd because they may still be referenced by an
1997
// OS thread.
1998
void
1999
_PyThreadState_DeleteList(PyThreadState *list, int is_after_fork)
2000
0
{
2001
    // The world can't be stopped because we PyThreadState_Clear() can
2002
    // call destructors.
2003
0
    assert(!_PyRuntime.stoptheworld.world_stopped);
2004
2005
0
    PyThreadState *p, *next;
2006
0
    for (p = list; p; p = next) {
2007
0
        next = p->next;
2008
0
        PyThreadState_Clear(p);
2009
0
        if (is_after_fork) {
2010
0
            free_threadstate((_PyThreadStateImpl *)p);
2011
0
        }
2012
0
        else {
2013
0
            decref_threadstate((_PyThreadStateImpl *)p);
2014
0
        }
2015
0
    }
2016
0
}
2017
2018
2019
//----------
2020
// accessors
2021
//----------
2022
2023
/* An extension mechanism to store arbitrary additional per-thread state.
2024
   PyThreadState_GetDict() returns a dictionary that can be used to hold such
2025
   state; the caller should pick a unique key and store its state there.  If
2026
   PyThreadState_GetDict() returns NULL, an exception has *not* been raised
2027
   and the caller should assume no per-thread state is available. */
2028
2029
PyObject *
2030
_PyThreadState_GetDict(PyThreadState *tstate)
2031
52
{
2032
52
    assert(tstate != NULL);
2033
52
    if (tstate->dict == NULL) {
2034
1
        tstate->dict = PyDict_New();
2035
1
        if (tstate->dict == NULL) {
2036
0
            _PyErr_Clear(tstate);
2037
0
        }
2038
1
    }
2039
52
    return tstate->dict;
2040
52
}
2041
2042
2043
PyObject *
2044
PyThreadState_GetDict(void)
2045
52
{
2046
52
    PyThreadState *tstate = current_fast_get();
2047
52
    if (tstate == NULL) {
2048
0
        return NULL;
2049
0
    }
2050
52
    return _PyThreadState_GetDict(tstate);
2051
52
}
2052
2053
2054
PyInterpreterState *
2055
PyThreadState_GetInterpreter(PyThreadState *tstate)
2056
0
{
2057
0
    assert(tstate != NULL);
2058
0
    return tstate->interp;
2059
0
}
2060
2061
2062
PyFrameObject*
2063
PyThreadState_GetFrame(PyThreadState *tstate)
2064
1.79M
{
2065
1.79M
    assert(tstate != NULL);
2066
1.79M
    _PyInterpreterFrame *f = _PyThreadState_GetFrame(tstate);
2067
1.79M
    if (f == NULL) {
2068
0
        return NULL;
2069
0
    }
2070
1.79M
    PyFrameObject *frame = _PyFrame_GetFrameObject(f);
2071
1.79M
    if (frame == NULL) {
2072
0
        PyErr_Clear();
2073
0
    }
2074
1.79M
    return (PyFrameObject*)Py_XNewRef(frame);
2075
1.79M
}
2076
2077
2078
uint64_t
2079
PyThreadState_GetID(PyThreadState *tstate)
2080
0
{
2081
0
    assert(tstate != NULL);
2082
0
    return tstate->id;
2083
0
}
2084
2085
2086
static inline void
2087
tstate_activate(PyThreadState *tstate)
2088
293k
{
2089
293k
    assert(tstate != NULL);
2090
    // XXX assert(tstate_is_alive(tstate));
2091
293k
    assert(tstate_is_bound(tstate));
2092
293k
    assert(!tstate->_status.active);
2093
2094
293k
    assert(!tstate->_status.bound_gilstate ||
2095
293k
           tstate == gilstate_get());
2096
293k
    if (!tstate->_status.bound_gilstate) {
2097
0
        bind_gilstate_tstate(tstate);
2098
0
    }
2099
2100
293k
    tstate->_status.active = 1;
2101
293k
}
2102
2103
static inline void
2104
tstate_deactivate(PyThreadState *tstate)
2105
293k
{
2106
293k
    assert(tstate != NULL);
2107
    // XXX assert(tstate_is_alive(tstate));
2108
293k
    assert(tstate_is_bound(tstate));
2109
293k
    assert(tstate->_status.active);
2110
2111
#if Py_STATS
2112
    _PyStats_Detach((_PyThreadStateImpl *)tstate);
2113
#endif
2114
2115
293k
    tstate->_status.active = 0;
2116
2117
    // We do not unbind the gilstate tstate here.
2118
    // It will still be used in PyGILState_Ensure().
2119
293k
}
2120
2121
static int
2122
tstate_try_attach(PyThreadState *tstate)
2123
293k
{
2124
#ifdef Py_GIL_DISABLED
2125
    int expected = _Py_THREAD_DETACHED;
2126
    return _Py_atomic_compare_exchange_int(&tstate->state,
2127
                                           &expected,
2128
                                           _Py_THREAD_ATTACHED);
2129
#else
2130
293k
    assert(tstate->state == _Py_THREAD_DETACHED);
2131
293k
    tstate->state = _Py_THREAD_ATTACHED;
2132
293k
    return 1;
2133
293k
#endif
2134
293k
}
2135
2136
static void
2137
tstate_set_detached(PyThreadState *tstate, int detached_state)
2138
293k
{
2139
293k
    assert(_Py_atomic_load_int_relaxed(&tstate->state) == _Py_THREAD_ATTACHED);
2140
#ifdef Py_GIL_DISABLED
2141
    _Py_atomic_store_int(&tstate->state, detached_state);
2142
#else
2143
293k
    tstate->state = detached_state;
2144
293k
#endif
2145
293k
}
2146
2147
static void
2148
tstate_wait_attach(PyThreadState *tstate)
2149
0
{
2150
0
    do {
2151
0
        int state = _Py_atomic_load_int_relaxed(&tstate->state);
2152
0
        if (state == _Py_THREAD_SUSPENDED) {
2153
            // Wait until we're switched out of SUSPENDED to DETACHED.
2154
0
            _PyParkingLot_Park(&tstate->state, &state, sizeof(tstate->state),
2155
0
                               /*timeout=*/-1, NULL, /*detach=*/0);
2156
0
        }
2157
0
        else if (state == _Py_THREAD_SHUTTING_DOWN) {
2158
            // We're shutting down, so we can't attach.
2159
0
            _PyThreadState_HangThread(tstate);
2160
0
        }
2161
0
        else {
2162
0
            assert(state == _Py_THREAD_DETACHED);
2163
0
        }
2164
        // Once we're back in DETACHED we can re-attach
2165
0
    } while (!tstate_try_attach(tstate));
2166
0
}
2167
2168
void
2169
_PyThreadState_Attach(PyThreadState *tstate)
2170
293k
{
2171
#if defined(Py_DEBUG)
2172
    // This is called from PyEval_RestoreThread(). Similar
2173
    // to it, we need to ensure errno doesn't change.
2174
    int err = errno;
2175
#endif
2176
2177
293k
    _Py_EnsureTstateNotNULL(tstate);
2178
293k
    if (current_fast_get() != NULL) {
2179
0
        Py_FatalError("non-NULL old thread state");
2180
0
    }
2181
293k
    _PyThreadStateImpl *_tstate = (_PyThreadStateImpl *)tstate;
2182
293k
    if (_tstate->c_stack_hard_limit == 0) {
2183
22
        _Py_InitializeRecursionLimits(tstate);
2184
22
    }
2185
2186
293k
    while (1) {
2187
293k
        _PyEval_AcquireLock(tstate);
2188
2189
        // XXX assert(tstate_is_alive(tstate));
2190
293k
        current_fast_set(&_PyRuntime, tstate);
2191
293k
        if (!tstate_try_attach(tstate)) {
2192
0
            tstate_wait_attach(tstate);
2193
0
        }
2194
293k
        tstate_activate(tstate);
2195
2196
#ifdef Py_GIL_DISABLED
2197
        if (_PyEval_IsGILEnabled(tstate) && !tstate->holds_gil) {
2198
            // The GIL was enabled between our call to _PyEval_AcquireLock()
2199
            // and when we attached (the GIL can't go from enabled to disabled
2200
            // here because only a thread holding the GIL can disable
2201
            // it). Detach and try again.
2202
            tstate_set_detached(tstate, _Py_THREAD_DETACHED);
2203
            tstate_deactivate(tstate);
2204
            current_fast_clear(&_PyRuntime);
2205
            continue;
2206
        }
2207
        _Py_qsbr_attach(((_PyThreadStateImpl *)tstate)->qsbr);
2208
#endif
2209
293k
        break;
2210
293k
    }
2211
2212
    // Resume previous critical section. This acquires the lock(s) from the
2213
    // top-most critical section.
2214
293k
    if (tstate->critical_section != 0) {
2215
0
        _PyCriticalSection_Resume(tstate);
2216
0
    }
2217
2218
#ifdef Py_STATS
2219
    _PyStats_Attach((_PyThreadStateImpl *)tstate);
2220
#endif
2221
2222
#if defined(Py_DEBUG)
2223
    errno = err;
2224
#endif
2225
293k
}
2226
2227
static void
2228
detach_thread(PyThreadState *tstate, int detached_state)
2229
293k
{
2230
    // XXX assert(tstate_is_alive(tstate) && tstate_is_bound(tstate));
2231
293k
    assert(_Py_atomic_load_int_relaxed(&tstate->state) == _Py_THREAD_ATTACHED);
2232
293k
    assert(tstate == current_fast_get());
2233
293k
    if (tstate->critical_section != 0) {
2234
0
        _PyCriticalSection_SuspendAll(tstate);
2235
0
    }
2236
#ifdef Py_GIL_DISABLED
2237
    _Py_qsbr_detach(((_PyThreadStateImpl *)tstate)->qsbr);
2238
#endif
2239
293k
    tstate_deactivate(tstate);
2240
293k
    tstate_set_detached(tstate, detached_state);
2241
293k
    current_fast_clear(&_PyRuntime);
2242
293k
    _PyEval_ReleaseLock(tstate->interp, tstate, 0);
2243
293k
}
2244
2245
void
2246
_PyThreadState_Detach(PyThreadState *tstate)
2247
293k
{
2248
293k
    detach_thread(tstate, _Py_THREAD_DETACHED);
2249
293k
}
2250
2251
void
2252
_PyThreadState_Suspend(PyThreadState *tstate)
2253
0
{
2254
0
    _PyRuntimeState *runtime = &_PyRuntime;
2255
2256
0
    assert(_Py_atomic_load_int_relaxed(&tstate->state) == _Py_THREAD_ATTACHED);
2257
2258
0
    struct _stoptheworld_state *stw = NULL;
2259
0
    HEAD_LOCK(runtime);
2260
0
    if (runtime->stoptheworld.requested) {
2261
0
        stw = &runtime->stoptheworld;
2262
0
    }
2263
0
    else if (tstate->interp->stoptheworld.requested) {
2264
0
        stw = &tstate->interp->stoptheworld;
2265
0
    }
2266
0
    HEAD_UNLOCK(runtime);
2267
2268
0
    if (stw == NULL) {
2269
        // Switch directly to "detached" if there is no active stop-the-world
2270
        // request.
2271
0
        detach_thread(tstate, _Py_THREAD_DETACHED);
2272
0
        return;
2273
0
    }
2274
2275
    // Switch to "suspended" state.
2276
0
    detach_thread(tstate, _Py_THREAD_SUSPENDED);
2277
2278
    // Decrease the count of remaining threads needing to park.
2279
0
    HEAD_LOCK(runtime);
2280
0
    decrement_stoptheworld_countdown(stw);
2281
0
    HEAD_UNLOCK(runtime);
2282
0
}
2283
2284
void
2285
_PyThreadState_SetShuttingDown(PyThreadState *tstate)
2286
0
{
2287
0
    _Py_atomic_store_int(&tstate->state, _Py_THREAD_SHUTTING_DOWN);
2288
#ifdef Py_GIL_DISABLED
2289
    _PyParkingLot_UnparkAll(&tstate->state);
2290
#endif
2291
0
}
2292
2293
// Decrease stop-the-world counter of remaining number of threads that need to
2294
// pause. If we are the final thread to pause, notify the requesting thread.
2295
static void
2296
decrement_stoptheworld_countdown(struct _stoptheworld_state *stw)
2297
0
{
2298
0
    assert(stw->thread_countdown > 0);
2299
0
    if (--stw->thread_countdown == 0) {
2300
0
        _PyEvent_Notify(&stw->stop_event);
2301
0
    }
2302
0
}
2303
2304
#ifdef Py_GIL_DISABLED
2305
// Interpreter for _Py_FOR_EACH_STW_INTERP(). For global stop-the-world events,
2306
// we start with the first interpreter and then iterate over all interpreters.
2307
// For per-interpreter stop-the-world events, we only operate on the one
2308
// interpreter.
2309
static PyInterpreterState *
2310
interp_for_stop_the_world(struct _stoptheworld_state *stw)
2311
{
2312
    return (stw->is_global
2313
        ? PyInterpreterState_Head()
2314
        : _Py_CONTAINER_OF(stw, PyInterpreterState, stoptheworld));
2315
}
2316
2317
// Loops over threads for a stop-the-world event.
2318
// For global: all threads in all interpreters
2319
// For per-interpreter: all threads in the interpreter
2320
#define _Py_FOR_EACH_STW_INTERP(stw, i)                                     \
2321
    for (PyInterpreterState *i = interp_for_stop_the_world((stw));          \
2322
            i != NULL; i = ((stw->is_global) ? i->next : NULL))
2323
2324
2325
// Try to transition threads atomically from the "detached" state to the
2326
// "gc stopped" state. Returns true if all threads are in the "gc stopped"
2327
static bool
2328
park_detached_threads(struct _stoptheworld_state *stw)
2329
{
2330
    int num_parked = 0;
2331
    _Py_FOR_EACH_STW_INTERP(stw, i) {
2332
        _Py_FOR_EACH_TSTATE_UNLOCKED(i, t) {
2333
            int state = _Py_atomic_load_int_relaxed(&t->state);
2334
            if (state == _Py_THREAD_DETACHED) {
2335
                // Atomically transition to "suspended" if in "detached" state.
2336
                if (_Py_atomic_compare_exchange_int(
2337
                                &t->state, &state, _Py_THREAD_SUSPENDED)) {
2338
                    num_parked++;
2339
                }
2340
            }
2341
            else if (state == _Py_THREAD_ATTACHED && t != stw->requester) {
2342
                _Py_set_eval_breaker_bit(t, _PY_EVAL_PLEASE_STOP_BIT);
2343
            }
2344
        }
2345
    }
2346
    stw->thread_countdown -= num_parked;
2347
    assert(stw->thread_countdown >= 0);
2348
    return num_parked > 0 && stw->thread_countdown == 0;
2349
}
2350
2351
static void
2352
stop_the_world(struct _stoptheworld_state *stw)
2353
{
2354
    _PyRuntimeState *runtime = &_PyRuntime;
2355
2356
    // gh-137433: Acquire the rwmutex first to avoid deadlocks with daemon
2357
    // threads that may hang when blocked on lock acquisition.
2358
    if (stw->is_global) {
2359
        _PyRWMutex_Lock(&runtime->stoptheworld_mutex);
2360
    }
2361
    else {
2362
        _PyRWMutex_RLock(&runtime->stoptheworld_mutex);
2363
    }
2364
    PyMutex_Lock(&stw->mutex);
2365
2366
    HEAD_LOCK(runtime);
2367
    stw->requested = 1;
2368
    stw->thread_countdown = 0;
2369
    stw->stop_event = (PyEvent){0};  // zero-initialize (unset)
2370
    stw->requester = _PyThreadState_GET();  // may be NULL
2371
    FT_STAT_WORLD_STOP_INC();
2372
2373
    _Py_FOR_EACH_STW_INTERP(stw, i) {
2374
        _Py_FOR_EACH_TSTATE_UNLOCKED(i, t) {
2375
            if (t != stw->requester) {
2376
                // Count all the other threads (we don't wait on ourself).
2377
                stw->thread_countdown++;
2378
            }
2379
        }
2380
    }
2381
2382
    if (stw->thread_countdown == 0) {
2383
        HEAD_UNLOCK(runtime);
2384
        stw->world_stopped = 1;
2385
        return;
2386
    }
2387
2388
    for (;;) {
2389
        // Switch threads that are detached to the GC stopped state
2390
        bool stopped_all_threads = park_detached_threads(stw);
2391
        HEAD_UNLOCK(runtime);
2392
2393
        if (stopped_all_threads) {
2394
            break;
2395
        }
2396
2397
        PyTime_t wait_ns = 1000*1000;  // 1ms (arbitrary, may need tuning)
2398
        int detach = 0;
2399
        if (PyEvent_WaitTimed(&stw->stop_event, wait_ns, detach)) {
2400
            assert(stw->thread_countdown == 0);
2401
            break;
2402
        }
2403
2404
        HEAD_LOCK(runtime);
2405
    }
2406
    stw->world_stopped = 1;
2407
}
2408
2409
static void
2410
start_the_world(struct _stoptheworld_state *stw)
2411
{
2412
    _PyRuntimeState *runtime = &_PyRuntime;
2413
    assert(PyMutex_IsLocked(&stw->mutex));
2414
2415
    HEAD_LOCK(runtime);
2416
    stw->requested = 0;
2417
    stw->world_stopped = 0;
2418
    // Switch threads back to the detached state.
2419
    _Py_FOR_EACH_STW_INTERP(stw, i) {
2420
        _Py_FOR_EACH_TSTATE_UNLOCKED(i, t) {
2421
            if (t != stw->requester) {
2422
                assert(_Py_atomic_load_int_relaxed(&t->state) ==
2423
                       _Py_THREAD_SUSPENDED);
2424
                _Py_atomic_store_int(&t->state, _Py_THREAD_DETACHED);
2425
                _PyParkingLot_UnparkAll(&t->state);
2426
            }
2427
        }
2428
    }
2429
    stw->requester = NULL;
2430
    HEAD_UNLOCK(runtime);
2431
    PyMutex_Unlock(&stw->mutex);
2432
    if (stw->is_global) {
2433
        _PyRWMutex_Unlock(&runtime->stoptheworld_mutex);
2434
    }
2435
    else {
2436
        _PyRWMutex_RUnlock(&runtime->stoptheworld_mutex);
2437
    }
2438
}
2439
#endif  // Py_GIL_DISABLED
2440
2441
void
2442
_PyEval_StopTheWorldAll(_PyRuntimeState *runtime)
2443
0
{
2444
#ifdef Py_GIL_DISABLED
2445
    stop_the_world(&runtime->stoptheworld);
2446
#endif
2447
0
}
2448
2449
void
2450
_PyEval_StartTheWorldAll(_PyRuntimeState *runtime)
2451
0
{
2452
#ifdef Py_GIL_DISABLED
2453
    start_the_world(&runtime->stoptheworld);
2454
#endif
2455
0
}
2456
2457
void
2458
_PyEval_StopTheWorld(PyInterpreterState *interp)
2459
0
{
2460
#ifdef Py_GIL_DISABLED
2461
    stop_the_world(&interp->stoptheworld);
2462
#endif
2463
0
}
2464
2465
void
2466
_PyEval_StartTheWorld(PyInterpreterState *interp)
2467
0
{
2468
#ifdef Py_GIL_DISABLED
2469
    start_the_world(&interp->stoptheworld);
2470
#endif
2471
0
}
2472
2473
//----------
2474
// other API
2475
//----------
2476
2477
/* Asynchronously raise an exception in a thread.
2478
   Requested by Just van Rossum and Alex Martelli.
2479
   To prevent naive misuse, you must write your own extension
2480
   to call this, or use ctypes.  Must be called with the GIL held.
2481
   Returns the number of tstates modified (normally 1, but 0 if `id` didn't
2482
   match any known thread id).  Can be called with exc=NULL to clear an
2483
   existing async exception.  This raises no exceptions. */
2484
2485
// XXX Move this to Python/ceval_gil.c?
2486
// XXX Deprecate this.
2487
int
2488
PyThreadState_SetAsyncExc(unsigned long id, PyObject *exc)
2489
0
{
2490
0
    PyInterpreterState *interp = _PyInterpreterState_GET();
2491
2492
    /* Although the GIL is held, a few C API functions can be called
2493
     * without the GIL held, and in particular some that create and
2494
     * destroy thread and interpreter states.  Those can mutate the
2495
     * list of thread states we're traversing, so to prevent that we lock
2496
     * head_mutex for the duration.
2497
     */
2498
0
    PyThreadState *tstate = NULL;
2499
0
    _Py_FOR_EACH_TSTATE_BEGIN(interp, t) {
2500
0
        if (t->thread_id == id) {
2501
0
            tstate = t;
2502
0
            break;
2503
0
        }
2504
0
    }
2505
0
    _Py_FOR_EACH_TSTATE_END(interp);
2506
2507
0
    if (tstate != NULL) {
2508
        /* Tricky:  we need to decref the current value
2509
         * (if any) in tstate->async_exc, but that can in turn
2510
         * allow arbitrary Python code to run, including
2511
         * perhaps calls to this function.  To prevent
2512
         * deadlock, we need to release head_mutex before
2513
         * the decref.
2514
         */
2515
0
        Py_XINCREF(exc);
2516
0
        PyObject *old_exc = _Py_atomic_exchange_ptr(&tstate->async_exc, exc);
2517
2518
0
        Py_XDECREF(old_exc);
2519
0
        _Py_set_eval_breaker_bit(tstate, _PY_ASYNC_EXCEPTION_BIT);
2520
0
    }
2521
2522
0
    return tstate != NULL;
2523
0
}
2524
2525
//---------------------------------
2526
// API for the current thread state
2527
//---------------------------------
2528
2529
PyThreadState *
2530
PyThreadState_GetUnchecked(void)
2531
0
{
2532
0
    return current_fast_get();
2533
0
}
2534
2535
2536
PyThreadState *
2537
PyThreadState_Get(void)
2538
145M
{
2539
145M
    PyThreadState *tstate = current_fast_get();
2540
145M
    _Py_EnsureTstateNotNULL(tstate);
2541
145M
    return tstate;
2542
145M
}
2543
2544
PyThreadState *
2545
_PyThreadState_Swap(_PyRuntimeState *runtime, PyThreadState *newts)
2546
0
{
2547
0
    PyThreadState *oldts = current_fast_get();
2548
0
    if (oldts != NULL) {
2549
0
        _PyThreadState_Detach(oldts);
2550
0
    }
2551
0
    if (newts != NULL) {
2552
0
        _PyThreadState_Attach(newts);
2553
0
    }
2554
0
    return oldts;
2555
0
}
2556
2557
PyThreadState *
2558
PyThreadState_Swap(PyThreadState *newts)
2559
0
{
2560
0
    return _PyThreadState_Swap(&_PyRuntime, newts);
2561
0
}
2562
2563
2564
void
2565
_PyThreadState_Bind(PyThreadState *tstate)
2566
22
{
2567
    // gh-104690: If Python is being finalized and PyInterpreterState_Delete()
2568
    // was called, tstate becomes a dangling pointer.
2569
22
    assert(_PyThreadState_CheckConsistency(tstate));
2570
2571
22
    bind_tstate(tstate);
2572
    // This makes sure there's a gilstate tstate bound
2573
    // as soon as possible.
2574
22
    if (gilstate_get() == NULL) {
2575
22
        bind_gilstate_tstate(tstate);
2576
22
    }
2577
22
}
2578
2579
#if defined(Py_GIL_DISABLED) && !defined(Py_LIMITED_API)
2580
uintptr_t
2581
_Py_GetThreadLocal_Addr(void)
2582
{
2583
    // gh-112535: Use the address of the thread-local PyThreadState variable as
2584
    // a unique identifier for the current thread. Each thread has a unique
2585
    // _Py_tss_tstate variable with a unique address.
2586
    return (uintptr_t)&_Py_tss_tstate;
2587
}
2588
#endif
2589
2590
/***********************************/
2591
/* routines for advanced debuggers */
2592
/***********************************/
2593
2594
// (requested by David Beazley)
2595
// Don't use unless you know what you are doing!
2596
2597
PyInterpreterState *
2598
PyInterpreterState_Head(void)
2599
0
{
2600
0
    return _PyRuntime.interpreters.head;
2601
0
}
2602
2603
PyInterpreterState *
2604
PyInterpreterState_Main(void)
2605
0
{
2606
0
    return _PyInterpreterState_Main();
2607
0
}
2608
2609
PyInterpreterState *
2610
0
PyInterpreterState_Next(PyInterpreterState *interp) {
2611
0
    return interp->next;
2612
0
}
2613
2614
PyThreadState *
2615
14.7k
PyInterpreterState_ThreadHead(PyInterpreterState *interp) {
2616
14.7k
    return interp->threads.head;
2617
14.7k
}
2618
2619
PyThreadState *
2620
14.7k
PyThreadState_Next(PyThreadState *tstate) {
2621
14.7k
    return tstate->next;
2622
14.7k
}
2623
2624
2625
/********************************************/
2626
/* reporting execution state of all threads */
2627
/********************************************/
2628
2629
/* The implementation of sys._current_frames().  This is intended to be
2630
   called with the GIL held, as it will be when called via
2631
   sys._current_frames().  It's possible it would work fine even without
2632
   the GIL held, but haven't thought enough about that.
2633
*/
2634
PyObject *
2635
_PyThread_CurrentFrames(void)
2636
0
{
2637
0
    _PyRuntimeState *runtime = &_PyRuntime;
2638
0
    PyThreadState *tstate = current_fast_get();
2639
0
    if (_PySys_Audit(tstate, "sys._current_frames", NULL) < 0) {
2640
0
        return NULL;
2641
0
    }
2642
2643
0
    PyObject *result = PyDict_New();
2644
0
    if (result == NULL) {
2645
0
        return NULL;
2646
0
    }
2647
2648
    /* for i in all interpreters:
2649
     *     for t in all of i's thread states:
2650
     *          if t's frame isn't NULL, map t's id to its frame
2651
     * Because these lists can mutate even when the GIL is held, we
2652
     * need to grab head_mutex for the duration.
2653
     */
2654
0
    _PyEval_StopTheWorldAll(runtime);
2655
0
    HEAD_LOCK(runtime);
2656
0
    PyInterpreterState *i;
2657
0
    for (i = runtime->interpreters.head; i != NULL; i = i->next) {
2658
0
        _Py_FOR_EACH_TSTATE_UNLOCKED(i, t) {
2659
0
            _PyInterpreterFrame *frame = t->current_frame;
2660
0
            frame = _PyFrame_GetFirstComplete(frame);
2661
0
            if (frame == NULL) {
2662
0
                continue;
2663
0
            }
2664
0
            PyObject *id = PyLong_FromUnsignedLong(t->thread_id);
2665
0
            if (id == NULL) {
2666
0
                goto fail;
2667
0
            }
2668
0
            PyObject *frameobj = (PyObject *)_PyFrame_GetFrameObject(frame);
2669
0
            if (frameobj == NULL) {
2670
0
                Py_DECREF(id);
2671
0
                goto fail;
2672
0
            }
2673
0
            int stat = PyDict_SetItem(result, id, frameobj);
2674
0
            Py_DECREF(id);
2675
0
            if (stat < 0) {
2676
0
                goto fail;
2677
0
            }
2678
0
        }
2679
0
    }
2680
0
    goto done;
2681
2682
0
fail:
2683
0
    Py_CLEAR(result);
2684
2685
0
done:
2686
0
    HEAD_UNLOCK(runtime);
2687
0
    _PyEval_StartTheWorldAll(runtime);
2688
0
    return result;
2689
0
}
2690
2691
/* The implementation of sys._current_exceptions().  This is intended to be
2692
   called with the GIL held, as it will be when called via
2693
   sys._current_exceptions().  It's possible it would work fine even without
2694
   the GIL held, but haven't thought enough about that.
2695
*/
2696
PyObject *
2697
_PyThread_CurrentExceptions(void)
2698
0
{
2699
0
    _PyRuntimeState *runtime = &_PyRuntime;
2700
0
    PyThreadState *tstate = current_fast_get();
2701
2702
0
    _Py_EnsureTstateNotNULL(tstate);
2703
2704
0
    if (_PySys_Audit(tstate, "sys._current_exceptions", NULL) < 0) {
2705
0
        return NULL;
2706
0
    }
2707
2708
0
    PyObject *result = PyDict_New();
2709
0
    if (result == NULL) {
2710
0
        return NULL;
2711
0
    }
2712
2713
    /* for i in all interpreters:
2714
     *     for t in all of i's thread states:
2715
     *          if t's frame isn't NULL, map t's id to its frame
2716
     * Because these lists can mutate even when the GIL is held, we
2717
     * need to grab head_mutex for the duration.
2718
     */
2719
0
    _PyEval_StopTheWorldAll(runtime);
2720
0
    HEAD_LOCK(runtime);
2721
0
    PyInterpreterState *i;
2722
0
    for (i = runtime->interpreters.head; i != NULL; i = i->next) {
2723
0
        _Py_FOR_EACH_TSTATE_UNLOCKED(i, t) {
2724
0
            _PyErr_StackItem *err_info = _PyErr_GetTopmostException(t);
2725
0
            if (err_info == NULL) {
2726
0
                continue;
2727
0
            }
2728
0
            PyObject *id = PyLong_FromUnsignedLong(t->thread_id);
2729
0
            if (id == NULL) {
2730
0
                goto fail;
2731
0
            }
2732
0
            PyObject *exc = err_info->exc_value;
2733
0
            assert(exc == NULL ||
2734
0
                   exc == Py_None ||
2735
0
                   PyExceptionInstance_Check(exc));
2736
2737
0
            int stat = PyDict_SetItem(result, id, exc == NULL ? Py_None : exc);
2738
0
            Py_DECREF(id);
2739
0
            if (stat < 0) {
2740
0
                goto fail;
2741
0
            }
2742
0
        }
2743
0
    }
2744
0
    goto done;
2745
2746
0
fail:
2747
0
    Py_CLEAR(result);
2748
2749
0
done:
2750
0
    HEAD_UNLOCK(runtime);
2751
0
    _PyEval_StartTheWorldAll(runtime);
2752
0
    return result;
2753
0
}
2754
2755
2756
/***********************************/
2757
/* Python "auto thread state" API. */
2758
/***********************************/
2759
2760
/* Internal initialization/finalization functions called by
2761
   Py_Initialize/Py_FinalizeEx
2762
*/
2763
PyStatus
2764
_PyGILState_Init(PyInterpreterState *interp)
2765
22
{
2766
22
    if (!_Py_IsMainInterpreter(interp)) {
2767
        /* Currently, PyGILState is shared by all interpreters. The main
2768
         * interpreter is responsible to initialize it. */
2769
0
        return _PyStatus_OK();
2770
0
    }
2771
22
    _PyRuntimeState *runtime = interp->runtime;
2772
22
    assert(gilstate_get() == NULL);
2773
22
    assert(runtime->gilstate.autoInterpreterState == NULL);
2774
22
    runtime->gilstate.autoInterpreterState = interp;
2775
22
    return _PyStatus_OK();
2776
22
}
2777
2778
void
2779
_PyGILState_Fini(PyInterpreterState *interp)
2780
0
{
2781
0
    if (!_Py_IsMainInterpreter(interp)) {
2782
        /* Currently, PyGILState is shared by all interpreters. The main
2783
         * interpreter is responsible to initialize it. */
2784
0
        return;
2785
0
    }
2786
0
    interp->runtime->gilstate.autoInterpreterState = NULL;
2787
0
}
2788
2789
2790
// XXX Drop this.
2791
void
2792
_PyGILState_SetTstate(PyThreadState *tstate)
2793
22
{
2794
    /* must init with valid states */
2795
22
    assert(tstate != NULL);
2796
22
    assert(tstate->interp != NULL);
2797
2798
22
    if (!_Py_IsMainInterpreter(tstate->interp)) {
2799
        /* Currently, PyGILState is shared by all interpreters. The main
2800
         * interpreter is responsible to initialize it. */
2801
0
        return;
2802
0
    }
2803
2804
22
#ifndef NDEBUG
2805
22
    _PyRuntimeState *runtime = tstate->interp->runtime;
2806
2807
22
    assert(runtime->gilstate.autoInterpreterState == tstate->interp);
2808
22
    assert(gilstate_get() == tstate);
2809
22
    assert(tstate->gilstate_counter == 1);
2810
22
#endif
2811
22
}
2812
2813
PyInterpreterState *
2814
_PyGILState_GetInterpreterStateUnsafe(void)
2815
0
{
2816
0
    return _PyRuntime.gilstate.autoInterpreterState;
2817
0
}
2818
2819
/* The public functions */
2820
2821
PyThreadState *
2822
PyGILState_GetThisThreadState(void)
2823
0
{
2824
0
    return gilstate_get();
2825
0
}
2826
2827
int
2828
PyGILState_Check(void)
2829
0
{
2830
0
    _PyRuntimeState *runtime = &_PyRuntime;
2831
0
    if (!_Py_atomic_load_int_relaxed(&runtime->gilstate.check_enabled)) {
2832
0
        return 1;
2833
0
    }
2834
2835
0
    PyThreadState *tstate = current_fast_get();
2836
0
    if (tstate == NULL) {
2837
0
        return 0;
2838
0
    }
2839
2840
0
    PyThreadState *tcur = gilstate_get();
2841
0
    return (tstate == tcur);
2842
0
}
2843
2844
PyGILState_STATE
2845
PyGILState_Ensure(void)
2846
0
{
2847
0
    _PyRuntimeState *runtime = &_PyRuntime;
2848
2849
    /* Note that we do not auto-init Python here - apart from
2850
       potential races with 2 threads auto-initializing, pep-311
2851
       spells out other issues.  Embedders are expected to have
2852
       called Py_Initialize(). */
2853
2854
    /* Ensure that _PyEval_InitThreads() and _PyGILState_Init() have been
2855
       called by Py_Initialize()
2856
2857
       TODO: This isn't thread-safe. There's no protection here against
2858
       concurrent finalization of the interpreter; it's simply a guard
2859
       for *after* the interpreter has finalized.
2860
     */
2861
0
    if (!_PyEval_ThreadsInitialized() || runtime->gilstate.autoInterpreterState == NULL) {
2862
0
        PyThread_hang_thread();
2863
0
    }
2864
2865
0
    PyThreadState *tcur = gilstate_get();
2866
0
    int has_gil;
2867
0
    if (tcur == NULL) {
2868
        /* Create a new Python thread state for this thread */
2869
        // XXX Use PyInterpreterState_EnsureThreadState()?
2870
0
        tcur = new_threadstate(runtime->gilstate.autoInterpreterState,
2871
0
                               _PyThreadState_WHENCE_GILSTATE);
2872
0
        if (tcur == NULL) {
2873
0
            Py_FatalError("Couldn't create thread-state for new thread");
2874
0
        }
2875
0
        bind_tstate(tcur);
2876
0
        bind_gilstate_tstate(tcur);
2877
2878
        /* This is our thread state!  We'll need to delete it in the
2879
           matching call to PyGILState_Release(). */
2880
0
        assert(tcur->gilstate_counter == 1);
2881
0
        tcur->gilstate_counter = 0;
2882
0
        has_gil = 0; /* new thread state is never current */
2883
0
    }
2884
0
    else {
2885
0
        has_gil = holds_gil(tcur);
2886
0
    }
2887
2888
0
    if (!has_gil) {
2889
0
        PyEval_RestoreThread(tcur);
2890
0
    }
2891
2892
    /* Update our counter in the thread-state - no need for locks:
2893
       - tcur will remain valid as we hold the GIL.
2894
       - the counter is safe as we are the only thread "allowed"
2895
         to modify this value
2896
    */
2897
0
    ++tcur->gilstate_counter;
2898
2899
0
    return has_gil ? PyGILState_LOCKED : PyGILState_UNLOCKED;
2900
0
}
2901
2902
void
2903
PyGILState_Release(PyGILState_STATE oldstate)
2904
0
{
2905
0
    PyThreadState *tstate = gilstate_get();
2906
0
    if (tstate == NULL) {
2907
0
        Py_FatalError("auto-releasing thread-state, "
2908
0
                      "but no thread-state for this thread");
2909
0
    }
2910
2911
    /* We must hold the GIL and have our thread state current */
2912
0
    if (!holds_gil(tstate)) {
2913
0
        _Py_FatalErrorFormat(__func__,
2914
0
                             "thread state %p must be current when releasing",
2915
0
                             tstate);
2916
0
    }
2917
0
    --tstate->gilstate_counter;
2918
0
    assert(tstate->gilstate_counter >= 0); /* illegal counter value */
2919
2920
    /* If we're going to destroy this thread-state, we must
2921
     * clear it while the GIL is held, as destructors may run.
2922
     */
2923
0
    if (tstate->gilstate_counter == 0) {
2924
        /* can't have been locked when we created it */
2925
0
        assert(oldstate == PyGILState_UNLOCKED);
2926
        // XXX Unbind tstate here.
2927
        // gh-119585: `PyThreadState_Clear()` may call destructors that
2928
        // themselves use PyGILState_Ensure and PyGILState_Release, so make
2929
        // sure that gilstate_counter is not zero when calling it.
2930
0
        ++tstate->gilstate_counter;
2931
0
        PyThreadState_Clear(tstate);
2932
0
        --tstate->gilstate_counter;
2933
        /* Delete the thread-state.  Note this releases the GIL too!
2934
         * It's vital that the GIL be held here, to avoid shutdown
2935
         * races; see bugs 225673 and 1061968 (that nasty bug has a
2936
         * habit of coming back).
2937
         */
2938
0
        assert(tstate->gilstate_counter == 0);
2939
0
        assert(current_fast_get() == tstate);
2940
0
        _PyThreadState_DeleteCurrent(tstate);
2941
0
    }
2942
    /* Release the lock if necessary */
2943
0
    else if (oldstate == PyGILState_UNLOCKED) {
2944
0
        PyEval_SaveThread();
2945
0
    }
2946
0
}
2947
2948
2949
/*************/
2950
/* Other API */
2951
/*************/
2952
2953
_PyFrameEvalFunction
2954
_PyInterpreterState_GetEvalFrameFunc(PyInterpreterState *interp)
2955
0
{
2956
0
    if (interp->eval_frame == NULL) {
2957
0
        return _PyEval_EvalFrameDefault;
2958
0
    }
2959
0
    return interp->eval_frame;
2960
0
}
2961
2962
2963
void
2964
_PyInterpreterState_SetEvalFrameFunc(PyInterpreterState *interp,
2965
                                     _PyFrameEvalFunction eval_frame)
2966
0
{
2967
0
    if (eval_frame == _PyEval_EvalFrameDefault) {
2968
0
        eval_frame = NULL;
2969
0
    }
2970
0
    if (eval_frame == interp->eval_frame) {
2971
0
        return;
2972
0
    }
2973
#ifdef _Py_TIER2
2974
    if (eval_frame != NULL) {
2975
        _Py_Executors_InvalidateAll(interp, 1);
2976
    }
2977
#endif
2978
0
    RARE_EVENT_INC(set_eval_frame_func);
2979
0
    _PyEval_StopTheWorld(interp);
2980
0
    interp->eval_frame = eval_frame;
2981
0
    _PyEval_StartTheWorld(interp);
2982
0
}
2983
2984
2985
const PyConfig*
2986
_PyInterpreterState_GetConfig(PyInterpreterState *interp)
2987
32.6M
{
2988
32.6M
    return &interp->config;
2989
32.6M
}
2990
2991
2992
const PyConfig*
2993
_Py_GetConfig(void)
2994
82.4k
{
2995
82.4k
    PyThreadState *tstate = current_fast_get();
2996
82.4k
    _Py_EnsureTstateNotNULL(tstate);
2997
82.4k
    return _PyInterpreterState_GetConfig(tstate->interp);
2998
82.4k
}
2999
3000
3001
int
3002
_PyInterpreterState_HasFeature(PyInterpreterState *interp, unsigned long feature)
3003
0
{
3004
0
    return ((interp->feature_flags & feature) != 0);
3005
0
}
3006
3007
3008
386k
#define MINIMUM_OVERHEAD 1000
3009
3010
static PyObject **
3011
push_chunk(PyThreadState *tstate, int size)
3012
386k
{
3013
386k
    int allocate_size = _PY_DATA_STACK_CHUNK_SIZE;
3014
386k
    while (allocate_size < (int)sizeof(PyObject*)*(size + MINIMUM_OVERHEAD)) {
3015
0
        allocate_size *= 2;
3016
0
    }
3017
386k
    _PyStackChunk *new = allocate_chunk(allocate_size, tstate->datastack_chunk);
3018
386k
    if (new == NULL) {
3019
0
        return NULL;
3020
0
    }
3021
386k
    if (tstate->datastack_chunk) {
3022
386k
        tstate->datastack_chunk->top = tstate->datastack_top -
3023
386k
                                       &tstate->datastack_chunk->data[0];
3024
386k
    }
3025
386k
    tstate->datastack_chunk = new;
3026
386k
    tstate->datastack_limit = (PyObject **)(((char *)new) + allocate_size);
3027
    // When new is the "root" chunk (i.e. new->previous == NULL), we can keep
3028
    // _PyThreadState_PopFrame from freeing it later by "skipping" over the
3029
    // first element:
3030
386k
    PyObject **res = &new->data[new->previous == NULL];
3031
386k
    tstate->datastack_top = res + size;
3032
386k
    return res;
3033
386k
}
3034
3035
_PyInterpreterFrame *
3036
_PyThreadState_PushFrame(PyThreadState *tstate, size_t size)
3037
34.4M
{
3038
34.4M
    assert(size < INT_MAX/sizeof(PyObject *));
3039
34.4M
    if (_PyThreadState_HasStackSpace(tstate, (int)size)) {
3040
34.0M
        _PyInterpreterFrame *res = (_PyInterpreterFrame *)tstate->datastack_top;
3041
34.0M
        tstate->datastack_top += size;
3042
34.0M
        return res;
3043
34.0M
    }
3044
386k
    return (_PyInterpreterFrame *)push_chunk(tstate, (int)size);
3045
34.4M
}
3046
3047
void
3048
_PyThreadState_PopFrame(PyThreadState *tstate, _PyInterpreterFrame * frame)
3049
161M
{
3050
161M
    assert(tstate->datastack_chunk);
3051
161M
    PyObject **base = (PyObject **)frame;
3052
161M
    if (base == &tstate->datastack_chunk->data[0]) {
3053
386k
        _PyStackChunk *chunk = tstate->datastack_chunk;
3054
386k
        _PyStackChunk *previous = chunk->previous;
3055
        // push_chunk ensures that the root chunk is never popped:
3056
386k
        assert(previous);
3057
386k
        tstate->datastack_top = &previous->data[previous->top];
3058
386k
        tstate->datastack_chunk = previous;
3059
386k
        _PyObject_VirtualFree(chunk, chunk->size);
3060
386k
        tstate->datastack_limit = (PyObject **)(((char *)previous) + previous->size);
3061
386k
    }
3062
161M
    else {
3063
161M
        assert(tstate->datastack_top);
3064
161M
        assert(tstate->datastack_top >= base);
3065
161M
        tstate->datastack_top = base;
3066
161M
    }
3067
161M
}
3068
3069
3070
#ifndef NDEBUG
3071
// Check that a Python thread state valid. In practice, this function is used
3072
// on a Python debug build to check if 'tstate' is a dangling pointer, if the
3073
// PyThreadState memory has been freed.
3074
//
3075
// Usage:
3076
//
3077
//     assert(_PyThreadState_CheckConsistency(tstate));
3078
int
3079
_PyThreadState_CheckConsistency(PyThreadState *tstate)
3080
587k
{
3081
587k
    assert(!_PyMem_IsPtrFreed(tstate));
3082
587k
    assert(!_PyMem_IsPtrFreed(tstate->interp));
3083
587k
    return 1;
3084
587k
}
3085
#endif
3086
3087
3088
// Check if a Python thread must call _PyThreadState_HangThread(), rather than
3089
// taking the GIL or attaching to the interpreter if Py_Finalize() has been
3090
// called.
3091
//
3092
// When this function is called by a daemon thread after Py_Finalize() has been
3093
// called, the GIL may no longer exist.
3094
//
3095
// tstate must be non-NULL.
3096
int
3097
_PyThreadState_MustExit(PyThreadState *tstate)
3098
587k
{
3099
587k
    int state = _Py_atomic_load_int_relaxed(&tstate->state);
3100
587k
    return state == _Py_THREAD_SHUTTING_DOWN;
3101
587k
}
3102
3103
void
3104
_PyThreadState_HangThread(PyThreadState *tstate)
3105
0
{
3106
0
    _PyThreadStateImpl *tstate_impl = (_PyThreadStateImpl *)tstate;
3107
0
    decref_threadstate(tstate_impl);
3108
0
    PyThread_hang_thread();
3109
0
}
3110
3111
/********************/
3112
/* mimalloc support */
3113
/********************/
3114
3115
static void
3116
tstate_mimalloc_bind(PyThreadState *tstate)
3117
22
{
3118
#ifdef Py_GIL_DISABLED
3119
    struct _mimalloc_thread_state *mts = &((_PyThreadStateImpl*)tstate)->mimalloc;
3120
3121
    // Initialize the mimalloc thread state. This must be called from the
3122
    // same thread that will use the thread state. The "mem" heap doubles as
3123
    // the "backing" heap.
3124
    mi_tld_t *tld = &mts->tld;
3125
    _mi_tld_init(tld, &mts->heaps[_Py_MIMALLOC_HEAP_MEM]);
3126
    llist_init(&mts->page_list);
3127
3128
    // Exiting threads push any remaining in-use segments to the abandoned
3129
    // pool to be re-claimed later by other threads. We use per-interpreter
3130
    // pools to keep Python objects from different interpreters separate.
3131
    tld->segments.abandoned = &tstate->interp->mimalloc.abandoned_pool;
3132
3133
    // Don't fill in the first N bytes up to ob_type in debug builds. We may
3134
    // access ob_tid and the refcount fields in the dict and list lock-less
3135
    // accesses, so they must remain valid for a while after deallocation.
3136
    size_t base_offset = offsetof(PyObject, ob_type);
3137
    if (_PyMem_DebugEnabled()) {
3138
        // The debug allocator adds two words at the beginning of each block.
3139
        base_offset += 2 * sizeof(size_t);
3140
    }
3141
    size_t debug_offsets[_Py_MIMALLOC_HEAP_COUNT] = {
3142
        [_Py_MIMALLOC_HEAP_OBJECT] = base_offset,
3143
        [_Py_MIMALLOC_HEAP_GC] = base_offset,
3144
        [_Py_MIMALLOC_HEAP_GC_PRE] = base_offset + 2 * sizeof(PyObject *),
3145
    };
3146
3147
    // Initialize each heap
3148
    for (uint8_t i = 0; i < _Py_MIMALLOC_HEAP_COUNT; i++) {
3149
        _mi_heap_init_ex(&mts->heaps[i], tld, _mi_arena_id_none(), false, i);
3150
        mts->heaps[i].debug_offset = (uint8_t)debug_offsets[i];
3151
    }
3152
3153
    // Heaps that store Python objects should use QSBR to delay freeing
3154
    // mimalloc pages while there may be concurrent lock-free readers.
3155
    mts->heaps[_Py_MIMALLOC_HEAP_OBJECT].page_use_qsbr = true;
3156
    mts->heaps[_Py_MIMALLOC_HEAP_GC].page_use_qsbr = true;
3157
    mts->heaps[_Py_MIMALLOC_HEAP_GC_PRE].page_use_qsbr = true;
3158
3159
    // By default, object allocations use _Py_MIMALLOC_HEAP_OBJECT.
3160
    // _PyObject_GC_New() and similar functions temporarily override this to
3161
    // use one of the GC heaps.
3162
    mts->current_object_heap = &mts->heaps[_Py_MIMALLOC_HEAP_OBJECT];
3163
3164
    _Py_atomic_store_int(&mts->initialized, 1);
3165
#endif
3166
22
}
3167
3168
void
3169
_PyThreadState_ClearMimallocHeaps(PyThreadState *tstate)
3170
0
{
3171
#ifdef Py_GIL_DISABLED
3172
    if (!tstate->_status.bound) {
3173
        // The mimalloc heaps are only initialized when the thread is bound.
3174
        return;
3175
    }
3176
3177
    _PyThreadStateImpl *tstate_impl = (_PyThreadStateImpl *)tstate;
3178
    for (Py_ssize_t i = 0; i < _Py_MIMALLOC_HEAP_COUNT; i++) {
3179
        // Abandon all segments in use by this thread. This pushes them to
3180
        // a shared pool to later be reclaimed by other threads. It's important
3181
        // to do this before the thread state is destroyed so that objects
3182
        // remain visible to the GC.
3183
        _mi_heap_collect_abandon(&tstate_impl->mimalloc.heaps[i]);
3184
    }
3185
#endif
3186
0
}
3187
3188
3189
int
3190
_Py_IsMainThread(void)
3191
28.6M
{
3192
28.6M
    unsigned long thread = PyThread_get_thread_ident();
3193
28.6M
    return (thread == _PyRuntime.main_thread);
3194
28.6M
}
3195
3196
3197
PyInterpreterState *
3198
_PyInterpreterState_Main(void)
3199
28.3M
{
3200
28.3M
    return _PyRuntime.interpreters.main;
3201
28.3M
}
3202
3203
3204
int
3205
_Py_IsMainInterpreterFinalizing(PyInterpreterState *interp)
3206
0
{
3207
    /* bpo-39877: Access _PyRuntime directly rather than using
3208
       tstate->interp->runtime to support calls from Python daemon threads.
3209
       After Py_Finalize() has been called, tstate can be a dangling pointer:
3210
       point to PyThreadState freed memory. */
3211
0
    return (_PyRuntimeState_GetFinalizing(&_PyRuntime) != NULL &&
3212
0
            interp == &_PyRuntime._main_interpreter);
3213
0
}
3214
3215
3216
const PyConfig *
3217
_Py_GetMainConfig(void)
3218
0
{
3219
0
    PyInterpreterState *interp = _PyInterpreterState_Main();
3220
0
    if (interp == NULL) {
3221
0
        return NULL;
3222
0
    }
3223
0
    return _PyInterpreterState_GetConfig(interp);
3224
0
}