Coverage Report

Created: 2026-02-26 06:53

next uncovered line (L), next uncovered region (R), next uncovered branch (B)
/src/cpython/Python/pystate.c
Line
Count
Source
1
2
/* Thread and interpreter state structures and their interfaces */
3
4
#include "Python.h"
5
#include "pycore_abstract.h"      // _PyIndex_Check()
6
#include "pycore_audit.h"         // _Py_AuditHookEntry
7
#include "pycore_backoff.h"       // JUMP_BACKWARD_INITIAL_VALUE, SIDE_EXIT_INITIAL_VALUE
8
#include "pycore_ceval.h"         // _PyEval_AcquireLock()
9
#include "pycore_codecs.h"        // _PyCodec_Fini()
10
#include "pycore_critical_section.h" // _PyCriticalSection_Resume()
11
#include "pycore_dtoa.h"          // _dtoa_state_INIT()
12
#include "pycore_freelist.h"      // _PyObject_ClearFreeLists()
13
#include "pycore_initconfig.h"    // _PyStatus_OK()
14
#include "pycore_interpframe.h"   // _PyThreadState_HasStackSpace()
15
#include "pycore_object.h"        // _PyType_InitCache()
16
#include "pycore_obmalloc.h"      // _PyMem_obmalloc_state_on_heap()
17
#include "pycore_optimizer.h"     // JIT_CLEANUP_THRESHOLD
18
#include "pycore_parking_lot.h"   // _PyParkingLot_AfterFork()
19
#include "pycore_pyerrors.h"      // _PyErr_Clear()
20
#include "pycore_pylifecycle.h"   // _PyAST_Fini()
21
#include "pycore_pymem.h"         // _PyMem_DebugEnabled()
22
#include "pycore_runtime.h"       // _PyRuntime
23
#include "pycore_runtime_init.h"  // _PyRuntimeState_INIT
24
#include "pycore_stackref.h"      // Py_STACKREF_DEBUG
25
#include "pycore_stats.h"         // FT_STAT_WORLD_STOP_INC()
26
#include "pycore_time.h"          // _PyTime_Init()
27
#include "pycore_uniqueid.h"      // _PyObject_FinalizePerThreadRefcounts()
28
29
30
/* --------------------------------------------------------------------------
31
CAUTION
32
33
Always use PyMem_RawMalloc() and PyMem_RawFree() directly in this file.  A
34
number of these functions are advertised as safe to call when the GIL isn't
35
held, and in a debug build Python redirects (e.g.) PyMem_NEW (etc) to Python's
36
debugging obmalloc functions.  Those aren't thread-safe (they rely on the GIL
37
to avoid the expense of doing their own locking).
38
-------------------------------------------------------------------------- */
39
40
#ifdef HAVE_DLOPEN
41
#  ifdef HAVE_DLFCN_H
42
#    include <dlfcn.h>
43
#  endif
44
#  if !HAVE_DECL_RTLD_LAZY
45
#    define RTLD_LAZY 1
46
#  endif
47
#endif
48
49
50
/****************************************/
51
/* helpers for the current thread state */
52
/****************************************/
53
54
// API for the current thread state is further down.
55
56
/* "current" means one of:
57
   - bound to the current OS thread
58
   - holds the GIL
59
 */
60
61
//-------------------------------------------------
62
// a highly efficient lookup for the current thread
63
//-------------------------------------------------
64
65
/*
66
   The stored thread state is set by PyThreadState_Swap().
67
68
   For each of these functions, the GIL must be held by the current thread.
69
 */
70
71
72
/* The attached thread state for the current thread. */
73
_Py_thread_local PyThreadState *_Py_tss_tstate = NULL;
74
75
/* The "bound" thread state used by PyGILState_Ensure(),
76
   also known as a "gilstate." */
77
_Py_thread_local PyThreadState *_Py_tss_gilstate = NULL;
78
79
/* The interpreter of the attached thread state,
80
   and is same as tstate->interp. */
81
_Py_thread_local PyInterpreterState *_Py_tss_interp = NULL;
82
83
static inline PyThreadState *
84
current_fast_get(void)
85
84.0M
{
86
84.0M
    return _Py_tss_tstate;
87
84.0M
}
88
89
static inline void
90
current_fast_set(_PyRuntimeState *Py_UNUSED(runtime), PyThreadState *tstate)
91
3.68M
{
92
3.68M
    assert(tstate != NULL);
93
3.68M
    _Py_tss_tstate = tstate;
94
3.68M
    assert(tstate->interp != NULL);
95
3.68M
    _Py_tss_interp = tstate->interp;
96
3.68M
}
97
98
static inline void
99
current_fast_clear(_PyRuntimeState *Py_UNUSED(runtime))
100
3.68M
{
101
3.68M
    _Py_tss_tstate = NULL;
102
3.68M
    _Py_tss_interp = NULL;
103
3.68M
}
104
105
#define tstate_verify_not_active(tstate) \
106
0
    if (tstate == current_fast_get()) { \
107
0
        _Py_FatalErrorFormat(__func__, "tstate %p is still current", tstate); \
108
0
    }
109
110
PyThreadState *
111
_PyThreadState_GetCurrent(void)
112
11.7M
{
113
11.7M
    return current_fast_get();
114
11.7M
}
115
116
117
//---------------------------------------------
118
// The thread state used by PyGILState_Ensure()
119
//---------------------------------------------
120
121
/*
122
   The stored thread state is set by bind_tstate() (AKA PyThreadState_Bind()).
123
124
   The GIL does not need to be held for these.
125
  */
126
127
static inline PyThreadState *
128
gilstate_get(void)
129
64
{
130
64
    return _Py_tss_gilstate;
131
64
}
132
133
static inline void
134
gilstate_set(PyThreadState *tstate)
135
32
{
136
32
    assert(tstate != NULL);
137
32
    _Py_tss_gilstate = tstate;
138
32
}
139
140
static inline void
141
gilstate_clear(void)
142
0
{
143
0
    _Py_tss_gilstate = NULL;
144
0
}
145
146
147
#ifndef NDEBUG
148
static inline int tstate_is_alive(PyThreadState *tstate);
149
150
static inline int
151
tstate_is_bound(PyThreadState *tstate)
152
{
153
    return tstate->_status.bound && !tstate->_status.unbound;
154
}
155
#endif  // !NDEBUG
156
157
static void bind_gilstate_tstate(PyThreadState *);
158
static void unbind_gilstate_tstate(PyThreadState *);
159
160
static void tstate_mimalloc_bind(PyThreadState *);
161
162
static void
163
bind_tstate(PyThreadState *tstate)
164
32
{
165
32
    assert(tstate != NULL);
166
32
    assert(tstate_is_alive(tstate) && !tstate->_status.bound);
167
32
    assert(!tstate->_status.unbound);  // just in case
168
32
    assert(!tstate->_status.bound_gilstate);
169
32
    assert(tstate != gilstate_get());
170
32
    assert(!tstate->_status.active);
171
32
    assert(tstate->thread_id == 0);
172
32
    assert(tstate->native_thread_id == 0);
173
174
    // Currently we don't necessarily store the thread state
175
    // in thread-local storage (e.g. per-interpreter).
176
177
32
    tstate->thread_id = PyThread_get_thread_ident();
178
32
#ifdef PY_HAVE_THREAD_NATIVE_ID
179
32
    tstate->native_thread_id = PyThread_get_thread_native_id();
180
32
#endif
181
182
#ifdef Py_GIL_DISABLED
183
    // Initialize biased reference counting inter-thread queue. Note that this
184
    // needs to be initialized from the active thread.
185
    _Py_brc_init_thread(tstate);
186
#endif
187
188
    // mimalloc state needs to be initialized from the active thread.
189
32
    tstate_mimalloc_bind(tstate);
190
191
32
    tstate->_status.bound = 1;
192
32
}
193
194
static void
195
unbind_tstate(PyThreadState *tstate)
196
0
{
197
0
    assert(tstate != NULL);
198
0
    assert(tstate_is_bound(tstate));
199
0
#ifndef HAVE_PTHREAD_STUBS
200
0
    assert(tstate->thread_id > 0);
201
0
#endif
202
0
#ifdef PY_HAVE_THREAD_NATIVE_ID
203
0
    assert(tstate->native_thread_id > 0);
204
0
#endif
205
206
    // We leave thread_id and native_thread_id alone
207
    // since they can be useful for debugging.
208
    // Check the `_status` field to know if these values
209
    // are still valid.
210
211
    // We leave tstate->_status.bound set to 1
212
    // to indicate it was previously bound.
213
0
    tstate->_status.unbound = 1;
214
0
}
215
216
217
/* Stick the thread state for this thread in thread specific storage.
218
219
   When a thread state is created for a thread by some mechanism
220
   other than PyGILState_Ensure(), it's important that the GILState
221
   machinery knows about it so it doesn't try to create another
222
   thread state for the thread.
223
   (This is a better fix for SF bug #1010677 than the first one attempted.)
224
225
   The only situation where you can legitimately have more than one
226
   thread state for an OS level thread is when there are multiple
227
   interpreters.
228
229
   Before 3.12, the PyGILState_*() APIs didn't work with multiple
230
   interpreters (see bpo-10915 and bpo-15751), so this function used
231
   to set TSS only once.  Thus, the first thread state created for that
232
   given OS level thread would "win", which seemed reasonable behaviour.
233
*/
234
235
static void
236
bind_gilstate_tstate(PyThreadState *tstate)
237
32
{
238
32
    assert(tstate != NULL);
239
32
    assert(tstate_is_alive(tstate));
240
32
    assert(tstate_is_bound(tstate));
241
    // XXX assert(!tstate->_status.active);
242
32
    assert(!tstate->_status.bound_gilstate);
243
244
32
    PyThreadState *tcur = gilstate_get();
245
32
    assert(tstate != tcur);
246
247
32
    if (tcur != NULL) {
248
0
        tcur->_status.bound_gilstate = 0;
249
0
    }
250
32
    gilstate_set(tstate);
251
32
    tstate->_status.bound_gilstate = 1;
252
32
}
253
254
static void
255
unbind_gilstate_tstate(PyThreadState *tstate)
256
0
{
257
0
    assert(tstate != NULL);
258
    // XXX assert(tstate_is_alive(tstate));
259
0
    assert(tstate_is_bound(tstate));
260
    // XXX assert(!tstate->_status.active);
261
0
    assert(tstate->_status.bound_gilstate);
262
0
    assert(tstate == gilstate_get());
263
0
    gilstate_clear();
264
0
    tstate->_status.bound_gilstate = 0;
265
0
}
266
267
268
//----------------------------------------------
269
// the thread state that currently holds the GIL
270
//----------------------------------------------
271
272
/* This is not exported, as it is not reliable!  It can only
273
   ever be compared to the state for the *current* thread.
274
   * If not equal, then it doesn't matter that the actual
275
     value may change immediately after comparison, as it can't
276
     possibly change to the current thread's state.
277
   * If equal, then the current thread holds the lock, so the value can't
278
     change until we yield the lock.
279
*/
280
static int
281
holds_gil(PyThreadState *tstate)
282
0
{
283
    // XXX Fall back to tstate->interp->runtime->ceval.gil.last_holder
284
    // (and tstate->interp->runtime->ceval.gil.locked).
285
0
    assert(tstate != NULL);
286
    /* Must be the tstate for this thread */
287
0
    assert(tstate == gilstate_get());
288
0
    return tstate == current_fast_get();
289
0
}
290
291
292
/****************************/
293
/* the global runtime state */
294
/****************************/
295
296
//----------
297
// lifecycle
298
//----------
299
300
/* Suppress deprecation warning for PyBytesObject.ob_shash */
301
_Py_COMP_DIAG_PUSH
302
_Py_COMP_DIAG_IGNORE_DEPR_DECLS
303
/* We use "initial" if the runtime gets re-used
304
   (e.g. Py_Finalize() followed by Py_Initialize()).
305
   Note that we initialize "initial" relative to _PyRuntime,
306
   to ensure pre-initialized pointers point to the active
307
   runtime state (and not "initial"). */
308
static const _PyRuntimeState initial = _PyRuntimeState_INIT(_PyRuntime, "");
309
_Py_COMP_DIAG_POP
310
311
#define LOCKS_INIT(runtime) \
312
0
    { \
313
0
        &(runtime)->interpreters.mutex, \
314
0
        &(runtime)->xi.data_lookup.registry.mutex, \
315
0
        &(runtime)->unicode_state.ids.mutex, \
316
0
        &(runtime)->imports.extensions.mutex, \
317
0
        &(runtime)->ceval.pending_mainthread.mutex, \
318
0
        &(runtime)->atexit.mutex, \
319
0
        &(runtime)->audit_hooks.mutex, \
320
0
        &(runtime)->allocators.mutex, \
321
0
        &(runtime)->_main_interpreter.types.mutex, \
322
0
        &(runtime)->_main_interpreter.code_state.mutex, \
323
0
    }
324
325
static void
326
init_runtime(_PyRuntimeState *runtime,
327
             void *open_code_hook, void *open_code_userdata,
328
             _Py_AuditHookEntry *audit_hook_head,
329
             Py_ssize_t unicode_next_index)
330
32
{
331
32
    assert(!runtime->preinitializing);
332
32
    assert(!runtime->preinitialized);
333
32
    assert(!runtime->core_initialized);
334
32
    assert(!runtime->initialized);
335
32
    assert(!runtime->_initialized);
336
337
32
    runtime->open_code_hook = open_code_hook;
338
32
    runtime->open_code_userdata = open_code_userdata;
339
32
    runtime->audit_hooks.head = audit_hook_head;
340
341
32
    PyPreConfig_InitPythonConfig(&runtime->preconfig);
342
343
    // Set it to the ID of the main thread of the main interpreter.
344
32
    runtime->main_thread = PyThread_get_thread_ident();
345
346
32
    runtime->unicode_state.ids.next_index = unicode_next_index;
347
32
    runtime->_initialized = 1;
348
32
}
349
350
PyStatus
351
_PyRuntimeState_Init(_PyRuntimeState *runtime)
352
32
{
353
    /* We preserve the hook across init, because there is
354
       currently no public API to set it between runtime
355
       initialization and interpreter initialization. */
356
32
    void *open_code_hook = runtime->open_code_hook;
357
32
    void *open_code_userdata = runtime->open_code_userdata;
358
32
    _Py_AuditHookEntry *audit_hook_head = runtime->audit_hooks.head;
359
    // bpo-42882: Preserve next_index value if Py_Initialize()/Py_Finalize()
360
    // is called multiple times.
361
32
    Py_ssize_t unicode_next_index = runtime->unicode_state.ids.next_index;
362
363
32
    if (runtime->_initialized) {
364
        // Py_Initialize() must be running again.
365
        // Reset to _PyRuntimeState_INIT.
366
0
        memcpy(runtime, &initial, sizeof(*runtime));
367
        // Preserve the cookie from the original runtime.
368
0
        memcpy(runtime->debug_offsets.cookie, _Py_Debug_Cookie, 8);
369
0
        assert(!runtime->_initialized);
370
0
    }
371
372
32
    PyStatus status = _PyTime_Init(&runtime->time);
373
32
    if (_PyStatus_EXCEPTION(status)) {
374
0
        return status;
375
0
    }
376
377
32
    init_runtime(runtime, open_code_hook, open_code_userdata, audit_hook_head,
378
32
                 unicode_next_index);
379
380
32
    return _PyStatus_OK();
381
32
}
382
383
void
384
_PyRuntimeState_Fini(_PyRuntimeState *runtime)
385
0
{
386
#ifdef Py_REF_DEBUG
387
    /* The count is cleared by _Py_FinalizeRefTotal(). */
388
    assert(runtime->object_state.interpreter_leaks == 0);
389
#endif
390
0
    gilstate_clear();
391
0
}
392
393
#ifdef HAVE_FORK
394
/* This function is called from PyOS_AfterFork_Child to ensure that
395
   newly created child processes do not share locks with the parent. */
396
PyStatus
397
_PyRuntimeState_ReInitThreads(_PyRuntimeState *runtime)
398
0
{
399
    // This was initially set in _PyRuntimeState_Init().
400
0
    runtime->main_thread = PyThread_get_thread_ident();
401
402
    // Clears the parking lot. Any waiting threads are dead. This must be
403
    // called before releasing any locks that use the parking lot.
404
0
    _PyParkingLot_AfterFork();
405
406
    // Re-initialize global locks
407
0
    PyMutex *locks[] = LOCKS_INIT(runtime);
408
0
    for (size_t i = 0; i < Py_ARRAY_LENGTH(locks); i++) {
409
0
        _PyMutex_at_fork_reinit(locks[i]);
410
0
    }
411
#ifdef Py_GIL_DISABLED
412
    for (PyInterpreterState *interp = runtime->interpreters.head;
413
         interp != NULL; interp = interp->next)
414
    {
415
        for (int i = 0; i < NUM_WEAKREF_LIST_LOCKS; i++) {
416
            _PyMutex_at_fork_reinit(&interp->weakref_locks[i]);
417
        }
418
    }
419
#endif
420
421
0
    _PyTypes_AfterFork();
422
423
0
    _PyThread_AfterFork(&runtime->threads);
424
425
0
    return _PyStatus_OK();
426
0
}
427
#endif
428
429
430
/*************************************/
431
/* the per-interpreter runtime state */
432
/*************************************/
433
434
//----------
435
// lifecycle
436
//----------
437
438
/* Calling this indicates that the runtime is ready to create interpreters. */
439
440
PyStatus
441
_PyInterpreterState_Enable(_PyRuntimeState *runtime)
442
32
{
443
32
    struct pyinterpreters *interpreters = &runtime->interpreters;
444
32
    interpreters->next_id = 0;
445
32
    return _PyStatus_OK();
446
32
}
447
448
static PyInterpreterState *
449
alloc_interpreter(void)
450
0
{
451
    // Aligned allocation for PyInterpreterState.
452
    // the first word of the memory block is used to store
453
    // the original pointer to be used later to free the memory.
454
0
    size_t alignment = _Alignof(PyInterpreterState);
455
0
    size_t allocsize = sizeof(PyInterpreterState) + sizeof(void *) + alignment - 1;
456
0
    void *mem = PyMem_RawCalloc(1, allocsize);
457
0
    if (mem == NULL) {
458
0
        return NULL;
459
0
    }
460
0
    void *ptr = _Py_ALIGN_UP((char *)mem + sizeof(void *), alignment);
461
0
    ((void **)ptr)[-1] = mem;
462
0
    assert(_Py_IS_ALIGNED(ptr, alignment));
463
0
    return ptr;
464
0
}
465
466
static void
467
free_interpreter(PyInterpreterState *interp)
468
0
{
469
#ifdef Py_STATS
470
    if (interp->pystats_struct) {
471
        PyMem_RawFree(interp->pystats_struct);
472
        interp->pystats_struct = NULL;
473
    }
474
#endif
475
    // The main interpreter is statically allocated so
476
    // should not be freed.
477
0
    if (interp != &_PyRuntime._main_interpreter) {
478
0
        if (_PyMem_obmalloc_state_on_heap(interp)) {
479
            // interpreter has its own obmalloc state, free it
480
0
            PyMem_RawFree(interp->obmalloc);
481
0
            interp->obmalloc = NULL;
482
0
        }
483
0
        assert(_Py_IS_ALIGNED(interp, _Alignof(PyInterpreterState)));
484
0
        PyMem_RawFree(((void **)interp)[-1]);
485
0
    }
486
0
}
487
488
#ifndef NDEBUG
489
static inline int check_interpreter_whence(long);
490
#endif
491
492
extern _Py_CODEUNIT *
493
_Py_LazyJitShim(
494
    struct _PyExecutorObject *exec, _PyInterpreterFrame *frame, _PyStackRef *stack_pointer, PyThreadState *tstate
495
);
496
497
/* Get the interpreter state to a minimal consistent state.
498
   Further init happens in pylifecycle.c before it can be used.
499
   All fields not initialized here are expected to be zeroed out,
500
   e.g. by PyMem_RawCalloc() or memset(), or otherwise pre-initialized.
501
   The runtime state is not manipulated.  Instead it is assumed that
502
   the interpreter is getting added to the runtime.
503
504
   Note that the main interpreter was statically initialized as part
505
   of the runtime and most state is already set properly.  That leaves
506
   a small number of fields to initialize dynamically, as well as some
507
   that are initialized lazily.
508
509
   For subinterpreters we memcpy() the main interpreter in
510
   PyInterpreterState_New(), leaving it in the same mostly-initialized
511
   state.  The only difference is that the interpreter has some
512
   self-referential state that is statically initializexd to the
513
   main interpreter.  We fix those fields here, in addition
514
   to the other dynamically initialized fields.
515
  */
516
517
static inline bool
518
is_env_enabled(const char *env_name)
519
64
{
520
64
    char *env = Py_GETENV(env_name);
521
64
    return env && *env != '\0' && *env != '0';
522
64
}
523
524
static inline bool
525
is_env_disabled(const char *env_name)
526
32
{
527
32
    char *env = Py_GETENV(env_name);
528
32
    return env != NULL && *env == '0';
529
32
}
530
531
static inline void
532
init_policy(uint16_t *target, const char *env_name, uint16_t default_value,
533
            long min_value, long max_value)
534
128
{
535
128
    *target = default_value;
536
128
    char *env = Py_GETENV(env_name);
537
128
    if (env && *env != '\0') {
538
0
        long value = atol(env);
539
0
        if (value >= min_value && value <= max_value) {
540
0
            *target = (uint16_t)value;
541
0
        }
542
0
    }
543
128
}
544
545
static PyStatus
546
init_interpreter(PyInterpreterState *interp,
547
                 _PyRuntimeState *runtime, int64_t id,
548
                 PyInterpreterState *next,
549
                 long whence)
550
32
{
551
32
    if (interp->_initialized) {
552
0
        return _PyStatus_ERR("interpreter already initialized");
553
0
    }
554
555
32
    assert(interp->_whence == _PyInterpreterState_WHENCE_NOTSET);
556
32
    assert(check_interpreter_whence(whence) == 0);
557
32
    interp->_whence = whence;
558
559
32
    assert(runtime != NULL);
560
32
    interp->runtime = runtime;
561
562
32
    assert(id > 0 || (id == 0 && interp == runtime->interpreters.main));
563
32
    interp->id = id;
564
565
32
    interp->id_refcount = 0;
566
567
32
    assert(runtime->interpreters.head == interp);
568
32
    assert(next != NULL || (interp == runtime->interpreters.main));
569
32
    interp->next = next;
570
571
32
    interp->threads.preallocated = &interp->_initial_thread;
572
573
    // We would call _PyObject_InitState() at this point
574
    // if interp->feature_flags were already set.
575
576
32
    _PyEval_InitState(interp);
577
32
    _PyGC_InitState(&interp->gc);
578
32
    PyConfig_InitPythonConfig(&interp->config);
579
32
    _PyType_InitCache(interp);
580
#ifdef Py_GIL_DISABLED
581
    _Py_brc_init_state(interp);
582
#endif
583
584
32
    llist_init(&interp->mem_free_queue.head);
585
32
    llist_init(&interp->asyncio_tasks_head);
586
32
    interp->asyncio_tasks_lock = (PyMutex){0};
587
544
    for (int i = 0; i < _PY_MONITORING_UNGROUPED_EVENTS; i++) {
588
512
        interp->monitors.tools[i] = 0;
589
512
    }
590
288
    for (int t = 0; t < PY_MONITORING_TOOL_IDS; t++) {
591
5.12k
        for (int e = 0; e < _PY_MONITORING_EVENTS; e++) {
592
4.86k
            interp->monitoring_callables[t][e] = NULL;
593
594
4.86k
        }
595
256
        interp->monitoring_tool_versions[t] = 0;
596
256
    }
597
32
    interp->_code_object_generation = 0;
598
32
    interp->jit = false;
599
32
    interp->compiling = false;
600
32
    interp->executor_list_head = NULL;
601
32
    interp->executor_deletion_list_head = NULL;
602
32
    interp->executor_creation_counter = JIT_CLEANUP_THRESHOLD;
603
604
    // Initialize optimization configuration from environment variables
605
    // PYTHON_JIT_STRESS sets aggressive defaults for testing, but can be overridden
606
32
    uint16_t jump_default = JUMP_BACKWARD_INITIAL_VALUE;
607
32
    uint16_t side_exit_default = SIDE_EXIT_INITIAL_VALUE;
608
609
32
    if (is_env_enabled("PYTHON_JIT_STRESS")) {
610
0
        jump_default = 63;
611
0
        side_exit_default = 63;
612
0
    }
613
614
32
    init_policy(&interp->opt_config.jump_backward_initial_value,
615
32
                "PYTHON_JIT_JUMP_BACKWARD_INITIAL_VALUE",
616
32
                jump_default, 1, MAX_VALUE);
617
32
    init_policy(&interp->opt_config.jump_backward_initial_backoff,
618
32
                "PYTHON_JIT_JUMP_BACKWARD_INITIAL_BACKOFF",
619
32
                JUMP_BACKWARD_INITIAL_BACKOFF, 0, MAX_BACKOFF);
620
32
    init_policy(&interp->opt_config.side_exit_initial_value,
621
32
                "PYTHON_JIT_SIDE_EXIT_INITIAL_VALUE",
622
32
                side_exit_default, 1, MAX_VALUE);
623
32
    init_policy(&interp->opt_config.side_exit_initial_backoff,
624
32
                "PYTHON_JIT_SIDE_EXIT_INITIAL_BACKOFF",
625
32
                SIDE_EXIT_INITIAL_BACKOFF, 0, MAX_BACKOFF);
626
627
32
    interp->opt_config.specialization_enabled = !is_env_enabled("PYTHON_SPECIALIZATION_OFF");
628
32
    interp->opt_config.uops_optimize_enabled = !is_env_disabled("PYTHON_UOPS_OPTIMIZE");
629
32
    if (interp != &runtime->_main_interpreter) {
630
        /* Fix the self-referential, statically initialized fields. */
631
0
        interp->dtoa = (struct _dtoa_state)_dtoa_state_INIT(interp);
632
0
    }
633
#if !defined(Py_GIL_DISABLED) && defined(Py_STACKREF_DEBUG)
634
    interp->next_stackref = INITIAL_STACKREF_INDEX;
635
    _Py_hashtable_allocator_t alloc = {
636
        .malloc = malloc,
637
        .free = free,
638
    };
639
    interp->open_stackrefs_table = _Py_hashtable_new_full(
640
        _Py_hashtable_hash_ptr,
641
        _Py_hashtable_compare_direct,
642
        NULL,
643
        NULL,
644
        &alloc
645
    );
646
#  ifdef Py_STACKREF_CLOSE_DEBUG
647
    interp->closed_stackrefs_table = _Py_hashtable_new_full(
648
        _Py_hashtable_hash_ptr,
649
        _Py_hashtable_compare_direct,
650
        NULL,
651
        NULL,
652
        &alloc
653
    );
654
#  endif
655
    _Py_stackref_associate(interp, Py_None, PyStackRef_None);
656
    _Py_stackref_associate(interp, Py_False, PyStackRef_False);
657
    _Py_stackref_associate(interp, Py_True, PyStackRef_True);
658
#endif
659
660
32
    interp->_initialized = 1;
661
32
    return _PyStatus_OK();
662
32
}
663
664
665
PyStatus
666
_PyInterpreterState_New(PyThreadState *tstate, PyInterpreterState **pinterp)
667
32
{
668
32
    *pinterp = NULL;
669
670
    // Don't get runtime from tstate since tstate can be NULL
671
32
    _PyRuntimeState *runtime = &_PyRuntime;
672
673
    // tstate is NULL when pycore_create_interpreter() calls
674
    // _PyInterpreterState_New() to create the main interpreter.
675
32
    if (tstate != NULL) {
676
0
        if (_PySys_Audit(tstate, "cpython.PyInterpreterState_New", NULL) < 0) {
677
0
            return _PyStatus_ERR("sys.audit failed");
678
0
        }
679
0
    }
680
681
    /* We completely serialize creation of multiple interpreters, since
682
       it simplifies things here and blocking concurrent calls isn't a problem.
683
       Regardless, we must fully block subinterpreter creation until
684
       after the main interpreter is created. */
685
32
    HEAD_LOCK(runtime);
686
687
32
    struct pyinterpreters *interpreters = &runtime->interpreters;
688
32
    int64_t id = interpreters->next_id;
689
32
    interpreters->next_id += 1;
690
691
    // Allocate the interpreter and add it to the runtime state.
692
32
    PyInterpreterState *interp;
693
32
    PyStatus status;
694
32
    PyInterpreterState *old_head = interpreters->head;
695
32
    if (old_head == NULL) {
696
        // We are creating the main interpreter.
697
32
        assert(interpreters->main == NULL);
698
32
        assert(id == 0);
699
700
32
        interp = &runtime->_main_interpreter;
701
32
        assert(interp->id == 0);
702
32
        assert(interp->next == NULL);
703
704
32
        interpreters->main = interp;
705
32
    }
706
0
    else {
707
0
        assert(interpreters->main != NULL);
708
0
        assert(id != 0);
709
710
0
        interp = alloc_interpreter();
711
0
        if (interp == NULL) {
712
0
            status = _PyStatus_NO_MEMORY();
713
0
            goto error;
714
0
        }
715
        // Set to _PyInterpreterState_INIT.
716
0
        memcpy(interp, &initial._main_interpreter, sizeof(*interp));
717
718
0
        if (id < 0) {
719
            /* overflow or Py_Initialize() not called yet! */
720
0
            status = _PyStatus_ERR("failed to get an interpreter ID");
721
0
            goto error;
722
0
        }
723
0
    }
724
32
    interpreters->head = interp;
725
726
32
    long whence = _PyInterpreterState_WHENCE_UNKNOWN;
727
32
    status = init_interpreter(interp, runtime,
728
32
                              id, old_head, whence);
729
32
    if (_PyStatus_EXCEPTION(status)) {
730
0
        goto error;
731
0
    }
732
733
32
    HEAD_UNLOCK(runtime);
734
735
32
    assert(interp != NULL);
736
32
    *pinterp = interp;
737
32
    return _PyStatus_OK();
738
739
0
error:
740
0
    HEAD_UNLOCK(runtime);
741
742
0
    if (interp != NULL) {
743
0
        free_interpreter(interp);
744
0
    }
745
0
    return status;
746
32
}
747
748
749
PyInterpreterState *
750
PyInterpreterState_New(void)
751
0
{
752
    // tstate can be NULL
753
0
    PyThreadState *tstate = current_fast_get();
754
755
0
    PyInterpreterState *interp;
756
0
    PyStatus status = _PyInterpreterState_New(tstate, &interp);
757
0
    if (_PyStatus_EXCEPTION(status)) {
758
0
        Py_ExitStatusException(status);
759
0
    }
760
0
    assert(interp != NULL);
761
0
    return interp;
762
0
}
763
764
#if !defined(Py_GIL_DISABLED) && defined(Py_STACKREF_DEBUG)
765
extern void
766
_Py_stackref_report_leaks(PyInterpreterState *interp);
767
#endif
768
769
static void
770
interpreter_clear(PyInterpreterState *interp, PyThreadState *tstate)
771
0
{
772
0
    assert(interp != NULL);
773
0
    assert(tstate != NULL);
774
0
    _PyRuntimeState *runtime = interp->runtime;
775
776
    /* XXX Conditions we need to enforce:
777
778
       * the GIL must be held by the current thread
779
       * tstate must be the "current" thread state (current_fast_get())
780
       * tstate->interp must be interp
781
       * for the main interpreter, tstate must be the main thread
782
     */
783
    // XXX Ideally, we would not rely on any thread state in this function
784
    // (and we would drop the "tstate" argument).
785
786
0
    if (_PySys_Audit(tstate, "cpython.PyInterpreterState_Clear", NULL) < 0) {
787
0
        _PyErr_Clear(tstate);
788
0
    }
789
790
    // Clear the current/main thread state last.
791
0
    _Py_FOR_EACH_TSTATE_BEGIN(interp, p) {
792
        // See https://github.com/python/cpython/issues/102126
793
        // Must be called without HEAD_LOCK held as it can deadlock
794
        // if any finalizer tries to acquire that lock.
795
0
        HEAD_UNLOCK(runtime);
796
0
        PyThreadState_Clear(p);
797
0
        HEAD_LOCK(runtime);
798
0
    }
799
0
    _Py_FOR_EACH_TSTATE_END(interp);
800
0
    if (tstate->interp == interp) {
801
        /* We fix tstate->_status below when we for sure aren't using it
802
           (e.g. no longer need the GIL). */
803
        // XXX Eliminate the need to do this.
804
0
        tstate->_status.cleared = 0;
805
0
    }
806
807
    /* It is possible that any of the objects below have a finalizer
808
       that runs Python code or otherwise relies on a thread state
809
       or even the interpreter state.  For now we trust that isn't
810
       a problem.
811
     */
812
    // XXX Make sure we properly deal with problematic finalizers.
813
814
0
    Py_CLEAR(interp->audit_hooks);
815
816
    // gh-140257: Threads have already been cleared, but daemon threads may
817
    // still access eval_breaker atomically via take_gil() right before they
818
    // hang. Use an atomic store to prevent data races during finalization.
819
0
    interp->ceval.instrumentation_version = 0;
820
0
    _Py_atomic_store_uintptr(&tstate->eval_breaker, 0);
821
822
0
    for (int i = 0; i < _PY_MONITORING_UNGROUPED_EVENTS; i++) {
823
0
        interp->monitors.tools[i] = 0;
824
0
    }
825
0
    for (int t = 0; t < PY_MONITORING_TOOL_IDS; t++) {
826
0
        for (int e = 0; e < _PY_MONITORING_EVENTS; e++) {
827
0
            Py_CLEAR(interp->monitoring_callables[t][e]);
828
0
        }
829
0
    }
830
0
    for (int t = 0; t < PY_MONITORING_TOOL_IDS; t++) {
831
0
        Py_CLEAR(interp->monitoring_tool_names[t]);
832
0
    }
833
0
    interp->_code_object_generation = 0;
834
#ifdef Py_GIL_DISABLED
835
    interp->tlbc_indices.tlbc_generation = 0;
836
#endif
837
838
0
    PyConfig_Clear(&interp->config);
839
0
    _PyCodec_Fini(interp);
840
841
0
    assert(interp->imports.modules == NULL);
842
0
    assert(interp->imports.modules_by_index == NULL);
843
0
    assert(interp->imports.importlib == NULL);
844
0
    assert(interp->imports.import_func == NULL);
845
846
0
    Py_CLEAR(interp->sysdict_copy);
847
0
    Py_CLEAR(interp->builtins_copy);
848
0
    Py_CLEAR(interp->dict);
849
0
#ifdef HAVE_FORK
850
0
    Py_CLEAR(interp->before_forkers);
851
0
    Py_CLEAR(interp->after_forkers_parent);
852
0
    Py_CLEAR(interp->after_forkers_child);
853
0
#endif
854
855
856
#ifdef _Py_TIER2
857
    _Py_ClearExecutorDeletionList(interp);
858
#endif
859
0
    _PyAST_Fini(interp);
860
0
    _PyAtExit_Fini(interp);
861
862
    // All Python types must be destroyed before the last GC collection. Python
863
    // types create a reference cycle to themselves in their in their
864
    // PyTypeObject.tp_mro member (the tuple contains the type).
865
866
    /* Last garbage collection on this interpreter */
867
0
    _PyGC_CollectNoFail(tstate);
868
0
    _PyGC_Fini(interp);
869
870
    // Finalize warnings after last gc so that any finalizers can
871
    // access warnings state
872
0
    _PyWarnings_Fini(interp);
873
0
    struct _PyExecutorObject *cold = interp->cold_executor;
874
0
    if (cold != NULL) {
875
0
        interp->cold_executor = NULL;
876
0
        assert(cold->vm_data.valid);
877
0
        assert(!cold->vm_data.cold);
878
0
        _PyExecutor_Free(cold);
879
0
    }
880
881
0
    struct _PyExecutorObject *cold_dynamic = interp->cold_dynamic_executor;
882
0
    if (cold_dynamic != NULL) {
883
0
        interp->cold_dynamic_executor = NULL;
884
0
        assert(cold_dynamic->vm_data.valid);
885
0
        assert(!cold_dynamic->vm_data.cold);
886
0
        _PyExecutor_Free(cold_dynamic);
887
0
    }
888
    /* We don't clear sysdict and builtins until the end of this function.
889
       Because clearing other attributes can execute arbitrary Python code
890
       which requires sysdict and builtins. */
891
0
    PyDict_Clear(interp->sysdict);
892
0
    PyDict_Clear(interp->builtins);
893
0
    Py_CLEAR(interp->sysdict);
894
0
    Py_CLEAR(interp->builtins);
895
896
#if !defined(Py_GIL_DISABLED) && defined(Py_STACKREF_DEBUG)
897
#  ifdef Py_STACKREF_CLOSE_DEBUG
898
    _Py_hashtable_destroy(interp->closed_stackrefs_table);
899
    interp->closed_stackrefs_table = NULL;
900
#  endif
901
    _Py_stackref_report_leaks(interp);
902
    _Py_hashtable_destroy(interp->open_stackrefs_table);
903
    interp->open_stackrefs_table = NULL;
904
#endif
905
906
0
    if (tstate->interp == interp) {
907
        /* We are now safe to fix tstate->_status.cleared. */
908
        // XXX Do this (much) earlier?
909
0
        tstate->_status.cleared = 1;
910
0
    }
911
912
0
    for (int i=0; i < DICT_MAX_WATCHERS; i++) {
913
0
        interp->dict_state.watchers[i] = NULL;
914
0
    }
915
916
0
    for (int i=0; i < TYPE_MAX_WATCHERS; i++) {
917
0
        interp->type_watchers[i] = NULL;
918
0
    }
919
920
0
    for (int i=0; i < FUNC_MAX_WATCHERS; i++) {
921
0
        interp->func_watchers[i] = NULL;
922
0
    }
923
0
    interp->active_func_watchers = 0;
924
925
0
    for (int i=0; i < CODE_MAX_WATCHERS; i++) {
926
0
        interp->code_watchers[i] = NULL;
927
0
    }
928
0
    interp->active_code_watchers = 0;
929
930
0
    for (int i=0; i < CONTEXT_MAX_WATCHERS; i++) {
931
0
        interp->context_watchers[i] = NULL;
932
0
    }
933
0
    interp->active_context_watchers = 0;
934
    // XXX Once we have one allocator per interpreter (i.e.
935
    // per-interpreter GC) we must ensure that all of the interpreter's
936
    // objects have been cleaned up at the point.
937
938
    // We could clear interp->threads.freelist here
939
    // if it held more than just the initial thread state.
940
0
}
941
942
943
void
944
PyInterpreterState_Clear(PyInterpreterState *interp)
945
0
{
946
    // Use the current Python thread state to call audit hooks and to collect
947
    // garbage. It can be different than the current Python thread state
948
    // of 'interp'.
949
0
    PyThreadState *current_tstate = current_fast_get();
950
0
    _PyImport_ClearCore(interp);
951
0
    interpreter_clear(interp, current_tstate);
952
0
}
953
954
955
void
956
_PyInterpreterState_Clear(PyThreadState *tstate)
957
0
{
958
0
    _PyImport_ClearCore(tstate->interp);
959
0
    interpreter_clear(tstate->interp, tstate);
960
0
}
961
962
963
static inline void tstate_deactivate(PyThreadState *tstate);
964
static void tstate_set_detached(PyThreadState *tstate, int detached_state);
965
static void zapthreads(PyInterpreterState *interp);
966
967
/* Free all memory associated with an interpreter state that has already
   been cleared (see PyInterpreterState_Clear()).  The interpreter is
   unlinked from the runtime's interpreter list and must have no
   remaining thread states. */
void
PyInterpreterState_Delete(PyInterpreterState *interp)
{
    _PyRuntimeState *runtime = interp->runtime;
    struct pyinterpreters *interpreters = &runtime->interpreters;

    // XXX Clearing the "current" thread state should happen before
    // we start finalizing the interpreter (or the current thread state).
    PyThreadState *tcur = current_fast_get();
    if (tcur != NULL && interp == tcur->interp) {
        /* Unset current thread.  After this, many C API calls become crashy. */
        _PyThreadState_Detach(tcur);
    }

    // Remove any remaining thread states of this interpreter.
    zapthreads(interp);

    // XXX These two calls should be done at the end of clear_interpreter(),
    // but currently some objects get decref'ed after that.
#ifdef Py_REF_DEBUG
    _PyInterpreterState_FinalizeRefTotal(interp);
#endif
    _PyInterpreterState_FinalizeAllocatedBlocks(interp);

    HEAD_LOCK(runtime);
    // Locate the link that points at 'interp' so it can be unlinked.
    PyInterpreterState **p;
    for (p = &interpreters->head; ; p = &(*p)->next) {
        if (*p == NULL) {
            // 'interp' was not in the runtime's list: state corruption.
            Py_FatalError("NULL interpreter");
        }
        if (*p == interp) {
            break;
        }
    }
    if (interp->threads.head != NULL) {
        // zapthreads() above should have emptied the thread list.
        Py_FatalError("remaining threads");
    }
    *p = interp->next;

    if (interpreters->main == interp) {
        interpreters->main = NULL;
        if (interpreters->head != NULL) {
            // The main interpreter must be the last one deleted.
            Py_FatalError("remaining subinterpreters");
        }
    }
    HEAD_UNLOCK(runtime);

    _Py_qsbr_fini(interp);

    _PyObject_FiniState(interp);

    PyConfig_Clear(&interp->config);

    free_interpreter(interp);
}
1021
1022
1023
#ifdef HAVE_FORK
1024
/*
1025
 * Delete all interpreter states except the main interpreter.  If there
1026
 * is a current interpreter state, it *must* be the main interpreter.
1027
 */
1028
PyStatus
1029
_PyInterpreterState_DeleteExceptMain(_PyRuntimeState *runtime)
1030
0
{
1031
0
    struct pyinterpreters *interpreters = &runtime->interpreters;
1032
1033
0
    PyThreadState *tstate = _PyThreadState_Swap(runtime, NULL);
1034
0
    if (tstate != NULL && tstate->interp != interpreters->main) {
1035
0
        return _PyStatus_ERR("not main interpreter");
1036
0
    }
1037
1038
0
    HEAD_LOCK(runtime);
1039
0
    PyInterpreterState *interp = interpreters->head;
1040
0
    interpreters->head = NULL;
1041
0
    while (interp != NULL) {
1042
0
        if (interp == interpreters->main) {
1043
0
            interpreters->main->next = NULL;
1044
0
            interpreters->head = interp;
1045
0
            interp = interp->next;
1046
0
            continue;
1047
0
        }
1048
1049
        // XXX Won't this fail since PyInterpreterState_Clear() requires
1050
        // the "current" tstate to be set?
1051
0
        PyInterpreterState_Clear(interp);  // XXX must activate?
1052
0
        zapthreads(interp);
1053
0
        PyInterpreterState *prev_interp = interp;
1054
0
        interp = interp->next;
1055
0
        free_interpreter(prev_interp);
1056
0
    }
1057
0
    HEAD_UNLOCK(runtime);
1058
1059
0
    if (interpreters->head == NULL) {
1060
0
        return _PyStatus_ERR("missing main interpreter");
1061
0
    }
1062
0
    _PyThreadState_Swap(runtime, tstate);
1063
0
    return _PyStatus_OK();
1064
0
}
1065
#endif
1066
1067
static inline void
set_main_thread(PyInterpreterState *interp, PyThreadState *tstate)
{
    /* Record 'tstate' as the thread running the interpreter's main
       program (NULL means "not running").  Relaxed ordering suffices:
       readers only compare the pointer value (see get_main_thread()). */
    _Py_atomic_store_ptr_relaxed(&interp->threads.main, tstate);
}
1072
1073
static inline PyThreadState *
get_main_thread(PyInterpreterState *interp)
{
    /* Return the thread state recorded by set_main_thread(), or NULL
       if no thread is running the interpreter's main program. */
    return _Py_atomic_load_ptr_relaxed(&interp->threads.main);
}
1078
1079
void
_PyErr_SetInterpreterAlreadyRunning(void)
{
    /* Raise InterpreterError indicating the interpreter's main program
       is already running (see _PyInterpreterState_SetRunningMain()). */
    PyErr_SetString(PyExc_InterpreterError, "interpreter already running");
}
1084
1085
int
1086
_PyInterpreterState_SetRunningMain(PyInterpreterState *interp)
1087
0
{
1088
0
    if (get_main_thread(interp) != NULL) {
1089
0
        _PyErr_SetInterpreterAlreadyRunning();
1090
0
        return -1;
1091
0
    }
1092
0
    PyThreadState *tstate = current_fast_get();
1093
0
    _Py_EnsureTstateNotNULL(tstate);
1094
0
    if (tstate->interp != interp) {
1095
0
        PyErr_SetString(PyExc_RuntimeError,
1096
0
                        "current tstate has wrong interpreter");
1097
0
        return -1;
1098
0
    }
1099
0
    set_main_thread(interp, tstate);
1100
1101
0
    return 0;
1102
0
}
1103
1104
void
_PyInterpreterState_SetNotRunningMain(PyInterpreterState *interp)
{
    /* Mark the interpreter as no longer running its main program.
       Only the thread recorded by _PyInterpreterState_SetRunningMain()
       may call this. */
    assert(get_main_thread(interp) == current_fast_get());
    set_main_thread(interp, NULL);
}
1110
1111
int
1112
_PyInterpreterState_IsRunningMain(PyInterpreterState *interp)
1113
0
{
1114
0
    if (get_main_thread(interp) != NULL) {
1115
0
        return 1;
1116
0
    }
1117
    // Embedders might not know to call _PyInterpreterState_SetRunningMain(),
1118
    // so their main thread wouldn't show it is running the main interpreter's
1119
    // program.  (Py_Main() doesn't have this problem.)  For now this isn't
1120
    // critical.  If it were, we would need to infer "running main" from other
1121
    // information, like if it's the main interpreter.  We used to do that
1122
    // but the naive approach led to some inconsistencies that caused problems.
1123
0
    return 0;
1124
0
}
1125
1126
int
1127
_PyThreadState_IsRunningMain(PyThreadState *tstate)
1128
0
{
1129
0
    PyInterpreterState *interp = tstate->interp;
1130
    // See the note in _PyInterpreterState_IsRunningMain() about
1131
    // possible false negatives here for embedders.
1132
0
    return get_main_thread(interp) == tstate;
1133
0
}
1134
1135
void
1136
_PyInterpreterState_ReinitRunningMain(PyThreadState *tstate)
1137
0
{
1138
0
    PyInterpreterState *interp = tstate->interp;
1139
0
    if (get_main_thread(interp) != tstate) {
1140
0
        set_main_thread(interp, NULL);
1141
0
    }
1142
0
}
1143
1144
1145
//----------
1146
// accessors
1147
//----------
1148
1149
int
_PyInterpreterState_IsReady(PyInterpreterState *interp)
{
    /* Return the interpreter's _ready flag (nonzero once initialization
       has marked it ready for use). */
    return interp->_ready;
}
1154
1155
#ifndef NDEBUG
1156
static inline int
1157
check_interpreter_whence(long whence)
1158
{
1159
    if(whence < 0) {
1160
        return -1;
1161
    }
1162
    if (whence > _PyInterpreterState_WHENCE_MAX) {
1163
        return -1;
1164
    }
1165
    return 0;
1166
}
1167
#endif
1168
1169
long
_PyInterpreterState_GetWhence(PyInterpreterState *interp)
{
    /* Return the code recording how this interpreter was created. */
    assert(check_interpreter_whence(interp->_whence) == 0);
    return interp->_whence;
}
1175
1176
void
_PyInterpreterState_SetWhence(PyInterpreterState *interp, long whence)
{
    /* Update the code recording how this interpreter was created.
       The value must already have been set once (not NOTSET) and the
       new value must be in the valid range. */
    assert(interp->_whence != _PyInterpreterState_WHENCE_NOTSET);
    assert(check_interpreter_whence(whence) == 0);
    interp->_whence = whence;
}
1183
1184
1185
PyObject *
1186
_Py_GetMainModule(PyThreadState *tstate)
1187
0
{
1188
    // We return None to indicate "not found" or "bogus".
1189
0
    PyObject *modules = _PyImport_GetModulesRef(tstate->interp);
1190
0
    if (modules == Py_None) {
1191
0
        return modules;
1192
0
    }
1193
0
    PyObject *module = NULL;
1194
0
    (void)PyMapping_GetOptionalItem(modules, &_Py_ID(__main__), &module);
1195
0
    Py_DECREF(modules);
1196
0
    if (module == NULL && !PyErr_Occurred()) {
1197
0
        Py_RETURN_NONE;
1198
0
    }
1199
0
    return module;
1200
0
}
1201
1202
int
1203
_Py_CheckMainModule(PyObject *module)
1204
0
{
1205
0
    if (module == NULL || module == Py_None) {
1206
0
        if (!PyErr_Occurred()) {
1207
0
            (void)_PyErr_SetModuleNotFoundError(&_Py_ID(__main__));
1208
0
        }
1209
0
        return -1;
1210
0
    }
1211
0
    if (!Py_IS_TYPE(module, &PyModule_Type)) {
1212
        /* The __main__ module has been tampered with. */
1213
0
        PyObject *msg = PyUnicode_FromString("invalid __main__ module");
1214
0
        if (msg != NULL) {
1215
0
            (void)PyErr_SetImportError(msg, &_Py_ID(__main__), NULL);
1216
0
            Py_DECREF(msg);
1217
0
        }
1218
0
        return -1;
1219
0
    }
1220
0
    return 0;
1221
0
}
1222
1223
1224
PyObject *
1225
PyInterpreterState_GetDict(PyInterpreterState *interp)
1226
170
{
1227
170
    if (interp->dict == NULL) {
1228
12
        interp->dict = PyDict_New();
1229
12
        if (interp->dict == NULL) {
1230
0
            PyErr_Clear();
1231
0
        }
1232
12
    }
1233
    /* Returning NULL means no per-interpreter dict is available. */
1234
170
    return interp->dict;
1235
170
}
1236
1237
1238
//----------
1239
// interp ID
1240
//----------
1241
1242
int64_t
1243
_PyInterpreterState_ObjectToID(PyObject *idobj)
1244
0
{
1245
0
    if (!_PyIndex_Check(idobj)) {
1246
0
        PyErr_Format(PyExc_TypeError,
1247
0
                     "interpreter ID must be an int, got %.100s",
1248
0
                     Py_TYPE(idobj)->tp_name);
1249
0
        return -1;
1250
0
    }
1251
1252
    // This may raise OverflowError.
1253
    // For now, we don't worry about if LLONG_MAX < INT64_MAX.
1254
0
    long long id = PyLong_AsLongLong(idobj);
1255
0
    if (id == -1 && PyErr_Occurred()) {
1256
0
        return -1;
1257
0
    }
1258
1259
0
    if (id < 0) {
1260
0
        PyErr_Format(PyExc_ValueError,
1261
0
                     "interpreter ID must be a non-negative int, got %R",
1262
0
                     idobj);
1263
0
        return -1;
1264
0
    }
1265
#if LLONG_MAX > INT64_MAX
1266
    else if (id > INT64_MAX) {
1267
        PyErr_SetString(PyExc_OverflowError, "int too big to convert");
1268
        return -1;
1269
    }
1270
#endif
1271
0
    else {
1272
0
        return (int64_t)id;
1273
0
    }
1274
0
}
1275
1276
int64_t
1277
PyInterpreterState_GetID(PyInterpreterState *interp)
1278
0
{
1279
0
    if (interp == NULL) {
1280
0
        PyErr_SetString(PyExc_RuntimeError, "no interpreter provided");
1281
0
        return -1;
1282
0
    }
1283
0
    return interp->id;
1284
0
}
1285
1286
PyObject *
1287
_PyInterpreterState_GetIDObject(PyInterpreterState *interp)
1288
0
{
1289
0
    int64_t interpid = interp->id;
1290
0
    if (interpid < 0) {
1291
0
        return NULL;
1292
0
    }
1293
0
    assert(interpid < LLONG_MAX);
1294
0
    return PyLong_FromLongLong(interpid);
1295
0
}
1296
1297
1298
1299
void
_PyInterpreterState_IDIncref(PyInterpreterState *interp)
{
    /* Atomically add one reference to the interpreter's ID
       (paired with _PyInterpreterState_IDDecref()). */
    _Py_atomic_add_ssize(&interp->id_refcount, 1);
}
1304
1305
1306
void
_PyInterpreterState_IDDecref(PyInterpreterState *interp)
{
    /* Drop one reference to the interpreter's ID.  If that was the last
       reference and the interpreter's lifetime is tied to its ID
       refcount (requires_idref), end the interpreter itself. */
    _PyRuntimeState *runtime = interp->runtime;

    Py_ssize_t refcount = _Py_atomic_add_ssize(&interp->id_refcount, -1);

    // 'refcount' is the value before the decrement (fetch-and-add
    // semantics), so 1 means the count just reached zero.
    if (refcount == 1 && interp->requires_idref) {
        // Create a temporary thread state in 'interp' so
        // Py_EndInterpreter() can run.
        PyThreadState *tstate =
            _PyThreadState_NewBound(interp, _PyThreadState_WHENCE_FINI);

        // XXX Possible GILState issues?
        PyThreadState *save_tstate = _PyThreadState_Swap(runtime, tstate);
        Py_EndInterpreter(tstate);
        _PyThreadState_Swap(runtime, save_tstate);
    }
}
1323
1324
int
_PyInterpreterState_RequiresIDRef(PyInterpreterState *interp)
{
    /* Return nonzero if the interpreter's lifetime is controlled by its
       ID refcount (see _PyInterpreterState_IDDecref()). */
    return interp->requires_idref;
}
1329
1330
void
1331
_PyInterpreterState_RequireIDRef(PyInterpreterState *interp, int required)
1332
0
{
1333
0
    interp->requires_idref = required ? 1 : 0;
1334
0
}
1335
1336
1337
//-----------------------------
1338
// look up an interpreter state
1339
//-----------------------------
1340
1341
/* Return the interpreter associated with the current OS thread.
1342
1343
   The GIL must be held.
1344
  */
1345
1346
PyInterpreterState*
PyInterpreterState_Get(void)
{
    /* Return the interpreter of the current OS thread.  A thread state
       must be attached; otherwise this is a fatal error. */
    _Py_AssertHoldsTstate();
    PyInterpreterState *interp = _Py_tss_interp;
    if (interp == NULL) {
        Py_FatalError("no current interpreter");
    }
    return interp;
}
1356
1357
1358
static PyInterpreterState *
1359
interp_look_up_id(_PyRuntimeState *runtime, int64_t requested_id)
1360
0
{
1361
0
    PyInterpreterState *interp = runtime->interpreters.head;
1362
0
    while (interp != NULL) {
1363
0
        int64_t id = interp->id;
1364
0
        assert(id >= 0);
1365
0
        if (requested_id == id) {
1366
0
            return interp;
1367
0
        }
1368
0
        interp = PyInterpreterState_Next(interp);
1369
0
    }
1370
0
    return NULL;
1371
0
}
1372
1373
/* Return the interpreter state with the given ID.
1374
1375
   Fail with RuntimeError if the interpreter is not found. */
1376
1377
PyInterpreterState *
1378
_PyInterpreterState_LookUpID(int64_t requested_id)
1379
0
{
1380
0
    PyInterpreterState *interp = NULL;
1381
0
    if (requested_id >= 0) {
1382
0
        _PyRuntimeState *runtime = &_PyRuntime;
1383
0
        HEAD_LOCK(runtime);
1384
0
        interp = interp_look_up_id(runtime, requested_id);
1385
0
        HEAD_UNLOCK(runtime);
1386
0
    }
1387
0
    if (interp == NULL && !PyErr_Occurred()) {
1388
0
        PyErr_Format(PyExc_InterpreterNotFoundError,
1389
0
                     "unrecognized interpreter ID %lld", requested_id);
1390
0
    }
1391
0
    return interp;
1392
0
}
1393
1394
PyInterpreterState *
1395
_PyInterpreterState_LookUpIDObject(PyObject *requested_id)
1396
0
{
1397
0
    int64_t id = _PyInterpreterState_ObjectToID(requested_id);
1398
0
    if (id < 0) {
1399
0
        return NULL;
1400
0
    }
1401
0
    return _PyInterpreterState_LookUpID(id);
1402
0
}
1403
1404
1405
/********************************/
1406
/* the per-thread runtime state */
1407
/********************************/
1408
1409
#ifndef NDEBUG
1410
static inline int
1411
tstate_is_alive(PyThreadState *tstate)
1412
{
1413
    return (tstate->_status.initialized &&
1414
            !tstate->_status.finalized &&
1415
            !tstate->_status.cleared &&
1416
            !tstate->_status.finalizing);
1417
}
1418
#endif
1419
1420
1421
//----------
1422
// lifecycle
1423
//----------
1424
1425
static _PyStackChunk*
allocate_chunk(int size_in_bytes, _PyStackChunk* previous)
{
    /* Allocate a data-stack chunk of 'size_in_bytes' bytes, linked to
       'previous'.  Returns NULL on allocation failure. */
    assert(size_in_bytes % sizeof(PyObject **) == 0);
    _PyStackChunk *chunk = _PyObject_VirtualAlloc(size_in_bytes);
    if (chunk != NULL) {
        chunk->previous = previous;
        chunk->size = size_in_bytes;
        chunk->top = 0;
    }
    return chunk;
}
1438
1439
static void
reset_threadstate(_PyThreadStateImpl *tstate)
{
    /* Restore 'tstate' to the pristine statically-initialized image so
       it can be reused as a fresh thread state. */
    // Set to _PyThreadState_INIT directly?
    memcpy(tstate,
           &initial._main_interpreter._initial_thread,
           sizeof(*tstate));
}
1447
1448
static _PyThreadStateImpl *
1449
alloc_threadstate(PyInterpreterState *interp)
1450
32
{
1451
32
    _PyThreadStateImpl *tstate;
1452
1453
    // Try the preallocated tstate first.
1454
32
    tstate = _Py_atomic_exchange_ptr(&interp->threads.preallocated, NULL);
1455
1456
    // Fall back to the allocator.
1457
32
    if (tstate == NULL) {
1458
0
        tstate = PyMem_RawCalloc(1, sizeof(_PyThreadStateImpl));
1459
0
        if (tstate == NULL) {
1460
0
            return NULL;
1461
0
        }
1462
0
        reset_threadstate(tstate);
1463
0
    }
1464
32
    return tstate;
1465
32
}
1466
1467
static void
1468
free_threadstate(_PyThreadStateImpl *tstate)
1469
0
{
1470
0
    PyInterpreterState *interp = tstate->base.interp;
1471
#ifdef Py_STATS
1472
    _PyStats_ThreadFini(tstate);
1473
#endif
1474
    // The initial thread state of the interpreter is allocated
1475
    // as part of the interpreter state so should not be freed.
1476
0
    if (tstate == &interp->_initial_thread) {
1477
        // Make it available again.
1478
0
        reset_threadstate(tstate);
1479
0
        assert(interp->threads.preallocated == NULL);
1480
0
        _Py_atomic_store_ptr(&interp->threads.preallocated, tstate);
1481
0
    }
1482
0
    else {
1483
0
        PyMem_RawFree(tstate);
1484
0
    }
1485
0
}
1486
1487
static void
1488
decref_threadstate(_PyThreadStateImpl *tstate)
1489
0
{
1490
0
    if (_Py_atomic_add_ssize(&tstate->refcount, -1) == 1) {
1491
        // The last reference to the thread state is gone.
1492
0
        free_threadstate(tstate);
1493
0
    }
1494
0
}
1495
1496
/* Get the thread state to a minimal consistent state.
1497
   Further init happens in pylifecycle.c before it can be used.
1498
   All fields not initialized here are expected to be zeroed out,
1499
   e.g. by PyMem_RawCalloc() or memset(), or otherwise pre-initialized.
1500
   The interpreter state is not manipulated.  Instead it is assumed that
1501
   the thread is getting added to the interpreter.
1502
  */
1503
1504
static void
init_threadstate(_PyThreadStateImpl *_tstate,
                 PyInterpreterState *interp, uint64_t id, int whence)
{
    /* Bring '_tstate' to a minimal consistent state for 'interp', with
       unique ID 'id' and origin code 'whence'.  All fields not set here
       are expected to be zeroed already (see the comment above). */
    PyThreadState *tstate = (PyThreadState *)_tstate;
    if (tstate->_status.initialized) {
        Py_FatalError("thread state already initialized");
    }

    assert(interp != NULL);
    tstate->interp = interp;
    tstate->eval_breaker =
        _Py_atomic_load_uintptr_relaxed(&interp->ceval.instrumentation_version);

    // next/prev are set in add_threadstate().
    assert(tstate->next == NULL);
    assert(tstate->prev == NULL);

    assert(tstate->_whence == _PyThreadState_WHENCE_NOTSET);
    assert(whence >= 0 && whence <= _PyThreadState_WHENCE_THREADING_DAEMON);
    tstate->_whence = whence;

    assert(id > 0);
    tstate->id = id;

    // thread_id and native_thread_id are set in bind_tstate().

    tstate->py_recursion_limit = interp->ceval.recursion_limit;
    tstate->py_recursion_remaining = interp->ceval.recursion_limit;
    tstate->exc_info = &tstate->exc_state;

    // PyGILState_Release must not try to delete this thread state.
    // This is cleared when PyGILState_Ensure() creates the thread state.
    tstate->gilstate_counter = 1;

    // Initialize the embedded base frame - sentinel at the bottom of the frame stack
    _tstate->base_frame.previous = NULL;
    _tstate->base_frame.f_executable = PyStackRef_None;
    _tstate->base_frame.f_funcobj = PyStackRef_NULL;
    _tstate->base_frame.f_globals = NULL;
    _tstate->base_frame.f_builtins = NULL;
    _tstate->base_frame.f_locals = NULL;
    _tstate->base_frame.frame_obj = NULL;
    _tstate->base_frame.instr_ptr = NULL;
    _tstate->base_frame.stackpointer = _tstate->base_frame.localsplus;
    _tstate->base_frame.return_offset = 0;
    _tstate->base_frame.owner = FRAME_OWNED_BY_INTERPRETER;
    _tstate->base_frame.visited = 0;
#ifdef Py_DEBUG
    _tstate->base_frame.lltrace = 0;
#endif
#ifdef Py_GIL_DISABLED
    _tstate->base_frame.tlbc_index = 0;
#endif
    _tstate->base_frame.localsplus[0] = PyStackRef_NULL;

    // current_frame starts pointing to the base frame
    tstate->current_frame = &_tstate->base_frame;
    // base_frame pointer for profilers to validate stack unwinding
    tstate->base_frame = &_tstate->base_frame;
    tstate->datastack_chunk = NULL;
    tstate->datastack_top = NULL;
    tstate->datastack_limit = NULL;
    tstate->what_event = -1;
    tstate->current_executor = NULL;
    tstate->jit_exit = NULL;
    tstate->dict_global_version = 0;

    // C-stack bounds are refined later; start with permissive values.
    _tstate->c_stack_soft_limit = UINTPTR_MAX;
    _tstate->c_stack_top = 0;
    _tstate->c_stack_hard_limit = 0;

    _tstate->c_stack_init_base = 0;
    _tstate->c_stack_init_top = 0;

    _tstate->asyncio_running_loop = NULL;
    _tstate->asyncio_running_task = NULL;

#ifdef _Py_TIER2
    _tstate->jit_tracer_state = NULL;
#endif
    tstate->delete_later = NULL;

    llist_init(&_tstate->mem_free_queue);
    llist_init(&_tstate->asyncio_tasks_head);
    if (interp->stoptheworld.requested || _PyRuntime.stoptheworld.requested) {
        // Start in the suspended state if there is an ongoing stop-the-world.
        tstate->state = _Py_THREAD_SUSPENDED;
    }

    tstate->_status.initialized = 1;
}
1596
1597
static void
1598
add_threadstate(PyInterpreterState *interp, PyThreadState *tstate,
1599
                PyThreadState *next)
1600
32
{
1601
32
    assert(interp->threads.head != tstate);
1602
32
    if (next != NULL) {
1603
0
        assert(next->prev == NULL || next->prev == tstate);
1604
0
        next->prev = tstate;
1605
0
    }
1606
32
    tstate->next = next;
1607
32
    assert(tstate->prev == NULL);
1608
32
    interp->threads.head = tstate;
1609
32
}
1610
1611
/* Allocate and initialize a new thread state for 'interp' and link it
   into the interpreter's thread list.  Returns NULL on failure.  The
   result is not yet bound to an OS thread. */
static PyThreadState *
new_threadstate(PyInterpreterState *interp, int whence)
{
    // Allocate the thread state.
    _PyThreadStateImpl *tstate = alloc_threadstate(interp);
    if (tstate == NULL) {
        return NULL;
    }

#ifdef Py_GIL_DISABLED
    // Reserve per-thread QSBR and thread-local-bytecode slots up front
    // so failure can be handled before the state is published.
    Py_ssize_t qsbr_idx = _Py_qsbr_reserve(interp);
    if (qsbr_idx < 0) {
        free_threadstate(tstate);
        return NULL;
    }
    int32_t tlbc_idx = _Py_ReserveTLBCIndex(interp);
    if (tlbc_idx < 0) {
        free_threadstate(tstate);
        return NULL;
    }
#endif
#ifdef Py_STATS
    // The PyStats structure is quite large and is allocated separated from tstate.
    if (!_PyStats_ThreadInit(interp, tstate)) {
        free_threadstate(tstate);
        return NULL;
    }
#endif

    /* We serialize concurrent creation to protect global state. */
    HEAD_LOCK(interp->runtime);

    // Initialize the new thread state.
    interp->threads.next_unique_id += 1;
    uint64_t id = interp->threads.next_unique_id;
    init_threadstate(tstate, interp, id, whence);

    // Add the new thread state to the interpreter.
    PyThreadState *old_head = interp->threads.head;
    add_threadstate(interp, (PyThreadState *)tstate, old_head);

    HEAD_UNLOCK(interp->runtime);

#ifdef Py_GIL_DISABLED
    // Must be called with lock unlocked to avoid lock ordering deadlocks.
    _Py_qsbr_register(tstate, interp, qsbr_idx);
    tstate->tlbc_index = tlbc_idx;
#endif

    return (PyThreadState *)tstate;
}
1662
1663
PyThreadState *
PyThreadState_New(PyInterpreterState *interp)
{
    /* Public constructor: create a thread state for 'interp' and bind
       it to the calling OS thread. */
    return _PyThreadState_NewBound(interp, _PyThreadState_WHENCE_UNKNOWN);
}
1668
1669
PyThreadState *
1670
_PyThreadState_NewBound(PyInterpreterState *interp, int whence)
1671
0
{
1672
0
    PyThreadState *tstate = new_threadstate(interp, whence);
1673
0
    if (tstate) {
1674
0
        bind_tstate(tstate);
1675
        // This makes sure there's a gilstate tstate bound
1676
        // as soon as possible.
1677
0
        if (gilstate_get() == NULL) {
1678
0
            bind_gilstate_tstate(tstate);
1679
0
        }
1680
0
    }
1681
0
    return tstate;
1682
0
}
1683
1684
// This must be followed by a call to _PyThreadState_Bind();
1685
PyThreadState *
_PyThreadState_New(PyInterpreterState *interp, int whence)
{
    /* Create a thread state without binding it to the current OS
       thread; the caller must follow up with _PyThreadState_Bind(). */
    return new_threadstate(interp, whence);
}
1690
1691
// We keep this for stable ABI compatibility.
1692
PyAPI_FUNC(PyThreadState*)
_PyThreadState_Prealloc(PyInterpreterState *interp)
{
    /* Legacy stable-ABI entry point: create an unbound thread state. */
    return _PyThreadState_New(interp, _PyThreadState_WHENCE_UNKNOWN);
}
1697
1698
// We keep this around for (accidental) stable ABI compatibility.
1699
// Realistically, no extensions are using it.
1700
PyAPI_FUNC(void)
_PyThreadState_Init(PyThreadState *tstate)
{
    /* Legacy stable-ABI symbol; calling it is always a fatal error. */
    Py_FatalError("_PyThreadState_Init() is for internal use only");
}
1705
1706
1707
static void
1708
clear_datastack(PyThreadState *tstate)
1709
0
{
1710
0
    _PyStackChunk *chunk = tstate->datastack_chunk;
1711
0
    tstate->datastack_chunk = NULL;
1712
0
    while (chunk != NULL) {
1713
0
        _PyStackChunk *prev = chunk->previous;
1714
0
        _PyObject_VirtualFree(chunk, chunk->size);
1715
0
        chunk = prev;
1716
0
    }
1717
0
}
1718
1719
/* Clear the Python-object state of a thread state during finalization.
   The caller must have an attached thread state in the same
   interpreter.  After this returns, 'tstate' must not be used to run
   Python code. */
void
PyThreadState_Clear(PyThreadState *tstate)
{
    assert(tstate->_status.initialized && !tstate->_status.cleared);
    assert(current_fast_get()->interp == tstate->interp);
    // GH-126016: In the _interpreters module, KeyboardInterrupt exceptions
    // during PyEval_EvalCode() are sent to finalization, which doesn't let us
    // mark threads as "not running main". So, for now this assertion is
    // disabled.
    // XXX assert(!_PyThreadState_IsRunningMain(tstate));
    // XXX assert(!tstate->_status.bound || tstate->_status.unbound);
    tstate->_status.finalizing = 1;  // just in case

    /* XXX Conditions we need to enforce:

       * the GIL must be held by the current thread
       * current_fast_get()->interp must match tstate->interp
       * for the main interpreter, current_fast_get() must be the main thread
     */

    int verbose = _PyInterpreterState_GetConfig(tstate->interp)->verbose;

    if (verbose && tstate->current_frame != tstate->base_frame) {
        /* bpo-20526: After the main thread calls
           _PyInterpreterState_SetFinalizing() in Py_FinalizeEx()
           (or in Py_EndInterpreter() for subinterpreters),
           threads must exit when trying to take the GIL.
           If a thread exit in the middle of _PyEval_EvalFrameDefault(),
           tstate->frame is not reset to its previous value.
           It is more likely with daemon threads, but it can happen
           with regular threads if threading._shutdown() fails
           (ex: interrupted by CTRL+C). */
        fprintf(stderr,
          "PyThreadState_Clear: warning: thread still has a frame\n");
    }

    if (verbose && tstate->current_exception != NULL) {
        fprintf(stderr, "PyThreadState_Clear: warning: thread has an exception set\n");
        _PyErr_Print(tstate);
    }

    /* At this point tstate shouldn't be used any more,
       neither to run Python code nor for other uses.

       This is tricky when current_fast_get() == tstate, in the same way
       as noted in interpreter_clear() above.  The below finalizers
       can possibly run Python code or otherwise use the partially
       cleared thread state.  For now we trust that isn't a problem
       in practice.
     */
    // XXX Deal with the possibility of problematic finalizers.

    /* Don't clear tstate->pyframe: it is a borrowed reference */

    Py_CLEAR(tstate->threading_local_key);
    Py_CLEAR(tstate->threading_local_sentinel);

    Py_CLEAR(((_PyThreadStateImpl *)tstate)->asyncio_running_loop);
    Py_CLEAR(((_PyThreadStateImpl *)tstate)->asyncio_running_task);


    PyMutex_Lock(&tstate->interp->asyncio_tasks_lock);
    // merge any lingering tasks from thread state to interpreter's
    // tasks list
    llist_concat(&tstate->interp->asyncio_tasks_head,
                 &((_PyThreadStateImpl *)tstate)->asyncio_tasks_head);
    PyMutex_Unlock(&tstate->interp->asyncio_tasks_lock);

    Py_CLEAR(tstate->dict);
    Py_CLEAR(tstate->async_exc);

    Py_CLEAR(tstate->current_exception);

    Py_CLEAR(tstate->exc_state.exc_value);

    /* The stack of exception states should contain just this thread. */
    if (verbose && tstate->exc_info != &tstate->exc_state) {
        fprintf(stderr,
          "PyThreadState_Clear: warning: thread still has a generator\n");
    }

    // Keep the interpreter's profiling/tracing thread counts in sync
    // as this thread's hooks are removed.
    if (tstate->c_profilefunc != NULL) {
        FT_ATOMIC_ADD_SSIZE(tstate->interp->sys_profiling_threads, -1);
        tstate->c_profilefunc = NULL;
    }
    if (tstate->c_tracefunc != NULL) {
        FT_ATOMIC_ADD_SSIZE(tstate->interp->sys_tracing_threads, -1);
        tstate->c_tracefunc = NULL;
    }

    Py_CLEAR(tstate->c_profileobj);
    Py_CLEAR(tstate->c_traceobj);

    Py_CLEAR(tstate->async_gen_firstiter);
    Py_CLEAR(tstate->async_gen_finalizer);

    Py_CLEAR(tstate->context);

#ifdef Py_GIL_DISABLED
    // Each thread should clear own freelists in free-threading builds.
    struct _Py_freelists *freelists = _Py_freelists_GET();
    _PyObject_ClearFreeLists(freelists, 1);

    // Flush the thread's local GC allocation count to the global count
    // before the thread state is cleared, otherwise the count is lost.
    _PyThreadStateImpl *tstate_impl = (_PyThreadStateImpl *)tstate;
    _Py_atomic_add_int(&tstate->interp->gc.young.count,
                       (int)tstate_impl->gc.alloc_count);
    tstate_impl->gc.alloc_count = 0;

    // Merge our thread-local refcounts into the type's own refcount and
    // free our local refcount array.
    _PyObject_FinalizePerThreadRefcounts(tstate_impl);

    // Remove ourself from the biased reference counting table of threads.
    _Py_brc_remove_thread(tstate);

    // Release our thread-local copies of the bytecode for reuse by another
    // thread
    _Py_ClearTLBCIndex(tstate_impl);
#endif

    // Merge our queue of pointers to be freed into the interpreter queue.
    _PyMem_AbandonDelayed(tstate);

    _PyThreadState_ClearMimallocHeaps(tstate);

#ifdef _Py_TIER2
    _PyJit_TracerFree((_PyThreadStateImpl *)tstate);
#endif

    tstate->_status.cleared = 1;

    // XXX Call _PyThreadStateSwap(runtime, NULL) here if "current".
    // XXX Do it as early in the function as possible.
}
1855
1856
static void
1857
decrement_stoptheworld_countdown(struct _stoptheworld_state *stw);
1858
1859
/* Common code for PyThreadState_Delete() and PyThreadState_DeleteCurrent() */
1860
static void
1861
tstate_delete_common(PyThreadState *tstate, int release_gil)
1862
0
{
1863
0
    assert(tstate->_status.cleared && !tstate->_status.finalized);
1864
0
    tstate_verify_not_active(tstate);
1865
0
    assert(!_PyThreadState_IsRunningMain(tstate));
1866
1867
0
    PyInterpreterState *interp = tstate->interp;
1868
0
    if (interp == NULL) {
1869
0
        Py_FatalError("NULL interpreter");
1870
0
    }
1871
0
    _PyRuntimeState *runtime = interp->runtime;
1872
1873
0
    HEAD_LOCK(runtime);
1874
0
    if (tstate->prev) {
1875
0
        tstate->prev->next = tstate->next;
1876
0
    }
1877
0
    else {
1878
0
        interp->threads.head = tstate->next;
1879
0
    }
1880
0
    if (tstate->next) {
1881
0
        tstate->next->prev = tstate->prev;
1882
0
    }
1883
0
    if (tstate->state != _Py_THREAD_SUSPENDED) {
1884
        // Any ongoing stop-the-world request should not wait for us because
1885
        // our thread is getting deleted.
1886
0
        if (interp->stoptheworld.requested) {
1887
0
            decrement_stoptheworld_countdown(&interp->stoptheworld);
1888
0
        }
1889
0
        if (runtime->stoptheworld.requested) {
1890
0
            decrement_stoptheworld_countdown(&runtime->stoptheworld);
1891
0
        }
1892
0
    }
1893
1894
#if defined(Py_REF_DEBUG) && defined(Py_GIL_DISABLED)
1895
    // Add our portion of the total refcount to the interpreter's total.
1896
    _PyThreadStateImpl *tstate_impl = (_PyThreadStateImpl *)tstate;
1897
    tstate->interp->object_state.reftotal += tstate_impl->reftotal;
1898
    tstate_impl->reftotal = 0;
1899
    assert(tstate_impl->refcounts.values == NULL);
1900
#endif
1901
1902
#if _Py_TIER2
1903
    _PyJit_TracerFree((_PyThreadStateImpl *)tstate);
1904
#endif
1905
1906
0
    HEAD_UNLOCK(runtime);
1907
1908
    // XXX Unbind in PyThreadState_Clear(), or earlier
1909
    // (and assert not-equal here)?
1910
0
    if (tstate->_status.bound_gilstate) {
1911
0
        unbind_gilstate_tstate(tstate);
1912
0
    }
1913
0
    if (tstate->_status.bound) {
1914
0
        unbind_tstate(tstate);
1915
0
    }
1916
1917
    // XXX Move to PyThreadState_Clear()?
1918
0
    clear_datastack(tstate);
1919
1920
0
    if (release_gil) {
1921
0
        _PyEval_ReleaseLock(tstate->interp, tstate, 1);
1922
0
    }
1923
1924
#ifdef Py_GIL_DISABLED
1925
    _Py_qsbr_unregister(tstate);
1926
#endif
1927
1928
0
    tstate->_status.finalized = 1;
1929
0
}
1930
1931
/* Delete and free every remaining thread state of `interp`. */
static void
zapthreads(PyInterpreterState *interp)
{
    PyThreadState *tstate;
    /* No need to lock the mutex here because this should only happen
       when the threads are all really dead (XXX famous last words).

       Cannot use _Py_FOR_EACH_TSTATE_UNLOCKED because we are freeing
       the thread states here.
    */
    // Each iteration unlinks the head (tstate_delete_common updates
    // interp->threads.head), so this loop drains the whole list.
    while ((tstate = interp->threads.head) != NULL) {
        tstate_verify_not_active(tstate);
        tstate_delete_common(tstate, 0);
        free_threadstate((_PyThreadStateImpl *)tstate);
    }
}
1947
1948
1949
/* Public API: delete a thread state that is NOT the current one.
   The caller must have called PyThreadState_Clear() first (asserted
   inside tstate_delete_common).  Does not release the GIL. */
void
PyThreadState_Delete(PyThreadState *tstate)
{
    _Py_EnsureTstateNotNULL(tstate);
    tstate_verify_not_active(tstate);
    tstate_delete_common(tstate, 0);
    free_threadstate((_PyThreadStateImpl *)tstate);
}
1957
1958
1959
/* Delete the given thread state, which must belong to the calling
   thread, and release the GIL as part of the call. */
void
_PyThreadState_DeleteCurrent(PyThreadState *tstate)
{
    _Py_EnsureTstateNotNULL(tstate);
#ifdef Py_GIL_DISABLED
    // Leave the QSBR (quiescent-state based reclamation) protocol before
    // the thread state goes away.
    _Py_qsbr_detach(((_PyThreadStateImpl *)tstate)->qsbr);
#endif
#ifdef Py_STATS
    _PyStats_Detach((_PyThreadStateImpl *)tstate);
#endif
    // Clear the "current thread state" pointer before deleting, so no one
    // can observe a dangling current tstate.
    current_fast_clear(tstate->interp->runtime);
    tstate_delete_common(tstate, 1);  // release GIL as part of call
    free_threadstate((_PyThreadStateImpl *)tstate);
}
1973
1974
/* Public API: delete the calling thread's thread state and release
   the GIL (delegates to _PyThreadState_DeleteCurrent). */
void
PyThreadState_DeleteCurrent(void)
{
    _PyThreadState_DeleteCurrent(current_fast_get());
}
1980
1981
1982
// Unlinks and removes all thread states from `tstate->interp`, with the
// exception of the one passed as an argument. However, it does not delete
// these thread states. Instead, it returns the removed thread states as a
// linked list.
//
// Note that if there is a current thread state, it *must* be the one
// passed as argument.  Also, this won't touch any interpreters other
// than the current one, since we don't know which thread state should
// be kept in those other interpreters.
PyThreadState *
_PyThreadState_RemoveExcept(PyThreadState *tstate)
{
    assert(tstate != NULL);
    PyInterpreterState *interp = tstate->interp;
    _PyRuntimeState *runtime = interp->runtime;

#ifdef Py_GIL_DISABLED
    // In free-threaded builds the caller must have stopped the world first.
    assert(runtime->stoptheworld.world_stopped);
#endif

    HEAD_LOCK(runtime);
    /* Remove all thread states, except tstate, from the linked list of
       thread states. */
    PyThreadState *list = interp->threads.head;
    if (list == tstate) {
        // Skip tstate: the returned list must not contain it.
        list = tstate->next;
    }
    // Splice tstate out of its old position ...
    if (tstate->prev) {
        tstate->prev->next = tstate->next;
    }
    if (tstate->next) {
        tstate->next->prev = tstate->prev;
    }
    tstate->prev = tstate->next = NULL;
    // ... and make it the sole member of the interpreter's list.
    interp->threads.head = tstate;
    HEAD_UNLOCK(runtime);

    return list;
}
2021
2022
// Deletes the thread states in the linked list `list`.
//
// This is intended to be used in conjunction with _PyThreadState_RemoveExcept.
//
// If `is_after_fork` is true, the thread states are immediately freed.
// Otherwise, they are decref'd because they may still be referenced by an
// OS thread.
void
_PyThreadState_DeleteList(PyThreadState *list, int is_after_fork)
{
    // The world can't be stopped because PyThreadState_Clear() can
    // call destructors.
    assert(!_PyRuntime.stoptheworld.world_stopped);

    PyThreadState *p, *next;
    // Capture `next` before clearing: PyThreadState_Clear() may run
    // arbitrary Python code via destructors.
    for (p = list; p; p = next) {
        next = p->next;
        PyThreadState_Clear(p);
        if (is_after_fork) {
            free_threadstate((_PyThreadStateImpl *)p);
        }
        else {
            decref_threadstate((_PyThreadStateImpl *)p);
        }
    }
}
2048
2049
2050
//----------
2051
// accessors
2052
//----------
2053
2054
/* An extension mechanism to store arbitrary additional per-thread state.
2055
   PyThreadState_GetDict() returns a dictionary that can be used to hold such
2056
   state; the caller should pick a unique key and store its state there.  If
2057
   PyThreadState_GetDict() returns NULL, an exception has *not* been raised
2058
   and the caller should assume no per-thread state is available. */
2059
2060
PyObject *
2061
_PyThreadState_GetDict(PyThreadState *tstate)
2062
7.61M
{
2063
7.61M
    assert(tstate != NULL);
2064
7.61M
    if (tstate->dict == NULL) {
2065
2
        tstate->dict = PyDict_New();
2066
2
        if (tstate->dict == NULL) {
2067
0
            _PyErr_Clear(tstate);
2068
0
        }
2069
2
    }
2070
7.61M
    return tstate->dict;
2071
7.61M
}
2072
2073
2074
PyObject *
2075
PyThreadState_GetDict(void)
2076
7.61M
{
2077
7.61M
    PyThreadState *tstate = current_fast_get();
2078
7.61M
    if (tstate == NULL) {
2079
0
        return NULL;
2080
0
    }
2081
7.61M
    return _PyThreadState_GetDict(tstate);
2082
7.61M
}
2083
2084
2085
PyInterpreterState *
2086
PyThreadState_GetInterpreter(PyThreadState *tstate)
2087
0
{
2088
0
    assert(tstate != NULL);
2089
0
    return tstate->interp;
2090
0
}
2091
2092
2093
PyFrameObject*
2094
PyThreadState_GetFrame(PyThreadState *tstate)
2095
1.46M
{
2096
1.46M
    assert(tstate != NULL);
2097
1.46M
    _PyInterpreterFrame *f = _PyThreadState_GetFrame(tstate);
2098
1.46M
    if (f == NULL) {
2099
0
        return NULL;
2100
0
    }
2101
1.46M
    PyFrameObject *frame = _PyFrame_GetFrameObject(f);
2102
1.46M
    if (frame == NULL) {
2103
0
        PyErr_Clear();
2104
0
    }
2105
1.46M
    return (PyFrameObject*)Py_XNewRef(frame);
2106
1.46M
}
2107
2108
2109
uint64_t
2110
PyThreadState_GetID(PyThreadState *tstate)
2111
0
{
2112
0
    assert(tstate != NULL);
2113
0
    return tstate->id;
2114
0
}
2115
2116
2117
/* Mark `tstate` as the active thread state for this OS thread, binding
   the gilstate TSS slot to it if that hasn't happened yet.  The caller
   must hold the GIL/eval lock (called from _PyThreadState_Attach). */
static inline void
tstate_activate(PyThreadState *tstate)
{
    assert(tstate != NULL);
    // XXX assert(tstate_is_alive(tstate));
    assert(tstate_is_bound(tstate));
    assert(!tstate->_status.active);

    // If a gilstate tstate is already bound for this thread it must be us.
    assert(!tstate->_status.bound_gilstate ||
           tstate == gilstate_get());
    if (!tstate->_status.bound_gilstate) {
        bind_gilstate_tstate(tstate);
    }

    tstate->_status.active = 1;
}
2133
2134
static inline void
2135
tstate_deactivate(PyThreadState *tstate)
2136
3.68M
{
2137
3.68M
    assert(tstate != NULL);
2138
    // XXX assert(tstate_is_alive(tstate));
2139
3.68M
    assert(tstate_is_bound(tstate));
2140
3.68M
    assert(tstate->_status.active);
2141
2142
#if Py_STATS
2143
    _PyStats_Detach((_PyThreadStateImpl *)tstate);
2144
#endif
2145
2146
3.68M
    tstate->_status.active = 0;
2147
2148
    // We do not unbind the gilstate tstate here.
2149
    // It will still be used in PyGILState_Ensure().
2150
3.68M
}
2151
2152
/* Try to move `tstate` from DETACHED to ATTACHED.  Returns non-zero on
   success.  With the GIL this cannot fail; in free-threaded builds it is
   a CAS that loses to a concurrent transition (e.g. to SUSPENDED). */
static int
tstate_try_attach(PyThreadState *tstate)
{
#ifdef Py_GIL_DISABLED
    int expected = _Py_THREAD_DETACHED;
    return _Py_atomic_compare_exchange_int(&tstate->state,
                                           &expected,
                                           _Py_THREAD_ATTACHED);
#else
    assert(tstate->state == _Py_THREAD_DETACHED);
    tstate->state = _Py_THREAD_ATTACHED;
    return 1;
#endif
}
2166
2167
/* Move `tstate` out of ATTACHED into `detached_state` (DETACHED or
   SUSPENDED).  Uses an atomic store in free-threaded builds so other
   threads (e.g. stop-the-world) observe the transition. */
static void
tstate_set_detached(PyThreadState *tstate, int detached_state)
{
    assert(_Py_atomic_load_int_relaxed(&tstate->state) == _Py_THREAD_ATTACHED);
#ifdef Py_GIL_DISABLED
    _Py_atomic_store_int(&tstate->state, detached_state);
#else
    tstate->state = detached_state;
#endif
}
2177
2178
/* Block until `tstate` can be attached.  Loops while the thread is
   SUSPENDED (a stop-the-world is in progress), parks on the state word,
   and hangs the thread entirely if the runtime is shutting down. */
static void
tstate_wait_attach(PyThreadState *tstate)
{
    do {
        int state = _Py_atomic_load_int_relaxed(&tstate->state);
        if (state == _Py_THREAD_SUSPENDED) {
            // Wait until we're switched out of SUSPENDED to DETACHED.
            _PyParkingLot_Park(&tstate->state, &state, sizeof(tstate->state),
                               /*timeout=*/-1, NULL, /*detach=*/0);
        }
        else if (state == _Py_THREAD_SHUTTING_DOWN) {
            // We're shutting down, so we can't attach.
            _PyThreadState_HangThread(tstate);
        }
        else {
            assert(state == _Py_THREAD_DETACHED);
        }
        // Once we're back in DETACHED we can re-attach
    } while (!tstate_try_attach(tstate));
}
2198
2199
/* Attach `tstate` to the calling OS thread: acquire the eval lock/GIL,
   make it the current thread state, activate it, and resume any critical
   sections that were suspended at detach time.  Fatal error if another
   thread state is already current on this thread. */
void
_PyThreadState_Attach(PyThreadState *tstate)
{
#if defined(Py_DEBUG)
    // This is called from PyEval_RestoreThread(). Similar
    // to it, we need to ensure errno doesn't change.
    int err = errno;
#endif

    _Py_EnsureTstateNotNULL(tstate);
    if (current_fast_get() != NULL) {
        Py_FatalError("non-NULL old thread state");
    }
    _PyThreadStateImpl *_tstate = (_PyThreadStateImpl *)tstate;
    if (_tstate->c_stack_hard_limit == 0) {
        // First attach on this thread: compute the C stack limits.
        _Py_InitializeRecursionLimits(tstate);
    }

    while (1) {
        _PyEval_AcquireLock(tstate);

        // XXX assert(tstate_is_alive(tstate));
        current_fast_set(&_PyRuntime, tstate);
        if (!tstate_try_attach(tstate)) {
            tstate_wait_attach(tstate);
        }
        tstate_activate(tstate);

#ifdef Py_GIL_DISABLED
        if (_PyEval_IsGILEnabled(tstate) && !tstate->holds_gil) {
            // The GIL was enabled between our call to _PyEval_AcquireLock()
            // and when we attached (the GIL can't go from enabled to disabled
            // here because only a thread holding the GIL can disable
            // it). Detach and try again.
            tstate_set_detached(tstate, _Py_THREAD_DETACHED);
            tstate_deactivate(tstate);
            current_fast_clear(&_PyRuntime);
            continue;
        }
        _Py_qsbr_attach(((_PyThreadStateImpl *)tstate)->qsbr);
#endif
        break;
    }

    // Resume previous critical section. This acquires the lock(s) from the
    // top-most critical section.
    if (tstate->critical_section != 0) {
        _PyCriticalSection_Resume(tstate);
    }

#ifdef Py_STATS
    _PyStats_Attach((_PyThreadStateImpl *)tstate);
#endif

#if defined(Py_DEBUG)
    errno = err;
#endif
}
2257
2258
/* Detach the current thread state, moving it to `detached_state`
   (DETACHED or SUSPENDED).  Order matters: suspend critical sections and
   leave QSBR while still attached, then deactivate, publish the state
   transition, clear the current-tstate pointer, and finally drop the
   eval lock. */
static void
detach_thread(PyThreadState *tstate, int detached_state)
{
    // XXX assert(tstate_is_alive(tstate) && tstate_is_bound(tstate));
    assert(_Py_atomic_load_int_relaxed(&tstate->state) == _Py_THREAD_ATTACHED);
    assert(tstate == current_fast_get());
    if (tstate->critical_section != 0) {
        _PyCriticalSection_SuspendAll(tstate);
    }
#ifdef Py_GIL_DISABLED
    _Py_qsbr_detach(((_PyThreadStateImpl *)tstate)->qsbr);
#endif
    tstate_deactivate(tstate);
    tstate_set_detached(tstate, detached_state);
    current_fast_clear(&_PyRuntime);
    _PyEval_ReleaseLock(tstate->interp, tstate, 0);
}
2275
2276
/* Detach the current thread state into the plain DETACHED state
   (counterpart of _PyThreadState_Attach). */
void
_PyThreadState_Detach(PyThreadState *tstate)
{
    detach_thread(tstate, _Py_THREAD_DETACHED);
}
2281
2282
/* Detach the current thread in response to a stop-the-world request.
   If a request is pending (runtime-wide checked first, then
   per-interpreter) the thread parks as SUSPENDED and decrements the
   requester's countdown; otherwise this degenerates to a plain detach. */
void
_PyThreadState_Suspend(PyThreadState *tstate)
{
    _PyRuntimeState *runtime = &_PyRuntime;

    assert(_Py_atomic_load_int_relaxed(&tstate->state) == _Py_THREAD_ATTACHED);

    // Determine which stop-the-world machinery (if any) requested the pause;
    // the `requested` flags are read under HEAD_LOCK.
    struct _stoptheworld_state *stw = NULL;
    HEAD_LOCK(runtime);
    if (runtime->stoptheworld.requested) {
        stw = &runtime->stoptheworld;
    }
    else if (tstate->interp->stoptheworld.requested) {
        stw = &tstate->interp->stoptheworld;
    }
    HEAD_UNLOCK(runtime);

    if (stw == NULL) {
        // Switch directly to "detached" if there is no active stop-the-world
        // request.
        detach_thread(tstate, _Py_THREAD_DETACHED);
        return;
    }

    // Switch to "suspended" state.
    detach_thread(tstate, _Py_THREAD_SUSPENDED);

    // Decrease the count of remaining threads needing to park.
    HEAD_LOCK(runtime);
    decrement_stoptheworld_countdown(stw);
    HEAD_UNLOCK(runtime);
}
2314
2315
/* Move `tstate` into the SHUTTING_DOWN state and wake any thread parked
   on its state word, so tstate_wait_attach() can observe the shutdown. */
void
_PyThreadState_SetShuttingDown(PyThreadState *tstate)
{
    _Py_atomic_store_int(&tstate->state, _Py_THREAD_SHUTTING_DOWN);
#ifdef Py_GIL_DISABLED
    _PyParkingLot_UnparkAll(&tstate->state);
#endif
}
2323
2324
// Decrease stop-the-world counter of remaining number of threads that need to
2325
// pause. If we are the final thread to pause, notify the requesting thread.
2326
static void
2327
decrement_stoptheworld_countdown(struct _stoptheworld_state *stw)
2328
0
{
2329
0
    assert(stw->thread_countdown > 0);
2330
0
    if (--stw->thread_countdown == 0) {
2331
0
        _PyEvent_Notify(&stw->stop_event);
2332
0
    }
2333
0
}
2334
2335
#ifdef Py_GIL_DISABLED
2336
// Interpreter for _Py_FOR_EACH_STW_INTERP(). For global stop-the-world events,
2337
// we start with the first interpreter and then iterate over all interpreters.
2338
// For per-interpreter stop-the-world events, we only operate on the one
2339
// interpreter.
2340
static PyInterpreterState *
2341
interp_for_stop_the_world(struct _stoptheworld_state *stw)
2342
{
2343
    return (stw->is_global
2344
        ? PyInterpreterState_Head()
2345
        : _Py_CONTAINER_OF(stw, PyInterpreterState, stoptheworld));
2346
}
2347
2348
// Loops over the interpreters affected by a stop-the-world event, binding
// each to `i`.
// For global: all threads in all interpreters
// For per-interpreter: all threads in the interpreter
#define _Py_FOR_EACH_STW_INTERP(stw, i)                                     \
    for (PyInterpreterState *i = interp_for_stop_the_world((stw));          \
            i != NULL; i = ((stw->is_global) ? i->next : NULL))
2354
2355
2356
// Try to transition threads atomically from the "detached" state to the
// "gc stopped" state. Returns true if all threads are in the "gc stopped"
// (suspended) state after this pass.  Threads still attached are poked via
// their eval-breaker so they park themselves at the next opportunity.
static bool
park_detached_threads(struct _stoptheworld_state *stw)
{
    int num_parked = 0;
    _Py_FOR_EACH_STW_INTERP(stw, i) {
        _Py_FOR_EACH_TSTATE_UNLOCKED(i, t) {
            int state = _Py_atomic_load_int_relaxed(&t->state);
            if (state == _Py_THREAD_DETACHED) {
                // Atomically transition to "suspended" if in "detached" state.
                if (_Py_atomic_compare_exchange_int(
                                &t->state, &state, _Py_THREAD_SUSPENDED)) {
                    num_parked++;
                }
            }
            else if (state == _Py_THREAD_ATTACHED && t != stw->requester) {
                _Py_set_eval_breaker_bit(t, _PY_EVAL_PLEASE_STOP_BIT);
            }
        }
    }
    stw->thread_countdown -= num_parked;
    assert(stw->thread_countdown >= 0);
    return num_parked > 0 && stw->thread_countdown == 0;
}
2381
2382
/* Pause every thread covered by `stw` (all interpreters for a global
   event, one interpreter otherwise).  Returns with `stw->mutex` held and
   world_stopped set; start_the_world() releases everything. */
static void
stop_the_world(struct _stoptheworld_state *stw)
{
    _PyRuntimeState *runtime = &_PyRuntime;

    // gh-137433: Acquire the rwmutex first to avoid deadlocks with daemon
    // threads that may hang when blocked on lock acquisition.
    if (stw->is_global) {
        _PyRWMutex_Lock(&runtime->stoptheworld_mutex);
    }
    else {
        _PyRWMutex_RLock(&runtime->stoptheworld_mutex);
    }
    PyMutex_Lock(&stw->mutex);

    HEAD_LOCK(runtime);
    stw->requested = 1;
    stw->thread_countdown = 0;
    stw->stop_event = (PyEvent){0};  // zero-initialize (unset)
    stw->requester = _PyThreadState_GET();  // may be NULL
    FT_STAT_WORLD_STOP_INC();

    // Count how many other threads must park before the world is stopped.
    _Py_FOR_EACH_STW_INTERP(stw, i) {
        _Py_FOR_EACH_TSTATE_UNLOCKED(i, t) {
            if (t != stw->requester) {
                // Count all the other threads (we don't wait on ourself).
                stw->thread_countdown++;
            }
        }
    }

    if (stw->thread_countdown == 0) {
        // No other threads: nothing to wait for.
        HEAD_UNLOCK(runtime);
        stw->world_stopped = 1;
        return;
    }

    // Park detached threads ourselves; attached ones are signalled and
    // park via _PyThreadState_Suspend(), which fires stop_event when the
    // countdown reaches zero.
    for (;;) {
        // Switch threads that are detached to the GC stopped state
        bool stopped_all_threads = park_detached_threads(stw);
        HEAD_UNLOCK(runtime);

        if (stopped_all_threads) {
            break;
        }

        PyTime_t wait_ns = 1000*1000;  // 1ms (arbitrary, may need tuning)
        int detach = 0;
        if (PyEvent_WaitTimed(&stw->stop_event, wait_ns, detach)) {
            assert(stw->thread_countdown == 0);
            break;
        }

        HEAD_LOCK(runtime);
    }
    stw->world_stopped = 1;
}
2439
2440
/* Resume the world after stop_the_world(): flip every suspended thread
   back to DETACHED, wake parked waiters, then release the locks taken by
   stop_the_world() (stw->mutex, then the runtime rwmutex). */
static void
start_the_world(struct _stoptheworld_state *stw)
{
    _PyRuntimeState *runtime = &_PyRuntime;
    assert(PyMutex_IsLocked(&stw->mutex));

    HEAD_LOCK(runtime);
    stw->requested = 0;
    stw->world_stopped = 0;
    // Switch threads back to the detached state.
    _Py_FOR_EACH_STW_INTERP(stw, i) {
        _Py_FOR_EACH_TSTATE_UNLOCKED(i, t) {
            if (t != stw->requester) {
                assert(_Py_atomic_load_int_relaxed(&t->state) ==
                       _Py_THREAD_SUSPENDED);
                _Py_atomic_store_int(&t->state, _Py_THREAD_DETACHED);
                _PyParkingLot_UnparkAll(&t->state);
            }
        }
    }
    stw->requester = NULL;
    HEAD_UNLOCK(runtime);
    PyMutex_Unlock(&stw->mutex);
    if (stw->is_global) {
        _PyRWMutex_Unlock(&runtime->stoptheworld_mutex);
    }
    else {
        _PyRWMutex_RUnlock(&runtime->stoptheworld_mutex);
    }
}
2470
#endif  // Py_GIL_DISABLED
2471
2472
/* Stop all threads in all interpreters.  No-op in builds with a GIL. */
void
_PyEval_StopTheWorldAll(_PyRuntimeState *runtime)
{
#ifdef Py_GIL_DISABLED
    stop_the_world(&runtime->stoptheworld);
#endif
}
2479
2480
/* Resume all threads stopped by _PyEval_StopTheWorldAll().
   No-op in builds with a GIL. */
void
_PyEval_StartTheWorldAll(_PyRuntimeState *runtime)
{
#ifdef Py_GIL_DISABLED
    start_the_world(&runtime->stoptheworld);
#endif
}
2487
2488
/* Stop all threads of a single interpreter.  No-op in builds with a GIL. */
void
_PyEval_StopTheWorld(PyInterpreterState *interp)
{
#ifdef Py_GIL_DISABLED
    stop_the_world(&interp->stoptheworld);
#endif
}
2495
2496
/* Resume the threads of a single interpreter stopped by
   _PyEval_StopTheWorld().  No-op in builds with a GIL. */
void
_PyEval_StartTheWorld(PyInterpreterState *interp)
{
#ifdef Py_GIL_DISABLED
    start_the_world(&interp->stoptheworld);
#endif
}
2503
2504
//----------
2505
// other API
2506
//----------
2507
2508
/* Asynchronously raise an exception in a thread.
2509
   Requested by Just van Rossum and Alex Martelli.
2510
   To prevent naive misuse, you must write your own extension
2511
   to call this, or use ctypes.  Must be called with the GIL held.
2512
   Returns the number of tstates modified (normally 1, but 0 if `id` didn't
2513
   match any known thread id).  Can be called with exc=NULL to clear an
2514
   existing async exception.  This raises no exceptions. */
2515
2516
// XXX Move this to Python/ceval_gil.c?
2517
// XXX Deprecate this.
2518
/* Set (or clear, with exc==NULL) an async exception on the thread whose
   thread_id is `id` in the current interpreter.  Returns 1 if a matching
   thread was found, 0 otherwise.  See the comment block above for the
   full contract. */
int
PyThreadState_SetAsyncExc(unsigned long id, PyObject *exc)
{
    PyInterpreterState *interp = _PyInterpreterState_GET();

    /* Although the GIL is held, a few C API functions can be called
     * without the GIL held, and in particular some that create and
     * destroy thread and interpreter states.  Those can mutate the
     * list of thread states we're traversing, so to prevent that we lock
     * head_mutex for the duration.
     */
    PyThreadState *tstate = NULL;
    _Py_FOR_EACH_TSTATE_BEGIN(interp, t) {
        if (t->thread_id == id) {
            tstate = t;
            break;
        }
    }
    _Py_FOR_EACH_TSTATE_END(interp);

    if (tstate != NULL) {
        /* Tricky:  we need to decref the current value
         * (if any) in tstate->async_exc, but that can in turn
         * allow arbitrary Python code to run, including
         * perhaps calls to this function.  To prevent
         * deadlock, we need to release head_mutex before
         * the decref.
         */
        // Swap in the new exception atomically, then drop the old one
        // outside of any lock.
        Py_XINCREF(exc);
        PyObject *old_exc = _Py_atomic_exchange_ptr(&tstate->async_exc, exc);

        Py_XDECREF(old_exc);
        // Ask the target thread to check for the pending async exception.
        _Py_set_eval_breaker_bit(tstate, _PY_ASYNC_EXCEPTION_BIT);
    }

    return tstate != NULL;
}
2555
2556
//---------------------------------
2557
// API for the current thread state
2558
//---------------------------------
2559
2560
/* Return the current thread state, or NULL if there is none —
   unlike PyThreadState_Get(), this never fatals. */
PyThreadState *
PyThreadState_GetUnchecked(void)
{
    return current_fast_get();
}
2565
2566
2567
/* Return the current thread state; fatal error if there is none. */
PyThreadState *
PyThreadState_Get(void)
{
    PyThreadState *tstate = current_fast_get();
    _Py_EnsureTstateNotNULL(tstate);
    return tstate;
}
2574
2575
PyThreadState *
2576
_PyThreadState_Swap(_PyRuntimeState *runtime, PyThreadState *newts)
2577
0
{
2578
0
    PyThreadState *oldts = current_fast_get();
2579
0
    if (oldts != NULL) {
2580
0
        _PyThreadState_Detach(oldts);
2581
0
    }
2582
0
    if (newts != NULL) {
2583
0
        _PyThreadState_Attach(newts);
2584
0
    }
2585
0
    return oldts;
2586
0
}
2587
2588
/* Public API wrapper around _PyThreadState_Swap() for the global runtime. */
PyThreadState *
PyThreadState_Swap(PyThreadState *newts)
{
    return _PyThreadState_Swap(&_PyRuntime, newts);
}
2593
2594
2595
/* Bind `tstate` to the calling OS thread, and opportunistically make it
   this thread's gilstate tstate if none is bound yet. */
void
_PyThreadState_Bind(PyThreadState *tstate)
{
    // gh-104690: If Python is being finalized and PyInterpreterState_Delete()
    // was called, tstate becomes a dangling pointer.
    assert(_PyThreadState_CheckConsistency(tstate));

    bind_tstate(tstate);
    // This makes sure there's a gilstate tstate bound
    // as soon as possible.
    if (gilstate_get() == NULL) {
        bind_gilstate_tstate(tstate);
    }
}
2609
2610
#if defined(Py_GIL_DISABLED) && !defined(Py_LIMITED_API)
2611
/* Return an address that is unique per OS thread, for use as a cheap
   thread identifier in free-threaded builds. */
uintptr_t
_Py_GetThreadLocal_Addr(void)
{
    // gh-112535: Use the address of the thread-local PyThreadState variable as
    // a unique identifier for the current thread. Each thread has a unique
    // _Py_tss_tstate variable with a unique address.
    return (uintptr_t)&_Py_tss_tstate;
}
2619
#endif
2620
2621
/***********************************/
2622
/* routines for advanced debuggers */
2623
/***********************************/
2624
2625
// (requested by David Beazley)
2626
// Don't use unless you know what you are doing!
2627
2628
/* Return the first interpreter in the runtime's linked list. */
PyInterpreterState *
PyInterpreterState_Head(void)
{
    return _PyRuntime.interpreters.head;
}
2633
2634
/* Return the main interpreter. */
PyInterpreterState *
PyInterpreterState_Main(void)
{
    return _PyInterpreterState_Main();
}
2639
2640
/* Return the next interpreter in the runtime's linked list (may be NULL). */
PyInterpreterState *
PyInterpreterState_Next(PyInterpreterState *interp) {
    return interp->next;
}
2644
2645
/* Return the first thread state of `interp` (may be NULL). */
PyThreadState *
PyInterpreterState_ThreadHead(PyInterpreterState *interp) {
    return interp->threads.head;
}
2649
2650
/* Return the next thread state in the interpreter's list (may be NULL). */
PyThreadState *
PyThreadState_Next(PyThreadState *tstate) {
    return tstate->next;
}
2654
2655
2656
/********************************************/
2657
/* reporting execution state of all threads */
2658
/********************************************/
2659
2660
/* The implementation of sys._current_frames().  This is intended to be
2661
   called with the GIL held, as it will be when called via
2662
   sys._current_frames().  It's possible it would work fine even without
2663
   the GIL held, but haven't thought enough about that.
2664
*/
2665
/* Build and return a new dict mapping thread id -> topmost complete frame
   object, for every thread in every interpreter.  Stops the world while
   walking the thread lists.  Returns NULL with an exception set on error. */
PyObject *
_PyThread_CurrentFrames(void)
{
    _PyRuntimeState *runtime = &_PyRuntime;
    PyThreadState *tstate = current_fast_get();
    if (_PySys_Audit(tstate, "sys._current_frames", NULL) < 0) {
        return NULL;
    }

    PyObject *result = PyDict_New();
    if (result == NULL) {
        return NULL;
    }

    /* for i in all interpreters:
     *     for t in all of i's thread states:
     *          if t's frame isn't NULL, map t's id to its frame
     * Because these lists can mutate even when the GIL is held, we
     * need to grab head_mutex for the duration.
     */
    _PyEval_StopTheWorldAll(runtime);
    HEAD_LOCK(runtime);
    PyInterpreterState *i;
    for (i = runtime->interpreters.head; i != NULL; i = i->next) {
        _Py_FOR_EACH_TSTATE_UNLOCKED(i, t) {
            _PyInterpreterFrame *frame = t->current_frame;
            frame = _PyFrame_GetFirstComplete(frame);
            if (frame == NULL) {
                // Thread has no complete frame; skip it.
                continue;
            }
            PyObject *id = PyLong_FromUnsignedLong(t->thread_id);
            if (id == NULL) {
                goto fail;
            }
            PyObject *frameobj = (PyObject *)_PyFrame_GetFrameObject(frame);
            if (frameobj == NULL) {
                Py_DECREF(id);
                goto fail;
            }
            int stat = PyDict_SetItem(result, id, frameobj);
            Py_DECREF(id);
            if (stat < 0) {
                goto fail;
            }
        }
    }
    goto done;

fail:
    Py_CLEAR(result);

done:
    // Both paths funnel here so the lock and world-stop are always undone.
    HEAD_UNLOCK(runtime);
    _PyEval_StartTheWorldAll(runtime);
    return result;
}
2721
2722
/* The implementation of sys._current_exceptions().  This is intended to be
2723
   called with the GIL held, as it will be when called via
2724
   sys._current_exceptions().  It's possible it would work fine even without
2725
   the GIL held, but haven't thought enough about that.
2726
*/
2727
/* Build and return a new dict mapping thread id -> topmost exception
   value (Py_None if the thread has none set), for every thread in every
   interpreter.  Stops the world while walking the thread lists.
   Returns NULL with an exception set on error. */
PyObject *
_PyThread_CurrentExceptions(void)
{
    _PyRuntimeState *runtime = &_PyRuntime;
    PyThreadState *tstate = current_fast_get();

    _Py_EnsureTstateNotNULL(tstate);

    if (_PySys_Audit(tstate, "sys._current_exceptions", NULL) < 0) {
        return NULL;
    }

    PyObject *result = PyDict_New();
    if (result == NULL) {
        return NULL;
    }

    /* for i in all interpreters:
     *     for t in all of i's thread states:
     *          map t's id to its topmost exception value
     * Because these lists can mutate even when the GIL is held, we
     * need to grab head_mutex for the duration.
     */
    _PyEval_StopTheWorldAll(runtime);
    HEAD_LOCK(runtime);
    PyInterpreterState *i;
    for (i = runtime->interpreters.head; i != NULL; i = i->next) {
        _Py_FOR_EACH_TSTATE_UNLOCKED(i, t) {
            _PyErr_StackItem *err_info = _PyErr_GetTopmostException(t);
            if (err_info == NULL) {
                continue;
            }
            PyObject *id = PyLong_FromUnsignedLong(t->thread_id);
            if (id == NULL) {
                goto fail;
            }
            PyObject *exc = err_info->exc_value;
            assert(exc == NULL ||
                   exc == Py_None ||
                   PyExceptionInstance_Check(exc));

            int stat = PyDict_SetItem(result, id, exc == NULL ? Py_None : exc);
            Py_DECREF(id);
            if (stat < 0) {
                goto fail;
            }
        }
    }
    goto done;

fail:
    Py_CLEAR(result);

done:
    // Both paths funnel here so the lock and world-stop are always undone.
    HEAD_UNLOCK(runtime);
    _PyEval_StartTheWorldAll(runtime);
    return result;
}
2785
2786
2787
/***********************************/
2788
/* Python "auto thread state" API. */
2789
/***********************************/
2790
2791
/* Internal initialization/finalization functions called by
2792
   Py_Initialize/Py_FinalizeEx
2793
*/
2794
/* Initialize the runtime-wide PyGILState machinery.  Only the main
   interpreter actually does anything; subinterpreters share its state. */
PyStatus
_PyGILState_Init(PyInterpreterState *interp)
{
    if (!_Py_IsMainInterpreter(interp)) {
        /* Currently, PyGILState is shared by all interpreters. The main
         * interpreter is responsible to initialize it. */
        return _PyStatus_OK();
    }
    _PyRuntimeState *runtime = interp->runtime;
    assert(gilstate_get() == NULL);
    assert(runtime->gilstate.autoInterpreterState == NULL);
    // PyGILState_Ensure() will create thread states in this interpreter.
    runtime->gilstate.autoInterpreterState = interp;
    return _PyStatus_OK();
}
2808
2809
/* Finalize the runtime-wide PyGILState machinery (counterpart of
   _PyGILState_Init). */
void
_PyGILState_Fini(PyInterpreterState *interp)
{
    if (!_Py_IsMainInterpreter(interp)) {
        /* Currently, PyGILState is shared by all interpreters. The main
         * interpreter is responsible for finalizing it. */
        return;
    }
    interp->runtime->gilstate.autoInterpreterState = NULL;
}
2819
2820
2821
// XXX Drop this.
/* Debug-time verification that `tstate` is correctly registered with the
   PyGILState machinery during interpreter startup.  For the main
   interpreter this asserts (in debug builds only) that the auto-interp
   pointer, the per-thread gilstate slot, and the nesting counter are all
   in their expected initial state; in release builds it does nothing
   beyond the main-interpreter check. */
void
_PyGILState_SetTstate(PyThreadState *tstate)
{
    /* must init with valid states */
    assert(tstate != NULL);
    assert(tstate->interp != NULL);

    if (!_Py_IsMainInterpreter(tstate->interp)) {
        /* Currently, PyGILState is shared by all interpreters. The main
         * interpreter is responsible to initialize it. */
        return;
    }

#ifndef NDEBUG
    _PyRuntimeState *runtime = tstate->interp->runtime;

    assert(runtime->gilstate.autoInterpreterState == tstate->interp);
    assert(gilstate_get() == tstate);
    assert(tstate->gilstate_counter == 1);
#endif
}
2843
2844
PyInterpreterState *
2845
_PyGILState_GetInterpreterStateUnsafe(void)
2846
0
{
2847
0
    return _PyRuntime.gilstate.autoInterpreterState;
2848
0
}
2849
2850
/* The public functions */
2851
2852
/* Public API: return the thread state bound to the calling OS thread
   via the gilstate TSS slot, or NULL if this thread has none. */
PyThreadState *
PyGILState_GetThisThreadState(void)
{
    PyThreadState *tstate = gilstate_get();
    return tstate;
}
2857
2858
int
2859
PyGILState_Check(void)
2860
0
{
2861
0
    _PyRuntimeState *runtime = &_PyRuntime;
2862
0
    if (!_Py_atomic_load_int_relaxed(&runtime->gilstate.check_enabled)) {
2863
0
        return 1;
2864
0
    }
2865
2866
0
    PyThreadState *tstate = current_fast_get();
2867
0
    if (tstate == NULL) {
2868
0
        return 0;
2869
0
    }
2870
2871
0
    PyThreadState *tcur = gilstate_get();
2872
0
    return (tstate == tcur);
2873
0
}
2874
2875
/* Public API: ensure the calling thread has a Python thread state and
   holds the GIL, creating and binding a fresh thread state if this OS
   thread has never had one.  Returns a token for the matching
   PyGILState_Release() call: PyGILState_LOCKED if the thread already
   held the GIL on entry, PyGILState_UNLOCKED otherwise.  Calls may be
   nested; tstate->gilstate_counter tracks the nesting depth. */
PyGILState_STATE
PyGILState_Ensure(void)
{
    _PyRuntimeState *runtime = &_PyRuntime;

    /* Note that we do not auto-init Python here - apart from
       potential races with 2 threads auto-initializing, pep-311
       spells out other issues.  Embedders are expected to have
       called Py_Initialize(). */

    /* Ensure that _PyEval_InitThreads() and _PyGILState_Init() have been
       called by Py_Initialize()

       TODO: This isn't thread-safe. There's no protection here against
       concurrent finalization of the interpreter; it's simply a guard
       for *after* the interpreter has finalized.
     */
    if (!_PyEval_ThreadsInitialized() || runtime->gilstate.autoInterpreterState == NULL) {
        // The runtime is gone (or was never initialized): park this
        // thread forever rather than crash, e.g. for a daemon thread
        // calling in after Py_Finalize().
        PyThread_hang_thread();
    }

    PyThreadState *tcur = gilstate_get();
    int has_gil;
    if (tcur == NULL) {
        /* Create a new Python thread state for this thread */
        // XXX Use PyInterpreterState_EnsureThreadState()?
        tcur = new_threadstate(runtime->gilstate.autoInterpreterState,
                               _PyThreadState_WHENCE_GILSTATE);
        if (tcur == NULL) {
            Py_FatalError("Couldn't create thread-state for new thread");
        }
        bind_tstate(tcur);
        bind_gilstate_tstate(tcur);

        /* This is our thread state!  We'll need to delete it in the
           matching call to PyGILState_Release(). */
        assert(tcur->gilstate_counter == 1);
        // Reset to 0 here; the ++ below brings it back to 1, so the
        // matching Release() that drops it to 0 destroys the state.
        tcur->gilstate_counter = 0;
        has_gil = 0; /* new thread state is never current */
    }
    else {
        has_gil = holds_gil(tcur);
    }

    if (!has_gil) {
        // Acquire the GIL and make tcur the current thread state.
        PyEval_RestoreThread(tcur);
    }

    /* Update our counter in the thread-state - no need for locks:
       - tcur will remain valid as we hold the GIL.
       - the counter is safe as we are the only thread "allowed"
         to modify this value
    */
    ++tcur->gilstate_counter;

    return has_gil ? PyGILState_LOCKED : PyGILState_UNLOCKED;
}
2932
2933
/* Public API: undo the matching PyGILState_Ensure() call.  Decrements
   the per-thread nesting counter; when it reaches zero the thread state
   created by Ensure() is cleared and deleted (which also releases the
   GIL).  Otherwise, the GIL is released only if `oldstate` records that
   the thread did not hold it before the matching Ensure() call. */
void
PyGILState_Release(PyGILState_STATE oldstate)
{
    PyThreadState *tstate = gilstate_get();
    if (tstate == NULL) {
        Py_FatalError("auto-releasing thread-state, "
                      "but no thread-state for this thread");
    }

    /* We must hold the GIL and have our thread state current */
    if (!holds_gil(tstate)) {
        _Py_FatalErrorFormat(__func__,
                             "thread state %p must be current when releasing",
                             tstate);
    }
    --tstate->gilstate_counter;
    assert(tstate->gilstate_counter >= 0); /* illegal counter value */

    /* If we're going to destroy this thread-state, we must
     * clear it while the GIL is held, as destructors may run.
     */
    if (tstate->gilstate_counter == 0) {
        /* can't have been locked when we created it */
        assert(oldstate == PyGILState_UNLOCKED);
        // XXX Unbind tstate here.
        // gh-119585: `PyThreadState_Clear()` may call destructors that
        // themselves use PyGILState_Ensure and PyGILState_Release, so make
        // sure that gilstate_counter is not zero when calling it.
        ++tstate->gilstate_counter;
        PyThreadState_Clear(tstate);
        --tstate->gilstate_counter;
        /* Delete the thread-state.  Note this releases the GIL too!
         * It's vital that the GIL be held here, to avoid shutdown
         * races; see bugs 225673 and 1061968 (that nasty bug has a
         * habit of coming back).
         */
        assert(tstate->gilstate_counter == 0);
        assert(current_fast_get() == tstate);
        _PyThreadState_DeleteCurrent(tstate);
    }
    /* Release the lock if necessary */
    else if (oldstate == PyGILState_UNLOCKED) {
        PyEval_SaveThread();
    }
}
2978
2979
2980
/*************/
2981
/* Other API */
2982
/*************/
2983
2984
_PyFrameEvalFunction
2985
_PyInterpreterState_GetEvalFrameFunc(PyInterpreterState *interp)
2986
0
{
2987
0
    if (interp->eval_frame == NULL) {
2988
0
        return _PyEval_EvalFrameDefault;
2989
0
    }
2990
0
    return interp->eval_frame;
2991
0
}
2992
2993
2994
/* Install a custom frame evaluation function for `interp` (PEP 523).
   Passing _PyEval_EvalFrameDefault restores the default, which is
   stored internally as NULL.  The swap is done with all other threads
   stopped so no thread observes a half-updated interpreter. */
void
_PyInterpreterState_SetEvalFrameFunc(PyInterpreterState *interp,
                                     _PyFrameEvalFunction eval_frame)
{
    // Canonicalize: the default evaluator is represented by NULL.
    if (eval_frame == _PyEval_EvalFrameDefault) {
        eval_frame = NULL;
    }
    if (eval_frame == interp->eval_frame) {
        return;
    }
#ifdef _Py_TIER2
    // A non-default evaluator invalidates all tier-2 executors, which
    // assume the default evaluation loop.
    if (eval_frame != NULL) {
        _Py_Executors_InvalidateAll(interp, 1);
    }
#endif
    RARE_EVENT_INC(set_eval_frame_func);
    _PyEval_StopTheWorld(interp);
    interp->eval_frame = eval_frame;
    _PyEval_StartTheWorld(interp);
}
3014
3015
3016
const PyConfig*
3017
_PyInterpreterState_GetConfig(PyInterpreterState *interp)
3018
133M
{
3019
133M
    return &interp->config;
3020
133M
}
3021
3022
3023
const PyConfig*
3024
_Py_GetConfig(void)
3025
134k
{
3026
134k
    PyThreadState *tstate = current_fast_get();
3027
134k
    _Py_EnsureTstateNotNULL(tstate);
3028
134k
    return _PyInterpreterState_GetConfig(tstate->interp);
3029
134k
}
3030
3031
3032
int
3033
_PyInterpreterState_HasFeature(PyInterpreterState *interp, unsigned long feature)
3034
0
{
3035
0
    return ((interp->feature_flags & feature) != 0);
3036
0
}
3037
3038
3039
252k
/* Extra headroom (in PyObject* slots) allocated beyond the requested
   frame size, so that subsequent small frames usually fit in the same
   chunk without another allocation. */
#define MINIMUM_OVERHEAD 1000

/* Allocate a new data-stack chunk large enough for `size` slots plus
   MINIMUM_OVERHEAD, link it in front of tstate's current chunk, and
   return a pointer to the `size` reserved slots.  Returns NULL on
   allocation failure. */
static PyObject **
push_chunk(PyThreadState *tstate, int size)
{
    int allocate_size = _PY_DATA_STACK_CHUNK_SIZE;
    // Double the chunk size until the request (with headroom) fits.
    while (allocate_size < (int)sizeof(PyObject*)*(size + MINIMUM_OVERHEAD)) {
        allocate_size *= 2;
    }
    _PyStackChunk *new = allocate_chunk(allocate_size, tstate->datastack_chunk);
    if (new == NULL) {
        return NULL;
    }
    if (tstate->datastack_chunk) {
        // Record how far the old chunk was filled so it can be resumed
        // when this new chunk is eventually popped.
        tstate->datastack_chunk->top = tstate->datastack_top -
                                       &tstate->datastack_chunk->data[0];
    }
    tstate->datastack_chunk = new;
    tstate->datastack_limit = (PyObject **)(((char *)new) + allocate_size);
    // When new is the "root" chunk (i.e. new->previous == NULL), we can keep
    // _PyThreadState_PopFrame from freeing it later by "skipping" over the
    // first element:
    PyObject **res = &new->data[new->previous == NULL];
    tstate->datastack_top = res + size;
    return res;
}
3065
3066
_PyInterpreterFrame *
3067
_PyThreadState_PushFrame(PyThreadState *tstate, size_t size)
3068
223M
{
3069
223M
    assert(size < INT_MAX/sizeof(PyObject *));
3070
223M
    if (_PyThreadState_HasStackSpace(tstate, (int)size)) {
3071
223M
        _PyInterpreterFrame *res = (_PyInterpreterFrame *)tstate->datastack_top;
3072
223M
        tstate->datastack_top += size;
3073
223M
        return res;
3074
223M
    }
3075
252k
    return (_PyInterpreterFrame *)push_chunk(tstate, (int)size);
3076
223M
}
3077
3078
/* Return the data-stack space used by `frame` to the thread state.
   If the frame occupied the very start of the current (non-root) chunk,
   the whole chunk is freed and the previous chunk becomes current. */
void
_PyThreadState_PopFrame(PyThreadState *tstate, _PyInterpreterFrame * frame)
{
    assert(tstate->datastack_chunk);
    PyObject **base = (PyObject **)frame;
    if (base == &tstate->datastack_chunk->data[0]) {
        // Frame was at the base of the current chunk: pop the chunk.
        _PyStackChunk *chunk = tstate->datastack_chunk;
        _PyStackChunk *previous = chunk->previous;
        // push_chunk ensures that the root chunk is never popped:
        assert(previous);
        // Resume the previous chunk at the fill level push_chunk()
        // recorded in previous->top.
        tstate->datastack_top = &previous->data[previous->top];
        tstate->datastack_chunk = previous;
        _PyObject_VirtualFree(chunk, chunk->size);
        tstate->datastack_limit = (PyObject **)(((char *)previous) + previous->size);
    }
    else {
        assert(tstate->datastack_top);
        assert(tstate->datastack_top >= base);
        // Fast path: rewind the stack top to the frame's base.
        tstate->datastack_top = base;
    }
}
3099
3100
3101
#ifndef NDEBUG
// Check that a Python thread state valid. In practice, this function is used
// on a Python debug build to check if 'tstate' is a dangling pointer, if the
// PyThreadState memory has been freed.
//
// Usage:
//
//     assert(_PyThreadState_CheckConsistency(tstate));
int
_PyThreadState_CheckConsistency(PyThreadState *tstate)
{
    // Neither the thread state nor its interpreter may look like freed
    // memory (the debug allocator fills freed blocks with a known
    // pattern that _PyMem_IsPtrFreed() detects).
    assert(!_PyMem_IsPtrFreed(tstate));
    assert(!_PyMem_IsPtrFreed(tstate->interp));
    return 1;
}
#endif
3117
3118
3119
// Check if a Python thread must call _PyThreadState_HangThread(), rather than
3120
// taking the GIL or attaching to the interpreter if Py_Finalize() has been
3121
// called.
3122
//
3123
// When this function is called by a daemon thread after Py_Finalize() has been
3124
// called, the GIL may no longer exist.
3125
//
3126
// tstate must be non-NULL.
3127
int
3128
_PyThreadState_MustExit(PyThreadState *tstate)
3129
7.37M
{
3130
7.37M
    int state = _Py_atomic_load_int_relaxed(&tstate->state);
3131
7.37M
    return state == _Py_THREAD_SHUTTING_DOWN;
3132
7.37M
}
3133
3134
void
3135
_PyThreadState_HangThread(PyThreadState *tstate)
3136
0
{
3137
0
    _PyThreadStateImpl *tstate_impl = (_PyThreadStateImpl *)tstate;
3138
0
    decref_threadstate(tstate_impl);
3139
0
    PyThread_hang_thread();
3140
0
}
3141
3142
/********************/
3143
/* mimalloc support */
3144
/********************/
3145
3146
/* Initialize the per-thread mimalloc state for `tstate`.  Must be
   called from the OS thread that will use the thread state.  This is a
   no-op on the default (GIL-enabled) build, where Py_GIL_DISABLED is
   not defined. */
static void
tstate_mimalloc_bind(PyThreadState *tstate)
{
#ifdef Py_GIL_DISABLED
    struct _mimalloc_thread_state *mts = &((_PyThreadStateImpl*)tstate)->mimalloc;

    // Initialize the mimalloc thread state. This must be called from the
    // same thread that will use the thread state. The "mem" heap doubles as
    // the "backing" heap.
    mi_tld_t *tld = &mts->tld;
    _mi_tld_init(tld, &mts->heaps[_Py_MIMALLOC_HEAP_MEM]);
    llist_init(&mts->page_list);

    // Exiting threads push any remaining in-use segments to the abandoned
    // pool to be re-claimed later by other threads. We use per-interpreter
    // pools to keep Python objects from different interpreters separate.
    tld->segments.abandoned = &tstate->interp->mimalloc.abandoned_pool;

    // Don't fill in the first N bytes up to ob_type in debug builds. We may
    // access ob_tid and the refcount fields in the dict and list lock-less
    // accesses, so they must remain valid for a while after deallocation.
    size_t base_offset = offsetof(PyObject, ob_type);
    if (_PyMem_DebugEnabled()) {
        // The debug allocator adds two words at the beginning of each block.
        base_offset += 2 * sizeof(size_t);
    }
    size_t debug_offsets[_Py_MIMALLOC_HEAP_COUNT] = {
        [_Py_MIMALLOC_HEAP_OBJECT] = base_offset,
        [_Py_MIMALLOC_HEAP_GC] = base_offset,
        [_Py_MIMALLOC_HEAP_GC_PRE] = base_offset + 2 * sizeof(PyObject *),
    };

    // Initialize each heap
    for (uint8_t i = 0; i < _Py_MIMALLOC_HEAP_COUNT; i++) {
        _mi_heap_init_ex(&mts->heaps[i], tld, _mi_arena_id_none(), false, i);
        mts->heaps[i].debug_offset = (uint8_t)debug_offsets[i];
    }

    // Heaps that store Python objects should use QSBR to delay freeing
    // mimalloc pages while there may be concurrent lock-free readers.
    mts->heaps[_Py_MIMALLOC_HEAP_OBJECT].page_use_qsbr = true;
    mts->heaps[_Py_MIMALLOC_HEAP_GC].page_use_qsbr = true;
    mts->heaps[_Py_MIMALLOC_HEAP_GC_PRE].page_use_qsbr = true;

    // By default, object allocations use _Py_MIMALLOC_HEAP_OBJECT.
    // _PyObject_GC_New() and similar functions temporarily override this to
    // use one of the GC heaps.
    mts->current_object_heap = &mts->heaps[_Py_MIMALLOC_HEAP_OBJECT];

    // Publish the fully-initialized state to other threads.
    _Py_atomic_store_int(&mts->initialized, 1);
#endif
}
3198
3199
/* Abandon all mimalloc segments owned by this thread state so that
   other threads can reclaim them later.  Called during thread-state
   teardown; a no-op on the default (GIL-enabled) build. */
void
_PyThreadState_ClearMimallocHeaps(PyThreadState *tstate)
{
#ifdef Py_GIL_DISABLED
    if (!tstate->_status.bound) {
        // The mimalloc heaps are only initialized when the thread is bound.
        return;
    }

    _PyThreadStateImpl *tstate_impl = (_PyThreadStateImpl *)tstate;
    for (Py_ssize_t i = 0; i < _Py_MIMALLOC_HEAP_COUNT; i++) {
        // Abandon all segments in use by this thread. This pushes them to
        // a shared pool to later be reclaimed by other threads. It's important
        // to do this before the thread state is destroyed so that objects
        // remain visible to the GC.
        _mi_heap_collect_abandon(&tstate_impl->mimalloc.heaps[i]);
    }
#endif
}
3218
3219
3220
int
3221
_Py_IsMainThread(void)
3222
105M
{
3223
105M
    unsigned long thread = PyThread_get_thread_ident();
3224
105M
    return (thread == _PyRuntime.main_thread);
3225
105M
}
3226
3227
3228
PyInterpreterState *
3229
_PyInterpreterState_Main(void)
3230
101M
{
3231
101M
    return _PyRuntime.interpreters.main;
3232
101M
}
3233
3234
3235
int
3236
_Py_IsMainInterpreterFinalizing(PyInterpreterState *interp)
3237
0
{
3238
    /* bpo-39877: Access _PyRuntime directly rather than using
3239
       tstate->interp->runtime to support calls from Python daemon threads.
3240
       After Py_Finalize() has been called, tstate can be a dangling pointer:
3241
       point to PyThreadState freed memory. */
3242
0
    return (_PyRuntimeState_GetFinalizing(&_PyRuntime) != NULL &&
3243
0
            interp == &_PyRuntime._main_interpreter);
3244
0
}
3245
3246
3247
const PyConfig *
3248
_Py_GetMainConfig(void)
3249
0
{
3250
0
    PyInterpreterState *interp = _PyInterpreterState_Main();
3251
0
    if (interp == NULL) {
3252
0
        return NULL;
3253
0
    }
3254
0
    return _PyInterpreterState_GetConfig(interp);
3255
0
}