Coverage Report

Created: 2026-03-23 06:45

next uncovered line (L), next uncovered region (R), next uncovered branch (B)
/src/cpython/Python/pystate.c
Line
Count
Source
1
2
/* Thread and interpreter state structures and their interfaces */
3
4
#include "Python.h"
5
#include "pycore_abstract.h"      // _PyIndex_Check()
6
#include "pycore_audit.h"         // _Py_AuditHookEntry
7
#include "pycore_backoff.h"       // JUMP_BACKWARD_INITIAL_VALUE, SIDE_EXIT_INITIAL_VALUE
8
#include "pycore_ceval.h"         // _PyEval_AcquireLock()
9
#include "pycore_codecs.h"        // _PyCodec_Fini()
10
#include "pycore_critical_section.h" // _PyCriticalSection_Resume()
11
#include "pycore_dtoa.h"          // _dtoa_state_INIT()
12
#include "pycore_freelist.h"      // _PyObject_ClearFreeLists()
13
#include "pycore_initconfig.h"    // _PyStatus_OK()
14
#include "pycore_interpframe.h"   // _PyThreadState_HasStackSpace()
15
#include "pycore_object.h"        // _PyType_InitCache()
16
#include "pycore_obmalloc.h"      // _PyMem_obmalloc_state_on_heap()
17
#include "pycore_optimizer.h"     // JIT_CLEANUP_THRESHOLD
18
#include "pycore_parking_lot.h"   // _PyParkingLot_AfterFork()
19
#include "pycore_pyerrors.h"      // _PyErr_Clear()
20
#include "pycore_pylifecycle.h"   // _PyAST_Fini()
21
#include "pycore_pymem.h"         // _PyMem_DebugEnabled()
22
#include "pycore_runtime.h"       // _PyRuntime
23
#include "pycore_runtime_init.h"  // _PyRuntimeState_INIT
24
#include "pycore_stackref.h"      // Py_STACKREF_DEBUG
25
#include "pycore_stats.h"         // FT_STAT_WORLD_STOP_INC()
26
#include "pycore_time.h"          // _PyTime_Init()
27
#include "pycore_uniqueid.h"      // _PyObject_FinalizePerThreadRefcounts()
28
29
30
/* --------------------------------------------------------------------------
31
CAUTION
32
33
Always use PyMem_RawMalloc() and PyMem_RawFree() directly in this file.  A
34
number of these functions are advertised as safe to call when the GIL isn't
35
held, and in a debug build Python redirects (e.g.) PyMem_NEW (etc) to Python's
36
debugging obmalloc functions.  Those aren't thread-safe (they rely on the GIL
37
to avoid the expense of doing their own locking).
38
-------------------------------------------------------------------------- */
39
40
#ifdef HAVE_DLOPEN
41
#  ifdef HAVE_DLFCN_H
42
#    include <dlfcn.h>
43
#  endif
44
#  if !HAVE_DECL_RTLD_LAZY
45
#    define RTLD_LAZY 1
46
#  endif
47
#endif
48
49
50
/****************************************/
51
/* helpers for the current thread state */
52
/****************************************/
53
54
// API for the current thread state is further down.
55
56
/* "current" means one of:
57
   - bound to the current OS thread
58
   - holds the GIL
59
 */
60
61
//-------------------------------------------------
62
// a highly efficient lookup for the current thread
63
//-------------------------------------------------
64
65
/*
66
   The stored thread state is set by PyThreadState_Swap().
67
68
   For each of these functions, the GIL must be held by the current thread.
69
 */
70
71
72
/* The attached thread state for the current thread. */
73
_Py_thread_local PyThreadState *_Py_tss_tstate = NULL;
74
75
/* The "bound" thread state used by PyGILState_Ensure(),
76
   also known as a "gilstate." */
77
_Py_thread_local PyThreadState *_Py_tss_gilstate = NULL;
78
79
/* The interpreter of the attached thread state,
80
   and is same as tstate->interp. */
81
_Py_thread_local PyInterpreterState *_Py_tss_interp = NULL;
82
83
static inline PyThreadState *
84
current_fast_get(void)
85
161M
{
86
161M
    return _Py_tss_tstate;
87
161M
}
88
89
static inline void
90
current_fast_set(_PyRuntimeState *Py_UNUSED(runtime), PyThreadState *tstate)
91
2.81M
{
92
2.81M
    assert(tstate != NULL);
93
2.81M
    _Py_tss_tstate = tstate;
94
2.81M
    assert(tstate->interp != NULL);
95
2.81M
    _Py_tss_interp = tstate->interp;
96
2.81M
}
97
98
static inline void
99
current_fast_clear(_PyRuntimeState *Py_UNUSED(runtime))
100
2.81M
{
101
2.81M
    _Py_tss_tstate = NULL;
102
2.81M
    _Py_tss_interp = NULL;
103
2.81M
}
104
105
#define tstate_verify_not_active(tstate) \
106
0
    if (tstate == current_fast_get()) { \
107
0
        _Py_FatalErrorFormat(__func__, "tstate %p is still current", tstate); \
108
0
    }
109
110
PyThreadState *
111
_PyThreadState_GetCurrent(void)
112
8.64M
{
113
8.64M
    return current_fast_get();
114
8.64M
}
115
116
117
//---------------------------------------------
118
// The thread state used by PyGILState_Ensure()
119
//---------------------------------------------
120
121
/*
122
   The stored thread state is set by bind_tstate() (AKA PyThreadState_Bind().
123
124
   The GIL does no need to be held for these.
125
  */
126
127
static inline PyThreadState *
128
gilstate_get(void)
129
72
{
130
72
    return _Py_tss_gilstate;
131
72
}
132
133
static inline void
134
gilstate_set(PyThreadState *tstate)
135
36
{
136
36
    assert(tstate != NULL);
137
36
    _Py_tss_gilstate = tstate;
138
36
}
139
140
static inline void
141
gilstate_clear(void)
142
0
{
143
0
    _Py_tss_gilstate = NULL;
144
0
}
145
146
147
#ifndef NDEBUG
148
static inline int tstate_is_alive(PyThreadState *tstate);
149
150
static inline int
151
tstate_is_bound(PyThreadState *tstate)
152
{
153
    return tstate->_status.bound && !tstate->_status.unbound;
154
}
155
#endif  // !NDEBUG
156
157
static void bind_gilstate_tstate(PyThreadState *);
158
static void unbind_gilstate_tstate(PyThreadState *);
159
160
static void tstate_mimalloc_bind(PyThreadState *);
161
162
static void
163
bind_tstate(PyThreadState *tstate)
164
36
{
165
36
    assert(tstate != NULL);
166
36
    assert(tstate_is_alive(tstate) && !tstate->_status.bound);
167
36
    assert(!tstate->_status.unbound);  // just in case
168
36
    assert(!tstate->_status.bound_gilstate);
169
36
    assert(tstate != gilstate_get());
170
36
    assert(!tstate->_status.active);
171
36
    assert(tstate->thread_id == 0);
172
36
    assert(tstate->native_thread_id == 0);
173
174
    // Currently we don't necessarily store the thread state
175
    // in thread-local storage (e.g. per-interpreter).
176
177
36
    tstate->thread_id = PyThread_get_thread_ident();
178
36
#ifdef PY_HAVE_THREAD_NATIVE_ID
179
36
    tstate->native_thread_id = PyThread_get_thread_native_id();
180
36
#endif
181
182
#ifdef Py_GIL_DISABLED
183
    // Initialize biased reference counting inter-thread queue. Note that this
184
    // needs to be initialized from the active thread.
185
    _Py_brc_init_thread(tstate);
186
#endif
187
188
    // mimalloc state needs to be initialized from the active thread.
189
36
    tstate_mimalloc_bind(tstate);
190
191
36
    tstate->_status.bound = 1;
192
36
}
193
194
static void
195
unbind_tstate(PyThreadState *tstate)
196
0
{
197
0
    assert(tstate != NULL);
198
0
    assert(tstate_is_bound(tstate));
199
0
#ifndef HAVE_PTHREAD_STUBS
200
0
    assert(tstate->thread_id > 0);
201
0
#endif
202
0
#ifdef PY_HAVE_THREAD_NATIVE_ID
203
0
    assert(tstate->native_thread_id > 0);
204
0
#endif
205
206
    // We leave thread_id and native_thread_id alone
207
    // since they can be useful for debugging.
208
    // Check the `_status` field to know if these values
209
    // are still valid.
210
211
    // We leave tstate->_status.bound set to 1
212
    // to indicate it was previously bound.
213
0
    tstate->_status.unbound = 1;
214
0
}
215
216
217
/* Stick the thread state for this thread in thread specific storage.
218
219
   When a thread state is created for a thread by some mechanism
220
   other than PyGILState_Ensure(), it's important that the GILState
221
   machinery knows about it so it doesn't try to create another
222
   thread state for the thread.
223
   (This is a better fix for SF bug #1010677 than the first one attempted.)
224
225
   The only situation where you can legitimately have more than one
226
   thread state for an OS level thread is when there are multiple
227
   interpreters.
228
229
   Before 3.12, the PyGILState_*() APIs didn't work with multiple
230
   interpreters (see bpo-10915 and bpo-15751), so this function used
231
   to set TSS only once.  Thus, the first thread state created for that
232
   given OS level thread would "win", which seemed reasonable behaviour.
233
*/
234
235
static void
236
bind_gilstate_tstate(PyThreadState *tstate)
237
36
{
238
36
    assert(tstate != NULL);
239
36
    assert(tstate_is_alive(tstate));
240
36
    assert(tstate_is_bound(tstate));
241
    // XXX assert(!tstate->_status.active);
242
36
    assert(!tstate->_status.bound_gilstate);
243
244
36
    PyThreadState *tcur = gilstate_get();
245
36
    assert(tstate != tcur);
246
247
36
    if (tcur != NULL) {
248
0
        tcur->_status.bound_gilstate = 0;
249
0
    }
250
36
    gilstate_set(tstate);
251
36
    tstate->_status.bound_gilstate = 1;
252
36
}
253
254
static void
255
unbind_gilstate_tstate(PyThreadState *tstate)
256
0
{
257
0
    assert(tstate != NULL);
258
    // XXX assert(tstate_is_alive(tstate));
259
0
    assert(tstate_is_bound(tstate));
260
    // XXX assert(!tstate->_status.active);
261
0
    assert(tstate->_status.bound_gilstate);
262
0
    assert(tstate == gilstate_get());
263
0
    gilstate_clear();
264
0
    tstate->_status.bound_gilstate = 0;
265
0
}
266
267
268
//----------------------------------------------
269
// the thread state that currently holds the GIL
270
//----------------------------------------------
271
272
/* This is not exported, as it is not reliable!  It can only
273
   ever be compared to the state for the *current* thread.
274
   * If not equal, then it doesn't matter that the actual
275
     value may change immediately after comparison, as it can't
276
     possibly change to the current thread's state.
277
   * If equal, then the current thread holds the lock, so the value can't
278
     change until we yield the lock.
279
*/
280
static int
281
holds_gil(PyThreadState *tstate)
282
0
{
283
    // XXX Fall back to tstate->interp->runtime->ceval.gil.last_holder
284
    // (and tstate->interp->runtime->ceval.gil.locked).
285
0
    assert(tstate != NULL);
286
    /* Must be the tstate for this thread */
287
0
    assert(tstate == gilstate_get());
288
0
    return tstate == current_fast_get();
289
0
}
290
291
292
/****************************/
293
/* the global runtime state */
294
/****************************/
295
296
//----------
297
// lifecycle
298
//----------
299
300
/* Suppress deprecation warning for PyBytesObject.ob_shash */
301
_Py_COMP_DIAG_PUSH
302
_Py_COMP_DIAG_IGNORE_DEPR_DECLS
303
/* We use "initial" if the runtime gets re-used
304
   (e.g. Py_Finalize() followed by Py_Initialize().
305
   Note that we initialize "initial" relative to _PyRuntime,
306
   to ensure pre-initialized pointers point to the active
307
   runtime state (and not "initial"). */
308
static const _PyRuntimeState initial = _PyRuntimeState_INIT(_PyRuntime, "");
309
_Py_COMP_DIAG_POP
310
311
#define LOCKS_INIT(runtime) \
312
0
    { \
313
0
        &(runtime)->interpreters.mutex, \
314
0
        &(runtime)->xi.data_lookup.registry.mutex, \
315
0
        &(runtime)->unicode_state.ids.mutex, \
316
0
        &(runtime)->imports.extensions.mutex, \
317
0
        &(runtime)->ceval.pending_mainthread.mutex, \
318
0
        &(runtime)->atexit.mutex, \
319
0
        &(runtime)->audit_hooks.mutex, \
320
0
        &(runtime)->allocators.mutex, \
321
0
        &(runtime)->_main_interpreter.types.mutex, \
322
0
        &(runtime)->_main_interpreter.code_state.mutex, \
323
0
    }
324
325
static void
326
init_runtime(_PyRuntimeState *runtime,
327
             void *open_code_hook, void *open_code_userdata,
328
             _Py_AuditHookEntry *audit_hook_head,
329
             Py_ssize_t unicode_next_index)
330
36
{
331
36
    assert(!runtime->preinitializing);
332
36
    assert(!runtime->preinitialized);
333
36
    assert(!runtime->core_initialized);
334
36
    assert(!runtime->initialized);
335
36
    assert(!runtime->_initialized);
336
337
36
    runtime->open_code_hook = open_code_hook;
338
36
    runtime->open_code_userdata = open_code_userdata;
339
36
    runtime->audit_hooks.head = audit_hook_head;
340
341
36
    PyPreConfig_InitPythonConfig(&runtime->preconfig);
342
343
    // Set it to the ID of the main thread of the main interpreter.
344
36
    runtime->main_thread = PyThread_get_thread_ident();
345
346
36
    runtime->unicode_state.ids.next_index = unicode_next_index;
347
36
    runtime->_initialized = 1;
348
36
}
349
350
PyStatus
351
_PyRuntimeState_Init(_PyRuntimeState *runtime)
352
36
{
353
    /* We preserve the hook across init, because there is
354
       currently no public API to set it between runtime
355
       initialization and interpreter initialization. */
356
36
    void *open_code_hook = runtime->open_code_hook;
357
36
    void *open_code_userdata = runtime->open_code_userdata;
358
36
    _Py_AuditHookEntry *audit_hook_head = runtime->audit_hooks.head;
359
    // bpo-42882: Preserve next_index value if Py_Initialize()/Py_Finalize()
360
    // is called multiple times.
361
36
    Py_ssize_t unicode_next_index = runtime->unicode_state.ids.next_index;
362
363
36
    if (runtime->_initialized) {
364
        // Py_Initialize() must be running again.
365
        // Reset to _PyRuntimeState_INIT.
366
0
        memcpy(runtime, &initial, sizeof(*runtime));
367
        // Preserve the cookie from the original runtime.
368
0
        memcpy(runtime->debug_offsets.cookie, _Py_Debug_Cookie, 8);
369
0
        assert(!runtime->_initialized);
370
0
    }
371
372
36
    PyStatus status = _PyTime_Init(&runtime->time);
373
36
    if (_PyStatus_EXCEPTION(status)) {
374
0
        return status;
375
0
    }
376
377
36
    init_runtime(runtime, open_code_hook, open_code_userdata, audit_hook_head,
378
36
                 unicode_next_index);
379
380
36
    return _PyStatus_OK();
381
36
}
382
383
void
384
_PyRuntimeState_Fini(_PyRuntimeState *runtime)
385
0
{
386
#ifdef Py_REF_DEBUG
387
    /* The count is cleared by _Py_FinalizeRefTotal(). */
388
    assert(runtime->object_state.interpreter_leaks == 0);
389
#endif
390
0
    gilstate_clear();
391
0
}
392
393
#ifdef HAVE_FORK
394
/* This function is called from PyOS_AfterFork_Child to ensure that
395
   newly created child processes do not share locks with the parent. */
396
PyStatus
397
_PyRuntimeState_ReInitThreads(_PyRuntimeState *runtime)
398
0
{
399
    // This was initially set in _PyRuntimeState_Init().
400
0
    runtime->main_thread = PyThread_get_thread_ident();
401
402
    // Clears the parking lot. Any waiting threads are dead. This must be
403
    // called before releasing any locks that use the parking lot.
404
0
    _PyParkingLot_AfterFork();
405
406
    // Re-initialize global locks
407
0
    PyMutex *locks[] = LOCKS_INIT(runtime);
408
0
    for (size_t i = 0; i < Py_ARRAY_LENGTH(locks); i++) {
409
0
        _PyMutex_at_fork_reinit(locks[i]);
410
0
    }
411
#ifdef Py_GIL_DISABLED
412
    for (PyInterpreterState *interp = runtime->interpreters.head;
413
         interp != NULL; interp = interp->next)
414
    {
415
        for (int i = 0; i < NUM_WEAKREF_LIST_LOCKS; i++) {
416
            _PyMutex_at_fork_reinit(&interp->weakref_locks[i]);
417
        }
418
    }
419
#endif
420
421
0
    _PyTypes_AfterFork();
422
423
0
    _PyThread_AfterFork(&runtime->threads);
424
425
0
    return _PyStatus_OK();
426
0
}
427
#endif
428
429
430
/*************************************/
431
/* the per-interpreter runtime state */
432
/*************************************/
433
434
//----------
435
// lifecycle
436
//----------
437
438
/* Calling this indicates that the runtime is ready to create interpreters. */
439
440
PyStatus
441
_PyInterpreterState_Enable(_PyRuntimeState *runtime)
442
36
{
443
36
    struct pyinterpreters *interpreters = &runtime->interpreters;
444
36
    interpreters->next_id = 0;
445
36
    return _PyStatus_OK();
446
36
}
447
448
static PyInterpreterState *
449
alloc_interpreter(void)
450
0
{
451
    // Aligned allocation for PyInterpreterState.
452
    // the first word of the memory block is used to store
453
    // the original pointer to be used later to free the memory.
454
0
    size_t alignment = _Alignof(PyInterpreterState);
455
0
    size_t allocsize = sizeof(PyInterpreterState) + sizeof(void *) + alignment - 1;
456
0
    void *mem = PyMem_RawCalloc(1, allocsize);
457
0
    if (mem == NULL) {
458
0
        return NULL;
459
0
    }
460
0
    void *ptr = _Py_ALIGN_UP((char *)mem + sizeof(void *), alignment);
461
0
    ((void **)ptr)[-1] = mem;
462
0
    assert(_Py_IS_ALIGNED(ptr, alignment));
463
0
    return ptr;
464
0
}
465
466
static void
467
free_interpreter(PyInterpreterState *interp)
468
0
{
469
#ifdef Py_STATS
470
    if (interp->pystats_struct) {
471
        PyMem_RawFree(interp->pystats_struct);
472
        interp->pystats_struct = NULL;
473
    }
474
#endif
475
    // The main interpreter is statically allocated so
476
    // should not be freed.
477
0
    if (interp != &_PyRuntime._main_interpreter) {
478
0
        if (_PyMem_obmalloc_state_on_heap(interp)) {
479
            // interpreter has its own obmalloc state, free it
480
0
            PyMem_RawFree(interp->obmalloc);
481
0
            interp->obmalloc = NULL;
482
0
        }
483
0
        assert(_Py_IS_ALIGNED(interp, _Alignof(PyInterpreterState)));
484
0
        PyMem_RawFree(((void **)interp)[-1]);
485
0
    }
486
0
}
487
488
#ifndef NDEBUG
489
static inline int check_interpreter_whence(long);
490
#endif
491
492
extern _Py_CODEUNIT *
493
_Py_LazyJitShim(
494
    struct _PyExecutorObject *exec, _PyInterpreterFrame *frame, _PyStackRef *stack_pointer, PyThreadState *tstate
495
);
496
497
/* Get the interpreter state to a minimal consistent state.
498
   Further init happens in pylifecycle.c before it can be used.
499
   All fields not initialized here are expected to be zeroed out,
500
   e.g. by PyMem_RawCalloc() or memset(), or otherwise pre-initialized.
501
   The runtime state is not manipulated.  Instead it is assumed that
502
   the interpreter is getting added to the runtime.
503
504
   Note that the main interpreter was statically initialized as part
505
   of the runtime and most state is already set properly.  That leaves
506
   a small number of fields to initialize dynamically, as well as some
507
   that are initialized lazily.
508
509
   For subinterpreters we memcpy() the main interpreter in
510
   PyInterpreterState_New(), leaving it in the same mostly-initialized
511
   state.  The only difference is that the interpreter has some
512
   self-referential state that is statically initializexd to the
513
   main interpreter.  We fix those fields here, in addition
514
   to the other dynamically initialized fields.
515
  */
516
517
static inline bool
518
is_env_enabled(const char *env_name)
519
72
{
520
72
    char *env = Py_GETENV(env_name);
521
72
    return env && *env != '\0' && *env != '0';
522
72
}
523
524
static inline bool
525
is_env_disabled(const char *env_name)
526
36
{
527
36
    char *env = Py_GETENV(env_name);
528
36
    return env != NULL && *env == '0';
529
36
}
530
531
static inline void
532
init_policy(uint16_t *target, const char *env_name, uint16_t default_value,
533
            long min_value, long max_value)
534
216
{
535
216
    *target = default_value;
536
216
    char *env = Py_GETENV(env_name);
537
216
    if (env && *env != '\0') {
538
0
        long value = atol(env);
539
0
        if (value >= min_value && value <= max_value) {
540
0
            *target = (uint16_t)value;
541
0
        }
542
0
    }
543
216
}
544
545
static PyStatus
546
init_interpreter(PyInterpreterState *interp,
547
                 _PyRuntimeState *runtime, int64_t id,
548
                 PyInterpreterState *next,
549
                 long whence)
550
36
{
551
36
    if (interp->_initialized) {
552
0
        return _PyStatus_ERR("interpreter already initialized");
553
0
    }
554
555
36
    assert(interp->_whence == _PyInterpreterState_WHENCE_NOTSET);
556
36
    assert(check_interpreter_whence(whence) == 0);
557
36
    interp->_whence = whence;
558
559
36
    assert(runtime != NULL);
560
36
    interp->runtime = runtime;
561
562
36
    assert(id > 0 || (id == 0 && interp == runtime->interpreters.main));
563
36
    interp->id = id;
564
565
36
    interp->id_refcount = 0;
566
567
36
    assert(runtime->interpreters.head == interp);
568
36
    assert(next != NULL || (interp == runtime->interpreters.main));
569
36
    interp->next = next;
570
571
36
    interp->threads.preallocated = &interp->_initial_thread;
572
573
    // We would call _PyObject_InitState() at this point
574
    // if interp->feature_flags were alredy set.
575
576
36
    _PyEval_InitState(interp);
577
36
    _PyGC_InitState(&interp->gc);
578
36
    PyConfig_InitPythonConfig(&interp->config);
579
36
    _PyType_InitCache(interp);
580
#ifdef Py_GIL_DISABLED
581
    _Py_brc_init_state(interp);
582
#endif
583
584
36
    llist_init(&interp->mem_free_queue.head);
585
36
    llist_init(&interp->asyncio_tasks_head);
586
36
    interp->asyncio_tasks_lock = (PyMutex){0};
587
612
    for (int i = 0; i < _PY_MONITORING_UNGROUPED_EVENTS; i++) {
588
576
        interp->monitors.tools[i] = 0;
589
576
    }
590
324
    for (int t = 0; t < PY_MONITORING_TOOL_IDS; t++) {
591
5.76k
        for (int e = 0; e < _PY_MONITORING_EVENTS; e++) {
592
5.47k
            interp->monitoring_callables[t][e] = NULL;
593
594
5.47k
        }
595
288
        interp->monitoring_tool_versions[t] = 0;
596
288
    }
597
36
    interp->_code_object_generation = 0;
598
36
    interp->jit = false;
599
36
    interp->compiling = false;
600
36
    interp->executor_blooms = NULL;
601
36
    interp->executor_ptrs = NULL;
602
36
    interp->executor_count = 0;
603
36
    interp->executor_capacity = 0;
604
36
    interp->executor_deletion_list_head = NULL;
605
36
    interp->executor_creation_counter = JIT_CLEANUP_THRESHOLD;
606
607
    // Initialize optimization configuration from environment variables
608
    // PYTHON_JIT_STRESS sets aggressive defaults for testing, but can be overridden
609
36
    uint16_t jump_default = JUMP_BACKWARD_INITIAL_VALUE;
610
36
    uint16_t resume_default = RESUME_INITIAL_VALUE;
611
36
    uint16_t side_exit_default = SIDE_EXIT_INITIAL_VALUE;
612
613
36
    if (is_env_enabled("PYTHON_JIT_STRESS")) {
614
0
        jump_default = 63;
615
0
        side_exit_default = 63;
616
0
        resume_default = 127;
617
0
    }
618
619
36
    init_policy(&interp->opt_config.jump_backward_initial_value,
620
36
                "PYTHON_JIT_JUMP_BACKWARD_INITIAL_VALUE",
621
36
                jump_default, 1, MAX_VALUE);
622
36
    init_policy(&interp->opt_config.jump_backward_initial_backoff,
623
36
                "PYTHON_JIT_JUMP_BACKWARD_INITIAL_BACKOFF",
624
36
                JUMP_BACKWARD_INITIAL_BACKOFF, 0, MAX_BACKOFF);
625
36
    init_policy(&interp->opt_config.resume_initial_value,
626
36
                "PYTHON_JIT_RESUME_INITIAL_VALUE",
627
36
                resume_default, 1, MAX_VALUE);
628
36
    init_policy(&interp->opt_config.resume_initial_backoff,
629
36
                "PYTHON_JIT_RESUME_INITIAL_BACKOFF",
630
36
                RESUME_INITIAL_BACKOFF, 0, MAX_BACKOFF);
631
36
    init_policy(&interp->opt_config.side_exit_initial_value,
632
36
                "PYTHON_JIT_SIDE_EXIT_INITIAL_VALUE",
633
36
                side_exit_default, 1, MAX_VALUE);
634
36
    init_policy(&interp->opt_config.side_exit_initial_backoff,
635
36
                "PYTHON_JIT_SIDE_EXIT_INITIAL_BACKOFF",
636
36
                SIDE_EXIT_INITIAL_BACKOFF, 0, MAX_BACKOFF);
637
638
36
    interp->opt_config.specialization_enabled = !is_env_enabled("PYTHON_SPECIALIZATION_OFF");
639
36
    interp->opt_config.uops_optimize_enabled = !is_env_disabled("PYTHON_UOPS_OPTIMIZE");
640
36
    if (interp != &runtime->_main_interpreter) {
641
        /* Fix the self-referential, statically initialized fields. */
642
0
        interp->dtoa = (struct _dtoa_state)_dtoa_state_INIT(interp);
643
0
    }
644
#if !defined(Py_GIL_DISABLED) && defined(Py_STACKREF_DEBUG)
645
    interp->next_stackref = INITIAL_STACKREF_INDEX;
646
    _Py_hashtable_allocator_t alloc = {
647
        .malloc = malloc,
648
        .free = free,
649
    };
650
    interp->open_stackrefs_table = _Py_hashtable_new_full(
651
        _Py_hashtable_hash_ptr,
652
        _Py_hashtable_compare_direct,
653
        NULL,
654
        NULL,
655
        &alloc
656
    );
657
#  ifdef Py_STACKREF_CLOSE_DEBUG
658
    interp->closed_stackrefs_table = _Py_hashtable_new_full(
659
        _Py_hashtable_hash_ptr,
660
        _Py_hashtable_compare_direct,
661
        NULL,
662
        NULL,
663
        &alloc
664
    );
665
#  endif
666
    _Py_stackref_associate(interp, Py_None, PyStackRef_None);
667
    _Py_stackref_associate(interp, Py_False, PyStackRef_False);
668
    _Py_stackref_associate(interp, Py_True, PyStackRef_True);
669
#endif
670
671
36
    interp->_initialized = 1;
672
36
    return _PyStatus_OK();
673
36
}
674
675
676
PyStatus
677
_PyInterpreterState_New(PyThreadState *tstate, PyInterpreterState **pinterp)
678
36
{
679
36
    *pinterp = NULL;
680
681
    // Don't get runtime from tstate since tstate can be NULL
682
36
    _PyRuntimeState *runtime = &_PyRuntime;
683
684
    // tstate is NULL when pycore_create_interpreter() calls
685
    // _PyInterpreterState_New() to create the main interpreter.
686
36
    if (tstate != NULL) {
687
0
        if (_PySys_Audit(tstate, "cpython.PyInterpreterState_New", NULL) < 0) {
688
0
            return _PyStatus_ERR("sys.audit failed");
689
0
        }
690
0
    }
691
692
    /* We completely serialize creation of multiple interpreters, since
693
       it simplifies things here and blocking concurrent calls isn't a problem.
694
       Regardless, we must fully block subinterpreter creation until
695
       after the main interpreter is created. */
696
36
    HEAD_LOCK(runtime);
697
698
36
    struct pyinterpreters *interpreters = &runtime->interpreters;
699
36
    int64_t id = interpreters->next_id;
700
36
    interpreters->next_id += 1;
701
702
    // Allocate the interpreter and add it to the runtime state.
703
36
    PyInterpreterState *interp;
704
36
    PyStatus status;
705
36
    PyInterpreterState *old_head = interpreters->head;
706
36
    if (old_head == NULL) {
707
        // We are creating the main interpreter.
708
36
        assert(interpreters->main == NULL);
709
36
        assert(id == 0);
710
711
36
        interp = &runtime->_main_interpreter;
712
36
        assert(interp->id == 0);
713
36
        assert(interp->next == NULL);
714
715
36
        interpreters->main = interp;
716
36
    }
717
0
    else {
718
0
        assert(interpreters->main != NULL);
719
0
        assert(id != 0);
720
721
0
        interp = alloc_interpreter();
722
0
        if (interp == NULL) {
723
0
            status = _PyStatus_NO_MEMORY();
724
0
            goto error;
725
0
        }
726
        // Set to _PyInterpreterState_INIT.
727
0
        memcpy(interp, &initial._main_interpreter, sizeof(*interp));
728
729
0
        if (id < 0) {
730
            /* overflow or Py_Initialize() not called yet! */
731
0
            status = _PyStatus_ERR("failed to get an interpreter ID");
732
0
            goto error;
733
0
        }
734
0
    }
735
36
    interpreters->head = interp;
736
737
36
    long whence = _PyInterpreterState_WHENCE_UNKNOWN;
738
36
    status = init_interpreter(interp, runtime,
739
36
                              id, old_head, whence);
740
36
    if (_PyStatus_EXCEPTION(status)) {
741
0
        goto error;
742
0
    }
743
744
36
    HEAD_UNLOCK(runtime);
745
746
36
    assert(interp != NULL);
747
36
    *pinterp = interp;
748
36
    return _PyStatus_OK();
749
750
0
error:
751
0
    HEAD_UNLOCK(runtime);
752
753
0
    if (interp != NULL) {
754
0
        free_interpreter(interp);
755
0
    }
756
0
    return status;
757
36
}
758
759
760
PyInterpreterState *
761
PyInterpreterState_New(void)
762
0
{
763
    // tstate can be NULL
764
0
    PyThreadState *tstate = current_fast_get();
765
766
0
    PyInterpreterState *interp;
767
0
    PyStatus status = _PyInterpreterState_New(tstate, &interp);
768
0
    if (_PyStatus_EXCEPTION(status)) {
769
0
        Py_ExitStatusException(status);
770
0
    }
771
0
    assert(interp != NULL);
772
0
    return interp;
773
0
}
774
775
#if !defined(Py_GIL_DISABLED) && defined(Py_STACKREF_DEBUG)
776
extern void
777
_Py_stackref_report_leaks(PyInterpreterState *interp);
778
#endif
779
780
static void
781
interpreter_clear(PyInterpreterState *interp, PyThreadState *tstate)
782
0
{
783
0
    assert(interp != NULL);
784
0
    assert(tstate != NULL);
785
0
    _PyRuntimeState *runtime = interp->runtime;
786
787
    /* XXX Conditions we need to enforce:
788
789
       * the GIL must be held by the current thread
790
       * tstate must be the "current" thread state (current_fast_get())
791
       * tstate->interp must be interp
792
       * for the main interpreter, tstate must be the main thread
793
     */
794
    // XXX Ideally, we would not rely on any thread state in this function
795
    // (and we would drop the "tstate" argument).
796
797
0
    if (_PySys_Audit(tstate, "cpython.PyInterpreterState_Clear", NULL) < 0) {
798
0
        _PyErr_Clear(tstate);
799
0
    }
800
801
    // Clear the current/main thread state last.
802
0
    _Py_FOR_EACH_TSTATE_BEGIN(interp, p) {
803
        // See https://github.com/python/cpython/issues/102126
804
        // Must be called without HEAD_LOCK held as it can deadlock
805
        // if any finalizer tries to acquire that lock.
806
0
        HEAD_UNLOCK(runtime);
807
0
        PyThreadState_Clear(p);
808
0
        HEAD_LOCK(runtime);
809
0
    }
810
0
    _Py_FOR_EACH_TSTATE_END(interp);
811
0
    if (tstate->interp == interp) {
812
        /* We fix tstate->_status below when we for sure aren't using it
813
           (e.g. no longer need the GIL). */
814
        // XXX Eliminate the need to do this.
815
0
        tstate->_status.cleared = 0;
816
0
    }
817
818
    /* It is possible that any of the objects below have a finalizer
819
       that runs Python code or otherwise relies on a thread state
820
       or even the interpreter state.  For now we trust that isn't
821
       a problem.
822
     */
823
    // XXX Make sure we properly deal with problematic finalizers.
824
825
0
    Py_CLEAR(interp->audit_hooks);
826
827
    // gh-140257: Threads have already been cleared, but daemon threads may
828
    // still access eval_breaker atomically via take_gil() right before they
829
    // hang. Use an atomic store to prevent data races during finalization.
830
0
    interp->ceval.instrumentation_version = 0;
831
0
    _Py_atomic_store_uintptr(&tstate->eval_breaker, 0);
832
833
0
    for (int i = 0; i < _PY_MONITORING_UNGROUPED_EVENTS; i++) {
834
0
        interp->monitors.tools[i] = 0;
835
0
    }
836
0
    for (int t = 0; t < PY_MONITORING_TOOL_IDS; t++) {
837
0
        for (int e = 0; e < _PY_MONITORING_EVENTS; e++) {
838
0
            Py_CLEAR(interp->monitoring_callables[t][e]);
839
0
        }
840
0
    }
841
0
    for (int t = 0; t < PY_MONITORING_TOOL_IDS; t++) {
842
0
        Py_CLEAR(interp->monitoring_tool_names[t]);
843
0
    }
844
0
    interp->_code_object_generation = 0;
845
#ifdef Py_GIL_DISABLED
846
    interp->tlbc_indices.tlbc_generation = 0;
847
#endif
848
849
0
    PyConfig_Clear(&interp->config);
850
0
    _PyCodec_Fini(interp);
851
852
0
    assert(interp->imports.modules == NULL);
853
0
    assert(interp->imports.modules_by_index == NULL);
854
0
    assert(interp->imports.importlib == NULL);
855
0
    assert(interp->imports.import_func == NULL);
856
857
0
    Py_CLEAR(interp->sysdict_copy);
858
0
    Py_CLEAR(interp->builtins_copy);
859
0
    Py_CLEAR(interp->dict);
860
0
#ifdef HAVE_FORK
861
0
    Py_CLEAR(interp->before_forkers);
862
0
    Py_CLEAR(interp->after_forkers_parent);
863
0
    Py_CLEAR(interp->after_forkers_child);
864
0
#endif
865
866
867
#ifdef _Py_TIER2
868
    _Py_ClearExecutorDeletionList(interp);
869
#endif
870
0
    _PyAST_Fini(interp);
871
0
    _PyAtExit_Fini(interp);
872
873
    // All Python types must be destroyed before the last GC collection. Python
874
    // types create a reference cycle to themselves in their in their
875
    // PyTypeObject.tp_mro member (the tuple contains the type).
876
877
    /* Last garbage collection on this interpreter */
878
0
    _PyGC_CollectNoFail(tstate);
879
0
    _PyGC_Fini(interp);
880
881
    // Finalize warnings after last gc so that any finalizers can
882
    // access warnings state
883
0
    _PyWarnings_Fini(interp);
884
0
    struct _PyExecutorObject *cold = interp->cold_executor;
885
0
    if (cold != NULL) {
886
0
        interp->cold_executor = NULL;
887
0
        assert(cold->vm_data.valid);
888
0
        assert(!cold->vm_data.cold);
889
0
        _PyExecutor_Free(cold);
890
0
    }
891
892
0
    struct _PyExecutorObject *cold_dynamic = interp->cold_dynamic_executor;
893
0
    if (cold_dynamic != NULL) {
894
0
        interp->cold_dynamic_executor = NULL;
895
0
        assert(cold_dynamic->vm_data.valid);
896
0
        assert(!cold_dynamic->vm_data.cold);
897
0
        _PyExecutor_Free(cold_dynamic);
898
0
    }
899
    /* We don't clear sysdict and builtins until the end of this function.
900
       Because clearing other attributes can execute arbitrary Python code
901
       which requires sysdict and builtins. */
902
0
    PyDict_Clear(interp->sysdict);
903
0
    PyDict_Clear(interp->builtins);
904
0
    Py_CLEAR(interp->sysdict);
905
0
    Py_CLEAR(interp->builtins);
906
907
#if !defined(Py_GIL_DISABLED) && defined(Py_STACKREF_DEBUG)
908
#  ifdef Py_STACKREF_CLOSE_DEBUG
909
    _Py_hashtable_destroy(interp->closed_stackrefs_table);
910
    interp->closed_stackrefs_table = NULL;
911
#  endif
912
    _Py_stackref_report_leaks(interp);
913
    _Py_hashtable_destroy(interp->open_stackrefs_table);
914
    interp->open_stackrefs_table = NULL;
915
#endif
916
917
0
    if (tstate->interp == interp) {
918
        /* We are now safe to fix tstate->_status.cleared. */
919
        // XXX Do this (much) earlier?
920
0
        tstate->_status.cleared = 1;
921
0
    }
922
923
0
    for (int i=0; i < DICT_MAX_WATCHERS; i++) {
924
0
        interp->dict_state.watchers[i] = NULL;
925
0
    }
926
927
0
    for (int i=0; i < TYPE_MAX_WATCHERS; i++) {
928
0
        interp->type_watchers[i] = NULL;
929
0
    }
930
931
0
    for (int i=0; i < FUNC_MAX_WATCHERS; i++) {
932
0
        interp->func_watchers[i] = NULL;
933
0
    }
934
0
    interp->active_func_watchers = 0;
935
936
0
    for (int i=0; i < CODE_MAX_WATCHERS; i++) {
937
0
        interp->code_watchers[i] = NULL;
938
0
    }
939
0
    interp->active_code_watchers = 0;
940
941
0
    for (int i=0; i < CONTEXT_MAX_WATCHERS; i++) {
942
0
        interp->context_watchers[i] = NULL;
943
0
    }
944
0
    interp->active_context_watchers = 0;
945
    // XXX Once we have one allocator per interpreter (i.e.
946
    // per-interpreter GC) we must ensure that all of the interpreter's
947
    // objects have been cleaned up at the point.
948
949
    // We could clear interp->threads.freelist here
950
    // if it held more than just the initial thread state.
951
0
}
952
953
954
void
955
PyInterpreterState_Clear(PyInterpreterState *interp)
956
0
{
957
    // Use the current Python thread state to call audit hooks and to collect
958
    // garbage. It can be different than the current Python thread state
959
    // of 'interp'.
960
0
    PyThreadState *current_tstate = current_fast_get();
961
0
    _PyImport_ClearCore(interp);
962
0
    interpreter_clear(interp, current_tstate);
963
0
}
964
965
966
void
967
_PyInterpreterState_Clear(PyThreadState *tstate)
968
0
{
969
0
    _PyImport_ClearCore(tstate->interp);
970
0
    interpreter_clear(tstate->interp, tstate);
971
0
}
972
973
974
static inline void tstate_deactivate(PyThreadState *tstate);
975
static void tstate_set_detached(PyThreadState *tstate, int detached_state);
976
static void zapthreads(PyInterpreterState *interp);
977
978
void
979
PyInterpreterState_Delete(PyInterpreterState *interp)
980
0
{
981
0
    _PyRuntimeState *runtime = interp->runtime;
982
0
    struct pyinterpreters *interpreters = &runtime->interpreters;
983
984
    // XXX Clearing the "current" thread state should happen before
985
    // we start finalizing the interpreter (or the current thread state).
986
0
    PyThreadState *tcur = current_fast_get();
987
0
    if (tcur != NULL && interp == tcur->interp) {
988
        /* Unset current thread.  After this, many C API calls become crashy. */
989
0
        _PyThreadState_Detach(tcur);
990
0
    }
991
992
0
    zapthreads(interp);
993
994
    // XXX These two calls should be done at the end of clear_interpreter(),
995
    // but currently some objects get decref'ed after that.
996
#ifdef Py_REF_DEBUG
997
    _PyInterpreterState_FinalizeRefTotal(interp);
998
#endif
999
0
    _PyInterpreterState_FinalizeAllocatedBlocks(interp);
1000
1001
0
    HEAD_LOCK(runtime);
1002
0
    PyInterpreterState **p;
1003
0
    for (p = &interpreters->head; ; p = &(*p)->next) {
1004
0
        if (*p == NULL) {
1005
0
            Py_FatalError("NULL interpreter");
1006
0
        }
1007
0
        if (*p == interp) {
1008
0
            break;
1009
0
        }
1010
0
    }
1011
0
    if (interp->threads.head != NULL) {
1012
0
        Py_FatalError("remaining threads");
1013
0
    }
1014
0
    *p = interp->next;
1015
1016
0
    if (interpreters->main == interp) {
1017
0
        interpreters->main = NULL;
1018
0
        if (interpreters->head != NULL) {
1019
0
            Py_FatalError("remaining subinterpreters");
1020
0
        }
1021
0
    }
1022
0
    HEAD_UNLOCK(runtime);
1023
1024
0
    _Py_qsbr_fini(interp);
1025
1026
0
    _PyObject_FiniState(interp);
1027
1028
0
    PyConfig_Clear(&interp->config);
1029
1030
0
    free_interpreter(interp);
1031
0
}
1032
1033
1034
#ifdef HAVE_FORK
1035
/*
1036
 * Delete all interpreter states except the main interpreter.  If there
1037
 * is a current interpreter state, it *must* be the main interpreter.
1038
 */
1039
PyStatus
1040
_PyInterpreterState_DeleteExceptMain(_PyRuntimeState *runtime)
1041
0
{
1042
0
    struct pyinterpreters *interpreters = &runtime->interpreters;
1043
1044
0
    PyThreadState *tstate = _PyThreadState_Swap(runtime, NULL);
1045
0
    if (tstate != NULL && tstate->interp != interpreters->main) {
1046
0
        return _PyStatus_ERR("not main interpreter");
1047
0
    }
1048
1049
0
    HEAD_LOCK(runtime);
1050
0
    PyInterpreterState *interp = interpreters->head;
1051
0
    interpreters->head = NULL;
1052
0
    while (interp != NULL) {
1053
0
        if (interp == interpreters->main) {
1054
0
            interpreters->main->next = NULL;
1055
0
            interpreters->head = interp;
1056
0
            interp = interp->next;
1057
0
            continue;
1058
0
        }
1059
1060
        // XXX Won't this fail since PyInterpreterState_Clear() requires
1061
        // the "current" tstate to be set?
1062
0
        PyInterpreterState_Clear(interp);  // XXX must activate?
1063
0
        zapthreads(interp);
1064
0
        PyInterpreterState *prev_interp = interp;
1065
0
        interp = interp->next;
1066
0
        free_interpreter(prev_interp);
1067
0
    }
1068
0
    HEAD_UNLOCK(runtime);
1069
1070
0
    if (interpreters->head == NULL) {
1071
0
        return _PyStatus_ERR("missing main interpreter");
1072
0
    }
1073
0
    _PyThreadState_Swap(runtime, tstate);
1074
0
    return _PyStatus_OK();
1075
0
}
1076
#endif
1077
1078
static inline void
1079
set_main_thread(PyInterpreterState *interp, PyThreadState *tstate)
1080
0
{
1081
0
    _Py_atomic_store_ptr_relaxed(&interp->threads.main, tstate);
1082
0
}
1083
1084
static inline PyThreadState *
1085
get_main_thread(PyInterpreterState *interp)
1086
0
{
1087
0
    return _Py_atomic_load_ptr_relaxed(&interp->threads.main);
1088
0
}
1089
1090
void
1091
_PyErr_SetInterpreterAlreadyRunning(void)
1092
0
{
1093
0
    PyErr_SetString(PyExc_InterpreterError, "interpreter already running");
1094
0
}
1095
1096
int
1097
_PyInterpreterState_SetRunningMain(PyInterpreterState *interp)
1098
0
{
1099
0
    if (get_main_thread(interp) != NULL) {
1100
0
        _PyErr_SetInterpreterAlreadyRunning();
1101
0
        return -1;
1102
0
    }
1103
0
    PyThreadState *tstate = current_fast_get();
1104
0
    _Py_EnsureTstateNotNULL(tstate);
1105
0
    if (tstate->interp != interp) {
1106
0
        PyErr_SetString(PyExc_RuntimeError,
1107
0
                        "current tstate has wrong interpreter");
1108
0
        return -1;
1109
0
    }
1110
0
    set_main_thread(interp, tstate);
1111
1112
0
    return 0;
1113
0
}
1114
1115
void
1116
_PyInterpreterState_SetNotRunningMain(PyInterpreterState *interp)
1117
0
{
1118
0
    assert(get_main_thread(interp) == current_fast_get());
1119
0
    set_main_thread(interp, NULL);
1120
0
}
1121
1122
int
1123
_PyInterpreterState_IsRunningMain(PyInterpreterState *interp)
1124
0
{
1125
0
    if (get_main_thread(interp) != NULL) {
1126
0
        return 1;
1127
0
    }
1128
    // Embedders might not know to call _PyInterpreterState_SetRunningMain(),
1129
    // so their main thread wouldn't show it is running the main interpreter's
1130
    // program.  (Py_Main() doesn't have this problem.)  For now this isn't
1131
    // critical.  If it were, we would need to infer "running main" from other
1132
    // information, like if it's the main interpreter.  We used to do that
1133
    // but the naive approach led to some inconsistencies that caused problems.
1134
0
    return 0;
1135
0
}
1136
1137
int
1138
_PyThreadState_IsRunningMain(PyThreadState *tstate)
1139
0
{
1140
0
    PyInterpreterState *interp = tstate->interp;
1141
    // See the note in _PyInterpreterState_IsRunningMain() about
1142
    // possible false negatives here for embedders.
1143
0
    return get_main_thread(interp) == tstate;
1144
0
}
1145
1146
void
1147
_PyInterpreterState_ReinitRunningMain(PyThreadState *tstate)
1148
0
{
1149
0
    PyInterpreterState *interp = tstate->interp;
1150
0
    if (get_main_thread(interp) != tstate) {
1151
0
        set_main_thread(interp, NULL);
1152
0
    }
1153
0
}
1154
1155
1156
//----------
1157
// accessors
1158
//----------
1159
1160
int
1161
_PyInterpreterState_IsReady(PyInterpreterState *interp)
1162
0
{
1163
0
    return interp->_ready;
1164
0
}
1165
1166
#ifndef NDEBUG
1167
static inline int
1168
check_interpreter_whence(long whence)
1169
{
1170
    if(whence < 0) {
1171
        return -1;
1172
    }
1173
    if (whence > _PyInterpreterState_WHENCE_MAX) {
1174
        return -1;
1175
    }
1176
    return 0;
1177
}
1178
#endif
1179
1180
long
1181
_PyInterpreterState_GetWhence(PyInterpreterState *interp)
1182
0
{
1183
0
    assert(check_interpreter_whence(interp->_whence) == 0);
1184
0
    return interp->_whence;
1185
0
}
1186
1187
void
1188
_PyInterpreterState_SetWhence(PyInterpreterState *interp, long whence)
1189
36
{
1190
36
    assert(interp->_whence != _PyInterpreterState_WHENCE_NOTSET);
1191
36
    assert(check_interpreter_whence(whence) == 0);
1192
36
    interp->_whence = whence;
1193
36
}
1194
1195
1196
PyObject *
1197
_Py_GetMainModule(PyThreadState *tstate)
1198
0
{
1199
    // We return None to indicate "not found" or "bogus".
1200
0
    PyObject *modules = _PyImport_GetModulesRef(tstate->interp);
1201
0
    if (modules == Py_None) {
1202
0
        return modules;
1203
0
    }
1204
0
    PyObject *module = NULL;
1205
0
    (void)PyMapping_GetOptionalItem(modules, &_Py_ID(__main__), &module);
1206
0
    Py_DECREF(modules);
1207
0
    if (module == NULL && !PyErr_Occurred()) {
1208
0
        Py_RETURN_NONE;
1209
0
    }
1210
0
    return module;
1211
0
}
1212
1213
int
1214
_Py_CheckMainModule(PyObject *module)
1215
0
{
1216
0
    if (module == NULL || module == Py_None) {
1217
0
        if (!PyErr_Occurred()) {
1218
0
            (void)_PyErr_SetModuleNotFoundError(&_Py_ID(__main__));
1219
0
        }
1220
0
        return -1;
1221
0
    }
1222
0
    if (!Py_IS_TYPE(module, &PyModule_Type)) {
1223
        /* The __main__ module has been tampered with. */
1224
0
        PyObject *msg = PyUnicode_FromString("invalid __main__ module");
1225
0
        if (msg != NULL) {
1226
0
            (void)PyErr_SetImportError(msg, &_Py_ID(__main__), NULL);
1227
0
            Py_DECREF(msg);
1228
0
        }
1229
0
        return -1;
1230
0
    }
1231
0
    return 0;
1232
0
}
1233
1234
1235
PyObject *
1236
PyInterpreterState_GetDict(PyInterpreterState *interp)
1237
22.4k
{
1238
22.4k
    if (interp->dict == NULL) {
1239
16
        interp->dict = PyDict_New();
1240
16
        if (interp->dict == NULL) {
1241
0
            PyErr_Clear();
1242
0
        }
1243
16
    }
1244
    /* Returning NULL means no per-interpreter dict is available. */
1245
22.4k
    return interp->dict;
1246
22.4k
}
1247
1248
1249
//----------
1250
// interp ID
1251
//----------
1252
1253
int64_t
1254
_PyInterpreterState_ObjectToID(PyObject *idobj)
1255
0
{
1256
0
    if (!_PyIndex_Check(idobj)) {
1257
0
        PyErr_Format(PyExc_TypeError,
1258
0
                     "interpreter ID must be an int, got %.100s",
1259
0
                     Py_TYPE(idobj)->tp_name);
1260
0
        return -1;
1261
0
    }
1262
1263
    // This may raise OverflowError.
1264
    // For now, we don't worry about if LLONG_MAX < INT64_MAX.
1265
0
    long long id = PyLong_AsLongLong(idobj);
1266
0
    if (id == -1 && PyErr_Occurred()) {
1267
0
        return -1;
1268
0
    }
1269
1270
0
    if (id < 0) {
1271
0
        PyErr_Format(PyExc_ValueError,
1272
0
                     "interpreter ID must be a non-negative int, got %R",
1273
0
                     idobj);
1274
0
        return -1;
1275
0
    }
1276
#if LLONG_MAX > INT64_MAX
1277
    else if (id > INT64_MAX) {
1278
        PyErr_SetString(PyExc_OverflowError, "int too big to convert");
1279
        return -1;
1280
    }
1281
#endif
1282
0
    else {
1283
0
        return (int64_t)id;
1284
0
    }
1285
0
}
1286
1287
int64_t
1288
PyInterpreterState_GetID(PyInterpreterState *interp)
1289
0
{
1290
0
    if (interp == NULL) {
1291
0
        PyErr_SetString(PyExc_RuntimeError, "no interpreter provided");
1292
0
        return -1;
1293
0
    }
1294
0
    return interp->id;
1295
0
}
1296
1297
PyObject *
1298
_PyInterpreterState_GetIDObject(PyInterpreterState *interp)
1299
0
{
1300
0
    int64_t interpid = interp->id;
1301
0
    if (interpid < 0) {
1302
0
        return NULL;
1303
0
    }
1304
0
    assert(interpid < LLONG_MAX);
1305
0
    return PyLong_FromLongLong(interpid);
1306
0
}
1307
1308
1309
1310
void
1311
_PyInterpreterState_IDIncref(PyInterpreterState *interp)
1312
0
{
1313
0
    _Py_atomic_add_ssize(&interp->id_refcount, 1);
1314
0
}
1315
1316
1317
void
1318
_PyInterpreterState_IDDecref(PyInterpreterState *interp)
1319
0
{
1320
0
    _PyRuntimeState *runtime = interp->runtime;
1321
1322
0
    Py_ssize_t refcount = _Py_atomic_add_ssize(&interp->id_refcount, -1);
1323
1324
0
    if (refcount == 1 && interp->requires_idref) {
1325
0
        PyThreadState *tstate =
1326
0
            _PyThreadState_NewBound(interp, _PyThreadState_WHENCE_FINI);
1327
1328
        // XXX Possible GILState issues?
1329
0
        PyThreadState *save_tstate = _PyThreadState_Swap(runtime, tstate);
1330
0
        Py_EndInterpreter(tstate);
1331
0
        _PyThreadState_Swap(runtime, save_tstate);
1332
0
    }
1333
0
}
1334
1335
int
1336
_PyInterpreterState_RequiresIDRef(PyInterpreterState *interp)
1337
0
{
1338
0
    return interp->requires_idref;
1339
0
}
1340
1341
void
1342
_PyInterpreterState_RequireIDRef(PyInterpreterState *interp, int required)
1343
0
{
1344
0
    interp->requires_idref = required ? 1 : 0;
1345
0
}
1346
1347
1348
//-----------------------------
1349
// look up an interpreter state
1350
//-----------------------------
1351
1352
/* Return the interpreter associated with the current OS thread.
1353
1354
   The GIL must be held.
1355
  */
1356
1357
PyInterpreterState*
1358
PyInterpreterState_Get(void)
1359
22.5k
{
1360
22.5k
    _Py_AssertHoldsTstate();
1361
22.5k
    PyInterpreterState *interp = _Py_tss_interp;
1362
22.5k
    if (interp == NULL) {
1363
0
        Py_FatalError("no current interpreter");
1364
0
    }
1365
22.5k
    return interp;
1366
22.5k
}
1367
1368
1369
static PyInterpreterState *
1370
interp_look_up_id(_PyRuntimeState *runtime, int64_t requested_id)
1371
0
{
1372
0
    PyInterpreterState *interp = runtime->interpreters.head;
1373
0
    while (interp != NULL) {
1374
0
        int64_t id = interp->id;
1375
0
        assert(id >= 0);
1376
0
        if (requested_id == id) {
1377
0
            return interp;
1378
0
        }
1379
0
        interp = PyInterpreterState_Next(interp);
1380
0
    }
1381
0
    return NULL;
1382
0
}
1383
1384
/* Return the interpreter state with the given ID.
1385
1386
   Fail with RuntimeError if the interpreter is not found. */
1387
1388
PyInterpreterState *
1389
_PyInterpreterState_LookUpID(int64_t requested_id)
1390
0
{
1391
0
    PyInterpreterState *interp = NULL;
1392
0
    if (requested_id >= 0) {
1393
0
        _PyRuntimeState *runtime = &_PyRuntime;
1394
0
        HEAD_LOCK(runtime);
1395
0
        interp = interp_look_up_id(runtime, requested_id);
1396
0
        HEAD_UNLOCK(runtime);
1397
0
    }
1398
0
    if (interp == NULL && !PyErr_Occurred()) {
1399
0
        PyErr_Format(PyExc_InterpreterNotFoundError,
1400
0
                     "unrecognized interpreter ID %lld", requested_id);
1401
0
    }
1402
0
    return interp;
1403
0
}
1404
1405
PyInterpreterState *
1406
_PyInterpreterState_LookUpIDObject(PyObject *requested_id)
1407
0
{
1408
0
    int64_t id = _PyInterpreterState_ObjectToID(requested_id);
1409
0
    if (id < 0) {
1410
0
        return NULL;
1411
0
    }
1412
0
    return _PyInterpreterState_LookUpID(id);
1413
0
}
1414
1415
1416
/********************************/
1417
/* the per-thread runtime state */
1418
/********************************/
1419
1420
#ifndef NDEBUG
1421
static inline int
1422
tstate_is_alive(PyThreadState *tstate)
1423
{
1424
    return (tstate->_status.initialized &&
1425
            !tstate->_status.finalized &&
1426
            !tstate->_status.cleared &&
1427
            !tstate->_status.finalizing);
1428
}
1429
#endif
1430
1431
1432
//----------
1433
// lifecycle
1434
//----------
1435
1436
static _PyStackChunk*
1437
allocate_chunk(int size_in_bytes, _PyStackChunk* previous)
1438
43.0k
{
1439
43.0k
    assert(size_in_bytes % sizeof(PyObject **) == 0);
1440
43.0k
    _PyStackChunk *res = _PyObject_VirtualAlloc(size_in_bytes);
1441
43.0k
    if (res == NULL) {
1442
0
        return NULL;
1443
0
    }
1444
43.0k
    res->previous = previous;
1445
43.0k
    res->size = size_in_bytes;
1446
43.0k
    res->top = 0;
1447
43.0k
    return res;
1448
43.0k
}
1449
1450
static void
1451
reset_threadstate(_PyThreadStateImpl *tstate)
1452
0
{
1453
    // Set to _PyThreadState_INIT directly?
1454
0
    memcpy(tstate,
1455
0
           &initial._main_interpreter._initial_thread,
1456
0
           sizeof(*tstate));
1457
0
}
1458
1459
static _PyThreadStateImpl *
1460
alloc_threadstate(PyInterpreterState *interp)
1461
36
{
1462
36
    _PyThreadStateImpl *tstate;
1463
1464
    // Try the preallocated tstate first.
1465
36
    tstate = _Py_atomic_exchange_ptr(&interp->threads.preallocated, NULL);
1466
1467
    // Fall back to the allocator.
1468
36
    if (tstate == NULL) {
1469
0
        tstate = PyMem_RawCalloc(1, sizeof(_PyThreadStateImpl));
1470
0
        if (tstate == NULL) {
1471
0
            return NULL;
1472
0
        }
1473
0
        reset_threadstate(tstate);
1474
0
    }
1475
36
    return tstate;
1476
36
}
1477
1478
static void
1479
free_threadstate(_PyThreadStateImpl *tstate)
1480
0
{
1481
0
    PyInterpreterState *interp = tstate->base.interp;
1482
#ifdef Py_STATS
1483
    _PyStats_ThreadFini(tstate);
1484
#endif
1485
    // The initial thread state of the interpreter is allocated
1486
    // as part of the interpreter state so should not be freed.
1487
0
    if (tstate == &interp->_initial_thread) {
1488
        // Make it available again.
1489
0
        reset_threadstate(tstate);
1490
0
        assert(interp->threads.preallocated == NULL);
1491
0
        _Py_atomic_store_ptr(&interp->threads.preallocated, tstate);
1492
0
    }
1493
0
    else {
1494
0
        PyMem_RawFree(tstate);
1495
0
    }
1496
0
}
1497
1498
static void
1499
decref_threadstate(_PyThreadStateImpl *tstate)
1500
0
{
1501
0
    if (_Py_atomic_add_ssize(&tstate->refcount, -1) == 1) {
1502
        // The last reference to the thread state is gone.
1503
0
        free_threadstate(tstate);
1504
0
    }
1505
0
}
1506
1507
/* Get the thread state to a minimal consistent state.
1508
   Further init happens in pylifecycle.c before it can be used.
1509
   All fields not initialized here are expected to be zeroed out,
1510
   e.g. by PyMem_RawCalloc() or memset(), or otherwise pre-initialized.
1511
   The interpreter state is not manipulated.  Instead it is assumed that
1512
   the thread is getting added to the interpreter.
1513
  */
1514
1515
static void
1516
init_threadstate(_PyThreadStateImpl *_tstate,
1517
                 PyInterpreterState *interp, uint64_t id, int whence)
1518
36
{
1519
36
    PyThreadState *tstate = (PyThreadState *)_tstate;
1520
36
    if (tstate->_status.initialized) {
1521
0
        Py_FatalError("thread state already initialized");
1522
0
    }
1523
1524
36
    assert(interp != NULL);
1525
36
    tstate->interp = interp;
1526
36
    tstate->eval_breaker =
1527
36
        _Py_atomic_load_uintptr_relaxed(&interp->ceval.instrumentation_version);
1528
1529
    // next/prev are set in add_threadstate().
1530
36
    assert(tstate->next == NULL);
1531
36
    assert(tstate->prev == NULL);
1532
1533
36
    assert(tstate->_whence == _PyThreadState_WHENCE_NOTSET);
1534
36
    assert(whence >= 0 && whence <= _PyThreadState_WHENCE_THREADING_DAEMON);
1535
36
    tstate->_whence = whence;
1536
1537
36
    assert(id > 0);
1538
36
    tstate->id = id;
1539
1540
    // thread_id and native_thread_id are set in bind_tstate().
1541
1542
36
    tstate->py_recursion_limit = interp->ceval.recursion_limit;
1543
36
    tstate->py_recursion_remaining = interp->ceval.recursion_limit;
1544
36
    tstate->exc_info = &tstate->exc_state;
1545
1546
    // PyGILState_Release must not try to delete this thread state.
1547
    // This is cleared when PyGILState_Ensure() creates the thread state.
1548
36
    tstate->gilstate_counter = 1;
1549
1550
    // Initialize the embedded base frame - sentinel at the bottom of the frame stack
1551
36
    _tstate->base_frame.previous = NULL;
1552
36
    _tstate->base_frame.f_executable = PyStackRef_None;
1553
36
    _tstate->base_frame.f_funcobj = PyStackRef_NULL;
1554
36
    _tstate->base_frame.f_globals = NULL;
1555
36
    _tstate->base_frame.f_builtins = NULL;
1556
36
    _tstate->base_frame.f_locals = NULL;
1557
36
    _tstate->base_frame.frame_obj = NULL;
1558
36
    _tstate->base_frame.instr_ptr = NULL;
1559
36
    _tstate->base_frame.stackpointer = _tstate->base_frame.localsplus;
1560
36
    _tstate->base_frame.return_offset = 0;
1561
36
    _tstate->base_frame.owner = FRAME_OWNED_BY_INTERPRETER;
1562
36
    _tstate->base_frame.visited = 0;
1563
#ifdef Py_DEBUG
1564
    _tstate->base_frame.lltrace = 0;
1565
#endif
1566
#ifdef Py_GIL_DISABLED
1567
    _tstate->base_frame.tlbc_index = 0;
1568
#endif
1569
36
    _tstate->base_frame.localsplus[0] = PyStackRef_NULL;
1570
1571
    // current_frame starts pointing to the base frame
1572
36
    tstate->current_frame = &_tstate->base_frame;
1573
    // base_frame pointer for profilers to validate stack unwinding
1574
36
    tstate->base_frame = &_tstate->base_frame;
1575
36
    tstate->datastack_chunk = NULL;
1576
36
    tstate->datastack_top = NULL;
1577
36
    tstate->datastack_limit = NULL;
1578
36
    tstate->datastack_cached_chunk = NULL;
1579
36
    tstate->what_event = -1;
1580
36
    tstate->current_executor = NULL;
1581
36
    tstate->jit_exit = NULL;
1582
36
    tstate->dict_global_version = 0;
1583
1584
36
    _tstate->c_stack_soft_limit = UINTPTR_MAX;
1585
36
    _tstate->c_stack_top = 0;
1586
36
    _tstate->c_stack_hard_limit = 0;
1587
1588
36
    _tstate->c_stack_init_base = 0;
1589
36
    _tstate->c_stack_init_top = 0;
1590
1591
36
    _tstate->asyncio_running_loop = NULL;
1592
36
    _tstate->asyncio_running_task = NULL;
1593
1594
#ifdef _Py_TIER2
1595
    _tstate->jit_tracer_state = NULL;
1596
#endif
1597
36
    tstate->delete_later = NULL;
1598
1599
36
    llist_init(&_tstate->mem_free_queue);
1600
36
    llist_init(&_tstate->asyncio_tasks_head);
1601
36
    if (interp->stoptheworld.requested || _PyRuntime.stoptheworld.requested) {
1602
        // Start in the suspended state if there is an ongoing stop-the-world.
1603
0
        tstate->state = _Py_THREAD_SUSPENDED;
1604
0
    }
1605
1606
36
    tstate->_status.initialized = 1;
1607
36
}
1608
1609
static void
1610
add_threadstate(PyInterpreterState *interp, PyThreadState *tstate,
1611
                PyThreadState *next)
1612
36
{
1613
36
    assert(interp->threads.head != tstate);
1614
36
    if (next != NULL) {
1615
0
        assert(next->prev == NULL || next->prev == tstate);
1616
0
        next->prev = tstate;
1617
0
    }
1618
36
    tstate->next = next;
1619
36
    assert(tstate->prev == NULL);
1620
36
    interp->threads.head = tstate;
1621
36
}
1622
1623
static PyThreadState *
1624
new_threadstate(PyInterpreterState *interp, int whence)
1625
36
{
1626
    // Allocate the thread state.
1627
36
    _PyThreadStateImpl *tstate = alloc_threadstate(interp);
1628
36
    if (tstate == NULL) {
1629
0
        return NULL;
1630
0
    }
1631
1632
#ifdef Py_GIL_DISABLED
1633
    Py_ssize_t qsbr_idx = _Py_qsbr_reserve(interp);
1634
    if (qsbr_idx < 0) {
1635
        free_threadstate(tstate);
1636
        return NULL;
1637
    }
1638
    int32_t tlbc_idx = _Py_ReserveTLBCIndex(interp);
1639
    if (tlbc_idx < 0) {
1640
        free_threadstate(tstate);
1641
        return NULL;
1642
    }
1643
#endif
1644
#ifdef Py_STATS
1645
    // The PyStats structure is quite large and is allocated separated from tstate.
1646
    if (!_PyStats_ThreadInit(interp, tstate)) {
1647
        free_threadstate(tstate);
1648
        return NULL;
1649
    }
1650
#endif
1651
1652
    /* We serialize concurrent creation to protect global state. */
1653
36
    HEAD_LOCK(interp->runtime);
1654
1655
    // Initialize the new thread state.
1656
36
    interp->threads.next_unique_id += 1;
1657
36
    uint64_t id = interp->threads.next_unique_id;
1658
36
    init_threadstate(tstate, interp, id, whence);
1659
1660
    // Add the new thread state to the interpreter.
1661
36
    PyThreadState *old_head = interp->threads.head;
1662
36
    add_threadstate(interp, (PyThreadState *)tstate, old_head);
1663
1664
36
    HEAD_UNLOCK(interp->runtime);
1665
1666
#ifdef Py_GIL_DISABLED
1667
    // Must be called with lock unlocked to avoid lock ordering deadlocks.
1668
    _Py_qsbr_register(tstate, interp, qsbr_idx);
1669
    tstate->tlbc_index = tlbc_idx;
1670
#endif
1671
1672
36
    return (PyThreadState *)tstate;
1673
36
}
1674
1675
PyThreadState *
1676
PyThreadState_New(PyInterpreterState *interp)
1677
0
{
1678
0
    return _PyThreadState_NewBound(interp, _PyThreadState_WHENCE_UNKNOWN);
1679
0
}
1680
1681
PyThreadState *
1682
_PyThreadState_NewBound(PyInterpreterState *interp, int whence)
1683
0
{
1684
0
    PyThreadState *tstate = new_threadstate(interp, whence);
1685
0
    if (tstate) {
1686
0
        bind_tstate(tstate);
1687
        // This makes sure there's a gilstate tstate bound
1688
        // as soon as possible.
1689
0
        if (gilstate_get() == NULL) {
1690
0
            bind_gilstate_tstate(tstate);
1691
0
        }
1692
0
    }
1693
0
    return tstate;
1694
0
}
1695
1696
// This must be followed by a call to _PyThreadState_Bind();
1697
PyThreadState *
1698
_PyThreadState_New(PyInterpreterState *interp, int whence)
1699
36
{
1700
36
    return new_threadstate(interp, whence);
1701
36
}
1702
1703
// We keep this for stable ABI compabibility.
1704
PyAPI_FUNC(PyThreadState*)
1705
_PyThreadState_Prealloc(PyInterpreterState *interp)
1706
0
{
1707
0
    return _PyThreadState_New(interp, _PyThreadState_WHENCE_UNKNOWN);
1708
0
}
1709
1710
// We keep this around for (accidental) stable ABI compatibility.
1711
// Realistically, no extensions are using it.
1712
PyAPI_FUNC(void)
1713
_PyThreadState_Init(PyThreadState *tstate)
1714
0
{
1715
0
    Py_FatalError("_PyThreadState_Init() is for internal use only");
1716
0
}
1717
1718
1719
static void
1720
clear_datastack(PyThreadState *tstate)
1721
0
{
1722
0
    _PyStackChunk *chunk = tstate->datastack_chunk;
1723
0
    tstate->datastack_chunk = NULL;
1724
0
    while (chunk != NULL) {
1725
0
        _PyStackChunk *prev = chunk->previous;
1726
0
        _PyObject_VirtualFree(chunk, chunk->size);
1727
0
        chunk = prev;
1728
0
    }
1729
0
    if (tstate->datastack_cached_chunk != NULL) {
1730
0
        _PyObject_VirtualFree(tstate->datastack_cached_chunk,
1731
0
                              tstate->datastack_cached_chunk->size);
1732
0
        tstate->datastack_cached_chunk = NULL;
1733
0
    }
1734
0
}
1735
1736
void
1737
PyThreadState_Clear(PyThreadState *tstate)
1738
0
{
1739
0
    assert(tstate->_status.initialized && !tstate->_status.cleared);
1740
0
    assert(current_fast_get()->interp == tstate->interp);
1741
    // GH-126016: In the _interpreters module, KeyboardInterrupt exceptions
1742
    // during PyEval_EvalCode() are sent to finalization, which doesn't let us
1743
    // mark threads as "not running main". So, for now this assertion is
1744
    // disabled.
1745
    // XXX assert(!_PyThreadState_IsRunningMain(tstate));
1746
    // XXX assert(!tstate->_status.bound || tstate->_status.unbound);
1747
0
    tstate->_status.finalizing = 1;  // just in case
1748
1749
    /* XXX Conditions we need to enforce:
1750
1751
       * the GIL must be held by the current thread
1752
       * current_fast_get()->interp must match tstate->interp
1753
       * for the main interpreter, current_fast_get() must be the main thread
1754
     */
1755
1756
0
    int verbose = _PyInterpreterState_GetConfig(tstate->interp)->verbose;
1757
1758
0
    if (verbose && tstate->current_frame != tstate->base_frame) {
1759
        /* bpo-20526: After the main thread calls
1760
           _PyInterpreterState_SetFinalizing() in Py_FinalizeEx()
1761
           (or in Py_EndInterpreter() for subinterpreters),
1762
           threads must exit when trying to take the GIL.
1763
           If a thread exit in the middle of _PyEval_EvalFrameDefault(),
1764
           tstate->frame is not reset to its previous value.
1765
           It is more likely with daemon threads, but it can happen
1766
           with regular threads if threading._shutdown() fails
1767
           (ex: interrupted by CTRL+C). */
1768
0
        fprintf(stderr,
1769
0
          "PyThreadState_Clear: warning: thread still has a frame\n");
1770
0
    }
1771
1772
0
    if (verbose && tstate->current_exception != NULL) {
1773
0
        fprintf(stderr, "PyThreadState_Clear: warning: thread has an exception set\n");
1774
0
        _PyErr_Print(tstate);
1775
0
    }
1776
1777
    /* At this point tstate shouldn't be used any more,
1778
       neither to run Python code nor for other uses.
1779
1780
       This is tricky when current_fast_get() == tstate, in the same way
1781
       as noted in interpreter_clear() above.  The below finalizers
1782
       can possibly run Python code or otherwise use the partially
1783
       cleared thread state.  For now we trust that isn't a problem
1784
       in practice.
1785
     */
1786
    // XXX Deal with the possibility of problematic finalizers.
1787
1788
    /* Don't clear tstate->pyframe: it is a borrowed reference */
1789
1790
0
    Py_CLEAR(tstate->threading_local_key);
1791
0
    Py_CLEAR(tstate->threading_local_sentinel);
1792
1793
0
    Py_CLEAR(((_PyThreadStateImpl *)tstate)->asyncio_running_loop);
1794
0
    Py_CLEAR(((_PyThreadStateImpl *)tstate)->asyncio_running_task);
1795
1796
1797
0
    PyMutex_Lock(&tstate->interp->asyncio_tasks_lock);
1798
    // merge any lingering tasks from thread state to interpreter's
1799
    // tasks list
1800
0
    llist_concat(&tstate->interp->asyncio_tasks_head,
1801
0
                 &((_PyThreadStateImpl *)tstate)->asyncio_tasks_head);
1802
0
    PyMutex_Unlock(&tstate->interp->asyncio_tasks_lock);
1803
1804
0
    Py_CLEAR(tstate->dict);
1805
0
    Py_CLEAR(tstate->async_exc);
1806
1807
0
    Py_CLEAR(tstate->current_exception);
1808
1809
0
    Py_CLEAR(tstate->exc_state.exc_value);
1810
1811
    /* The stack of exception states should contain just this thread. */
1812
0
    if (verbose && tstate->exc_info != &tstate->exc_state) {
1813
0
        fprintf(stderr,
1814
0
          "PyThreadState_Clear: warning: thread still has a generator\n");
1815
0
    }
1816
1817
0
    if (tstate->c_profilefunc != NULL) {
1818
0
        FT_ATOMIC_ADD_SSIZE(tstate->interp->sys_profiling_threads, -1);
1819
0
        tstate->c_profilefunc = NULL;
1820
0
    }
1821
0
    if (tstate->c_tracefunc != NULL) {
1822
0
        FT_ATOMIC_ADD_SSIZE(tstate->interp->sys_tracing_threads, -1);
1823
0
        tstate->c_tracefunc = NULL;
1824
0
    }
1825
1826
0
    Py_CLEAR(tstate->c_profileobj);
1827
0
    Py_CLEAR(tstate->c_traceobj);
1828
1829
0
    Py_CLEAR(tstate->async_gen_firstiter);
1830
0
    Py_CLEAR(tstate->async_gen_finalizer);
1831
1832
0
    Py_CLEAR(tstate->context);
1833
1834
#ifdef Py_GIL_DISABLED
1835
    // Each thread should clear own freelists in free-threading builds.
1836
    struct _Py_freelists *freelists = _Py_freelists_GET();
1837
    _PyObject_ClearFreeLists(freelists, 1);
1838
1839
    // Flush the thread's local GC allocation count to the global count
1840
    // before the thread state is cleared, otherwise the count is lost.
1841
    _PyThreadStateImpl *tstate_impl = (_PyThreadStateImpl *)tstate;
1842
    _Py_atomic_add_int(&tstate->interp->gc.young.count,
1843
                       (int)tstate_impl->gc.alloc_count);
1844
    tstate_impl->gc.alloc_count = 0;
1845
1846
    // Merge our thread-local refcounts into the type's own refcount and
1847
    // free our local refcount array.
1848
    _PyObject_FinalizePerThreadRefcounts(tstate_impl);
1849
1850
    // Remove ourself from the biased reference counting table of threads.
1851
    _Py_brc_remove_thread(tstate);
1852
1853
    // Release our thread-local copies of the bytecode for reuse by another
1854
    // thread
1855
    _Py_ClearTLBCIndex(tstate_impl);
1856
#endif
1857
1858
    // Merge our queue of pointers to be freed into the interpreter queue.
1859
0
    _PyMem_AbandonDelayed(tstate);
1860
1861
0
    _PyThreadState_ClearMimallocHeaps(tstate);
1862
1863
#ifdef _Py_TIER2
1864
    _PyJit_TracerFree((_PyThreadStateImpl *)tstate);
1865
#endif
1866
1867
0
    tstate->_status.cleared = 1;
1868
1869
    // XXX Call _PyThreadStateSwap(runtime, NULL) here if "current".
1870
    // XXX Do it as early in the function as possible.
1871
0
}
1872
1873
static void
1874
decrement_stoptheworld_countdown(struct _stoptheworld_state *stw);
1875
1876
/* Common code for PyThreadState_Delete() and PyThreadState_DeleteCurrent() */
1877
static void
1878
tstate_delete_common(PyThreadState *tstate, int release_gil)
1879
0
{
1880
0
    assert(tstate->_status.cleared && !tstate->_status.finalized);
1881
0
    tstate_verify_not_active(tstate);
1882
0
    assert(!_PyThreadState_IsRunningMain(tstate));
1883
1884
0
    PyInterpreterState *interp = tstate->interp;
1885
0
    if (interp == NULL) {
1886
0
        Py_FatalError("NULL interpreter");
1887
0
    }
1888
0
    _PyRuntimeState *runtime = interp->runtime;
1889
1890
0
    HEAD_LOCK(runtime);
1891
0
    if (tstate->prev) {
1892
0
        tstate->prev->next = tstate->next;
1893
0
    }
1894
0
    else {
1895
0
        interp->threads.head = tstate->next;
1896
0
    }
1897
0
    if (tstate->next) {
1898
0
        tstate->next->prev = tstate->prev;
1899
0
    }
1900
0
    if (tstate->state != _Py_THREAD_SUSPENDED) {
1901
        // Any ongoing stop-the-world request should not wait for us because
1902
        // our thread is getting deleted.
1903
0
        if (interp->stoptheworld.requested) {
1904
0
            decrement_stoptheworld_countdown(&interp->stoptheworld);
1905
0
        }
1906
0
        if (runtime->stoptheworld.requested) {
1907
0
            decrement_stoptheworld_countdown(&runtime->stoptheworld);
1908
0
        }
1909
0
    }
1910
1911
#if defined(Py_REF_DEBUG) && defined(Py_GIL_DISABLED)
1912
    // Add our portion of the total refcount to the interpreter's total.
1913
    _PyThreadStateImpl *tstate_impl = (_PyThreadStateImpl *)tstate;
1914
    tstate->interp->object_state.reftotal += tstate_impl->reftotal;
1915
    tstate_impl->reftotal = 0;
1916
    assert(tstate_impl->refcounts.values == NULL);
1917
#endif
1918
1919
#if _Py_TIER2
1920
    _PyJit_TracerFree((_PyThreadStateImpl *)tstate);
1921
#endif
1922
1923
0
    HEAD_UNLOCK(runtime);
1924
1925
    // XXX Unbind in PyThreadState_Clear(), or earlier
1926
    // (and assert not-equal here)?
1927
0
    if (tstate->_status.bound_gilstate) {
1928
0
        unbind_gilstate_tstate(tstate);
1929
0
    }
1930
0
    if (tstate->_status.bound) {
1931
0
        unbind_tstate(tstate);
1932
0
    }
1933
1934
    // XXX Move to PyThreadState_Clear()?
1935
0
    clear_datastack(tstate);
1936
1937
0
    if (release_gil) {
1938
0
        _PyEval_ReleaseLock(tstate->interp, tstate, 1);
1939
0
    }
1940
1941
#ifdef Py_GIL_DISABLED
1942
    _Py_qsbr_unregister(tstate);
1943
#endif
1944
1945
0
    tstate->_status.finalized = 1;
1946
0
}
1947
1948
static void
1949
zapthreads(PyInterpreterState *interp)
1950
0
{
1951
0
    PyThreadState *tstate;
1952
    /* No need to lock the mutex here because this should only happen
1953
       when the threads are all really dead (XXX famous last words).
1954
1955
       Cannot use _Py_FOR_EACH_TSTATE_UNLOCKED because we are freeing
1956
       the thread states here.
1957
    */
1958
0
    while ((tstate = interp->threads.head) != NULL) {
1959
0
        tstate_verify_not_active(tstate);
1960
0
        tstate_delete_common(tstate, 0);
1961
0
        free_threadstate((_PyThreadStateImpl *)tstate);
1962
0
    }
1963
0
}
1964
1965
1966
void
1967
PyThreadState_Delete(PyThreadState *tstate)
1968
0
{
1969
0
    _Py_EnsureTstateNotNULL(tstate);
1970
0
    tstate_verify_not_active(tstate);
1971
0
    tstate_delete_common(tstate, 0);
1972
0
    free_threadstate((_PyThreadStateImpl *)tstate);
1973
0
}
1974
1975
1976
void
1977
_PyThreadState_DeleteCurrent(PyThreadState *tstate)
1978
0
{
1979
0
    _Py_EnsureTstateNotNULL(tstate);
1980
#ifdef Py_GIL_DISABLED
1981
    _Py_qsbr_detach(((_PyThreadStateImpl *)tstate)->qsbr);
1982
#endif
1983
#ifdef Py_STATS
1984
    _PyStats_Detach((_PyThreadStateImpl *)tstate);
1985
#endif
1986
0
    current_fast_clear(tstate->interp->runtime);
1987
0
    tstate_delete_common(tstate, 1);  // release GIL as part of call
1988
0
    free_threadstate((_PyThreadStateImpl *)tstate);
1989
0
}
1990
1991
void
1992
PyThreadState_DeleteCurrent(void)
1993
0
{
1994
0
    PyThreadState *tstate = current_fast_get();
1995
0
    _PyThreadState_DeleteCurrent(tstate);
1996
0
}
1997
1998
1999
// Unlinks and removes all thread states from `tstate->interp`, with the
2000
// exception of the one passed as an argument. However, it does not delete
2001
// these thread states. Instead, it returns the removed thread states as a
2002
// linked list.
2003
//
2004
// Note that if there is a current thread state, it *must* be the one
2005
// passed as argument.  Also, this won't touch any interpreters other
2006
// than the current one, since we don't know which thread state should
2007
// be kept in those other interpreters.
2008
PyThreadState *
2009
_PyThreadState_RemoveExcept(PyThreadState *tstate)
2010
0
{
2011
0
    assert(tstate != NULL);
2012
0
    PyInterpreterState *interp = tstate->interp;
2013
0
    _PyRuntimeState *runtime = interp->runtime;
2014
2015
#ifdef Py_GIL_DISABLED
2016
    assert(runtime->stoptheworld.world_stopped);
2017
#endif
2018
2019
0
    HEAD_LOCK(runtime);
2020
    /* Remove all thread states, except tstate, from the linked list of
2021
       thread states. */
2022
0
    PyThreadState *list = interp->threads.head;
2023
0
    if (list == tstate) {
2024
0
        list = tstate->next;
2025
0
    }
2026
0
    if (tstate->prev) {
2027
0
        tstate->prev->next = tstate->next;
2028
0
    }
2029
0
    if (tstate->next) {
2030
0
        tstate->next->prev = tstate->prev;
2031
0
    }
2032
0
    tstate->prev = tstate->next = NULL;
2033
0
    interp->threads.head = tstate;
2034
0
    HEAD_UNLOCK(runtime);
2035
2036
0
    return list;
2037
0
}
2038
2039
// Deletes the thread states in the linked list `list`.
2040
//
2041
// This is intended to be used in conjunction with _PyThreadState_RemoveExcept.
2042
//
2043
// If `is_after_fork` is true, the thread states are immediately freed.
2044
// Otherwise, they are decref'd because they may still be referenced by an
2045
// OS thread.
2046
void
2047
_PyThreadState_DeleteList(PyThreadState *list, int is_after_fork)
2048
0
{
2049
    // The world can't be stopped because we PyThreadState_Clear() can
2050
    // call destructors.
2051
0
    assert(!_PyRuntime.stoptheworld.world_stopped);
2052
2053
0
    PyThreadState *p, *next;
2054
0
    for (p = list; p; p = next) {
2055
0
        next = p->next;
2056
0
        PyThreadState_Clear(p);
2057
0
        if (is_after_fork) {
2058
0
            free_threadstate((_PyThreadStateImpl *)p);
2059
0
        }
2060
0
        else {
2061
0
            decref_threadstate((_PyThreadStateImpl *)p);
2062
0
        }
2063
0
    }
2064
0
}
2065
2066
2067
//----------
2068
// accessors
2069
//----------
2070
2071
/* An extension mechanism to store arbitrary additional per-thread state.
2072
   PyThreadState_GetDict() returns a dictionary that can be used to hold such
2073
   state; the caller should pick a unique key and store its state there.  If
2074
   PyThreadState_GetDict() returns NULL, an exception has *not* been raised
2075
   and the caller should assume no per-thread state is available. */
2076
2077
PyObject *
2078
_PyThreadState_GetDict(PyThreadState *tstate)
2079
7.45M
{
2080
7.45M
    assert(tstate != NULL);
2081
7.45M
    if (tstate->dict == NULL) {
2082
4
        tstate->dict = PyDict_New();
2083
4
        if (tstate->dict == NULL) {
2084
0
            _PyErr_Clear(tstate);
2085
0
        }
2086
4
    }
2087
7.45M
    return tstate->dict;
2088
7.45M
}
2089
2090
2091
PyObject *
2092
PyThreadState_GetDict(void)
2093
7.45M
{
2094
7.45M
    PyThreadState *tstate = current_fast_get();
2095
7.45M
    if (tstate == NULL) {
2096
0
        return NULL;
2097
0
    }
2098
7.45M
    return _PyThreadState_GetDict(tstate);
2099
7.45M
}
2100
2101
2102
PyInterpreterState *
2103
PyThreadState_GetInterpreter(PyThreadState *tstate)
2104
0
{
2105
0
    assert(tstate != NULL);
2106
0
    return tstate->interp;
2107
0
}
2108
2109
2110
PyFrameObject*
2111
PyThreadState_GetFrame(PyThreadState *tstate)
2112
701k
{
2113
701k
    assert(tstate != NULL);
2114
701k
    _PyInterpreterFrame *f = _PyThreadState_GetFrame(tstate);
2115
701k
    if (f == NULL) {
2116
0
        return NULL;
2117
0
    }
2118
701k
    PyFrameObject *frame = _PyFrame_GetFrameObject(f);
2119
701k
    if (frame == NULL) {
2120
0
        PyErr_Clear();
2121
0
    }
2122
701k
    return (PyFrameObject*)Py_XNewRef(frame);
2123
701k
}
2124
2125
2126
uint64_t
2127
PyThreadState_GetID(PyThreadState *tstate)
2128
0
{
2129
0
    assert(tstate != NULL);
2130
0
    return tstate->id;
2131
0
}
2132
2133
2134
static inline void
2135
tstate_activate(PyThreadState *tstate)
2136
2.81M
{
2137
2.81M
    assert(tstate != NULL);
2138
    // XXX assert(tstate_is_alive(tstate));
2139
2.81M
    assert(tstate_is_bound(tstate));
2140
2.81M
    assert(!tstate->_status.active);
2141
2142
2.81M
    assert(!tstate->_status.bound_gilstate ||
2143
2.81M
           tstate == gilstate_get());
2144
2.81M
    if (!tstate->_status.bound_gilstate) {
2145
0
        bind_gilstate_tstate(tstate);
2146
0
    }
2147
2148
2.81M
    tstate->_status.active = 1;
2149
2.81M
}
2150
2151
static inline void
2152
tstate_deactivate(PyThreadState *tstate)
2153
2.81M
{
2154
2.81M
    assert(tstate != NULL);
2155
    // XXX assert(tstate_is_alive(tstate));
2156
2.81M
    assert(tstate_is_bound(tstate));
2157
2.81M
    assert(tstate->_status.active);
2158
2159
#if Py_STATS
2160
    _PyStats_Detach((_PyThreadStateImpl *)tstate);
2161
#endif
2162
2163
2.81M
    tstate->_status.active = 0;
2164
2165
    // We do not unbind the gilstate tstate here.
2166
    // It will still be used in PyGILState_Ensure().
2167
2.81M
}
2168
2169
static int
2170
tstate_try_attach(PyThreadState *tstate)
2171
2.81M
{
2172
#ifdef Py_GIL_DISABLED
2173
    int expected = _Py_THREAD_DETACHED;
2174
    return _Py_atomic_compare_exchange_int(&tstate->state,
2175
                                           &expected,
2176
                                           _Py_THREAD_ATTACHED);
2177
#else
2178
2.81M
    assert(tstate->state == _Py_THREAD_DETACHED);
2179
2.81M
    tstate->state = _Py_THREAD_ATTACHED;
2180
2.81M
    return 1;
2181
2.81M
#endif
2182
2.81M
}
2183
2184
static void
2185
tstate_set_detached(PyThreadState *tstate, int detached_state)
2186
2.81M
{
2187
2.81M
    assert(_Py_atomic_load_int_relaxed(&tstate->state) == _Py_THREAD_ATTACHED);
2188
#ifdef Py_GIL_DISABLED
2189
    _Py_atomic_store_int(&tstate->state, detached_state);
2190
#else
2191
2.81M
    tstate->state = detached_state;
2192
2.81M
#endif
2193
2.81M
}
2194
2195
static void
2196
tstate_wait_attach(PyThreadState *tstate)
2197
0
{
2198
0
    do {
2199
0
        int state = _Py_atomic_load_int_relaxed(&tstate->state);
2200
0
        if (state == _Py_THREAD_SUSPENDED) {
2201
            // Wait until we're switched out of SUSPENDED to DETACHED.
2202
0
            _PyParkingLot_Park(&tstate->state, &state, sizeof(tstate->state),
2203
0
                               /*timeout=*/-1, NULL, /*detach=*/0);
2204
0
        }
2205
0
        else if (state == _Py_THREAD_SHUTTING_DOWN) {
2206
            // We're shutting down, so we can't attach.
2207
0
            _PyThreadState_HangThread(tstate);
2208
0
        }
2209
0
        else {
2210
0
            assert(state == _Py_THREAD_DETACHED);
2211
0
        }
2212
        // Once we're back in DETACHED we can re-attach
2213
0
    } while (!tstate_try_attach(tstate));
2214
0
}
2215
2216
void
2217
_PyThreadState_Attach(PyThreadState *tstate)
2218
2.81M
{
2219
#if defined(Py_DEBUG)
2220
    // This is called from PyEval_RestoreThread(). Similar
2221
    // to it, we need to ensure errno doesn't change.
2222
    int err = errno;
2223
#endif
2224
2225
2.81M
    _Py_EnsureTstateNotNULL(tstate);
2226
2.81M
    if (current_fast_get() != NULL) {
2227
0
        Py_FatalError("non-NULL old thread state");
2228
0
    }
2229
2.81M
    _PyThreadStateImpl *_tstate = (_PyThreadStateImpl *)tstate;
2230
2.81M
    if (_tstate->c_stack_hard_limit == 0) {
2231
36
        _Py_InitializeRecursionLimits(tstate);
2232
36
    }
2233
2234
2.81M
    while (1) {
2235
2.81M
        _PyEval_AcquireLock(tstate);
2236
2237
        // XXX assert(tstate_is_alive(tstate));
2238
2.81M
        current_fast_set(&_PyRuntime, tstate);
2239
2.81M
        if (!tstate_try_attach(tstate)) {
2240
0
            tstate_wait_attach(tstate);
2241
0
        }
2242
2.81M
        tstate_activate(tstate);
2243
2244
#ifdef Py_GIL_DISABLED
2245
        if (_PyEval_IsGILEnabled(tstate) && !tstate->holds_gil) {
2246
            // The GIL was enabled between our call to _PyEval_AcquireLock()
2247
            // and when we attached (the GIL can't go from enabled to disabled
2248
            // here because only a thread holding the GIL can disable
2249
            // it). Detach and try again.
2250
            tstate_set_detached(tstate, _Py_THREAD_DETACHED);
2251
            tstate_deactivate(tstate);
2252
            current_fast_clear(&_PyRuntime);
2253
            continue;
2254
        }
2255
        _Py_qsbr_attach(((_PyThreadStateImpl *)tstate)->qsbr);
2256
#endif
2257
2.81M
        break;
2258
2.81M
    }
2259
2260
    // Resume previous critical section. This acquires the lock(s) from the
2261
    // top-most critical section.
2262
2.81M
    if (tstate->critical_section != 0) {
2263
0
        _PyCriticalSection_Resume(tstate);
2264
0
    }
2265
2266
#ifdef Py_STATS
2267
    _PyStats_Attach((_PyThreadStateImpl *)tstate);
2268
#endif
2269
2270
#if defined(Py_DEBUG)
2271
    errno = err;
2272
#endif
2273
2.81M
}
2274
2275
static void
2276
detach_thread(PyThreadState *tstate, int detached_state)
2277
2.81M
{
2278
    // XXX assert(tstate_is_alive(tstate) && tstate_is_bound(tstate));
2279
2.81M
    assert(_Py_atomic_load_int_relaxed(&tstate->state) == _Py_THREAD_ATTACHED);
2280
2.81M
    assert(tstate == current_fast_get());
2281
2.81M
    if (tstate->critical_section != 0) {
2282
0
        _PyCriticalSection_SuspendAll(tstate);
2283
0
    }
2284
#ifdef Py_GIL_DISABLED
2285
    _Py_qsbr_detach(((_PyThreadStateImpl *)tstate)->qsbr);
2286
#endif
2287
2.81M
    tstate_deactivate(tstate);
2288
2.81M
    tstate_set_detached(tstate, detached_state);
2289
2.81M
    current_fast_clear(&_PyRuntime);
2290
2.81M
    _PyEval_ReleaseLock(tstate->interp, tstate, 0);
2291
2.81M
}
2292
2293
void
2294
_PyThreadState_Detach(PyThreadState *tstate)
2295
2.81M
{
2296
2.81M
    detach_thread(tstate, _Py_THREAD_DETACHED);
2297
2.81M
}
2298
2299
void
2300
_PyThreadState_Suspend(PyThreadState *tstate)
2301
0
{
2302
0
    _PyRuntimeState *runtime = &_PyRuntime;
2303
2304
0
    assert(_Py_atomic_load_int_relaxed(&tstate->state) == _Py_THREAD_ATTACHED);
2305
2306
0
    struct _stoptheworld_state *stw = NULL;
2307
0
    HEAD_LOCK(runtime);
2308
0
    if (runtime->stoptheworld.requested) {
2309
0
        stw = &runtime->stoptheworld;
2310
0
    }
2311
0
    else if (tstate->interp->stoptheworld.requested) {
2312
0
        stw = &tstate->interp->stoptheworld;
2313
0
    }
2314
0
    HEAD_UNLOCK(runtime);
2315
2316
0
    if (stw == NULL) {
2317
        // Switch directly to "detached" if there is no active stop-the-world
2318
        // request.
2319
0
        detach_thread(tstate, _Py_THREAD_DETACHED);
2320
0
        return;
2321
0
    }
2322
2323
    // Switch to "suspended" state.
2324
0
    detach_thread(tstate, _Py_THREAD_SUSPENDED);
2325
2326
    // Decrease the count of remaining threads needing to park.
2327
0
    HEAD_LOCK(runtime);
2328
0
    decrement_stoptheworld_countdown(stw);
2329
0
    HEAD_UNLOCK(runtime);
2330
0
}
2331
2332
void
2333
_PyThreadState_SetShuttingDown(PyThreadState *tstate)
2334
0
{
2335
0
    _Py_atomic_store_int(&tstate->state, _Py_THREAD_SHUTTING_DOWN);
2336
#ifdef Py_GIL_DISABLED
2337
    _PyParkingLot_UnparkAll(&tstate->state);
2338
#endif
2339
0
}
2340
2341
// Decrease stop-the-world counter of remaining number of threads that need to
2342
// pause. If we are the final thread to pause, notify the requesting thread.
2343
static void
2344
decrement_stoptheworld_countdown(struct _stoptheworld_state *stw)
2345
0
{
2346
0
    assert(stw->thread_countdown > 0);
2347
0
    if (--stw->thread_countdown == 0) {
2348
0
        _PyEvent_Notify(&stw->stop_event);
2349
0
    }
2350
0
}
2351
2352
#ifdef Py_GIL_DISABLED
2353
// Interpreter for _Py_FOR_EACH_STW_INTERP(). For global stop-the-world events,
2354
// we start with the first interpreter and then iterate over all interpreters.
2355
// For per-interpreter stop-the-world events, we only operate on the one
2356
// interpreter.
2357
static PyInterpreterState *
2358
interp_for_stop_the_world(struct _stoptheworld_state *stw)
2359
{
2360
    return (stw->is_global
2361
        ? PyInterpreterState_Head()
2362
        : _Py_CONTAINER_OF(stw, PyInterpreterState, stoptheworld));
2363
}
2364
2365
// Loops over threads for a stop-the-world event.
2366
// For global: all threads in all interpreters
2367
// For per-interpreter: all threads in the interpreter
2368
#define _Py_FOR_EACH_STW_INTERP(stw, i)                                     \
2369
    for (PyInterpreterState *i = interp_for_stop_the_world((stw));          \
2370
            i != NULL; i = ((stw->is_global) ? i->next : NULL))
2371
2372
2373
// Try to transition threads atomically from the "detached" state to the
2374
// "gc stopped" state. Returns true if all threads are in the "gc stopped"
2375
static bool
2376
park_detached_threads(struct _stoptheworld_state *stw)
2377
{
2378
    int num_parked = 0;
2379
    _Py_FOR_EACH_STW_INTERP(stw, i) {
2380
        _Py_FOR_EACH_TSTATE_UNLOCKED(i, t) {
2381
            int state = _Py_atomic_load_int_relaxed(&t->state);
2382
            if (state == _Py_THREAD_DETACHED) {
2383
                // Atomically transition to "suspended" if in "detached" state.
2384
                if (_Py_atomic_compare_exchange_int(
2385
                                &t->state, &state, _Py_THREAD_SUSPENDED)) {
2386
                    num_parked++;
2387
                }
2388
            }
2389
            else if (state == _Py_THREAD_ATTACHED && t != stw->requester) {
2390
                _Py_set_eval_breaker_bit(t, _PY_EVAL_PLEASE_STOP_BIT);
2391
            }
2392
        }
2393
    }
2394
    stw->thread_countdown -= num_parked;
2395
    assert(stw->thread_countdown >= 0);
2396
    return num_parked > 0 && stw->thread_countdown == 0;
2397
}
2398
2399
static void
2400
stop_the_world(struct _stoptheworld_state *stw)
2401
{
2402
    _PyRuntimeState *runtime = &_PyRuntime;
2403
2404
    // gh-137433: Acquire the rwmutex first to avoid deadlocks with daemon
2405
    // threads that may hang when blocked on lock acquisition.
2406
    if (stw->is_global) {
2407
        _PyRWMutex_Lock(&runtime->stoptheworld_mutex);
2408
    }
2409
    else {
2410
        _PyRWMutex_RLock(&runtime->stoptheworld_mutex);
2411
    }
2412
    PyMutex_Lock(&stw->mutex);
2413
2414
    HEAD_LOCK(runtime);
2415
    stw->requested = 1;
2416
    stw->thread_countdown = 0;
2417
    stw->stop_event = (PyEvent){0};  // zero-initialize (unset)
2418
    stw->requester = _PyThreadState_GET();  // may be NULL
2419
    FT_STAT_WORLD_STOP_INC();
2420
2421
    _Py_FOR_EACH_STW_INTERP(stw, i) {
2422
        _Py_FOR_EACH_TSTATE_UNLOCKED(i, t) {
2423
            if (t != stw->requester) {
2424
                // Count all the other threads (we don't wait on ourself).
2425
                stw->thread_countdown++;
2426
            }
2427
        }
2428
    }
2429
2430
    if (stw->thread_countdown == 0) {
2431
        HEAD_UNLOCK(runtime);
2432
        stw->world_stopped = 1;
2433
        return;
2434
    }
2435
2436
    for (;;) {
2437
        // Switch threads that are detached to the GC stopped state
2438
        bool stopped_all_threads = park_detached_threads(stw);
2439
        HEAD_UNLOCK(runtime);
2440
2441
        if (stopped_all_threads) {
2442
            break;
2443
        }
2444
2445
        PyTime_t wait_ns = 1000*1000;  // 1ms (arbitrary, may need tuning)
2446
        int detach = 0;
2447
        if (PyEvent_WaitTimed(&stw->stop_event, wait_ns, detach)) {
2448
            assert(stw->thread_countdown == 0);
2449
            break;
2450
        }
2451
2452
        HEAD_LOCK(runtime);
2453
    }
2454
    stw->world_stopped = 1;
2455
}
2456
2457
static void
2458
start_the_world(struct _stoptheworld_state *stw)
2459
{
2460
    _PyRuntimeState *runtime = &_PyRuntime;
2461
    assert(PyMutex_IsLocked(&stw->mutex));
2462
2463
    HEAD_LOCK(runtime);
2464
    stw->requested = 0;
2465
    stw->world_stopped = 0;
2466
    // Switch threads back to the detached state.
2467
    _Py_FOR_EACH_STW_INTERP(stw, i) {
2468
        _Py_FOR_EACH_TSTATE_UNLOCKED(i, t) {
2469
            if (t != stw->requester) {
2470
                assert(_Py_atomic_load_int_relaxed(&t->state) ==
2471
                       _Py_THREAD_SUSPENDED);
2472
                _Py_atomic_store_int(&t->state, _Py_THREAD_DETACHED);
2473
                _PyParkingLot_UnparkAll(&t->state);
2474
            }
2475
        }
2476
    }
2477
    stw->requester = NULL;
2478
    HEAD_UNLOCK(runtime);
2479
    PyMutex_Unlock(&stw->mutex);
2480
    if (stw->is_global) {
2481
        _PyRWMutex_Unlock(&runtime->stoptheworld_mutex);
2482
    }
2483
    else {
2484
        _PyRWMutex_RUnlock(&runtime->stoptheworld_mutex);
2485
    }
2486
}
2487
#endif  // Py_GIL_DISABLED
2488
2489
void
2490
_PyEval_StopTheWorldAll(_PyRuntimeState *runtime)
2491
0
{
2492
#ifdef Py_GIL_DISABLED
2493
    stop_the_world(&runtime->stoptheworld);
2494
#endif
2495
0
}
2496
2497
void
2498
_PyEval_StartTheWorldAll(_PyRuntimeState *runtime)
2499
0
{
2500
#ifdef Py_GIL_DISABLED
2501
    start_the_world(&runtime->stoptheworld);
2502
#endif
2503
0
}
2504
2505
void
2506
_PyEval_StopTheWorld(PyInterpreterState *interp)
2507
7.65k
{
2508
#ifdef Py_GIL_DISABLED
2509
    stop_the_world(&interp->stoptheworld);
2510
#endif
2511
7.65k
}
2512
2513
void
2514
_PyEval_StartTheWorld(PyInterpreterState *interp)
2515
7.65k
{
2516
#ifdef Py_GIL_DISABLED
2517
    start_the_world(&interp->stoptheworld);
2518
#endif
2519
7.65k
}
2520
2521
//----------
2522
// other API
2523
//----------
2524
2525
/* Asynchronously raise an exception in a thread.
2526
   Requested by Just van Rossum and Alex Martelli.
2527
   To prevent naive misuse, you must write your own extension
2528
   to call this, or use ctypes.  Must be called with the GIL held.
2529
   Returns the number of tstates modified (normally 1, but 0 if `id` didn't
2530
   match any known thread id).  Can be called with exc=NULL to clear an
2531
   existing async exception.  This raises no exceptions. */
2532
2533
// XXX Move this to Python/ceval_gil.c?
2534
// XXX Deprecate this.
2535
int
2536
PyThreadState_SetAsyncExc(unsigned long id, PyObject *exc)
2537
0
{
2538
0
    PyInterpreterState *interp = _PyInterpreterState_GET();
2539
2540
    /* Although the GIL is held, a few C API functions can be called
2541
     * without the GIL held, and in particular some that create and
2542
     * destroy thread and interpreter states.  Those can mutate the
2543
     * list of thread states we're traversing, so to prevent that we lock
2544
     * head_mutex for the duration.
2545
     */
2546
0
    PyThreadState *tstate = NULL;
2547
0
    _Py_FOR_EACH_TSTATE_BEGIN(interp, t) {
2548
0
        if (t->thread_id == id) {
2549
0
            tstate = t;
2550
0
            break;
2551
0
        }
2552
0
    }
2553
0
    _Py_FOR_EACH_TSTATE_END(interp);
2554
2555
0
    if (tstate != NULL) {
2556
        /* Tricky:  we need to decref the current value
2557
         * (if any) in tstate->async_exc, but that can in turn
2558
         * allow arbitrary Python code to run, including
2559
         * perhaps calls to this function.  To prevent
2560
         * deadlock, we need to release head_mutex before
2561
         * the decref.
2562
         */
2563
0
        Py_XINCREF(exc);
2564
0
        PyObject *old_exc = _Py_atomic_exchange_ptr(&tstate->async_exc, exc);
2565
2566
0
        Py_XDECREF(old_exc);
2567
0
        _Py_set_eval_breaker_bit(tstate, _PY_ASYNC_EXCEPTION_BIT);
2568
0
    }
2569
2570
0
    return tstate != NULL;
2571
0
}
2572
2573
//---------------------------------
2574
// API for the current thread state
2575
//---------------------------------
2576
2577
PyThreadState *
2578
PyThreadState_GetUnchecked(void)
2579
0
{
2580
0
    return current_fast_get();
2581
0
}
2582
2583
2584
PyThreadState *
2585
PyThreadState_Get(void)
2586
142M
{
2587
142M
    PyThreadState *tstate = current_fast_get();
2588
142M
    _Py_EnsureTstateNotNULL(tstate);
2589
142M
    return tstate;
2590
142M
}
2591
2592
PyThreadState *
2593
_PyThreadState_Swap(_PyRuntimeState *runtime, PyThreadState *newts)
2594
0
{
2595
0
    PyThreadState *oldts = current_fast_get();
2596
0
    if (oldts != NULL) {
2597
0
        _PyThreadState_Detach(oldts);
2598
0
    }
2599
0
    if (newts != NULL) {
2600
0
        _PyThreadState_Attach(newts);
2601
0
    }
2602
0
    return oldts;
2603
0
}
2604
2605
PyThreadState *
2606
PyThreadState_Swap(PyThreadState *newts)
2607
0
{
2608
0
    return _PyThreadState_Swap(&_PyRuntime, newts);
2609
0
}
2610
2611
2612
void
2613
_PyThreadState_Bind(PyThreadState *tstate)
2614
36
{
2615
    // gh-104690: If Python is being finalized and PyInterpreterState_Delete()
2616
    // was called, tstate becomes a dangling pointer.
2617
36
    assert(_PyThreadState_CheckConsistency(tstate));
2618
2619
36
    bind_tstate(tstate);
2620
    // This makes sure there's a gilstate tstate bound
2621
    // as soon as possible.
2622
36
    if (gilstate_get() == NULL) {
2623
36
        bind_gilstate_tstate(tstate);
2624
36
    }
2625
36
}
2626
2627
#if defined(Py_GIL_DISABLED) && !defined(Py_LIMITED_API)
2628
uintptr_t
2629
_Py_GetThreadLocal_Addr(void)
2630
{
2631
    // gh-112535: Use the address of the thread-local PyThreadState variable as
2632
    // a unique identifier for the current thread. Each thread has a unique
2633
    // _Py_tss_tstate variable with a unique address.
2634
    return (uintptr_t)&_Py_tss_tstate;
2635
}
2636
#endif
2637
2638
/***********************************/
2639
/* routines for advanced debuggers */
2640
/***********************************/
2641
2642
// (requested by David Beazley)
2643
// Don't use unless you know what you are doing!
2644
2645
PyInterpreterState *
2646
PyInterpreterState_Head(void)
2647
0
{
2648
0
    return _PyRuntime.interpreters.head;
2649
0
}
2650
2651
PyInterpreterState *
2652
PyInterpreterState_Main(void)
2653
0
{
2654
0
    return _PyInterpreterState_Main();
2655
0
}
2656
2657
PyInterpreterState *
2658
0
PyInterpreterState_Next(PyInterpreterState *interp) {
2659
0
    return interp->next;
2660
0
}
2661
2662
PyThreadState *
2663
21.0k
PyInterpreterState_ThreadHead(PyInterpreterState *interp) {
2664
21.0k
    return interp->threads.head;
2665
21.0k
}
2666
2667
PyThreadState *
2668
21.0k
PyThreadState_Next(PyThreadState *tstate) {
2669
21.0k
    return tstate->next;
2670
21.0k
}
2671
2672
2673
/********************************************/
2674
/* reporting execution state of all threads */
2675
/********************************************/
2676
2677
/* The implementation of sys._current_frames().  This is intended to be
2678
   called with the GIL held, as it will be when called via
2679
   sys._current_frames().  It's possible it would work fine even without
2680
   the GIL held, but haven't thought enough about that.
2681
*/
2682
PyObject *
2683
_PyThread_CurrentFrames(void)
2684
0
{
2685
0
    _PyRuntimeState *runtime = &_PyRuntime;
2686
0
    PyThreadState *tstate = current_fast_get();
2687
0
    if (_PySys_Audit(tstate, "sys._current_frames", NULL) < 0) {
2688
0
        return NULL;
2689
0
    }
2690
2691
0
    PyObject *result = PyDict_New();
2692
0
    if (result == NULL) {
2693
0
        return NULL;
2694
0
    }
2695
2696
    /* for i in all interpreters:
2697
     *     for t in all of i's thread states:
2698
     *          if t's frame isn't NULL, map t's id to its frame
2699
     * Because these lists can mutate even when the GIL is held, we
2700
     * need to grab head_mutex for the duration.
2701
     */
2702
0
    _PyEval_StopTheWorldAll(runtime);
2703
0
    HEAD_LOCK(runtime);
2704
0
    PyInterpreterState *i;
2705
0
    for (i = runtime->interpreters.head; i != NULL; i = i->next) {
2706
0
        _Py_FOR_EACH_TSTATE_UNLOCKED(i, t) {
2707
0
            _PyInterpreterFrame *frame = t->current_frame;
2708
0
            frame = _PyFrame_GetFirstComplete(frame);
2709
0
            if (frame == NULL) {
2710
0
                continue;
2711
0
            }
2712
0
            PyObject *id = PyLong_FromUnsignedLong(t->thread_id);
2713
0
            if (id == NULL) {
2714
0
                goto fail;
2715
0
            }
2716
0
            PyObject *frameobj = (PyObject *)_PyFrame_GetFrameObject(frame);
2717
0
            if (frameobj == NULL) {
2718
0
                Py_DECREF(id);
2719
0
                goto fail;
2720
0
            }
2721
0
            int stat = PyDict_SetItem(result, id, frameobj);
2722
0
            Py_DECREF(id);
2723
0
            if (stat < 0) {
2724
0
                goto fail;
2725
0
            }
2726
0
        }
2727
0
    }
2728
0
    goto done;
2729
2730
0
fail:
2731
0
    Py_CLEAR(result);
2732
2733
0
done:
2734
0
    HEAD_UNLOCK(runtime);
2735
0
    _PyEval_StartTheWorldAll(runtime);
2736
0
    return result;
2737
0
}
2738
2739
/* The implementation of sys._current_exceptions().  This is intended to be
2740
   called with the GIL held, as it will be when called via
2741
   sys._current_exceptions().  It's possible it would work fine even without
2742
   the GIL held, but haven't thought enough about that.
2743
*/
2744
PyObject *
2745
_PyThread_CurrentExceptions(void)
2746
0
{
2747
0
    _PyRuntimeState *runtime = &_PyRuntime;
2748
0
    PyThreadState *tstate = current_fast_get();
2749
2750
0
    _Py_EnsureTstateNotNULL(tstate);
2751
2752
0
    if (_PySys_Audit(tstate, "sys._current_exceptions", NULL) < 0) {
2753
0
        return NULL;
2754
0
    }
2755
2756
0
    PyObject *result = PyDict_New();
2757
0
    if (result == NULL) {
2758
0
        return NULL;
2759
0
    }
2760
2761
    /* for i in all interpreters:
2762
     *     for t in all of i's thread states:
2763
     *          if t's frame isn't NULL, map t's id to its frame
2764
     * Because these lists can mutate even when the GIL is held, we
2765
     * need to grab head_mutex for the duration.
2766
     */
2767
0
    _PyEval_StopTheWorldAll(runtime);
2768
0
    HEAD_LOCK(runtime);
2769
0
    PyInterpreterState *i;
2770
0
    for (i = runtime->interpreters.head; i != NULL; i = i->next) {
2771
0
        _Py_FOR_EACH_TSTATE_UNLOCKED(i, t) {
2772
0
            _PyErr_StackItem *err_info = _PyErr_GetTopmostException(t);
2773
0
            if (err_info == NULL) {
2774
0
                continue;
2775
0
            }
2776
0
            PyObject *id = PyLong_FromUnsignedLong(t->thread_id);
2777
0
            if (id == NULL) {
2778
0
                goto fail;
2779
0
            }
2780
0
            PyObject *exc = err_info->exc_value;
2781
0
            assert(exc == NULL ||
2782
0
                   exc == Py_None ||
2783
0
                   PyExceptionInstance_Check(exc));
2784
2785
0
            int stat = PyDict_SetItem(result, id, exc == NULL ? Py_None : exc);
2786
0
            Py_DECREF(id);
2787
0
            if (stat < 0) {
2788
0
                goto fail;
2789
0
            }
2790
0
        }
2791
0
    }
2792
0
    goto done;
2793
2794
0
fail:
2795
0
    Py_CLEAR(result);
2796
2797
0
done:
2798
0
    HEAD_UNLOCK(runtime);
2799
0
    _PyEval_StartTheWorldAll(runtime);
2800
0
    return result;
2801
0
}
2802
2803
2804
/***********************************/
2805
/* Python "auto thread state" API. */
2806
/***********************************/
2807
2808
/* Internal initialization/finalization functions called by
2809
   Py_Initialize/Py_FinalizeEx
2810
*/
2811
PyStatus
2812
_PyGILState_Init(PyInterpreterState *interp)
2813
36
{
2814
36
    if (!_Py_IsMainInterpreter(interp)) {
2815
        /* Currently, PyGILState is shared by all interpreters. The main
2816
         * interpreter is responsible to initialize it. */
2817
0
        return _PyStatus_OK();
2818
0
    }
2819
36
    _PyRuntimeState *runtime = interp->runtime;
2820
36
    assert(gilstate_get() == NULL);
2821
36
    assert(runtime->gilstate.autoInterpreterState == NULL);
2822
36
    runtime->gilstate.autoInterpreterState = interp;
2823
36
    return _PyStatus_OK();
2824
36
}
2825
2826
void
2827
_PyGILState_Fini(PyInterpreterState *interp)
2828
0
{
2829
0
    if (!_Py_IsMainInterpreter(interp)) {
2830
        /* Currently, PyGILState is shared by all interpreters. The main
2831
         * interpreter is responsible to initialize it. */
2832
0
        return;
2833
0
    }
2834
0
    interp->runtime->gilstate.autoInterpreterState = NULL;
2835
0
}
2836
2837
2838
// XXX Drop this.
2839
void
2840
_PyGILState_SetTstate(PyThreadState *tstate)
2841
36
{
2842
    /* must init with valid states */
2843
36
    assert(tstate != NULL);
2844
36
    assert(tstate->interp != NULL);
2845
2846
36
    if (!_Py_IsMainInterpreter(tstate->interp)) {
2847
        /* Currently, PyGILState is shared by all interpreters. The main
2848
         * interpreter is responsible to initialize it. */
2849
0
        return;
2850
0
    }
2851
2852
#ifndef NDEBUG
2853
    _PyRuntimeState *runtime = tstate->interp->runtime;
2854
2855
    assert(runtime->gilstate.autoInterpreterState == tstate->interp);
2856
    assert(gilstate_get() == tstate);
2857
    assert(tstate->gilstate_counter == 1);
2858
#endif
2859
36
}
2860
2861
PyInterpreterState *
2862
_PyGILState_GetInterpreterStateUnsafe(void)
2863
0
{
2864
0
    return _PyRuntime.gilstate.autoInterpreterState;
2865
0
}
2866
2867
/* The public functions */
2868
2869
PyThreadState *
2870
PyGILState_GetThisThreadState(void)
2871
0
{
2872
0
    return gilstate_get();
2873
0
}
2874
2875
int
2876
PyGILState_Check(void)
2877
0
{
2878
0
    _PyRuntimeState *runtime = &_PyRuntime;
2879
0
    if (!_Py_atomic_load_int_relaxed(&runtime->gilstate.check_enabled)) {
2880
0
        return 1;
2881
0
    }
2882
2883
0
    PyThreadState *tstate = current_fast_get();
2884
0
    if (tstate == NULL) {
2885
0
        return 0;
2886
0
    }
2887
2888
0
    PyThreadState *tcur = gilstate_get();
2889
0
    return (tstate == tcur);
2890
0
}
2891
2892
PyGILState_STATE
2893
PyGILState_Ensure(void)
2894
0
{
2895
0
    _PyRuntimeState *runtime = &_PyRuntime;
2896
2897
    /* Note that we do not auto-init Python here - apart from
2898
       potential races with 2 threads auto-initializing, pep-311
2899
       spells out other issues.  Embedders are expected to have
2900
       called Py_Initialize(). */
2901
2902
    /* Ensure that _PyEval_InitThreads() and _PyGILState_Init() have been
2903
       called by Py_Initialize()
2904
2905
       TODO: This isn't thread-safe. There's no protection here against
2906
       concurrent finalization of the interpreter; it's simply a guard
2907
       for *after* the interpreter has finalized.
2908
     */
2909
0
    if (!_PyEval_ThreadsInitialized() || runtime->gilstate.autoInterpreterState == NULL) {
2910
0
        PyThread_hang_thread();
2911
0
    }
2912
2913
0
    PyThreadState *tcur = gilstate_get();
2914
0
    int has_gil;
2915
0
    if (tcur == NULL) {
2916
        /* Create a new Python thread state for this thread */
2917
        // XXX Use PyInterpreterState_EnsureThreadState()?
2918
0
        tcur = new_threadstate(runtime->gilstate.autoInterpreterState,
2919
0
                               _PyThreadState_WHENCE_GILSTATE);
2920
0
        if (tcur == NULL) {
2921
0
            Py_FatalError("Couldn't create thread-state for new thread");
2922
0
        }
2923
0
        bind_tstate(tcur);
2924
0
        bind_gilstate_tstate(tcur);
2925
2926
        /* This is our thread state!  We'll need to delete it in the
2927
           matching call to PyGILState_Release(). */
2928
0
        assert(tcur->gilstate_counter == 1);
2929
0
        tcur->gilstate_counter = 0;
2930
0
        has_gil = 0; /* new thread state is never current */
2931
0
    }
2932
0
    else {
2933
0
        has_gil = holds_gil(tcur);
2934
0
    }
2935
2936
0
    if (!has_gil) {
2937
0
        PyEval_RestoreThread(tcur);
2938
0
    }
2939
2940
    /* Update our counter in the thread-state - no need for locks:
2941
       - tcur will remain valid as we hold the GIL.
2942
       - the counter is safe as we are the only thread "allowed"
2943
         to modify this value
2944
    */
2945
0
    ++tcur->gilstate_counter;
2946
2947
0
    return has_gil ? PyGILState_LOCKED : PyGILState_UNLOCKED;
2948
0
}
2949
2950
void
2951
PyGILState_Release(PyGILState_STATE oldstate)
2952
0
{
2953
0
    PyThreadState *tstate = gilstate_get();
2954
0
    if (tstate == NULL) {
2955
0
        Py_FatalError("auto-releasing thread-state, "
2956
0
                      "but no thread-state for this thread");
2957
0
    }
2958
2959
    /* We must hold the GIL and have our thread state current */
2960
0
    if (!holds_gil(tstate)) {
2961
0
        _Py_FatalErrorFormat(__func__,
2962
0
                             "thread state %p must be current when releasing",
2963
0
                             tstate);
2964
0
    }
2965
0
    --tstate->gilstate_counter;
2966
0
    assert(tstate->gilstate_counter >= 0); /* illegal counter value */
2967
2968
    /* If we're going to destroy this thread-state, we must
2969
     * clear it while the GIL is held, as destructors may run.
2970
     */
2971
0
    if (tstate->gilstate_counter == 0) {
2972
        /* can't have been locked when we created it */
2973
0
        assert(oldstate == PyGILState_UNLOCKED);
2974
        // XXX Unbind tstate here.
2975
        // gh-119585: `PyThreadState_Clear()` may call destructors that
2976
        // themselves use PyGILState_Ensure and PyGILState_Release, so make
2977
        // sure that gilstate_counter is not zero when calling it.
2978
0
        ++tstate->gilstate_counter;
2979
0
        PyThreadState_Clear(tstate);
2980
0
        --tstate->gilstate_counter;
2981
        /* Delete the thread-state.  Note this releases the GIL too!
2982
         * It's vital that the GIL be held here, to avoid shutdown
2983
         * races; see bugs 225673 and 1061968 (that nasty bug has a
2984
         * habit of coming back).
2985
         */
2986
0
        assert(tstate->gilstate_counter == 0);
2987
0
        assert(current_fast_get() == tstate);
2988
0
        _PyThreadState_DeleteCurrent(tstate);
2989
0
    }
2990
    /* Release the lock if necessary */
2991
0
    else if (oldstate == PyGILState_UNLOCKED) {
2992
0
        PyEval_SaveThread();
2993
0
    }
2994
0
}
2995
2996
2997
/*************/
2998
/* Other API */
2999
/*************/
3000
3001
_PyFrameEvalFunction
3002
_PyInterpreterState_GetEvalFrameFunc(PyInterpreterState *interp)
3003
0
{
3004
0
    if (interp->eval_frame == NULL) {
3005
0
        return _PyEval_EvalFrameDefault;
3006
0
    }
3007
0
    return interp->eval_frame;
3008
0
}
3009
3010
3011
void
3012
_PyInterpreterState_SetEvalFrameFunc(PyInterpreterState *interp,
3013
                                     _PyFrameEvalFunction eval_frame)
3014
0
{
3015
0
    if (eval_frame == _PyEval_EvalFrameDefault) {
3016
0
        eval_frame = NULL;
3017
0
    }
3018
0
    if (eval_frame == interp->eval_frame) {
3019
0
        return;
3020
0
    }
3021
#ifdef _Py_TIER2
3022
    if (eval_frame != NULL) {
3023
        _Py_Executors_InvalidateAll(interp, 1);
3024
    }
3025
#endif
3026
0
    RARE_EVENT_INC(set_eval_frame_func);
3027
0
    _PyEval_StopTheWorld(interp);
3028
0
    interp->eval_frame = eval_frame;
3029
0
    _PyEval_StartTheWorld(interp);
3030
0
}
3031
3032
3033
const PyConfig*
3034
_PyInterpreterState_GetConfig(PyInterpreterState *interp)
3035
99.1M
{
3036
99.1M
    return &interp->config;
3037
99.1M
}
3038
3039
3040
const PyConfig*
3041
_Py_GetConfig(void)
3042
202k
{
3043
202k
    PyThreadState *tstate = current_fast_get();
3044
202k
    _Py_EnsureTstateNotNULL(tstate);
3045
202k
    return _PyInterpreterState_GetConfig(tstate->interp);
3046
202k
}
3047
3048
3049
int
3050
_PyInterpreterState_HasFeature(PyInterpreterState *interp, unsigned long feature)
3051
0
{
3052
0
    return ((interp->feature_flags & feature) != 0);
3053
0
}
3054
3055
3056
274k
#define MINIMUM_OVERHEAD 1000
3057
3058
static PyObject **
3059
push_chunk(PyThreadState *tstate, int size)
3060
274k
{
3061
274k
    int allocate_size = _PY_DATA_STACK_CHUNK_SIZE;
3062
274k
    while (allocate_size < (int)sizeof(PyObject*)*(size + MINIMUM_OVERHEAD)) {
3063
0
        allocate_size *= 2;
3064
0
    }
3065
274k
    _PyStackChunk *new;
3066
274k
    if (tstate->datastack_cached_chunk != NULL
3067
231k
        && (size_t)allocate_size <= tstate->datastack_cached_chunk->size)
3068
231k
    {
3069
231k
        new = tstate->datastack_cached_chunk;
3070
231k
        tstate->datastack_cached_chunk = NULL;
3071
231k
        new->previous = tstate->datastack_chunk;
3072
231k
        new->top = 0;
3073
231k
    }
3074
43.0k
    else {
3075
43.0k
        new = allocate_chunk(allocate_size, tstate->datastack_chunk);
3076
43.0k
        if (new == NULL) {
3077
0
            return NULL;
3078
0
        }
3079
43.0k
    }
3080
274k
    if (tstate->datastack_chunk) {
3081
274k
        tstate->datastack_chunk->top = tstate->datastack_top -
3082
274k
                                       &tstate->datastack_chunk->data[0];
3083
274k
    }
3084
274k
    tstate->datastack_chunk = new;
3085
274k
    tstate->datastack_limit = (PyObject **)(((char *)new) + allocate_size);
3086
    // When new is the "root" chunk (i.e. new->previous == NULL), we can keep
3087
    // _PyThreadState_PopFrame from freeing it later by "skipping" over the
3088
    // first element:
3089
274k
    PyObject **res = &new->data[new->previous == NULL];
3090
274k
    tstate->datastack_top = res + size;
3091
274k
    return res;
3092
274k
}
3093
3094
_PyInterpreterFrame *
3095
_PyThreadState_PushFrame(PyThreadState *tstate, size_t size)
3096
232M
{
3097
232M
    assert(size < INT_MAX/sizeof(PyObject *));
3098
232M
    if (_PyThreadState_HasStackSpace(tstate, (int)size)) {
3099
232M
        _PyInterpreterFrame *res = (_PyInterpreterFrame *)tstate->datastack_top;
3100
232M
        tstate->datastack_top += size;
3101
232M
        return res;
3102
232M
    }
3103
274k
    return (_PyInterpreterFrame *)push_chunk(tstate, (int)size);
3104
232M
}
3105
3106
void
3107
_PyThreadState_PopFrame(PyThreadState *tstate, _PyInterpreterFrame * frame)
3108
1.06G
{
3109
1.06G
    assert(tstate->datastack_chunk);
3110
1.06G
    PyObject **base = (PyObject **)frame;
3111
1.06G
    if (base == &tstate->datastack_chunk->data[0]) {
3112
274k
        _PyStackChunk *chunk = tstate->datastack_chunk;
3113
274k
        _PyStackChunk *previous = chunk->previous;
3114
274k
        _PyStackChunk *cached = tstate->datastack_cached_chunk;
3115
        // push_chunk ensures that the root chunk is never popped:
3116
274k
        assert(previous);
3117
274k
        tstate->datastack_top = &previous->data[previous->top];
3118
274k
        tstate->datastack_chunk = previous;
3119
274k
        tstate->datastack_limit = (PyObject **)(((char *)previous) + previous->size);
3120
274k
        chunk->previous = NULL;
3121
274k
        if (cached != NULL) {
3122
43.0k
            _PyObject_VirtualFree(cached, cached->size);
3123
43.0k
        }
3124
274k
        tstate->datastack_cached_chunk = chunk;
3125
274k
    }
3126
1.05G
    else {
3127
1.05G
        assert(tstate->datastack_top);
3128
1.05G
        assert(tstate->datastack_top >= base);
3129
1.05G
        tstate->datastack_top = base;
3130
1.05G
    }
3131
1.06G
}
3132
3133
3134
#ifndef NDEBUG
3135
// Check that a Python thread state valid. In practice, this function is used
3136
// on a Python debug build to check if 'tstate' is a dangling pointer, if the
3137
// PyThreadState memory has been freed.
3138
//
3139
// Usage:
3140
//
3141
//     assert(_PyThreadState_CheckConsistency(tstate));
3142
int
3143
_PyThreadState_CheckConsistency(PyThreadState *tstate)
3144
{
3145
    assert(!_PyMem_IsPtrFreed(tstate));
3146
    assert(!_PyMem_IsPtrFreed(tstate->interp));
3147
    return 1;
3148
}
3149
#endif
3150
3151
3152
// Check if a Python thread must call _PyThreadState_HangThread(), rather than
3153
// taking the GIL or attaching to the interpreter if Py_Finalize() has been
3154
// called.
3155
//
3156
// When this function is called by a daemon thread after Py_Finalize() has been
3157
// called, the GIL may no longer exist.
3158
//
3159
// tstate must be non-NULL.
3160
int
3161
_PyThreadState_MustExit(PyThreadState *tstate)
3162
5.62M
{
3163
5.62M
    int state = _Py_atomic_load_int_relaxed(&tstate->state);
3164
5.62M
    return state == _Py_THREAD_SHUTTING_DOWN;
3165
5.62M
}
3166
3167
void
3168
_PyThreadState_HangThread(PyThreadState *tstate)
3169
0
{
3170
0
    _PyThreadStateImpl *tstate_impl = (_PyThreadStateImpl *)tstate;
3171
0
    decref_threadstate(tstate_impl);
3172
0
    PyThread_hang_thread();
3173
0
}
3174
3175
/********************/
3176
/* mimalloc support */
3177
/********************/
3178
3179
static void
3180
tstate_mimalloc_bind(PyThreadState *tstate)
3181
36
{
3182
#ifdef Py_GIL_DISABLED
3183
    struct _mimalloc_thread_state *mts = &((_PyThreadStateImpl*)tstate)->mimalloc;
3184
3185
    // Initialize the mimalloc thread state. This must be called from the
3186
    // same thread that will use the thread state. The "mem" heap doubles as
3187
    // the "backing" heap.
3188
    mi_tld_t *tld = &mts->tld;
3189
    _mi_tld_init(tld, &mts->heaps[_Py_MIMALLOC_HEAP_MEM]);
3190
    llist_init(&mts->page_list);
3191
3192
    // Exiting threads push any remaining in-use segments to the abandoned
3193
    // pool to be re-claimed later by other threads. We use per-interpreter
3194
    // pools to keep Python objects from different interpreters separate.
3195
    tld->segments.abandoned = &tstate->interp->mimalloc.abandoned_pool;
3196
3197
    // Don't fill in the first N bytes up to ob_type in debug builds. We may
3198
    // access ob_tid and the refcount fields in the dict and list lock-less
3199
    // accesses, so they must remain valid for a while after deallocation.
3200
    size_t base_offset = offsetof(PyObject, ob_type);
3201
    if (_PyMem_DebugEnabled()) {
3202
        // The debug allocator adds two words at the beginning of each block.
3203
        base_offset += 2 * sizeof(size_t);
3204
    }
3205
    size_t debug_offsets[_Py_MIMALLOC_HEAP_COUNT] = {
3206
        [_Py_MIMALLOC_HEAP_OBJECT] = base_offset,
3207
        [_Py_MIMALLOC_HEAP_GC] = base_offset,
3208
        [_Py_MIMALLOC_HEAP_GC_PRE] = base_offset + 2 * sizeof(PyObject *),
3209
    };
3210
3211
    // Initialize each heap
3212
    for (uint8_t i = 0; i < _Py_MIMALLOC_HEAP_COUNT; i++) {
3213
        _mi_heap_init_ex(&mts->heaps[i], tld, _mi_arena_id_none(), false, i);
3214
        mts->heaps[i].debug_offset = (uint8_t)debug_offsets[i];
3215
    }
3216
3217
    // Heaps that store Python objects should use QSBR to delay freeing
3218
    // mimalloc pages while there may be concurrent lock-free readers.
3219
    mts->heaps[_Py_MIMALLOC_HEAP_OBJECT].page_use_qsbr = true;
3220
    mts->heaps[_Py_MIMALLOC_HEAP_GC].page_use_qsbr = true;
3221
    mts->heaps[_Py_MIMALLOC_HEAP_GC_PRE].page_use_qsbr = true;
3222
3223
    // By default, object allocations use _Py_MIMALLOC_HEAP_OBJECT.
3224
    // _PyObject_GC_New() and similar functions temporarily override this to
3225
    // use one of the GC heaps.
3226
    mts->current_object_heap = &mts->heaps[_Py_MIMALLOC_HEAP_OBJECT];
3227
3228
    _Py_atomic_store_int(&mts->initialized, 1);
3229
#endif
3230
36
}
3231
3232
void
3233
_PyThreadState_ClearMimallocHeaps(PyThreadState *tstate)
3234
0
{
3235
#ifdef Py_GIL_DISABLED
3236
    if (!tstate->_status.bound) {
3237
        // The mimalloc heaps are only initialized when the thread is bound.
3238
        return;
3239
    }
3240
3241
    _PyThreadStateImpl *tstate_impl = (_PyThreadStateImpl *)tstate;
3242
    for (Py_ssize_t i = 0; i < _Py_MIMALLOC_HEAP_COUNT; i++) {
3243
        // Abandon all segments in use by this thread. This pushes them to
3244
        // a shared pool to later be reclaimed by other threads. It's important
3245
        // to do this before the thread state is destroyed so that objects
3246
        // remain visible to the GC.
3247
        _mi_heap_collect_abandon(&tstate_impl->mimalloc.heaps[i]);
3248
    }
3249
#endif
3250
0
}
3251
3252
3253
int
3254
_Py_IsMainThread(void)
3255
70.9M
{
3256
70.9M
    unsigned long thread = PyThread_get_thread_ident();
3257
70.9M
    return (thread == _PyRuntime.main_thread);
3258
70.9M
}
3259
3260
3261
PyInterpreterState *
3262
_PyInterpreterState_Main(void)
3263
68.1M
{
3264
68.1M
    return _PyRuntime.interpreters.main;
3265
68.1M
}
3266
3267
3268
int
3269
_Py_IsMainInterpreterFinalizing(PyInterpreterState *interp)
3270
0
{
3271
    /* bpo-39877: Access _PyRuntime directly rather than using
3272
       tstate->interp->runtime to support calls from Python daemon threads.
3273
       After Py_Finalize() has been called, tstate can be a dangling pointer:
3274
       point to PyThreadState freed memory. */
3275
0
    return (_PyRuntimeState_GetFinalizing(&_PyRuntime) != NULL &&
3276
0
            interp == &_PyRuntime._main_interpreter);
3277
0
}
3278
3279
3280
const PyConfig *
3281
_Py_GetMainConfig(void)
3282
0
{
3283
0
    PyInterpreterState *interp = _PyInterpreterState_Main();
3284
0
    if (interp == NULL) {
3285
0
        return NULL;
3286
0
    }
3287
0
    return _PyInterpreterState_GetConfig(interp);
3288
0
}