Coverage Report

Created: 2026-01-10 06:41

/src/cpython/Python/ceval_macros.h
Line
Count
Source
1
// Macros and other things needed by ceval.c, and bytecodes.c
2
3
/* Computed GOTOs, or
4
       the-optimization-commonly-but-improperly-known-as-"threaded code"
5
   using gcc's labels-as-values extension
6
   (http://gcc.gnu.org/onlinedocs/gcc/Labels-as-Values.html).
7
8
   The traditional bytecode evaluation loop uses a "switch" statement, which
9
   decent compilers will optimize as a single indirect branch instruction
10
   combined with a lookup table of jump addresses. However, since the
11
   indirect jump instruction is shared by all opcodes, the CPU will have a
12
   hard time making the right prediction for where to jump next (actually,
13
   it will always be wrong except in the uncommon case of a sequence of
14
   several identical opcodes).
15
16
   "Threaded code" in contrast, uses an explicit jump table and an explicit
17
   indirect jump instruction at the end of each opcode. Since the jump
18
   instruction is at a different address for each opcode, the CPU will make a
19
   separate prediction for each of these instructions, which is equivalent to
20
   predicting the second opcode of each opcode pair. These predictions have
21
   a much better chance to turn out valid, especially in small bytecode loops.
22
23
   A mispredicted branch on a modern CPU flushes the whole pipeline and
24
   can cost several CPU cycles (depending on the pipeline depth),
25
   and potentially many more instructions (depending on the pipeline width).
26
   A correctly predicted branch, however, is nearly free.
27
28
   At the time of this writing, the "threaded code" version is up to 15-20%
29
   faster than the normal "switch" version, depending on the compiler and the
30
   CPU architecture.
31
32
   NOTE: care must be taken that the compiler doesn't try to "optimize" the
33
   indirect jumps by sharing them between all opcodes. Such optimizations
34
   can be disabled on gcc by using the -fno-gcse flag (or possibly
35
   -fno-crossjumping).
36
*/
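
The comparison in the comment above is easiest to see in a standalone toy interpreter. The sketch below is illustrative only: it is not CPython's loop, the opcode set and function names are invented for the example, and the threaded variant assumes the GCC/Clang labels-as-values extension mentioned in the comment.

    /* Illustrative sketch, not CPython source: the two dispatch styles. */
    #include <stdio.h>

    enum { OP_INC, OP_DEC, OP_HALT };

    /* "switch" dispatch: one shared indirect jump serves every opcode. */
    static int run_switch(const unsigned char *pc) {
        int acc = 0;
        for (;;) {
            switch (*pc++) {
            case OP_INC: acc++; break;
            case OP_DEC: acc--; break;
            case OP_HALT: return acc;
            }
        }
    }

    /* "Threaded code": a jump table plus one indirect jump at the end of
     * each opcode body, giving the branch predictor a separate prediction
     * site per opcode. Requires GCC/Clang &&label (labels as values). */
    static int run_threaded(const unsigned char *pc) {
        static void *targets[] = { &&do_inc, &&do_dec, &&do_halt };
        int acc = 0;
        goto *targets[*pc++];
    do_inc:  acc++; goto *targets[*pc++];
    do_dec:  acc--; goto *targets[*pc++];
    do_halt: return acc;
    }

    int main(void) {
        const unsigned char code[] = { OP_INC, OP_INC, OP_DEC, OP_HALT };
        printf("%d %d\n", run_switch(code), run_threaded(code));  /* 1 1 */
        return 0;
    }
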
37
38
/* Use macros rather than inline functions, to make it as clear as possible
39
 * to the C compiler that the tracing check is a simple test then branch.
40
 * We want to be sure that the compiler knows this before it generates
41
 * the CFG.
42
 */
43
44
#ifdef WITH_DTRACE
45
#define OR_DTRACE_LINE | (PyDTrace_LINE_ENABLED() ? 255 : 0)
46
#else
47
#define OR_DTRACE_LINE
48
#endif
49
50
#ifdef HAVE_COMPUTED_GOTOS
51
    #ifndef USE_COMPUTED_GOTOS
52
    #define USE_COMPUTED_GOTOS 1
53
    #endif
54
#else
55
    #if defined(USE_COMPUTED_GOTOS) && USE_COMPUTED_GOTOS
56
    #error "Computed gotos are not supported on this compiler."
57
    #endif
58
    #undef USE_COMPUTED_GOTOS
59
    #define USE_COMPUTED_GOTOS 0
60
#endif
61
62
#ifdef Py_STATS
63
#define INSTRUCTION_STATS(op) \
64
    do { \
65
        PyStats *s = _PyStats_GET(); \
66
        OPCODE_EXE_INC(op); \
67
        if (s) s->opcode_stats[lastopcode].pair_count[op]++; \
68
        lastopcode = op; \
69
    } while (0)
70
#else
71
58.1G
#define INSTRUCTION_STATS(op) ((void)0)
72
#endif
73
74
#ifdef Py_STATS
75
#   define TAIL_CALL_PARAMS _PyInterpreterFrame *frame, _PyStackRef *stack_pointer, PyThreadState *tstate, _Py_CODEUNIT *next_instr, const void *instruction_funcptr_table, int oparg, int lastopcode
76
#   define TAIL_CALL_ARGS frame, stack_pointer, tstate, next_instr, instruction_funcptr_table, oparg, lastopcode
77
#else
78
#   define TAIL_CALL_PARAMS _PyInterpreterFrame *frame, _PyStackRef *stack_pointer, PyThreadState *tstate, _Py_CODEUNIT *next_instr, const void *instruction_funcptr_table, int oparg
79
#   define TAIL_CALL_ARGS frame, stack_pointer, tstate, next_instr, instruction_funcptr_table, oparg
80
#endif
81
82
#if _Py_TAIL_CALL_INTERP
83
#   if defined(__clang__) || defined(__GNUC__)
84
#       if !_Py__has_attribute(preserve_none) || !_Py__has_attribute(musttail)
85
#           error "This compiler does not have support for efficient tail calling."
86
#       endif
87
#   elif defined(_MSC_VER) && (_MSC_VER < 1950)
88
#       error "You need at least VS 2026 / PlatformToolset v145 for tail calling."
89
#   endif
90
#   if defined(_MSC_VER) && !defined(__clang__)
91
#      define Py_MUSTTAIL [[msvc::musttail]]
92
#      define Py_PRESERVE_NONE_CC __preserve_none
93
#   else
94
#       define Py_MUSTTAIL __attribute__((musttail))
95
#       define Py_PRESERVE_NONE_CC __attribute__((preserve_none))
96
#   endif
97
    typedef PyObject *(Py_PRESERVE_NONE_CC *py_tail_call_funcptr)(TAIL_CALL_PARAMS);
98
99
#   define DISPATCH_TABLE_VAR instruction_funcptr_table
100
#   define DISPATCH_TABLE instruction_funcptr_handler_table
101
#   define TRACING_DISPATCH_TABLE instruction_funcptr_tracing_table
102
#   define TARGET(op) Py_NO_INLINE PyObject *Py_PRESERVE_NONE_CC _TAIL_CALL_##op(TAIL_CALL_PARAMS)
103
104
#   define DISPATCH_GOTO() \
105
        do { \
106
            Py_MUSTTAIL return (((py_tail_call_funcptr *)instruction_funcptr_table)[opcode])(TAIL_CALL_ARGS); \
107
        } while (0)
108
#   define DISPATCH_GOTO_NON_TRACING() \
109
        do { \
110
            Py_MUSTTAIL return (((py_tail_call_funcptr *)DISPATCH_TABLE)[opcode])(TAIL_CALL_ARGS); \
111
        } while (0)
112
#   define JUMP_TO_LABEL(name) \
113
        do { \
114
            Py_MUSTTAIL return (_TAIL_CALL_##name)(TAIL_CALL_ARGS); \
115
        } while (0)
116
#   ifdef Py_STATS
117
#       define JUMP_TO_PREDICTED(name) \
118
            do { \
119
                Py_MUSTTAIL return (_TAIL_CALL_##name)(frame, stack_pointer, tstate, this_instr, instruction_funcptr_table, oparg, lastopcode); \
120
            } while (0)
121
#   else
122
#       define JUMP_TO_PREDICTED(name) \
123
            do { \
124
                Py_MUSTTAIL return (_TAIL_CALL_##name)(frame, stack_pointer, tstate, this_instr, instruction_funcptr_table, oparg); \
125
            } while (0)
126
#   endif
127
#    define LABEL(name) TARGET(name)
128
#elif USE_COMPUTED_GOTOS
129
#  define DISPATCH_TABLE_VAR opcode_targets
130
6.96M
#  define DISPATCH_TABLE opcode_targets_table
131
#  define TRACING_DISPATCH_TABLE opcode_tracing_targets_table
132
58.1G
#  define TARGET(op) TARGET_##op:
133
58.5G
#  define DISPATCH_GOTO() goto *opcode_targets[opcode]
134
6.96M
#  define DISPATCH_GOTO_NON_TRACING() goto *DISPATCH_TABLE[opcode];
135
109M
#  define JUMP_TO_LABEL(name) goto name;
136
275M
#  define JUMP_TO_PREDICTED(name) goto PREDICTED_##name;
137
482M
#  define LABEL(name) name:
138
#else
139
#  define TARGET(op) case op: TARGET_##op:
140
#  define DISPATCH_GOTO() dispatch_code = opcode | tracing_mode ; goto dispatch_opcode
141
#  define DISPATCH_GOTO_NON_TRACING() dispatch_code = opcode; goto dispatch_opcode
142
#  define JUMP_TO_LABEL(name) goto name;
143
#  define JUMP_TO_PREDICTED(name) goto PREDICTED_##name;
144
#  define LABEL(name) name:
145
#endif
146
147
#if (_Py_TAIL_CALL_INTERP || USE_COMPUTED_GOTOS) && _Py_TIER2
148
#  define IS_JIT_TRACING() (DISPATCH_TABLE_VAR == TRACING_DISPATCH_TABLE)
149
#  define ENTER_TRACING() \
150
    DISPATCH_TABLE_VAR = TRACING_DISPATCH_TABLE;
151
#  define LEAVE_TRACING() \
152
    DISPATCH_TABLE_VAR = DISPATCH_TABLE;
153
#else
154
#  define IS_JIT_TRACING() (tracing_mode != 0)
155
#  define ENTER_TRACING() tracing_mode = 255
156
#  define LEAVE_TRACING() tracing_mode = 0
157
#endif
158
159
/* PRE_DISPATCH_GOTO() does lltrace if enabled. Normally a no-op */
160
#ifdef Py_DEBUG
161
#define PRE_DISPATCH_GOTO() if (frame->lltrace >= 5) { \
162
    lltrace_instruction(frame, stack_pointer, next_instr, opcode, oparg); }
163
#else
164
58.5G
#define PRE_DISPATCH_GOTO() ((void)0)
165
#endif
166
167
#ifdef Py_DEBUG
168
#define LLTRACE_RESUME_FRAME() \
169
do { \
170
    _PyFrame_SetStackPointer(frame, stack_pointer); \
171
    int lltrace = maybe_lltrace_resume_frame(frame, GLOBALS()); \
172
    stack_pointer = _PyFrame_GetStackPointer(frame); \
173
    frame->lltrace = lltrace; \
174
} while (0)
175
#else
176
2.46G
#define LLTRACE_RESUME_FRAME() ((void)0)
177
#endif
178
179
#ifdef Py_GIL_DISABLED
180
#define QSBR_QUIESCENT_STATE(tstate) _Py_qsbr_quiescent_state(((_PyThreadStateImpl *)tstate)->qsbr)
181
#else
182
#define QSBR_QUIESCENT_STATE(tstate)
183
#endif
184
185
186
/* Do interpreter dispatch accounting for tracing and instrumentation */
187
#define DISPATCH() \
188
58.2G
    { \
189
58.2G
        assert(frame->stackpointer == NULL); \
190
58.2G
        NEXTOPARG(); \
191
58.2G
        PRE_DISPATCH_GOTO(); \
192
58.2G
        DISPATCH_GOTO(); \
193
58.2G
    }
194
195
#define DISPATCH_NON_TRACING() \
196
    { \
197
        assert(frame->stackpointer == NULL); \
198
        NEXTOPARG(); \
199
        PRE_DISPATCH_GOTO(); \
200
        DISPATCH_GOTO_NON_TRACING(); \
201
    }
202
203
#define DISPATCH_SAME_OPARG() \
204
6.96M
    { \
205
6.96M
        opcode = next_instr->op.code; \
206
6.96M
        PRE_DISPATCH_GOTO(); \
207
6.96M
        DISPATCH_GOTO_NON_TRACING(); \
208
6.96M
    }
209
210
#define DISPATCH_INLINED(NEW_FRAME)                     \
211
1.66M
    do {                                                \
212
1.66M
        assert(tstate->interp->eval_frame == NULL);     \
213
1.66M
        _PyFrame_SetStackPointer(frame, stack_pointer); \
214
1.66M
        assert((NEW_FRAME)->previous == frame);         \
215
1.66M
        frame = tstate->current_frame = (NEW_FRAME);     \
216
1.66M
        CALL_STAT_INC(inlined_py_calls);                \
217
1.66M
        JUMP_TO_LABEL(start_frame);                      \
218
0
    } while (0)
219
220
/* Tuple access macros */
221
222
#ifndef Py_DEBUG
223
3.09G
#define GETITEM(v, i) PyTuple_GET_ITEM((v), (i))
224
#else
225
static inline PyObject *
226
GETITEM(PyObject *v, Py_ssize_t i) {
227
    assert(PyTuple_Check(v));
228
    assert(i >= 0);
229
    assert(i < PyTuple_GET_SIZE(v));
230
    return PyTuple_GET_ITEM(v, i);
231
}
232
#endif
233
234
/* Code access macros */
235
236
/* The integer overflow is checked by an assertion below. */
237
67.8M
#define INSTR_OFFSET() ((int)(next_instr - _PyFrame_GetBytecode(frame)))
238
58.2G
#define NEXTOPARG()  do { \
239
58.2G
        _Py_CODEUNIT word  = {.cache = FT_ATOMIC_LOAD_UINT16_RELAXED(*(uint16_t*)next_instr)}; \
240
58.2G
        opcode = word.op.code; \
241
58.2G
        oparg = word.op.arg; \
242
58.2G
    } while (0)
243
244
/* JUMPBY makes the generator identify the instruction as a jump. SKIP_OVER is
245
 * for advancing to the next instruction, taking into account cache entries
246
 * and skipped instructions.
247
 */
248
8.62G
#define JUMPBY(x)       (next_instr += (x))
249
#define SKIP_OVER(x)    (next_instr += (x))
250
251
#define STACK_LEVEL()     ((int)(stack_pointer - _PyFrame_Stackbase(frame)))
252
#define STACK_SIZE()      (_PyFrame_GetCode(frame)->co_stacksize)
253
254
#define WITHIN_STACK_BOUNDS() \
255
   (frame->owner == FRAME_OWNED_BY_INTERPRETER || (STACK_LEVEL() >= 0 && STACK_LEVEL() <= STACK_SIZE()))
256
257
#if defined(Py_DEBUG) && !defined(_Py_JIT)
258
// This allows temporary stack "overflows", provided it's all in the cache at any point in time.
259
#define WITHIN_STACK_BOUNDS_IGNORING_CACHE() \
260
   (frame->owner == FRAME_OWNED_BY_INTERPRETER || (STACK_LEVEL() >= 0 && (STACK_LEVEL()) <= STACK_SIZE()))
261
#else
262
#define WITHIN_STACK_BOUNDS_IGNORING_CACHE WITHIN_STACK_BOUNDS
263
#endif
264
265
/* Data access macros */
266
#define FRAME_CO_CONSTS (_PyFrame_GetCode(frame)->co_consts)
267
#define FRAME_CO_NAMES  (_PyFrame_GetCode(frame)->co_names)
268
269
/* Local variable macros */
270
271
1.42M
#define LOCALS_ARRAY    (frame->localsplus)
272
27.4G
#define GETLOCAL(i)     (frame->localsplus[i])
273
274
275
#ifdef Py_STATS
276
#define UPDATE_MISS_STATS(INSTNAME)                              \
277
    do {                                                         \
278
        STAT_INC(opcode, miss);                                  \
279
        STAT_INC((INSTNAME), miss);                              \
280
        /* The counter is always the first cache entry: */       \
281
        if (ADAPTIVE_COUNTER_TRIGGERS(next_instr->cache)) {       \
282
            STAT_INC((INSTNAME), deopt);                         \
283
        }                                                        \
284
    } while (0)
285
#else
286
275M
#define UPDATE_MISS_STATS(INSTNAME) ((void)0)
287
#endif
288
289
290
// Try to lock an object in the free threading build, if it's not already
291
// locked. Use with a DEOPT_IF() to deopt if the object is already locked.
292
// These are no-ops in the default GIL build. The general pattern is:
293
//
294
// DEOPT_IF(!LOCK_OBJECT(op));
295
// if (/* condition fails */) {
296
//     UNLOCK_OBJECT(op);
297
//     DEOPT_IF(true);
298
//  }
299
//  ...
300
//  UNLOCK_OBJECT(op);
301
//
302
// NOTE: The object must be unlocked on every exit code path and you should
303
// avoid any potentially escaping calls (like PyStackRef_CLOSE) while the
304
// object is locked.
305
#ifdef Py_GIL_DISABLED
306
#  define LOCK_OBJECT(op) PyMutex_LockFast(&(_PyObject_CAST(op))->ob_mutex)
307
#  define UNLOCK_OBJECT(op) PyMutex_Unlock(&(_PyObject_CAST(op))->ob_mutex)
308
#else
309
798M
#  define LOCK_OBJECT(op) (1)
310
798M
#  define UNLOCK_OBJECT(op) ((void)0)
311
#endif
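
The lock-or-deopt pattern described in the comment above, sketched as a standalone function. The macro definitions here are simplified stand-ins that only mimic the free-threaded trylock semantics; in the real interpreter, LOCK_OBJECT/UNLOCK_OBJECT are the macros defined above and DEOPT_IF comes from the code generator. The "toy list" type and the size guard are invented for illustration.

    /* Illustrative sketch, not CPython source: lock, guard, deopt, unlock. */
    #include <stdbool.h>
    #include <stdio.h>

    typedef struct { bool locked; int size; } toy_list;

    /* Stand-ins approximating the free-threaded trylock behaviour. */
    #define LOCK_OBJECT(op)   (!(op)->locked ? ((op)->locked = true, 1) : 0)
    #define UNLOCK_OBJECT(op) ((op)->locked = false)
    #define DEOPT_IF(cond)    do { if (cond) return false; } while (0)

    /* Specialized fast path: bail out (deopt) rather than block or raise. */
    static bool specialized_append(toy_list *lst) {
        DEOPT_IF(!LOCK_OBJECT(lst));     /* already locked elsewhere: deopt */
        if (lst->size >= 8) {            /* guard failed while holding the lock */
            UNLOCK_OBJECT(lst);
            DEOPT_IF(true);
        }
        lst->size++;                     /* the actual fast-path work */
        UNLOCK_OBJECT(lst);              /* unlocked on every exit path */
        return true;
    }

    int main(void) {
        toy_list lst = { false, 0 };
        printf("%d size=%d\n", (int)specialized_append(&lst), lst.size);  /* 1 size=1 */
        return 0;
    }
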
312
313
1.59G
#define GLOBALS() frame->f_globals
314
623M
#define BUILTINS() frame->f_builtins
315
144k
#define LOCALS() frame->f_locals
316
#define CONSTS() _PyFrame_GetCode(frame)->co_consts
317
#define NAMES() _PyFrame_GetCode(frame)->co_names
318
319
#define DTRACE_FUNCTION_ENTRY()  \
320
    if (PyDTrace_FUNCTION_ENTRY_ENABLED()) { \
321
        dtrace_function_entry(frame); \
322
    }
323
324
/* This takes a uint16_t instead of a _Py_BackoffCounter,
325
 * because it is used directly on the cache entry in generated code,
326
 * which is always an integral type. */
327
// Force re-specialization when tracing a side exit to get good side exits.
328
#define ADAPTIVE_COUNTER_TRIGGERS(COUNTER) \
329
1.94G
    backoff_counter_triggers(forge_backoff_counter((COUNTER)))
330
331
#define ADVANCE_ADAPTIVE_COUNTER(COUNTER) \
332
1.94G
    do { \
333
1.94G
        (COUNTER) = advance_backoff_counter((COUNTER)); \
334
1.94G
    } while (0);
335
336
#define PAUSE_ADAPTIVE_COUNTER(COUNTER) \
337
0
    do { \
338
0
        (COUNTER) = pause_backoff_counter((COUNTER)); \
339
0
    } while (0);
340
341
#ifdef ENABLE_SPECIALIZATION_FT
342
/* Multiple threads may execute these concurrently if thread-local bytecode is
343
 * disabled and they all execute the main copy of the bytecode. Specialization
344
 * is disabled in that case so the value is unused, but the RMW cycle should be
345
 * free of data races.
346
 */
347
#define RECORD_BRANCH_TAKEN(bitset, flag) \
348
4.31G
    FT_ATOMIC_STORE_UINT16_RELAXED(       \
349
4.31G
        bitset, (FT_ATOMIC_LOAD_UINT16_RELAXED(bitset) << 1) | (flag))
350
#else
351
#define RECORD_BRANCH_TAKEN(bitset, flag)
352
#endif
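
A minimal sketch of the shift-register update RECORD_BRANCH_TAKEN performs, with the atomics stripped out. The helper name and the sample outcomes are invented for illustration; how CPython's specializer consumes the 16-bit history is not reproduced here.

    /* Illustrative sketch, not CPython source: a 16-bit branch-outcome history,
     * newest outcome in the low bit. */
    #include <stdint.h>
    #include <stdio.h>

    static inline void record_branch(uint16_t *bitset, int taken) {
        *bitset = (uint16_t)((*bitset << 1) | (taken & 1));
    }

    int main(void) {
        uint16_t history = 0;
        int outcomes[] = { 1, 1, 0, 1 };      /* taken, taken, not taken, taken */
        for (int i = 0; i < 4; i++) {
            record_branch(&history, outcomes[i]);
        }
        printf("0x%04x\n", history);          /* 0x000d == 0b1101 */
        return 0;
    }
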
353
354
#define UNBOUNDLOCAL_ERROR_MSG \
355
0
    "cannot access local variable '%s' where it is not associated with a value"
356
#define UNBOUNDFREE_ERROR_MSG \
357
0
    "cannot access free variable '%s' where it is not associated with a value" \
358
0
    " in enclosing scope"
359
19
#define NAME_ERROR_MSG "name '%.200s' is not defined"
360
361
// If a trace function sets a new f_lineno and
362
// *then* raises, we use the destination when searching
363
// for an exception handler, displaying the traceback, and so on
364
0
#define INSTRUMENTED_JUMP(src, dest, event) \
365
0
do { \
366
0
    if (tstate->tracing) {\
367
0
        next_instr = dest; \
368
0
    } else { \
369
0
        _PyFrame_SetStackPointer(frame, stack_pointer); \
370
0
        next_instr = _Py_call_instrumentation_jump(this_instr, tstate, event, frame, src, dest); \
371
0
        stack_pointer = _PyFrame_GetStackPointer(frame); \
372
0
        if (next_instr == NULL) { \
373
0
            next_instr = (dest)+1; \
374
0
            JUMP_TO_LABEL(error); \
375
0
        } \
376
0
    } \
377
0
} while (0);
378
379
380
313M
static inline int _Py_EnterRecursivePy(PyThreadState *tstate) {
381
313M
    return (tstate->py_recursion_remaining-- <= 0) &&
382
203
        _Py_CheckRecursiveCallPy(tstate);
383
313M
}
384
385
1.26G
static inline void _Py_LeaveRecursiveCallPy(PyThreadState *tstate)  {
386
1.26G
    tstate->py_recursion_remaining++;
387
1.26G
}
388
389
/* Implementation of "macros" that modify the instruction pointer,
390
 * stack pointer, or frame pointer.
391
 * These need to be treated differently by tier 1 and 2.
392
 * The Tier 1 version is here; Tier 2 is inlined in ceval.c. */
393
394
2.16G
#define LOAD_IP(OFFSET) do { \
395
2.16G
        next_instr = frame->instr_ptr + (OFFSET); \
396
2.16G
    } while (0)
397
398
/* There's no STORE_IP(); it's inlined by the code generator. */
399
400
943M
#define LOAD_SP() \
401
943M
stack_pointer = _PyFrame_GetStackPointer(frame)
402
403
#define SAVE_SP() \
404
_PyFrame_SetStackPointer(frame, stack_pointer)
405
406
/* Tier-switching macros. */
407
408
#define TIER1_TO_TIER2(EXECUTOR)                        \
409
do {                                                   \
410
    OPT_STAT_INC(traces_executed);                     \
411
    next_instr = _Py_jit_entry((EXECUTOR), frame, stack_pointer, tstate); \
412
    frame = tstate->current_frame;                     \
413
    stack_pointer = _PyFrame_GetStackPointer(frame);   \
414
    int keep_tracing_bit = (uintptr_t)next_instr & 1;   \
415
    next_instr = (_Py_CODEUNIT *)(((uintptr_t)next_instr) & (~1)); \
416
    if (next_instr == NULL) {                          \
417
        /* gh-140104: The exception handler expects frame->instr_ptr
418
            to point after this_instr, not at this_instr! */ \
419
        next_instr = frame->instr_ptr + 1;                 \
420
        JUMP_TO_LABEL(error);                          \
421
    }                                                  \
422
    if (keep_tracing_bit) { \
423
        assert(((_PyThreadStateImpl *)tstate)->jit_tracer_state->prev_state.code_curr_size == 2); \
424
        ENTER_TRACING(); \
425
        DISPATCH_NON_TRACING(); \
426
    } \
427
    DISPATCH();                                        \
428
} while (0)
429
430
#define TIER2_TO_TIER2(EXECUTOR) \
431
do {                                                   \
432
    OPT_STAT_INC(traces_executed);                     \
433
    current_executor = (EXECUTOR);                     \
434
    goto tier2_start;                                  \
435
} while (0)
436
437
#define GOTO_TIER_ONE_SETUP \
438
    tstate->current_executor = NULL;                              \
439
    OPT_HIST(trace_uop_execution_counter, trace_run_length_hist); \
440
    _PyFrame_SetStackPointer(frame, stack_pointer);
441
442
#define GOTO_TIER_ONE(TARGET) \
443
    do \
444
    { \
445
        GOTO_TIER_ONE_SETUP \
446
        return (_Py_CODEUNIT *)(TARGET); \
447
    } while (0)
448
449
#define GOTO_TIER_ONE_CONTINUE_TRACING(TARGET) \
450
    do \
451
    { \
452
        GOTO_TIER_ONE_SETUP \
453
        return (_Py_CODEUNIT *)(((uintptr_t)(TARGET))| 1); \
454
    } while (0)
455
456
#define CURRENT_OPARG()    (next_uop[-1].oparg)
457
#define CURRENT_OPERAND0_64() (next_uop[-1].operand0)
458
#define CURRENT_OPERAND1_64() (next_uop[-1].operand1)
459
#define CURRENT_OPERAND0_32() (next_uop[-1].operand0)
460
#define CURRENT_OPERAND1_32() (next_uop[-1].operand1)
461
#define CURRENT_OPERAND0_16() (next_uop[-1].operand0)
462
#define CURRENT_OPERAND1_16() (next_uop[-1].operand1)
463
#define CURRENT_TARGET()   (next_uop[-1].target)
464
465
#define JUMP_TO_JUMP_TARGET() goto jump_to_jump_target
466
#define JUMP_TO_ERROR() goto jump_to_error_target
467
468
/* Stackref macros */
469
470
/* How much scratch space to give stackref to PyObject* conversion. */
471
1.98G
#define MAX_STACKREF_SCRATCH 10
472
473
#define STACKREFS_TO_PYOBJECTS(ARGS, ARG_COUNT, NAME) \
474
    /* +1 because vectorcall might use -1 to write self */ \
475
1.98G
    PyObject *NAME##_temp[MAX_STACKREF_SCRATCH+1]; \
476
1.98G
    PyObject **NAME = _PyObjectArray_FromStackRefArray(ARGS, ARG_COUNT, NAME##_temp);
477
478
#define STACKREFS_TO_PYOBJECTS_CLEANUP(NAME) \
479
    /* +1 because we +1 previously */ \
480
1.98G
    _PyObjectArray_Free(NAME - 1, NAME##_temp);
481
482
1.98G
#define CONVERSION_FAILED(NAME) ((NAME) == NULL)
483
484
#if defined(Py_DEBUG) && !defined(_Py_JIT)
485
#define SET_CURRENT_CACHED_VALUES(N) current_cached_values = (N)
486
#define CHECK_CURRENT_CACHED_VALUES(N) assert(current_cached_values == (N))
487
#else
488
#define SET_CURRENT_CACHED_VALUES(N) ((void)0)
489
#define CHECK_CURRENT_CACHED_VALUES(N) ((void)0)
490
#endif
491
492
1.01G
#define IS_PEP523_HOOKED(tstate) (tstate->interp->eval_frame != NULL)
493
494
static inline int
495
4.05G
check_periodics(PyThreadState *tstate) {
496
4.05G
    _Py_CHECK_EMSCRIPTEN_SIGNALS_PERIODICALLY();
497
4.05G
    QSBR_QUIESCENT_STATE(tstate);
498
4.05G
    if (_Py_atomic_load_uintptr_relaxed(&tstate->eval_breaker) & _PY_EVAL_EVENTS_MASK) {
499
80.4k
        return _Py_HandlePending(tstate);
500
80.4k
    }
501
4.05G
    return 0;
502
4.05G
}
503
504
// Mark the generator as executing. Returns true if the state was changed,
505
// false if it was already executing or finished.
506
static inline bool
507
gen_try_set_executing(PyGenObject *gen)
508
51.5M
{
509
#ifdef Py_GIL_DISABLED
510
    if (!_PyObject_IsUniquelyReferenced((PyObject *)gen)) {
511
        int8_t frame_state = _Py_atomic_load_int8_relaxed(&gen->gi_frame_state);
512
        while (frame_state < FRAME_EXECUTING) {
513
            if (_Py_atomic_compare_exchange_int8(&gen->gi_frame_state,
514
                                                 &frame_state,
515
                                                 FRAME_EXECUTING)) {
516
                return true;
517
            }
518
        }
519
    }
520
#endif
521
    // Use faster non-atomic modifications in the GIL-enabled build and when
522
    // the object is uniquely referenced in the free-threaded build.
523
51.5M
    if (gen->gi_frame_state < FRAME_EXECUTING) {
524
51.5M
        gen->gi_frame_state = FRAME_EXECUTING;
525
51.5M
        return true;
526
51.5M
    }
527
0
    return false;
528
51.5M
}