Coverage Report

Created: 2025-12-14 07:06

next uncovered line (L), next uncovered region (R), next uncovered branch (B)
/src/cpython/Python/ceval_macros.h
Line
Count
Source
1
// Macros and other things needed by ceval.c, and bytecodes.c
2
3
/* Computed GOTOs, or
4
       the-optimization-commonly-but-improperly-known-as-"threaded code"
5
   using gcc's labels-as-values extension
6
   (http://gcc.gnu.org/onlinedocs/gcc/Labels-as-Values.html).
7
8
   The traditional bytecode evaluation loop uses a "switch" statement, which
9
   decent compilers will optimize as a single indirect branch instruction
10
   combined with a lookup table of jump addresses. However, since the
11
   indirect jump instruction is shared by all opcodes, the CPU will have a
12
   hard time making the right prediction for where to jump next (actually,
13
   it will be always wrong except in the uncommon case of a sequence of
14
   several identical opcodes).
15
16
   "Threaded code" in contrast, uses an explicit jump table and an explicit
17
   indirect jump instruction at the end of each opcode. Since the jump
18
   instruction is at a different address for each opcode, the CPU will make a
19
   separate prediction for each of these instructions, which is equivalent to
20
   predicting the second opcode of each opcode pair. These predictions have
21
   a much better chance to turn out valid, especially in small bytecode loops.
22
23
   A mispredicted branch on a modern CPU flushes the whole pipeline and
24
   can cost several CPU cycles (depending on the pipeline depth),
25
   and potentially many more instructions (depending on the pipeline width).
26
   A correctly predicted branch, however, is nearly free.
27
28
   At the time of this writing, the "threaded code" version is up to 15-20%
29
   faster than the normal "switch" version, depending on the compiler and the
30
   CPU architecture.
31
32
   NOTE: care must be taken that the compiler doesn't try to "optimize" the
33
   indirect jumps by sharing them between all opcodes. Such optimizations
34
   can be disabled on gcc by using the -fno-gcse flag (or possibly
35
   -fno-crossjumping).
36
*/
37
38
/* Use macros rather than inline functions, to make it as clear as possible
39
 * to the C compiler that the tracing check is a simple test then branch.
40
 * We want to be sure that the compiler knows this before it generates
41
 * the CFG.
42
 */
43
44
#ifdef WITH_DTRACE
45
#define OR_DTRACE_LINE | (PyDTrace_LINE_ENABLED() ? 255 : 0)
46
#else
47
#define OR_DTRACE_LINE
48
#endif
49
50
#ifdef HAVE_COMPUTED_GOTOS
51
    #ifndef USE_COMPUTED_GOTOS
52
    #define USE_COMPUTED_GOTOS 1
53
    #endif
54
#else
55
    #if defined(USE_COMPUTED_GOTOS) && USE_COMPUTED_GOTOS
56
    #error "Computed gotos are not supported on this compiler."
57
    #endif
58
    #undef USE_COMPUTED_GOTOS
59
    #define USE_COMPUTED_GOTOS 0
60
#endif
61
62
#ifdef Py_STATS
63
#define INSTRUCTION_STATS(op) \
64
    do { \
65
        PyStats *s = _PyStats_GET(); \
66
        OPCODE_EXE_INC(op); \
67
        if (s) s->opcode_stats[lastopcode].pair_count[op]++; \
68
        lastopcode = op; \
69
    } while (0)
70
#else
71
59.8G
#define INSTRUCTION_STATS(op) ((void)0)
72
#endif
73
74
#ifdef Py_STATS
75
#   define TAIL_CALL_PARAMS _PyInterpreterFrame *frame, _PyStackRef *stack_pointer, PyThreadState *tstate, _Py_CODEUNIT *next_instr, const void *instruction_funcptr_table, int oparg, int lastopcode
76
#   define TAIL_CALL_ARGS frame, stack_pointer, tstate, next_instr, instruction_funcptr_table, oparg, lastopcode
77
#else
78
#   define TAIL_CALL_PARAMS _PyInterpreterFrame *frame, _PyStackRef *stack_pointer, PyThreadState *tstate, _Py_CODEUNIT *next_instr, const void *instruction_funcptr_table, int oparg
79
#   define TAIL_CALL_ARGS frame, stack_pointer, tstate, next_instr, instruction_funcptr_table, oparg
80
#endif
81
82
#if _Py_TAIL_CALL_INTERP
83
#   if defined(__clang__) || defined(__GNUC__)
84
#       if !_Py__has_attribute(preserve_none) || !_Py__has_attribute(musttail)
85
#           error "This compiler does not have support for efficient tail calling."
86
#       endif
87
#   elif defined(_MSC_VER) && (_MSC_VER < 1950)
88
#       error "You need at least VS 2026 / PlatformToolset v145 for tail calling."
89
#   endif
90
91
    // Note: [[clang::musttail]] works for GCC 15, but not __attribute__((musttail)) at the moment.
92
#   define Py_MUSTTAIL [[clang::musttail]]
93
#   define Py_PRESERVE_NONE_CC __attribute__((preserve_none))
94
    Py_PRESERVE_NONE_CC typedef PyObject* (*py_tail_call_funcptr)(TAIL_CALL_PARAMS);
95
96
#   define DISPATCH_TABLE_VAR instruction_funcptr_table
97
#   define DISPATCH_TABLE instruction_funcptr_handler_table
98
#   define TRACING_DISPATCH_TABLE instruction_funcptr_tracing_table
99
#   define TARGET(op) Py_PRESERVE_NONE_CC PyObject *_TAIL_CALL_##op(TAIL_CALL_PARAMS)
100
101
#   define DISPATCH_GOTO() \
102
        do { \
103
            Py_MUSTTAIL return (((py_tail_call_funcptr *)instruction_funcptr_table)[opcode])(TAIL_CALL_ARGS); \
104
        } while (0)
105
#   define DISPATCH_GOTO_NON_TRACING() \
106
        do { \
107
            Py_MUSTTAIL return (((py_tail_call_funcptr *)DISPATCH_TABLE)[opcode])(TAIL_CALL_ARGS); \
108
        } while (0)
109
#   define JUMP_TO_LABEL(name) \
110
        do { \
111
            Py_MUSTTAIL return (_TAIL_CALL_##name)(TAIL_CALL_ARGS); \
112
        } while (0)
113
#   ifdef Py_STATS
114
#       define JUMP_TO_PREDICTED(name) \
115
            do { \
116
                Py_MUSTTAIL return (_TAIL_CALL_##name)(frame, stack_pointer, tstate, this_instr, instruction_funcptr_table, oparg, lastopcode); \
117
            } while (0)
118
#   else
119
#       define JUMP_TO_PREDICTED(name) \
120
            do { \
121
                Py_MUSTTAIL return (_TAIL_CALL_##name)(frame, stack_pointer, tstate, this_instr, instruction_funcptr_table, oparg); \
122
            } while (0)
123
#   endif
124
#    define LABEL(name) TARGET(name)
125
#elif USE_COMPUTED_GOTOS
126
#  define DISPATCH_TABLE_VAR opcode_targets
127
15.6M
#  define DISPATCH_TABLE opcode_targets_table
128
#  define TRACING_DISPATCH_TABLE opcode_tracing_targets_table
129
59.8G
#  define TARGET(op) TARGET_##op:
130
60.2G
#  define DISPATCH_GOTO() goto *opcode_targets[opcode]
131
15.6M
#  define DISPATCH_GOTO_NON_TRACING() goto *DISPATCH_TABLE[opcode];
132
89.0M
#  define JUMP_TO_LABEL(name) goto name;
133
766M
#  define JUMP_TO_PREDICTED(name) goto PREDICTED_##name;
134
422M
#  define LABEL(name) name:
135
#else
136
#  define TARGET(op) case op: TARGET_##op:
137
#  define DISPATCH_GOTO() dispatch_code = opcode | tracing_mode ; goto dispatch_opcode
138
#  define DISPATCH_GOTO_NON_TRACING() dispatch_code = opcode; goto dispatch_opcode
139
#  define JUMP_TO_LABEL(name) goto name;
140
#  define JUMP_TO_PREDICTED(name) goto PREDICTED_##name;
141
#  define LABEL(name) name:
142
#endif
143
144
#if (_Py_TAIL_CALL_INTERP || USE_COMPUTED_GOTOS) && _Py_TIER2
145
#  define IS_JIT_TRACING() (DISPATCH_TABLE_VAR == TRACING_DISPATCH_TABLE)
146
#  define ENTER_TRACING() \
147
    DISPATCH_TABLE_VAR = TRACING_DISPATCH_TABLE;
148
#  define LEAVE_TRACING() \
149
    DISPATCH_TABLE_VAR = DISPATCH_TABLE;
150
#else
151
#  define IS_JIT_TRACING() (tracing_mode != 0)
152
#  define ENTER_TRACING() tracing_mode = 255
153
#  define LEAVE_TRACING() tracing_mode = 0
154
#endif
155
156
/* PRE_DISPATCH_GOTO() does lltrace if enabled. Normally a no-op */
157
#ifdef Py_DEBUG
158
#define PRE_DISPATCH_GOTO() if (frame->lltrace >= 5) { \
159
    lltrace_instruction(frame, stack_pointer, next_instr, opcode, oparg); }
160
#else
161
60.2G
#define PRE_DISPATCH_GOTO() ((void)0)
162
#endif
163
164
#ifdef Py_DEBUG
165
#define LLTRACE_RESUME_FRAME() \
166
do { \
167
    _PyFrame_SetStackPointer(frame, stack_pointer); \
168
    int lltrace = maybe_lltrace_resume_frame(frame, GLOBALS()); \
169
    stack_pointer = _PyFrame_GetStackPointer(frame); \
170
    frame->lltrace = lltrace; \
171
} while (0)
172
#else
173
2.24G
#define LLTRACE_RESUME_FRAME() ((void)0)
174
#endif
175
176
#ifdef Py_GIL_DISABLED
177
#define QSBR_QUIESCENT_STATE(tstate) _Py_qsbr_quiescent_state(((_PyThreadStateImpl *)tstate)->qsbr)
178
#else
179
#define QSBR_QUIESCENT_STATE(tstate)
180
#endif
181
182
183
/* Do interpreter dispatch accounting for tracing and instrumentation */
184
#define DISPATCH() \
185
59.9G
    { \
186
59.9G
        assert(frame->stackpointer == NULL); \
187
59.9G
        NEXTOPARG(); \
188
59.9G
        PRE_DISPATCH_GOTO(); \
189
59.9G
        DISPATCH_GOTO(); \
190
59.9G
    }
191
192
#define DISPATCH_NON_TRACING() \
193
    { \
194
        assert(frame->stackpointer == NULL); \
195
        NEXTOPARG(); \
196
        PRE_DISPATCH_GOTO(); \
197
        DISPATCH_GOTO_NON_TRACING(); \
198
    }
199
200
#define DISPATCH_SAME_OPARG() \
201
15.6M
    { \
202
15.6M
        opcode = next_instr->op.code; \
203
15.6M
        PRE_DISPATCH_GOTO(); \
204
15.6M
        DISPATCH_GOTO_NON_TRACING(); \
205
15.6M
    }
206
207
#define DISPATCH_INLINED(NEW_FRAME)                     \
208
1.49M
    do {                                                \
209
1.49M
        assert(tstate->interp->eval_frame == NULL);     \
210
1.49M
        _PyFrame_SetStackPointer(frame, stack_pointer); \
211
1.49M
        assert((NEW_FRAME)->previous == frame);         \
212
1.49M
        frame = tstate->current_frame = (NEW_FRAME);     \
213
1.49M
        CALL_STAT_INC(inlined_py_calls);                \
214
1.49M
        JUMP_TO_LABEL(start_frame);                      \
215
0
    } while (0)
216
217
/* Tuple access macros */
218
219
#ifndef Py_DEBUG
220
2.83G
#define GETITEM(v, i) PyTuple_GET_ITEM((v), (i))
221
#else
222
static inline PyObject *
223
GETITEM(PyObject *v, Py_ssize_t i) {
224
    assert(PyTuple_Check(v));
225
    assert(i >= 0);
226
    assert(i < PyTuple_GET_SIZE(v));
227
    return PyTuple_GET_ITEM(v, i);
228
}
229
#endif
230
231
/* Code access macros */
232
233
/* The integer overflow is checked by an assertion below. */
234
55.8M
#define INSTR_OFFSET() ((int)(next_instr - _PyFrame_GetBytecode(frame)))
235
59.9G
#define NEXTOPARG()  do { \
236
59.9G
        _Py_CODEUNIT word  = {.cache = FT_ATOMIC_LOAD_UINT16_RELAXED(*(uint16_t*)next_instr)}; \
237
59.9G
        opcode = word.op.code; \
238
59.9G
        oparg = word.op.arg; \
239
59.9G
    } while (0)
240
241
/* JUMPBY makes the generator identify the instruction as a jump. SKIP_OVER is
242
 * for advancing to the next instruction, taking into account cache entries
243
 * and skipped instructions.
244
 */
245
9.24G
#define JUMPBY(x)       (next_instr += (x))
246
339M
#define SKIP_OVER(x)    (next_instr += (x))
247
248
#define STACK_LEVEL()     ((int)(stack_pointer - _PyFrame_Stackbase(frame)))
249
#define STACK_SIZE()      (_PyFrame_GetCode(frame)->co_stacksize)
250
251
#define WITHIN_STACK_BOUNDS() \
252
   (frame->owner == FRAME_OWNED_BY_INTERPRETER || (STACK_LEVEL() >= 0 && STACK_LEVEL() <= STACK_SIZE()))
253
254
#if defined(Py_DEBUG) && !defined(_Py_JIT)
255
#define WITHIN_STACK_BOUNDS_WITH_CACHE() \
256
   (frame->owner == FRAME_OWNED_BY_INTERPRETER || (STACK_LEVEL() >= 0 && (STACK_LEVEL() + current_cached_values) <= STACK_SIZE()))
257
#else
258
#define WITHIN_STACK_BOUNDS_WITH_CACHE WITHIN_STACK_BOUNDS
259
#endif
260
261
/* Data access macros */
262
#define FRAME_CO_CONSTS (_PyFrame_GetCode(frame)->co_consts)
263
#define FRAME_CO_NAMES  (_PyFrame_GetCode(frame)->co_names)
264
265
/* Local variable macros */
266
267
10.2M
#define LOCALS_ARRAY    (frame->localsplus)
268
28.6G
#define GETLOCAL(i)     (frame->localsplus[i])
269
270
271
#ifdef Py_STATS
272
#define UPDATE_MISS_STATS(INSTNAME)                              \
273
    do {                                                         \
274
        STAT_INC(opcode, miss);                                  \
275
        STAT_INC((INSTNAME), miss);                              \
276
        /* The counter is always the first cache entry: */       \
277
        if (ADAPTIVE_COUNTER_TRIGGERS(next_instr->cache)) {       \
278
            STAT_INC((INSTNAME), deopt);                         \
279
        }                                                        \
280
    } while (0)
281
#else
282
766M
#define UPDATE_MISS_STATS(INSTNAME) ((void)0)
283
#endif
284
285
286
// Try to lock an object in the free threading build, if it's not already
287
// locked. Use with a DEOPT_IF() to deopt if the object is already locked.
288
// These are no-ops in the default GIL build. The general pattern is:
289
//
290
// DEOPT_IF(!LOCK_OBJECT(op));
291
// if (/* condition fails */) {
292
//     UNLOCK_OBJECT(op);
293
//     DEOPT_IF(true);
294
//  }
295
//  ...
296
//  UNLOCK_OBJECT(op);
297
//
298
// NOTE: The object must be unlocked on every exit code path and you should
299
// avoid any potentially escaping calls (like PyStackRef_CLOSE) while the
300
// object is locked.
301
#ifdef Py_GIL_DISABLED
302
#  define LOCK_OBJECT(op) PyMutex_LockFast(&(_PyObject_CAST(op))->ob_mutex)
303
#  define UNLOCK_OBJECT(op) PyMutex_Unlock(&(_PyObject_CAST(op))->ob_mutex)
304
#else
305
730M
#  define LOCK_OBJECT(op) (1)
306
730M
#  define UNLOCK_OBJECT(op) ((void)0)
307
#endif
308
309
1.40G
#define GLOBALS() frame->f_globals
310
550M
#define BUILTINS() frame->f_builtins
311
144k
#define LOCALS() frame->f_locals
312
#define CONSTS() _PyFrame_GetCode(frame)->co_consts
313
#define NAMES() _PyFrame_GetCode(frame)->co_names
314
315
#define DTRACE_FUNCTION_ENTRY()  \
316
    if (PyDTrace_FUNCTION_ENTRY_ENABLED()) { \
317
        dtrace_function_entry(frame); \
318
    }
319
320
/* This takes a uint16_t instead of a _Py_BackoffCounter,
321
 * because it is used directly on the cache entry in generated code,
322
 * which is always an integral type. */
323
// Force re-specialization when tracing a side exit to get good side exits.
324
#define ADAPTIVE_COUNTER_TRIGGERS(COUNTER) \
325
2.28G
    backoff_counter_triggers(forge_backoff_counter((COUNTER)))
326
327
#define ADVANCE_ADAPTIVE_COUNTER(COUNTER) \
328
2.28G
    do { \
329
2.28G
        (COUNTER) = advance_backoff_counter((COUNTER)); \
330
2.28G
    } while (0);
331
332
#define PAUSE_ADAPTIVE_COUNTER(COUNTER) \
333
0
    do { \
334
0
        (COUNTER) = pause_backoff_counter((COUNTER)); \
335
0
    } while (0);
336
337
#ifdef ENABLE_SPECIALIZATION_FT
338
/* Multiple threads may execute these concurrently if thread-local bytecode is
339
 * disabled and they all execute the main copy of the bytecode. Specialization
340
 * is disabled in that case so the value is unused, but the RMW cycle should be
341
 * free of data races.
342
 */
343
#define RECORD_BRANCH_TAKEN(bitset, flag) \
344
4.62G
    FT_ATOMIC_STORE_UINT16_RELAXED(       \
345
4.62G
        bitset, (FT_ATOMIC_LOAD_UINT16_RELAXED(bitset) << 1) | (flag))
346
#else
347
#define RECORD_BRANCH_TAKEN(bitset, flag)
348
#endif
349
350
#define UNBOUNDLOCAL_ERROR_MSG \
351
0
    "cannot access local variable '%s' where it is not associated with a value"
352
#define UNBOUNDFREE_ERROR_MSG \
353
0
    "cannot access free variable '%s' where it is not associated with a value" \
354
0
    " in enclosing scope"
355
17
#define NAME_ERROR_MSG "name '%.200s' is not defined"
356
357
// If a trace function sets a new f_lineno and
358
// *then* raises, we use the destination when searching
359
// for an exception handler, displaying the traceback, and so on
360
0
#define INSTRUMENTED_JUMP(src, dest, event) \
361
0
do { \
362
0
    if (tstate->tracing) {\
363
0
        next_instr = dest; \
364
0
    } else { \
365
0
        _PyFrame_SetStackPointer(frame, stack_pointer); \
366
0
        next_instr = _Py_call_instrumentation_jump(this_instr, tstate, event, frame, src, dest); \
367
0
        stack_pointer = _PyFrame_GetStackPointer(frame); \
368
0
        if (next_instr == NULL) { \
369
0
            next_instr = (dest)+1; \
370
0
            JUMP_TO_LABEL(error); \
371
0
        } \
372
0
    } \
373
0
} while (0);
374
375
376
284M
static inline int _Py_EnterRecursivePy(PyThreadState *tstate) {
377
284M
    return (tstate->py_recursion_remaining-- <= 0) &&
378
176
        _Py_CheckRecursiveCallPy(tstate);
379
284M
}
380
381
1.14G
static inline void _Py_LeaveRecursiveCallPy(PyThreadState *tstate)  {
382
1.14G
    tstate->py_recursion_remaining++;
383
1.14G
}
384
385
/* Implementation of "macros" that modify the instruction pointer,
386
 * stack pointer, or frame pointer.
387
 * These need to be treated differently by tier 1 and 2.
388
 * The Tier 1 version is here; Tier 2 is inlined in ceval.c. */
389
390
1.97G
#define LOAD_IP(OFFSET) do { \
391
1.97G
        next_instr = frame->instr_ptr + (OFFSET); \
392
1.97G
    } while (0)
393
394
/* There's no STORE_IP(), it's inlined by the code generator. */
395
396
861M
#define LOAD_SP() \
397
861M
stack_pointer = _PyFrame_GetStackPointer(frame)
398
399
#define SAVE_SP() \
400
_PyFrame_SetStackPointer(frame, stack_pointer)
401
402
/* Tier-switching macros. */
403
404
#define TIER1_TO_TIER2(EXECUTOR)                        \
405
do {                                                   \
406
    OPT_STAT_INC(traces_executed);                     \
407
    next_instr = _Py_jit_entry((EXECUTOR), frame, stack_pointer, tstate); \
408
    frame = tstate->current_frame;                     \
409
    stack_pointer = _PyFrame_GetStackPointer(frame);   \
410
    int keep_tracing_bit = (uintptr_t)next_instr & 1;   \
411
    next_instr = (_Py_CODEUNIT *)(((uintptr_t)next_instr) & (~1)); \
412
    if (next_instr == NULL) {                          \
413
        /* gh-140104: The exception handler expects frame->instr_ptr
414
            to after this_instr, not this_instr! */ \
415
        next_instr = frame->instr_ptr + 1;                 \
416
        JUMP_TO_LABEL(error);                          \
417
    }                                                  \
418
    if (keep_tracing_bit) { \
419
        assert(((_PyThreadStateImpl *)tstate)->jit_tracer_state.prev_state.code_curr_size == 2); \
420
        ENTER_TRACING(); \
421
        DISPATCH_NON_TRACING(); \
422
    } \
423
    DISPATCH();                                        \
424
} while (0)
425
426
#define TIER2_TO_TIER2(EXECUTOR) \
427
do {                                                   \
428
    OPT_STAT_INC(traces_executed);                     \
429
    current_executor = (EXECUTOR);                     \
430
    goto tier2_start;                                  \
431
} while (0)
432
433
#define GOTO_TIER_ONE_SETUP \
434
    tstate->current_executor = NULL;                              \
435
    OPT_HIST(trace_uop_execution_counter, trace_run_length_hist); \
436
    _PyFrame_SetStackPointer(frame, stack_pointer);
437
438
#define GOTO_TIER_ONE(TARGET) \
439
    do \
440
    { \
441
        GOTO_TIER_ONE_SETUP \
442
        return (_Py_CODEUNIT *)(TARGET); \
443
    } while (0)
444
445
#define GOTO_TIER_ONE_CONTINUE_TRACING(TARGET) \
446
    do \
447
    { \
448
        GOTO_TIER_ONE_SETUP \
449
        return (_Py_CODEUNIT *)(((uintptr_t)(TARGET))| 1); \
450
    } while (0)
451
452
#define CURRENT_OPARG()    (next_uop[-1].oparg)
453
#define CURRENT_OPERAND0_64() (next_uop[-1].operand0)
454
#define CURRENT_OPERAND1_64() (next_uop[-1].operand1)
455
#define CURRENT_OPERAND0_32() (next_uop[-1].operand0)
456
#define CURRENT_OPERAND1_32() (next_uop[-1].operand1)
457
#define CURRENT_OPERAND0_16() (next_uop[-1].operand0)
458
#define CURRENT_OPERAND1_16() (next_uop[-1].operand1)
459
#define CURRENT_TARGET()   (next_uop[-1].target)
460
461
#define JUMP_TO_JUMP_TARGET() goto jump_to_jump_target
462
#define JUMP_TO_ERROR() goto jump_to_error_target
463
464
/* Stackref macros */
465
466
/* How much scratch space to give stackref to PyObject* conversion. */
467
2.07G
#define MAX_STACKREF_SCRATCH 10
468
469
#define STACKREFS_TO_PYOBJECTS(ARGS, ARG_COUNT, NAME) \
470
    /* +1 because vectorcall might use -1 to write self */ \
471
2.08G
    PyObject *NAME##_temp[MAX_STACKREF_SCRATCH+1]; \
472
2.08G
    PyObject **NAME = _PyObjectArray_FromStackRefArray(ARGS, ARG_COUNT, NAME##_temp);
473
474
#define STACKREFS_TO_PYOBJECTS_CLEANUP(NAME) \
475
    /* +1 because we +1 previously */ \
476
2.08G
    _PyObjectArray_Free(NAME - 1, NAME##_temp);
477
478
2.08G
#define CONVERSION_FAILED(NAME) ((NAME) == NULL)
479
480
#if defined(Py_DEBUG) && !defined(_Py_JIT)
481
#define SET_CURRENT_CACHED_VALUES(N) current_cached_values = (N)
482
#define CHECK_CURRENT_CACHED_VALUES(N) assert(current_cached_values == (N))
483
#else
484
#define SET_CURRENT_CACHED_VALUES(N) ((void)0)
485
#define CHECK_CURRENT_CACHED_VALUES(N) ((void)0)
486
#endif
487
488
static inline int
489
4.30G
check_periodics(PyThreadState *tstate) {
490
4.30G
    _Py_CHECK_EMSCRIPTEN_SIGNALS_PERIODICALLY();
491
4.30G
    QSBR_QUIESCENT_STATE(tstate);
492
4.30G
    if (_Py_atomic_load_uintptr_relaxed(&tstate->eval_breaker) & _PY_EVAL_EVENTS_MASK) {
493
76.8k
        return _Py_HandlePending(tstate);
494
76.8k
    }
495
4.30G
    return 0;
496
4.30G
}
497