Coverage Report

Created: 2025-11-11 06:44

/src/cpython/Python/ceval_macros.h
Count   Source
        // Macros and other things needed by ceval.c, and bytecodes.c

        /* Computed GOTOs, or
               the-optimization-commonly-but-improperly-known-as-"threaded code"
           using gcc's labels-as-values extension
           (http://gcc.gnu.org/onlinedocs/gcc/Labels-as-Values.html).

           The traditional bytecode evaluation loop uses a "switch" statement, which
           decent compilers will optimize as a single indirect branch instruction
           combined with a lookup table of jump addresses. However, since the
           indirect jump instruction is shared by all opcodes, the CPU will have a
           hard time making the right prediction for where to jump next (actually,
           it will be always wrong except in the uncommon case of a sequence of
           several identical opcodes).

           "Threaded code" in contrast, uses an explicit jump table and an explicit
           indirect jump instruction at the end of each opcode. Since the jump
           instruction is at a different address for each opcode, the CPU will make a
           separate prediction for each of these instructions, which is equivalent to
           predicting the second opcode of each opcode pair. These predictions have
           a much better chance to turn out valid, especially in small bytecode loops.

           A mispredicted branch on a modern CPU flushes the whole pipeline and
           can cost several CPU cycles (depending on the pipeline depth),
           and potentially many more instructions (depending on the pipeline width).
           A correctly predicted branch, however, is nearly free.

           At the time of this writing, the "threaded code" version is up to 15-20%
           faster than the normal "switch" version, depending on the compiler and the
           CPU architecture.

           NOTE: care must be taken that the compiler doesn't try to "optimize" the
           indirect jumps by sharing them between all opcodes. Such optimizations
           can be disabled on gcc by using the -fno-gcse flag (or possibly
           -fno-crossjumping).
        */

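/* A minimal standalone sketch (not part of ceval_macros.h) of the threaded-code
 * dispatch described in the comment above, using the GCC/Clang labels-as-values
 * extension. The opcodes and the toy program are invented for illustration; the
 * real interpreter dispatches through the TARGET()/DISPATCH_GOTO() macros below.
 */
#include <stdio.h>

enum { OP_INCR, OP_DECR, OP_HALT };

static int run(const unsigned char *code)
{
    /* One jump-table entry per opcode; &&label takes the address of a label. */
    static const void *targets[] = { &&op_incr, &&op_decr, &&op_halt };
    const unsigned char *pc = code;
    int acc = 0;

    /* Every handler ends with its own indirect jump, so the branch predictor
     * tracks one branch per opcode instead of a single shared switch branch. */
    #define DISPATCH_NEXT() goto *targets[*pc++]

    DISPATCH_NEXT();
op_incr:
    acc++;
    DISPATCH_NEXT();
op_decr:
    acc--;
    DISPATCH_NEXT();
op_halt:
    #undef DISPATCH_NEXT
    return acc;
}

int main(void)
{
    const unsigned char program[] = { OP_INCR, OP_INCR, OP_DECR, OP_HALT };
    printf("%d\n", run(program));   /* prints 1 */
    return 0;
}
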
        /* Use macros rather than inline functions, to make it as clear as possible
         * to the C compiler that the tracing check is a simple test then branch.
         * We want to be sure that the compiler knows this before it generates
         * the CFG.
         */

        #ifdef WITH_DTRACE
        #define OR_DTRACE_LINE | (PyDTrace_LINE_ENABLED() ? 255 : 0)
        #else
        #define OR_DTRACE_LINE
        #endif

        #ifdef HAVE_COMPUTED_GOTOS
            #ifndef USE_COMPUTED_GOTOS
            #define USE_COMPUTED_GOTOS 1
            #endif
        #else
            #if defined(USE_COMPUTED_GOTOS) && USE_COMPUTED_GOTOS
            #error "Computed gotos are not supported on this compiler."
            #endif
            #undef USE_COMPUTED_GOTOS
            #define USE_COMPUTED_GOTOS 0
        #endif

        #ifdef Py_STATS
        #define INSTRUCTION_STATS(op) \
            do { \
                PyStats *s = _PyStats_GET(); \
                OPCODE_EXE_INC(op); \
                if (s) s->opcode_stats[lastopcode].pair_count[op]++; \
                lastopcode = op; \
            } while (0)
        #else
37.5G   #define INSTRUCTION_STATS(op) ((void)0)
        #endif

        #ifdef Py_STATS
        #   define TAIL_CALL_PARAMS _PyInterpreterFrame *frame, _PyStackRef *stack_pointer, PyThreadState *tstate, _Py_CODEUNIT *next_instr, const void *instruction_funcptr_table, int oparg, int lastopcode
        #   define TAIL_CALL_ARGS frame, stack_pointer, tstate, next_instr, instruction_funcptr_table, oparg, lastopcode
        #else
        #   define TAIL_CALL_PARAMS _PyInterpreterFrame *frame, _PyStackRef *stack_pointer, PyThreadState *tstate, _Py_CODEUNIT *next_instr, const void *instruction_funcptr_table, int oparg
        #   define TAIL_CALL_ARGS frame, stack_pointer, tstate, next_instr, instruction_funcptr_table, oparg
        #endif

        #if _Py_TAIL_CALL_INTERP
        #   if defined(__clang__) || defined(__GNUC__)
        #       if !_Py__has_attribute(preserve_none) || !_Py__has_attribute(musttail)
        #           error "This compiler does not have support for efficient tail calling."
        #       endif
        #   elif defined(_MSC_VER) && (_MSC_VER < 1950)
        #       error "You need at least VS 2026 / PlatformToolset v145 for tail calling."
        #   endif

            // Note: [[clang::musttail]] works for GCC 15, but not __attribute__((musttail)) at the moment.
        #   define Py_MUSTTAIL [[clang::musttail]]
        #   define Py_PRESERVE_NONE_CC __attribute__((preserve_none))
            Py_PRESERVE_NONE_CC typedef PyObject* (*py_tail_call_funcptr)(TAIL_CALL_PARAMS);

        #   define TARGET(op) Py_PRESERVE_NONE_CC PyObject *_TAIL_CALL_##op(TAIL_CALL_PARAMS)
        #   define DISPATCH_GOTO() \
                do { \
                    Py_MUSTTAIL return (((py_tail_call_funcptr *)instruction_funcptr_table)[opcode])(TAIL_CALL_ARGS); \
                } while (0)
        #   define JUMP_TO_LABEL(name) \
                do { \
                    Py_MUSTTAIL return (_TAIL_CALL_##name)(TAIL_CALL_ARGS); \
                } while (0)
        #   ifdef Py_STATS
        #       define JUMP_TO_PREDICTED(name) \
                    do { \
                        Py_MUSTTAIL return (_TAIL_CALL_##name)(frame, stack_pointer, tstate, this_instr, instruction_funcptr_table, oparg, lastopcode); \
                    } while (0)
        #   else
        #       define JUMP_TO_PREDICTED(name) \
                    do { \
                        Py_MUSTTAIL return (_TAIL_CALL_##name)(frame, stack_pointer, tstate, this_instr, instruction_funcptr_table, oparg); \
                    } while (0)
        #   endif
        #    define LABEL(name) TARGET(name)
        #elif USE_COMPUTED_GOTOS
37.5G   #  define TARGET(op) TARGET_##op:
37.8G   #  define DISPATCH_GOTO() goto *opcode_targets[opcode]
48.0M   #  define JUMP_TO_LABEL(name) goto name;
203M    #  define JUMP_TO_PREDICTED(name) goto PREDICTED_##name;
318M    #  define LABEL(name) name:
        #else
        #  define TARGET(op) case op: TARGET_##op:
        #  define DISPATCH_GOTO() goto dispatch_opcode
        #  define JUMP_TO_LABEL(name) goto name;
        #  define JUMP_TO_PREDICTED(name) goto PREDICTED_##name;
        #  define LABEL(name) name:
        #endif

        /* PRE_DISPATCH_GOTO() does lltrace if enabled. Normally a no-op */
        #ifdef Py_DEBUG
        #define PRE_DISPATCH_GOTO() if (frame->lltrace >= 5) { \
            lltrace_instruction(frame, stack_pointer, next_instr, opcode, oparg); }
        #else
37.8G   #define PRE_DISPATCH_GOTO() ((void)0)
        #endif

        #ifdef Py_DEBUG
        #define LLTRACE_RESUME_FRAME() \
        do { \
            _PyFrame_SetStackPointer(frame, stack_pointer); \
            int lltrace = maybe_lltrace_resume_frame(frame, GLOBALS()); \
            stack_pointer = _PyFrame_GetStackPointer(frame); \
            frame->lltrace = lltrace; \
        } while (0)
        #else
1.39G   #define LLTRACE_RESUME_FRAME() ((void)0)
        #endif

        #ifdef Py_GIL_DISABLED
        #define QSBR_QUIESCENT_STATE(tstate) _Py_qsbr_quiescent_state(((_PyThreadStateImpl *)tstate)->qsbr)
        #else
        #define QSBR_QUIESCENT_STATE(tstate)
        #endif


        /* Do interpreter dispatch accounting for tracing and instrumentation */
        #define DISPATCH() \
37.7G       { \
37.7G           assert(frame->stackpointer == NULL); \
37.7G           NEXTOPARG(); \
37.7G           PRE_DISPATCH_GOTO(); \
37.7G           DISPATCH_GOTO(); \
37.7G       }

        #define DISPATCH_SAME_OPARG() \
4.29M       { \
4.29M           opcode = next_instr->op.code; \
4.29M           PRE_DISPATCH_GOTO(); \
4.29M           DISPATCH_GOTO(); \
4.29M       }

        #define DISPATCH_INLINED(NEW_FRAME)                     \
1.04M       do {                                                \
1.04M           assert(tstate->interp->eval_frame == NULL);     \
1.04M           _PyFrame_SetStackPointer(frame, stack_pointer); \
1.04M           assert((NEW_FRAME)->previous == frame);         \
1.04M           frame = tstate->current_frame = (NEW_FRAME);     \
1.04M           CALL_STAT_INC(inlined_py_calls);                \
1.04M           JUMP_TO_LABEL(start_frame);                      \
0           } while (0)

        /* Tuple access macros */

        #ifndef Py_DEBUG
1.63G   #define GETITEM(v, i) PyTuple_GET_ITEM((v), (i))
        #else
        static inline PyObject *
        GETITEM(PyObject *v, Py_ssize_t i) {
            assert(PyTuple_Check(v));
            assert(i >= 0);
            assert(i < PyTuple_GET_SIZE(v));
            return PyTuple_GET_ITEM(v, i);
        }
        #endif

        /* Code access macros */

        /* The integer overflow is checked by an assertion below. */
32.3M   #define INSTR_OFFSET() ((int)(next_instr - _PyFrame_GetBytecode(frame)))
37.7G   #define NEXTOPARG()  do { \
37.7G           _Py_CODEUNIT word  = {.cache = FT_ATOMIC_LOAD_UINT16_RELAXED(*(uint16_t*)next_instr)}; \
37.7G           opcode = word.op.code; \
37.7G           oparg = word.op.arg; \
37.7G       } while (0)

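/* A minimal standalone sketch (not from ceval_macros.h) of what NEXTOPARG() is
 * decoding: each code unit is 16 bits wide, holding a one-byte opcode and a
 * one-byte oparg (wider arguments are built up with EXTENDED_ARG prefixes).
 * The union below only mirrors the spirit of _Py_CODEUNIT; the layout shown
 * here is illustrative, not CPython's exact definition.
 */
#include <stdint.h>
#include <stdio.h>

typedef union {
    uint16_t cache;       /* the whole 16-bit unit, as NEXTOPARG() loads it */
    struct {
        uint8_t code;     /* opcode */
        uint8_t arg;      /* oparg */
    } op;
} toy_codeunit;

int main(void)
{
    toy_codeunit unit;
    unit.op.code = 100;   /* some opcode number, chosen arbitrarily */
    unit.op.arg = 3;      /* its argument */

    /* Reading the two fields back is all the "decode" step amounts to. */
    printf("opcode=%u oparg=%u (raw word 0x%04x)\n",
           unit.op.code, unit.op.arg, unit.cache);
    return 0;
}
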
        /* JUMPBY makes the generator identify the instruction as a jump. SKIP_OVER is
         * for advancing to the next instruction, taking into account cache entries
         * and skipped instructions.
         */
5.19G   #define JUMPBY(x)       (next_instr += (x))
308M    #define SKIP_OVER(x)    (next_instr += (x))

        #define STACK_LEVEL()     ((int)(stack_pointer - _PyFrame_Stackbase(frame)))
        #define STACK_SIZE()      (_PyFrame_GetCode(frame)->co_stacksize)

        #define WITHIN_STACK_BOUNDS() \
           (frame->owner == FRAME_OWNED_BY_INTERPRETER || (STACK_LEVEL() >= 0 && STACK_LEVEL() <= STACK_SIZE()))

        /* Data access macros */
        #define FRAME_CO_CONSTS (_PyFrame_GetCode(frame)->co_consts)
        #define FRAME_CO_NAMES  (_PyFrame_GetCode(frame)->co_names)

        /* Local variable macros */

1.04M   #define LOCALS_ARRAY    (frame->localsplus)
18.1G   #define GETLOCAL(i)     (frame->localsplus[i])


        #ifdef Py_STATS
        #define UPDATE_MISS_STATS(INSTNAME)                              \
            do {                                                         \
                STAT_INC(opcode, miss);                                  \
                STAT_INC((INSTNAME), miss);                              \
                /* The counter is always the first cache entry: */       \
                if (ADAPTIVE_COUNTER_TRIGGERS(next_instr->cache)) {       \
                    STAT_INC((INSTNAME), deopt);                         \
                }                                                        \
            } while (0)
        #else
203M    #define UPDATE_MISS_STATS(INSTNAME) ((void)0)
        #endif


        // Try to lock an object in the free threading build, if it's not already
        // locked. Use with a DEOPT_IF() to deopt if the object is already locked.
        // These are no-ops in the default GIL build. The general pattern is:
        //
        // DEOPT_IF(!LOCK_OBJECT(op));
        // if (/* condition fails */) {
        //     UNLOCK_OBJECT(op);
        //     DEOPT_IF(true);
        //  }
        //  ...
        //  UNLOCK_OBJECT(op);
        //
        // NOTE: The object must be unlocked on every exit code path and you should
        // avoid any potentially escaping calls (like PyStackRef_CLOSE) while the
        // object is locked.
        #ifdef Py_GIL_DISABLED
        #  define LOCK_OBJECT(op) PyMutex_LockFast(&(_PyObject_CAST(op))->ob_mutex)
        #  define UNLOCK_OBJECT(op) PyMutex_Unlock(&(_PyObject_CAST(op))->ob_mutex)
        #else
471M    #  define LOCK_OBJECT(op) (1)
471M    #  define UNLOCK_OBJECT(op) ((void)0)
        #endif

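/* A minimal standalone sketch (not from ceval_macros.h) of the locking
 * discipline described in the comment above, using plain pthreads instead of
 * CPython's PyMutex and DEOPT_IF() machinery: try to take the lock, fall back
 * to a slower path if it is contended, and release it on every exit path.
 */
#include <pthread.h>
#include <stdbool.h>

typedef struct {
    pthread_mutex_t mutex;
    int value;
} guarded_counter;

/* Returns false when the fast path cannot be taken (lock contended or a
 * precondition fails), which is the moral equivalent of a deopt. */
static bool try_fast_increment(guarded_counter *c)
{
    if (pthread_mutex_trylock(&c->mutex) != 0) {
        return false;                     /* already locked: take the slow path */
    }
    if (c->value < 0) {                   /* some precondition check */
        pthread_mutex_unlock(&c->mutex);  /* unlock on this exit path too */
        return false;
    }
    c->value++;
    pthread_mutex_unlock(&c->mutex);      /* and on the success path */
    return true;
}

int main(void)
{
    guarded_counter c = { PTHREAD_MUTEX_INITIALIZER, 0 };
    return try_fast_increment(&c) ? 0 : 1;
}
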
698M    #define GLOBALS() frame->f_globals
392M    #define BUILTINS() frame->f_builtins
98.5k   #define LOCALS() frame->f_locals
        #define CONSTS() _PyFrame_GetCode(frame)->co_consts
        #define NAMES() _PyFrame_GetCode(frame)->co_names

        #define DTRACE_FUNCTION_ENTRY()  \
            if (PyDTrace_FUNCTION_ENTRY_ENABLED()) { \
                dtrace_function_entry(frame); \
            }

        /* This takes a uint16_t instead of a _Py_BackoffCounter,
         * because it is used directly on the cache entry in generated code,
         * which is always an integral type. */
        #define ADAPTIVE_COUNTER_TRIGGERS(COUNTER) \
1.18G       backoff_counter_triggers(forge_backoff_counter((COUNTER)))

        #define ADVANCE_ADAPTIVE_COUNTER(COUNTER) \
1.18G       do { \
1.18G           (COUNTER) = advance_backoff_counter((COUNTER)); \
1.18G       } while (0);

        #define PAUSE_ADAPTIVE_COUNTER(COUNTER) \
0           do { \
0               (COUNTER) = pause_backoff_counter((COUNTER)); \
0           } while (0);

        #ifdef ENABLE_SPECIALIZATION_FT
        /* Multiple threads may execute these concurrently if thread-local bytecode is
         * disabled and they all execute the main copy of the bytecode. Specialization
         * is disabled in that case so the value is unused, but the RMW cycle should be
         * free of data races.
         */
        #define RECORD_BRANCH_TAKEN(bitset, flag) \
2.59G       FT_ATOMIC_STORE_UINT16_RELAXED(       \
2.59G           bitset, (FT_ATOMIC_LOAD_UINT16_RELAXED(bitset) << 1) | (flag))
        #else
        #define RECORD_BRANCH_TAKEN(bitset, flag)
        #endif

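/* A minimal standalone sketch (not from ceval_macros.h) of the bookkeeping
 * RECORD_BRANCH_TAKEN() performs: the 16-bit bitset is a sliding window of
 * recent branch outcomes, with the newest outcome shifted in at bit 0.
 * The FT_ATOMIC_* load/store wrappers are dropped here for brevity.
 */
#include <stdint.h>
#include <stdio.h>

static uint16_t record_branch_taken(uint16_t bitset, int flag)
{
    /* The oldest outcome falls off the top; the newest goes in at the bottom. */
    return (uint16_t)((bitset << 1) | (flag ? 1 : 0));
}

int main(void)
{
    uint16_t history = 0;
    history = record_branch_taken(history, 1);   /* taken     -> ...001 */
    history = record_branch_taken(history, 0);   /* not taken -> ...010 */
    history = record_branch_taken(history, 1);   /* taken     -> ...101 */
    printf("history = 0x%04x\n", history);       /* prints 0x0005 */
    return 0;
}
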
        #define UNBOUNDLOCAL_ERROR_MSG \
0           "cannot access local variable '%s' where it is not associated with a value"
        #define UNBOUNDFREE_ERROR_MSG \
0           "cannot access free variable '%s' where it is not associated with a value" \
0           " in enclosing scope"
17      #define NAME_ERROR_MSG "name '%.200s' is not defined"

        // If a trace function sets a new f_lineno and
        // *then* raises, we use the destination when searching
        // for an exception handler, displaying the traceback, and so on
0       #define INSTRUMENTED_JUMP(src, dest, event) \
0       do { \
0           if (tstate->tracing) {\
0               next_instr = dest; \
0           } else { \
0               _PyFrame_SetStackPointer(frame, stack_pointer); \
0               next_instr = _Py_call_instrumentation_jump(this_instr, tstate, event, frame, src, dest); \
0               stack_pointer = _PyFrame_GetStackPointer(frame); \
0               if (next_instr == NULL) { \
0                   next_instr = (dest)+1; \
0                   JUMP_TO_LABEL(error); \
0               } \
0           } \
0       } while (0);


238M    static inline int _Py_EnterRecursivePy(PyThreadState *tstate) {
238M        return (tstate->py_recursion_remaining-- <= 0) &&
161             _Py_CheckRecursiveCallPy(tstate);
238M    }

712M    static inline void _Py_LeaveRecursiveCallPy(PyThreadState *tstate)  {
712M        tstate->py_recursion_remaining++;
712M    }

        /* Implementation of "macros" that modify the instruction pointer,
         * stack pointer, or frame pointer.
         * These need to treated differently by tier 1 and 2.
         * The Tier 1 version is here; Tier 2 is inlined in ceval.c. */

1.17G   #define LOAD_IP(OFFSET) do { \
1.17G           next_instr = frame->instr_ptr + (OFFSET); \
1.17G       } while (0)

        /* There's no STORE_IP(), it's inlined by the code generator. */

473M    #define LOAD_SP() \
473M    stack_pointer = _PyFrame_GetStackPointer(frame)

        #define SAVE_SP() \
        _PyFrame_SetStackPointer(frame, stack_pointer)

        /* Tier-switching macros. */

        #define TIER1_TO_TIER2(EXECUTOR)                        \
        do {                                                   \
            OPT_STAT_INC(traces_executed);                     \
            next_instr = _Py_jit_entry((EXECUTOR), frame, stack_pointer, tstate); \
            frame = tstate->current_frame;                     \
            stack_pointer = _PyFrame_GetStackPointer(frame);   \
            if (next_instr == NULL) {                          \
                /* gh-140104: The exception handler expects frame->instr_ptr
                    to after this_instr, not this_instr! */ \
                next_instr = frame->instr_ptr + 1;                 \
                JUMP_TO_LABEL(error);                          \
            }                                                  \
            DISPATCH();                                        \
        } while (0)

        #define TIER2_TO_TIER2(EXECUTOR) \
        do {                                                   \
            OPT_STAT_INC(traces_executed);                     \
            current_executor = (EXECUTOR);                     \
            goto tier2_start;                                  \
        } while (0)

        #define GOTO_TIER_ONE(TARGET)                                         \
            do                                                                \
            {                                                                 \
                tstate->current_executor = NULL;                              \
                OPT_HIST(trace_uop_execution_counter, trace_run_length_hist); \
                _PyFrame_SetStackPointer(frame, stack_pointer);               \
                return TARGET;                                                \
            } while (0)

        #define CURRENT_OPARG()    (next_uop[-1].oparg)
        #define CURRENT_OPERAND0() (next_uop[-1].operand0)
        #define CURRENT_OPERAND1() (next_uop[-1].operand1)
        #define CURRENT_TARGET()   (next_uop[-1].target)

        #define JUMP_TO_JUMP_TARGET() goto jump_to_jump_target
        #define JUMP_TO_ERROR() goto jump_to_error_target

        /* Stackref macros */

        /* How much scratch space to give stackref to PyObject* conversion. */
1.78G   #define MAX_STACKREF_SCRATCH 10

        #define STACKREFS_TO_PYOBJECTS(ARGS, ARG_COUNT, NAME) \
            /* +1 because vectorcall might use -1 to write self */ \
1.78G       PyObject *NAME##_temp[MAX_STACKREF_SCRATCH+1]; \
1.78G       PyObject **NAME = _PyObjectArray_FromStackRefArray(ARGS, ARG_COUNT, NAME##_temp + 1);

        #define STACKREFS_TO_PYOBJECTS_CLEANUP(NAME) \
            /* +1 because we +1 previously */ \
1.78G       _PyObjectArray_Free(NAME - 1, NAME##_temp);

1.78G   #define CONVERSION_FAILED(NAME) ((NAME) == NULL)

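/* A minimal standalone sketch (not from ceval_macros.h) of the "+1" trick used
 * by STACKREFS_TO_PYOBJECTS(): allocate one extra slot in front of the buffer
 * and hand out a pointer to element 1, so a callee following the vectorcall
 * convention may temporarily write at args[-1] without touching other memory.
 * The callee below is invented for illustration.
 */
#include <stdio.h>

#define SCRATCH 4

static void callee_writes_before_args(int *args, int nargs)
{
    args[-1] = 99;                /* legal only because the caller reserved this slot */
    for (int i = 0; i < nargs; i++) {
        printf("arg[%d] = %d\n", i, args[i]);
    }
}

int main(void)
{
    int scratch[SCRATCH + 1];     /* +1 slot reserved in front */
    int *args = scratch + 1;      /* callees only ever see args[0..SCRATCH-1] */

    args[0] = 10;
    args[1] = 20;
    callee_writes_before_args(args, 2);
    printf("reserved slot now holds %d\n", scratch[0]);   /* prints 99 */
    return 0;
}
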
        static inline int
3.07G   check_periodics(PyThreadState *tstate) {
3.07G       _Py_CHECK_EMSCRIPTEN_SIGNALS_PERIODICALLY();
3.07G       QSBR_QUIESCENT_STATE(tstate);
3.07G       if (_Py_atomic_load_uintptr_relaxed(&tstate->eval_breaker) & _PY_EVAL_EVENTS_MASK) {
31.3k           return _Py_HandlePending(tstate);
31.3k       }
3.07G       return 0;
3.07G   }