Coverage Report

Created: 2025-11-02 06:30

/src/cpython/Python/ceval_macros.h
Line | Count | Source
1
// Macros and other things needed by ceval.c, and bytecodes.c
2
3
/* Computed GOTOs, or
4
       the-optimization-commonly-but-improperly-known-as-"threaded code"
5
   using gcc's labels-as-values extension
6
   (http://gcc.gnu.org/onlinedocs/gcc/Labels-as-Values.html).
7
8
   The traditional bytecode evaluation loop uses a "switch" statement, which
9
   decent compilers will optimize as a single indirect branch instruction
10
   combined with a lookup table of jump addresses. However, since the
11
   indirect jump instruction is shared by all opcodes, the CPU will have a
12
   hard time making the right prediction for where to jump next (actually,
13
   it will always be wrong except in the uncommon case of a sequence of
14
   several identical opcodes).
15
16
   "Threaded code" in contrast, uses an explicit jump table and an explicit
17
   indirect jump instruction at the end of each opcode. Since the jump
18
   instruction is at a different address for each opcode, the CPU will make a
19
   separate prediction for each of these instructions, which is equivalent to
20
   predicting the second opcode of each opcode pair. These predictions have
21
   a much better chance of turning out valid, especially in small bytecode loops.
22
23
   A mispredicted branch on a modern CPU flushes the whole pipeline and
24
   can cost several CPU cycles (depending on the pipeline depth),
25
   and potentially many more instructions (depending on the pipeline width).
26
   A correctly predicted branch, however, is nearly free.
27
28
   At the time of this writing, the "threaded code" version is up to 15-20%
29
   faster than the normal "switch" version, depending on the compiler and the
30
   CPU architecture.
31
32
   NOTE: care must be taken that the compiler doesn't try to "optimize" the
33
   indirect jumps by sharing them between all opcodes. Such optimizations
34
   can be disabled on gcc by using the -fno-gcse flag (or possibly
35
   -fno-crossjumping).
36
*/
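/* Editor's illustrative sketch (not part of ceval_macros.h): the "threaded
 * code" dispatch described above, using GCC/Clang's labels-as-values
 * extension. The toy opcodes, table, and program are hypothetical. A
 * switch-based loop funnels every opcode through one shared indirect jump;
 * here each handler ends in its own indirect jump, so the CPU predicts each
 * one separately. */
#include <stdio.h>

static int run_threaded(const unsigned char *code) {
    static void *targets[] = { &&op_inc, &&op_dec, &&op_halt };
    int acc = 0;
#define DEMO_DISPATCH() goto *targets[*code++]   /* one indirect jump per handler */
    DEMO_DISPATCH();
op_inc:  acc++; DEMO_DISPATCH();
op_dec:  acc--; DEMO_DISPATCH();
op_halt:
#undef DEMO_DISPATCH
    return acc;
}

int main(void) {
    const unsigned char prog[] = { 0, 0, 1, 2 };   /* inc, inc, dec, halt */
    printf("%d\n", run_threaded(prog));            /* prints 1 */
    return 0;
}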
37
38
/* Use macros rather than inline functions, to make it as clear as possible
39
 * to the C compiler that the tracing check is a simple test then branch.
40
 * We want to be sure that the compiler knows this before it generates
41
 * the CFG.
42
 */
43
44
#ifdef WITH_DTRACE
45
#define OR_DTRACE_LINE | (PyDTrace_LINE_ENABLED() ? 255 : 0)
46
#else
47
#define OR_DTRACE_LINE
48
#endif
49
50
#ifdef HAVE_COMPUTED_GOTOS
51
    #ifndef USE_COMPUTED_GOTOS
52
    #define USE_COMPUTED_GOTOS 1
53
    #endif
54
#else
55
    #if defined(USE_COMPUTED_GOTOS) && USE_COMPUTED_GOTOS
56
    #error "Computed gotos are not supported on this compiler."
57
    #endif
58
    #undef USE_COMPUTED_GOTOS
59
    #define USE_COMPUTED_GOTOS 0
60
#endif
61
62
#ifdef Py_STATS
63
#define INSTRUCTION_STATS(op) \
64
    do { \
65
        OPCODE_EXE_INC(op); \
66
        if (_Py_stats) _Py_stats->opcode_stats[lastopcode].pair_count[op]++; \
67
        lastopcode = op; \
68
    } while (0)
69
#else
70
42.7G
#define INSTRUCTION_STATS(op) ((void)0)
71
#endif
72
73
#ifdef Py_STATS
74
#   define TAIL_CALL_PARAMS _PyInterpreterFrame *frame, _PyStackRef *stack_pointer, PyThreadState *tstate, _Py_CODEUNIT *next_instr, const void *instruction_funcptr_table, int oparg, int lastopcode
75
#   define TAIL_CALL_ARGS frame, stack_pointer, tstate, next_instr, instruction_funcptr_table, oparg, lastopcode
76
#else
77
#   define TAIL_CALL_PARAMS _PyInterpreterFrame *frame, _PyStackRef *stack_pointer, PyThreadState *tstate, _Py_CODEUNIT *next_instr, const void *instruction_funcptr_table, int oparg
78
#   define TAIL_CALL_ARGS frame, stack_pointer, tstate, next_instr, instruction_funcptr_table, oparg
79
#endif
80
81
#if _Py_TAIL_CALL_INTERP
82
#   if defined(__clang__) || defined(__GNUC__)
83
#       if !_Py__has_attribute(preserve_none) || !_Py__has_attribute(musttail)
84
#           error "This compiler does not have support for efficient tail calling."
85
#       endif
86
#   elif defined(_MSC_VER) && (_MSC_VER < 1950)
87
#       error "You need at least VS 2026 / PlatformToolset v145 for tail calling."
88
#   endif
89
90
    // Note: [[clang::musttail]] works for GCC 15, but not __attribute__((musttail)) at the moment.
91
#   define Py_MUSTTAIL [[clang::musttail]]
92
#   define Py_PRESERVE_NONE_CC __attribute__((preserve_none))
93
    Py_PRESERVE_NONE_CC typedef PyObject* (*py_tail_call_funcptr)(TAIL_CALL_PARAMS);
94
95
#   define TARGET(op) Py_PRESERVE_NONE_CC PyObject *_TAIL_CALL_##op(TAIL_CALL_PARAMS)
96
#   define DISPATCH_GOTO() \
97
        do { \
98
            Py_MUSTTAIL return (((py_tail_call_funcptr *)instruction_funcptr_table)[opcode])(TAIL_CALL_ARGS); \
99
        } while (0)
100
#   define JUMP_TO_LABEL(name) \
101
        do { \
102
            Py_MUSTTAIL return (_TAIL_CALL_##name)(TAIL_CALL_ARGS); \
103
        } while (0)
104
#   ifdef Py_STATS
105
#       define JUMP_TO_PREDICTED(name) \
106
            do { \
107
                Py_MUSTTAIL return (_TAIL_CALL_##name)(frame, stack_pointer, tstate, this_instr, instruction_funcptr_table, oparg, lastopcode); \
108
            } while (0)
109
#   else
110
#       define JUMP_TO_PREDICTED(name) \
111
            do { \
112
                Py_MUSTTAIL return (_TAIL_CALL_##name)(frame, stack_pointer, tstate, this_instr, instruction_funcptr_table, oparg); \
113
            } while (0)
114
#   endif
115
#    define LABEL(name) TARGET(name)
116
#elif USE_COMPUTED_GOTOS
117
42.7G
#  define TARGET(op) TARGET_##op:
118
43.1G
#  define DISPATCH_GOTO() goto *opcode_targets[opcode]
119
44.9M
#  define JUMP_TO_LABEL(name) goto name;
120
187M
#  define JUMP_TO_PREDICTED(name) goto PREDICTED_##name;
121
305M
#  define LABEL(name) name:
122
#else
123
#  define TARGET(op) case op: TARGET_##op:
124
#  define DISPATCH_GOTO() goto dispatch_opcode
125
#  define JUMP_TO_LABEL(name) goto name;
126
#  define JUMP_TO_PREDICTED(name) goto PREDICTED_##name;
127
#  define LABEL(name) name:
128
#endif
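/* Editor's illustrative sketch (not part of ceval_macros.h): the
 * _Py_TAIL_CALL_INTERP flavour of dispatch, where each handler tail-calls the
 * next one through a function-pointer table. All names are hypothetical, and
 * the [[clang::musttail]] line assumes a compiler and language mode that
 * accept the attribute (recent Clang, or GCC 15 as noted above); without it
 * the calls still work but the stack may grow. The real header additionally
 * applies preserve_none to cut register-save overhead. */
#include <stdio.h>

typedef struct demo_instr { int opcode; int oparg; } demo_instr;
typedef int (*demo_handler)(const demo_instr *ip, int acc);

static int demo_add(const demo_instr *ip, int acc);
static int demo_halt(const demo_instr *ip, int acc);

static const demo_handler demo_table[] = { demo_add, demo_halt };

static int demo_add(const demo_instr *ip, int acc) {
    acc += ip->oparg;
    ip++;                                   /* NEXTOPARG() + DISPATCH_GOTO() analogue */
    [[clang::musttail]] return demo_table[ip->opcode](ip, acc);
}

static int demo_halt(const demo_instr *ip, int acc) {
    (void)ip;
    return acc;                             /* leave the "interpreter" */
}

int main(void) {
    const demo_instr prog[] = { {0, 2}, {0, 3}, {1, 0} };   /* add 2, add 3, halt */
    printf("%d\n", demo_table[prog[0].opcode](prog, 0));    /* prints 5 */
    return 0;
}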
129
130
/* PRE_DISPATCH_GOTO() does lltrace if enabled. Normally a no-op */
131
#ifdef Py_DEBUG
132
#define PRE_DISPATCH_GOTO() if (frame->lltrace >= 5) { \
133
    lltrace_instruction(frame, stack_pointer, next_instr, opcode, oparg); }
134
#else
135
43.1G
#define PRE_DISPATCH_GOTO() ((void)0)
136
#endif
137
138
#ifdef Py_DEBUG
139
#define LLTRACE_RESUME_FRAME() \
140
do { \
141
    _PyFrame_SetStackPointer(frame, stack_pointer); \
142
    int lltrace = maybe_lltrace_resume_frame(frame, GLOBALS()); \
143
    stack_pointer = _PyFrame_GetStackPointer(frame); \
144
    frame->lltrace = lltrace; \
145
} while (0)
146
#else
147
1.32G
#define LLTRACE_RESUME_FRAME() ((void)0)
148
#endif
149
150
#ifdef Py_GIL_DISABLED
151
#define QSBR_QUIESCENT_STATE(tstate) _Py_qsbr_quiescent_state(((_PyThreadStateImpl *)tstate)->qsbr)
152
#else
153
#define QSBR_QUIESCENT_STATE(tstate)
154
#endif
155
156
157
/* Do interpreter dispatch accounting for tracing and instrumentation */
158
#define DISPATCH() \
159
43.0G
    { \
160
43.0G
        assert(frame->stackpointer == NULL); \
161
43.0G
        NEXTOPARG(); \
162
43.0G
        PRE_DISPATCH_GOTO(); \
163
43.0G
        DISPATCH_GOTO(); \
164
43.0G
    }
165
166
#define DISPATCH_SAME_OPARG() \
167
3.95M
    { \
168
3.95M
        opcode = next_instr->op.code; \
169
3.95M
        PRE_DISPATCH_GOTO(); \
170
3.95M
        DISPATCH_GOTO(); \
171
3.95M
    }
172
173
#define DISPATCH_INLINED(NEW_FRAME)                     \
174
949k
    do {                                                \
175
949k
        assert(tstate->interp->eval_frame == NULL);     \
176
949k
        _PyFrame_SetStackPointer(frame, stack_pointer); \
177
949k
        assert((NEW_FRAME)->previous == frame);         \
178
949k
        frame = tstate->current_frame = (NEW_FRAME);     \
179
949k
        CALL_STAT_INC(inlined_py_calls);                \
180
949k
        JUMP_TO_LABEL(start_frame);                      \
181
0
    } while (0)
182
183
/* Tuple access macros */
184
185
#ifndef Py_DEBUG
186
1.69G
#define GETITEM(v, i) PyTuple_GET_ITEM((v), (i))
187
#else
188
static inline PyObject *
189
GETITEM(PyObject *v, Py_ssize_t i) {
190
    assert(PyTuple_Check(v));
191
    assert(i >= 0);
192
    assert(i < PyTuple_GET_SIZE(v));
193
    return PyTuple_GET_ITEM(v, i);
194
}
195
#endif
196
197
/* Code access macros */
198
199
/* The integer overflow is checked by an assertion below. */
200
31.6M
#define INSTR_OFFSET() ((int)(next_instr - _PyFrame_GetBytecode(frame)))
201
43.0G
#define NEXTOPARG()  do { \
202
43.0G
        _Py_CODEUNIT word  = {.cache = FT_ATOMIC_LOAD_UINT16_RELAXED(*(uint16_t*)next_instr)}; \
203
43.0G
        opcode = word.op.code; \
204
43.0G
        oparg = word.op.arg; \
205
43.0G
    } while (0)
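/* Editor's illustrative sketch (not part of ceval_macros.h): what NEXTOPARG()
 * decodes. demo_codeunit mirrors the spirit of _Py_CODEUNIT (defined
 * elsewhere in CPython): a 16-bit unit viewed either as one cache value or as
 * an opcode byte plus an oparg byte. The opcode number used here is made up. */
#include <stdint.h>
#include <stdio.h>

typedef union {
    uint16_t cache;                              /* whole unit, as the relaxed 16-bit load sees it */
    struct { uint8_t code; uint8_t arg; } op;    /* opcode/oparg view */
} demo_codeunit;

int main(void) {
    demo_codeunit word;
    word.op.code = 100;                          /* hypothetical opcode */
    word.op.arg = 3;                             /* its oparg */
    printf("unit=0x%04x opcode=%d oparg=%d\n",
           (unsigned)word.cache, word.op.code, word.op.arg);
    return 0;
}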
206
207
/* JUMPBY makes the generator identify the instruction as a jump. SKIP_OVER is
208
 * for advancing to the next instruction, taking into account cache entries
209
 * and skipped instructions.
210
 */
211
6.26G
#define JUMPBY(x)       (next_instr += (x))
212
294M
#define SKIP_OVER(x)    (next_instr += (x))
213
214
#define STACK_LEVEL()     ((int)(stack_pointer - _PyFrame_Stackbase(frame)))
215
#define STACK_SIZE()      (_PyFrame_GetCode(frame)->co_stacksize)
216
217
#define WITHIN_STACK_BOUNDS() \
218
   (frame->owner == FRAME_OWNED_BY_INTERPRETER || (STACK_LEVEL() >= 0 && STACK_LEVEL() <= STACK_SIZE()))
219
220
/* Data access macros */
221
#define FRAME_CO_CONSTS (_PyFrame_GetCode(frame)->co_consts)
222
#define FRAME_CO_NAMES  (_PyFrame_GetCode(frame)->co_names)
223
224
/* Local variable macros */
225
226
970k
#define LOCALS_ARRAY    (frame->localsplus)
227
20.8G
#define GETLOCAL(i)     (frame->localsplus[i])
228
229
230
#ifdef Py_STATS
231
#define UPDATE_MISS_STATS(INSTNAME)                              \
232
    do {                                                         \
233
        STAT_INC(opcode, miss);                                  \
234
        STAT_INC((INSTNAME), miss);                              \
235
        /* The counter is always the first cache entry: */       \
236
        if (ADAPTIVE_COUNTER_TRIGGERS(next_instr->cache)) {       \
237
            STAT_INC((INSTNAME), deopt);                         \
238
        }                                                        \
239
    } while (0)
240
#else
241
187M
#define UPDATE_MISS_STATS(INSTNAME) ((void)0)
242
#endif
243
244
245
// Try to lock an object in the free threading build, if it's not already
246
// locked. Use with a DEOPT_IF() to deopt if the object is already locked.
247
// These are no-ops in the default GIL build. The general pattern is:
248
//
249
// DEOPT_IF(!LOCK_OBJECT(op));
250
// if (/* condition fails */) {
251
//     UNLOCK_OBJECT(op);
252
//     DEOPT_IF(true);
253
//  }
254
//  ...
255
//  UNLOCK_OBJECT(op);
256
//
257
// NOTE: The object must be unlocked on every exit code path and you should
258
// avoid any potentially escaping calls (like PyStackRef_CLOSE) while the
259
// object is locked.
260
#ifdef Py_GIL_DISABLED
261
#  define LOCK_OBJECT(op) PyMutex_LockFast(&(_PyObject_CAST(op))->ob_mutex)
262
#  define UNLOCK_OBJECT(op) PyMutex_Unlock(&(_PyObject_CAST(op))->ob_mutex)
263
#else
264
440M
#  define LOCK_OBJECT(op) (1)
265
440M
#  define UNLOCK_OBJECT(op) ((void)0)
266
#endif
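/* Editor's illustrative sketch (not part of ceval_macros.h): the try-lock /
 * deopt pattern described in the comment above, with a hypothetical demo_obj
 * and a plain flag standing in for PyMutex_LockFast. Returning DEMO_DEOPT
 * plays the role of DEOPT_IF(). */
#include <stdbool.h>
#include <stdio.h>

typedef struct { bool locked; int value; } demo_obj;

static bool demo_trylock(demo_obj *op) {         /* LOCK_OBJECT() analogue */
    if (op->locked) return false;
    op->locked = true;
    return true;
}
static void demo_unlock(demo_obj *op) {          /* UNLOCK_OBJECT() analogue */
    op->locked = false;
}

enum { DEMO_OK, DEMO_DEOPT };

static int specialized_op(demo_obj *op) {
    if (!demo_trylock(op)) {
        return DEMO_DEOPT;                       /* DEOPT_IF(!LOCK_OBJECT(op)) */
    }
    if (op->value < 0) {                         /* guard fails: unlock, then deopt */
        demo_unlock(op);
        return DEMO_DEOPT;
    }
    op->value++;                                 /* the specialized fast path */
    demo_unlock(op);                             /* unlocked on every exit path */
    return DEMO_OK;
}

int main(void) {
    demo_obj o = { .locked = false, .value = 1 };
    printf("%s\n", specialized_op(&o) == DEMO_OK ? "fast path" : "deopt");
    return 0;
}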
267
268
667M
#define GLOBALS() frame->f_globals
269
387M
#define BUILTINS() frame->f_builtins
270
77.9k
#define LOCALS() frame->f_locals
271
#define CONSTS() _PyFrame_GetCode(frame)->co_consts
272
#define NAMES() _PyFrame_GetCode(frame)->co_names
273
274
#define DTRACE_FUNCTION_ENTRY()  \
275
    if (PyDTrace_FUNCTION_ENTRY_ENABLED()) { \
276
        dtrace_function_entry(frame); \
277
    }
278
279
/* This takes a uint16_t instead of a _Py_BackoffCounter,
280
 * because it is used directly on the cache entry in generated code,
281
 * which is always an integral type. */
282
#define ADAPTIVE_COUNTER_TRIGGERS(COUNTER) \
283
1.12G
    backoff_counter_triggers(forge_backoff_counter((COUNTER)))
284
285
#define ADVANCE_ADAPTIVE_COUNTER(COUNTER) \
286
1.12G
    do { \
287
1.12G
        (COUNTER) = advance_backoff_counter((COUNTER)); \
288
1.12G
    } while (0);
289
290
#define PAUSE_ADAPTIVE_COUNTER(COUNTER) \
291
0
    do { \
292
0
        (COUNTER) = pause_backoff_counter((COUNTER)); \
293
0
    } while (0);
294
295
#ifdef ENABLE_SPECIALIZATION_FT
296
/* Multiple threads may execute these concurrently if thread-local bytecode is
297
 * disabled and they all execute the main copy of the bytecode. Specialization
298
 * is disabled in that case so the value is unused, but the RMW cycle should be
299
 * free of data races.
300
 */
301
#define RECORD_BRANCH_TAKEN(bitset, flag) \
302
3.13G
    FT_ATOMIC_STORE_UINT16_RELAXED(       \
303
3.13G
        bitset, (FT_ATOMIC_LOAD_UINT16_RELAXED(bitset) << 1) | (flag))
304
#else
305
#define RECORD_BRANCH_TAKEN(bitset, flag)
306
#endif
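/* Editor's illustrative sketch (not part of ceval_macros.h): what
 * RECORD_BRANCH_TAKEN() computes, with the atomics stripped out. The 16-bit
 * bitset keeps the most recent outcomes of one branch instruction, newest in
 * bit 0; the example history below is made up. */
#include <stdint.h>
#include <stdio.h>

static uint16_t record_branch(uint16_t bitset, int taken) {
    return (uint16_t)((bitset << 1) | (taken ? 1 : 0));
}

int main(void) {
    uint16_t history = 0;
    int outcomes[] = { 1, 1, 0, 1 };             /* taken, taken, not taken, taken */
    for (int i = 0; i < 4; i++) {
        history = record_branch(history, outcomes[i]);
    }
    printf("history = 0x%04x\n", (unsigned)history);   /* 0b1101 -> 0x000d */
    return 0;
}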
307
308
#define UNBOUNDLOCAL_ERROR_MSG \
309
0
    "cannot access local variable '%s' where it is not associated with a value"
310
#define UNBOUNDFREE_ERROR_MSG \
311
0
    "cannot access free variable '%s' where it is not associated with a value" \
312
0
    " in enclosing scope"
313
1
#define NAME_ERROR_MSG "name '%.200s' is not defined"
314
315
// If a trace function sets a new f_lineno and
316
// *then* raises, we use the destination when searching
317
// for an exception handler, displaying the traceback, and so on.
318
0
#define INSTRUMENTED_JUMP(src, dest, event) \
319
0
do { \
320
0
    if (tstate->tracing) {\
321
0
        next_instr = dest; \
322
0
    } else { \
323
0
        _PyFrame_SetStackPointer(frame, stack_pointer); \
324
0
        next_instr = _Py_call_instrumentation_jump(this_instr, tstate, event, frame, src, dest); \
325
0
        stack_pointer = _PyFrame_GetStackPointer(frame); \
326
0
        if (next_instr == NULL) { \
327
0
            next_instr = (dest)+1; \
328
0
            JUMP_TO_LABEL(error); \
329
0
        } \
330
0
    } \
331
0
} while (0);
332
333
334
229M
static inline int _Py_EnterRecursivePy(PyThreadState *tstate) {
335
229M
    return (tstate->py_recursion_remaining-- <= 0) &&
336
162
        _Py_CheckRecursiveCallPy(tstate);
337
229M
}
338
339
674M
static inline void _Py_LeaveRecursiveCallPy(PyThreadState *tstate)  {
340
674M
    tstate->py_recursion_remaining++;
341
674M
}
342
343
/* Implementation of "macros" that modify the instruction pointer,
344
 * stack pointer, or frame pointer.
345
 * These need to be treated differently by tier 1 and 2.
346
 * The Tier 1 version is here; Tier 2 is inlined in ceval.c. */
347
348
1.10G
#define LOAD_IP(OFFSET) do { \
349
1.10G
        next_instr = frame->instr_ptr + (OFFSET); \
350
1.10G
    } while (0)
351
352
/* There's no STORE_IP(); it's inlined by the code generator. */
353
354
444M
#define LOAD_SP() \
355
444M
stack_pointer = _PyFrame_GetStackPointer(frame)
356
357
#define SAVE_SP() \
358
_PyFrame_SetStackPointer(frame, stack_pointer)
359
360
/* Tier-switching macros. */
361
362
#define TIER1_TO_TIER2(EXECUTOR)                        \
363
do {                                                   \
364
    OPT_STAT_INC(traces_executed);                     \
365
    next_instr = _Py_jit_entry((EXECUTOR), frame, stack_pointer, tstate); \
366
    frame = tstate->current_frame;                     \
367
    stack_pointer = _PyFrame_GetStackPointer(frame);   \
368
    if (next_instr == NULL) {                          \
369
        /* gh-140104: The exception handler expects frame->instr_ptr
370
            to point after this_instr, not at this_instr! */ \
371
        next_instr = frame->instr_ptr + 1;                 \
372
        JUMP_TO_LABEL(error);                          \
373
    }                                                  \
374
    DISPATCH();                                        \
375
} while (0)
376
377
#define TIER2_TO_TIER2(EXECUTOR) \
378
do {                                                   \
379
    OPT_STAT_INC(traces_executed);                     \
380
    current_executor = (EXECUTOR);                     \
381
    goto tier2_start;                                  \
382
} while (0)
383
384
#define GOTO_TIER_ONE(TARGET)                                         \
385
    do                                                                \
386
    {                                                                 \
387
        tstate->current_executor = NULL;                              \
388
        OPT_HIST(trace_uop_execution_counter, trace_run_length_hist); \
389
        _PyFrame_SetStackPointer(frame, stack_pointer);               \
390
        return TARGET;                                                \
391
    } while (0)
392
393
#define CURRENT_OPARG()    (next_uop[-1].oparg)
394
#define CURRENT_OPERAND0() (next_uop[-1].operand0)
395
#define CURRENT_OPERAND1() (next_uop[-1].operand1)
396
#define CURRENT_TARGET()   (next_uop[-1].target)
397
398
#define JUMP_TO_JUMP_TARGET() goto jump_to_jump_target
399
#define JUMP_TO_ERROR() goto jump_to_error_target
400
401
/* Stackref macros */
402
403
/* How much scratch space to give the stackref-to-PyObject* conversion. */
404
2.00G
#define MAX_STACKREF_SCRATCH 10
405
406
#define STACKREFS_TO_PYOBJECTS(ARGS, ARG_COUNT, NAME) \
407
    /* +1 because vectorcall might use -1 to write self */ \
408
2.00G
    PyObject *NAME##_temp[MAX_STACKREF_SCRATCH+1]; \
409
2.00G
    PyObject **NAME = _PyObjectArray_FromStackRefArray(ARGS, ARG_COUNT, NAME##_temp + 1);
410
411
#define STACKREFS_TO_PYOBJECTS_CLEANUP(NAME) \
412
    /* NAME - 1 undoes the +1 above */ \
413
2.00G
    _PyObjectArray_Free(NAME - 1, NAME##_temp);
414
415
2.00G
#define CONVERSION_FAILED(NAME) ((NAME) == NULL)
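/* Editor's illustrative sketch (not part of ceval_macros.h): why the scratch
 * buffer above reserves one extra slot in front of the converted arguments. A
 * vectorcall-style callee that wants to prepend self can write into index -1
 * of the array it was handed instead of allocating a new one. All names below
 * are hypothetical. */
#include <stdio.h>

#define DEMO_MAX_SCRATCH 10

static void demo_call(const char **args, int nargs) {
    const char **with_self = args - 1;           /* the spare slot at index -1 */
    with_self[0] = "self";                       /* prepend without copying the args */
    for (int i = 0; i <= nargs; i++) {
        printf("arg[%d] = %s\n", i, with_self[i]);
    }
}

int main(void) {
    /* Mirrors NAME##_temp[MAX_STACKREF_SCRATCH + 1]: slot 0 is kept spare. */
    const char *scratch[DEMO_MAX_SCRATCH + 1];
    const char **args = scratch + 1;             /* callers only ever see args[0..nargs) */
    args[0] = "x";
    args[1] = "y";
    demo_call(args, 2);
    return 0;
}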
416
417
static inline int
418
3.48G
check_periodics(PyThreadState *tstate) {
419
3.48G
    _Py_CHECK_EMSCRIPTEN_SIGNALS_PERIODICALLY();
420
3.48G
    QSBR_QUIESCENT_STATE(tstate);
421
3.48G
    if (_Py_atomic_load_uintptr_relaxed(&tstate->eval_breaker) & _PY_EVAL_EVENTS_MASK) {
422
30.0k
        return _Py_HandlePending(tstate);
423
30.0k
    }
424
3.48G
    return 0;
425
3.48G
}
426