Coverage Report

Created: 2026-04-20 06:11

next uncovered line (L), next uncovered region (R), next uncovered branch (B)
/src/cpython/Python/optimizer.c
Line
Count
Source
1
#include "Python.h"
2
3
#ifdef _Py_TIER2
4
5
#include "opcode.h"
6
#include "pycore_interp.h"
7
#include "pycore_backoff.h"
8
#include "pycore_bitutils.h"        // _Py_popcount32()
9
#include "pycore_ceval.h"       // _Py_set_eval_breaker_bit
10
#include "pycore_code.h"            // _Py_GetBaseCodeUnit
11
#include "pycore_interpframe.h"
12
#include "pycore_object.h"          // _PyObject_GC_UNTRACK()
13
#include "pycore_opcode_metadata.h" // _PyOpcode_OpName[]
14
#include "pycore_opcode_utils.h"  // MAX_REAL_OPCODE
15
#include "pycore_optimizer.h"     // _Py_uop_analyze_and_optimize()
16
#include "pycore_pystate.h"       // _PyInterpreterState_GET()
17
#include "pycore_tuple.h"         // _PyTuple_FromArraySteal
18
#include "pycore_unicodeobject.h" // _PyUnicode_FromASCII
19
#include "pycore_uop_ids.h"
20
#include "pycore_jit.h"
21
#include <stdbool.h>
22
#include <stdint.h>
23
#include <stddef.h>
24
25
#define NEED_OPCODE_METADATA
26
#include "pycore_uop_metadata.h" // Uop tables
27
#undef NEED_OPCODE_METADATA
28
29
#define MAX_EXECUTORS_SIZE 256
30
31
// Trace too short, no progress:
32
// _START_EXECUTOR
33
// _MAKE_WARM
34
// _CHECK_VALIDITY
35
// _SET_IP
36
// is 4-5 instructions.
37
#define CODE_SIZE_NO_PROGRESS 5
38
// We start with _START_EXECUTOR, _MAKE_WARM
39
#define CODE_SIZE_EMPTY 2
40
41
#define _PyExecutorObject_CAST(op)  ((_PyExecutorObject *)(op))
42
43
#ifndef Py_GIL_DISABLED
44
static bool
45
has_space_for_executor(PyCodeObject *code, _Py_CODEUNIT *instr)
46
{
47
    if (code == (PyCodeObject *)&_Py_InitCleanup) {
48
        return false;
49
    }
50
    if (instr->op.code == ENTER_EXECUTOR) {
51
        return true;
52
    }
53
    if (code->co_executors == NULL) {
54
        return true;
55
    }
56
    return code->co_executors->size < MAX_EXECUTORS_SIZE;
57
}
58
59
static int32_t
60
get_index_for_executor(PyCodeObject *code, _Py_CODEUNIT *instr)
61
{
62
    if (instr->op.code == ENTER_EXECUTOR) {
63
        return instr->op.arg;
64
    }
65
    _PyExecutorArray *old = code->co_executors;
66
    int size = 0;
67
    int capacity = 0;
68
    if (old != NULL) {
69
        size = old->size;
70
        capacity = old->capacity;
71
        assert(size < MAX_EXECUTORS_SIZE);
72
    }
73
    assert(size <= capacity);
74
    if (size == capacity) {
75
        /* Array is full. Grow array */
76
        int new_capacity = capacity ? capacity * 2 : 4;
77
        _PyExecutorArray *new = PyMem_Realloc(
78
            old,
79
            offsetof(_PyExecutorArray, executors) +
80
            new_capacity * sizeof(_PyExecutorObject *));
81
        if (new == NULL) {
82
            return -1;
83
        }
84
        new->capacity = new_capacity;
85
        new->size = size;
86
        code->co_executors = new;
87
    }
88
    assert(size < code->co_executors->capacity);
89
    return size;
90
}
91
92
static void
93
insert_executor(PyCodeObject *code, _Py_CODEUNIT *instr, int index, _PyExecutorObject *executor)
94
{
95
    Py_INCREF(executor);
96
    if (instr->op.code == ENTER_EXECUTOR) {
97
        assert(index == instr->op.arg);
98
        _Py_ExecutorDetach(code->co_executors->executors[index]);
99
    }
100
    else {
101
        assert(code->co_executors->size == index);
102
        assert(code->co_executors->capacity > index);
103
        code->co_executors->size++;
104
    }
105
    executor->vm_data.opcode = instr->op.code;
106
    executor->vm_data.oparg = instr->op.arg;
107
    executor->vm_data.code = code;
108
    executor->vm_data.index = (int)(instr - _PyCode_CODE(code));
109
    code->co_executors->executors[index] = executor;
110
    assert(index < MAX_EXECUTORS_SIZE);
111
    instr->op.code = ENTER_EXECUTOR;
112
    instr->op.arg = index;
113
}
114
#endif // Py_GIL_DISABLED
115
116
static _PyExecutorObject *
117
make_executor_from_uops(_PyThreadStateImpl *tstate, _PyUOpInstruction *buffer, int length, const _PyBloomFilter *dependencies);
118
119
static int
120
uop_optimize(_PyInterpreterFrame *frame, PyThreadState *tstate,
121
             _PyExecutorObject **exec_ptr,
122
             bool progress_needed);
123
124
/* Returns 1 if optimized, 0 if not optimized, and -1 for an error.
125
 * If optimized, *executor_ptr contains a new reference to the executor
126
 */
127
// gh-137573: inlining this function causes stack overflows
128
Py_NO_INLINE int
129
_PyOptimizer_Optimize(
130
    _PyInterpreterFrame *frame, PyThreadState *tstate)
131
{
132
    _PyThreadStateImpl *_tstate = (_PyThreadStateImpl *)tstate;
133
    PyInterpreterState *interp = _PyInterpreterState_GET();
134
    if (!interp->jit) {
135
        // gh-140936: It is possible that interp->jit will become false during
136
        // interpreter finalization. However, the specialized JUMP_BACKWARD_JIT
137
        // instruction may still be present. In this case, we should
138
        // return immediately without optimization.
139
        return 0;
140
    }
141
    _PyExecutorObject *prev_executor = _tstate->jit_tracer_state->initial_state.executor;
142
    if (prev_executor != NULL && !prev_executor->vm_data.valid) {
143
        // gh-143604: If we are a side exit executor and the original executor is no
144
        // longer valid, don't compile to prevent a reference leak.
145
        return 0;
146
    }
147
    assert(!interp->compiling);
148
    assert(_tstate->jit_tracer_state->initial_state.stack_depth >= 0);
149
#ifndef Py_GIL_DISABLED
150
    assert(_tstate->jit_tracer_state->initial_state.func != NULL);
151
    interp->compiling = true;
152
    // The first executor in a chain and the MAX_CHAIN_DEPTH'th executor *must*
153
    // make progress in order to avoid infinite loops or excessively-long
154
    // side-exit chains. We can only insert the executor into the bytecode if
155
    // this is true, since a deopt won't infinitely re-enter the executor:
156
    int chain_depth = _tstate->jit_tracer_state->initial_state.chain_depth;
157
    chain_depth %= MAX_CHAIN_DEPTH;
158
    bool progress_needed = chain_depth == 0;
159
    PyCodeObject *code = (PyCodeObject *)_tstate->jit_tracer_state->initial_state.code;
160
    _Py_CODEUNIT *start = _tstate->jit_tracer_state->initial_state.start_instr;
161
    if (progress_needed && !has_space_for_executor(code, start)) {
162
        interp->compiling = false;
163
        return 0;
164
    }
165
    _PyExecutorObject *executor;
166
    int err = uop_optimize(frame, tstate, &executor, progress_needed);
167
    if (err <= 0) {
168
        interp->compiling = false;
169
        return err;
170
    }
171
    assert(executor != NULL);
172
    if (progress_needed) {
173
        int index = get_index_for_executor(code, start);
174
        if (index < 0) {
175
            /* Out of memory. Don't raise and assume that the
176
             * error will show up elsewhere.
177
             *
178
             * If an optimizer has already produced an executor,
179
             * it might get confused by the executor disappearing,
180
             * but there is not much we can do about that here. */
181
            Py_DECREF(executor);
182
            interp->compiling = false;
183
            return 0;
184
        }
185
        insert_executor(code, start, index, executor);
186
    }
187
    executor->vm_data.chain_depth = chain_depth;
188
    assert(executor->vm_data.valid);
189
    _PyExitData *exit = _tstate->jit_tracer_state->initial_state.exit;
190
    if (exit != NULL && !progress_needed) {
191
        exit->executor = executor;
192
    }
193
    else {
194
        // An executor inserted into the code object now has a strong reference
195
        // to it from the code object. Thus, we don't need this reference anymore.
196
        Py_DECREF(executor);
197
    }
198
    interp->compiling = false;
199
    return 1;
200
#else
201
    return 0;
202
#endif
203
}
204
205
static _PyExecutorObject *
206
get_executor_lock_held(PyCodeObject *code, int offset)
207
{
208
    int code_len = (int)Py_SIZE(code);
209
    for (int i = 0 ; i < code_len;) {
210
        if (_PyCode_CODE(code)[i].op.code == ENTER_EXECUTOR && i*2 == offset) {
211
            int oparg = _PyCode_CODE(code)[i].op.arg;
212
            _PyExecutorObject *res = code->co_executors->executors[oparg];
213
            Py_INCREF(res);
214
            return res;
215
        }
216
        i += _PyInstruction_GetLength(code, i);
217
    }
218
    PyErr_SetString(PyExc_ValueError, "no executor at given byte offset");
219
    return NULL;
220
}
221
222
_PyExecutorObject *
223
_Py_GetExecutor(PyCodeObject *code, int offset)
224
{
225
    _PyExecutorObject *executor;
226
    Py_BEGIN_CRITICAL_SECTION(code);
227
    executor = get_executor_lock_held(code, offset);
228
    Py_END_CRITICAL_SECTION();
229
    return executor;
230
}
231
232
static PyObject *
233
is_valid(PyObject *self, PyObject *Py_UNUSED(ignored))
234
{
235
    return PyBool_FromLong(((_PyExecutorObject *)self)->vm_data.valid);
236
}
237
238
static PyObject *
239
get_opcode(PyObject *self, PyObject *Py_UNUSED(ignored))
240
{
241
    return PyLong_FromUnsignedLong(((_PyExecutorObject *)self)->vm_data.opcode);
242
}
243
244
static PyObject *
245
get_oparg(PyObject *self, PyObject *Py_UNUSED(ignored))
246
{
247
    return PyLong_FromUnsignedLong(((_PyExecutorObject *)self)->vm_data.oparg);
248
}
249
250
///////////////////// Experimental UOp Optimizer /////////////////////
251
252
static int executor_clear(PyObject *executor);
253
254
void
255
_PyExecutor_Free(_PyExecutorObject *self)
256
{
257
#ifdef _Py_JIT
258
    _PyJIT_Free(self);
259
#endif
260
    PyObject_GC_Del(self);
261
}
262
263
static void executor_invalidate(PyObject *op);
264
265
static void
266
executor_clear_exits(_PyExecutorObject *executor)
267
{
268
    _PyExecutorObject *cold = _PyExecutor_GetColdExecutor();
269
    _PyExecutorObject *cold_dynamic = _PyExecutor_GetColdDynamicExecutor();
270
    for (uint32_t i = 0; i < executor->exit_count; i++) {
271
        _PyExitData *exit = &executor->exits[i];
272
        exit->temperature = initial_unreachable_backoff_counter();
273
        _PyExecutorObject *old = executor->exits[i].executor;
274
        exit->executor = exit->is_dynamic ? cold_dynamic : cold;
275
        Py_DECREF(old);
276
    }
277
}
278
279
280
void
281
_Py_ClearExecutorDeletionList(PyInterpreterState *interp)
282
{
283
    if (interp->executor_deletion_list_head == NULL) {
284
        return;
285
    }
286
    _PyRuntimeState *runtime = &_PyRuntime;
287
    HEAD_LOCK(runtime);
288
    PyThreadState* ts = PyInterpreterState_ThreadHead(interp);
289
    while (ts) {
290
        _PyExecutorObject *current = (_PyExecutorObject *)ts->current_executor;
291
        Py_XINCREF(current);
292
        ts = ts->next;
293
    }
294
    HEAD_UNLOCK(runtime);
295
    _PyExecutorObject *keep_list = NULL;
296
    do {
297
        _PyExecutorObject *exec = interp->executor_deletion_list_head;
298
        interp->executor_deletion_list_head = exec->vm_data.links.next;
299
        if (Py_REFCNT(exec) == 0) {
300
            _PyExecutor_Free(exec);
301
        } else {
302
            exec->vm_data.links.next = keep_list;
303
            keep_list = exec;
304
        }
305
    } while (interp->executor_deletion_list_head != NULL);
306
    interp->executor_deletion_list_head = keep_list;
307
    HEAD_LOCK(runtime);
308
    ts = PyInterpreterState_ThreadHead(interp);
309
    while (ts) {
310
        _PyExecutorObject *current = (_PyExecutorObject *)ts->current_executor;
311
        if (current != NULL) {
312
            Py_DECREF((PyObject *)current);
313
        }
314
        ts = ts->next;
315
    }
316
    HEAD_UNLOCK(runtime);
317
}
318
319
static void
320
add_to_pending_deletion_list(_PyExecutorObject *self)
321
{
322
    if (self->vm_data.pending_deletion) {
323
        return;
324
    }
325
    self->vm_data.pending_deletion = 1;
326
    PyInterpreterState *interp = PyInterpreterState_Get();
327
    self->vm_data.links.previous = NULL;
328
    self->vm_data.links.next = interp->executor_deletion_list_head;
329
    interp->executor_deletion_list_head = self;
330
}
331
332
static void
333
uop_dealloc(PyObject *op) {
334
    _PyExecutorObject *self = _PyExecutorObject_CAST(op);
335
    executor_invalidate(op);
336
    assert(self->vm_data.code == NULL);
337
    add_to_pending_deletion_list(self);
338
}
339
340
const char *
341
_PyUOpName(int index)
342
{
343
    if (index < 0 || index > MAX_UOP_REGS_ID) {
344
        return NULL;
345
    }
346
    return _PyOpcode_uop_name[index];
347
}
348
349
#ifdef Py_DEBUG
350
void
351
_PyUOpPrint(const _PyUOpInstruction *uop)
352
{
353
    const char *name = _PyUOpName(uop->opcode);
354
    if (name == NULL) {
355
        printf("<uop %d>", uop->opcode);
356
    }
357
    else {
358
        printf("%s", name);
359
    }
360
    switch(uop->format) {
361
        case UOP_FORMAT_TARGET:
362
            printf(" (%d, target=%d, operand0=%#" PRIx64 ", operand1=%#" PRIx64,
363
                uop->oparg,
364
                uop->target,
365
                (uint64_t)uop->operand0,
366
                (uint64_t)uop->operand1);
367
            break;
368
        case UOP_FORMAT_JUMP:
369
            printf(" (%d, jump_target=%d, operand0=%#" PRIx64 ", operand1=%#" PRIx64,
370
                uop->oparg,
371
                uop->jump_target,
372
                (uint64_t)uop->operand0,
373
                (uint64_t)uop->operand1);
374
            break;
375
        default:
376
            printf(" (%d, Unknown format)", uop->oparg);
377
    }
378
    if (_PyUop_Flags[_PyUop_Uncached[uop->opcode]] & HAS_ERROR_FLAG) {
379
        printf(", error_target=%d", uop->error_target);
380
    }
381
382
    printf(")");
383
}
384
#endif
385
386
static Py_ssize_t
387
uop_len(PyObject *op)
388
{
389
    _PyExecutorObject *self = _PyExecutorObject_CAST(op);
390
    return self->code_size;
391
}
392
393
static PyObject *
394
uop_item(PyObject *op, Py_ssize_t index)
395
{
396
    _PyExecutorObject *self = _PyExecutorObject_CAST(op);
397
    Py_ssize_t len = uop_len(op);
398
    if (index < 0 || index >= len) {
399
        PyErr_SetNone(PyExc_IndexError);
400
        return NULL;
401
    }
402
    int opcode = self->trace[index].opcode;
403
    int base_opcode = _PyUop_Uncached[opcode];
404
    const char *name = _PyUOpName(base_opcode);
405
    if (name == NULL) {
406
        name = "<nil>";
407
    }
408
    PyObject *oname = _PyUnicode_FromASCII(name, strlen(name));
409
    if (oname == NULL) {
410
        return NULL;
411
    }
412
    PyObject *oparg = PyLong_FromUnsignedLong(self->trace[index].oparg);
413
    if (oparg == NULL) {
414
        Py_DECREF(oname);
415
        return NULL;
416
    }
417
    PyObject *target = PyLong_FromUnsignedLong(self->trace[index].target);
418
    if (target == NULL) {
419
        Py_DECREF(oparg);
420
        Py_DECREF(oname);
421
        return NULL;
422
    }
423
    PyObject *operand = PyLong_FromUnsignedLongLong(self->trace[index].operand0);
424
    if (operand == NULL) {
425
        Py_DECREF(target);
426
        Py_DECREF(oparg);
427
        Py_DECREF(oname);
428
        return NULL;
429
    }
430
    PyObject *args[4] = { oname, oparg, target, operand };
431
    return _PyTuple_FromArraySteal(args, 4);
432
}
433
434
PySequenceMethods uop_as_sequence = {
435
    .sq_length = uop_len,
436
    .sq_item = uop_item,
437
};
438
439
static int
440
executor_traverse(PyObject *o, visitproc visit, void *arg)
441
{
442
    _PyExecutorObject *executor = _PyExecutorObject_CAST(o);
443
    for (uint32_t i = 0; i < executor->exit_count; i++) {
444
        Py_VISIT(executor->exits[i].executor);
445
    }
446
    return 0;
447
}
448
449
static PyObject *
450
get_jit_code(PyObject *self, PyObject *Py_UNUSED(ignored))
451
{
452
#ifndef _Py_JIT
453
    PyErr_SetString(PyExc_RuntimeError, "JIT support not enabled.");
454
    return NULL;
455
#else
456
    _PyExecutorObject *executor = _PyExecutorObject_CAST(self);
457
    if (executor->jit_code == NULL || executor->jit_size == 0) {
458
        Py_RETURN_NONE;
459
    }
460
    return PyBytes_FromStringAndSize(executor->jit_code, executor->jit_size);
461
#endif
462
}
463
464
static PyMethodDef uop_executor_methods[] = {
465
    { "is_valid", is_valid, METH_NOARGS, NULL },
466
    { "get_jit_code", get_jit_code, METH_NOARGS, NULL},
467
    { "get_opcode", get_opcode, METH_NOARGS, NULL },
468
    { "get_oparg", get_oparg, METH_NOARGS, NULL },
469
    { NULL, NULL },
470
};
471
472
static int
473
executor_is_gc(PyObject *o)
474
{
475
    return !_Py_IsImmortal(o);
476
}
477
478
PyTypeObject _PyUOpExecutor_Type = {
479
    PyVarObject_HEAD_INIT(&PyType_Type, 0)
480
    .tp_name = "uop_executor",
481
    .tp_basicsize = offsetof(_PyExecutorObject, exits),
482
    .tp_itemsize = 1,
483
    .tp_flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_DISALLOW_INSTANTIATION | Py_TPFLAGS_HAVE_GC,
484
    .tp_dealloc = uop_dealloc,
485
    .tp_as_sequence = &uop_as_sequence,
486
    .tp_methods = uop_executor_methods,
487
    .tp_traverse = executor_traverse,
488
    .tp_clear = executor_clear,
489
    .tp_is_gc = executor_is_gc,
490
};
491
492
/* TO DO -- Generate these tables */
493
static const uint16_t
494
_PyUOp_Replacements[MAX_UOP_ID + 1] = {
495
    [_ITER_JUMP_RANGE] = _GUARD_NOT_EXHAUSTED_RANGE,
496
    [_ITER_JUMP_LIST] = _GUARD_NOT_EXHAUSTED_LIST,
497
    [_ITER_JUMP_TUPLE] = _GUARD_NOT_EXHAUSTED_TUPLE,
498
    [_FOR_ITER] = _FOR_ITER_TIER_TWO,
499
    [_FOR_ITER_VIRTUAL] = _FOR_ITER_VIRTUAL_TIER_TWO,
500
    [_ITER_NEXT_LIST] = _ITER_NEXT_LIST_TIER_TWO,
501
    [_CHECK_PERIODIC_AT_END] = _TIER2_RESUME_CHECK,
502
    [_LOAD_BYTECODE] = _NOP,
503
};
504
505
static const uint8_t
506
is_for_iter_test[MAX_UOP_ID + 1] = {
507
    [_GUARD_NOT_EXHAUSTED_RANGE] = 1,
508
    [_GUARD_NOT_EXHAUSTED_LIST] = 1,
509
    [_GUARD_NOT_EXHAUSTED_TUPLE] = 1,
510
    [_FOR_ITER_TIER_TWO] = 1,
511
};
512
513
static const uint16_t
514
BRANCH_TO_GUARD[4][2] = {
515
    [POP_JUMP_IF_FALSE - POP_JUMP_IF_FALSE][0] = _GUARD_IS_TRUE_POP,
516
    [POP_JUMP_IF_FALSE - POP_JUMP_IF_FALSE][1] = _GUARD_IS_FALSE_POP,
517
    [POP_JUMP_IF_TRUE - POP_JUMP_IF_FALSE][0] = _GUARD_IS_FALSE_POP,
518
    [POP_JUMP_IF_TRUE - POP_JUMP_IF_FALSE][1] = _GUARD_IS_TRUE_POP,
519
    [POP_JUMP_IF_NONE - POP_JUMP_IF_FALSE][0] = _GUARD_IS_NOT_NONE_POP,
520
    [POP_JUMP_IF_NONE - POP_JUMP_IF_FALSE][1] = _GUARD_IS_NONE_POP,
521
    [POP_JUMP_IF_NOT_NONE - POP_JUMP_IF_FALSE][0] = _GUARD_IS_NONE_POP,
522
    [POP_JUMP_IF_NOT_NONE - POP_JUMP_IF_FALSE][1] = _GUARD_IS_NOT_NONE_POP,
523
};
524
525
static const uint16_t
526
guard_ip_uop[MAX_UOP_ID + 1] = {
527
    [_PUSH_FRAME] = _GUARD_IP__PUSH_FRAME,
528
    [_RETURN_GENERATOR] = _GUARD_IP_RETURN_GENERATOR,
529
    [_RETURN_VALUE] = _GUARD_IP_RETURN_VALUE,
530
    [_YIELD_VALUE] = _GUARD_IP_YIELD_VALUE,
531
};
532
533
static const uint16_t
534
guard_code_version_uop[MAX_UOP_ID + 1] = {
535
    [_PUSH_FRAME] = _GUARD_CODE_VERSION__PUSH_FRAME,
536
    [_RETURN_GENERATOR] = _GUARD_CODE_VERSION_RETURN_GENERATOR,
537
    [_RETURN_VALUE] = _GUARD_CODE_VERSION_RETURN_VALUE,
538
    [_YIELD_VALUE] = _GUARD_CODE_VERSION_YIELD_VALUE,
539
};
540
541
static const uint16_t
542
dynamic_exit_uop[MAX_UOP_ID + 1] = {
543
    [_GUARD_IP__PUSH_FRAME] = 1,
544
    [_GUARD_IP_RETURN_GENERATOR] = 1,
545
    [_GUARD_IP_RETURN_VALUE] = 1,
546
    [_GUARD_IP_YIELD_VALUE] = 1,
547
    [_GUARD_CODE_VERSION__PUSH_FRAME] = 1,
548
    [_GUARD_CODE_VERSION_RETURN_GENERATOR] = 1,
549
    [_GUARD_CODE_VERSION_RETURN_VALUE] = 1,
550
    [_GUARD_CODE_VERSION_YIELD_VALUE] = 1,
551
};
552
553
554
#define CONFIDENCE_RANGE 1000
555
#define CONFIDENCE_CUTOFF 333
556
557
#ifdef Py_DEBUG
558
#define DPRINTF(level, ...) \
559
    if (lltrace >= (level)) { printf(__VA_ARGS__); }
560
#else
561
#define DPRINTF(level, ...)
562
#endif
563
564
565
static inline void
566
add_to_trace(
567
    _PyJitUopBuffer *trace,
568
    uint16_t opcode,
569
    uint16_t oparg,
570
    uint64_t operand,
571
    uint32_t target)
572
{
573
    _PyUOpInstruction *inst = trace->next;
574
    inst->opcode = opcode;
575
    inst->format = UOP_FORMAT_TARGET;
576
    inst->target = target;
577
    inst->oparg = oparg;
578
    inst->operand0 = operand;
579
#ifdef Py_STATS
580
    inst->execution_count = 0;
581
#endif
582
    trace->next++;
583
}
584
585
586
#ifdef Py_DEBUG
587
#define ADD_TO_TRACE(OPCODE, OPARG, OPERAND, TARGET) \
588
    add_to_trace(trace, (OPCODE), (OPARG), (OPERAND), (TARGET)); \
589
    if (lltrace >= 2) { \
590
        printf("%4d ADD_TO_TRACE: ", uop_buffer_length(trace)); \
591
        _PyUOpPrint(uop_buffer_last(trace)); \
592
        printf("\n"); \
593
    }
594
#else
595
#define ADD_TO_TRACE(OPCODE, OPARG, OPERAND, TARGET) \
596
    add_to_trace(trace, (OPCODE), (OPARG), (OPERAND), (TARGET))
597
#endif
598
599
#define INSTR_IP(INSTR, CODE) \
600
    ((uint32_t)((INSTR) - ((_Py_CODEUNIT *)(CODE)->co_code_adaptive)))
601
602
603
static int
604
is_terminator(const _PyUOpInstruction *uop)
605
{
606
    int opcode = _PyUop_Uncached[uop->opcode];
607
    return (
608
        opcode == _EXIT_TRACE ||
609
        opcode == _DEOPT ||
610
        opcode == _JUMP_TO_TOP ||
611
        opcode == _DYNAMIC_EXIT
612
    );
613
}
614
615
/* Returns 1 on success (added to trace), 0 on trace end.
616
 */
617
// gh-142543: inlining this function causes stack overflows
618
Py_NO_INLINE int
619
_PyJit_translate_single_bytecode_to_trace(
620
    PyThreadState *tstate,
621
    _PyInterpreterFrame *frame,
622
    _Py_CODEUNIT *next_instr,
623
    int stop_tracing_opcode)
624
{
625
626
#ifdef Py_DEBUG
627
    char *python_lltrace = Py_GETENV("PYTHON_LLTRACE");
628
    int lltrace = 0;
629
    if (python_lltrace != NULL && *python_lltrace >= '0') {
630
        lltrace = *python_lltrace - '0';  // TODO: Parse an int and all that
631
    }
632
#endif
633
    _PyThreadStateImpl *_tstate = (_PyThreadStateImpl *)tstate;
634
    _PyJitTracerState *tracer = _tstate->jit_tracer_state;
635
    PyCodeObject *old_code = tracer->prev_state.instr_code;
636
    bool progress_needed = (tracer->initial_state.chain_depth % MAX_CHAIN_DEPTH) == 0;
637
    _PyJitUopBuffer *trace = &tracer->code_buffer;
638
639
    _Py_CODEUNIT *this_instr =  tracer->prev_state.instr;
640
    _Py_CODEUNIT *target_instr = this_instr;
641
    uint32_t target = 0;
642
643
    target = Py_IsNone((PyObject *)old_code)
644
        ? (uint32_t)(target_instr - _Py_INTERPRETER_TRAMPOLINE_INSTRUCTIONS_PTR)
645
        : INSTR_IP(target_instr, old_code);
646
647
    // Rewind EXTENDED_ARG so that we see the whole thing.
648
    // We must point to the first EXTENDED_ARG when deopting.
649
    int oparg = tracer->prev_state.instr_oparg;
650
    int opcode = this_instr->op.code;
651
    int rewind_oparg = oparg;
652
    while (rewind_oparg > 255) {
653
        rewind_oparg >>= 8;
654
        target--;
655
    }
656
657
    if (opcode == ENTER_EXECUTOR) {
658
        _PyExecutorObject *executor = old_code->co_executors->executors[oparg & 255];
659
        opcode = executor->vm_data.opcode;
660
        oparg = (oparg & ~255) | executor->vm_data.oparg;
661
    }
662
663
    if (_PyOpcode_Caches[_PyOpcode_Deopt[opcode]] > 0) {
664
        uint16_t backoff = (this_instr + 1)->counter.value_and_backoff;
665
        // adaptive_counter_cooldown is a fresh specialization.
666
        // trigger_backoff_counter is what we set during tracing.
667
        // All tracing backoffs should be freshly specialized or untouched.
668
        // If not, that indicates a deopt during tracing, and
669
        // thus the "actual" instruction executed is not the one that is
670
        // in the instruction stream, but rather the deopt.
671
        // It's important we check for this, as some specializations might make
672
        // no progress (they can immediately deopt after specializing).
673
        // We do this to improve performance, as otherwise a compiled trace
674
        // will just deopt immediately.
675
        if (backoff != adaptive_counter_cooldown().value_and_backoff &&
676
            backoff != trigger_backoff_counter().value_and_backoff) {
677
            OPT_STAT_INC(trace_immediately_deopts);
678
            opcode = _PyOpcode_Deopt[opcode];
679
        }
680
    }
681
682
    // Strange control-flow
683
    bool has_dynamic_jump_taken = OPCODE_HAS_UNPREDICTABLE_JUMP(opcode) &&
684
        (next_instr != this_instr + 1 + _PyOpcode_Caches[_PyOpcode_Deopt[opcode]]);
685
686
    /* Special case the first instruction,
687
    * so that we can guarantee forward progress */
688
    if (progress_needed && uop_buffer_length(&tracer->code_buffer) < CODE_SIZE_NO_PROGRESS) {
689
        if (OPCODE_HAS_EXIT(opcode) || OPCODE_HAS_DEOPT(opcode)) {
690
            opcode = _PyOpcode_Deopt[opcode];
691
        }
692
        assert(!OPCODE_HAS_EXIT(opcode));
693
        assert(!OPCODE_HAS_DEOPT(opcode));
694
    }
695
696
    bool needs_guard_ip = OPCODE_HAS_NEEDS_GUARD_IP(opcode);
697
    if (has_dynamic_jump_taken && !needs_guard_ip) {
698
        DPRINTF(2, "Unsupported: dynamic jump taken %s\n", _PyOpcode_OpName[opcode]);
699
        goto unsupported;
700
    }
701
702
    int is_sys_tracing = (tstate->c_tracefunc != NULL) || (tstate->c_profilefunc != NULL);
703
    if (is_sys_tracing) {
704
        goto done;
705
    }
706
707
    if (stop_tracing_opcode == _DEOPT) {
708
        // gh-143183: It's important we rewind to the last known proper target.
709
        // The current target might be garbage as stop tracing usually indicates
710
        // we are in something that we can't trace.
711
        DPRINTF(2, "Told to stop tracing\n");
712
        goto unsupported;
713
    }
714
    else if (stop_tracing_opcode != 0) {
715
        assert(stop_tracing_opcode == _EXIT_TRACE);
716
        ADD_TO_TRACE(stop_tracing_opcode, 0, 0, target);
717
        goto done;
718
    }
719
720
    DPRINTF(2, "%p %d: %s(%d) %d\n", old_code, target, _PyOpcode_OpName[opcode], oparg, needs_guard_ip);
721
722
#ifdef Py_DEBUG
723
    if (oparg > 255) {
724
        assert(_Py_GetBaseCodeUnit(old_code, target).op.code == EXTENDED_ARG);
725
    }
726
#endif
727
728
    // This happens when a recursive call happens that we can't trace. Such as Python -> C -> Python calls
729
    // If we haven't guarded the IP, then it's untraceable.
730
    if (frame != tracer->prev_state.instr_frame && !needs_guard_ip) {
731
        DPRINTF(2, "Unsupported: unguardable jump taken\n");
732
        goto unsupported;
733
    }
734
735
    if (oparg > 0xFFFF) {
736
        DPRINTF(2, "Unsupported: oparg too large\n");
737
        unsupported:
738
        {
739
            // Rewind to previous instruction and replace with _EXIT_TRACE.
740
            _PyUOpInstruction *curr = uop_buffer_last(trace);
741
            while (curr->opcode != _SET_IP && uop_buffer_length(trace) > 2) {
742
                trace->next--;
743
                curr = uop_buffer_last(trace);
744
            }
745
            assert(curr->opcode == _SET_IP || uop_buffer_length(trace) == 2);
746
            if (curr->opcode == _SET_IP) {
747
                int32_t old_target = (int32_t)uop_get_target(curr);
748
                curr->opcode = _DEOPT;
749
                curr->format = UOP_FORMAT_TARGET;
750
                curr->target = old_target;
751
            }
752
            goto done;
753
        }
754
    }
755
756
    if (opcode == NOP) {
757
        return 1;
758
    }
759
760
    if (opcode == JUMP_FORWARD) {
761
        return 1;
762
    }
763
764
    if (opcode == EXTENDED_ARG) {
765
        return 1;
766
    }
767
768
    // One for possible _DEOPT, one because _CHECK_VALIDITY itself might _DEOPT
769
    trace->end -= 2;
770
771
    const struct opcode_macro_expansion *expansion = &_PyOpcode_macro_expansion[opcode];
772
773
    assert(opcode != ENTER_EXECUTOR && opcode != EXTENDED_ARG);
774
    assert(!_PyErr_Occurred(tstate));
775
776
777
    if (OPCODE_HAS_EXIT(opcode)) {
778
        // Make space for side exit
779
        trace->end--;
780
    }
781
    if (OPCODE_HAS_ERROR(opcode)) {
782
        // Make space for error stub
783
        trace->end--;
784
    }
785
    if (OPCODE_HAS_DEOPT(opcode)) {
786
        // Make space for side exit
787
        trace->end--;
788
    }
789
790
    // _GUARD_IP leads to an exit.
791
    trace->end -= needs_guard_ip;
792
793
    int space_needed = expansion->nuops + needs_guard_ip + 2 + (!OPCODE_HAS_NO_SAVE_IP(opcode));
794
    if (uop_buffer_remaining_space(trace) < space_needed) {
795
        DPRINTF(2, "No room for expansions and guards (need %d, got %d)\n",
796
                space_needed, uop_buffer_remaining_space(trace));
797
        OPT_STAT_INC(trace_too_long);
798
        goto done;
799
    }
800
801
    ADD_TO_TRACE(_CHECK_VALIDITY, 0, 0, target);
802
803
    if (!OPCODE_HAS_NO_SAVE_IP(opcode)) {
804
        ADD_TO_TRACE(_SET_IP, 0, (uintptr_t)target_instr, target);
805
    }
806
807
    switch (opcode) {
808
        case POP_JUMP_IF_NONE:
809
        case POP_JUMP_IF_NOT_NONE:
810
        case POP_JUMP_IF_FALSE:
811
        case POP_JUMP_IF_TRUE:
812
        {
813
            _Py_CODEUNIT *computed_next_instr_without_modifiers = target_instr + 1 + _PyOpcode_Caches[_PyOpcode_Deopt[opcode]];
814
            _Py_CODEUNIT *computed_next_instr = computed_next_instr_without_modifiers + (computed_next_instr_without_modifiers->op.code == NOT_TAKEN);
815
            _Py_CODEUNIT *computed_jump_instr = computed_next_instr_without_modifiers + oparg;
816
            assert(next_instr == computed_next_instr || next_instr == computed_jump_instr);
817
            int jump_happened = target_instr[1].cache & 1;
818
            assert(jump_happened ? (next_instr == computed_jump_instr) : (next_instr == computed_next_instr));
819
            uint32_t uopcode = BRANCH_TO_GUARD[opcode - POP_JUMP_IF_FALSE][jump_happened];
820
            ADD_TO_TRACE(uopcode, 0, 0, INSTR_IP(jump_happened ? computed_next_instr : computed_jump_instr, old_code));
821
            break;
822
        }
823
        case JUMP_BACKWARD_JIT:
824
            // This is possible as the JIT might have re-activated after it was disabled
825
        case JUMP_BACKWARD_NO_JIT:
826
        case JUMP_BACKWARD:
827
            ADD_TO_TRACE(_CHECK_PERIODIC, 0, 0, target);
828
            _Py_FALLTHROUGH;
829
        case JUMP_BACKWARD_NO_INTERRUPT:
830
        {
831
            if ((next_instr != tracer->initial_state.close_loop_instr) &&
832
                (next_instr != tracer->initial_state.start_instr) &&
833
                uop_buffer_length(&tracer->code_buffer) > CODE_SIZE_NO_PROGRESS &&
834
                // For side exits, we don't want to terminate them early.
835
                tracer->initial_state.exit == NULL &&
836
                // These are coroutines, and we want to unroll those usually.
837
                opcode != JUMP_BACKWARD_NO_INTERRUPT) {
838
                // We encountered a JUMP_BACKWARD but not to the top of our own loop.
839
                // We don't want to continue tracing as we might get stuck in the
840
                // inner loop. Instead, end the trace where the executor of the
841
                // inner loop might start and let the traces rejoin.
842
                OPT_STAT_INC(inner_loop);
843
                ADD_TO_TRACE(_EXIT_TRACE, 0, 0, target);
844
                uop_buffer_last(trace)->operand1 = true; // is_control_flow
845
                DPRINTF(2, "JUMP_BACKWARD not to top ends trace %p %p %p\n", next_instr,
846
                    tracer->initial_state.close_loop_instr, tracer->initial_state.start_instr);
847
                goto done;
848
            }
849
            break;
850
        }
851
852
        case RESUME:
853
        case RESUME_CHECK:
854
        case RESUME_CHECK_JIT:
855
            /* Use a special tier 2 version of RESUME_CHECK to allow traces to
856
             *  start with RESUME_CHECK */
857
            ADD_TO_TRACE(_TIER2_RESUME_CHECK, 0, 0, target);
858
            break;
859
        default:
860
        {
861
            const struct opcode_macro_expansion *expansion = &_PyOpcode_macro_expansion[opcode];
862
            // Reserve space for nuops (+ _SET_IP + _EXIT_TRACE)
863
            int nuops = expansion->nuops;
864
            if (nuops == 0) {
865
                DPRINTF(2, "Unsupported opcode %s\n", _PyOpcode_OpName[opcode]);
866
                goto unsupported;
867
            }
868
            assert(nuops > 0);
869
            uint32_t orig_oparg = oparg;  // For OPARG_TOP/BOTTOM
870
            uint32_t orig_target = target;
871
            int record_idx = 0;
872
            for (int i = 0; i < nuops; i++) {
873
                oparg = orig_oparg;
874
                target = orig_target;
875
                uint32_t uop = expansion->uops[i].uop;
876
                uint64_t operand = 0;
877
                // Add one to account for the actual opcode/oparg pair:
878
                int offset = expansion->uops[i].offset + 1;
879
                switch (expansion->uops[i].size) {
880
                    case OPARG_SIMPLE:
881
                        assert(opcode != _JUMP_BACKWARD_NO_INTERRUPT && opcode != JUMP_BACKWARD);
882
                        break;
883
                    case OPARG_CACHE_1:
884
                        operand = read_u16(&this_instr[offset].cache);
885
                        break;
886
                    case OPARG_CACHE_2:
887
                        operand = read_u32(&this_instr[offset].cache);
888
                        break;
889
                    case OPARG_CACHE_4:
890
                        operand = read_u64(&this_instr[offset].cache);
891
                        break;
892
                    case OPARG_TOP:  // First half of super-instr
893
                        assert(orig_oparg <= 255);
894
                        oparg = orig_oparg >> 4;
895
                        break;
896
                    case OPARG_BOTTOM:  // Second half of super-instr
897
                        assert(orig_oparg <= 255);
898
                        oparg = orig_oparg & 0xF;
899
                        break;
900
                    case OPARG_SAVE_RETURN_OFFSET:  // op=_SAVE_RETURN_OFFSET; oparg=return_offset
901
                        oparg = offset;
902
                        assert(uop == _SAVE_RETURN_OFFSET);
903
                        break;
904
                    case OPARG_REPLACED:
905
                        uop = _PyUOp_Replacements[uop];
906
                        assert(uop != 0);
907
908
                        uint32_t next_inst = target + 1 + _PyOpcode_Caches[_PyOpcode_Deopt[opcode]];
909
                        if (uop == _TIER2_RESUME_CHECK) {
910
                            target = next_inst;
911
                        }
912
                        else {
913
                            int extended_arg = orig_oparg > 255;
914
                            uint32_t jump_target = next_inst + orig_oparg + extended_arg;
915
                            assert(_Py_GetBaseCodeUnit(old_code, jump_target).op.code == END_FOR);
916
                            assert(_Py_GetBaseCodeUnit(old_code, jump_target+1).op.code == POP_ITER);
917
                            if (is_for_iter_test[uop]) {
918
                                target = jump_target + 1;
919
                            }
920
                        }
921
                        break;
922
                    case OPERAND1_1:
923
                        assert(uop_buffer_last(trace)->opcode == uop);
924
                        operand = read_u16(&this_instr[offset].cache);
925
                        uop_buffer_last(trace)->operand1 = operand;
926
                        continue;
927
                    case OPERAND1_2:
928
                        assert(uop_buffer_last(trace)->opcode == uop);
929
                        operand = read_u32(&this_instr[offset].cache);
930
                        uop_buffer_last(trace)->operand1 = operand;
931
                        continue;
932
                    case OPERAND1_4:
933
                        assert(uop_buffer_last(trace)->opcode == uop);
934
                        operand = read_u64(&this_instr[offset].cache);
935
                        uop_buffer_last(trace)->operand1 = operand;
936
                        continue;
937
                    default:
938
                        fprintf(stderr,
939
                                "opcode=%d, oparg=%d; nuops=%d, i=%d; size=%d, offset=%d\n",
940
                                opcode, oparg, nuops, i,
941
                                expansion->uops[i].size,
942
                                expansion->uops[i].offset);
943
                        Py_FatalError("garbled expansion");
944
                }
945
                if (uop == _BINARY_OP_INPLACE_ADD_UNICODE) {
946
                    assert(i + 1 == nuops);
947
                    _Py_CODEUNIT *next = target_instr + 1 + _PyOpcode_Caches[_PyOpcode_Deopt[opcode]];
948
                    assert(next->op.code == STORE_FAST);
949
                    operand = next->op.arg;
950
                }
951
                else if (_PyUop_Flags[uop] & HAS_RECORDS_VALUE_FLAG) {
952
                    PyObject *recorded_value = tracer->prev_state.recorded_values[record_idx];
953
                    tracer->prev_state.recorded_values[record_idx] = NULL;
954
                    record_idx++;
955
                    operand = (uintptr_t)recorded_value;
956
                }
957
                // All other instructions
958
                ADD_TO_TRACE(uop, oparg, operand, target);
959
            }
960
            break;
961
        }  // End default
962
963
    }  // End switch (opcode)
964
965
    if (needs_guard_ip) {
966
        int last_opcode = uop_buffer_last(trace)->opcode;
967
        uint16_t guard_ip = guard_ip_uop[last_opcode];
968
        if (guard_ip == 0) {
969
            DPRINTF(1, "Unknown uop needing guard ip %s\n", _PyOpcode_uop_name[last_opcode]);
970
            Py_UNREACHABLE();
971
        }
972
        PyObject *code = PyStackRef_AsPyObjectBorrow(frame->f_executable);
973
        Py_INCREF(code);
974
        ADD_TO_TRACE(_RECORD_CODE, 0, (uintptr_t)code, 0);
975
        ADD_TO_TRACE(guard_ip, 0, (uintptr_t)next_instr, 0);
976
        if (PyCode_Check(code)) {
977
            /* Record stack depth, in operand1 */
978
            int stack_depth = (int)(frame->stackpointer - _PyFrame_Stackbase(frame));
979
            uop_buffer_last(trace)->operand1 = stack_depth;
980
            ADD_TO_TRACE(guard_code_version_uop[last_opcode], 0, ((PyCodeObject *)code)->co_version, 0);
981
        }
982
    }
983
    // Loop back to the start
984
    int is_first_instr = tracer->initial_state.close_loop_instr == next_instr ||
985
        tracer->initial_state.start_instr == next_instr;
986
    if (is_first_instr && uop_buffer_length(trace) > CODE_SIZE_NO_PROGRESS) {
987
        if (needs_guard_ip) {
988
            ADD_TO_TRACE(_SET_IP, 0, (uintptr_t)next_instr, 0);
989
        }
990
        ADD_TO_TRACE(_JUMP_TO_TOP, 0, 0, 0);
991
        goto done;
992
    }
993
    DPRINTF(2, "Trace continuing\n");
994
    return 1;
995
done:
996
    DPRINTF(2, "Trace done\n");
997
    if (!is_terminator(uop_buffer_last(trace))) {
998
        ADD_TO_TRACE(_EXIT_TRACE, 0, 0, target);
999
        uop_buffer_last(trace)->operand1 = true; // is_control_flow
1000
    }
1001
    return 0;
1002
}
1003
1004
// Returns 0 for do not enter tracing, 1 on enter tracing.
// gh-142543: inlining this function causes stack overflows
Py_NO_INLINE int
_PyJit_TryInitializeTracing(
    PyThreadState *tstate, _PyInterpreterFrame *frame, _Py_CODEUNIT *curr_instr,
    _Py_CODEUNIT *start_instr, _Py_CODEUNIT *close_loop_instr, _PyStackRef *stack_pointer, int chain_depth,
    _PyExitData *exit, int oparg, _PyExecutorObject *current_executor)
{
    _PyThreadStateImpl *_tstate = (_PyThreadStateImpl *)tstate;
    // Lazily allocate the per-thread tracer state on first use.
    if (_tstate->jit_tracer_state == NULL) {
        _tstate->jit_tracer_state = (_PyJitTracerState *)_PyObject_VirtualAlloc(sizeof(_PyJitTracerState));
        if (_tstate->jit_tracer_state == NULL) {
            // Don't error, just go to next instruction.
            return 0;
        }
        _tstate->jit_tracer_state->is_tracing = false;
    }
    _PyJitTracerState *tracer = _tstate->jit_tracer_state;
    // A recursive trace.
    if (tracer->is_tracing) {
        return 0;
    }
    // oparg must fit in the 16-bit uop oparg field.
    if (oparg > 0xFFFF) {
        return 0;
    }
    // Only trace frames backed by a real Python function object.
    PyObject *func = PyStackRef_AsPyObjectBorrow(frame->f_funcobj);
    if (func == NULL || !PyFunction_Check(func)) {
        return 0;
    }
    PyCodeObject *code = _PyFrame_GetCode(frame);
#ifdef Py_DEBUG
    char *python_lltrace = Py_GETENV("PYTHON_LLTRACE");
    int lltrace = 0;
    if (python_lltrace != NULL && *python_lltrace >= '0') {
        lltrace = *python_lltrace - '0';  // TODO: Parse an int and all that
    }
    DPRINTF(2,
        "Tracing %s (%s:%d) at byte offset %d at chain depth %d\n",
        PyUnicode_AsUTF8(code->co_qualname),
        PyUnicode_AsUTF8(code->co_filename),
        code->co_firstlineno,
        2 * INSTR_IP(close_loop_instr, code),
        chain_depth);
#endif
    /* Set up tracing buffer*/
    _PyJitUopBuffer *trace = &tracer->code_buffer;
    uop_buffer_init(trace, &tracer->uop_array[0], UOP_MAX_TRACE_LENGTH);
    // Every trace starts with _START_EXECUTOR, _MAKE_WARM (CODE_SIZE_EMPTY).
    ADD_TO_TRACE(_START_EXECUTOR, 0, (uintptr_t)start_instr, INSTR_IP(start_instr, code));
    ADD_TO_TRACE(_MAKE_WARM, 0, 0, 0);

    // Record the initial state: strong references keep the code, function and
    // (optional) originating executor alive for the duration of the trace.
    tracer->initial_state.start_instr = start_instr;
    tracer->initial_state.close_loop_instr = close_loop_instr;
    tracer->initial_state.code = (PyCodeObject *)Py_NewRef(code);
    tracer->initial_state.func = (PyFunctionObject *)Py_NewRef(func);
    tracer->initial_state.executor = (_PyExecutorObject *)Py_XNewRef(current_executor);
    tracer->initial_state.exit = exit;
    tracer->initial_state.stack_depth = (int)(stack_pointer - _PyFrame_Stackbase(frame));
    tracer->initial_state.chain_depth = chain_depth;
    tracer->prev_state.instr_code = (PyCodeObject *)Py_NewRef(_PyFrame_GetCode(frame));
    tracer->prev_state.instr = curr_instr;
    tracer->prev_state.instr_frame = frame;
    tracer->prev_state.instr_oparg = oparg;
    tracer->prev_state.instr_stacklevel = tracer->initial_state.stack_depth;
    tracer->prev_state.recorded_count = 0;
    for (int i = 0; i < MAX_RECORDED_VALUES; i++) {
        tracer->prev_state.recorded_values[i] = NULL;
    }
    // Run the opcode's value-recording hooks (if any) so the first traced
    // instruction has its observed runtime values available.
    const _PyOpcodeRecordEntry *record_entry = &_PyOpcode_RecordEntries[curr_instr->op.code];
    for (int i = 0; i < record_entry->count; i++) {
        _Py_RecordFuncPtr record_func = _PyOpcode_RecordFunctions[record_entry->indices[i]];
        record_func(frame, stack_pointer, oparg, &tracer->prev_state.recorded_values[i]);
    }
    tracer->prev_state.recorded_count = record_entry->count;
    assert(curr_instr->op.code == JUMP_BACKWARD_JIT || curr_instr->op.code == RESUME_CHECK_JIT || (exit != NULL));
    tracer->initial_state.jump_backward_instr = curr_instr;

    tracer->is_tracing = true;
    return 1;
}
1083
1084
/* Tear down a tracing session started by _PyJit_TryInitializeTracing.
 * `err` > 0 means a trace was successfully produced; <= 0 means failure,
 * in which case backoff counters are restarted so we retry later. */
Py_NO_INLINE void
_PyJit_FinalizeTracing(PyThreadState *tstate, int err)
{
    _PyThreadStateImpl *_tstate = (_PyThreadStateImpl *)tstate;
    _PyJitTracerState *tracer = _tstate->jit_tracer_state;
    // Deal with backoffs
    assert(tracer != NULL);
    _PyExitData *exit = tracer->initial_state.exit;
    if (exit == NULL) {
        // Trace started from bytecode (JUMP_BACKWARD_JIT / RESUME_CHECK_JIT),
        // so update the inline backoff counter in the instruction stream.
        // We hold a strong reference to the code object, so the instruction won't be freed.
        if (err <= 0) {
            _Py_BackoffCounter counter = tracer->initial_state.jump_backward_instr[1].counter;
            tracer->initial_state.jump_backward_instr[1].counter = restart_backoff_counter(counter);
        }
        else {
            if (tracer->initial_state.jump_backward_instr[0].op.code == JUMP_BACKWARD_JIT) {
                tracer->initial_state.jump_backward_instr[1].counter = initial_jump_backoff_counter(&tstate->interp->opt_config);
            }
            else {
                tracer->initial_state.jump_backward_instr[1].counter = initial_resume_backoff_counter(&tstate->interp->opt_config);
            }
        }
    }
    else if (tracer->initial_state.executor->vm_data.valid) {
        // Likewise, we hold a strong reference to the executor containing this exit, so the exit is guaranteed
        // to be valid to access.
        if (err <= 0) {
            exit->temperature = restart_backoff_counter(exit->temperature);
        }
        else {
            exit->temperature = initial_temperature_backoff_counter(&tstate->interp->opt_config);
        }
    }
    // Clear all recorded values
    // (uops flagged HAS_RECORDS_VALUE_FLAG own a reference in operand0).
    _PyJitUopBuffer *buffer = &tracer->code_buffer;
    for (_PyUOpInstruction *inst = buffer->start; inst < buffer->next; inst++) {
        if (_PyUop_Flags[inst->opcode] & HAS_RECORDS_VALUE_FLAG) {
            Py_XDECREF((PyObject *)(uintptr_t)inst->operand0);
        }
    }
    // Drop the strong references taken at trace start.
    Py_CLEAR(tracer->initial_state.code);
    Py_CLEAR(tracer->initial_state.func);
    Py_CLEAR(tracer->initial_state.executor);
    Py_CLEAR(tracer->prev_state.instr_code);
    for (int i = 0; i < MAX_RECORDED_VALUES; i++) {
        Py_CLEAR(tracer->prev_state.recorded_values[i]);
    }
    tracer->prev_state.recorded_count = 0;
    // Reset the uop buffer for the next tracing session.
    uop_buffer_init(buffer, &tracer->uop_array[0], UOP_MAX_TRACE_LENGTH);
    tracer->is_tracing = false;
}
1135
1136
bool
1137
_PyJit_EnterExecutorShouldStopTracing(int og_opcode)
1138
{
1139
    // Continue tracing (skip over the executor). If it's a RESUME
1140
    // trace to form longer, more optimizeable traces.
1141
    // We want to trace over RESUME traces. Otherwise, functions with lots of RESUME
1142
    // end up with many fragmented traces which perform badly.
1143
    // See for example, the richards benchmark in pyperformance.
1144
    // For consideration: We may want to consider tracing over side traces
1145
    // inserted into bytecode as well in the future.
1146
    return og_opcode == RESUME_CHECK_JIT;
1147
}
1148
1149
void
1150
_PyJit_TracerFree(_PyThreadStateImpl *_tstate)
1151
{
1152
    if (_tstate->jit_tracer_state != NULL) {
1153
        _PyObject_VirtualFree(_tstate->jit_tracer_state, sizeof(_PyJitTracerState));
1154
        _tstate->jit_tracer_state = NULL;
1155
    }
1156
}
1157
1158
#undef RESERVE
1159
#undef INSTR_IP
1160
#undef ADD_TO_TRACE
1161
#undef DPRINTF
1162
1163
#define UNSET_BIT(array, bit) (array[(bit)>>5] &= ~(1<<((bit)&31)))
1164
#define SET_BIT(array, bit) (array[(bit)>>5] |= (1<<((bit)&31)))
1165
#define BIT_IS_SET(array, bit) (array[(bit)>>5] & (1<<((bit)&31)))
1166
1167
/* Count the number of unused uops and exits
1168
*/
1169
static int
1170
count_exits(_PyUOpInstruction *buffer, int length)
1171
{
1172
    int exit_count = 0;
1173
    for (int i = 0; i < length; i++) {
1174
        uint16_t base_opcode = _PyUop_Uncached[buffer[i].opcode];
1175
        if (base_opcode == _EXIT_TRACE || base_opcode == _DYNAMIC_EXIT) {
1176
            exit_count++;
1177
        }
1178
    }
1179
    return exit_count;
1180
}
1181
1182
/* The number of cached registers at any exit (`EXIT_IF` or `DEOPT_IF`)
1183
 * This is the number of cached at entries at start, unless the uop is
1184
 * marked as `exit_depth_is_output` in which case it is the number of
1185
 * cached entries at the end */
1186
static int
1187
get_cached_entries_for_side_exit(_PyUOpInstruction *inst)
1188
{
1189
    // Maybe add another generated table for this?
1190
    int base_opcode = _PyUop_Uncached[inst->opcode];
1191
    assert(base_opcode != 0);
1192
    for (int i = 0; i <= MAX_CACHED_REGISTER; i++) {
1193
        const _PyUopTOSentry *entry = &_PyUop_Caching[base_opcode].entries[i];
1194
        if (entry->opcode == inst->opcode) {
1195
            return entry->exit;
1196
        }
1197
    }
1198
    Py_UNREACHABLE();
1199
}
1200
1201
/* Initialize `inst` as an explicit exit stub (_DEOPT / _EXIT_TRACE /
 * _DYNAMIC_EXIT variant) jumping back to bytecode offset `target`. */
static void make_exit(_PyUOpInstruction *inst, int opcode, int target, bool is_control_flow)
{
    // Exit stubs must use a register-cached (non-base) uop id.
    assert(opcode > MAX_UOP_ID && opcode <= MAX_UOP_REGS_ID);
    inst->opcode = opcode;
    inst->format = UOP_FORMAT_TARGET;
    inst->target = target;
    inst->oparg = 0;
    inst->operand0 = 0;
    inst->operand1 = is_control_flow;
#ifdef Py_STATS
    inst->execution_count = 0;
#endif
}
1214
1215
/* Convert implicit exits, errors and deopts
 * into explicit ones.
 *
 * Two passes over `buffer`:
 *   1. Compact out _NOPs in place.
 *   2. For every uop that can exit/deopt/error, append a shared exit or
 *      error stub in the spare space after the trace and point the uop's
 *      jump_target / error_target at it. Consecutive uops with the same
 *      target reuse the previous stub.
 * Returns the new total length (trace + appended stubs). */
static int
prepare_for_execution(_PyUOpInstruction *buffer, int length)
{
    // "current_*" track the most recently emitted stub so it can be shared.
    int32_t current_jump = -1;
    int32_t current_jump_target = -1;
    int32_t current_error = -1;
    int32_t current_error_target = -1;
    int32_t current_popped = -1;
    int32_t current_exit_op = -1;
    /* Leaving in NOPs slows down the interpreter and messes up the stats */
    _PyUOpInstruction *copy_to = &buffer[0];
    for (int i = 0; i < length; i++) {
        _PyUOpInstruction *inst = &buffer[i];
        if (inst->opcode != _NOP) {
            if (copy_to != inst) {
                *copy_to = *inst;
            }
            copy_to++;
        }
    }
    length = (int)(copy_to - buffer);
    // Stubs are appended starting right after the compacted trace.
    int next_spare = length;
    for (int i = 0; i < length; i++) {
        _PyUOpInstruction *inst = &buffer[i];
        int base_opcode = _PyUop_Uncached[inst->opcode];
        assert(inst->opcode != _NOP);
        int32_t target = (int32_t)uop_get_target(inst);
        uint16_t exit_flags = _PyUop_Flags[base_opcode] & (HAS_EXIT_FLAG | HAS_DEOPT_FLAG | HAS_PERIODIC_FLAG);
        if (exit_flags) {
            // Pick the kind of exit stub this uop needs.
            uint16_t base_exit_op = _EXIT_TRACE;
            if (exit_flags & HAS_DEOPT_FLAG) {
                base_exit_op = _DEOPT;
            }
            else if (exit_flags & HAS_PERIODIC_FLAG) {
                base_exit_op = _HANDLE_PENDING_AND_DEOPT;
            }
            int32_t jump_target = target;
            if (dynamic_exit_uop[base_opcode]) {
                base_exit_op = _DYNAMIC_EXIT;
            }
            // Select the stub variant matching the number of cached registers
            // live at this exit.
            int exit_depth = get_cached_entries_for_side_exit(inst);
            assert(_PyUop_Caching[base_exit_op].entries[exit_depth].opcode > 0);
            int16_t exit_op = _PyUop_Caching[base_exit_op].entries[exit_depth].opcode;
            bool is_control_flow = (base_opcode == _GUARD_IS_FALSE_POP || base_opcode == _GUARD_IS_TRUE_POP || is_for_iter_test[base_opcode]);
            // Only emit a new stub if the previous one can't be reused.
            if (jump_target != current_jump_target || current_exit_op != exit_op) {
                make_exit(&buffer[next_spare], exit_op, jump_target, is_control_flow);
                current_exit_op = exit_op;
                current_jump_target = jump_target;
                current_jump = next_spare;
                next_spare++;
            }
            buffer[i].jump_target = current_jump;
            buffer[i].format = UOP_FORMAT_JUMP;
        }
        if (_PyUop_Flags[base_opcode] & HAS_ERROR_FLAG) {
            int popped = (_PyUop_Flags[base_opcode] & HAS_ERROR_NO_POP_FLAG) ?
                0 : _PyUop_num_popped(base_opcode, inst->oparg);
            // Error stubs are shared only when both target and pop count match.
            if (target != current_error_target || popped != current_popped) {
                current_popped = popped;
                current_error = next_spare;
                current_error_target = target;
                make_exit(&buffer[next_spare], _ERROR_POP_N_r00, 0, false);
                buffer[next_spare].operand0 = target;
                next_spare++;
            }
            buffer[i].error_target = current_error;
            if (buffer[i].format == UOP_FORMAT_TARGET) {
                buffer[i].format = UOP_FORMAT_JUMP;
                buffer[i].jump_target = 0;
            }
        }
        if (base_opcode == _JUMP_TO_TOP) {
            // Jump back to index 1, skipping the _START_EXECUTOR at index 0.
            assert(_PyUop_Uncached[buffer[0].opcode] == _START_EXECUTOR);
            buffer[i].format = UOP_FORMAT_JUMP;
            buffer[i].jump_target = 1;
        }
    }
    return next_spare;
}
1296
1297
/* Executor side exits */
1298
1299
static _PyExecutorObject *
1300
allocate_executor(int exit_count, int length)
1301
{
1302
    int size = exit_count*sizeof(_PyExitData) + length*sizeof(_PyUOpInstruction);
1303
    _PyExecutorObject *res = PyObject_GC_NewVar(_PyExecutorObject, &_PyUOpExecutor_Type, size);
1304
    if (res == NULL) {
1305
        return NULL;
1306
    }
1307
    res->trace = (_PyUOpInstruction *)(res->exits + exit_count);
1308
    res->code_size = length;
1309
    res->exit_count = exit_count;
1310
    return res;
1311
}
1312
1313
#ifdef Py_DEBUG
1314
1315
#define CHECK(PRED) \
1316
if (!(PRED)) { \
1317
    printf(#PRED " at %d\n", i); \
1318
    assert(0); \
1319
}
1320
1321
static int
1322
target_unused(int opcode)
1323
{
1324
    return (_PyUop_Flags[opcode] & (HAS_ERROR_FLAG | HAS_EXIT_FLAG | HAS_DEOPT_FLAG)) == 0;
1325
}
1326
1327
/* Debug-only invariant checks on a finished executor:
 *  - all exit targets fit in 25 bits;
 *  - the trace starts with _START_EXECUTOR (or a cold-exit stub);
 *  - every uop id is a valid register-cached opcode with a valid base;
 *  - jump/error targets stay inside the trace;
 *  - the main body ends in a terminator, followed only by stubs. */
static void
sanity_check(_PyExecutorObject *executor)
{
    for (uint32_t i = 0; i < executor->exit_count; i++) {
        _PyExitData *exit = &executor->exits[i];
        CHECK(exit->target < (1 << 25));
    }
    bool ended = false;
    uint32_t i = 0;
    CHECK(_PyUop_Uncached[executor->trace[0].opcode] == _START_EXECUTOR ||
        _PyUop_Uncached[executor->trace[0].opcode] == _COLD_EXIT ||
        _PyUop_Uncached[executor->trace[0].opcode] == _COLD_DYNAMIC_EXIT);
    // Walk the main body up to (and including) the first terminator.
    for (; i < executor->code_size; i++) {
        const _PyUOpInstruction *inst = &executor->trace[i];
        uint16_t opcode = inst->opcode;
        uint16_t base_opcode = _PyUop_Uncached[opcode];
        // Executors only contain register-cached uop ids (> MAX_UOP_ID).
        CHECK(opcode > MAX_UOP_ID);
        CHECK(opcode <= MAX_UOP_REGS_ID);
        CHECK(base_opcode <= MAX_UOP_ID);
        CHECK(base_opcode != 0);
        switch(inst->format) {
            case UOP_FORMAT_TARGET:
                // Only uops that never branch may use the plain target format.
                CHECK(target_unused(base_opcode));
                break;
            case UOP_FORMAT_JUMP:
                CHECK(inst->jump_target < executor->code_size);
                break;
        }
        if (_PyUop_Flags[base_opcode] & HAS_ERROR_FLAG) {
            CHECK(inst->format == UOP_FORMAT_JUMP);
            CHECK(inst->error_target < executor->code_size);
        }
        if (is_terminator(inst)) {
            ended = true;
            i++;
            break;
        }
    }
    CHECK(ended);
    // Everything after the terminator must be exit / error stubs.
    for (; i < executor->code_size; i++) {
        const _PyUOpInstruction *inst = &executor->trace[i];
        uint16_t base_opcode = _PyUop_Uncached[inst->opcode];
        CHECK(
            base_opcode == _DEOPT ||
            base_opcode == _HANDLE_PENDING_AND_DEOPT ||
            base_opcode == _EXIT_TRACE ||
            base_opcode == _ERROR_POP_N ||
            base_opcode == _DYNAMIC_EXIT);
    }
}
1377
1378
#undef CHECK
1379
#endif
1380
1381
/* Makes an executor from a buffer of uops.
 * Account for the buffer having gaps and NOPs by computing a "used"
 * bit vector and only copying the used uops. Here "used" means reachable
 * and not a NOP.
 *
 * On success returns a new, GC-tracked executor with all exits wired to
 * the shared cold executors; returns NULL (with an exception set) on
 * allocation or JIT-compilation failure.
 */
static _PyExecutorObject *
make_executor_from_uops(_PyThreadStateImpl *tstate, _PyUOpInstruction *buffer, int length, const _PyBloomFilter *dependencies)
{
    int exit_count = count_exits(buffer, length);
    _PyExecutorObject *executor = allocate_executor(exit_count, length);
    if (executor == NULL) {
        return NULL;
    }

    /* Initialize exits */
    int chain_depth = tstate->jit_tracer_state->initial_state.chain_depth;
    _PyExecutorObject *cold = _PyExecutor_GetColdExecutor();
    _PyExecutorObject *cold_dynamic = _PyExecutor_GetColdDynamicExecutor();
    cold->vm_data.chain_depth = chain_depth;
    PyInterpreterState *interp = tstate->base.interp;
    for (int i = 0; i < exit_count; i++) {
        executor->exits[i].index = i;
        executor->exits[i].temperature = initial_temperature_backoff_counter(&interp->opt_config);
    }
    // Copy the buffer into the executor back-to-front, assigning exit
    // records from the highest index down so exit order matches uop order.
    int next_exit = exit_count-1;
    _PyUOpInstruction *dest = (_PyUOpInstruction *)&executor->trace[length];
    assert(_PyUop_Uncached[buffer[0].opcode] == _START_EXECUTOR);
    buffer[0].operand0 = (uint64_t)executor;
    for (int i = length-1; i >= 0; i--) {
        uint16_t base_opcode = _PyUop_Uncached[buffer[i].opcode];
        dest--;
        *dest = buffer[i];
        if (base_opcode == _EXIT_TRACE || base_opcode == _DYNAMIC_EXIT) {
            _PyExitData *exit = &executor->exits[next_exit];
            exit->target = buffer[i].target;
            // The exit uop's operand0 points at its exit record.
            dest->operand0 = (uint64_t)exit;
            exit->executor = base_opcode == _EXIT_TRACE ? cold : cold_dynamic;
            exit->is_dynamic = (char)(base_opcode == _DYNAMIC_EXIT);
            exit->is_control_flow = (char)buffer[i].operand1;
            next_exit--;
        }
    }
    assert(next_exit == -1);
    assert(dest == executor->trace);
    assert(_PyUop_Uncached[dest->opcode] == _START_EXECUTOR);
    // Note: we MUST track it here before any Py_DECREF(executor) or
    // linking of executor. Otherwise, the GC tries to untrack a
    // still untracked object during dealloc.
    _PyObject_GC_TRACK(executor);
    if (_Py_ExecutorInit(executor, dependencies) < 0) {
        Py_DECREF(executor);
        return NULL;
    }
#ifdef Py_DEBUG
    char *python_lltrace = Py_GETENV("PYTHON_LLTRACE");
    int lltrace = 0;
    if (python_lltrace != NULL && *python_lltrace >= '0') {
        lltrace = *python_lltrace - '0';  // TODO: Parse an int and all that
    }
    if (lltrace >= 2) {
        printf("Optimized trace (length %d):\n", length);
        for (int i = 0; i < length; i++) {
            printf("%4d OPTIMIZED: ", i);
            _PyUOpPrint(&executor->trace[i]);
            printf("\n");
        }
    }
    sanity_check(executor);
#endif
#ifdef _Py_JIT
    executor->jit_code = NULL;
    executor->jit_size = 0;
    // This is initialized to false so we can prevent the executor
    // from being immediately detected as cold and invalidated.
    executor->vm_data.cold = false;
    if (_PyJIT_Compile(executor, executor->trace, length)) {
        Py_DECREF(executor);
        return NULL;
    }
#endif
    return executor;
}
1463
1464
#ifdef Py_STATS
/* Returns the effective trace length.
 * Ignores NOPs and trailing exit and error handling.*/
int effective_trace_length(_PyUOpInstruction *buffer, int length)
{
    int nops_seen = 0;
    for (int idx = 0; idx < length; idx++) {
        if (buffer[idx].opcode == _NOP) {
            nops_seen++;
        }
        if (is_terminator(&buffer[idx])) {
            /* Count everything up to and including the terminator,
             * minus the NOPs encountered along the way. */
            return idx + 1 - nops_seen;
        }
    }
    Py_FatalError("No terminating instruction");
    Py_UNREACHABLE();
}
#endif
1483
1484
1485
/* Rewrite `buffer` into `output`, assigning uops to cached stack
 * "depths" (register-cached stack slots) and inserting explicit
 * spill/reload pseudo-uops where the depth changes.
 * NOPs are dropped.  Returns the number of instructions written to
 * `output`, which may exceed `length` due to inserted spills/reloads.
 * NOTE(review): the exact meaning of "depth" comes from the generated
 * _PyUop_Caching tables (not visible here) — confirm against
 * pycore_uop_metadata.h. */
static int
stack_allocate(_PyUOpInstruction *buffer, _PyUOpInstruction *output, int length)
{
    assert(buffer[0].opcode == _START_EXECUTOR);
    /* The input buffer and output buffers will overlap.
       Make sure that we can move instructions to the output
       without overwriting the input. */
    if (buffer == output) {
        // This can only happen if optimizer has not been run
        for (int i = 0; i < length; i++) {
            buffer[i + UOP_MAX_TRACE_LENGTH] = buffer[i];
        }
        buffer += UOP_MAX_TRACE_LENGTH;
    }
    else {
        assert(output + UOP_MAX_TRACE_LENGTH == buffer);
    }
    int depth = 0;
    _PyUOpInstruction *write = output;
    for (int i = 0; i < length; i++) {
        int uop = buffer[i].opcode;
        if (uop == _NOP) {
            continue;
        }
        /* Preferred entry depth for this uop given the current depth. */
        int new_depth = _PyUop_Caching[uop].best[depth];
        if (new_depth != depth) {
            /* Emit a synthetic spill/reload uop to move between depths. */
            write->opcode = _PyUop_SpillsAndReloads[depth][new_depth];
            assert(write->opcode != 0);
            write->format = UOP_FORMAT_TARGET;
            write->oparg = 0;
            write->target = 0;
            write++;
            depth = new_depth;
        }
        /* Copy the instruction, then swap in the depth-specialized
         * variant of its opcode. */
        *write = buffer[i];
        uint16_t new_opcode = _PyUop_Caching[uop].entries[depth].opcode;
        assert(new_opcode != 0);
        write->opcode = new_opcode;
        write++;
        depth = _PyUop_Caching[uop].entries[depth].output;
    }
    return (int)(write - output);
}
1528
1529
/* Turn the thread's recorded uop trace into an executor.
 * Returns 1 on success (*exec_ptr set to a new executor), 0 if the
 * trace is too short to be worth compiling, -1 on error.
 * (`frame` and `progress_needed` are unused in this body — presumably
 * kept for signature compatibility; confirm against callers.) */
static int
uop_optimize(
    _PyInterpreterFrame *frame,
    PyThreadState *tstate,
    _PyExecutorObject **exec_ptr,
    bool progress_needed)
{
    _PyThreadStateImpl *_tstate = (_PyThreadStateImpl *)tstate;
    assert(_tstate->jit_tracer_state != NULL);
    _PyUOpInstruction *buffer = _tstate->jit_tracer_state->code_buffer.start;
    OPT_STAT_INC(attempts);
    bool is_noopt = !tstate->interp->opt_config.uops_optimize_enabled;
    int curr_stackentries = _tstate->jit_tracer_state->initial_state.stack_depth;
    int length = uop_buffer_length(&_tstate->jit_tracer_state->code_buffer);
    /* Traces at or below CODE_SIZE_NO_PROGRESS are just the boilerplate
     * prologue — nothing worth compiling. */
    if (length <= CODE_SIZE_NO_PROGRESS) {
        return 0;
    }
    assert(length > 0);
    assert(length < UOP_MAX_TRACE_LENGTH);
    OPT_STAT_INC(traces_created);

    _PyBloomFilter dependencies;
    _Py_BloomFilter_Init(&dependencies);
    if (!is_noopt) {
        /* Optimize into the second half of uop_array; the optimizer also
         * records invalidation dependencies into `dependencies`. */
        _PyUOpInstruction *output = &_tstate->jit_tracer_state->uop_array[UOP_MAX_TRACE_LENGTH];
        length = _Py_uop_analyze_and_optimize(
            _tstate, buffer, length, curr_stackentries,
            output, &dependencies);

        if (length <= 0) {
            return length;
        }
        buffer = output;
    }
    assert(length < UOP_MAX_TRACE_LENGTH);
    assert(length >= 1);
    /* Fix up */
    for (int pc = 0; pc < length; pc++) {
        int opcode = buffer[pc].opcode;
        int oparg = buffer[pc].oparg;
        if (oparg < _PyUop_Replication[opcode].stop && oparg >= _PyUop_Replication[opcode].start) {
            /* Replace the generic uop with its oparg-specialized replica;
             * the replica's name must be a prefix-match of the original. */
            buffer[pc].opcode = opcode + oparg + 1 - _PyUop_Replication[opcode].start;
            assert(strncmp(_PyOpcode_uop_name[buffer[pc].opcode], _PyOpcode_uop_name[opcode], strlen(_PyOpcode_uop_name[opcode])) == 0);
        }
        else if (_PyUop_Flags[opcode] & HAS_RECORDS_VALUE_FLAG) {
            /* Recording uops hold a strong reference in operand0; release
             * it and neutralize the instruction. */
            Py_XDECREF((PyObject *)(uintptr_t)buffer[pc].operand0);
            buffer[pc].opcode = _NOP;
        }
        else if (is_terminator(&buffer[pc])) {
            break;
        }
        assert(_PyOpcode_uop_name[buffer[pc].opcode]);
    }
    // We've cleaned up the references in the buffer, so discard the code buffer
    // to avoid doing it again during tracer cleanup
    _PyJitUopBuffer *code_buffer = &_tstate->jit_tracer_state->code_buffer;
    code_buffer->next = code_buffer->start;

    OPT_HIST(effective_trace_length(buffer, length), optimized_trace_length_hist);
    /* Stack-allocate into the first half of uop_array, then lay the
     * trace out for execution. */
    _PyUOpInstruction *output = &_tstate->jit_tracer_state->uop_array[0];
    length = stack_allocate(buffer, output, length);
    buffer = output;
    length = prepare_for_execution(buffer, length);
    assert(length <= UOP_MAX_TRACE_LENGTH);
    _PyExecutorObject *executor = make_executor_from_uops(
        _tstate, buffer, length, &dependencies);
    if (executor == NULL) {
        return -1;
    }
    assert(length <= UOP_MAX_TRACE_LENGTH);

    // Check executor coldness
    // It's okay if this ends up going negative.
    if (--tstate->interp->executor_creation_counter == 0) {
        _Py_set_eval_breaker_bit(tstate, _PY_EVAL_JIT_INVALIDATE_COLD_BIT);
    }

    *exec_ptr = executor;
    return 1;
}
1609
1610
1611
/*****************************************
1612
 *        Executor management
1613
 ****************************************/
1614
1615
static int
1616
link_executor(_PyExecutorObject *executor, const _PyBloomFilter *bloom)
1617
{
1618
    PyInterpreterState *interp = _PyInterpreterState_GET();
1619
    if (interp->executor_count == interp->executor_capacity) {
1620
        size_t new_cap = interp->executor_capacity ? interp->executor_capacity * 2 : 64;
1621
        _PyBloomFilter *new_blooms = PyMem_Realloc(
1622
            interp->executor_blooms, new_cap * sizeof(_PyBloomFilter));
1623
        if (new_blooms == NULL) {
1624
            return -1;
1625
        }
1626
        _PyExecutorObject **new_ptrs = PyMem_Realloc(
1627
            interp->executor_ptrs, new_cap * sizeof(_PyExecutorObject *));
1628
        if (new_ptrs == NULL) {
1629
            /* Revert blooms realloc — the old pointer may have been freed by
1630
             * a successful realloc, but new_blooms is the valid pointer. */
1631
            interp->executor_blooms = new_blooms;
1632
            return -1;
1633
        }
1634
        interp->executor_blooms = new_blooms;
1635
        interp->executor_ptrs = new_ptrs;
1636
        interp->executor_capacity = new_cap;
1637
    }
1638
    size_t idx = interp->executor_count++;
1639
    interp->executor_blooms[idx] = *bloom;
1640
    interp->executor_ptrs[idx] = executor;
1641
    executor->vm_data.bloom_array_idx = (int32_t)idx;
1642
    return 0;
1643
}
1644
1645
static void
1646
unlink_executor(_PyExecutorObject *executor)
1647
{
1648
    PyInterpreterState *interp = PyInterpreterState_Get();
1649
    int32_t idx = executor->vm_data.bloom_array_idx;
1650
    assert(idx >= 0 && (size_t)idx < interp->executor_count);
1651
    size_t last = --interp->executor_count;
1652
    if ((size_t)idx != last) {
1653
        /* Swap-remove: move the last element into the vacated slot */
1654
        interp->executor_blooms[idx] = interp->executor_blooms[last];
1655
        interp->executor_ptrs[idx] = interp->executor_ptrs[last];
1656
        interp->executor_ptrs[idx]->vm_data.bloom_array_idx = idx;
1657
    }
1658
    executor->vm_data.bloom_array_idx = -1;
1659
}
1660
1661
/* This must be called by optimizers before using the executor */
1662
int
1663
_Py_ExecutorInit(_PyExecutorObject *executor, const _PyBloomFilter *dependency_set)
1664
{
1665
    executor->vm_data.valid = true;
1666
    executor->vm_data.pending_deletion = 0;
1667
    executor->vm_data.code = NULL;
1668
    if (link_executor(executor, dependency_set) < 0) {
1669
        return -1;
1670
    }
1671
    return 0;
1672
}
1673
1674
/* Build a shared single-uop "cold" executor containing `opcode`
 * (a _COLD_*_EXIT uop).  Allocation or JIT-compilation failure is
 * fatal — these executors are core runtime infrastructure.  The result
 * is made immortal, so it is never deallocated. */
static _PyExecutorObject *
make_cold_executor(uint16_t opcode)
{
    _PyExecutorObject *cold = allocate_executor(0, 1);
    if (cold == NULL) {
        Py_FatalError("Cannot allocate core JIT code");
    }
    ((_PyUOpInstruction *)cold->trace)->opcode = opcode;
    // This is initialized to false so we can prevent the executor
    // from being immediately detected as cold and invalidated.
    cold->vm_data.cold = false;
#ifdef _Py_JIT
    cold->jit_code = NULL;
    cold->jit_size = 0;
    if (_PyJIT_Compile(cold, cold->trace, 1)) {
        Py_DECREF(cold);
        Py_FatalError("Cannot allocate core JIT code");
    }
#endif
    _Py_SetImmortal((PyObject *)cold);
    return cold;
}
1696
1697
_PyExecutorObject *
1698
_PyExecutor_GetColdExecutor(void)
1699
{
1700
    PyInterpreterState *interp = _PyInterpreterState_GET();
1701
    if (interp->cold_executor == NULL) {
1702
        return interp->cold_executor = make_cold_executor(_COLD_EXIT_r00);;
1703
    }
1704
    return interp->cold_executor;
1705
}
1706
1707
_PyExecutorObject *
1708
_PyExecutor_GetColdDynamicExecutor(void)
1709
{
1710
    PyInterpreterState *interp = _PyInterpreterState_GET();
1711
    if (interp->cold_dynamic_executor == NULL) {
1712
        interp->cold_dynamic_executor = make_cold_executor(_COLD_DYNAMIC_EXIT_r00);
1713
    }
1714
    return interp->cold_dynamic_executor;
1715
}
1716
1717
void
1718
_PyExecutor_ClearExit(_PyExitData *exit)
1719
{
1720
    if (exit == NULL) {
1721
        return;
1722
    }
1723
    _PyExecutorObject *old = exit->executor;
1724
    if (exit->is_dynamic) {
1725
        exit->executor = _PyExecutor_GetColdDynamicExecutor();
1726
    }
1727
    else {
1728
        exit->executor = _PyExecutor_GetColdExecutor();
1729
    }
1730
    Py_DECREF(old);
1731
}
1732
1733
/* Detaches the executor from the code object (if any) that
 * holds a reference to it.
 * Restores the original (deoptimized) instruction in the bytecode and
 * drops the code object's reference to the executor.  No-op if the
 * executor is not attached to any code object. */
void
_Py_ExecutorDetach(_PyExecutorObject *executor)
{
    PyCodeObject *code = executor->vm_data.code;
    if (code == NULL) {
        return;
    }
    _Py_CODEUNIT *instruction = &_PyCode_CODE(code)[executor->vm_data.index];
    assert(instruction->op.code == ENTER_EXECUTOR);
    int index = instruction->op.arg;
    assert(code->co_executors->executors[index] == executor);
    /* Replace ENTER_EXECUTOR with the deopt form of the original opcode. */
    instruction->op.code = _PyOpcode_Deopt[executor->vm_data.opcode];
    instruction->op.arg = executor->vm_data.oparg;
    executor->vm_data.code = NULL;
    code->co_executors->executors[index] = NULL;
    /* DECREF last: this may be the final reference. */
    Py_DECREF(executor);
}
1752
1753
/* Executors can be invalidated at any time,
   even with a stop-the-world lock held.
   Consequently it must not run arbitrary code,
   including Py_DECREF with a non-executor. */
static void
executor_invalidate(PyObject *op)
{
    _PyExecutorObject *executor = _PyExecutorObject_CAST(op);
    /* Idempotent: a second invalidation is a no-op. */
    if (!executor->vm_data.valid) {
        return;
    }
    executor->vm_data.valid = 0;
    /* Order matters: unregister from the interpreter arrays, reset all
     * exits to the cold executors, detach from the owning code object,
     * then untrack from the GC. */
    unlink_executor(executor);
    executor_clear_exits(executor);
    _Py_ExecutorDetach(executor);
    _PyObject_GC_UNTRACK(op);
}
1770
1771
/* tp_clear slot: clearing an executor is implemented as invalidation,
 * which drops its references via executor_invalidate. */
static int
executor_clear(PyObject *op)
{
    executor_invalidate(op);
    return 0;
}
1777
1778
void
1779
_Py_Executor_DependsOn(_PyExecutorObject *executor, void *obj)
1780
{
1781
    assert(executor->vm_data.valid);
1782
    PyInterpreterState *interp = _PyInterpreterState_GET();
1783
    int32_t idx = executor->vm_data.bloom_array_idx;
1784
    assert(idx >= 0 && (size_t)idx < interp->executor_count);
1785
    _Py_BloomFilter_Add(&interp->executor_blooms[idx], obj);
1786
}
1787
1788
/* Invalidate all executors that depend on `obj`
 * May cause other executors to be invalidated as well.
 * Uses contiguous bloom filter array for cache-friendly scanning.
 */
void
_Py_Executors_InvalidateDependency(PyInterpreterState *interp, void *obj, int is_invalidation)
{
    _PyBloomFilter obj_filter;
    _Py_BloomFilter_Init(&obj_filter);
    _Py_BloomFilter_Add(&obj_filter, obj);
    /* Scan contiguous bloom filter array */
    PyObject *invalidate = PyList_New(0);
    if (invalidate == NULL) {
        goto error;
    }
    /* Clearing an executor can clear others, so we need to make a list of
     * executors to invalidate first */
    for (size_t i = 0; i < interp->executor_count; i++) {
        assert(interp->executor_ptrs[i]->vm_data.valid);
        /* PyList_Append returns non-zero on failure. */
        if (bloom_filter_may_contain(&interp->executor_blooms[i], &obj_filter) &&
            PyList_Append(invalidate, (PyObject *)interp->executor_ptrs[i]))
        {
            goto error;
        }
    }
    /* Phase two: invalidate everything collected above.  The list holds
     * strong references, so entries stay alive while we iterate. */
    for (Py_ssize_t i = 0; i < PyList_GET_SIZE(invalidate); i++) {
        PyObject *exec = PyList_GET_ITEM(invalidate, i);
        executor_invalidate(exec);
        if (is_invalidation) {
            OPT_STAT_INC(executors_invalidated);
        }
    }
    Py_DECREF(invalidate);
    return;
error:
    PyErr_Clear();
    Py_XDECREF(invalidate);
    // If we're truly out of memory, wiping out everything is a fine fallback:
    _Py_Executors_InvalidateAll(interp, is_invalidation);
}
1828
1829
/* Invalidate all executors.
 * Loops until the interpreter's executor array is empty; each pass
 * removes at least the last-registered executor (invalidation performs
 * swap-remove, so the count shrinks every iteration). */
void
_Py_Executors_InvalidateAll(PyInterpreterState *interp, int is_invalidation)
{
    while (interp->executor_count > 0) {
        /* Invalidate from the end to avoid repeated swap-remove shifts */
        _PyExecutorObject *executor = interp->executor_ptrs[interp->executor_count - 1];
        assert(executor->vm_data.valid);
        if (executor->vm_data.code) {
            // Clear the entire code object so its co_executors array be freed:
            _PyCode_Clear_Executors(executor->vm_data.code);
        }
        else {
            executor_invalidate((PyObject *)executor);
        }
        if (is_invalidation) {
            OPT_STAT_INC(executors_invalidated);
        }
    }
}
1849
1850
void
1851
_Py_Executors_InvalidateCold(PyInterpreterState *interp)
1852
{
1853
    /* Scan contiguous executor array */
1854
    PyObject *invalidate = PyList_New(0);
1855
    if (invalidate == NULL) {
1856
        goto error;
1857
    }
1858
1859
    /* Clearing an executor can deallocate others, so we need to make a list of
1860
     * executors to invalidate first */
1861
    for (size_t i = 0; i < interp->executor_count; i++) {
1862
        _PyExecutorObject *exec = interp->executor_ptrs[i];
1863
        assert(exec->vm_data.valid);
1864
1865
        if (exec->vm_data.cold && PyList_Append(invalidate, (PyObject *)exec) < 0) {
1866
            goto error;
1867
        }
1868
        else {
1869
            exec->vm_data.cold = true;
1870
        }
1871
    }
1872
    for (Py_ssize_t i = 0; i < PyList_GET_SIZE(invalidate); i++) {
1873
        PyObject *exec = PyList_GET_ITEM(invalidate, i);
1874
        executor_invalidate(exec);
1875
    }
1876
    Py_DECREF(invalidate);
1877
    return;
1878
error:
1879
    PyErr_Clear();
1880
    Py_XDECREF(invalidate);
1881
    // If we're truly out of memory, wiping out everything is a fine fallback
1882
    _Py_Executors_InvalidateAll(interp, 0);
1883
}
1884
1885
#include "record_functions.c.h"
1886
1887
static int
1888
escape_angles(const char *input, Py_ssize_t size, char *buffer) {
1889
    int written = 0;
1890
    for (Py_ssize_t i = 0; i < size; i++) {
1891
        char c = input[i];
1892
        if (c == '<' || c == '>') {
1893
            buffer[written++] = '&';
1894
            buffer[written++] = c == '>' ? 'g' : 'l';
1895
            buffer[written++] = 't';
1896
            buffer[written++] = ';';
1897
        }
1898
        else {
1899
            buffer[written++] = c;
1900
        }
1901
    }
1902
    return written;
1903
}
1904
1905
static void
1906
write_str(PyObject *str, FILE *out)
1907
{
1908
    // Encode the Unicode object to the specified encoding
1909
    PyObject *encoded_obj = PyUnicode_AsEncodedString(str, "utf8", "strict");
1910
    if (encoded_obj == NULL) {
1911
        PyErr_Clear();
1912
        return;
1913
    }
1914
    const char *encoded_str = PyBytes_AsString(encoded_obj);
1915
    Py_ssize_t encoded_size = PyBytes_Size(encoded_obj);
1916
    char buffer[120];
1917
    bool truncated = false;
1918
    if (encoded_size > 24) {
1919
        encoded_size = 24;
1920
        truncated = true;
1921
    }
1922
    int size = escape_angles(encoded_str, encoded_size, buffer);
1923
    fwrite(buffer, 1, size, out);
1924
    if (truncated) {
1925
        fwrite("...", 1, 3, out);
1926
    }
1927
    Py_DECREF(encoded_obj);
1928
}
1929
1930
/* Find the source line number of the ENTER_EXECUTOR instruction in
 * `code` that points at `executor`, or -1 if not found. */
static int
find_line_number(PyCodeObject *code, _PyExecutorObject *executor)
{
    int code_len = (int)Py_SIZE(code);
    for (int i = 0; i < code_len; i++) {
        _Py_CODEUNIT *instr = &_PyCode_CODE(code)[i];
        int opcode = instr->op.code;
        if (opcode == ENTER_EXECUTOR) {
            _PyExecutorObject *exec = code->co_executors->executors[instr->op.arg];
            if (exec == executor) {
                /* i*2: PyCode_Addr2Line takes a byte offset; code units
                 * are 2 bytes. */
                return PyCode_Addr2Line(code, i*2);
            }
        }
        /* Skip this instruction's inline cache entries; use the base
         * (unspecialized) opcode to look up the cache count. */
        i += _PyOpcode_Caches[_Py_GetBaseCodeUnit(code, i).op.code];
    }
    return -1;
}
1947
1948
#define RED "#ff0000"
1949
#define WHITE "#ffffff"
1950
#define BLUE "#0000ff"
1951
#define BLACK "#000000"
1952
#define LOOP "#00c000"
1953
1954
#ifdef Py_STATS
1955
1956
/* Background colors bucketed by hotness, coldest (index 0) to hottest
 * (index 9).  Entries "9".."1" are presumably indices into the
 * `greys9` colorscheme set in _PyDumpExecutors; the hottest bucket is
 * plain white — confirm against graphviz colorscheme docs. */
static const char *COLORS[10] = {
    "9",
    "8",
    "7",
    "6",
    "5",
    "4",
    "3",
    "2",
    "1",
    WHITE,
};
1968
const char *
1969
get_background_color(_PyUOpInstruction const *inst, uint64_t max_hotness)
1970
{
1971
    uint64_t hotness = inst->execution_count;
1972
    int index = (hotness * 10)/max_hotness;
1973
    if (index > 9) {
1974
        index = 9;
1975
    }
1976
    if (index < 0) {
1977
        index = 0;
1978
    }
1979
    return COLORS[index];
1980
}
1981
1982
const char *
1983
get_foreground_color(_PyUOpInstruction const *inst, uint64_t max_hotness)
1984
{
1985
    if(_PyUop_Uncached[inst->opcode] == _DEOPT) {
1986
        return RED;
1987
    }
1988
    uint64_t hotness = inst->execution_count;
1989
    int index = (hotness * 10)/max_hotness;
1990
    if (index > 3) {
1991
        return BLACK;
1992
    }
1993
    return WHITE;
1994
}
1995
#endif
1996
1997
/* Emit one HTML table row for the uop at index `i` of `executor`'s
 * trace.  With Py_STATS, the row is colored by hotness and shows the
 * execution count; otherwise deopts are shown in red and operand0 is
 * printed. */
static void
write_row_for_uop(_PyExecutorObject *executor, uint32_t i, FILE *out)
{
    /* Write row for uop.
        * The `port` is a marker so that outgoing edges can
        * be placed correctly. If a row is marked `port=17`,
        * then the outgoing edge is `{EXEC_NAME}:17 -> {TARGET}`
        * https://graphviz.readthedocs.io/en/stable/manual.html#node-ports-compass
        */
    _PyUOpInstruction const *inst = &executor->trace[i];
    const char *opname = _PyOpcode_uop_name[inst->opcode];
#ifdef Py_STATS
    /* trace[0] is _START_EXECUTOR; its count is the executor's entry
     * count, used as the hotness scale. */
    const char *bg_color = get_background_color(inst, executor->trace[0].execution_count);
    const char *color = get_foreground_color(inst, executor->trace[0].execution_count);
    fprintf(out, "        <tr><td port=\"i%d\" border=\"1\" color=\"%s\" bgcolor=\"%s\" ><font color=\"%s\"> %s &nbsp;--&nbsp; %" PRIu64 "</font></td></tr>\n",
        i, color, bg_color, color, opname, inst->execution_count);
#else
    const char *color = (_PyUop_Uncached[inst->opcode] == _DEOPT) ? RED : BLACK;
    fprintf(out, "        <tr><td port=\"i%d\" border=\"1\" color=\"%s\" >%s op0=%" PRIu64 "</td></tr>\n", i, color, opname, inst->operand0);
#endif
}
2018
2019
static bool
2020
is_stop(_PyUOpInstruction const *inst)
2021
{
2022
    uint16_t base_opcode = _PyUop_Uncached[inst->opcode];
2023
    return (base_opcode == _EXIT_TRACE || base_opcode == _DEOPT || base_opcode == _JUMP_TO_TOP);
2024
}
2025
2026
2027
/* Writes the node and outgoing edges for a single tracelet in graphviz format.
 * Each tracelet is presented as a table of the uops it contains.
 * If Py_STATS is enabled, execution counts are included.
 *
 * https://graphviz.readthedocs.io/en/stable/manual.html
 * https://graphviz.org/gallery/
 */
static void
executor_to_gv(_PyExecutorObject *executor, FILE *out)
{
    PyCodeObject *code = executor->vm_data.code;
    fprintf(out, "executor_%p [\n", executor);
    fprintf(out, "    shape = none\n");

    /* Write the HTML table for the uops */
    fprintf(out, "    label = <<table border=\"0\" cellspacing=\"0\">\n");
    fprintf(out, "        <tr><td port=\"start\" border=\"1\" ><b>Executor</b></td></tr>\n");
    if (code == NULL) {
        fprintf(out, "        <tr><td border=\"1\" >No code object</td></tr>\n");
    }
    else {
        /* Header row: qualified name and source line of the executor. */
        fprintf(out, "        <tr><td  border=\"1\" >");
        write_str(code->co_qualname, out);
        int line = find_line_number(code, executor);
        fprintf(out, ": %d</td></tr>\n", line);
    }
    /* One row per uop, stopping at the first trace terminator. */
    for (uint32_t i = 0; i < executor->code_size; i++) {
        write_row_for_uop(executor, i, out);
        if (is_stop(&executor->trace[i])) {
            break;
        }
    }
    fprintf(out, "    </table>>\n");
    fprintf(out, "]\n\n");

    /* Write all the outgoing edges */
    _PyExecutorObject *cold = _PyExecutor_GetColdExecutor();
    _PyExecutorObject *cold_dynamic = _PyExecutor_GetColdDynamicExecutor();
    for (uint32_t i = 0; i < executor->code_size; i++) {
        _PyUOpInstruction const *inst = &executor->trace[i];
        uint16_t base_opcode = _PyUop_Uncached[inst->opcode];
        uint16_t flags = _PyUop_Flags[base_opcode];
        _PyExitData *exit = NULL;
        if (base_opcode == _JUMP_TO_TOP) {
            /* Back-edge to the top of the same executor (a loop). */
            fprintf(out, "executor_%p:i%d -> executor_%p:i%d [color = \"" LOOP "\"]\n", executor, i, executor, inst->jump_target);
            break;
        }
        if (base_opcode == _EXIT_TRACE) {
            /* Exit data is stored directly in operand0. */
            exit = (_PyExitData *)inst->operand0;
        }
        else if (flags & HAS_EXIT_FLAG) {
            /* Guard-style uop: its jump target is the exit instruction. */
            assert(inst->format == UOP_FORMAT_JUMP);
            _PyUOpInstruction const *exit_inst = &executor->trace[inst->jump_target];
            uint16_t base_exit_opcode = _PyUop_Uncached[exit_inst->opcode];
            (void)base_exit_opcode;
            assert(base_exit_opcode == _EXIT_TRACE || base_exit_opcode == _DYNAMIC_EXIT);
            exit = (_PyExitData *)exit_inst->operand0;
        }
        if (exit != NULL) {
            if (exit->executor == cold || exit->executor == cold_dynamic) {
#ifdef Py_STATS
                /* Only mark as have cold exit if it has actually exited */
                uint64_t diff = inst->execution_count - executor->trace[i+1].execution_count;
                if (diff) {
                    fprintf(out, "cold_%p%d [ label = \"%"  PRIu64  "\" shape = ellipse color=\"" BLUE "\" ]\n", executor, i, diff);
                    fprintf(out, "executor_%p:i%d -> cold_%p%d\n", executor, i, executor, i);
                }
#endif
            }
            else {
                /* Exit points at a real executor: draw the edge to it. */
                fprintf(out, "executor_%p:i%d -> executor_%p:start\n", executor, i, exit->executor);
            }
        }
        if (is_stop(inst)) {
            break;
        }
    }
}
2105
2106
/* Write the graph of all the live tracelets in graphviz format. */
2107
int
2108
_PyDumpExecutors(FILE *out)
2109
{
2110
    fprintf(out, "digraph ideal {\n\n");
2111
    fprintf(out, "    rankdir = \"LR\"\n\n");
2112
    fprintf(out, "    node [colorscheme=greys9]\n");
2113
    PyInterpreterState *interp = PyInterpreterState_Get();
2114
    for (size_t i = 0; i < interp->executor_count; i++) {
2115
        executor_to_gv(interp->executor_ptrs[i], out);
2116
    }
2117
    fprintf(out, "}\n\n");
2118
    return 0;
2119
}
2120
2121
#else
2122
2123
/* Stub for builds without _Py_TIER2: always fails with
 * NotImplementedError. */
int
_PyDumpExecutors(FILE *out)
{
    PyErr_SetString(PyExc_NotImplementedError, "No JIT available");
    return -1;
}
2129
2130
/* Stub for builds without _Py_TIER2: no executors can exist, so
 * freeing one is unreachable. */
void
_PyExecutor_Free(struct _PyExecutorObject *self)
{
    /* This should never be called */
    Py_UNREACHABLE();
}
2136
2137
#endif /* _Py_TIER2 */