Coverage Report

Created: 2025-12-14 07:06

next uncovered line (L), next uncovered region (R), next uncovered branch (B)
/src/cpython/Python/optimizer.c
Line
Count
Source
1
#include "Python.h"
2
3
#ifdef _Py_TIER2
4
5
#include "opcode.h"
6
#include "pycore_interp.h"
7
#include "pycore_backoff.h"
8
#include "pycore_bitutils.h"        // _Py_popcount32()
9
#include "pycore_ceval.h"       // _Py_set_eval_breaker_bit
10
#include "pycore_code.h"            // _Py_GetBaseCodeUnit
11
#include "pycore_function.h"        // _PyFunction_LookupByVersion()
12
#include "pycore_interpframe.h"
13
#include "pycore_object.h"          // _PyObject_GC_UNTRACK()
14
#include "pycore_opcode_metadata.h" // _PyOpcode_OpName[]
15
#include "pycore_opcode_utils.h"  // MAX_REAL_OPCODE
16
#include "pycore_optimizer.h"     // _Py_uop_analyze_and_optimize()
17
#include "pycore_pystate.h"       // _PyInterpreterState_GET()
18
#include "pycore_tuple.h"         // _PyTuple_FromArraySteal
19
#include "pycore_unicodeobject.h" // _PyUnicode_FromASCII
20
#include "pycore_uop_ids.h"
21
#include "pycore_jit.h"
22
#include <stdbool.h>
23
#include <stdint.h>
24
#include <stddef.h>
25
26
#define NEED_OPCODE_METADATA
27
#include "pycore_uop_metadata.h" // Uop tables
28
#undef NEED_OPCODE_METADATA
29
30
#define MAX_EXECUTORS_SIZE 256
31
32
// Trace too short, no progress:
33
// _START_EXECUTOR
34
// _MAKE_WARM
35
// _CHECK_VALIDITY
36
// _SET_IP
37
// is 4-5 instructions.
38
#define CODE_SIZE_NO_PROGRESS 5
39
// We start with _START_EXECUTOR, _MAKE_WARM
40
#define CODE_SIZE_EMPTY 2
41
42
#define _PyExecutorObject_CAST(op)  ((_PyExecutorObject *)(op))
43
44
static bool
45
has_space_for_executor(PyCodeObject *code, _Py_CODEUNIT *instr)
46
{
47
    if (code == (PyCodeObject *)&_Py_InitCleanup) {
48
        return false;
49
    }
50
    if (instr->op.code == ENTER_EXECUTOR) {
51
        return true;
52
    }
53
    if (code->co_executors == NULL) {
54
        return true;
55
    }
56
    return code->co_executors->size < MAX_EXECUTORS_SIZE;
57
}
58
59
static int32_t
60
get_index_for_executor(PyCodeObject *code, _Py_CODEUNIT *instr)
61
{
62
    if (instr->op.code == ENTER_EXECUTOR) {
63
        return instr->op.arg;
64
    }
65
    _PyExecutorArray *old = code->co_executors;
66
    int size = 0;
67
    int capacity = 0;
68
    if (old != NULL) {
69
        size = old->size;
70
        capacity = old->capacity;
71
        assert(size < MAX_EXECUTORS_SIZE);
72
    }
73
    assert(size <= capacity);
74
    if (size == capacity) {
75
        /* Array is full. Grow array */
76
        int new_capacity = capacity ? capacity * 2 : 4;
77
        _PyExecutorArray *new = PyMem_Realloc(
78
            old,
79
            offsetof(_PyExecutorArray, executors) +
80
            new_capacity * sizeof(_PyExecutorObject *));
81
        if (new == NULL) {
82
            return -1;
83
        }
84
        new->capacity = new_capacity;
85
        new->size = size;
86
        code->co_executors = new;
87
    }
88
    assert(size < code->co_executors->capacity);
89
    return size;
90
}
91
92
static void
93
insert_executor(PyCodeObject *code, _Py_CODEUNIT *instr, int index, _PyExecutorObject *executor)
94
{
95
    Py_INCREF(executor);
96
    if (instr->op.code == ENTER_EXECUTOR) {
97
        assert(index == instr->op.arg);
98
        _Py_ExecutorDetach(code->co_executors->executors[index]);
99
    }
100
    else {
101
        assert(code->co_executors->size == index);
102
        assert(code->co_executors->capacity > index);
103
        code->co_executors->size++;
104
    }
105
    executor->vm_data.opcode = instr->op.code;
106
    executor->vm_data.oparg = instr->op.arg;
107
    executor->vm_data.code = code;
108
    executor->vm_data.index = (int)(instr - _PyCode_CODE(code));
109
    code->co_executors->executors[index] = executor;
110
    assert(index < MAX_EXECUTORS_SIZE);
111
    instr->op.code = ENTER_EXECUTOR;
112
    instr->op.arg = index;
113
}
114
115
static _PyExecutorObject *
116
make_executor_from_uops(_PyUOpInstruction *buffer, int length, const _PyBloomFilter *dependencies, int chain_depth);
117
118
static int
119
uop_optimize(_PyInterpreterFrame *frame, PyThreadState *tstate,
120
             _PyExecutorObject **exec_ptr,
121
             bool progress_needed);
122
123
/* Returns 1 if optimized, 0 if not optimized, and -1 for an error.
124
 * If optimized, *executor_ptr contains a new reference to the executor
125
 */
126
// gh-137573: inlining this function causes stack overflows
127
Py_NO_INLINE int
128
_PyOptimizer_Optimize(
129
    _PyInterpreterFrame *frame, PyThreadState *tstate)
130
{
131
    _PyThreadStateImpl *_tstate = (_PyThreadStateImpl *)tstate;
132
    int chain_depth = _tstate->jit_tracer_state.initial_state.chain_depth;
133
    PyInterpreterState *interp = _PyInterpreterState_GET();
134
    if (!interp->jit) {
135
        // gh-140936: It is possible that interp->jit will become false during
136
        // interpreter finalization. However, the specialized JUMP_BACKWARD_JIT
137
        // instruction may still be present. In this case, we should
138
        // return immediately without optimization.
139
        return 0;
140
    }
141
    assert(!interp->compiling);
142
    assert(_tstate->jit_tracer_state.initial_state.stack_depth >= 0);
143
#ifndef Py_GIL_DISABLED
144
    assert(_tstate->jit_tracer_state.initial_state.func != NULL);
145
    interp->compiling = true;
146
    // The first executor in a chain and the MAX_CHAIN_DEPTH'th executor *must*
147
    // make progress in order to avoid infinite loops or excessively-long
148
    // side-exit chains. We can only insert the executor into the bytecode if
149
    // this is true, since a deopt won't infinitely re-enter the executor:
150
    chain_depth %= MAX_CHAIN_DEPTH;
151
    bool progress_needed = chain_depth == 0;
152
    PyCodeObject *code = (PyCodeObject *)_tstate->jit_tracer_state.initial_state.code;
153
    _Py_CODEUNIT *start = _tstate->jit_tracer_state.initial_state.start_instr;
154
    if (progress_needed && !has_space_for_executor(code, start)) {
155
        interp->compiling = false;
156
        return 0;
157
    }
158
    // One of our dependencies while tracing was invalidated. Not worth compiling.
159
    if (!_tstate->jit_tracer_state.prev_state.dependencies_still_valid) {
160
        interp->compiling = false;
161
        return 0;
162
    }
163
    _PyExecutorObject *executor;
164
    int err = uop_optimize(frame, tstate, &executor, progress_needed);
165
    if (err <= 0) {
166
        interp->compiling = false;
167
        return err;
168
    }
169
    assert(executor != NULL);
170
    if (progress_needed) {
171
        int index = get_index_for_executor(code, start);
172
        if (index < 0) {
173
            /* Out of memory. Don't raise and assume that the
174
             * error will show up elsewhere.
175
             *
176
             * If an optimizer has already produced an executor,
177
             * it might get confused by the executor disappearing,
178
             * but there is not much we can do about that here. */
179
            Py_DECREF(executor);
180
            interp->compiling = false;
181
            return 0;
182
        }
183
        insert_executor(code, start, index, executor);
184
    }
185
    else {
186
        executor->vm_data.code = NULL;
187
    }
188
    _PyExitData *exit = _tstate->jit_tracer_state.initial_state.exit;
189
    if (exit != NULL) {
190
        exit->executor = executor;
191
    }
192
    executor->vm_data.chain_depth = chain_depth;
193
    assert(executor->vm_data.valid);
194
    interp->compiling = false;
195
    return 1;
196
#else
197
    return 0;
198
#endif
199
}
200
201
static _PyExecutorObject *
202
get_executor_lock_held(PyCodeObject *code, int offset)
203
{
204
    int code_len = (int)Py_SIZE(code);
205
    for (int i = 0 ; i < code_len;) {
206
        if (_PyCode_CODE(code)[i].op.code == ENTER_EXECUTOR && i*2 == offset) {
207
            int oparg = _PyCode_CODE(code)[i].op.arg;
208
            _PyExecutorObject *res = code->co_executors->executors[oparg];
209
            Py_INCREF(res);
210
            return res;
211
        }
212
        i += _PyInstruction_GetLength(code, i);
213
    }
214
    PyErr_SetString(PyExc_ValueError, "no executor at given byte offset");
215
    return NULL;
216
}
217
218
_PyExecutorObject *
219
_Py_GetExecutor(PyCodeObject *code, int offset)
220
{
221
    _PyExecutorObject *executor;
222
    Py_BEGIN_CRITICAL_SECTION(code);
223
    executor = get_executor_lock_held(code, offset);
224
    Py_END_CRITICAL_SECTION();
225
    return executor;
226
}
227
228
static PyObject *
229
is_valid(PyObject *self, PyObject *Py_UNUSED(ignored))
230
{
231
    return PyBool_FromLong(((_PyExecutorObject *)self)->vm_data.valid);
232
}
233
234
static PyObject *
235
get_opcode(PyObject *self, PyObject *Py_UNUSED(ignored))
236
{
237
    return PyLong_FromUnsignedLong(((_PyExecutorObject *)self)->vm_data.opcode);
238
}
239
240
static PyObject *
241
get_oparg(PyObject *self, PyObject *Py_UNUSED(ignored))
242
{
243
    return PyLong_FromUnsignedLong(((_PyExecutorObject *)self)->vm_data.oparg);
244
}
245
246
///////////////////// Experimental UOp Optimizer /////////////////////
247
248
static int executor_clear(PyObject *executor);
249
static void unlink_executor(_PyExecutorObject *executor);
250
251
252
void
253
_PyExecutor_Free(_PyExecutorObject *self)
254
{
255
#ifdef _Py_JIT
256
    _PyJIT_Free(self);
257
#endif
258
    PyObject_GC_Del(self);
259
}
260
261
void
262
_Py_ClearExecutorDeletionList(PyInterpreterState *interp)
263
{
264
    _PyRuntimeState *runtime = &_PyRuntime;
265
    HEAD_LOCK(runtime);
266
    PyThreadState* ts = PyInterpreterState_ThreadHead(interp);
267
    HEAD_UNLOCK(runtime);
268
    while (ts) {
269
        _PyExecutorObject *current = (_PyExecutorObject *)ts->current_executor;
270
        if (current != NULL) {
271
            /* Anything in this list will be unlinked, so we can reuse the
272
             * linked field as a reachability marker. */
273
            current->vm_data.linked = 1;
274
        }
275
        HEAD_LOCK(runtime);
276
        ts = PyThreadState_Next(ts);
277
        HEAD_UNLOCK(runtime);
278
    }
279
    _PyExecutorObject **prev_to_next_ptr = &interp->executor_deletion_list_head;
280
    _PyExecutorObject *exec = *prev_to_next_ptr;
281
    while (exec != NULL) {
282
        if (exec->vm_data.linked) {
283
            // This executor is currently executing
284
            exec->vm_data.linked = 0;
285
            prev_to_next_ptr = &exec->vm_data.links.next;
286
        }
287
        else {
288
            *prev_to_next_ptr = exec->vm_data.links.next;
289
            _PyExecutor_Free(exec);
290
        }
291
        exec = *prev_to_next_ptr;
292
    }
293
    interp->executor_deletion_list_remaining_capacity = EXECUTOR_DELETE_LIST_MAX;
294
}
295
296
static void
297
add_to_pending_deletion_list(_PyExecutorObject *self)
298
{
299
    PyInterpreterState *interp = PyInterpreterState_Get();
300
    self->vm_data.links.next = interp->executor_deletion_list_head;
301
    interp->executor_deletion_list_head = self;
302
    if (interp->executor_deletion_list_remaining_capacity > 0) {
303
        interp->executor_deletion_list_remaining_capacity--;
304
    }
305
    else {
306
        _Py_ClearExecutorDeletionList(interp);
307
    }
308
}
309
310
static void
311
uop_dealloc(PyObject *op) {
312
    _PyExecutorObject *self = _PyExecutorObject_CAST(op);
313
    _PyObject_GC_UNTRACK(self);
314
    assert(self->vm_data.code == NULL);
315
    unlink_executor(self);
316
    // Once unlinked it becomes impossible to invalidate an executor, so do it here.
317
    self->vm_data.valid = 0;
318
    add_to_pending_deletion_list(self);
319
}
320
321
const char *
322
_PyUOpName(int index)
323
{
324
    if (index < 0 || index > MAX_UOP_REGS_ID) {
325
        return NULL;
326
    }
327
    return _PyOpcode_uop_name[index];
328
}
329
330
#ifdef Py_DEBUG
331
void
332
_PyUOpPrint(const _PyUOpInstruction *uop)
333
{
334
    const char *name = _PyUOpName(uop->opcode);
335
    if (name == NULL) {
336
        printf("<uop %d>", uop->opcode);
337
    }
338
    else {
339
        printf("%s", name);
340
    }
341
    switch(uop->format) {
342
        case UOP_FORMAT_TARGET:
343
            printf(" (%d, target=%d, operand0=%#" PRIx64 ", operand1=%#" PRIx64,
344
                uop->oparg,
345
                uop->target,
346
                (uint64_t)uop->operand0,
347
                (uint64_t)uop->operand1);
348
            break;
349
        case UOP_FORMAT_JUMP:
350
            printf(" (%d, jump_target=%d, operand0=%#" PRIx64 ", operand1=%#" PRIx64,
351
                uop->oparg,
352
                uop->jump_target,
353
                (uint64_t)uop->operand0,
354
                (uint64_t)uop->operand1);
355
            break;
356
        default:
357
            printf(" (%d, Unknown format)", uop->oparg);
358
    }
359
    if (_PyUop_Flags[uop->opcode] & HAS_ERROR_FLAG) {
360
        printf(", error_target=%d", uop->error_target);
361
    }
362
363
    printf(")");
364
}
365
#endif
366
367
static Py_ssize_t
368
uop_len(PyObject *op)
369
{
370
    _PyExecutorObject *self = _PyExecutorObject_CAST(op);
371
    return self->code_size;
372
}
373
374
static PyObject *
375
uop_item(PyObject *op, Py_ssize_t index)
376
{
377
    _PyExecutorObject *self = _PyExecutorObject_CAST(op);
378
    Py_ssize_t len = uop_len(op);
379
    if (index < 0 || index >= len) {
380
        PyErr_SetNone(PyExc_IndexError);
381
        return NULL;
382
    }
383
    int opcode = self->trace[index].opcode;
384
    int base_opcode = _PyUop_Uncached[opcode];
385
    const char *name = _PyUOpName(base_opcode);
386
    if (name == NULL) {
387
        name = "<nil>";
388
    }
389
    PyObject *oname = _PyUnicode_FromASCII(name, strlen(name));
390
    if (oname == NULL) {
391
        return NULL;
392
    }
393
    PyObject *oparg = PyLong_FromUnsignedLong(self->trace[index].oparg);
394
    if (oparg == NULL) {
395
        Py_DECREF(oname);
396
        return NULL;
397
    }
398
    PyObject *target = PyLong_FromUnsignedLong(self->trace[index].target);
399
    if (target == NULL) {
400
        Py_DECREF(oparg);
401
        Py_DECREF(oname);
402
        return NULL;
403
    }
404
    PyObject *operand = PyLong_FromUnsignedLongLong(self->trace[index].operand0);
405
    if (operand == NULL) {
406
        Py_DECREF(target);
407
        Py_DECREF(oparg);
408
        Py_DECREF(oname);
409
        return NULL;
410
    }
411
    PyObject *args[4] = { oname, oparg, target, operand };
412
    return _PyTuple_FromArraySteal(args, 4);
413
}
414
415
PySequenceMethods uop_as_sequence = {
416
    .sq_length = uop_len,
417
    .sq_item = uop_item,
418
};
419
420
static int
421
executor_traverse(PyObject *o, visitproc visit, void *arg)
422
{
423
    _PyExecutorObject *executor = _PyExecutorObject_CAST(o);
424
    for (uint32_t i = 0; i < executor->exit_count; i++) {
425
        Py_VISIT(executor->exits[i].executor);
426
    }
427
    return 0;
428
}
429
430
static PyObject *
431
get_jit_code(PyObject *self, PyObject *Py_UNUSED(ignored))
432
{
433
#ifndef _Py_JIT
434
    PyErr_SetString(PyExc_RuntimeError, "JIT support not enabled.");
435
    return NULL;
436
#else
437
    _PyExecutorObject *executor = _PyExecutorObject_CAST(self);
438
    if (executor->jit_code == NULL || executor->jit_size == 0) {
439
        Py_RETURN_NONE;
440
    }
441
    return PyBytes_FromStringAndSize(executor->jit_code, executor->jit_size);
442
#endif
443
}
444
445
static PyMethodDef uop_executor_methods[] = {
446
    { "is_valid", is_valid, METH_NOARGS, NULL },
447
    { "get_jit_code", get_jit_code, METH_NOARGS, NULL},
448
    { "get_opcode", get_opcode, METH_NOARGS, NULL },
449
    { "get_oparg", get_oparg, METH_NOARGS, NULL },
450
    { NULL, NULL },
451
};
452
453
static int
454
executor_is_gc(PyObject *o)
455
{
456
    return !_Py_IsImmortal(o);
457
}
458
459
PyTypeObject _PyUOpExecutor_Type = {
460
    PyVarObject_HEAD_INIT(&PyType_Type, 0)
461
    .tp_name = "uop_executor",
462
    .tp_basicsize = offsetof(_PyExecutorObject, exits),
463
    .tp_itemsize = 1,
464
    .tp_flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_DISALLOW_INSTANTIATION | Py_TPFLAGS_HAVE_GC,
465
    .tp_dealloc = uop_dealloc,
466
    .tp_as_sequence = &uop_as_sequence,
467
    .tp_methods = uop_executor_methods,
468
    .tp_traverse = executor_traverse,
469
    .tp_clear = executor_clear,
470
    .tp_is_gc = executor_is_gc,
471
};
472
473
/* TO DO -- Generate these tables */
474
static const uint16_t
475
_PyUOp_Replacements[MAX_UOP_ID + 1] = {
476
    [_ITER_JUMP_RANGE] = _GUARD_NOT_EXHAUSTED_RANGE,
477
    [_ITER_JUMP_LIST] = _GUARD_NOT_EXHAUSTED_LIST,
478
    [_ITER_JUMP_TUPLE] = _GUARD_NOT_EXHAUSTED_TUPLE,
479
    [_FOR_ITER] = _FOR_ITER_TIER_TWO,
480
    [_ITER_NEXT_LIST] = _ITER_NEXT_LIST_TIER_TWO,
481
    [_CHECK_PERIODIC_AT_END] = _TIER2_RESUME_CHECK,
482
};
483
484
static const uint8_t
485
is_for_iter_test[MAX_UOP_ID + 1] = {
486
    [_GUARD_NOT_EXHAUSTED_RANGE] = 1,
487
    [_GUARD_NOT_EXHAUSTED_LIST] = 1,
488
    [_GUARD_NOT_EXHAUSTED_TUPLE] = 1,
489
    [_FOR_ITER_TIER_TWO] = 1,
490
};
491
492
static const uint16_t
493
BRANCH_TO_GUARD[4][2] = {
494
    [POP_JUMP_IF_FALSE - POP_JUMP_IF_FALSE][0] = _GUARD_IS_TRUE_POP,
495
    [POP_JUMP_IF_FALSE - POP_JUMP_IF_FALSE][1] = _GUARD_IS_FALSE_POP,
496
    [POP_JUMP_IF_TRUE - POP_JUMP_IF_FALSE][0] = _GUARD_IS_FALSE_POP,
497
    [POP_JUMP_IF_TRUE - POP_JUMP_IF_FALSE][1] = _GUARD_IS_TRUE_POP,
498
    [POP_JUMP_IF_NONE - POP_JUMP_IF_FALSE][0] = _GUARD_IS_NOT_NONE_POP,
499
    [POP_JUMP_IF_NONE - POP_JUMP_IF_FALSE][1] = _GUARD_IS_NONE_POP,
500
    [POP_JUMP_IF_NOT_NONE - POP_JUMP_IF_FALSE][0] = _GUARD_IS_NONE_POP,
501
    [POP_JUMP_IF_NOT_NONE - POP_JUMP_IF_FALSE][1] = _GUARD_IS_NOT_NONE_POP,
502
};
503
504
static const uint16_t
505
guard_ip_uop[MAX_UOP_ID + 1] = {
506
    [_PUSH_FRAME] = _GUARD_IP__PUSH_FRAME,
507
    [_RETURN_GENERATOR] = _GUARD_IP_RETURN_GENERATOR,
508
    [_RETURN_VALUE] = _GUARD_IP_RETURN_VALUE,
509
    [_YIELD_VALUE] = _GUARD_IP_YIELD_VALUE,
510
};
511
512
513
#define CONFIDENCE_RANGE 1000
514
#define CONFIDENCE_CUTOFF 333
515
516
#ifdef Py_DEBUG
517
#define DPRINTF(level, ...) \
518
    if (lltrace >= (level)) { printf(__VA_ARGS__); }
519
#else
520
#define DPRINTF(level, ...)
521
#endif
522
523
524
static inline int
525
add_to_trace(
526
    _PyUOpInstruction *trace,
527
    int trace_length,
528
    uint16_t opcode,
529
    uint16_t oparg,
530
    uint64_t operand,
531
    uint32_t target)
532
{
533
    trace[trace_length].opcode = opcode;
534
    trace[trace_length].format = UOP_FORMAT_TARGET;
535
    trace[trace_length].target = target;
536
    trace[trace_length].oparg = oparg;
537
    trace[trace_length].operand0 = operand;
538
#ifdef Py_STATS
539
    trace[trace_length].execution_count = 0;
540
#endif
541
    return trace_length + 1;
542
}
543
544
#ifdef Py_DEBUG
545
#define ADD_TO_TRACE(OPCODE, OPARG, OPERAND, TARGET) \
546
    assert(trace_length < max_length); \
547
    trace_length = add_to_trace(trace, trace_length, (OPCODE), (OPARG), (OPERAND), (TARGET)); \
548
    if (lltrace >= 2) { \
549
        printf("%4d ADD_TO_TRACE: ", trace_length); \
550
        _PyUOpPrint(&trace[trace_length-1]); \
551
        printf("\n"); \
552
    }
553
#else
554
#define ADD_TO_TRACE(OPCODE, OPARG, OPERAND, TARGET) \
555
    assert(trace_length < max_length); \
556
    trace_length = add_to_trace(trace, trace_length, (OPCODE), (OPARG), (OPERAND), (TARGET));
557
#endif
558
559
#define INSTR_IP(INSTR, CODE) \
560
    ((uint32_t)((INSTR) - ((_Py_CODEUNIT *)(CODE)->co_code_adaptive)))
561
562
// Reserve space for n uops
563
#define RESERVE_RAW(n, opname) \
564
    if (trace_length + (n) > max_length) { \
565
        DPRINTF(2, "No room for %s (need %d, got %d)\n", \
566
                (opname), (n), max_length - trace_length); \
567
        OPT_STAT_INC(trace_too_long); \
568
        goto full; \
569
    }
570
571
static int
572
is_terminator(const _PyUOpInstruction *uop)
573
{
574
    int opcode = _PyUop_Uncached[uop->opcode];
575
    return (
576
        opcode == _EXIT_TRACE ||
577
        opcode == _DEOPT ||
578
        opcode == _JUMP_TO_TOP ||
579
        opcode == _DYNAMIC_EXIT
580
    );
581
}
582
583
/* Returns 1 on success (added to trace), 0 on trace end.
584
 */
585
int
586
_PyJit_translate_single_bytecode_to_trace(
587
    PyThreadState *tstate,
588
    _PyInterpreterFrame *frame,
589
    _Py_CODEUNIT *next_instr,
590
    int stop_tracing_opcode)
591
{
592
593
#ifdef Py_DEBUG
594
    char *python_lltrace = Py_GETENV("PYTHON_LLTRACE");
595
    int lltrace = 0;
596
    if (python_lltrace != NULL && *python_lltrace >= '0') {
597
        lltrace = *python_lltrace - '0';  // TODO: Parse an int and all that
598
    }
599
#endif
600
    _PyThreadStateImpl *_tstate = (_PyThreadStateImpl *)tstate;
601
    PyCodeObject *old_code = _tstate->jit_tracer_state.prev_state.instr_code;
602
    bool progress_needed = (_tstate->jit_tracer_state.initial_state.chain_depth % MAX_CHAIN_DEPTH) == 0;
603
    _PyBloomFilter *dependencies = &_tstate->jit_tracer_state.prev_state.dependencies;
604
    int trace_length = _tstate->jit_tracer_state.prev_state.code_curr_size;
605
    _PyUOpInstruction *trace = _tstate->jit_tracer_state.code_buffer;
606
    int max_length = _tstate->jit_tracer_state.prev_state.code_max_size;
607
608
    _Py_CODEUNIT *this_instr =  _tstate->jit_tracer_state.prev_state.instr;
609
    _Py_CODEUNIT *target_instr = this_instr;
610
    uint32_t target = 0;
611
612
    target = Py_IsNone((PyObject *)old_code)
613
        ? (int)(target_instr - _Py_INTERPRETER_TRAMPOLINE_INSTRUCTIONS_PTR)
614
        : INSTR_IP(target_instr, old_code);
615
616
    // Rewind EXTENDED_ARG so that we see the whole thing.
617
    // We must point to the first EXTENDED_ARG when deopting.
618
    int oparg = _tstate->jit_tracer_state.prev_state.instr_oparg;
619
    int opcode = this_instr->op.code;
620
    int rewind_oparg = oparg;
621
    while (rewind_oparg > 255) {
622
        rewind_oparg >>= 8;
623
        target--;
624
    }
625
626
    if (_PyOpcode_Caches[_PyOpcode_Deopt[opcode]] > 0) {
627
        uint16_t backoff = (this_instr + 1)->counter.value_and_backoff;
628
        // adaptive_counter_cooldown is a fresh specialization.
629
        // trigger_backoff_counter is what we set during tracing.
630
        // All tracing backoffs should be freshly specialized or untouched.
631
        // If not, that indicates a deopt during tracing, and
632
        // thus the "actual" instruction executed is not the one that is
633
        // in the instruction stream, but rather the deopt.
634
        // It's important we check for this, as some specializations might make
635
        // no progress (they can immediately deopt after specializing).
636
        // We do this to improve performance, as otherwise a compiled trace
637
        // will just deopt immediately.
638
        if (backoff != adaptive_counter_cooldown().value_and_backoff &&
639
            backoff != trigger_backoff_counter().value_and_backoff) {
640
            OPT_STAT_INC(trace_immediately_deopts);
641
            opcode = _PyOpcode_Deopt[opcode];
642
        }
643
    }
644
645
    int old_stack_level = _tstate->jit_tracer_state.prev_state.instr_stacklevel;
646
647
    // Strange control-flow
648
    bool has_dynamic_jump_taken = OPCODE_HAS_UNPREDICTABLE_JUMP(opcode) &&
649
        (next_instr != this_instr + 1 + _PyOpcode_Caches[_PyOpcode_Deopt[opcode]]);
650
651
    /* Special case the first instruction,
652
    * so that we can guarantee forward progress */
653
    if (progress_needed && _tstate->jit_tracer_state.prev_state.code_curr_size < CODE_SIZE_NO_PROGRESS) {
654
        if (OPCODE_HAS_EXIT(opcode) || OPCODE_HAS_DEOPT(opcode)) {
655
            opcode = _PyOpcode_Deopt[opcode];
656
        }
657
        assert(!OPCODE_HAS_EXIT(opcode));
658
        assert(!OPCODE_HAS_DEOPT(opcode));
659
    }
660
661
    bool needs_guard_ip = OPCODE_HAS_NEEDS_GUARD_IP(opcode);
662
    if (has_dynamic_jump_taken && !needs_guard_ip) {
663
        DPRINTF(2, "Unsupported: dynamic jump taken %s\n", _PyOpcode_OpName[opcode]);
664
        goto unsupported;
665
    }
666
667
    int is_sys_tracing = (tstate->c_tracefunc != NULL) || (tstate->c_profilefunc != NULL);
668
    if (is_sys_tracing) {
669
        goto full;
670
    }
671
672
    if (stop_tracing_opcode != 0) {
673
        ADD_TO_TRACE(stop_tracing_opcode, 0, 0, target);
674
        goto done;
675
    }
676
677
    DPRINTF(2, "%p %d: %s(%d) %d %d\n", old_code, target, _PyOpcode_OpName[opcode], oparg, needs_guard_ip, old_stack_level);
678
679
#ifdef Py_DEBUG
680
    if (oparg > 255) {
681
        assert(_Py_GetBaseCodeUnit(old_code, target).op.code == EXTENDED_ARG);
682
    }
683
#endif
684
685
    // Skip over super instructions.
686
    if (_tstate->jit_tracer_state.prev_state.instr_is_super) {
687
        _tstate->jit_tracer_state.prev_state.instr_is_super = false;
688
        return 1;
689
    }
690
691
    if (opcode == ENTER_EXECUTOR) {
692
        goto full;
693
    }
694
695
    if (!_tstate->jit_tracer_state.prev_state.dependencies_still_valid) {
696
        goto done;
697
    }
698
699
    // This happens when a recursive call happens that we can't trace. Such as Python -> C -> Python calls
700
    // If we haven't guarded the IP, then it's untraceable.
701
    if (frame != _tstate->jit_tracer_state.prev_state.instr_frame && !needs_guard_ip) {
702
        DPRINTF(2, "Unsupported: unguardable jump taken\n");
703
        goto unsupported;
704
    }
705
706
    if (oparg > 0xFFFF) {
707
        DPRINTF(2, "Unsupported: oparg too large\n");
708
        goto unsupported;
709
    }
710
711
    // TODO (gh-140277): The constituent use one extra stack slot. So we need to check for headroom.
712
    if (opcode == BINARY_OP_SUBSCR_GETITEM && old_stack_level + 1 > old_code->co_stacksize) {
713
        unsupported:
714
        {
715
            // Rewind to previous instruction and replace with _EXIT_TRACE.
716
            _PyUOpInstruction *curr = &trace[trace_length-1];
717
            while (curr->opcode != _SET_IP && trace_length > 2) {
718
                trace_length--;
719
                curr = &trace[trace_length-1];
720
            }
721
            assert(curr->opcode == _SET_IP || trace_length == 2);
722
            if (curr->opcode == _SET_IP) {
723
                int32_t old_target = (int32_t)uop_get_target(curr);
724
                curr++;
725
                trace_length++;
726
                curr->opcode = _EXIT_TRACE;
727
                curr->format = UOP_FORMAT_TARGET;
728
                curr->target = old_target;
729
            }
730
            goto done;
731
        }
732
    }
733
734
    if (opcode == NOP) {
735
        return 1;
736
    }
737
738
    if (opcode == JUMP_FORWARD) {
739
        return 1;
740
    }
741
742
    if (opcode == EXTENDED_ARG) {
743
        return 1;
744
    }
745
746
    // One for possible _DEOPT, one because _CHECK_VALIDITY itself might _DEOPT
747
    max_length -= 2;
748
749
    const struct opcode_macro_expansion *expansion = &_PyOpcode_macro_expansion[opcode];
750
751
    assert(opcode != ENTER_EXECUTOR && opcode != EXTENDED_ARG);
752
    assert(!_PyErr_Occurred(tstate));
753
754
755
    if (OPCODE_HAS_EXIT(opcode)) {
756
        // Make space for side exit and final _EXIT_TRACE:
757
        max_length--;
758
    }
759
    if (OPCODE_HAS_ERROR(opcode)) {
760
        // Make space for error stub and final _EXIT_TRACE:
761
        max_length--;
762
    }
763
764
    // _GUARD_IP leads to an exit.
765
    max_length -= needs_guard_ip;
766
767
    RESERVE_RAW(expansion->nuops + needs_guard_ip + 2 + (!OPCODE_HAS_NO_SAVE_IP(opcode)), "uop and various checks");
768
769
    ADD_TO_TRACE(_CHECK_VALIDITY, 0, 0, target);
770
771
    if (!OPCODE_HAS_NO_SAVE_IP(opcode)) {
772
        ADD_TO_TRACE(_SET_IP, 0, (uintptr_t)target_instr, target);
773
    }
774
775
    // Can be NULL for the entry frame.
776
    if (old_code != NULL) {
777
        _Py_BloomFilter_Add(dependencies, old_code);
778
    }
779
780
    switch (opcode) {
781
        case POP_JUMP_IF_NONE:
782
        case POP_JUMP_IF_NOT_NONE:
783
        case POP_JUMP_IF_FALSE:
784
        case POP_JUMP_IF_TRUE:
785
        {
786
            _Py_CODEUNIT *computed_next_instr_without_modifiers = target_instr + 1 + _PyOpcode_Caches[_PyOpcode_Deopt[opcode]];
787
            _Py_CODEUNIT *computed_next_instr = computed_next_instr_without_modifiers + (computed_next_instr_without_modifiers->op.code == NOT_TAKEN);
788
            _Py_CODEUNIT *computed_jump_instr = computed_next_instr_without_modifiers + oparg;
789
            assert(next_instr == computed_next_instr || next_instr == computed_jump_instr);
790
            int jump_happened = computed_jump_instr == next_instr;
791
            assert(jump_happened == (target_instr[1].cache & 1));
792
            uint32_t uopcode = BRANCH_TO_GUARD[opcode - POP_JUMP_IF_FALSE][jump_happened];
793
            ADD_TO_TRACE(uopcode, 0, 0, INSTR_IP(jump_happened ? computed_next_instr : computed_jump_instr, old_code));
794
            break;
795
        }
796
        case JUMP_BACKWARD_JIT:
797
            // This is possible as the JIT might have re-activated after it was disabled
798
        case JUMP_BACKWARD_NO_JIT:
799
        case JUMP_BACKWARD:
800
            ADD_TO_TRACE(_CHECK_PERIODIC, 0, 0, target);
801
            _Py_FALLTHROUGH;
802
        case JUMP_BACKWARD_NO_INTERRUPT:
803
        {
804
            if ((next_instr != _tstate->jit_tracer_state.initial_state.close_loop_instr) &&
805
                (next_instr != _tstate->jit_tracer_state.initial_state.start_instr) &&
806
                _tstate->jit_tracer_state.prev_state.code_curr_size > CODE_SIZE_NO_PROGRESS &&
807
                // For side exits, we don't want to terminate them early.
808
                _tstate->jit_tracer_state.initial_state.exit == NULL &&
809
                // These are coroutines, and we want to unroll those usually.
810
                opcode != JUMP_BACKWARD_NO_INTERRUPT) {
811
                // We encountered a JUMP_BACKWARD but not to the top of our own loop.
812
                // We don't want to continue tracing as we might get stuck in the
813
                // inner loop. Instead, end the trace where the executor of the
814
                // inner loop might start and let the traces rejoin.
815
                OPT_STAT_INC(inner_loop);
816
                ADD_TO_TRACE(_EXIT_TRACE, 0, 0, target);
817
                trace[trace_length-1].operand1 = true; // is_control_flow
818
                DPRINTF(2, "JUMP_BACKWARD not to top ends trace %p %p %p\n", next_instr,
819
                    _tstate->jit_tracer_state.initial_state.close_loop_instr, _tstate->jit_tracer_state.initial_state.start_instr);
820
                goto done;
821
            }
822
            break;
823
        }
824
825
        case RESUME:
826
        case RESUME_CHECK:
827
            /* Use a special tier 2 version of RESUME_CHECK to allow traces to
828
             *  start with RESUME_CHECK */
829
            ADD_TO_TRACE(_TIER2_RESUME_CHECK, 0, 0, target);
830
            break;
831
        default:
832
        {
833
            const struct opcode_macro_expansion *expansion = &_PyOpcode_macro_expansion[opcode];
834
            // Reserve space for nuops (+ _SET_IP + _EXIT_TRACE)
835
            int nuops = expansion->nuops;
836
            if (nuops == 0) {
837
                DPRINTF(2, "Unsupported opcode %s\n", _PyOpcode_OpName[opcode]);
838
                goto unsupported;
839
            }
840
            assert(nuops > 0);
841
            uint32_t orig_oparg = oparg;  // For OPARG_TOP/BOTTOM
842
            uint32_t orig_target = target;
843
            for (int i = 0; i < nuops; i++) {
844
                oparg = orig_oparg;
845
                target = orig_target;
846
                uint32_t uop = expansion->uops[i].uop;
847
                uint64_t operand = 0;
848
                // Add one to account for the actual opcode/oparg pair:
849
                int offset = expansion->uops[i].offset + 1;
850
                switch (expansion->uops[i].size) {
851
                    case OPARG_SIMPLE:
852
                        assert(opcode != _JUMP_BACKWARD_NO_INTERRUPT && opcode != JUMP_BACKWARD);
853
                        break;
854
                    case OPARG_CACHE_1:
855
                        operand = read_u16(&this_instr[offset].cache);
856
                        break;
857
                    case OPARG_CACHE_2:
858
                        operand = read_u32(&this_instr[offset].cache);
859
                        break;
860
                    case OPARG_CACHE_4:
861
                        operand = read_u64(&this_instr[offset].cache);
862
                        break;
863
                    case OPARG_TOP:  // First half of super-instr
864
                        assert(orig_oparg <= 255);
865
                        oparg = orig_oparg >> 4;
866
                        break;
867
                    case OPARG_BOTTOM:  // Second half of super-instr
868
                        assert(orig_oparg <= 255);
869
                        oparg = orig_oparg & 0xF;
870
                        break;
871
                    case OPARG_SAVE_RETURN_OFFSET:  // op=_SAVE_RETURN_OFFSET; oparg=return_offset
872
                        oparg = offset;
873
                        assert(uop == _SAVE_RETURN_OFFSET);
874
                        break;
875
                    case OPARG_REPLACED:
876
                        uop = _PyUOp_Replacements[uop];
877
                        assert(uop != 0);
878
879
                        uint32_t next_inst = target + 1 + _PyOpcode_Caches[_PyOpcode_Deopt[opcode]];
880
                        if (uop == _TIER2_RESUME_CHECK) {
881
                            target = next_inst;
882
                        }
883
                        else {
884
                            int extended_arg = orig_oparg > 255;
885
                            uint32_t jump_target = next_inst + orig_oparg + extended_arg;
886
                            assert(_Py_GetBaseCodeUnit(old_code, jump_target).op.code == END_FOR);
887
                            assert(_Py_GetBaseCodeUnit(old_code, jump_target+1).op.code == POP_ITER);
888
                            if (is_for_iter_test[uop]) {
889
                                target = jump_target + 1;
890
                            }
891
                        }
892
                        break;
893
                    case OPERAND1_1:
894
                        assert(trace[trace_length-1].opcode == uop);
895
                        operand = read_u16(&this_instr[offset].cache);
896
                        trace[trace_length-1].operand1 = operand;
897
                        continue;
898
                    case OPERAND1_2:
899
                        assert(trace[trace_length-1].opcode == uop);
900
                        operand = read_u32(&this_instr[offset].cache);
901
                        trace[trace_length-1].operand1 = operand;
902
                        continue;
903
                    case OPERAND1_4:
904
                        assert(trace[trace_length-1].opcode == uop);
905
                        operand = read_u64(&this_instr[offset].cache);
906
                        trace[trace_length-1].operand1 = operand;
907
                        continue;
908
                    default:
909
                        fprintf(stderr,
910
                                "opcode=%d, oparg=%d; nuops=%d, i=%d; size=%d, offset=%d\n",
911
                                opcode, oparg, nuops, i,
912
                                expansion->uops[i].size,
913
                                expansion->uops[i].offset);
914
                        Py_FatalError("garbled expansion");
915
                }
916
                if (uop == _PUSH_FRAME || uop == _RETURN_VALUE || uop == _RETURN_GENERATOR || uop == _YIELD_VALUE) {
917
                    PyCodeObject *new_code = (PyCodeObject *)PyStackRef_AsPyObjectBorrow(frame->f_executable);
918
                    PyFunctionObject *new_func = (PyFunctionObject *)PyStackRef_AsPyObjectBorrow(frame->f_funcobj);
919
920
                    operand = 0;
921
                    if (frame->owner < FRAME_OWNED_BY_INTERPRETER) {
922
                        // Don't add nested code objects to the dependency.
923
                        // It causes endless re-traces.
924
                        if (new_func != NULL && !Py_IsNone((PyObject*)new_func) && !(new_code->co_flags & CO_NESTED)) {
925
                            operand = (uintptr_t)new_func;
926
                            DPRINTF(2, "Adding %p func to op\n", (void *)operand);
927
                            _Py_BloomFilter_Add(dependencies, new_func);
928
                        }
929
                        else if (new_code != NULL && !Py_IsNone((PyObject*)new_code)) {
930
                            operand = (uintptr_t)new_code | 1;
931
                            DPRINTF(2, "Adding %p code to op\n", (void *)operand);
932
                            _Py_BloomFilter_Add(dependencies, new_code);
933
                        }
934
                    }
935
                    ADD_TO_TRACE(uop, oparg, operand, target);
936
                    trace[trace_length - 1].operand1 = PyStackRef_IsNone(frame->f_executable) ? 2 : ((int)(frame->stackpointer - _PyFrame_Stackbase(frame)));
937
                    break;
938
                }
939
                if (uop == _BINARY_OP_INPLACE_ADD_UNICODE) {
940
                    assert(i + 1 == nuops);
941
                    _Py_CODEUNIT *next = target_instr + 1 + _PyOpcode_Caches[_PyOpcode_Deopt[opcode]];
942
                    assert(next->op.code == STORE_FAST);
943
                    operand = next->op.arg;
944
                }
945
                // All other instructions
946
                ADD_TO_TRACE(uop, oparg, operand, target);
947
            }
948
            break;
949
        }  // End default
950
951
    }  // End switch (opcode)
952
953
    if (needs_guard_ip) {
954
        uint16_t guard_ip = guard_ip_uop[trace[trace_length-1].opcode];
955
        if (guard_ip == 0) {
956
            DPRINTF(1, "Unknown uop needing guard ip %s\n", _PyOpcode_uop_name[trace[trace_length-1].opcode]);
957
            Py_UNREACHABLE();
958
        }
959
        ADD_TO_TRACE(guard_ip, 0, (uintptr_t)next_instr, 0);
960
    }
961
    // Loop back to the start
962
    int is_first_instr = _tstate->jit_tracer_state.initial_state.close_loop_instr == next_instr ||
963
        _tstate->jit_tracer_state.initial_state.start_instr == next_instr;
964
    if (is_first_instr && _tstate->jit_tracer_state.prev_state.code_curr_size > CODE_SIZE_NO_PROGRESS) {
965
        if (needs_guard_ip) {
966
            ADD_TO_TRACE(_SET_IP, 0, (uintptr_t)next_instr, 0);
967
        }
968
        ADD_TO_TRACE(_JUMP_TO_TOP, 0, 0, 0);
969
        goto done;
970
    }
971
    DPRINTF(2, "Trace continuing\n");
972
    _tstate->jit_tracer_state.prev_state.code_curr_size = trace_length;
973
    _tstate->jit_tracer_state.prev_state.code_max_size = max_length;
974
    return 1;
975
done:
976
    DPRINTF(2, "Trace done\n");
977
    _tstate->jit_tracer_state.prev_state.code_curr_size = trace_length;
978
    _tstate->jit_tracer_state.prev_state.code_max_size = max_length;
979
    return 0;
980
full:
981
    DPRINTF(2, "Trace full\n");
982
    if (!is_terminator(&_tstate->jit_tracer_state.code_buffer[trace_length-1])) {
983
        // Undo the last few instructions.
984
        trace_length = _tstate->jit_tracer_state.prev_state.code_curr_size;
985
        max_length = _tstate->jit_tracer_state.prev_state.code_max_size;
986
        // We previously reversed one.
987
        max_length += 1;
988
        ADD_TO_TRACE(_EXIT_TRACE, 0, 0, target);
989
        trace[trace_length-1].operand1 = true; // is_control_flow
990
    }
991
    _tstate->jit_tracer_state.prev_state.code_curr_size = trace_length;
992
    _tstate->jit_tracer_state.prev_state.code_max_size = max_length;
993
    return 0;
994
}
995
996
// Returns 0 for do not enter tracing, 1 on enter tracing.
997
int
998
_PyJit_TryInitializeTracing(
999
    PyThreadState *tstate, _PyInterpreterFrame *frame, _Py_CODEUNIT *curr_instr,
1000
    _Py_CODEUNIT *start_instr, _Py_CODEUNIT *close_loop_instr, int curr_stackdepth, int chain_depth,
1001
    _PyExitData *exit, int oparg)
1002
{
1003
    _PyThreadStateImpl *_tstate = (_PyThreadStateImpl *)tstate;
1004
    // A recursive trace.
1005
    // Don't trace into the inner call because it will stomp on the previous trace, causing endless retraces.
1006
    if (_tstate->jit_tracer_state.prev_state.code_curr_size > CODE_SIZE_EMPTY) {
1007
        return 0;
1008
    }
1009
    if (oparg > 0xFFFF) {
1010
        return 0;
1011
    }
1012
    if (_tstate->jit_tracer_state.code_buffer == NULL) {
1013
        _tstate->jit_tracer_state.code_buffer = (_PyUOpInstruction *)_PyObject_VirtualAlloc(UOP_BUFFER_SIZE);
1014
        if (_tstate->jit_tracer_state.code_buffer == NULL) {
1015
            // Don't error, just go to next instruction.
1016
            return 0;
1017
        }
1018
    }
1019
    PyObject *func = PyStackRef_AsPyObjectBorrow(frame->f_funcobj);
1020
    if (func == NULL) {
1021
        return 0;
1022
    }
1023
    PyCodeObject *code = _PyFrame_GetCode(frame);
1024
#ifdef Py_DEBUG
1025
    char *python_lltrace = Py_GETENV("PYTHON_LLTRACE");
1026
    int lltrace = 0;
1027
    if (python_lltrace != NULL && *python_lltrace >= '0') {
1028
        lltrace = *python_lltrace - '0';  // TODO: Parse an int and all that
1029
    }
1030
    DPRINTF(2,
1031
        "Tracing %s (%s:%d) at byte offset %d at chain depth %d\n",
1032
        PyUnicode_AsUTF8(code->co_qualname),
1033
        PyUnicode_AsUTF8(code->co_filename),
1034
        code->co_firstlineno,
1035
        2 * INSTR_IP(close_loop_instr, code),
1036
        chain_depth);
1037
#endif
1038
1039
    add_to_trace(_tstate->jit_tracer_state.code_buffer, 0, _START_EXECUTOR, 0, (uintptr_t)start_instr, INSTR_IP(start_instr, code));
1040
    add_to_trace(_tstate->jit_tracer_state.code_buffer, 1, _MAKE_WARM, 0, 0, 0);
1041
    _tstate->jit_tracer_state.prev_state.code_curr_size = CODE_SIZE_EMPTY;
1042
1043
    _tstate->jit_tracer_state.prev_state.code_max_size = UOP_MAX_TRACE_LENGTH/2;
1044
    _tstate->jit_tracer_state.initial_state.start_instr = start_instr;
1045
    _tstate->jit_tracer_state.initial_state.close_loop_instr = close_loop_instr;
1046
    _tstate->jit_tracer_state.initial_state.code = (PyCodeObject *)Py_NewRef(code);
1047
    _tstate->jit_tracer_state.initial_state.func = (PyFunctionObject *)Py_NewRef(func);
1048
    _tstate->jit_tracer_state.initial_state.exit = exit;
1049
    _tstate->jit_tracer_state.initial_state.stack_depth = curr_stackdepth;
1050
    _tstate->jit_tracer_state.initial_state.chain_depth = chain_depth;
1051
    _tstate->jit_tracer_state.prev_state.instr_frame = frame;
1052
    _tstate->jit_tracer_state.prev_state.dependencies_still_valid = true;
1053
    _tstate->jit_tracer_state.prev_state.instr_code = (PyCodeObject *)Py_NewRef(_PyFrame_GetCode(frame));
1054
    _tstate->jit_tracer_state.prev_state.instr = curr_instr;
1055
    _tstate->jit_tracer_state.prev_state.instr_frame = frame;
1056
    _tstate->jit_tracer_state.prev_state.instr_oparg = oparg;
1057
    _tstate->jit_tracer_state.prev_state.instr_stacklevel = curr_stackdepth;
1058
    _tstate->jit_tracer_state.prev_state.instr_is_super = false;
1059
    assert(curr_instr->op.code == JUMP_BACKWARD_JIT || (exit != NULL));
1060
    _tstate->jit_tracer_state.initial_state.jump_backward_instr = curr_instr;
1061
1062
    if (_PyOpcode_Caches[_PyOpcode_Deopt[close_loop_instr->op.code]]) {
1063
        close_loop_instr[1].counter = trigger_backoff_counter();
1064
    }
1065
    _Py_BloomFilter_Init(&_tstate->jit_tracer_state.prev_state.dependencies);
1066
    return 1;
1067
}
1068
1069
void
1070
_PyJit_FinalizeTracing(PyThreadState *tstate)
1071
{
1072
    _PyThreadStateImpl *_tstate = (_PyThreadStateImpl *)tstate;
1073
    Py_CLEAR(_tstate->jit_tracer_state.initial_state.code);
1074
    Py_CLEAR(_tstate->jit_tracer_state.initial_state.func);
1075
    Py_CLEAR(_tstate->jit_tracer_state.prev_state.instr_code);
1076
    _tstate->jit_tracer_state.prev_state.code_curr_size = CODE_SIZE_EMPTY;
1077
    _tstate->jit_tracer_state.prev_state.code_max_size = UOP_MAX_TRACE_LENGTH/2 - 1;
1078
}
1079
1080
1081
#undef RESERVE
1082
#undef RESERVE_RAW
1083
#undef INSTR_IP
1084
#undef ADD_TO_TRACE
1085
#undef DPRINTF
1086
1087
#define UNSET_BIT(array, bit) (array[(bit)>>5] &= ~(1<<((bit)&31)))
1088
#define SET_BIT(array, bit) (array[(bit)>>5] |= (1<<((bit)&31)))
1089
#define BIT_IS_SET(array, bit) (array[(bit)>>5] & (1<<((bit)&31)))
1090
1091
/* Count the number of unused uops and exits
1092
*/
1093
static int
1094
count_exits(_PyUOpInstruction *buffer, int length)
1095
{
1096
    int exit_count = 0;
1097
    for (int i = 0; i < length; i++) {
1098
        uint16_t base_opcode = _PyUop_Uncached[buffer[i].opcode];
1099
        if (base_opcode == _EXIT_TRACE || base_opcode == _DYNAMIC_EXIT) {
1100
            exit_count++;
1101
        }
1102
    }
1103
    return exit_count;
1104
}
1105
1106
/* The number of cached registers at any exit (`EXIT_IF` or `DEOPT_IF`)
1107
 * This is the number of cached at entries at start, unless the uop is
1108
 * marked as `exit_depth_is_output` in which case it is the number of
1109
 * cached entries at the end */
1110
static int
1111
get_cached_entries_for_side_exit(_PyUOpInstruction *inst)
1112
{
1113
    // Maybe add another generated table for this?
1114
    int base_opcode = _PyUop_Uncached[inst->opcode];
1115
    assert(base_opcode != 0);
1116
    for (int i = 0; i <= MAX_CACHED_REGISTER; i++) {
1117
        const _PyUopTOSentry *entry = &_PyUop_Caching[base_opcode].entries[i];
1118
        if (entry->opcode == inst->opcode) {
1119
            return entry->exit;
1120
        }
1121
    }
1122
    Py_UNREACHABLE();
1123
}
1124
1125
/* Initialize `inst` as an explicit exit stub (deopt/exit/error) aimed at
 * `target`. `is_control_flow` is stashed in operand1 for later transfer
 * into the _PyExitData. */
static void make_exit(_PyUOpInstruction *inst, int opcode, int target, bool is_control_flow)
{
    /* Exit stubs must use a register-cached (non-base) uop id. */
    assert(opcode > MAX_UOP_ID && opcode <= MAX_UOP_REGS_ID);
    inst->format = UOP_FORMAT_TARGET;
    inst->opcode = opcode;
    inst->target = target;
    inst->oparg = 0;
    inst->operand0 = 0;
    inst->operand1 = is_control_flow;
#ifdef Py_STATS
    inst->execution_count = 0;
#endif
}
1138
1139
/* Convert implicit exits, errors and deopts
 * into explicit ones.
 *
 * First compacts the buffer by squeezing out _NOPs, then appends shared
 * exit/error stubs after the live instructions (at `next_spare` and up)
 * and rewrites each instruction that can exit or error to the
 * UOP_FORMAT_JUMP format pointing at the appropriate stub. Consecutive
 * instructions with the same exit target and exit op share one stub.
 * Returns the new total length (live instructions + stubs). */
static int
prepare_for_execution(_PyUOpInstruction *buffer, int length)
{
    // Bookkeeping for stub sharing: the index/target/kind of the most
    // recently emitted jump stub and error stub. -1 means "none yet".
    int32_t current_jump = -1;
    int32_t current_jump_target = -1;
    int32_t current_error = -1;
    int32_t current_error_target = -1;
    int32_t current_popped = -1;
    int32_t current_exit_op = -1;
    /* Leaving in NOPs slows down the interpreter and messes up the stats */
    _PyUOpInstruction *copy_to = &buffer[0];
    for (int i = 0; i < length; i++) {
        _PyUOpInstruction *inst = &buffer[i];
        if (inst->opcode != _NOP) {
            if (copy_to != inst) {
                *copy_to = *inst;
            }
            copy_to++;
        }
    }
    // New live length after NOP removal; stubs are appended from here.
    length = (int)(copy_to - buffer);
    int next_spare = length;
    for (int i = 0; i < length; i++) {
        _PyUOpInstruction *inst = &buffer[i];
        int base_opcode = _PyUop_Uncached[inst->opcode];
        assert(inst->opcode != _NOP);
        int32_t target = (int32_t)uop_get_target(inst);
        uint16_t exit_flags = _PyUop_Flags[base_opcode] & (HAS_EXIT_FLAG | HAS_DEOPT_FLAG | HAS_PERIODIC_FLAG);
        if (exit_flags) {
            // Choose the stub kind from the instruction's flags:
            // plain exit, deopt, or pending-call handling + deopt.
            uint16_t base_exit_op = _EXIT_TRACE;
            if (exit_flags & HAS_DEOPT_FLAG) {
                base_exit_op = _DEOPT;
            }
            else if (exit_flags & HAS_PERIODIC_FLAG) {
                base_exit_op = _HANDLE_PENDING_AND_DEOPT;
            }
            int32_t jump_target = target;
            // IP guards exit to a dynamically determined location.
            if (
                base_opcode == _GUARD_IP__PUSH_FRAME ||
                base_opcode == _GUARD_IP_RETURN_VALUE ||
                base_opcode == _GUARD_IP_YIELD_VALUE ||
                base_opcode == _GUARD_IP_RETURN_GENERATOR
            ) {
                base_exit_op = _DYNAMIC_EXIT;
            }
            // Pick the register-cached variant of the stub matching the
            // number of cached entries live at this instruction's exit.
            int exit_depth = get_cached_entries_for_side_exit(inst);
            assert(_PyUop_Caching[base_exit_op].entries[exit_depth].opcode > 0);
            int16_t exit_op = _PyUop_Caching[base_exit_op].entries[exit_depth].opcode;
            bool is_control_flow = (base_opcode == _GUARD_IS_FALSE_POP || base_opcode == _GUARD_IS_TRUE_POP || is_for_iter_test[base_opcode]);
            // Emit a new stub only when target or stub kind changed;
            // otherwise reuse the previous one.
            if (jump_target != current_jump_target || current_exit_op != exit_op) {
                make_exit(&buffer[next_spare], exit_op, jump_target, is_control_flow);
                current_exit_op = exit_op;
                current_jump_target = jump_target;
                current_jump = next_spare;
                next_spare++;
            }
            buffer[i].jump_target = current_jump;
            buffer[i].format = UOP_FORMAT_JUMP;
        }
        if (_PyUop_Flags[base_opcode] & HAS_ERROR_FLAG) {
            // How many stack items the error stub must pop; zero if the
            // uop is flagged as not popping on error.
            int popped = (_PyUop_Flags[base_opcode] & HAS_ERROR_NO_POP_FLAG) ?
                0 : _PyUop_num_popped(base_opcode, inst->oparg);
            if (target != current_error_target || popped != current_popped) {
                current_popped = popped;
                current_error = next_spare;
                current_error_target = target;
                make_exit(&buffer[next_spare], _ERROR_POP_N_r00, 0, false);
                buffer[next_spare].operand0 = target;
                next_spare++;
            }
            buffer[i].error_target = current_error;
            if (buffer[i].format == UOP_FORMAT_TARGET) {
                buffer[i].format = UOP_FORMAT_JUMP;
                buffer[i].jump_target = 0;
            }
        }
        if (base_opcode == _JUMP_TO_TOP) {
            // Loop back to index 1, skipping the _START_EXECUTOR at 0.
            assert(_PyUop_Uncached[buffer[0].opcode] == _START_EXECUTOR);
            buffer[i].format = UOP_FORMAT_JUMP;
            buffer[i].jump_target = 1;
        }
    }
    return next_spare;
}
1225
1226
/* Executor side exits */
1227
1228
static _PyExecutorObject *
1229
allocate_executor(int exit_count, int length)
1230
{
1231
    int size = exit_count*sizeof(_PyExitData) + length*sizeof(_PyUOpInstruction);
1232
    _PyExecutorObject *res = PyObject_GC_NewVar(_PyExecutorObject, &_PyUOpExecutor_Type, size);
1233
    if (res == NULL) {
1234
        return NULL;
1235
    }
1236
    res->trace = (_PyUOpInstruction *)(res->exits + exit_count);
1237
    res->code_size = length;
1238
    res->exit_count = exit_count;
1239
    return res;
1240
}
1241
1242
#ifdef Py_DEBUG
1243
1244
#define CHECK(PRED) \
1245
if (!(PRED)) { \
1246
    printf(#PRED " at %d\n", i); \
1247
    assert(0); \
1248
}
1249
1250
static int
1251
target_unused(int opcode)
1252
{
1253
    return (_PyUop_Flags[opcode] & (HAS_ERROR_FLAG | HAS_EXIT_FLAG | HAS_DEOPT_FLAG)) == 0;
1254
}
1255
1256
/* Debug-only validation of a finished executor: checks exit targets,
 * opcode ranges, jump/error targets, that the trace has a terminator,
 * and that everything after the terminator is a stub. Aborts via CHECK
 * on the first violation. */
static void
sanity_check(_PyExecutorObject *executor)
{
    // Exit targets must fit in the 25-bit target field.
    for (uint32_t i = 0; i < executor->exit_count; i++) {
        _PyExitData *exit = &executor->exits[i];
        CHECK(exit->target < (1 << 25));
    }
    bool ended = false;
    uint32_t i = 0;
    // A trace must begin with one of the designated entry uops.
    CHECK(_PyUop_Uncached[executor->trace[0].opcode] == _START_EXECUTOR ||
        _PyUop_Uncached[executor->trace[0].opcode] == _COLD_EXIT ||
        _PyUop_Uncached[executor->trace[0].opcode] == _COLD_DYNAMIC_EXIT);
    // Walk the main trace up to (and including) its terminator.
    for (; i < executor->code_size; i++) {
        const _PyUOpInstruction *inst = &executor->trace[i];
        uint16_t opcode = inst->opcode;
        uint16_t base_opcode = _PyUop_Uncached[opcode];
        // Executors hold register-cached uop ids, whose uncached base
        // must map back into the plain uop range.
        CHECK(opcode > MAX_UOP_ID);
        CHECK(opcode <= MAX_UOP_REGS_ID);
        CHECK(base_opcode <= MAX_UOP_ID);
        CHECK(base_opcode != 0);
        switch(inst->format) {
            case UOP_FORMAT_TARGET:
                // Target-format is only valid for uops that never use it.
                CHECK(target_unused(base_opcode));
                break;
            case UOP_FORMAT_JUMP:
                CHECK(inst->jump_target < executor->code_size);
                break;
        }
        if (_PyUop_Flags[base_opcode] & HAS_ERROR_FLAG) {
            // Erroring uops must have been rewritten to jump format with
            // an in-bounds error target (see prepare_for_execution).
            CHECK(inst->format == UOP_FORMAT_JUMP);
            CHECK(inst->error_target < executor->code_size);
        }
        if (is_terminator(inst)) {
            ended = true;
            i++;
            break;
        }
    }
    CHECK(ended);
    // Everything after the terminator must be an exit/error stub.
    for (; i < executor->code_size; i++) {
        const _PyUOpInstruction *inst = &executor->trace[i];
        uint16_t base_opcode = _PyUop_Uncached[inst->opcode];
        CHECK(
            base_opcode == _DEOPT ||
            base_opcode == _HANDLE_PENDING_AND_DEOPT ||
            base_opcode == _EXIT_TRACE ||
            base_opcode == _ERROR_POP_N ||
            base_opcode == _DYNAMIC_EXIT);
    }
}
1306
1307
#undef CHECK
1308
#endif
1309
1310
/* Makes an executor from a buffer of uops.
 * Account for the buffer having gaps and NOPs by computing a "used"
 * bit vector and only copying the used uops. Here "used" means reachable
 * and not a NOP.
 *
 * Returns a new (GC-tracked) executor, or NULL on allocation or JIT
 * compilation failure. Each _EXIT_TRACE/_DYNAMIC_EXIT uop in the buffer
 * gets a corresponding _PyExitData slot initialized to point at the
 * shared cold executor.
 */
static _PyExecutorObject *
make_executor_from_uops(_PyUOpInstruction *buffer, int length, const _PyBloomFilter *dependencies, int chain_depth)
{
    int exit_count = count_exits(buffer, length);
    _PyExecutorObject *executor = allocate_executor(exit_count, length);
    if (executor == NULL) {
        return NULL;
    }

    /* Initialize exits */
    _PyExecutorObject *cold = _PyExecutor_GetColdExecutor();
    _PyExecutorObject *cold_dynamic = _PyExecutor_GetColdDynamicExecutor();
    cold->vm_data.chain_depth = chain_depth;
    for (int i = 0; i < exit_count; i++) {
        executor->exits[i].index = i;
        executor->exits[i].temperature = initial_temperature_backoff_counter();
    }
    // Copy the buffer into the executor back-to-front, assigning exit
    // slots in reverse so that exit indices match their order of
    // appearance in the trace.
    int next_exit = exit_count-1;
    _PyUOpInstruction *dest = (_PyUOpInstruction *)&executor->trace[length];
    assert(_PyUop_Uncached[buffer[0].opcode] == _START_EXECUTOR);
    buffer[0].operand0 = (uint64_t)executor;
    for (int i = length-1; i >= 0; i--) {
        uint16_t base_opcode = _PyUop_Uncached[buffer[i].opcode];
        dest--;
        *dest = buffer[i];
        if (base_opcode == _EXIT_TRACE || base_opcode == _DYNAMIC_EXIT) {
            // Wire this exit uop to its _PyExitData slot; exits start out
            // pointing at the appropriate shared cold executor.
            _PyExitData *exit = &executor->exits[next_exit];
            exit->target = buffer[i].target;
            dest->operand0 = (uint64_t)exit;
            exit->executor = base_opcode == _EXIT_TRACE ? cold : cold_dynamic;
            exit->is_dynamic = (char)(base_opcode == _DYNAMIC_EXIT);
            exit->is_control_flow = (char)buffer[i].operand1;
            next_exit--;
        }
    }
    assert(next_exit == -1);
    assert(dest == executor->trace);
    assert(_PyUop_Uncached[dest->opcode] == _START_EXECUTOR);
    // Note: we MUST track it here before any Py_DECREF(executor) or
    // linking of executor. Otherwise, the GC tries to untrack a
    // still untracked object during dealloc.
    _PyObject_GC_TRACK(executor);
    _Py_ExecutorInit(executor, dependencies);
#ifdef Py_DEBUG
    char *python_lltrace = Py_GETENV("PYTHON_LLTRACE");
    int lltrace = 0;
    if (python_lltrace != NULL && *python_lltrace >= '0') {
        lltrace = *python_lltrace - '0';  // TODO: Parse an int and all that
    }
    if (lltrace >= 2) {
        printf("Optimized trace (length %d):\n", length);
        for (int i = 0; i < length; i++) {
            printf("%4d OPTIMIZED: ", i);
            _PyUOpPrint(&executor->trace[i]);
            printf("\n");
        }
    }
    sanity_check(executor);
#endif
#ifdef _Py_JIT
    executor->jit_code = NULL;
    executor->jit_size = 0;
    // This is initialized to true so we can prevent the executor
    // from being immediately detected as cold and invalidated.
    executor->vm_data.warm = true;
    if (_PyJIT_Compile(executor, executor->trace, length)) {
        Py_DECREF(executor);
        return NULL;
    }
#endif
    return executor;
}
1387
1388
#ifdef Py_STATS
1389
/* Returns the effective trace length.
 * Ignores NOPs and trailing exit and error handling.*/
int effective_trace_length(_PyUOpInstruction *buffer, int length)
{
    /* Stats-only helper: count instructions up to and including the first
     * terminator, excluding _NOPs, so histograms reflect useful work. */
    int nop_count = 0;
    for (int i = 0; i < length; i++) {
        int opcode = buffer[i].opcode;
        if (opcode == _NOP) {
            nop_count++;
        }
        if (is_terminator(&buffer[i])) {
            // Include the terminator itself, minus any _NOPs seen so far.
            return i+1-nop_count;
        }
    }
    /* Every well-formed trace must end in a terminator; reaching here
     * indicates a corrupted buffer, so abort rather than return garbage. */
    Py_FatalError("No terminating instruction");
    Py_UNREACHABLE();
}
1406
#endif
1407
1408
1409
static int
stack_allocate(_PyUOpInstruction *buffer, int length)
{
    /* Rewrite the trace for register-style stack caching.
     *
     * The buffer is expanded in place from `length` to `length*2` uops:
     * each original uop at index i moves to index 2*i+1, and slot 2*i is
     * reserved for an optional spill/reload uop that adjusts how many
     * stack items are cached ("depth") to what the next uop needs.
     * Unused slots are filled with _NOP for later cleanup.
     *
     * Returns the new trace length (always length*2).
     * NOTE(review): assumes the caller guarantees room for length*2
     * instructions (length < UOP_MAX_TRACE_LENGTH/2) — confirm at call sites.
     */
    assert(buffer[0].opcode == _START_EXECUTOR);
    /* Spread the uops out, iterating backwards so that no original
     * entry is overwritten before it has been copied. */
    for (int i = length-1; i >= 0; i--) {
        buffer[i*2+1] = buffer[i];
        buffer[i*2].format = UOP_FORMAT_TARGET;
        buffer[i*2].oparg = 0;
        buffer[i*2].target = 0;
    }
    int depth = 0;  // Number of stack items currently cached.
    for (int i = 0; i < length; i++) {
        _PyUOpInstruction *spill_or_reload = &buffer[i*2];
        int uop = buffer[i*2+1].opcode;
        if (uop == _NOP) {
            // leave _NOPs to be cleaned up later
            spill_or_reload->opcode = _NOP;
            continue;
        }
        /* Table lookup: best caching depth at which to run this uop,
         * given the current depth. */
        int new_depth = _PyUop_Caching[uop].best[depth];
        if (new_depth == depth) {
            spill_or_reload->opcode = _NOP;  // Already at the right depth.
        }
        else {
            /* Insert the uop that spills or reloads cached items to move
             * from `depth` to `new_depth`. */
            spill_or_reload->opcode = _PyUop_SpillsAndReloads[depth][new_depth];
            depth = new_depth;
        }
        /* Swap in the depth-specialized variant of the uop, then advance
         * depth to the variant's output depth. */
        uint16_t new_opcode = _PyUop_Caching[uop].entries[depth].opcode;
        assert(new_opcode != 0);
        assert(spill_or_reload->opcode != 0);
        buffer[i*2+1].opcode = new_opcode;
        depth = _PyUop_Caching[uop].entries[depth].output;
    }
    return length*2;
}
1444
1445
static int
uop_optimize(
    _PyInterpreterFrame *frame,
    PyThreadState *tstate,
    _PyExecutorObject **exec_ptr,
    bool progress_needed)
{
    /* Turn the thread's traced uop buffer into an optimized executor.
     *
     * Returns 1 on success (new executor stored in *exec_ptr), 0 when the
     * trace is too short to be worth compiling, and a value <= 0 (0 or an
     * error code) propagated from the optimizer, or -1 if executor
     * creation fails.
     */
    _PyThreadStateImpl *_tstate = (_PyThreadStateImpl *)tstate;
    _PyBloomFilter *dependencies = &_tstate->jit_tracer_state.prev_state.dependencies;
    _PyUOpInstruction *buffer = _tstate->jit_tracer_state.code_buffer;
    OPT_STAT_INC(attempts);
    /* The optimizer passes run unless PYTHON_UOPS_OPTIMIZE is set to a
     * non-empty string whose first character is <= '0' (e.g. "0"). */
    char *env_var = Py_GETENV("PYTHON_UOPS_OPTIMIZE");
    bool is_noopt = true;
    if (env_var == NULL || *env_var == '\0' || *env_var > '0') {
        is_noopt = false;
    }
    int curr_stackentries = _tstate->jit_tracer_state.initial_state.stack_depth;
    int length = _tstate->jit_tracer_state.prev_state.code_curr_size;
    if (length <= CODE_SIZE_NO_PROGRESS) {
        // Trace makes no forward progress; don't build an executor.
        return 0;
    }
    assert(length > 0);
    assert(length < UOP_MAX_TRACE_LENGTH/2);
    OPT_STAT_INC(traces_created);
    if (!is_noopt) {
        length = _Py_uop_analyze_and_optimize(
            _tstate->jit_tracer_state.initial_state.func,
            buffer,length,
            curr_stackentries, dependencies);
        if (length <= 0) {
            // Propagate "don't compile" (0) or error (< 0) unchanged.
            return length;
        }
    }
    assert(length < UOP_MAX_TRACE_LENGTH/2);
    assert(length >= 1);
    /* Fix up */
    for (int pc = 0; pc < length; pc++) {
        int opcode = buffer[pc].opcode;
        int oparg = buffer[pc].oparg;
        /* Replace replicable uops with their oparg-specialized variants
         * (variants are laid out contiguously after the base opcode). */
        if (oparg < _PyUop_Replication[opcode].stop && oparg >= _PyUop_Replication[opcode].start) {
            buffer[pc].opcode = opcode + oparg + 1 - _PyUop_Replication[opcode].start;
            assert(strncmp(_PyOpcode_uop_name[buffer[pc].opcode], _PyOpcode_uop_name[opcode], strlen(_PyOpcode_uop_name[opcode])) == 0);
        }
        else if (is_terminator(&buffer[pc])) {
            break;
        }
        assert(_PyOpcode_uop_name[buffer[pc].opcode]);
    }
    OPT_HIST(effective_trace_length(buffer, length), optimized_trace_length_hist);
    /* Insert spill/reload uops for register stack caching; this doubles
     * the trace length (see stack_allocate). */
    length = stack_allocate(buffer, length);
    length = prepare_for_execution(buffer, length);
    assert(length <= UOP_MAX_TRACE_LENGTH);
    _PyExecutorObject *executor = make_executor_from_uops(
        buffer, length, dependencies, _tstate->jit_tracer_state.initial_state.chain_depth);
    if (executor == NULL) {
        return -1;
    }
    assert(length <= UOP_MAX_TRACE_LENGTH);

    // Check executor coldness
    // It's okay if this ends up going negative.
    if (--tstate->interp->executor_creation_counter == 0) {
        _Py_set_eval_breaker_bit(tstate, _PY_EVAL_JIT_INVALIDATE_COLD_BIT);
    }

    *exec_ptr = executor;
    return 1;
}
1513
1514
1515
/*****************************************
1516
 *        Executor management
1517
 ****************************************/
1518
1519
/* We use a bloomfilter with k = 6, m = 256
1520
 * The choice of k and the following constants
1521
 * could do with a more rigorous analysis,
1522
 * but here is a simple analysis:
1523
 *
1524
 * We want to keep the false positive rate low.
1525
 * For n = 5 (a trace depends on 5 objects),
1526
 * we expect 30 bits set, giving a false positive
1527
 * rate of (30/256)**6 == 2.5e-6 which is plenty
1528
 * good enough.
1529
 *
1530
 * However with n = 10 we expect 60 bits set (worst case),
1531
 * giving a false positive of (60/256)**6 == 0.0001
1532
 *
1533
 * We choose k = 6, rather than a higher number as
1534
 * it means the false positive rate grows slower for high n.
1535
 *
1536
 * n = 5, k = 6 => fp = 2.6e-6
1537
 * n = 5, k = 8 => fp = 3.5e-7
1538
 * n = 10, k = 6 => fp = 1.6e-4
1539
 * n = 10, k = 8 => fp = 0.9e-4
1540
 * n = 15, k = 6 => fp = 0.18%
1541
 * n = 15, k = 8 => fp = 0.23%
1542
 * n = 20, k = 6 => fp = 1.1%
1543
 * n = 20, k = 8 => fp = 2.3%
1544
 *
1545
 * The above analysis assumes perfect hash functions,
1546
 * but those don't exist, so the real false positive
1547
 * rates may be worse.
1548
 */
1549
1550
#define K 6
1551
1552
#define SEED 20221211
1553
1554
/* TO DO -- Use more modern hash functions with better distribution of bits */
1555
static uint64_t
1556
address_to_hash(void *ptr) {
1557
    assert(ptr != NULL);
1558
    uint64_t uhash = SEED;
1559
    uintptr_t addr = (uintptr_t)ptr;
1560
    for (int i = 0; i < SIZEOF_VOID_P; i++) {
1561
        uhash ^= addr & 255;
1562
        uhash *= (uint64_t)PyHASH_MULTIPLIER;
1563
        addr >>= 8;
1564
    }
1565
    return uhash;
1566
}
1567
1568
void
1569
_Py_BloomFilter_Init(_PyBloomFilter *bloom)
1570
{
1571
    for (int i = 0; i < _Py_BLOOM_FILTER_WORDS; i++) {
1572
        bloom->bits[i] = 0;
1573
    }
1574
}
1575
1576
/* We want K hash functions that each set 1 bit.
1577
 * A hash function that sets 1 bit in M bits can be trivially
1578
 * derived from a log2(M) bit hash function.
1579
 * So we extract 8 (log2(256)) bits at a time from
1580
 * the 64bit hash. */
1581
void
1582
_Py_BloomFilter_Add(_PyBloomFilter *bloom, void *ptr)
1583
{
1584
    uint64_t hash = address_to_hash(ptr);
1585
    assert(K <= 8);
1586
    for (int i = 0; i < K; i++) {
1587
        uint8_t bits = hash & 255;
1588
        bloom->bits[bits >> 5] |= (1 << (bits&31));
1589
        hash >>= 8;
1590
    }
1591
}
1592
1593
static bool
1594
bloom_filter_may_contain(_PyBloomFilter *bloom, _PyBloomFilter *hashes)
1595
{
1596
    for (int i = 0; i < _Py_BLOOM_FILTER_WORDS; i++) {
1597
        if ((bloom->bits[i] & hashes->bits[i]) != hashes->bits[i]) {
1598
            return false;
1599
        }
1600
    }
1601
    return true;
1602
}
1603
1604
static void
link_executor(_PyExecutorObject *executor)
{
    /* Prepend `executor` to the interpreter's doubly-linked list of live
     * executors (used for bulk invalidation walks). Does not take a new
     * reference; the list borrows from the executor's owners. */
    PyInterpreterState *interp = _PyInterpreterState_GET();
    _PyExecutorLinkListNode *links = &executor->vm_data.links;
    _PyExecutorObject *head = interp->executor_list_head;
    if (head == NULL) {
        // First executor: it becomes the sole list element.
        interp->executor_list_head = executor;
        links->previous = NULL;
        links->next = NULL;
    }
    else {
        // Insert at the front, ahead of the current head.
        assert(head->vm_data.links.previous == NULL);
        links->previous = NULL;
        links->next = head;
        head->vm_data.links.previous = executor;
        interp->executor_list_head = executor;
    }
    executor->vm_data.linked = true;
    /* executor_list_head must be first in list */
    assert(interp->executor_list_head->vm_data.links.previous == NULL);
}
1626
1627
static void
unlink_executor(_PyExecutorObject *executor)
{
    /* Remove `executor` from the interpreter's live-executor list.
     * Safe to call on an executor that was never linked (no-op). */
    if (!executor->vm_data.linked) {
        return;
    }
    _PyExecutorLinkListNode *links = &executor->vm_data.links;
    assert(executor->vm_data.valid);
    _PyExecutorObject *next = links->next;
    _PyExecutorObject *prev = links->previous;
    if (next != NULL) {
        next->vm_data.links.previous = prev;
    }
    if (prev != NULL) {
        prev->vm_data.links.next = next;
    }
    else {
        // prev == NULL implies that executor is the list head
        PyInterpreterState *interp = PyInterpreterState_Get();
        assert(interp->executor_list_head == executor);
        interp->executor_list_head = next;
    }
    executor->vm_data.linked = false;
}
1651
1652
/* This must be called by optimizers before using the executor */
1653
void
_Py_ExecutorInit(_PyExecutorObject *executor, const _PyBloomFilter *dependency_set)
{
    /* Mark the executor valid, copy its dependency bloom filter, and add
     * it to the interpreter's list of live executors so it can be found
     * for invalidation later. */
    executor->vm_data.valid = true;
    for (int i = 0; i < _Py_BLOOM_FILTER_WORDS; i++) {
        executor->vm_data.bloom.bits[i] = dependency_set->bits[i];
    }
    link_executor(executor);
}
1662
1663
_PyExecutorObject *
1664
_PyExecutor_GetColdExecutor(void)
1665
{
1666
    PyInterpreterState *interp = _PyInterpreterState_GET();
1667
    if (interp->cold_executor != NULL) {
1668
        return interp->cold_executor;
1669
    }
1670
    _PyExecutorObject *cold = allocate_executor(0, 1);
1671
    if (cold == NULL) {
1672
        Py_FatalError("Cannot allocate core JIT code");
1673
    }
1674
    ((_PyUOpInstruction *)cold->trace)->opcode = _COLD_EXIT_r00;
1675
#ifdef _Py_JIT
1676
    cold->jit_code = NULL;
1677
    cold->jit_size = 0;
1678
    // This is initialized to true so we can prevent the executor
1679
    // from being immediately detected as cold and invalidated.
1680
    cold->vm_data.warm = true;
1681
    if (_PyJIT_Compile(cold, cold->trace, 1)) {
1682
        Py_DECREF(cold);
1683
        Py_FatalError("Cannot allocate core JIT code");
1684
    }
1685
#endif
1686
    _Py_SetImmortal((PyObject *)cold);
1687
    interp->cold_executor = cold;
1688
    return cold;
1689
}
1690
1691
_PyExecutorObject *
_PyExecutor_GetColdDynamicExecutor(void)
{
    /* Return the interpreter's shared, immortal "cold" executor for
     * dynamic exits (_COLD_DYNAMIC_EXIT_r00). Created lazily on first
     * use; allocation or JIT-compilation failure is fatal. */
    PyInterpreterState *interp = _PyInterpreterState_GET();
    if (interp->cold_dynamic_executor != NULL) {
        assert(interp->cold_dynamic_executor->trace[0].opcode == _COLD_DYNAMIC_EXIT_r00);
        return interp->cold_dynamic_executor;
    }
    _PyExecutorObject *cold = allocate_executor(0, 1);
    if (cold == NULL) {
        Py_FatalError("Cannot allocate core JIT code");
    }
    ((_PyUOpInstruction *)cold->trace)->opcode = _COLD_DYNAMIC_EXIT_r00;
#ifdef _Py_JIT
    cold->jit_code = NULL;
    cold->jit_size = 0;
    // This is initialized to true so we can prevent the executor
    // from being immediately detected as cold and invalidated.
    cold->vm_data.warm = true;
    if (_PyJIT_Compile(cold, cold->trace, 1)) {
        Py_DECREF(cold);
        Py_FatalError("Cannot allocate core JIT code");
    }
#endif
    /* Immortal: shared by every dynamic exit for the interpreter's life. */
    _Py_SetImmortal((PyObject *)cold);
    interp->cold_dynamic_executor = cold;
    return cold;
}
1719
1720
void
1721
_PyExecutor_ClearExit(_PyExitData *exit)
1722
{
1723
    if (exit == NULL) {
1724
        return;
1725
    }
1726
    _PyExecutorObject *old = exit->executor;
1727
    if (exit->is_dynamic) {
1728
        exit->executor = _PyExecutor_GetColdDynamicExecutor();
1729
    }
1730
    else {
1731
        exit->executor = _PyExecutor_GetColdExecutor();
1732
    }
1733
    Py_DECREF(old);
1734
}
1735
1736
/* Detaches the executor from the code object (if any) that
1737
 * holds a reference to it */
1738
void
_Py_ExecutorDetach(_PyExecutorObject *executor)
{
    /* Restore the original instruction that ENTER_EXECUTOR replaced,
     * clear the code object's slot for this executor, and drop the
     * code object's reference. No-op if not attached to any code. */
    PyCodeObject *code = executor->vm_data.code;
    if (code == NULL) {
        return;
    }
    _Py_CODEUNIT *instruction = &_PyCode_CODE(code)[executor->vm_data.index];
    assert(instruction->op.code == ENTER_EXECUTOR);
    int index = instruction->op.arg;
    assert(code->co_executors->executors[index] == executor);
    /* Put back the opcode/oparg saved when the executor was installed. */
    instruction->op.code = executor->vm_data.opcode;
    instruction->op.arg = executor->vm_data.oparg;
    executor->vm_data.code = NULL;
    code->co_executors->executors[index] = NULL;
    Py_DECREF(executor);  // The reference previously held by the code object.
}
1755
1756
static int
executor_clear(PyObject *op)
{
    /* Invalidate an executor: unlink it, mark it invalid, point all of
     * its side exits at the cold executor, and detach it from its code
     * object. Idempotent — already-invalid executors are left alone. */
    _PyExecutorObject *executor = _PyExecutorObject_CAST(op);
    if (!executor->vm_data.valid) {
        return 0;
    }
    assert(executor->vm_data.valid == 1);
    unlink_executor(executor);
    executor->vm_data.valid = 0;

    /* It is possible for an executor to form a reference
     * cycle with itself, so decref'ing a side exit could
     * free the executor unless we hold a strong reference to it
     */
    _PyExecutorObject *cold = _PyExecutor_GetColdExecutor();
    Py_INCREF(executor);
    for (uint32_t i = 0; i < executor->exit_count; i++) {
        // Unreachable counter keeps the exit from ever warming up again.
        executor->exits[i].temperature = initial_unreachable_backoff_counter();
        _PyExecutorObject *e = executor->exits[i].executor;
        /* NOTE(review): dynamic exits are also pointed at the plain cold
         * executor here, unlike _PyExecutor_ClearExit which distinguishes
         * dynamic exits — confirm this is intended for invalidation. */
        executor->exits[i].executor = cold;
        Py_DECREF(e);
    }
    _Py_ExecutorDetach(executor);
    Py_DECREF(executor);  // Drop the temporary guard reference.
    return 0;
}
1783
1784
void
_Py_Executor_DependsOn(_PyExecutorObject *executor, void *obj)
{
    /* Record `obj` in the executor's bloom filter so that invalidating
     * `obj` (via _Py_Executors_InvalidateDependency) invalidates this
     * executor too. Only valid executors may gain dependencies. */
    assert(executor->vm_data.valid);
    _Py_BloomFilter_Add(&executor->vm_data.bloom, obj);
}
1790
1791
/* Invalidate all executors that depend on `obj`
1792
 * May cause other executors to be invalidated as well
1793
 */
1794
void
1795
_Py_Executors_InvalidateDependency(PyInterpreterState *interp, void *obj, int is_invalidation)
1796
{
1797
    _PyBloomFilter obj_filter;
1798
    _Py_BloomFilter_Init(&obj_filter);
1799
    _Py_BloomFilter_Add(&obj_filter, obj);
1800
    /* Walk the list of executors */
1801
    /* TO DO -- Use a tree to avoid traversing as many objects */
1802
    PyObject *invalidate = PyList_New(0);
1803
    if (invalidate == NULL) {
1804
        goto error;
1805
    }
1806
    /* Clearing an executor can deallocate others, so we need to make a list of
1807
     * executors to invalidate first */
1808
    for (_PyExecutorObject *exec = interp->executor_list_head; exec != NULL;) {
1809
        assert(exec->vm_data.valid);
1810
        _PyExecutorObject *next = exec->vm_data.links.next;
1811
        if (bloom_filter_may_contain(&exec->vm_data.bloom, &obj_filter) &&
1812
            PyList_Append(invalidate, (PyObject *)exec))
1813
        {
1814
            goto error;
1815
        }
1816
        exec = next;
1817
    }
1818
    for (Py_ssize_t i = 0; i < PyList_GET_SIZE(invalidate); i++) {
1819
        PyObject *exec = PyList_GET_ITEM(invalidate, i);
1820
        executor_clear(exec);
1821
        if (is_invalidation) {
1822
            OPT_STAT_INC(executors_invalidated);
1823
        }
1824
    }
1825
    Py_DECREF(invalidate);
1826
    return;
1827
error:
1828
    PyErr_Clear();
1829
    Py_XDECREF(invalidate);
1830
    // If we're truly out of memory, wiping out everything is a fine fallback:
1831
    _Py_Executors_InvalidateAll(interp, is_invalidation);
1832
}
1833
1834
void
_PyJit_Tracer_InvalidateDependency(PyThreadState *tstate, void *obj)
{
    /* If the trace currently being recorded on this thread may depend on
     * `obj`, flag its dependencies as stale so the trace is discarded
     * rather than compiled against invalidated state. */
    _PyBloomFilter obj_filter;
    _Py_BloomFilter_Init(&obj_filter);
    _Py_BloomFilter_Add(&obj_filter, obj);
    _PyThreadStateImpl *_tstate = (_PyThreadStateImpl *)tstate;
    if (bloom_filter_may_contain(&_tstate->jit_tracer_state.prev_state.dependencies, &obj_filter))
    {
        _tstate->jit_tracer_state.prev_state.dependencies_still_valid = false;
    }
}
1846
/* Invalidate all executors */
1847
void
_Py_Executors_InvalidateAll(PyInterpreterState *interp, int is_invalidation)
{
    /* Invalidate every live executor. Clearing an executor unlinks it,
     * so repeatedly consuming the list head drains the whole list. */
    while (interp->executor_list_head) {
        _PyExecutorObject *executor = interp->executor_list_head;
        assert(executor->vm_data.valid == 1 && executor->vm_data.linked == 1);
        if (executor->vm_data.code) {
            // Clear the entire code object so its co_executors array be freed:
            _PyCode_Clear_Executors(executor->vm_data.code);
        }
        else {
            executor_clear((PyObject *)executor);
        }
        if (is_invalidation) {
            OPT_STAT_INC(executors_invalidated);
        }
    }
}
1865
1866
void
_Py_Executors_InvalidateCold(PyInterpreterState *interp)
{
    /* Two-phase "clock" sweep: executors still marked cold from the
     * previous pass (warm == false) are collected and invalidated; all
     * surviving executors are re-marked cold so that executing them can
     * re-warm them before the next sweep. On memory error, everything
     * is invalidated as a fallback. */
    /* Walk the list of executors */
    /* TO DO -- Use a tree to avoid traversing as many objects */
    PyObject *invalidate = PyList_New(0);
    if (invalidate == NULL) {
        goto error;
    }

    /* Clearing an executor can deallocate others, so we need to make a list of
     * executors to invalidate first */
    for (_PyExecutorObject *exec = interp->executor_list_head; exec != NULL;) {
        assert(exec->vm_data.valid);
        _PyExecutorObject *next = exec->vm_data.links.next;

        if (!exec->vm_data.warm && PyList_Append(invalidate, (PyObject *)exec) < 0) {
            goto error;
        }
        else {
            // Survivors are marked cold for the next sweep.
            exec->vm_data.warm = false;
        }

        exec = next;
    }
    for (Py_ssize_t i = 0; i < PyList_GET_SIZE(invalidate); i++) {
        PyObject *exec = PyList_GET_ITEM(invalidate, i);
        executor_clear(exec);
    }
    Py_DECREF(invalidate);
    return;
error:
    PyErr_Clear();
    Py_XDECREF(invalidate);
    // If we're truly out of memory, wiping out everything is a fine fallback
    _Py_Executors_InvalidateAll(interp, 0);
}
1903
1904
static void
1905
write_str(PyObject *str, FILE *out)
1906
{
1907
    // Encode the Unicode object to the specified encoding
1908
    PyObject *encoded_obj = PyUnicode_AsEncodedString(str, "utf8", "strict");
1909
    if (encoded_obj == NULL) {
1910
        PyErr_Clear();
1911
        return;
1912
    }
1913
    const char *encoded_str = PyBytes_AsString(encoded_obj);
1914
    Py_ssize_t encoded_size = PyBytes_Size(encoded_obj);
1915
    fwrite(encoded_str, 1, encoded_size, out);
1916
    Py_DECREF(encoded_obj);
1917
}
1918
1919
static int
find_line_number(PyCodeObject *code, _PyExecutorObject *executor)
{
    /* Scan `code` for the ENTER_EXECUTOR instruction that points at
     * `executor` and return its source line number, or -1 if the
     * executor is not installed in this code object. */
    int code_len = (int)Py_SIZE(code);
    for (int i = 0; i < code_len; i++) {
        _Py_CODEUNIT *instr = &_PyCode_CODE(code)[i];
        int opcode = instr->op.code;
        if (opcode == ENTER_EXECUTOR) {
            _PyExecutorObject *exec = code->co_executors->executors[instr->op.arg];
            if (exec == executor) {
                // i*2 converts the code-unit index to a byte offset.
                return PyCode_Addr2Line(code, i*2);
            }
        }
        /* Skip over the inline cache entries of the (base) instruction. */
        i += _PyOpcode_Caches[_Py_GetBaseCodeUnit(code, i).op.code];
    }
    return -1;
}
1936
1937
/* Writes the node and outgoing edges for a single tracelet in graphviz format.
1938
 * Each tracelet is presented as a table of the uops it contains.
1939
 * If Py_STATS is enabled, execution counts are included.
1940
 *
1941
 * https://graphviz.readthedocs.io/en/stable/manual.html
1942
 * https://graphviz.org/gallery/
1943
 */
1944
static void
executor_to_gv(_PyExecutorObject *executor, FILE *out)
{
    /* Emit one graphviz node (an HTML table of uops) for `executor`,
     * followed by edges from its side exits to other executors. Edges to
     * the shared cold executors are omitted to keep the graph readable. */
    PyCodeObject *code = executor->vm_data.code;
    fprintf(out, "executor_%p [\n", executor);
    fprintf(out, "    shape = none\n");

    /* Write the HTML table for the uops */
    fprintf(out, "    label = <<table border=\"0\" cellspacing=\"0\">\n");
    fprintf(out, "        <tr><td port=\"start\" border=\"1\" ><b>Executor</b></td></tr>\n");
    if (code == NULL) {
        fprintf(out, "        <tr><td border=\"1\" >No code object</td></tr>\n");
    }
    else {
        fprintf(out, "        <tr><td  border=\"1\" >");
        write_str(code->co_qualname, out);
        int line = find_line_number(code, executor);
        fprintf(out, ": %d</td></tr>\n", line);
    }
    for (uint32_t i = 0; i < executor->code_size; i++) {
        /* Write row for uop.
         * The `port` is a marker so that outgoing edges can
         * be placed correctly. If a row is marked `port=17`,
         * then the outgoing edge is `{EXEC_NAME}:17 -> {TARGET}`
         * https://graphviz.readthedocs.io/en/stable/manual.html#node-ports-compass
         */
        _PyUOpInstruction const *inst = &executor->trace[i];
        uint16_t base_opcode = _PyUop_Uncached[inst->opcode];
        const char *opname = _PyOpcode_uop_name[base_opcode];
#ifdef Py_STATS
        fprintf(out, "        <tr><td port=\"i%d\" border=\"1\" >%s -- %" PRIu64 "</td></tr>\n", i, opname, inst->execution_count);
#else
        fprintf(out, "        <tr><td port=\"i%d\" border=\"1\" >%s op0=%" PRIu64 "</td></tr>\n", i, opname, inst->operand0);
#endif
        /* NOTE(review): this terminator test uses the raw opcode, while
         * the edge loop below tests the uncached base opcode — confirm
         * both are intended. */
        if (inst->opcode == _EXIT_TRACE || inst->opcode == _JUMP_TO_TOP) {
            break;
        }
    }
    fprintf(out, "    </table>>\n");
    fprintf(out, "]\n\n");

    /* Write all the outgoing edges */
    _PyExecutorObject *cold = _PyExecutor_GetColdExecutor();
    _PyExecutorObject *cold_dynamic = _PyExecutor_GetColdDynamicExecutor();
    for (uint32_t i = 0; i < executor->code_size; i++) {
        _PyUOpInstruction const *inst = &executor->trace[i];
        uint16_t base_opcode = _PyUop_Uncached[inst->opcode];
        uint16_t flags = _PyUop_Flags[inst->opcode];
        _PyExitData *exit = NULL;
        if (base_opcode == _EXIT_TRACE) {
            // Unconditional exit: exit data is stored in operand0.
            exit = (_PyExitData *)inst->operand0;
        }
        else if (flags & HAS_EXIT_FLAG) {
            // Guard-style uop: follow its jump target to the exit uop.
            assert(inst->format == UOP_FORMAT_JUMP);
            _PyUOpInstruction const *exit_inst = &executor->trace[inst->jump_target];
            uint16_t base_exit_opcode = _PyUop_Uncached[exit_inst->opcode];
            assert(base_exit_opcode == _EXIT_TRACE || base_exit_opcode == _DYNAMIC_EXIT);
            exit = (_PyExitData *)exit_inst->operand0;
        }
        if (exit != NULL && exit->executor != cold && exit->executor != cold_dynamic) {
            fprintf(out, "executor_%p:i%d -> executor_%p:start\n", executor, i, exit->executor);
        }
        if (base_opcode == _EXIT_TRACE || base_opcode == _JUMP_TO_TOP) {
            break;
        }
    }
}
2011
2012
/* Write the graph of all the live tracelets in graphviz format. */
2013
int
_PyDumpExecutors(FILE *out)
{
    /* Write the graph of all live executors to `out` in graphviz (dot)
     * format, one node per executor plus the exit edges between them.
     * Always returns 0 in this (Tier 2) configuration. */
    fprintf(out, "digraph ideal {\n\n");
    fprintf(out, "    rankdir = \"LR\"\n\n");
    PyInterpreterState *interp = PyInterpreterState_Get();
    for (_PyExecutorObject *exec = interp->executor_list_head; exec != NULL;) {
        executor_to_gv(exec, out);
        exec = exec->vm_data.links.next;
    }
    fprintf(out, "}\n\n");
    return 0;
}
2026
2027
#else
2028
2029
int
_PyDumpExecutors(FILE *out)
{
    /* Stub for builds without the Tier 2 JIT: always fails with
     * NotImplementedError. */
    PyErr_SetString(PyExc_NotImplementedError, "No JIT available");
    return -1;
}
2035
2036
void
_PyExecutor_Free(struct _PyExecutorObject *self)
{
    /* Stub for builds without the Tier 2 JIT: no executors can exist,
     * so reaching this is a logic error. */
    /* This should never be called */
    Py_UNREACHABLE();
}
2042
2043
#endif /* _Py_TIER2 */