Coverage Report

Created: 2026-02-09 07:07

next uncovered line (L), next uncovered region (R), next uncovered branch (B)
/src/cpython/Python/optimizer.c
Line
Count
Source
1
#include "Python.h"
2
3
#ifdef _Py_TIER2
4
5
#include "opcode.h"
6
#include "pycore_interp.h"
7
#include "pycore_backoff.h"
8
#include "pycore_bitutils.h"        // _Py_popcount32()
9
#include "pycore_ceval.h"       // _Py_set_eval_breaker_bit
10
#include "pycore_code.h"            // _Py_GetBaseCodeUnit
11
#include "pycore_function.h"        // _PyFunction_LookupByVersion()
12
#include "pycore_interpframe.h"
13
#include "pycore_object.h"          // _PyObject_GC_UNTRACK()
14
#include "pycore_opcode_metadata.h" // _PyOpcode_OpName[]
15
#include "pycore_opcode_utils.h"  // MAX_REAL_OPCODE
16
#include "pycore_optimizer.h"     // _Py_uop_analyze_and_optimize()
17
#include "pycore_pystate.h"       // _PyInterpreterState_GET()
18
#include "pycore_tuple.h"         // _PyTuple_FromArraySteal
19
#include "pycore_unicodeobject.h" // _PyUnicode_FromASCII
20
#include "pycore_uop_ids.h"
21
#include "pycore_jit.h"
22
#include <stdbool.h>
23
#include <stdint.h>
24
#include <stddef.h>
25
26
#define NEED_OPCODE_METADATA
27
#include "pycore_uop_metadata.h" // Uop tables
28
#undef NEED_OPCODE_METADATA
29
30
#define MAX_EXECUTORS_SIZE 256
31
32
// Trace too short, no progress:
33
// _START_EXECUTOR
34
// _MAKE_WARM
35
// _CHECK_VALIDITY
36
// _SET_IP
37
// is 4-5 instructions.
38
#define CODE_SIZE_NO_PROGRESS 5
39
// We start with _START_EXECUTOR, _MAKE_WARM
40
#define CODE_SIZE_EMPTY 2
41
42
#define _PyExecutorObject_CAST(op)  ((_PyExecutorObject *)(op))
43
44
static bool
45
has_space_for_executor(PyCodeObject *code, _Py_CODEUNIT *instr)
46
{
47
    if (code == (PyCodeObject *)&_Py_InitCleanup) {
48
        return false;
49
    }
50
    if (instr->op.code == ENTER_EXECUTOR) {
51
        return true;
52
    }
53
    if (code->co_executors == NULL) {
54
        return true;
55
    }
56
    return code->co_executors->size < MAX_EXECUTORS_SIZE;
57
}
58
59
static int32_t
60
get_index_for_executor(PyCodeObject *code, _Py_CODEUNIT *instr)
61
{
62
    if (instr->op.code == ENTER_EXECUTOR) {
63
        return instr->op.arg;
64
    }
65
    _PyExecutorArray *old = code->co_executors;
66
    int size = 0;
67
    int capacity = 0;
68
    if (old != NULL) {
69
        size = old->size;
70
        capacity = old->capacity;
71
        assert(size < MAX_EXECUTORS_SIZE);
72
    }
73
    assert(size <= capacity);
74
    if (size == capacity) {
75
        /* Array is full. Grow array */
76
        int new_capacity = capacity ? capacity * 2 : 4;
77
        _PyExecutorArray *new = PyMem_Realloc(
78
            old,
79
            offsetof(_PyExecutorArray, executors) +
80
            new_capacity * sizeof(_PyExecutorObject *));
81
        if (new == NULL) {
82
            return -1;
83
        }
84
        new->capacity = new_capacity;
85
        new->size = size;
86
        code->co_executors = new;
87
    }
88
    assert(size < code->co_executors->capacity);
89
    return size;
90
}
91
92
static void
93
insert_executor(PyCodeObject *code, _Py_CODEUNIT *instr, int index, _PyExecutorObject *executor)
94
{
95
    Py_INCREF(executor);
96
    if (instr->op.code == ENTER_EXECUTOR) {
97
        assert(index == instr->op.arg);
98
        _Py_ExecutorDetach(code->co_executors->executors[index]);
99
    }
100
    else {
101
        assert(code->co_executors->size == index);
102
        assert(code->co_executors->capacity > index);
103
        code->co_executors->size++;
104
    }
105
    executor->vm_data.opcode = instr->op.code;
106
    executor->vm_data.oparg = instr->op.arg;
107
    executor->vm_data.code = code;
108
    executor->vm_data.index = (int)(instr - _PyCode_CODE(code));
109
    code->co_executors->executors[index] = executor;
110
    assert(index < MAX_EXECUTORS_SIZE);
111
    instr->op.code = ENTER_EXECUTOR;
112
    instr->op.arg = index;
113
}
114
115
static _PyExecutorObject *
116
make_executor_from_uops(_PyThreadStateImpl *tstate, _PyUOpInstruction *buffer, int length, const _PyBloomFilter *dependencies);
117
118
static int
119
uop_optimize(_PyInterpreterFrame *frame, PyThreadState *tstate,
120
             _PyExecutorObject **exec_ptr,
121
             bool progress_needed);
122
123
/* Returns 1 if optimized, 0 if not optimized, and -1 for an error.
124
 * If optimized, *executor_ptr contains a new reference to the executor
125
 */
126
// gh-137573: inlining this function causes stack overflows
127
Py_NO_INLINE int
128
_PyOptimizer_Optimize(
129
    _PyInterpreterFrame *frame, PyThreadState *tstate)
130
{
131
    _PyThreadStateImpl *_tstate = (_PyThreadStateImpl *)tstate;
132
    int chain_depth = _tstate->jit_tracer_state->initial_state.chain_depth;
133
    PyInterpreterState *interp = _PyInterpreterState_GET();
134
    if (!interp->jit) {
135
        // gh-140936: It is possible that interp->jit will become false during
136
        // interpreter finalization. However, the specialized JUMP_BACKWARD_JIT
137
        // instruction may still be present. In this case, we should
138
        // return immediately without optimization.
139
        return 0;
140
    }
141
    _PyExecutorObject *prev_executor = _tstate->jit_tracer_state->initial_state.executor;
142
    if (prev_executor != NULL && !prev_executor->vm_data.valid) {
143
        // gh-143604: If we are a side exit executor and the original executor is no
144
        // longer valid, don't compile to prevent a reference leak.
145
        return 0;
146
    }
147
    assert(!interp->compiling);
148
    assert(_tstate->jit_tracer_state->initial_state.stack_depth >= 0);
149
#ifndef Py_GIL_DISABLED
150
    assert(_tstate->jit_tracer_state->initial_state.func != NULL);
151
    interp->compiling = true;
152
    // The first executor in a chain and the MAX_CHAIN_DEPTH'th executor *must*
153
    // make progress in order to avoid infinite loops or excessively-long
154
    // side-exit chains. We can only insert the executor into the bytecode if
155
    // this is true, since a deopt won't infinitely re-enter the executor:
156
    chain_depth %= MAX_CHAIN_DEPTH;
157
    bool progress_needed = chain_depth == 0;
158
    PyCodeObject *code = (PyCodeObject *)_tstate->jit_tracer_state->initial_state.code;
159
    _Py_CODEUNIT *start = _tstate->jit_tracer_state->initial_state.start_instr;
160
    if (progress_needed && !has_space_for_executor(code, start)) {
161
        interp->compiling = false;
162
        return 0;
163
    }
164
    // One of our dependencies while tracing was invalidated. Not worth compiling.
165
    if (!_tstate->jit_tracer_state->prev_state.dependencies_still_valid) {
166
        interp->compiling = false;
167
        return 0;
168
    }
169
    _PyExecutorObject *executor;
170
    int err = uop_optimize(frame, tstate, &executor, progress_needed);
171
    if (err <= 0) {
172
        interp->compiling = false;
173
        return err;
174
    }
175
    assert(executor != NULL);
176
    if (progress_needed) {
177
        int index = get_index_for_executor(code, start);
178
        if (index < 0) {
179
            /* Out of memory. Don't raise and assume that the
180
             * error will show up elsewhere.
181
             *
182
             * If an optimizer has already produced an executor,
183
             * it might get confused by the executor disappearing,
184
             * but there is not much we can do about that here. */
185
            Py_DECREF(executor);
186
            interp->compiling = false;
187
            return 0;
188
        }
189
        insert_executor(code, start, index, executor);
190
    }
191
    executor->vm_data.chain_depth = chain_depth;
192
    assert(executor->vm_data.valid);
193
    _PyExitData *exit = _tstate->jit_tracer_state->initial_state.exit;
194
    if (exit != NULL && !progress_needed) {
195
        exit->executor = executor;
196
    }
197
    else {
198
        // An executor inserted into the code object now has a strong reference
199
        // to it from the code object. Thus, we don't need this reference anymore.
200
        Py_DECREF(executor);
201
    }
202
    interp->compiling = false;
203
    return 1;
204
#else
205
    return 0;
206
#endif
207
}
208
209
static _PyExecutorObject *
210
get_executor_lock_held(PyCodeObject *code, int offset)
211
{
212
    int code_len = (int)Py_SIZE(code);
213
    for (int i = 0 ; i < code_len;) {
214
        if (_PyCode_CODE(code)[i].op.code == ENTER_EXECUTOR && i*2 == offset) {
215
            int oparg = _PyCode_CODE(code)[i].op.arg;
216
            _PyExecutorObject *res = code->co_executors->executors[oparg];
217
            Py_INCREF(res);
218
            return res;
219
        }
220
        i += _PyInstruction_GetLength(code, i);
221
    }
222
    PyErr_SetString(PyExc_ValueError, "no executor at given byte offset");
223
    return NULL;
224
}
225
226
_PyExecutorObject *
227
_Py_GetExecutor(PyCodeObject *code, int offset)
228
{
229
    _PyExecutorObject *executor;
230
    Py_BEGIN_CRITICAL_SECTION(code);
231
    executor = get_executor_lock_held(code, offset);
232
    Py_END_CRITICAL_SECTION();
233
    return executor;
234
}
235
236
static PyObject *
237
is_valid(PyObject *self, PyObject *Py_UNUSED(ignored))
238
{
239
    return PyBool_FromLong(((_PyExecutorObject *)self)->vm_data.valid);
240
}
241
242
static PyObject *
243
get_opcode(PyObject *self, PyObject *Py_UNUSED(ignored))
244
{
245
    return PyLong_FromUnsignedLong(((_PyExecutorObject *)self)->vm_data.opcode);
246
}
247
248
static PyObject *
249
get_oparg(PyObject *self, PyObject *Py_UNUSED(ignored))
250
{
251
    return PyLong_FromUnsignedLong(((_PyExecutorObject *)self)->vm_data.oparg);
252
}
253
254
///////////////////// Experimental UOp Optimizer /////////////////////
255
256
static int executor_clear(PyObject *executor);
257
258
void
259
_PyExecutor_Free(_PyExecutorObject *self)
260
{
261
#ifdef _Py_JIT
262
    _PyJIT_Free(self);
263
#endif
264
    PyObject_GC_Del(self);
265
}
266
267
static void executor_invalidate(PyObject *op);
268
269
static void
270
executor_clear_exits(_PyExecutorObject *executor)
271
{
272
    _PyExecutorObject *cold = _PyExecutor_GetColdExecutor();
273
    _PyExecutorObject *cold_dynamic = _PyExecutor_GetColdDynamicExecutor();
274
    for (uint32_t i = 0; i < executor->exit_count; i++) {
275
        _PyExitData *exit = &executor->exits[i];
276
        exit->temperature = initial_unreachable_backoff_counter();
277
        _PyExecutorObject *old = executor->exits[i].executor;
278
        exit->executor = exit->is_dynamic ? cold_dynamic : cold;
279
        Py_DECREF(old);
280
    }
281
}
282
283
284
void
285
_Py_ClearExecutorDeletionList(PyInterpreterState *interp)
286
{
287
    if (interp->executor_deletion_list_head == NULL) {
288
        return;
289
    }
290
    _PyRuntimeState *runtime = &_PyRuntime;
291
    HEAD_LOCK(runtime);
292
    PyThreadState* ts = PyInterpreterState_ThreadHead(interp);
293
    while (ts) {
294
        _PyExecutorObject *current = (_PyExecutorObject *)ts->current_executor;
295
        Py_XINCREF(current);
296
        ts = ts->next;
297
    }
298
    HEAD_UNLOCK(runtime);
299
    _PyExecutorObject *keep_list = NULL;
300
    do {
301
        _PyExecutorObject *exec = interp->executor_deletion_list_head;
302
        interp->executor_deletion_list_head = exec->vm_data.links.next;
303
        if (Py_REFCNT(exec) == 0) {
304
            _PyExecutor_Free(exec);
305
        } else {
306
            exec->vm_data.links.next = keep_list;
307
            keep_list = exec;
308
        }
309
    } while (interp->executor_deletion_list_head != NULL);
310
    interp->executor_deletion_list_head = keep_list;
311
    HEAD_LOCK(runtime);
312
    ts = PyInterpreterState_ThreadHead(interp);
313
    while (ts) {
314
        _PyExecutorObject *current = (_PyExecutorObject *)ts->current_executor;
315
        if (current != NULL) {
316
            Py_DECREF((PyObject *)current);
317
        }
318
        ts = ts->next;
319
    }
320
    HEAD_UNLOCK(runtime);
321
}
322
323
static void
324
add_to_pending_deletion_list(_PyExecutorObject *self)
325
{
326
    if (self->vm_data.pending_deletion) {
327
        return;
328
    }
329
    self->vm_data.pending_deletion = 1;
330
    PyInterpreterState *interp = PyInterpreterState_Get();
331
    self->vm_data.links.previous = NULL;
332
    self->vm_data.links.next = interp->executor_deletion_list_head;
333
    interp->executor_deletion_list_head = self;
334
}
335
336
static void
337
uop_dealloc(PyObject *op) {
338
    _PyExecutorObject *self = _PyExecutorObject_CAST(op);
339
    executor_invalidate(op);
340
    assert(self->vm_data.code == NULL);
341
    add_to_pending_deletion_list(self);
342
}
343
344
const char *
345
_PyUOpName(int index)
346
{
347
    if (index < 0 || index > MAX_UOP_REGS_ID) {
348
        return NULL;
349
    }
350
    return _PyOpcode_uop_name[index];
351
}
352
353
#ifdef Py_DEBUG
354
void
355
_PyUOpPrint(const _PyUOpInstruction *uop)
356
{
357
    const char *name = _PyUOpName(uop->opcode);
358
    if (name == NULL) {
359
        printf("<uop %d>", uop->opcode);
360
    }
361
    else {
362
        printf("%s", name);
363
    }
364
    switch(uop->format) {
365
        case UOP_FORMAT_TARGET:
366
            printf(" (%d, target=%d, operand0=%#" PRIx64 ", operand1=%#" PRIx64,
367
                uop->oparg,
368
                uop->target,
369
                (uint64_t)uop->operand0,
370
                (uint64_t)uop->operand1);
371
            break;
372
        case UOP_FORMAT_JUMP:
373
            printf(" (%d, jump_target=%d, operand0=%#" PRIx64 ", operand1=%#" PRIx64,
374
                uop->oparg,
375
                uop->jump_target,
376
                (uint64_t)uop->operand0,
377
                (uint64_t)uop->operand1);
378
            break;
379
        default:
380
            printf(" (%d, Unknown format)", uop->oparg);
381
    }
382
    if (_PyUop_Flags[_PyUop_Uncached[uop->opcode]] & HAS_ERROR_FLAG) {
383
        printf(", error_target=%d", uop->error_target);
384
    }
385
386
    printf(")");
387
}
388
#endif
389
390
static Py_ssize_t
391
uop_len(PyObject *op)
392
{
393
    _PyExecutorObject *self = _PyExecutorObject_CAST(op);
394
    return self->code_size;
395
}
396
397
static PyObject *
398
uop_item(PyObject *op, Py_ssize_t index)
399
{
400
    _PyExecutorObject *self = _PyExecutorObject_CAST(op);
401
    Py_ssize_t len = uop_len(op);
402
    if (index < 0 || index >= len) {
403
        PyErr_SetNone(PyExc_IndexError);
404
        return NULL;
405
    }
406
    int opcode = self->trace[index].opcode;
407
    int base_opcode = _PyUop_Uncached[opcode];
408
    const char *name = _PyUOpName(base_opcode);
409
    if (name == NULL) {
410
        name = "<nil>";
411
    }
412
    PyObject *oname = _PyUnicode_FromASCII(name, strlen(name));
413
    if (oname == NULL) {
414
        return NULL;
415
    }
416
    PyObject *oparg = PyLong_FromUnsignedLong(self->trace[index].oparg);
417
    if (oparg == NULL) {
418
        Py_DECREF(oname);
419
        return NULL;
420
    }
421
    PyObject *target = PyLong_FromUnsignedLong(self->trace[index].target);
422
    if (target == NULL) {
423
        Py_DECREF(oparg);
424
        Py_DECREF(oname);
425
        return NULL;
426
    }
427
    PyObject *operand = PyLong_FromUnsignedLongLong(self->trace[index].operand0);
428
    if (operand == NULL) {
429
        Py_DECREF(target);
430
        Py_DECREF(oparg);
431
        Py_DECREF(oname);
432
        return NULL;
433
    }
434
    PyObject *args[4] = { oname, oparg, target, operand };
435
    return _PyTuple_FromArraySteal(args, 4);
436
}
437
438
PySequenceMethods uop_as_sequence = {
439
    .sq_length = uop_len,
440
    .sq_item = uop_item,
441
};
442
443
static int
444
executor_traverse(PyObject *o, visitproc visit, void *arg)
445
{
446
    _PyExecutorObject *executor = _PyExecutorObject_CAST(o);
447
    for (uint32_t i = 0; i < executor->exit_count; i++) {
448
        Py_VISIT(executor->exits[i].executor);
449
    }
450
    return 0;
451
}
452
453
static PyObject *
454
get_jit_code(PyObject *self, PyObject *Py_UNUSED(ignored))
455
{
456
#ifndef _Py_JIT
457
    PyErr_SetString(PyExc_RuntimeError, "JIT support not enabled.");
458
    return NULL;
459
#else
460
    _PyExecutorObject *executor = _PyExecutorObject_CAST(self);
461
    if (executor->jit_code == NULL || executor->jit_size == 0) {
462
        Py_RETURN_NONE;
463
    }
464
    return PyBytes_FromStringAndSize(executor->jit_code, executor->jit_size);
465
#endif
466
}
467
468
static PyMethodDef uop_executor_methods[] = {
469
    { "is_valid", is_valid, METH_NOARGS, NULL },
470
    { "get_jit_code", get_jit_code, METH_NOARGS, NULL},
471
    { "get_opcode", get_opcode, METH_NOARGS, NULL },
472
    { "get_oparg", get_oparg, METH_NOARGS, NULL },
473
    { NULL, NULL },
474
};
475
476
static int
477
executor_is_gc(PyObject *o)
478
{
479
    return !_Py_IsImmortal(o);
480
}
481
482
PyTypeObject _PyUOpExecutor_Type = {
483
    PyVarObject_HEAD_INIT(&PyType_Type, 0)
484
    .tp_name = "uop_executor",
485
    .tp_basicsize = offsetof(_PyExecutorObject, exits),
486
    .tp_itemsize = 1,
487
    .tp_flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_DISALLOW_INSTANTIATION | Py_TPFLAGS_HAVE_GC,
488
    .tp_dealloc = uop_dealloc,
489
    .tp_as_sequence = &uop_as_sequence,
490
    .tp_methods = uop_executor_methods,
491
    .tp_traverse = executor_traverse,
492
    .tp_clear = executor_clear,
493
    .tp_is_gc = executor_is_gc,
494
};
495
496
/* TO DO -- Generate these tables */
497
static const uint16_t
498
_PyUOp_Replacements[MAX_UOP_ID + 1] = {
499
    [_ITER_JUMP_RANGE] = _GUARD_NOT_EXHAUSTED_RANGE,
500
    [_ITER_JUMP_LIST] = _GUARD_NOT_EXHAUSTED_LIST,
501
    [_ITER_JUMP_TUPLE] = _GUARD_NOT_EXHAUSTED_TUPLE,
502
    [_FOR_ITER] = _FOR_ITER_TIER_TWO,
503
    [_ITER_NEXT_LIST] = _ITER_NEXT_LIST_TIER_TWO,
504
    [_CHECK_PERIODIC_AT_END] = _TIER2_RESUME_CHECK,
505
};
506
507
static const uint8_t
508
is_for_iter_test[MAX_UOP_ID + 1] = {
509
    [_GUARD_NOT_EXHAUSTED_RANGE] = 1,
510
    [_GUARD_NOT_EXHAUSTED_LIST] = 1,
511
    [_GUARD_NOT_EXHAUSTED_TUPLE] = 1,
512
    [_FOR_ITER_TIER_TWO] = 1,
513
};
514
515
static const uint16_t
516
BRANCH_TO_GUARD[4][2] = {
517
    [POP_JUMP_IF_FALSE - POP_JUMP_IF_FALSE][0] = _GUARD_IS_TRUE_POP,
518
    [POP_JUMP_IF_FALSE - POP_JUMP_IF_FALSE][1] = _GUARD_IS_FALSE_POP,
519
    [POP_JUMP_IF_TRUE - POP_JUMP_IF_FALSE][0] = _GUARD_IS_FALSE_POP,
520
    [POP_JUMP_IF_TRUE - POP_JUMP_IF_FALSE][1] = _GUARD_IS_TRUE_POP,
521
    [POP_JUMP_IF_NONE - POP_JUMP_IF_FALSE][0] = _GUARD_IS_NOT_NONE_POP,
522
    [POP_JUMP_IF_NONE - POP_JUMP_IF_FALSE][1] = _GUARD_IS_NONE_POP,
523
    [POP_JUMP_IF_NOT_NONE - POP_JUMP_IF_FALSE][0] = _GUARD_IS_NONE_POP,
524
    [POP_JUMP_IF_NOT_NONE - POP_JUMP_IF_FALSE][1] = _GUARD_IS_NOT_NONE_POP,
525
};
526
527
static const uint16_t
528
guard_ip_uop[MAX_UOP_ID + 1] = {
529
    [_PUSH_FRAME] = _GUARD_IP__PUSH_FRAME,
530
    [_RETURN_GENERATOR] = _GUARD_IP_RETURN_GENERATOR,
531
    [_RETURN_VALUE] = _GUARD_IP_RETURN_VALUE,
532
    [_YIELD_VALUE] = _GUARD_IP_YIELD_VALUE,
533
};
534
535
536
#define CONFIDENCE_RANGE 1000
537
#define CONFIDENCE_CUTOFF 333
538
539
#ifdef Py_DEBUG
540
#define DPRINTF(level, ...) \
541
    if (lltrace >= (level)) { printf(__VA_ARGS__); }
542
#else
543
#define DPRINTF(level, ...)
544
#endif
545
546
547
static inline void
548
add_to_trace(
549
    _PyJitUopBuffer *trace,
550
    uint16_t opcode,
551
    uint16_t oparg,
552
    uint64_t operand,
553
    uint32_t target)
554
{
555
    _PyUOpInstruction *inst = trace->next;
556
    inst->opcode = opcode;
557
    inst->format = UOP_FORMAT_TARGET;
558
    inst->target = target;
559
    inst->oparg = oparg;
560
    inst->operand0 = operand;
561
#ifdef Py_STATS
562
    inst->execution_count = 0;
563
#endif
564
    trace->next++;
565
}
566
567
568
#ifdef Py_DEBUG
569
#define ADD_TO_TRACE(OPCODE, OPARG, OPERAND, TARGET) \
570
    add_to_trace(trace, (OPCODE), (OPARG), (OPERAND), (TARGET)); \
571
    if (lltrace >= 2) { \
572
        printf("%4d ADD_TO_TRACE: ", uop_buffer_length(trace)); \
573
        _PyUOpPrint(uop_buffer_last(trace)); \
574
        printf("\n"); \
575
    }
576
#else
577
#define ADD_TO_TRACE(OPCODE, OPARG, OPERAND, TARGET) \
578
    add_to_trace(trace, (OPCODE), (OPARG), (OPERAND), (TARGET))
579
#endif
580
581
#define INSTR_IP(INSTR, CODE) \
582
    ((uint32_t)((INSTR) - ((_Py_CODEUNIT *)(CODE)->co_code_adaptive)))
583
584
585
static int
586
is_terminator(const _PyUOpInstruction *uop)
587
{
588
    int opcode = _PyUop_Uncached[uop->opcode];
589
    return (
590
        opcode == _EXIT_TRACE ||
591
        opcode == _DEOPT ||
592
        opcode == _JUMP_TO_TOP ||
593
        opcode == _DYNAMIC_EXIT
594
    );
595
}
596
597
/* Returns 1 on success (added to trace), 0 on trace end.
598
 */
599
// gh-142543: inlining this function causes stack overflows
600
Py_NO_INLINE int
601
_PyJit_translate_single_bytecode_to_trace(
602
    PyThreadState *tstate,
603
    _PyInterpreterFrame *frame,
604
    _Py_CODEUNIT *next_instr,
605
    int stop_tracing_opcode)
606
{
607
608
#ifdef Py_DEBUG
609
    char *python_lltrace = Py_GETENV("PYTHON_LLTRACE");
610
    int lltrace = 0;
611
    if (python_lltrace != NULL && *python_lltrace >= '0') {
612
        lltrace = *python_lltrace - '0';  // TODO: Parse an int and all that
613
    }
614
#endif
615
    _PyThreadStateImpl *_tstate = (_PyThreadStateImpl *)tstate;
616
    _PyJitTracerState *tracer = _tstate->jit_tracer_state;
617
    PyCodeObject *old_code = tracer->prev_state.instr_code;
618
    bool progress_needed = (tracer->initial_state.chain_depth % MAX_CHAIN_DEPTH) == 0;
619
    _PyBloomFilter *dependencies = &tracer->prev_state.dependencies;
620
    _PyJitUopBuffer *trace = &tracer->code_buffer;
621
622
    _Py_CODEUNIT *this_instr =  tracer->prev_state.instr;
623
    _Py_CODEUNIT *target_instr = this_instr;
624
    uint32_t target = 0;
625
626
    target = Py_IsNone((PyObject *)old_code)
627
        ? (uint32_t)(target_instr - _Py_INTERPRETER_TRAMPOLINE_INSTRUCTIONS_PTR)
628
        : INSTR_IP(target_instr, old_code);
629
630
    // Rewind EXTENDED_ARG so that we see the whole thing.
631
    // We must point to the first EXTENDED_ARG when deopting.
632
    int oparg = tracer->prev_state.instr_oparg;
633
    int opcode = this_instr->op.code;
634
    int rewind_oparg = oparg;
635
    while (rewind_oparg > 255) {
636
        rewind_oparg >>= 8;
637
        target--;
638
    }
639
640
    if (_PyOpcode_Caches[_PyOpcode_Deopt[opcode]] > 0) {
641
        uint16_t backoff = (this_instr + 1)->counter.value_and_backoff;
642
        // adaptive_counter_cooldown is a fresh specialization.
643
        // trigger_backoff_counter is what we set during tracing.
644
        // All tracing backoffs should be freshly specialized or untouched.
645
        // If not, that indicates a deopt during tracing, and
646
        // thus the "actual" instruction executed is not the one that is
647
        // in the instruction stream, but rather the deopt.
648
        // It's important we check for this, as some specializations might make
649
        // no progress (they can immediately deopt after specializing).
650
        // We do this to improve performance, as otherwise a compiled trace
651
        // will just deopt immediately.
652
        if (backoff != adaptive_counter_cooldown().value_and_backoff &&
653
            backoff != trigger_backoff_counter().value_and_backoff) {
654
            OPT_STAT_INC(trace_immediately_deopts);
655
            opcode = _PyOpcode_Deopt[opcode];
656
        }
657
    }
658
659
    // Strange control-flow
660
    bool has_dynamic_jump_taken = OPCODE_HAS_UNPREDICTABLE_JUMP(opcode) &&
661
        (next_instr != this_instr + 1 + _PyOpcode_Caches[_PyOpcode_Deopt[opcode]]);
662
663
    /* Special case the first instruction,
664
    * so that we can guarantee forward progress */
665
    if (progress_needed && uop_buffer_length(&tracer->code_buffer) < CODE_SIZE_NO_PROGRESS) {
666
        if (OPCODE_HAS_EXIT(opcode) || OPCODE_HAS_DEOPT(opcode)) {
667
            opcode = _PyOpcode_Deopt[opcode];
668
        }
669
        assert(!OPCODE_HAS_EXIT(opcode));
670
        assert(!OPCODE_HAS_DEOPT(opcode));
671
    }
672
673
    bool needs_guard_ip = OPCODE_HAS_NEEDS_GUARD_IP(opcode);
674
    if (has_dynamic_jump_taken && !needs_guard_ip) {
675
        DPRINTF(2, "Unsupported: dynamic jump taken %s\n", _PyOpcode_OpName[opcode]);
676
        goto unsupported;
677
    }
678
679
    int is_sys_tracing = (tstate->c_tracefunc != NULL) || (tstate->c_profilefunc != NULL);
680
    if (is_sys_tracing) {
681
        goto done;
682
    }
683
684
    if (stop_tracing_opcode == _DEOPT) {
685
        // gh-143183: It's important we rewind to the last known proper target.
686
        // The current target might be garbage as stop tracing usually indicates
687
        // we are in something that we can't trace.
688
        DPRINTF(2, "Told to stop tracing\n");
689
        goto unsupported;
690
    }
691
    else if (stop_tracing_opcode != 0) {
692
        assert(stop_tracing_opcode == _EXIT_TRACE);
693
        ADD_TO_TRACE(stop_tracing_opcode, 0, 0, target);
694
        goto done;
695
    }
696
697
    DPRINTF(2, "%p %d: %s(%d) %d\n", old_code, target, _PyOpcode_OpName[opcode], oparg, needs_guard_ip);
698
699
#ifdef Py_DEBUG
700
    if (oparg > 255) {
701
        assert(_Py_GetBaseCodeUnit(old_code, target).op.code == EXTENDED_ARG);
702
    }
703
#endif
704
705
    if (!tracer->prev_state.dependencies_still_valid) {
706
        goto done;
707
    }
708
709
    // This happens when a recursive call happens that we can't trace. Such as Python -> C -> Python calls
710
    // If we haven't guarded the IP, then it's untraceable.
711
    if (frame != tracer->prev_state.instr_frame && !needs_guard_ip) {
712
        DPRINTF(2, "Unsupported: unguardable jump taken\n");
713
        goto unsupported;
714
    }
715
716
    if (oparg > 0xFFFF) {
717
        DPRINTF(2, "Unsupported: oparg too large\n");
718
        unsupported:
719
        {
720
            // Rewind to previous instruction and replace with _EXIT_TRACE.
721
            _PyUOpInstruction *curr = uop_buffer_last(trace);
722
            while (curr->opcode != _SET_IP && uop_buffer_length(trace) > 2) {
723
                trace->next--;
724
                curr = uop_buffer_last(trace);
725
            }
726
            assert(curr->opcode == _SET_IP || uop_buffer_length(trace) == 2);
727
            if (curr->opcode == _SET_IP) {
728
                int32_t old_target = (int32_t)uop_get_target(curr);
729
                curr->opcode = _DEOPT;
730
                curr->format = UOP_FORMAT_TARGET;
731
                curr->target = old_target;
732
            }
733
            goto done;
734
        }
735
    }
736
737
    if (opcode == NOP) {
738
        return 1;
739
    }
740
741
    if (opcode == JUMP_FORWARD) {
742
        return 1;
743
    }
744
745
    if (opcode == EXTENDED_ARG) {
746
        return 1;
747
    }
748
749
    // One for possible _DEOPT, one because _CHECK_VALIDITY itself might _DEOPT
750
    trace->end -= 2;
751
752
    const struct opcode_macro_expansion *expansion = &_PyOpcode_macro_expansion[opcode];
753
754
    assert(opcode != ENTER_EXECUTOR && opcode != EXTENDED_ARG);
755
    assert(!_PyErr_Occurred(tstate));
756
757
758
    if (OPCODE_HAS_EXIT(opcode)) {
759
        // Make space for side exit
760
        trace->end--;
761
    }
762
    if (OPCODE_HAS_ERROR(opcode)) {
763
        // Make space for error stub
764
        trace->end--;
765
    }
766
    if (OPCODE_HAS_DEOPT(opcode)) {
767
        // Make space for side exit
768
        trace->end--;
769
    }
770
771
    // _GUARD_IP leads to an exit.
772
    trace->end -= needs_guard_ip;
773
774
    int space_needed = expansion->nuops + needs_guard_ip + 2 + (!OPCODE_HAS_NO_SAVE_IP(opcode));
775
    if (uop_buffer_remaining_space(trace) < space_needed) {
776
        DPRINTF(2, "No room for expansions and guards (need %d, got %d)\n",
777
                space_needed, uop_buffer_remaining_space(trace));
778
        OPT_STAT_INC(trace_too_long);
779
        goto done;
780
    }
781
782
    ADD_TO_TRACE(_CHECK_VALIDITY, 0, 0, target);
783
784
    if (!OPCODE_HAS_NO_SAVE_IP(opcode)) {
785
        ADD_TO_TRACE(_SET_IP, 0, (uintptr_t)target_instr, target);
786
    }
787
788
    // Can be NULL for the entry frame.
789
    if (old_code != NULL) {
790
        _Py_BloomFilter_Add(dependencies, old_code);
791
    }
792
793
    switch (opcode) {
794
        case POP_JUMP_IF_NONE:
795
        case POP_JUMP_IF_NOT_NONE:
796
        case POP_JUMP_IF_FALSE:
797
        case POP_JUMP_IF_TRUE:
798
        {
799
            _Py_CODEUNIT *computed_next_instr_without_modifiers = target_instr + 1 + _PyOpcode_Caches[_PyOpcode_Deopt[opcode]];
800
            _Py_CODEUNIT *computed_next_instr = computed_next_instr_without_modifiers + (computed_next_instr_without_modifiers->op.code == NOT_TAKEN);
801
            _Py_CODEUNIT *computed_jump_instr = computed_next_instr_without_modifiers + oparg;
802
            assert(next_instr == computed_next_instr || next_instr == computed_jump_instr);
803
            int jump_happened = computed_jump_instr == next_instr;
804
            assert(jump_happened == (target_instr[1].cache & 1));
805
            uint32_t uopcode = BRANCH_TO_GUARD[opcode - POP_JUMP_IF_FALSE][jump_happened];
806
            ADD_TO_TRACE(uopcode, 0, 0, INSTR_IP(jump_happened ? computed_next_instr : computed_jump_instr, old_code));
807
            break;
808
        }
809
        case JUMP_BACKWARD_JIT:
810
            // This is possible as the JIT might have re-activated after it was disabled
811
        case JUMP_BACKWARD_NO_JIT:
812
        case JUMP_BACKWARD:
813
            ADD_TO_TRACE(_CHECK_PERIODIC, 0, 0, target);
814
            _Py_FALLTHROUGH;
815
        case JUMP_BACKWARD_NO_INTERRUPT:
816
        {
817
            if ((next_instr != tracer->initial_state.close_loop_instr) &&
818
                (next_instr != tracer->initial_state.start_instr) &&
819
                uop_buffer_length(&tracer->code_buffer) > CODE_SIZE_NO_PROGRESS &&
820
                // For side exits, we don't want to terminate them early.
821
                tracer->initial_state.exit == NULL &&
822
                // These are coroutines, and we want to unroll those usually.
823
                opcode != JUMP_BACKWARD_NO_INTERRUPT) {
824
                // We encountered a JUMP_BACKWARD but not to the top of our own loop.
825
                // We don't want to continue tracing as we might get stuck in the
826
                // inner loop. Instead, end the trace where the executor of the
827
                // inner loop might start and let the traces rejoin.
828
                OPT_STAT_INC(inner_loop);
829
                ADD_TO_TRACE(_EXIT_TRACE, 0, 0, target);
830
                uop_buffer_last(trace)->operand1 = true; // is_control_flow
831
                DPRINTF(2, "JUMP_BACKWARD not to top ends trace %p %p %p\n", next_instr,
832
                    tracer->initial_state.close_loop_instr, tracer->initial_state.start_instr);
833
                goto done;
834
            }
835
            break;
836
        }
837
838
        case RESUME:
839
        case RESUME_CHECK:
840
            /* Use a special tier 2 version of RESUME_CHECK to allow traces to
841
             *  start with RESUME_CHECK */
842
            ADD_TO_TRACE(_TIER2_RESUME_CHECK, 0, 0, target);
843
            break;
844
        default:
845
        {
846
            const struct opcode_macro_expansion *expansion = &_PyOpcode_macro_expansion[opcode];
847
            // Reserve space for nuops (+ _SET_IP + _EXIT_TRACE)
848
            int nuops = expansion->nuops;
849
            if (nuops == 0) {
850
                DPRINTF(2, "Unsupported opcode %s\n", _PyOpcode_OpName[opcode]);
851
                goto unsupported;
852
            }
853
            assert(nuops > 0);
854
            uint32_t orig_oparg = oparg;  // For OPARG_TOP/BOTTOM
855
            uint32_t orig_target = target;
856
            for (int i = 0; i < nuops; i++) {
857
                oparg = orig_oparg;
858
                target = orig_target;
859
                uint32_t uop = expansion->uops[i].uop;
860
                uint64_t operand = 0;
861
                // Add one to account for the actual opcode/oparg pair:
862
                int offset = expansion->uops[i].offset + 1;
863
                switch (expansion->uops[i].size) {
864
                    case OPARG_SIMPLE:
865
                        assert(opcode != _JUMP_BACKWARD_NO_INTERRUPT && opcode != JUMP_BACKWARD);
866
                        break;
867
                    case OPARG_CACHE_1:
868
                        operand = read_u16(&this_instr[offset].cache);
869
                        break;
870
                    case OPARG_CACHE_2:
871
                        operand = read_u32(&this_instr[offset].cache);
872
                        break;
873
                    case OPARG_CACHE_4:
874
                        operand = read_u64(&this_instr[offset].cache);
875
                        break;
876
                    case OPARG_TOP:  // First half of super-instr
877
                        assert(orig_oparg <= 255);
878
                        oparg = orig_oparg >> 4;
879
                        break;
880
                    case OPARG_BOTTOM:  // Second half of super-instr
881
                        assert(orig_oparg <= 255);
882
                        oparg = orig_oparg & 0xF;
883
                        break;
884
                    case OPARG_SAVE_RETURN_OFFSET:  // op=_SAVE_RETURN_OFFSET; oparg=return_offset
885
                        oparg = offset;
886
                        assert(uop == _SAVE_RETURN_OFFSET);
887
                        break;
888
                    case OPARG_REPLACED:
889
                        uop = _PyUOp_Replacements[uop];
890
                        assert(uop != 0);
891
892
                        uint32_t next_inst = target + 1 + _PyOpcode_Caches[_PyOpcode_Deopt[opcode]];
893
                        if (uop == _TIER2_RESUME_CHECK) {
894
                            target = next_inst;
895
                        }
896
                        else {
897
                            int extended_arg = orig_oparg > 255;
898
                            uint32_t jump_target = next_inst + orig_oparg + extended_arg;
899
                            assert(_Py_GetBaseCodeUnit(old_code, jump_target).op.code == END_FOR);
900
                            assert(_Py_GetBaseCodeUnit(old_code, jump_target+1).op.code == POP_ITER);
901
                            if (is_for_iter_test[uop]) {
902
                                target = jump_target + 1;
903
                            }
904
                        }
905
                        break;
906
                    case OPERAND1_1:
907
                        assert(uop_buffer_last(trace)->opcode == uop);
908
                        operand = read_u16(&this_instr[offset].cache);
909
                        uop_buffer_last(trace)->operand1 = operand;
910
                        continue;
911
                    case OPERAND1_2:
912
                        assert(uop_buffer_last(trace)->opcode == uop);
913
                        operand = read_u32(&this_instr[offset].cache);
914
                        uop_buffer_last(trace)->operand1 = operand;
915
                        continue;
916
                    case OPERAND1_4:
917
                        assert(uop_buffer_last(trace)->opcode == uop);
918
                        operand = read_u64(&this_instr[offset].cache);
919
                        uop_buffer_last(trace)->operand1 = operand;
920
                        continue;
921
                    default:
922
                        fprintf(stderr,
923
                                "opcode=%d, oparg=%d; nuops=%d, i=%d; size=%d, offset=%d\n",
924
                                opcode, oparg, nuops, i,
925
                                expansion->uops[i].size,
926
                                expansion->uops[i].offset);
927
                        Py_FatalError("garbled expansion");
928
                }
929
                if (uop == _PUSH_FRAME || uop == _RETURN_VALUE || uop == _RETURN_GENERATOR || uop == _YIELD_VALUE) {
930
                    PyCodeObject *new_code = (PyCodeObject *)PyStackRef_AsPyObjectBorrow(frame->f_executable);
931
                    if (new_code != NULL && !Py_IsNone((PyObject*)new_code)) {
932
                        _Py_BloomFilter_Add(dependencies, new_code);
933
                    }
934
                    ADD_TO_TRACE(uop, oparg, operand, target);
935
                    uop_buffer_last(trace)->operand1 = PyStackRef_IsNone(frame->f_executable) ? 2 : ((int)(frame->stackpointer - _PyFrame_Stackbase(frame)));
936
                    break;
937
                }
938
                if (uop == _BINARY_OP_INPLACE_ADD_UNICODE) {
939
                    assert(i + 1 == nuops);
940
                    _Py_CODEUNIT *next = target_instr + 1 + _PyOpcode_Caches[_PyOpcode_Deopt[opcode]];
941
                    assert(next->op.code == STORE_FAST);
942
                    operand = next->op.arg;
943
                }
944
                else if (_PyUop_Flags[uop] & HAS_RECORDS_VALUE_FLAG) {
945
                    PyObject *recorded_value = tracer->prev_state.recorded_value;
946
                    tracer->prev_state.recorded_value = NULL;
947
                    operand = (uintptr_t)recorded_value;
948
                }
949
                // All other instructions
950
                ADD_TO_TRACE(uop, oparg, operand, target);
951
            }
952
            break;
953
        }  // End default
954
955
    }  // End switch (opcode)
956
957
    if (needs_guard_ip) {
958
        uint16_t guard_ip = guard_ip_uop[uop_buffer_last(trace)->opcode];
959
        if (guard_ip == 0) {
960
            DPRINTF(1, "Unknown uop needing guard ip %s\n", _PyOpcode_uop_name[uop_buffer_last(trace)->opcode]);
961
            Py_UNREACHABLE();
962
        }
963
        PyObject *code = PyStackRef_AsPyObjectBorrow(frame->f_executable);
964
        Py_INCREF(code);
965
        ADD_TO_TRACE(_RECORD_CODE, 0, (uintptr_t)code, 0);
966
        ADD_TO_TRACE(guard_ip, 0, (uintptr_t)next_instr, 0);
967
        if (PyCode_Check(code)) {
968
            ADD_TO_TRACE(_GUARD_CODE, 0, ((PyCodeObject *)code)->co_version, 0);
969
        }
970
    }
971
    // Loop back to the start
972
    int is_first_instr = tracer->initial_state.close_loop_instr == next_instr ||
973
        tracer->initial_state.start_instr == next_instr;
974
    if (is_first_instr && uop_buffer_length(trace) > CODE_SIZE_NO_PROGRESS) {
975
        if (needs_guard_ip) {
976
            ADD_TO_TRACE(_SET_IP, 0, (uintptr_t)next_instr, 0);
977
        }
978
        ADD_TO_TRACE(_JUMP_TO_TOP, 0, 0, 0);
979
        goto done;
980
    }
981
    DPRINTF(2, "Trace continuing\n");
982
    return 1;
983
done:
984
    DPRINTF(2, "Trace done\n");
985
    if (!is_terminator(uop_buffer_last(trace))) {
986
        ADD_TO_TRACE(_EXIT_TRACE, 0, 0, target);
987
        uop_buffer_last(trace)->operand1 = true; // is_control_flow
988
    }
989
    return 0;
990
}
991
992
// Returns 0 for do not enter tracing, 1 on enter tracing.
// gh-142543: inlining this function causes stack overflows
Py_NO_INLINE int
_PyJit_TryInitializeTracing(
    PyThreadState *tstate, _PyInterpreterFrame *frame, _Py_CODEUNIT *curr_instr,
    _Py_CODEUNIT *start_instr, _Py_CODEUNIT *close_loop_instr, _PyStackRef *stack_pointer, int chain_depth,
    _PyExitData *exit, int oparg, _PyExecutorObject *current_executor)
{
    _PyThreadStateImpl *_tstate = (_PyThreadStateImpl *)tstate;
    // Lazily allocate the per-thread tracer state on first use.
    if (_tstate->jit_tracer_state == NULL) {
        _tstate->jit_tracer_state = (_PyJitTracerState *)_PyObject_VirtualAlloc(sizeof(_PyJitTracerState));
        if (_tstate->jit_tracer_state == NULL) {
            // Don't error, just go to next instruction.
            return 0;
        }
        _tstate->jit_tracer_state->is_tracing = false;
    }
    _PyJitTracerState *tracer = _tstate->jit_tracer_state;
    // A recursive trace.
    if (tracer->is_tracing) {
        return 0;
    }
    // Opargs wider than 16 bits cannot be encoded in a uop (same limit is
    // enforced per-instruction while tracing).
    if (oparg > 0xFFFF) {
        return 0;
    }
    PyObject *func = PyStackRef_AsPyObjectBorrow(frame->f_funcobj);
    if (func == NULL) {
        return 0;
    }
    PyCodeObject *code = _PyFrame_GetCode(frame);
#ifdef Py_DEBUG
    char *python_lltrace = Py_GETENV("PYTHON_LLTRACE");
    int lltrace = 0;
    if (python_lltrace != NULL && *python_lltrace >= '0') {
        lltrace = *python_lltrace - '0';  // TODO: Parse an int and all that
    }
    DPRINTF(2,
        "Tracing %s (%s:%d) at byte offset %d at chain depth %d\n",
        PyUnicode_AsUTF8(code->co_qualname),
        PyUnicode_AsUTF8(code->co_filename),
        code->co_firstlineno,
        2 * INSTR_IP(close_loop_instr, code),
        chain_depth);
#endif
    /* Set up tracing buffer*/
    _PyJitUopBuffer *trace = &tracer->code_buffer;
    uop_buffer_init(trace, &tracer->uop_array[0], UOP_MAX_TRACE_LENGTH);
    // Every trace starts with _START_EXECUTOR + _MAKE_WARM (CODE_SIZE_EMPTY).
    ADD_TO_TRACE(_START_EXECUTOR, 0, (uintptr_t)start_instr, INSTR_IP(start_instr, code));
    ADD_TO_TRACE(_MAKE_WARM, 0, 0, 0);

    // Remember where the trace begins so the tracer can detect when the
    // loop closes back on itself.
    tracer->initial_state.start_instr = start_instr;
    tracer->initial_state.close_loop_instr = close_loop_instr;
    // Strong references keep the code object, function and (optional)
    // current executor alive for the duration of the trace; they are
    // released in _PyJit_FinalizeTracing().
    tracer->initial_state.code = (PyCodeObject *)Py_NewRef(code);
    tracer->initial_state.func = (PyFunctionObject *)Py_NewRef(func);
    tracer->initial_state.executor = (_PyExecutorObject *)Py_XNewRef(current_executor);
    tracer->initial_state.exit = exit;
    tracer->initial_state.stack_depth = stack_pointer - _PyFrame_Stackbase(frame);
    tracer->initial_state.chain_depth = chain_depth;
    tracer->prev_state.dependencies_still_valid = true;
    tracer->prev_state.instr_code = (PyCodeObject *)Py_NewRef(_PyFrame_GetCode(frame));
    tracer->prev_state.instr = curr_instr;
    tracer->prev_state.instr_frame = frame;
    tracer->prev_state.instr_oparg = oparg;
    tracer->prev_state.instr_stacklevel = tracer->initial_state.stack_depth;
    tracer->prev_state.recorded_value = NULL;
    // If the triggering instruction has a value-recording hook, capture the
    // runtime value now so the first traced uop can consume it.
    uint8_t record_func_index = _PyOpcode_RecordFunctionIndices[curr_instr->op.code];
    if (record_func_index) {
        _Py_RecordFuncPtr record_func = _PyOpcode_RecordFunctions[record_func_index];
        record_func(frame, stack_pointer, oparg, &tracer->prev_state.recorded_value);
    }
    // Tracing is entered either from a JUMP_BACKWARD_JIT or from a side exit.
    assert(curr_instr->op.code == JUMP_BACKWARD_JIT || (exit != NULL));
    tracer->initial_state.jump_backward_instr = curr_instr;

    // Re-arm the backoff counter of the loop-closing instruction, if it has
    // an inline cache to hold one.
    if (_PyOpcode_Caches[_PyOpcode_Deopt[close_loop_instr->op.code]]) {
        close_loop_instr[1].counter = trigger_backoff_counter();
    }
    _Py_BloomFilter_Init(&tracer->prev_state.dependencies);
    tracer->is_tracing = true;
    return 1;
}
1072
1073
/* Tear down the per-thread tracing session started by
 * _PyJit_TryInitializeTracing().
 *
 * err > 0 means tracing succeeded (an executor was produced): the
 * triggering instruction/exit gets a fresh initial backoff counter.
 * err <= 0 means tracing failed or was abandoned: the counter is
 * restarted so we retry later with increased backoff.
 */
Py_NO_INLINE void
_PyJit_FinalizeTracing(PyThreadState *tstate, int err)
{
    _PyThreadStateImpl *_tstate = (_PyThreadStateImpl *)tstate;
    _PyJitTracerState *tracer = _tstate->jit_tracer_state;
    // Deal with backoffs
    assert(tracer != NULL);
    _PyExitData *exit = tracer->initial_state.exit;
    if (exit == NULL) {
        // Trace was started from a JUMP_BACKWARD_JIT instruction.
        // We hold a strong reference to the code object, so the instruction won't be freed.
        if (err <= 0) {
            _Py_BackoffCounter counter = tracer->initial_state.jump_backward_instr[1].counter;
            tracer->initial_state.jump_backward_instr[1].counter = restart_backoff_counter(counter);
        }
        else {
            tracer->initial_state.jump_backward_instr[1].counter = initial_jump_backoff_counter(&tstate->interp->opt_config);
        }
    }
    else if (tracer->initial_state.executor->vm_data.valid) {
        // Trace was started from a side exit of an existing executor.
        // Likewise, we hold a strong reference to the executor containing this exit, so the exit is guaranteed
        // to be valid to access.
        if (err <= 0) {
            exit->temperature = restart_backoff_counter(exit->temperature);
        }
        else {
            exit->temperature = initial_temperature_backoff_counter(&tstate->interp->opt_config);
        }
    }
    // Drop the strong references taken when tracing started and reset the
    // uop buffer so the tracer is ready for the next session.
    Py_CLEAR(tracer->initial_state.code);
    Py_CLEAR(tracer->initial_state.func);
    Py_CLEAR(tracer->initial_state.executor);
    Py_CLEAR(tracer->prev_state.instr_code);
    Py_CLEAR(tracer->prev_state.recorded_value);
    uop_buffer_init(&tracer->code_buffer, &tracer->uop_array[0], UOP_MAX_TRACE_LENGTH);
    tracer->is_tracing = false;
}
1109
1110
void
1111
_PyJit_TracerFree(_PyThreadStateImpl *_tstate)
1112
{
1113
    if (_tstate->jit_tracer_state != NULL) {
1114
        _PyObject_VirtualFree(_tstate->jit_tracer_state, sizeof(_PyJitTracerState));
1115
        _tstate->jit_tracer_state = NULL;
1116
    }
1117
}
1118
1119
#undef RESERVE
1120
#undef INSTR_IP
1121
#undef ADD_TO_TRACE
1122
#undef DPRINTF
1123
1124
#define UNSET_BIT(array, bit) (array[(bit)>>5] &= ~(1<<((bit)&31)))
1125
#define SET_BIT(array, bit) (array[(bit)>>5] |= (1<<((bit)&31)))
1126
#define BIT_IS_SET(array, bit) (array[(bit)>>5] & (1<<((bit)&31)))
1127
1128
/* Count the number of unused uops and exits
1129
*/
1130
static int
1131
count_exits(_PyUOpInstruction *buffer, int length)
1132
{
1133
    int exit_count = 0;
1134
    for (int i = 0; i < length; i++) {
1135
        uint16_t base_opcode = _PyUop_Uncached[buffer[i].opcode];
1136
        if (base_opcode == _EXIT_TRACE || base_opcode == _DYNAMIC_EXIT) {
1137
            exit_count++;
1138
        }
1139
    }
1140
    return exit_count;
1141
}
1142
1143
/* The number of cached registers at any exit (`EXIT_IF` or `DEOPT_IF`)
1144
 * This is the number of cached at entries at start, unless the uop is
1145
 * marked as `exit_depth_is_output` in which case it is the number of
1146
 * cached entries at the end */
1147
static int
1148
get_cached_entries_for_side_exit(_PyUOpInstruction *inst)
1149
{
1150
    // Maybe add another generated table for this?
1151
    int base_opcode = _PyUop_Uncached[inst->opcode];
1152
    assert(base_opcode != 0);
1153
    for (int i = 0; i <= MAX_CACHED_REGISTER; i++) {
1154
        const _PyUopTOSentry *entry = &_PyUop_Caching[base_opcode].entries[i];
1155
        if (entry->opcode == inst->opcode) {
1156
            return entry->exit;
1157
        }
1158
    }
1159
    Py_UNREACHABLE();
1160
}
1161
1162
/* Initialize an exit-stub uop (deopt/exit/error) at `inst`.
 * `opcode` must be a cached-register variant; `target` is the tier-1
 * instruction offset to resume at. */
static void make_exit(_PyUOpInstruction *inst, int opcode, int target, bool is_control_flow)
{
    assert(opcode > MAX_UOP_ID && opcode <= MAX_UOP_REGS_ID);
    inst->format = UOP_FORMAT_TARGET;
    inst->opcode = opcode;
    inst->oparg = 0;
    inst->target = target;
    inst->operand0 = 0;
    inst->operand1 = is_control_flow;
#ifdef Py_STATS
    inst->execution_count = 0;
#endif
}
1175
1176
/* Convert implicit exits, errors and deopts
 * into explicit ones.
 *
 * First compacts the buffer by removing _NOPs, then appends shared exit
 * and error stubs after the trace and points each instruction's
 * jump_target/error_target at them.  Consecutive instructions with the
 * same exit/error destination share a single stub.
 * Returns the new total length (trace + stubs). */
static int
prepare_for_execution(_PyUOpInstruction *buffer, int length)
{
    // Most-recently emitted stubs; reused while the target stays the same.
    int32_t current_jump = -1;
    int32_t current_jump_target = -1;
    int32_t current_error = -1;
    int32_t current_error_target = -1;
    int32_t current_popped = -1;
    int32_t current_exit_op = -1;
    /* Leaving in NOPs slows down the interpreter and messes up the stats */
    _PyUOpInstruction *copy_to = &buffer[0];
    for (int i = 0; i < length; i++) {
        _PyUOpInstruction *inst = &buffer[i];
        if (inst->opcode != _NOP) {
            if (copy_to != inst) {
                *copy_to = *inst;
            }
            copy_to++;
        }
    }
    length = (int)(copy_to - buffer);
    // Stubs are appended starting right after the compacted trace.
    int next_spare = length;
    for (int i = 0; i < length; i++) {
        _PyUOpInstruction *inst = &buffer[i];
        int base_opcode = _PyUop_Uncached[inst->opcode];
        assert(inst->opcode != _NOP);
        int32_t target = (int32_t)uop_get_target(inst);
        uint16_t exit_flags = _PyUop_Flags[base_opcode] & (HAS_EXIT_FLAG | HAS_DEOPT_FLAG | HAS_PERIODIC_FLAG);
        if (exit_flags) {
            // Choose the kind of stub this instruction deopts/exits to.
            uint16_t base_exit_op = _EXIT_TRACE;
            if (exit_flags & HAS_DEOPT_FLAG) {
                base_exit_op = _DEOPT;
            }
            else if (exit_flags & HAS_PERIODIC_FLAG) {
                base_exit_op = _HANDLE_PENDING_AND_DEOPT;
            }
            int32_t jump_target = target;
            if (
                base_opcode == _GUARD_IP__PUSH_FRAME ||
                base_opcode == _GUARD_IP_RETURN_VALUE ||
                base_opcode == _GUARD_IP_YIELD_VALUE ||
                base_opcode == _GUARD_IP_RETURN_GENERATOR ||
                base_opcode == _GUARD_CODE
            ) {
                // IP/code guards can fail to arbitrary locations: dynamic exit.
                base_exit_op = _DYNAMIC_EXIT;
            }
            // Pick the stub variant matching the number of cached registers
            // live at the exit point.
            int exit_depth = get_cached_entries_for_side_exit(inst);
            assert(_PyUop_Caching[base_exit_op].entries[exit_depth].opcode > 0);
            int16_t exit_op = _PyUop_Caching[base_exit_op].entries[exit_depth].opcode;
            bool is_control_flow = (base_opcode == _GUARD_IS_FALSE_POP || base_opcode == _GUARD_IS_TRUE_POP || is_for_iter_test[base_opcode]);
            // Emit a new stub only if we can't reuse the previous one.
            if (jump_target != current_jump_target || current_exit_op != exit_op) {
                make_exit(&buffer[next_spare], exit_op, jump_target, is_control_flow);
                current_exit_op = exit_op;
                current_jump_target = jump_target;
                current_jump = next_spare;
                next_spare++;
            }
            buffer[i].jump_target = current_jump;
            buffer[i].format = UOP_FORMAT_JUMP;
        }
        if (_PyUop_Flags[base_opcode] & HAS_ERROR_FLAG) {
            // The error stub must pop the values the failing uop consumed,
            // unless the uop is flagged as erroring without popping.
            int popped = (_PyUop_Flags[base_opcode] & HAS_ERROR_NO_POP_FLAG) ?
                0 : _PyUop_num_popped(base_opcode, inst->oparg);
            if (target != current_error_target || popped != current_popped) {
                current_popped = popped;
                current_error = next_spare;
                current_error_target = target;
                make_exit(&buffer[next_spare], _ERROR_POP_N_r00, 0, false);
                buffer[next_spare].operand0 = target;
                next_spare++;
            }
            buffer[i].error_target = current_error;
            if (buffer[i].format == UOP_FORMAT_TARGET) {
                buffer[i].format = UOP_FORMAT_JUMP;
                buffer[i].jump_target = 0;
            }
        }
        if (base_opcode == _JUMP_TO_TOP) {
            // Loop back to index 1, just past _START_EXECUTOR.
            assert(_PyUop_Uncached[buffer[0].opcode] == _START_EXECUTOR);
            buffer[i].format = UOP_FORMAT_JUMP;
            buffer[i].jump_target = 1;
        }
    }
    return next_spare;
}
1263
1264
/* Executor side exits */
1265
1266
static _PyExecutorObject *
1267
allocate_executor(int exit_count, int length)
1268
{
1269
    int size = exit_count*sizeof(_PyExitData) + length*sizeof(_PyUOpInstruction);
1270
    _PyExecutorObject *res = PyObject_GC_NewVar(_PyExecutorObject, &_PyUOpExecutor_Type, size);
1271
    if (res == NULL) {
1272
        return NULL;
1273
    }
1274
    res->trace = (_PyUOpInstruction *)(res->exits + exit_count);
1275
    res->code_size = length;
1276
    res->exit_count = exit_count;
1277
    return res;
1278
}
1279
1280
#ifdef Py_DEBUG
1281
1282
#define CHECK(PRED) \
1283
if (!(PRED)) { \
1284
    printf(#PRED " at %d\n", i); \
1285
    assert(0); \
1286
}
1287
1288
static int
1289
target_unused(int opcode)
1290
{
1291
    return (_PyUop_Flags[opcode] & (HAS_ERROR_FLAG | HAS_EXIT_FLAG | HAS_DEOPT_FLAG)) == 0;
1292
}
1293
1294
/* Debug-only validation of a freshly built executor:
 * - exit targets fit in their storage,
 * - the trace begins with a valid entry uop,
 * - every instruction is a cached-register variant with a known base uop,
 * - jump and error targets stay within the executor,
 * - a terminator is reached, and everything after it is a stub. */
static void
sanity_check(_PyExecutorObject *executor)
{
    for (uint32_t i = 0; i < executor->exit_count; i++) {
        _PyExitData *exit = &executor->exits[i];
        CHECK(exit->target < (1 << 25));
    }
    bool ended = false;
    uint32_t i = 0;
    CHECK(_PyUop_Uncached[executor->trace[0].opcode] == _START_EXECUTOR ||
        _PyUop_Uncached[executor->trace[0].opcode] == _COLD_EXIT ||
        _PyUop_Uncached[executor->trace[0].opcode] == _COLD_DYNAMIC_EXIT);
    for (; i < executor->code_size; i++) {
        const _PyUOpInstruction *inst = &executor->trace[i];
        uint16_t opcode = inst->opcode;
        uint16_t base_opcode = _PyUop_Uncached[opcode];
        // Executors only contain cached-register variants (> MAX_UOP_ID).
        CHECK(opcode > MAX_UOP_ID);
        CHECK(opcode <= MAX_UOP_REGS_ID);
        CHECK(base_opcode <= MAX_UOP_ID);
        CHECK(base_opcode != 0);
        switch(inst->format) {
            case UOP_FORMAT_TARGET:
                // TARGET format means neither jump nor error edges are used.
                CHECK(target_unused(base_opcode));
                break;
            case UOP_FORMAT_JUMP:
                CHECK(inst->jump_target < executor->code_size);
                break;
        }
        if (_PyUop_Flags[base_opcode] & HAS_ERROR_FLAG) {
            CHECK(inst->format == UOP_FORMAT_JUMP);
            CHECK(inst->error_target < executor->code_size);
        }
        if (is_terminator(inst)) {
            ended = true;
            i++;
            break;
        }
    }
    CHECK(ended);
    // Everything after the terminator must be an exit/error stub.
    for (; i < executor->code_size; i++) {
        const _PyUOpInstruction *inst = &executor->trace[i];
        uint16_t base_opcode = _PyUop_Uncached[inst->opcode];
        CHECK(
            base_opcode == _DEOPT ||
            base_opcode == _HANDLE_PENDING_AND_DEOPT ||
            base_opcode == _EXIT_TRACE ||
            base_opcode == _ERROR_POP_N ||
            base_opcode == _DYNAMIC_EXIT);
    }
}
1344
1345
#undef CHECK
1346
#endif
1347
1348
/* Makes an executor from a buffer of uops.
 * Account for the buffer having gaps and NOPs by computing a "used"
 * bit vector and only copying the used uops. Here "used" means reachable
 * and not a NOP.
 *
 * Copies the trace into the new executor (back to front), wiring each
 * _EXIT_TRACE/_DYNAMIC_EXIT uop to its _PyExitData entry, then registers
 * dependencies and (under _Py_JIT) compiles the trace to machine code.
 * Returns NULL on allocation or JIT-compilation failure. */
static _PyExecutorObject *
make_executor_from_uops(_PyThreadStateImpl *tstate, _PyUOpInstruction *buffer, int length, const _PyBloomFilter *dependencies)
{
    int exit_count = count_exits(buffer, length);
    _PyExecutorObject *executor = allocate_executor(exit_count, length);
    if (executor == NULL) {
        return NULL;
    }

    /* Initialize exits */
    int chain_depth = tstate->jit_tracer_state->initial_state.chain_depth;
    _PyExecutorObject *cold = _PyExecutor_GetColdExecutor();
    _PyExecutorObject *cold_dynamic = _PyExecutor_GetColdDynamicExecutor();
    cold->vm_data.chain_depth = chain_depth;
    PyInterpreterState *interp = tstate->base.interp;
    for (int i = 0; i < exit_count; i++) {
        executor->exits[i].index = i;
        executor->exits[i].temperature = initial_temperature_backoff_counter(&interp->opt_config);
    }
    // Copy backwards so exits are assigned in reverse; next_exit counts down
    // to match the order exits appear in the buffer.
    int next_exit = exit_count-1;
    _PyUOpInstruction *dest = (_PyUOpInstruction *)&executor->trace[length];
    assert(_PyUop_Uncached[buffer[0].opcode] == _START_EXECUTOR);
    // _START_EXECUTOR's operand points back at its own executor.
    buffer[0].operand0 = (uint64_t)executor;
    for (int i = length-1; i >= 0; i--) {
        uint16_t base_opcode = _PyUop_Uncached[buffer[i].opcode];
        dest--;
        *dest = buffer[i];
        if (base_opcode == _EXIT_TRACE || base_opcode == _DYNAMIC_EXIT) {
            _PyExitData *exit = &executor->exits[next_exit];
            exit->target = buffer[i].target;
            // The exit uop carries a pointer to its exit data; the exit
            // initially leads to the shared cold executor until it warms up.
            dest->operand0 = (uint64_t)exit;
            exit->executor = base_opcode == _EXIT_TRACE ? cold : cold_dynamic;
            exit->is_dynamic = (char)(base_opcode == _DYNAMIC_EXIT);
            exit->is_control_flow = (char)buffer[i].operand1;
            next_exit--;
        }
    }
    assert(next_exit == -1);
    assert(dest == executor->trace);
    assert(_PyUop_Uncached[dest->opcode] == _START_EXECUTOR);
    // Note: we MUST track it here before any Py_DECREF(executor) or
    // linking of executor. Otherwise, the GC tries to untrack a
    // still untracked object during dealloc.
    _PyObject_GC_TRACK(executor);
    _Py_ExecutorInit(executor, dependencies);
#ifdef Py_DEBUG
    char *python_lltrace = Py_GETENV("PYTHON_LLTRACE");
    int lltrace = 0;
    if (python_lltrace != NULL && *python_lltrace >= '0') {
        lltrace = *python_lltrace - '0';  // TODO: Parse an int and all that
    }
    if (lltrace >= 2) {
        printf("Optimized trace (length %d):\n", length);
        for (int i = 0; i < length; i++) {
            printf("%4d OPTIMIZED: ", i);
            _PyUOpPrint(&executor->trace[i]);
            printf("\n");
        }
    }
    sanity_check(executor);
#endif
#ifdef _Py_JIT
    executor->jit_code = NULL;
    executor->jit_size = 0;
    // This is initialized to false so we can prevent the executor
    // from being immediately detected as cold and invalidated.
    executor->vm_data.cold = false;
    if (_PyJIT_Compile(executor, executor->trace, length)) {
        Py_DECREF(executor);
        return NULL;
    }
#endif
    return executor;
}
1427
1428
#ifdef Py_STATS
1429
/* Returns the effective trace length.
1430
 * Ignores NOPs and trailing exit and error handling.*/
1431
int effective_trace_length(_PyUOpInstruction *buffer, int length)
1432
{
1433
    int nop_count = 0;
1434
    for (int i = 0; i < length; i++) {
1435
        int opcode = buffer[i].opcode;
1436
        if (opcode == _NOP) {
1437
            nop_count++;
1438
        }
1439
        if (is_terminator(&buffer[i])) {
1440
            return i+1-nop_count;
1441
        }
1442
    }
1443
    Py_FatalError("No terminating instruction");
1444
    Py_UNREACHABLE();
1445
}
1446
#endif
1447
1448
1449
static int
1450
stack_allocate(_PyUOpInstruction *buffer, _PyUOpInstruction *output, int length)
1451
{
1452
    assert(buffer[0].opcode == _START_EXECUTOR);
1453
    /* The input buffer and output buffers will overlap.
1454
       Make sure that we can move instructions to the output
1455
       without overwriting the input. */
1456
    if (buffer == output) {
1457
        // This can only happen if optimizer has not been run
1458
        for (int i = 0; i < length; i++) {
1459
            buffer[i + UOP_MAX_TRACE_LENGTH] = buffer[i];
1460
        }
1461
        buffer += UOP_MAX_TRACE_LENGTH;
1462
    }
1463
    else {
1464
        assert(output + UOP_MAX_TRACE_LENGTH == buffer);
1465
    }
1466
    int depth = 0;
1467
    _PyUOpInstruction *write = output;
1468
    for (int i = 0; i < length; i++) {
1469
        int uop = buffer[i].opcode;
1470
        if (uop == _NOP) {
1471
            continue;
1472
        }
1473
        int new_depth = _PyUop_Caching[uop].best[depth];
1474
        if (new_depth != depth) {
1475
            write->opcode = _PyUop_SpillsAndReloads[depth][new_depth];
1476
            assert(write->opcode != 0);
1477
            write->format = UOP_FORMAT_TARGET;
1478
            write->oparg = 0;
1479
            write->target = 0;
1480
            write++;
1481
            depth = new_depth;
1482
        }
1483
        *write = buffer[i];
1484
        uint16_t new_opcode = _PyUop_Caching[uop].entries[depth].opcode;
1485
        assert(new_opcode != 0);
1486
        write->opcode = new_opcode;
1487
        write++;
1488
        depth = _PyUop_Caching[uop].entries[depth].output;
1489
    }
1490
    return write - output;
1491
}
1492
1493
/* Turn the trace recorded by the tracer into an executor.
 *
 * Returns 1 on success (with *exec_ptr set), 0 when no executor was
 * created (trace too short, or the optimizer discarded it), and -1 on
 * error (executor allocation/compilation failed).
 */
static int
uop_optimize(
    _PyInterpreterFrame *frame,
    PyThreadState *tstate,
    _PyExecutorObject **exec_ptr,
    bool progress_needed)
{
    _PyThreadStateImpl *_tstate = (_PyThreadStateImpl *)tstate;
    assert(_tstate->jit_tracer_state != NULL);
    _PyBloomFilter *dependencies = &_tstate->jit_tracer_state->prev_state.dependencies;
    _PyUOpInstruction *buffer = _tstate->jit_tracer_state->code_buffer.start;
    OPT_STAT_INC(attempts);
    bool is_noopt = !tstate->interp->opt_config.uops_optimize_enabled;
    int curr_stackentries = _tstate->jit_tracer_state->initial_state.stack_depth;
    int length = uop_buffer_length(&_tstate->jit_tracer_state->code_buffer);
    // Traces of CODE_SIZE_NO_PROGRESS or fewer uops make no real progress.
    if (length <= CODE_SIZE_NO_PROGRESS) {
        return 0;
    }
    assert(length > 0);
    assert(length < UOP_MAX_TRACE_LENGTH);
    OPT_STAT_INC(traces_created);
    if (!is_noopt) {
        // The optimizer writes into the second half of uop_array;
        // stack_allocate below assumes exactly this layout.
        _PyUOpInstruction *output = &_tstate->jit_tracer_state->uop_array[UOP_MAX_TRACE_LENGTH];
        length = _Py_uop_analyze_and_optimize(
            _tstate, buffer, length, curr_stackentries,
            output, dependencies);
        // <= 0 means the optimizer rejected the trace (0) or errored (<0).
        if (length <= 0) {
            return length;
        }
        buffer = output;
    }
    assert(length < UOP_MAX_TRACE_LENGTH);
    assert(length >= 1);
    /* Fix up */
    for (int pc = 0; pc < length; pc++) {
        int opcode = buffer[pc].opcode;
        int oparg = buffer[pc].oparg;
        if (oparg < _PyUop_Replication[opcode].stop && oparg >= _PyUop_Replication[opcode].start) {
            // Replace with the oparg-specialized replica of this uop.
            buffer[pc].opcode = opcode + oparg + 1 - _PyUop_Replication[opcode].start;
            // Replicas share the original uop's name as a prefix.
            assert(strncmp(_PyOpcode_uop_name[buffer[pc].opcode], _PyOpcode_uop_name[opcode], strlen(_PyOpcode_uop_name[opcode])) == 0);
        }
        else if (_PyUop_Flags[opcode] & HAS_RECORDS_VALUE_FLAG) {
            // Recording-only uops carry a reference in operand0; release it
            // and drop the instruction.
            Py_XDECREF((PyObject *)(uintptr_t)buffer[pc].operand0);
            buffer[pc].opcode = _NOP;
        }
        else if (is_terminator(&buffer[pc])) {
            break;
        }
        assert(_PyOpcode_uop_name[buffer[pc].opcode]);
    }
    OPT_HIST(effective_trace_length(buffer, length), optimized_trace_length_hist);
    // stack_allocate reads from the second half of uop_array (where the
    // optimizer wrote) and emits into the first half.
    _PyUOpInstruction *output = &_tstate->jit_tracer_state->uop_array[0];
    length = stack_allocate(buffer, output, length);
    buffer = output;
    length = prepare_for_execution(buffer, length);
    assert(length <= UOP_MAX_TRACE_LENGTH);
    _PyExecutorObject *executor = make_executor_from_uops(
        _tstate, buffer, length, dependencies);
    if (executor == NULL) {
        return -1;
    }
    assert(length <= UOP_MAX_TRACE_LENGTH);

    // Check executor coldness
    // It's okay if this ends up going negative.
    if (--tstate->interp->executor_creation_counter == 0) {
        _Py_set_eval_breaker_bit(tstate, _PY_EVAL_JIT_INVALIDATE_COLD_BIT);
    }

    *exec_ptr = executor;
    return 1;
}
1565
1566
1567
/*****************************************
1568
 *        Executor management
1569
 ****************************************/
1570
1571
/* We use a bloomfilter with k = 6, m = 256
1572
 * The choice of k and the following constants
1573
 * could do with a more rigorous analysis,
1574
 * but here is a simple analysis:
1575
 *
1576
 * We want to keep the false positive rate low.
1577
 * For n = 5 (a trace depends on 5 objects),
1578
 * we expect 30 bits set, giving a false positive
1579
 * rate of (30/256)**6 == 2.6e-6 which is plenty
1580
 * good enough.
1581
 *
1582
 * However with n = 10 we expect 60 bits set (worst case),
1583
 * giving a false positive of (60/256)**6 == 0.0001
1584
 *
1585
 * We choose k = 6, rather than a higher number as
1586
 * it means the false positive rate grows slower for high n.
1587
 *
1588
 * n = 5, k = 6 => fp = 2.6e-6
1589
 * n = 5, k = 8 => fp = 3.5e-7
1590
 * n = 10, k = 6 => fp = 1.6e-4
1591
 * n = 10, k = 8 => fp = 0.9e-4
1592
 * n = 15, k = 6 => fp = 0.18%
1593
 * n = 15, k = 8 => fp = 0.23%
1594
 * n = 20, k = 6 => fp = 1.1%
1595
 * n = 20, k = 8 => fp = 2.3%
1596
 *
1597
 * The above analysis assumes perfect hash functions,
1598
 * but those don't exist, so the real false positive
1599
 * rates may be worse.
1600
 */
1601
1602
#define K 6
1603
1604
#define SEED 20221211
1605
1606
/* TO DO -- Use more modern hash functions with better distribution of bits */
1607
static uint64_t
1608
address_to_hash(void *ptr) {
1609
    assert(ptr != NULL);
1610
    uint64_t uhash = SEED;
1611
    uintptr_t addr = (uintptr_t)ptr;
1612
    for (int i = 0; i < SIZEOF_VOID_P; i++) {
1613
        uhash ^= addr & 255;
1614
        uhash *= (uint64_t)PyHASH_MULTIPLIER;
1615
        addr >>= 8;
1616
    }
1617
    return uhash;
1618
}
1619
1620
void
1621
_Py_BloomFilter_Init(_PyBloomFilter *bloom)
1622
{
1623
    for (int i = 0; i < _Py_BLOOM_FILTER_WORDS; i++) {
1624
        bloom->bits[i] = 0;
1625
    }
1626
}
1627
1628
/* We want K hash functions that each set 1 bit.
1629
 * A hash function that sets 1 bit in M bits can be trivially
1630
 * derived from a log2(M) bit hash function.
1631
 * So we extract 8 (log2(256)) bits at a time from
1632
 * the 64bit hash. */
1633
void
1634
_Py_BloomFilter_Add(_PyBloomFilter *bloom, void *ptr)
1635
{
1636
    uint64_t hash = address_to_hash(ptr);
1637
    assert(K <= 8);
1638
    for (int i = 0; i < K; i++) {
1639
        uint8_t bits = hash & 255;
1640
        bloom->bits[bits >> 5] |= (1 << (bits&31));
1641
        hash >>= 8;
1642
    }
1643
}
1644
1645
static bool
1646
bloom_filter_may_contain(_PyBloomFilter *bloom, _PyBloomFilter *hashes)
1647
{
1648
    for (int i = 0; i < _Py_BLOOM_FILTER_WORDS; i++) {
1649
        if ((bloom->bits[i] & hashes->bits[i]) != hashes->bits[i]) {
1650
            return false;
1651
        }
1652
    }
1653
    return true;
1654
}
1655
1656
static void
1657
link_executor(_PyExecutorObject *executor)
1658
{
1659
    PyInterpreterState *interp = _PyInterpreterState_GET();
1660
    _PyExecutorLinkListNode *links = &executor->vm_data.links;
1661
    _PyExecutorObject *head = interp->executor_list_head;
1662
    if (head == NULL) {
1663
        interp->executor_list_head = executor;
1664
        links->previous = NULL;
1665
        links->next = NULL;
1666
    }
1667
    else {
1668
        assert(head->vm_data.links.previous == NULL);
1669
        links->previous = NULL;
1670
        links->next = head;
1671
        head->vm_data.links.previous = executor;
1672
        interp->executor_list_head = executor;
1673
    }
1674
    /* executor_list_head must be first in list */
1675
    assert(interp->executor_list_head->vm_data.links.previous == NULL);
1676
}
1677
1678
static void
1679
unlink_executor(_PyExecutorObject *executor)
1680
{
1681
    _PyExecutorLinkListNode *links = &executor->vm_data.links;
1682
    _PyExecutorObject *next = links->next;
1683
    _PyExecutorObject *prev = links->previous;
1684
    if (next != NULL) {
1685
        next->vm_data.links.previous = prev;
1686
    }
1687
    if (prev != NULL) {
1688
        prev->vm_data.links.next = next;
1689
    }
1690
    else {
1691
        // prev == NULL implies that executor is the list head
1692
        PyInterpreterState *interp = PyInterpreterState_Get();
1693
        assert(interp->executor_list_head == executor);
1694
        interp->executor_list_head = next;
1695
    }
1696
}
1697
1698
/* This must be called by optimizers before using the executor */
1699
void
1700
_Py_ExecutorInit(_PyExecutorObject *executor, const _PyBloomFilter *dependency_set)
1701
{
1702
    executor->vm_data.valid = true;
1703
    executor->vm_data.pending_deletion = 0;
1704
    executor->vm_data.code = NULL;
1705
    for (int i = 0; i < _Py_BLOOM_FILTER_WORDS; i++) {
1706
        executor->vm_data.bloom.bits[i] = dependency_set->bits[i];
1707
    }
1708
    link_executor(executor);
1709
}
1710
1711
static _PyExecutorObject *
1712
make_cold_executor(uint16_t opcode)
1713
{
1714
    _PyExecutorObject *cold = allocate_executor(0, 1);
1715
    if (cold == NULL) {
1716
        Py_FatalError("Cannot allocate core JIT code");
1717
    }
1718
    ((_PyUOpInstruction *)cold->trace)->opcode = opcode;
1719
    // This is initialized to false so we can prevent the executor
1720
    // from being immediately detected as cold and invalidated.
1721
    cold->vm_data.cold = false;
1722
#ifdef _Py_JIT
1723
    cold->jit_code = NULL;
1724
    cold->jit_size = 0;
1725
    if (_PyJIT_Compile(cold, cold->trace, 1)) {
1726
        Py_DECREF(cold);
1727
        Py_FatalError("Cannot allocate core JIT code");
1728
    }
1729
#endif
1730
    _Py_SetImmortal((PyObject *)cold);
1731
    return cold;
1732
}
1733
1734
_PyExecutorObject *
1735
_PyExecutor_GetColdExecutor(void)
1736
{
1737
    PyInterpreterState *interp = _PyInterpreterState_GET();
1738
    if (interp->cold_executor == NULL) {
1739
        return interp->cold_executor = make_cold_executor(_COLD_EXIT_r00);;
1740
    }
1741
    return interp->cold_executor;
1742
}
1743
1744
_PyExecutorObject *
1745
_PyExecutor_GetColdDynamicExecutor(void)
1746
{
1747
    PyInterpreterState *interp = _PyInterpreterState_GET();
1748
    if (interp->cold_dynamic_executor == NULL) {
1749
        interp->cold_dynamic_executor = make_cold_executor(_COLD_DYNAMIC_EXIT_r00);
1750
    }
1751
    return interp->cold_dynamic_executor;
1752
}
1753
1754
void
1755
_PyExecutor_ClearExit(_PyExitData *exit)
1756
{
1757
    if (exit == NULL) {
1758
        return;
1759
    }
1760
    _PyExecutorObject *old = exit->executor;
1761
    if (exit->is_dynamic) {
1762
        exit->executor = _PyExecutor_GetColdDynamicExecutor();
1763
    }
1764
    else {
1765
        exit->executor = _PyExecutor_GetColdExecutor();
1766
    }
1767
    Py_DECREF(old);
1768
}
1769
1770
/* Detaches the executor from the code object (if any) that
1771
 * holds a reference to it */
1772
void
1773
_Py_ExecutorDetach(_PyExecutorObject *executor)
1774
{
1775
    PyCodeObject *code = executor->vm_data.code;
1776
    if (code == NULL) {
1777
        return;
1778
    }
1779
    _Py_CODEUNIT *instruction = &_PyCode_CODE(code)[executor->vm_data.index];
1780
    assert(instruction->op.code == ENTER_EXECUTOR);
1781
    int index = instruction->op.arg;
1782
    assert(code->co_executors->executors[index] == executor);
1783
    instruction->op.code = executor->vm_data.opcode;
1784
    instruction->op.arg = executor->vm_data.oparg;
1785
    executor->vm_data.code = NULL;
1786
    code->co_executors->executors[index] = NULL;
1787
    Py_DECREF(executor);
1788
}
1789
1790
/* Executors can be invalidated at any time,
   even with a stop-the-world lock held.
   Consequently it must not run arbitrary code,
   including Py_DECREF with a non-executor. */
static void
executor_invalidate(PyObject *op)
{
    _PyExecutorObject *executor = _PyExecutorObject_CAST(op);
    // Idempotent: a second invalidation is a no-op.
    if (!executor->vm_data.valid) {
        return;
    }
    executor->vm_data.valid = 0;
    // Order matters: remove from the interpreter list, redirect exits to
    // the cold executors, detach from the owning code object, and only
    // then untrack from the GC.
    unlink_executor(executor);
    executor_clear_exits(executor);
    _Py_ExecutorDetach(executor);
    _PyObject_GC_UNTRACK(op);
}
1807
1808
static int
1809
executor_clear(PyObject *op)
1810
{
1811
    executor_invalidate(op);
1812
    return 0;
1813
}
1814
1815
/* Record that `executor` depends on `obj`: a later call to
 * _Py_Executors_InvalidateDependency(interp, obj, ...) may then
 * invalidate this executor (bloom filters allow false positives). */
void
_Py_Executor_DependsOn(_PyExecutorObject *executor, void *obj)
{
    assert(executor->vm_data.valid);
    _Py_BloomFilter_Add(&executor->vm_data.bloom, obj);
}
1821
1822
/* Invalidate all executors that depend on `obj`
 * May cause other executors to be invalidated as well
 */
void
_Py_Executors_InvalidateDependency(PyInterpreterState *interp, void *obj, int is_invalidation)
{
    // Build a filter containing only obj's bits, to test against each
    // executor's dependency filter.
    _PyBloomFilter obj_filter;
    _Py_BloomFilter_Init(&obj_filter);
    _Py_BloomFilter_Add(&obj_filter, obj);
    /* Walk the list of executors */
    /* TO DO -- Use a tree to avoid traversing as many objects */
    PyObject *invalidate = PyList_New(0);
    if (invalidate == NULL) {
        goto error;
    }
    /* Clearing an executor can clear others, so we need to make a list of
     * executors to invalidate first */
    for (_PyExecutorObject *exec = interp->executor_list_head; exec != NULL;) {
        assert(exec->vm_data.valid);
        // Save the next pointer before any mutation of the list.
        _PyExecutorObject *next = exec->vm_data.links.next;
        if (bloom_filter_may_contain(&exec->vm_data.bloom, &obj_filter) &&
            PyList_Append(invalidate, (PyObject *)exec))
        {
            goto error;
        }
        exec = next;
    }
    // Second pass: safe to invalidate now that the list walk is done.
    for (Py_ssize_t i = 0; i < PyList_GET_SIZE(invalidate); i++) {
        PyObject *exec = PyList_GET_ITEM(invalidate, i);
        executor_invalidate(exec);
        if (is_invalidation) {
            OPT_STAT_INC(executors_invalidated);
        }
    }
    Py_DECREF(invalidate);
    return;
error:
    PyErr_Clear();
    Py_XDECREF(invalidate);
    // If we're truly out of memory, wiping out everything is a fine fallback:
    _Py_Executors_InvalidateAll(interp, is_invalidation);
}
1864
1865
void
1866
_PyJit_Tracer_InvalidateDependency(PyThreadState *tstate, void *obj)
1867
{
1868
    _PyBloomFilter obj_filter;
1869
    _Py_BloomFilter_Init(&obj_filter);
1870
    _Py_BloomFilter_Add(&obj_filter, obj);
1871
    _PyThreadStateImpl *_tstate = (_PyThreadStateImpl *)tstate;
1872
    if (_tstate->jit_tracer_state == NULL) {
1873
        return;
1874
    }
1875
    if (bloom_filter_may_contain(&_tstate->jit_tracer_state->prev_state.dependencies, &obj_filter))
1876
    {
1877
        _tstate->jit_tracer_state->prev_state.dependencies_still_valid = false;
1878
    }
1879
}
1880
/* Invalidate all executors */
void
_Py_Executors_InvalidateAll(PyInterpreterState *interp, int is_invalidation)
{
    // Both branches below remove the current head from the list
    // (directly, or via clearing the code object's executors), so this
    // loop terminates when the list is empty.
    while (interp->executor_list_head) {
        _PyExecutorObject *executor = interp->executor_list_head;
        assert(executor->vm_data.valid);
        if (executor->vm_data.code) {
            // Clear the entire code object so its co_executors array be freed:
            _PyCode_Clear_Executors(executor->vm_data.code);
        }
        else {
            executor_invalidate((PyObject *)executor);
        }
        if (is_invalidation) {
            OPT_STAT_INC(executors_invalidated);
        }
    }
}
1899
1900
void
1901
_Py_Executors_InvalidateCold(PyInterpreterState *interp)
1902
{
1903
    /* Walk the list of executors */
1904
    /* TO DO -- Use a tree to avoid traversing as many objects */
1905
    PyObject *invalidate = PyList_New(0);
1906
    if (invalidate == NULL) {
1907
        goto error;
1908
    }
1909
1910
    /* Clearing an executor can deallocate others, so we need to make a list of
1911
     * executors to invalidate first */
1912
    for (_PyExecutorObject *exec = interp->executor_list_head; exec != NULL;) {
1913
        assert(exec->vm_data.valid);
1914
        _PyExecutorObject *next = exec->vm_data.links.next;
1915
1916
        if (exec->vm_data.cold && PyList_Append(invalidate, (PyObject *)exec) < 0) {
1917
            goto error;
1918
        }
1919
        else {
1920
            exec->vm_data.cold = true;
1921
        }
1922
1923
        exec = next;
1924
    }
1925
    for (Py_ssize_t i = 0; i < PyList_GET_SIZE(invalidate); i++) {
1926
        PyObject *exec = PyList_GET_ITEM(invalidate, i);
1927
        executor_invalidate(exec);
1928
    }
1929
    Py_DECREF(invalidate);
1930
    return;
1931
error:
1932
    PyErr_Clear();
1933
    Py_XDECREF(invalidate);
1934
    // If we're truly out of memory, wiping out everything is a fine fallback
1935
    _Py_Executors_InvalidateAll(interp, 0);
1936
}
1937
1938
#include "record_functions.c.h"
1939
1940
static int
1941
escape_angles(const char *input, Py_ssize_t size, char *buffer) {
1942
    int written = 0;
1943
    for (Py_ssize_t i = 0; i < size; i++) {
1944
        char c = input[i];
1945
        if (c == '<' || c == '>') {
1946
            buffer[written++] = '&';
1947
            buffer[written++] = c == '>' ? 'g' : 'l';
1948
            buffer[written++] = 't';
1949
            buffer[written++] = ';';
1950
        }
1951
        else {
1952
            buffer[written++] = c;
1953
        }
1954
    }
1955
    return written;
1956
}
1957
1958
static void
1959
write_str(PyObject *str, FILE *out)
1960
{
1961
    // Encode the Unicode object to the specified encoding
1962
    PyObject *encoded_obj = PyUnicode_AsEncodedString(str, "utf8", "strict");
1963
    if (encoded_obj == NULL) {
1964
        PyErr_Clear();
1965
        return;
1966
    }
1967
    const char *encoded_str = PyBytes_AsString(encoded_obj);
1968
    Py_ssize_t encoded_size = PyBytes_Size(encoded_obj);
1969
    char buffer[120];
1970
    bool truncated = false;
1971
    if (encoded_size > 24) {
1972
        encoded_size = 24;
1973
        truncated = true;
1974
    }
1975
    int size = escape_angles(encoded_str, encoded_size, buffer);
1976
    fwrite(buffer, 1, size, out);
1977
    if (truncated) {
1978
        fwrite("...", 1, 3, out);
1979
    }
1980
    Py_DECREF(encoded_obj);
1981
}
1982
1983
static int
1984
find_line_number(PyCodeObject *code, _PyExecutorObject *executor)
1985
{
1986
    int code_len = (int)Py_SIZE(code);
1987
    for (int i = 0; i < code_len; i++) {
1988
        _Py_CODEUNIT *instr = &_PyCode_CODE(code)[i];
1989
        int opcode = instr->op.code;
1990
        if (opcode == ENTER_EXECUTOR) {
1991
            _PyExecutorObject *exec = code->co_executors->executors[instr->op.arg];
1992
            if (exec == executor) {
1993
                return PyCode_Addr2Line(code, i*2);
1994
            }
1995
        }
1996
        i += _PyOpcode_Caches[_Py_GetBaseCodeUnit(code, i).op.code];
1997
    }
1998
    return -1;
1999
}
2000
2001
#define RED "#ff0000"
2002
#define WHITE "#ffffff"
2003
#define BLUE "#0000ff"
2004
#define BLACK "#000000"
2005
#define LOOP "#00c000"
2006
2007
#ifdef Py_STATS
2008
2009
/* Hotness buckets for uop background shading.  The numeric strings are
 * indices into graphviz's "greys9" colorscheme (set on the nodes in
 * _PyDumpExecutors); the hottest bucket uses plain white.
 * NOTE(review): presumably "9" is the darkest grey (coldest uops
 * rendered darkest) -- confirm against the greys9 scheme. */
static const char *COLORS[10] = {
    "9",
    "8",
    "7",
    "6",
    "5",
    "4",
    "3",
    "2",
    "1",
    WHITE,
};
2021
const char *
2022
get_background_color(_PyUOpInstruction const *inst, uint64_t max_hotness)
2023
{
2024
    uint64_t hotness = inst->execution_count;
2025
    int index = (hotness * 10)/max_hotness;
2026
    if (index > 9) {
2027
        index = 9;
2028
    }
2029
    if (index < 0) {
2030
        index = 0;
2031
    }
2032
    return COLORS[index];
2033
}
2034
2035
const char *
2036
get_foreground_color(_PyUOpInstruction const *inst, uint64_t max_hotness)
2037
{
2038
    if(_PyUop_Uncached[inst->opcode] == _DEOPT) {
2039
        return RED;
2040
    }
2041
    uint64_t hotness = inst->execution_count;
2042
    int index = (hotness * 10)/max_hotness;
2043
    if (index > 3) {
2044
        return BLACK;
2045
    }
2046
    return WHITE;
2047
}
2048
#endif
2049
2050
/* Emit one HTML table row for uop `i` of `executor` in graphviz format.
 * With Py_STATS, rows are shaded by hotness relative to the trace's
 * entry count and show execution counts; otherwise deopts are colored
 * red and operand0 is shown. */
static void
write_row_for_uop(_PyExecutorObject *executor, uint32_t i, FILE *out)
{
    /* Write row for uop.
        * The `port` is a marker so that outgoing edges can
        * be placed correctly. If a row is marked `port=17`,
        * then the outgoing edge is `{EXEC_NAME}:17 -> {TARGET}`
        * https://graphviz.readthedocs.io/en/stable/manual.html#node-ports-compass
        */
    _PyUOpInstruction const *inst = &executor->trace[i];
    const char *opname = _PyOpcode_uop_name[inst->opcode];
#ifdef Py_STATS
    // trace[0] is _START_EXECUTOR, so its count is the trace entry count.
    const char *bg_color = get_background_color(inst, executor->trace[0].execution_count);
    const char *color = get_foreground_color(inst, executor->trace[0].execution_count);
    fprintf(out, "        <tr><td port=\"i%d\" border=\"1\" color=\"%s\" bgcolor=\"%s\" ><font color=\"%s\"> %s &nbsp;--&nbsp; %" PRIu64 "</font></td></tr>\n",
        i, color, bg_color, color, opname, inst->execution_count);
#else
    const char *color = (_PyUop_Uncached[inst->opcode] == _DEOPT) ? RED : BLACK;
    fprintf(out, "        <tr><td port=\"i%d\" border=\"1\" color=\"%s\" >%s op0=%" PRIu64 "</td></tr>\n", i, color, opname, inst->operand0);
#endif
}
2071
2072
static bool
2073
is_stop(_PyUOpInstruction const *inst)
2074
{
2075
    uint16_t base_opcode = _PyUop_Uncached[inst->opcode];
2076
    return (base_opcode == _EXIT_TRACE || base_opcode == _DEOPT || base_opcode == _JUMP_TO_TOP);
2077
}
2078
2079
2080
/* Writes the node and outgoing edges for a single tracelet in graphviz format.
 * Each tracelet is presented as a table of the uops it contains.
 * If Py_STATS is enabled, execution counts are included.
 *
 * https://graphviz.readthedocs.io/en/stable/manual.html
 * https://graphviz.org/gallery/
 */
static void
executor_to_gv(_PyExecutorObject *executor, FILE *out)
{
    PyCodeObject *code = executor->vm_data.code;
    fprintf(out, "executor_%p [\n", executor);
    fprintf(out, "    shape = none\n");

    /* Write the HTML table for the uops */
    fprintf(out, "    label = <<table border=\"0\" cellspacing=\"0\">\n");
    fprintf(out, "        <tr><td port=\"start\" border=\"1\" ><b>Executor</b></td></tr>\n");
    if (code == NULL) {
        fprintf(out, "        <tr><td border=\"1\" >No code object</td></tr>\n");
    }
    else {
        // Header row: qualified name and line of the ENTER_EXECUTOR site.
        fprintf(out, "        <tr><td  border=\"1\" >");
        write_str(code->co_qualname, out);
        int line = find_line_number(code, executor);
        fprintf(out, ": %d</td></tr>\n", line);
    }
    // One row per uop, stopping at the first exit/deopt/loop-back uop.
    for (uint32_t i = 0; i < executor->code_size; i++) {
        write_row_for_uop(executor, i, out);
        if (is_stop(&executor->trace[i])) {
            break;
        }
    }
    fprintf(out, "    </table>>\n");
    fprintf(out, "]\n\n");

    /* Write all the outgoing edges */
    _PyExecutorObject *cold = _PyExecutor_GetColdExecutor();
    _PyExecutorObject *cold_dynamic = _PyExecutor_GetColdDynamicExecutor();
    for (uint32_t i = 0; i < executor->code_size; i++) {
        _PyUOpInstruction const *inst = &executor->trace[i];
        uint16_t base_opcode = _PyUop_Uncached[inst->opcode];
        uint16_t flags = _PyUop_Flags[base_opcode];
        _PyExitData *exit = NULL;
        if (base_opcode == _JUMP_TO_TOP) {
            // Loop-back edge within the same executor.
            fprintf(out, "executor_%p:i%d -> executor_%p:i%d [color = \"" LOOP "\"]\n", executor, i, executor, inst->jump_target);
            break;
        }
        if (base_opcode == _EXIT_TRACE) {
            // operand0 holds the _PyExitData pointer (see
            // make_executor_from_uops).
            exit = (_PyExitData *)inst->operand0;
        }
        else if (flags & HAS_EXIT_FLAG) {
            // Side exit: follow the jump target to the exit instruction.
            assert(inst->format == UOP_FORMAT_JUMP);
            _PyUOpInstruction const *exit_inst = &executor->trace[inst->jump_target];
            uint16_t base_exit_opcode = _PyUop_Uncached[exit_inst->opcode];
            assert(base_exit_opcode == _EXIT_TRACE || base_exit_opcode == _DYNAMIC_EXIT);
            exit = (_PyExitData *)exit_inst->operand0;
        }
        if (exit != NULL) {
            if (exit->executor == cold || exit->executor == cold_dynamic) {
#ifdef Py_STATS
                /* Only mark as have cold exit if it has actually exited */
                uint64_t diff = inst->execution_count - executor->trace[i+1].execution_count;
                if (diff) {
                    fprintf(out, "cold_%p%d [ label = \"%"  PRIu64  "\" shape = ellipse color=\"" BLUE "\" ]\n", executor, i, diff);
                    fprintf(out, "executor_%p:i%d -> cold_%p%d\n", executor, i, executor, i);
                }
#endif
            }
            else {
                // Edge to the executor the exit has been linked to.
                fprintf(out, "executor_%p:i%d -> executor_%p:start\n", executor, i, exit->executor);
            }
        }
        if (is_stop(inst)) {
            break;
        }
    }
}
2157
2158
/* Write the graph of all the live tracelets in graphviz format. */
2159
int
2160
_PyDumpExecutors(FILE *out)
2161
{
2162
    fprintf(out, "digraph ideal {\n\n");
2163
    fprintf(out, "    rankdir = \"LR\"\n\n");
2164
    fprintf(out, "    node [colorscheme=greys9]\n");
2165
    PyInterpreterState *interp = PyInterpreterState_Get();
2166
    for (_PyExecutorObject *exec = interp->executor_list_head; exec != NULL;) {
2167
        executor_to_gv(exec, out);
2168
        exec = exec->vm_data.links.next;
2169
    }
2170
    fprintf(out, "}\n\n");
2171
    return 0;
2172
}
2173
2174
#else
2175
2176
/* Stub used when tier 2 (_Py_TIER2) is compiled out: there are no
 * executors to dump, so report the feature as unavailable. */
int
_PyDumpExecutors(FILE *out)
{
    PyErr_SetString(PyExc_NotImplementedError, "No JIT available");
    return -1;
}
2182
2183
void
_PyExecutor_Free(struct _PyExecutorObject *self)
{
    /* This should never be called: without _Py_TIER2 no executors are
     * ever created, so there is nothing to free. */
    Py_UNREACHABLE();
}
2189
2190
#endif /* _Py_TIER2 */