Coverage Report

Created: 2025-08-26 06:57

/src/cpython3/Python/optimizer.c
Line
Source
1
#include "Python.h"
2
3
#ifdef _Py_TIER2
4
5
#include "opcode.h"
6
#include "pycore_interp.h"
7
#include "pycore_backoff.h"
8
#include "pycore_bitutils.h"        // _Py_popcount32()
9
#include "pycore_code.h"            // _Py_GetBaseCodeUnit
10
#include "pycore_function.h"        // _PyFunction_LookupByVersion()
11
#include "pycore_interpframe.h"
12
#include "pycore_object.h"          // _PyObject_GC_UNTRACK()
13
#include "pycore_opcode_metadata.h" // _PyOpcode_OpName[]
14
#include "pycore_opcode_utils.h"  // MAX_REAL_OPCODE
15
#include "pycore_optimizer.h"     // _Py_uop_analyze_and_optimize()
16
#include "pycore_pystate.h"       // _PyInterpreterState_GET()
17
#include "pycore_tuple.h" // _PyTuple_FromArraySteal
18
#include "pycore_unicodeobject.h" // _PyUnicode_FromASCII
19
#include "pycore_uop_ids.h"
20
#include "pycore_jit.h"
21
#include <stdbool.h>
22
#include <stdint.h>
23
#include <stddef.h>
24
25
#define NEED_OPCODE_METADATA
26
#include "pycore_uop_metadata.h" // Uop tables
27
#undef NEED_OPCODE_METADATA
28
29
#define MAX_EXECUTORS_SIZE 256
30
31
#define _PyExecutorObject_CAST(op)  ((_PyExecutorObject *)(op))
32
33
static bool
34
has_space_for_executor(PyCodeObject *code, _Py_CODEUNIT *instr)
35
{
36
    if (instr->op.code == ENTER_EXECUTOR) {
37
        return true;
38
    }
39
    if (code->co_executors == NULL) {
40
        return true;
41
    }
42
    return code->co_executors->size < MAX_EXECUTORS_SIZE;
43
}
44
45
static int32_t
46
get_index_for_executor(PyCodeObject *code, _Py_CODEUNIT *instr)
47
{
48
    if (instr->op.code == ENTER_EXECUTOR) {
49
        return instr->op.arg;
50
    }
51
    _PyExecutorArray *old = code->co_executors;
52
    int size = 0;
53
    int capacity = 0;
54
    if (old != NULL) {
55
        size = old->size;
56
        capacity = old->capacity;
57
        assert(size < MAX_EXECUTORS_SIZE);
58
    }
59
    assert(size <= capacity);
60
    if (size == capacity) {
61
        /* Array is full. Grow array */
62
        int new_capacity = capacity ? capacity * 2 : 4;
63
        _PyExecutorArray *new = PyMem_Realloc(
64
            old,
65
            offsetof(_PyExecutorArray, executors) +
66
            new_capacity * sizeof(_PyExecutorObject *));
67
        if (new == NULL) {
68
            return -1;
69
        }
70
        new->capacity = new_capacity;
71
        new->size = size;
72
        code->co_executors = new;
73
    }
74
    assert(size < code->co_executors->capacity);
75
    return size;
76
}
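The growth policy above starts at a capacity of 4 and doubles from there, so reaching MAX_EXECUTORS_SIZE takes only a handful of reallocations. A standalone sketch of the capacities an executor array passes through (illustrative only, not part of optimizer.c):

#include <stdio.h>

int main(void)
{
    int capacity = 0;
    while (capacity < 256) {                      /* MAX_EXECUTORS_SIZE */
        capacity = capacity ? capacity * 2 : 4;   /* same policy as get_index_for_executor() */
        printf("%d ", capacity);                  /* prints: 4 8 16 32 64 128 256 */
    }
    printf("\n");
    return 0;
}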
77
78
static void
79
insert_executor(PyCodeObject *code, _Py_CODEUNIT *instr, int index, _PyExecutorObject *executor)
80
{
81
    Py_INCREF(executor);
82
    if (instr->op.code == ENTER_EXECUTOR) {
83
        assert(index == instr->op.arg);
84
        _Py_ExecutorDetach(code->co_executors->executors[index]);
85
    }
86
    else {
87
        assert(code->co_executors->size == index);
88
        assert(code->co_executors->capacity > index);
89
        code->co_executors->size++;
90
    }
91
    executor->vm_data.opcode = instr->op.code;
92
    executor->vm_data.oparg = instr->op.arg;
93
    executor->vm_data.code = code;
94
    executor->vm_data.index = (int)(instr - _PyCode_CODE(code));
95
    code->co_executors->executors[index] = executor;
96
    assert(index < MAX_EXECUTORS_SIZE);
97
    instr->op.code = ENTER_EXECUTOR;
98
    instr->op.arg = index;
99
}
100
101
static _PyExecutorObject *
102
make_executor_from_uops(_PyUOpInstruction *buffer, int length, const _PyBloomFilter *dependencies);
103
104
static int
105
uop_optimize(_PyInterpreterFrame *frame, _Py_CODEUNIT *instr,
106
             _PyExecutorObject **exec_ptr, int curr_stackentries,
107
             bool progress_needed);
108
109
/* Returns 1 if optimized, 0 if not optimized, and -1 for an error.
110
 * If optimized, *executor_ptr contains a new reference to the executor
111
 */
112
// gh-137573: inlining this function causes stack overflows
113
Py_NO_INLINE int
114
_PyOptimizer_Optimize(
115
    _PyInterpreterFrame *frame, _Py_CODEUNIT *start,
116
    _PyExecutorObject **executor_ptr, int chain_depth)
117
{
118
    _PyStackRef *stack_pointer = frame->stackpointer;
119
    assert(_PyInterpreterState_GET()->jit);
120
    // The first executor in a chain and the MAX_CHAIN_DEPTH'th executor *must*
121
    // make progress in order to avoid infinite loops or excessively-long
122
    // side-exit chains. We can only insert the executor into the bytecode if
123
    // this is true, since a deopt won't infinitely re-enter the executor:
124
    chain_depth %= MAX_CHAIN_DEPTH;
125
    bool progress_needed = chain_depth == 0;
126
    PyCodeObject *code = _PyFrame_GetCode(frame);
127
    assert(PyCode_Check(code));
128
    if (progress_needed && !has_space_for_executor(code, start)) {
129
        return 0;
130
    }
131
    int err = uop_optimize(frame, start, executor_ptr, (int)(stack_pointer - _PyFrame_Stackbase(frame)), progress_needed);
132
    if (err <= 0) {
133
        return err;
134
    }
135
    assert(*executor_ptr != NULL);
136
    if (progress_needed) {
137
        int index = get_index_for_executor(code, start);
138
        if (index < 0) {
139
            /* Out of memory. Don't raise and assume that the
140
             * error will show up elsewhere.
141
             *
142
             * If an optimizer has already produced an executor,
143
             * it might get confused by the executor disappearing,
144
             * but there is not much we can do about that here. */
145
            Py_DECREF(*executor_ptr);
146
            return 0;
147
        }
148
        insert_executor(code, start, index, *executor_ptr);
149
    }
150
    else {
151
        (*executor_ptr)->vm_data.code = NULL;
152
    }
153
    (*executor_ptr)->vm_data.chain_depth = chain_depth;
154
    assert((*executor_ptr)->vm_data.valid);
155
    return 1;
156
}
157
158
static _PyExecutorObject *
159
get_executor_lock_held(PyCodeObject *code, int offset)
160
{
161
    int code_len = (int)Py_SIZE(code);
162
    for (int i = 0 ; i < code_len;) {
163
        if (_PyCode_CODE(code)[i].op.code == ENTER_EXECUTOR && i*2 == offset) {
164
            int oparg = _PyCode_CODE(code)[i].op.arg;
165
            _PyExecutorObject *res = code->co_executors->executors[oparg];
166
            Py_INCREF(res);
167
            return res;
168
        }
169
        i += _PyInstruction_GetLength(code, i);
170
    }
171
    PyErr_SetString(PyExc_ValueError, "no executor at given byte offset");
172
    return NULL;
173
}
174
175
_PyExecutorObject *
176
_Py_GetExecutor(PyCodeObject *code, int offset)
177
{
178
    _PyExecutorObject *executor;
179
    Py_BEGIN_CRITICAL_SECTION(code);
180
    executor = get_executor_lock_held(code, offset);
181
    Py_END_CRITICAL_SECTION();
182
    return executor;
183
}
184
185
static PyObject *
186
is_valid(PyObject *self, PyObject *Py_UNUSED(ignored))
187
{
188
    return PyBool_FromLong(((_PyExecutorObject *)self)->vm_data.valid);
189
}
190
191
static PyObject *
192
get_opcode(PyObject *self, PyObject *Py_UNUSED(ignored))
193
{
194
    return PyLong_FromUnsignedLong(((_PyExecutorObject *)self)->vm_data.opcode);
195
}
196
197
static PyObject *
198
get_oparg(PyObject *self, PyObject *Py_UNUSED(ignored))
199
{
200
    return PyLong_FromUnsignedLong(((_PyExecutorObject *)self)->vm_data.oparg);
201
}
202
203
///////////////////// Experimental UOp Optimizer /////////////////////
204
205
static int executor_clear(PyObject *executor);
206
static void unlink_executor(_PyExecutorObject *executor);
207
208
209
void
210
_PyExecutor_Free(_PyExecutorObject *self)
211
{
212
#ifdef _Py_JIT
213
    _PyJIT_Free(self);
214
#endif
215
    PyObject_GC_Del(self);
216
}
217
218
void
219
_Py_ClearExecutorDeletionList(PyInterpreterState *interp)
220
{
221
    _PyRuntimeState *runtime = &_PyRuntime;
222
    HEAD_LOCK(runtime);
223
    PyThreadState* ts = PyInterpreterState_ThreadHead(interp);
224
    HEAD_UNLOCK(runtime);
225
    while (ts) {
226
        _PyExecutorObject *current = (_PyExecutorObject *)ts->current_executor;
227
        if (current != NULL) {
228
            /* Anything in this list will be unlinked, so we can reuse the
229
             * linked field as a reachability marker. */
230
            current->vm_data.linked = 1;
231
        }
232
        HEAD_LOCK(runtime);
233
        ts = PyThreadState_Next(ts);
234
        HEAD_UNLOCK(runtime);
235
    }
236
    _PyExecutorObject **prev_to_next_ptr = &interp->executor_deletion_list_head;
237
    _PyExecutorObject *exec = *prev_to_next_ptr;
238
    while (exec != NULL) {
239
        if (exec->vm_data.linked) {
240
            // This executor is currently executing
241
            exec->vm_data.linked = 0;
242
            prev_to_next_ptr = &exec->vm_data.links.next;
243
        }
244
        else {
245
            *prev_to_next_ptr = exec->vm_data.links.next;
246
            _PyExecutor_Free(exec);
247
        }
248
        exec = *prev_to_next_ptr;
249
    }
250
    interp->executor_deletion_list_remaining_capacity = EXECUTOR_DELETE_LIST_MAX;
251
}
252
253
static void
254
add_to_pending_deletion_list(_PyExecutorObject *self)
255
{
256
    PyInterpreterState *interp = PyInterpreterState_Get();
257
    self->vm_data.links.next = interp->executor_deletion_list_head;
258
    interp->executor_deletion_list_head = self;
259
    if (interp->executor_deletion_list_remaining_capacity > 0) {
260
        interp->executor_deletion_list_remaining_capacity--;
261
    }
262
    else {
263
        _Py_ClearExecutorDeletionList(interp);
264
    }
265
}
266
267
static void
268
uop_dealloc(PyObject *op) {
269
    _PyExecutorObject *self = _PyExecutorObject_CAST(op);
270
    _PyObject_GC_UNTRACK(self);
271
    assert(self->vm_data.code == NULL);
272
    unlink_executor(self);
273
    // Once unlinked it becomes impossible to invalidate an executor, so do it here.
274
    self->vm_data.valid = 0;
275
    add_to_pending_deletion_list(self);
276
}
277
278
const char *
279
_PyUOpName(int index)
280
{
281
    if (index < 0 || index > MAX_UOP_ID) {
282
        return NULL;
283
    }
284
    return _PyOpcode_uop_name[index];
285
}
286
287
#ifdef Py_DEBUG
288
void
289
_PyUOpPrint(const _PyUOpInstruction *uop)
290
{
291
    const char *name = _PyUOpName(uop->opcode);
292
    if (name == NULL) {
293
        printf("<uop %d>", uop->opcode);
294
    }
295
    else {
296
        printf("%s", name);
297
    }
298
    switch(uop->format) {
299
        case UOP_FORMAT_TARGET:
300
            printf(" (%d, target=%d, operand0=%#" PRIx64 ", operand1=%#" PRIx64,
301
                uop->oparg,
302
                uop->target,
303
                (uint64_t)uop->operand0,
304
                (uint64_t)uop->operand1);
305
            break;
306
        case UOP_FORMAT_JUMP:
307
            printf(" (%d, jump_target=%d, operand0=%#" PRIx64 ", operand1=%#" PRIx64,
308
                uop->oparg,
309
                uop->jump_target,
310
                (uint64_t)uop->operand0,
311
                (uint64_t)uop->operand1);
312
            break;
313
        default:
314
            printf(" (%d, Unknown format)", uop->oparg);
315
    }
316
    if (_PyUop_Flags[uop->opcode] & HAS_ERROR_FLAG) {
317
        printf(", error_target=%d", uop->error_target);
318
    }
319
320
    printf(")");
321
}
322
#endif
323
324
static Py_ssize_t
325
uop_len(PyObject *op)
326
{
327
    _PyExecutorObject *self = _PyExecutorObject_CAST(op);
328
    return self->code_size;
329
}
330
331
static PyObject *
332
uop_item(PyObject *op, Py_ssize_t index)
333
{
334
    _PyExecutorObject *self = _PyExecutorObject_CAST(op);
335
    Py_ssize_t len = uop_len(op);
336
    if (index < 0 || index >= len) {
337
        PyErr_SetNone(PyExc_IndexError);
338
        return NULL;
339
    }
340
    const char *name = _PyUOpName(self->trace[index].opcode);
341
    if (name == NULL) {
342
        name = "<nil>";
343
    }
344
    PyObject *oname = _PyUnicode_FromASCII(name, strlen(name));
345
    if (oname == NULL) {
346
        return NULL;
347
    }
348
    PyObject *oparg = PyLong_FromUnsignedLong(self->trace[index].oparg);
349
    if (oparg == NULL) {
350
        Py_DECREF(oname);
351
        return NULL;
352
    }
353
    PyObject *target = PyLong_FromUnsignedLong(self->trace[index].target);
354
    if (target == NULL) {
355
        Py_DECREF(oparg);
356
        Py_DECREF(oname);
357
        return NULL;
358
    }
359
    PyObject *operand = PyLong_FromUnsignedLongLong(self->trace[index].operand0);
360
    if (operand == NULL) {
361
        Py_DECREF(target);
362
        Py_DECREF(oparg);
363
        Py_DECREF(oname);
364
        return NULL;
365
    }
366
    PyObject *args[4] = { oname, oparg, target, operand };
367
    return _PyTuple_FromArraySteal(args, 4);
368
}
369
370
PySequenceMethods uop_as_sequence = {
371
    .sq_length = uop_len,
372
    .sq_item = uop_item,
373
};
374
375
static int
376
executor_traverse(PyObject *o, visitproc visit, void *arg)
377
{
378
    _PyExecutorObject *executor = _PyExecutorObject_CAST(o);
379
    for (uint32_t i = 0; i < executor->exit_count; i++) {
380
        Py_VISIT(executor->exits[i].executor);
381
    }
382
    return 0;
383
}
384
385
static PyObject *
386
get_jit_code(PyObject *self, PyObject *Py_UNUSED(ignored))
387
{
388
#ifndef _Py_JIT
389
    PyErr_SetString(PyExc_RuntimeError, "JIT support not enabled.");
390
    return NULL;
391
#else
392
    _PyExecutorObject *executor = _PyExecutorObject_CAST(self);
393
    if (executor->jit_code == NULL || executor->jit_size == 0) {
394
        Py_RETURN_NONE;
395
    }
396
    return PyBytes_FromStringAndSize(executor->jit_code, executor->jit_size);
397
#endif
398
}
399
400
static PyMethodDef uop_executor_methods[] = {
401
    { "is_valid", is_valid, METH_NOARGS, NULL },
402
    { "get_jit_code", get_jit_code, METH_NOARGS, NULL},
403
    { "get_opcode", get_opcode, METH_NOARGS, NULL },
404
    { "get_oparg", get_oparg, METH_NOARGS, NULL },
405
    { NULL, NULL },
406
};
407
408
static int
409
executor_is_gc(PyObject *o)
410
{
411
    return !_Py_IsImmortal(o);
412
}
413
414
PyTypeObject _PyUOpExecutor_Type = {
415
    PyVarObject_HEAD_INIT(&PyType_Type, 0)
416
    .tp_name = "uop_executor",
417
    .tp_basicsize = offsetof(_PyExecutorObject, exits),
418
    .tp_itemsize = 1,
419
    .tp_flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_DISALLOW_INSTANTIATION | Py_TPFLAGS_HAVE_GC,
420
    .tp_dealloc = uop_dealloc,
421
    .tp_as_sequence = &uop_as_sequence,
422
    .tp_methods = uop_executor_methods,
423
    .tp_traverse = executor_traverse,
424
    .tp_clear = executor_clear,
425
    .tp_is_gc = executor_is_gc,
426
};
427
428
/* TO DO -- Generate these tables */
429
static const uint16_t
430
_PyUOp_Replacements[MAX_UOP_ID + 1] = {
431
    [_ITER_JUMP_RANGE] = _GUARD_NOT_EXHAUSTED_RANGE,
432
    [_ITER_JUMP_LIST] = _GUARD_NOT_EXHAUSTED_LIST,
433
    [_ITER_JUMP_TUPLE] = _GUARD_NOT_EXHAUSTED_TUPLE,
434
    [_FOR_ITER] = _FOR_ITER_TIER_TWO,
435
    [_ITER_NEXT_LIST] = _ITER_NEXT_LIST_TIER_TWO,
436
    [_CHECK_PERIODIC_AT_END] = _TIER2_RESUME_CHECK,
437
};
438
439
static const uint8_t
440
is_for_iter_test[MAX_UOP_ID + 1] = {
441
    [_GUARD_NOT_EXHAUSTED_RANGE] = 1,
442
    [_GUARD_NOT_EXHAUSTED_LIST] = 1,
443
    [_GUARD_NOT_EXHAUSTED_TUPLE] = 1,
444
    [_FOR_ITER_TIER_TWO] = 1,
445
};
446
447
static const uint16_t
448
BRANCH_TO_GUARD[4][2] = {
449
    [POP_JUMP_IF_FALSE - POP_JUMP_IF_FALSE][0] = _GUARD_IS_TRUE_POP,
450
    [POP_JUMP_IF_FALSE - POP_JUMP_IF_FALSE][1] = _GUARD_IS_FALSE_POP,
451
    [POP_JUMP_IF_TRUE - POP_JUMP_IF_FALSE][0] = _GUARD_IS_FALSE_POP,
452
    [POP_JUMP_IF_TRUE - POP_JUMP_IF_FALSE][1] = _GUARD_IS_TRUE_POP,
453
    [POP_JUMP_IF_NONE - POP_JUMP_IF_FALSE][0] = _GUARD_IS_NOT_NONE_POP,
454
    [POP_JUMP_IF_NONE - POP_JUMP_IF_FALSE][1] = _GUARD_IS_NONE_POP,
455
    [POP_JUMP_IF_NOT_NONE - POP_JUMP_IF_FALSE][0] = _GUARD_IS_NONE_POP,
456
    [POP_JUMP_IF_NOT_NONE - POP_JUMP_IF_FALSE][1] = _GUARD_IS_NOT_NONE_POP,
457
};
458
459
460
#define CONFIDENCE_RANGE 1000
461
#define CONFIDENCE_CUTOFF 333
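For a sense of scale (illustrative arithmetic only, mirroring the conditional-branch handling further down): confidence starts at CONFIDENCE_RANGE and is multiplied by bitcount/16 (or (16 - bitcount)/16) at every conditional branch, and translation stops once it drops below CONFIDENCE_CUTOFF.

#include <stdio.h>

int main(void)
{
    int confidence = 1000;                  /* CONFIDENCE_RANGE */
    int bitcounts[] = {12, 12, 12, 12};     /* four branches, each taken 12 of the last 16 times */
    for (int i = 0; i < 4; i++) {
        confidence = confidence * bitcounts[i] / 16;
        printf("after branch %d: confidence = %d\n", i + 1, confidence);
    }
    /* Prints 750, 562, 421, 315: the fourth branch falls below
     * CONFIDENCE_CUTOFF (333), so tracing would stop there. */
    return 0;
}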
462
463
#ifdef Py_DEBUG
464
#define DPRINTF(level, ...) \
465
    if (lltrace >= (level)) { printf(__VA_ARGS__); }
466
#else
467
#define DPRINTF(level, ...)
468
#endif
469
470
471
static inline int
472
add_to_trace(
473
    _PyUOpInstruction *trace,
474
    int trace_length,
475
    uint16_t opcode,
476
    uint16_t oparg,
477
    uint64_t operand,
478
    uint32_t target)
479
{
480
    trace[trace_length].opcode = opcode;
481
    trace[trace_length].format = UOP_FORMAT_TARGET;
482
    trace[trace_length].target = target;
483
    trace[trace_length].oparg = oparg;
484
    trace[trace_length].operand0 = operand;
485
#ifdef Py_STATS
486
    trace[trace_length].execution_count = 0;
487
#endif
488
    return trace_length + 1;
489
}
490
491
#ifdef Py_DEBUG
492
#define ADD_TO_TRACE(OPCODE, OPARG, OPERAND, TARGET) \
493
    assert(trace_length < max_length); \
494
    trace_length = add_to_trace(trace, trace_length, (OPCODE), (OPARG), (OPERAND), (TARGET)); \
495
    if (lltrace >= 2) { \
496
        printf("%4d ADD_TO_TRACE: ", trace_length); \
497
        _PyUOpPrint(&trace[trace_length-1]); \
498
        printf("\n"); \
499
    }
500
#else
501
#define ADD_TO_TRACE(OPCODE, OPARG, OPERAND, TARGET) \
502
    assert(trace_length < max_length); \
503
    trace_length = add_to_trace(trace, trace_length, (OPCODE), (OPARG), (OPERAND), (TARGET));
504
#endif
505
506
#define INSTR_IP(INSTR, CODE) \
507
    ((uint32_t)((INSTR) - ((_Py_CODEUNIT *)(CODE)->co_code_adaptive)))
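INSTR_IP yields an instruction index in code units; the debug output below multiplies it by 2 to report a byte offset, since each _Py_CODEUNIT is two bytes wide. A tiny standalone sketch of the same pointer arithmetic, with a plain uint16_t array standing in for co_code_adaptive:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint16_t code_units[32] = {0};       /* stand-in for co_code_adaptive */
    uint16_t *instr = &code_units[7];    /* pretend this is the current instruction */
    uint32_t index = (uint32_t)(instr - code_units);    /* what INSTR_IP computes */
    printf("index = %u, byte offset = %u\n", index, 2 * index);   /* 7 and 14 */
    return 0;
}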
508
509
// Reserve space for n uops
510
#define RESERVE_RAW(n, opname) \
511
    if (trace_length + (n) > max_length) { \
512
        DPRINTF(2, "No room for %s (need %d, got %d)\n", \
513
                (opname), (n), max_length - trace_length); \
514
        OPT_STAT_INC(trace_too_long); \
515
        goto done; \
516
    }
517
518
// Reserve space for N uops, plus 3 for _SET_IP, _CHECK_VALIDITY and _EXIT_TRACE
519
#define RESERVE(needed) RESERVE_RAW((needed) + 3, _PyUOpName(opcode))
520
521
// Trace stack operations (used by _PUSH_FRAME, _RETURN_VALUE)
522
#define TRACE_STACK_PUSH() \
523
    if (trace_stack_depth >= TRACE_STACK_SIZE) { \
524
        DPRINTF(2, "Trace stack overflow\n"); \
525
        OPT_STAT_INC(trace_stack_overflow); \
526
        return 0; \
527
    } \
528
    assert(func == NULL || func->func_code == (PyObject *)code); \
529
    trace_stack[trace_stack_depth].func = func; \
530
    trace_stack[trace_stack_depth].code = code; \
531
    trace_stack[trace_stack_depth].instr = instr; \
532
    trace_stack_depth++;
533
#define TRACE_STACK_POP() \
534
    if (trace_stack_depth <= 0) { \
535
        Py_FatalError("Trace stack underflow\n"); \
536
    } \
537
    trace_stack_depth--; \
538
    func = trace_stack[trace_stack_depth].func; \
539
    code = trace_stack[trace_stack_depth].code; \
540
    assert(func == NULL || func->func_code == (PyObject *)code); \
541
    instr = trace_stack[trace_stack_depth].instr;
542
543
/* Returns the length of the trace on success,
544
 * 0 if it failed to produce a worthwhile trace,
545
 * and -1 on an error.
546
 */
547
static int
548
translate_bytecode_to_trace(
549
    _PyInterpreterFrame *frame,
550
    _Py_CODEUNIT *instr,
551
    _PyUOpInstruction *trace,
552
    int buffer_size,
553
    _PyBloomFilter *dependencies, bool progress_needed)
554
{
555
    bool first = true;
556
    PyCodeObject *code = _PyFrame_GetCode(frame);
557
    PyFunctionObject *func = _PyFrame_GetFunction(frame);
558
    assert(PyFunction_Check(func));
559
    PyCodeObject *initial_code = code;
560
    _Py_BloomFilter_Add(dependencies, initial_code);
561
    _Py_CODEUNIT *initial_instr = instr;
562
    int trace_length = 0;
563
    // Leave space for possible trailing _EXIT_TRACE
564
    int max_length = buffer_size-2;
565
    struct {
566
        PyFunctionObject *func;
567
        PyCodeObject *code;
568
        _Py_CODEUNIT *instr;
569
    } trace_stack[TRACE_STACK_SIZE];
570
    int trace_stack_depth = 0;
571
    int confidence = CONFIDENCE_RANGE;  // Adjusted by branch instructions
572
    bool jump_seen = false;
573
574
#ifdef Py_DEBUG
575
    char *python_lltrace = Py_GETENV("PYTHON_LLTRACE");
576
    int lltrace = 0;
577
    if (python_lltrace != NULL && *python_lltrace >= '0') {
578
        lltrace = *python_lltrace - '0';  // TODO: Parse an int and all that
579
    }
580
#endif
581
582
    DPRINTF(2,
583
            "Optimizing %s (%s:%d) at byte offset %d\n",
584
            PyUnicode_AsUTF8(code->co_qualname),
585
            PyUnicode_AsUTF8(code->co_filename),
586
            code->co_firstlineno,
587
            2 * INSTR_IP(initial_instr, code));
588
    ADD_TO_TRACE(_START_EXECUTOR, 0, (uintptr_t)instr, INSTR_IP(instr, code));
589
    ADD_TO_TRACE(_MAKE_WARM, 0, 0, 0);
590
    uint32_t target = 0;
591
592
    for (;;) {
593
        target = INSTR_IP(instr, code);
594
        // Need space for _DEOPT
595
        max_length--;
596
597
        uint32_t opcode = instr->op.code;
598
        uint32_t oparg = instr->op.arg;
599
600
        if (!first && instr == initial_instr) {
601
            // We have looped around to the start:
602
            RESERVE(1);
603
            ADD_TO_TRACE(_JUMP_TO_TOP, 0, 0, 0);
604
            goto done;
605
        }
606
607
        DPRINTF(2, "%d: %s(%d)\n", target, _PyOpcode_OpName[opcode], oparg);
608
609
        if (opcode == EXTENDED_ARG) {
610
            instr++;
611
            opcode = instr->op.code;
612
            oparg = (oparg << 8) | instr->op.arg;
613
            if (opcode == EXTENDED_ARG) {
614
                instr--;
615
                goto done;
616
            }
617
        }
618
        if (opcode == ENTER_EXECUTOR) {
619
            // We have a couple of options here. We *could* peek "underneath"
620
            // this executor and continue tracing, which could give us a longer,
621
            // more optimizeable trace (at the expense of lots of duplicated
622
            // tier two code). Instead, we choose to just end here and stitch to
623
            // the other trace, which allows side-exit traces to rejoin the
624
            // "main" trace periodically (and also helps protect us against
625
            // pathological behavior where the amount of tier two code explodes
626
            // for a medium-length, branchy code path). This seems to work
627
            // better in practice, but in the future we could be smarter about
628
            // what we do here:
629
            goto done;
630
        }
631
        assert(opcode != ENTER_EXECUTOR && opcode != EXTENDED_ARG);
632
        RESERVE_RAW(2, "_CHECK_VALIDITY");
633
        ADD_TO_TRACE(_CHECK_VALIDITY, 0, 0, target);
634
        if (!OPCODE_HAS_NO_SAVE_IP(opcode)) {
635
            RESERVE_RAW(2, "_SET_IP");
636
            ADD_TO_TRACE(_SET_IP, 0, (uintptr_t)instr, target);
637
        }
638
639
        /* Special case the first instruction,
640
         * so that we can guarantee forward progress */
641
        if (first && progress_needed) {
642
            assert(first);
643
            if (OPCODE_HAS_EXIT(opcode) || OPCODE_HAS_DEOPT(opcode)) {
644
                opcode = _PyOpcode_Deopt[opcode];
645
            }
646
            assert(!OPCODE_HAS_EXIT(opcode));
647
            assert(!OPCODE_HAS_DEOPT(opcode));
648
        }
649
650
        if (OPCODE_HAS_EXIT(opcode)) {
651
            // Make space for side exit and final _EXIT_TRACE:
652
            RESERVE_RAW(2, "_EXIT_TRACE");
653
            max_length--;
654
        }
655
        if (OPCODE_HAS_ERROR(opcode)) {
656
            // Make space for error stub and final _EXIT_TRACE:
657
            RESERVE_RAW(2, "_ERROR_POP_N");
658
            max_length--;
659
        }
660
        switch (opcode) {
661
            case POP_JUMP_IF_NONE:
662
            case POP_JUMP_IF_NOT_NONE:
663
            case POP_JUMP_IF_FALSE:
664
            case POP_JUMP_IF_TRUE:
665
            {
666
                RESERVE(1);
667
                int counter = instr[1].cache;
668
                int bitcount = _Py_popcount32(counter);
669
                int jump_likely = bitcount > 8;
670
                /* If bitcount is 8 (half the jumps were taken), halve the confidence; if it is
671
                   0 or 16 (every jump went the same way), leave it unchanged; scale proportionally in between. */
672
                if (jump_likely) {
673
                    confidence = confidence * bitcount / 16;
674
                }
675
                else {
676
                    confidence = confidence * (16 - bitcount) / 16;
677
                }
678
                uint32_t uopcode = BRANCH_TO_GUARD[opcode - POP_JUMP_IF_FALSE][jump_likely];
679
                DPRINTF(2, "%d: %s(%d): counter=%04x, bitcount=%d, likely=%d, confidence=%d, uopcode=%s\n",
680
                        target, _PyOpcode_OpName[opcode], oparg,
681
                        counter, bitcount, jump_likely, confidence, _PyUOpName(uopcode));
682
                if (confidence < CONFIDENCE_CUTOFF) {
683
                    DPRINTF(2, "Confidence too low (%d < %d)\n", confidence, CONFIDENCE_CUTOFF);
684
                    OPT_STAT_INC(low_confidence);
685
                    goto done;
686
                }
687
                _Py_CODEUNIT *next_instr = instr + 1 + _PyOpcode_Caches[_PyOpcode_Deopt[opcode]];
688
                _Py_CODEUNIT *target_instr = next_instr + oparg;
689
                if (jump_likely) {
690
                    DPRINTF(2, "Jump likely (%04x = %d bits), continue at byte offset %d\n",
691
                            instr[1].cache, bitcount, 2 * INSTR_IP(target_instr, code));
692
                    instr = target_instr;
693
                    ADD_TO_TRACE(uopcode, 0, 0, INSTR_IP(next_instr, code));
694
                    goto top;
695
                }
696
                ADD_TO_TRACE(uopcode, 0, 0, INSTR_IP(target_instr, code));
697
                break;
698
            }
699
700
            case JUMP_BACKWARD:
701
            case JUMP_BACKWARD_JIT:
702
                ADD_TO_TRACE(_CHECK_PERIODIC, 0, 0, target);
703
                _Py_FALLTHROUGH;
704
            case JUMP_BACKWARD_NO_INTERRUPT:
705
            {
706
                instr += 1 + _PyOpcode_Caches[_PyOpcode_Deopt[opcode]] - (int)oparg;
707
                if (jump_seen) {
708
                    OPT_STAT_INC(inner_loop);
709
                    DPRINTF(2, "JUMP_BACKWARD not to top ends trace\n");
710
                    goto done;
711
                }
712
                jump_seen = true;
713
                goto top;
714
            }
715
716
            case JUMP_FORWARD:
717
            {
718
                RESERVE(0);
719
                // This will emit two _SET_IP instructions; leave it to the optimizer
720
                instr += oparg;
721
                break;
722
            }
723
724
            case RESUME:
725
                /* Use a special tier 2 version of RESUME_CHECK to allow traces to
726
                 *  start with RESUME_CHECK */
727
                ADD_TO_TRACE(_TIER2_RESUME_CHECK, 0, 0, target);
728
                break;
729
730
            default:
731
            {
732
                const struct opcode_macro_expansion *expansion = &_PyOpcode_macro_expansion[opcode];
733
                if (expansion->nuops > 0) {
734
                    // Reserve space for nuops (+ _SET_IP + _EXIT_TRACE)
735
                    int nuops = expansion->nuops;
736
                    RESERVE(nuops + 1); /* One extra for exit */
737
                    int16_t last_op = expansion->uops[nuops-1].uop;
738
                    if (last_op == _RETURN_VALUE || last_op == _RETURN_GENERATOR || last_op == _YIELD_VALUE) {
739
                        // Check for trace stack underflow now:
740
                        // We can't bail e.g. in the middle of
741
                        // LOAD_CONST + _RETURN_VALUE.
742
                        if (trace_stack_depth == 0) {
743
                            DPRINTF(2, "Trace stack underflow\n");
744
                            OPT_STAT_INC(trace_stack_underflow);
745
                            return 0;
746
                        }
747
                    }
748
                    uint32_t orig_oparg = oparg;  // For OPARG_TOP/BOTTOM
749
                    for (int i = 0; i < nuops; i++) {
750
                        oparg = orig_oparg;
751
                        uint32_t uop = expansion->uops[i].uop;
752
                        uint64_t operand = 0;
753
                        // Add one to account for the actual opcode/oparg pair:
754
                        int offset = expansion->uops[i].offset + 1;
755
                        switch (expansion->uops[i].size) {
756
                            case OPARG_SIMPLE:
757
                                assert(opcode != JUMP_BACKWARD_NO_INTERRUPT && opcode != JUMP_BACKWARD);
758
                                break;
759
                            case OPARG_CACHE_1:
760
                                operand = read_u16(&instr[offset].cache);
761
                                break;
762
                            case OPARG_CACHE_2:
763
                                operand = read_u32(&instr[offset].cache);
764
                                break;
765
                            case OPARG_CACHE_4:
766
                                operand = read_u64(&instr[offset].cache);
767
                                break;
768
                            case OPARG_TOP:  // First half of super-instr
769
                                oparg = orig_oparg >> 4;
770
                                break;
771
                            case OPARG_BOTTOM:  // Second half of super-instr
772
                                oparg = orig_oparg & 0xF;
773
                                break;
774
                            case OPARG_SAVE_RETURN_OFFSET:  // op=_SAVE_RETURN_OFFSET; oparg=return_offset
775
                                oparg = offset;
776
                                assert(uop == _SAVE_RETURN_OFFSET);
777
                                break;
778
                            case OPARG_REPLACED:
779
                                uop = _PyUOp_Replacements[uop];
780
                                assert(uop != 0);
781
                                uint32_t next_inst = target + 1 + _PyOpcode_Caches[_PyOpcode_Deopt[opcode]] + (oparg > 255);
782
                                if (uop == _TIER2_RESUME_CHECK) {
783
                                    target = next_inst;
784
                                }
785
#ifdef Py_DEBUG
786
                                else {
787
                                    uint32_t jump_target = next_inst + oparg;
788
                                    assert(_Py_GetBaseCodeUnit(code, jump_target).op.code == END_FOR);
789
                                    assert(_Py_GetBaseCodeUnit(code, jump_target+1).op.code == POP_ITER);
790
                                }
791
#endif
792
                                break;
793
                            case OPERAND1_1:
794
                                assert(trace[trace_length-1].opcode == uop);
795
                                operand = read_u16(&instr[offset].cache);
796
                                trace[trace_length-1].operand1 = operand;
797
                                continue;
798
                            case OPERAND1_2:
799
                                assert(trace[trace_length-1].opcode == uop);
800
                                operand = read_u32(&instr[offset].cache);
801
                                trace[trace_length-1].operand1 = operand;
802
                                continue;
803
                            case OPERAND1_4:
804
                                assert(trace[trace_length-1].opcode == uop);
805
                                operand = read_u64(&instr[offset].cache);
806
                                trace[trace_length-1].operand1 = operand;
807
                                continue;
808
                            default:
809
                                fprintf(stderr,
810
                                        "opcode=%d, oparg=%d; nuops=%d, i=%d; size=%d, offset=%d\n",
811
                                        opcode, oparg, nuops, i,
812
                                        expansion->uops[i].size,
813
                                        expansion->uops[i].offset);
814
                                Py_FatalError("garbled expansion");
815
                        }
816
817
                        if (uop == _RETURN_VALUE || uop == _RETURN_GENERATOR || uop == _YIELD_VALUE) {
818
                            TRACE_STACK_POP();
819
                            /* Set the operand to the function or code object returned to,
820
                             * to assist optimization passes. (See _PUSH_FRAME below.)
821
                             */
822
                            if (func != NULL) {
823
                                operand = (uintptr_t)func;
824
                            }
825
                            else if (code != NULL) {
826
                                operand = (uintptr_t)code | 1;
827
                            }
828
                            else {
829
                                operand = 0;
830
                            }
831
                            ADD_TO_TRACE(uop, oparg, operand, target);
832
                            DPRINTF(2,
833
                                "Returning to %s (%s:%d) at byte offset %d\n",
834
                                PyUnicode_AsUTF8(code->co_qualname),
835
                                PyUnicode_AsUTF8(code->co_filename),
836
                                code->co_firstlineno,
837
                                2 * INSTR_IP(instr, code));
838
                            goto top;
839
                        }
840
841
                        if (uop == _PUSH_FRAME) {
842
                            assert(i + 1 == nuops);
843
                            if (opcode == FOR_ITER_GEN ||
844
                                opcode == LOAD_ATTR_PROPERTY ||
845
                                opcode == BINARY_OP_SUBSCR_GETITEM ||
846
                                opcode == SEND_GEN)
847
                            {
848
                                DPRINTF(2, "Bailing due to dynamic target\n");
849
                                OPT_STAT_INC(unknown_callee);
850
                                return 0;
851
                            }
852
                            assert(_PyOpcode_Deopt[opcode] == CALL || _PyOpcode_Deopt[opcode] == CALL_KW);
853
                            int func_version_offset =
854
                                offsetof(_PyCallCache, func_version)/sizeof(_Py_CODEUNIT)
855
                                // Add one to account for the actual opcode/oparg pair:
856
                                + 1;
857
                            uint32_t func_version = read_u32(&instr[func_version_offset].cache);
858
                            PyCodeObject *new_code = NULL;
859
                            PyFunctionObject *new_func =
860
                                _PyFunction_LookupByVersion(func_version, (PyObject **) &new_code);
861
                            DPRINTF(2, "Function: version=%#x; new_func=%p, new_code=%p\n",
862
                                    (int)func_version, new_func, new_code);
863
                            if (new_code != NULL) {
864
                                if (new_code == code) {
865
                                    // Recursive call, bail (we could be here forever).
866
                                    DPRINTF(2, "Bailing on recursive call to %s (%s:%d)\n",
867
                                            PyUnicode_AsUTF8(new_code->co_qualname),
868
                                            PyUnicode_AsUTF8(new_code->co_filename),
869
                                            new_code->co_firstlineno);
870
                                    OPT_STAT_INC(recursive_call);
871
                                    ADD_TO_TRACE(uop, oparg, 0, target);
872
                                    ADD_TO_TRACE(_EXIT_TRACE, 0, 0, 0);
873
                                    goto done;
874
                                }
875
                                if (new_code->co_version != func_version) {
876
                                    // func.__code__ was updated.
877
                                    // Perhaps it may happen again, so don't bother tracing.
878
                                    // TODO: Reason about this -- is it better to bail or not?
879
                                    DPRINTF(2, "Bailing because co_version != func_version\n");
880
                                    ADD_TO_TRACE(uop, oparg, 0, target);
881
                                    ADD_TO_TRACE(_EXIT_TRACE, 0, 0, 0);
882
                                    goto done;
883
                                }
884
                                // Increment IP to the return address
885
                                instr += _PyOpcode_Caches[_PyOpcode_Deopt[opcode]] + 1;
886
                                TRACE_STACK_PUSH();
887
                                _Py_BloomFilter_Add(dependencies, new_code);
888
                                /* Set the operand to the callee's function or code object,
889
                                 * to assist optimization passes.
890
                                 * We prefer setting it to the function (for remove_globals())
891
                                 * but if that's not available but the code is available,
892
                                 * use the code, setting the low bit so the optimizer knows.
893
                                 */
894
                                if (new_func != NULL) {
895
                                    operand = (uintptr_t)new_func;
896
                                }
897
                                else if (new_code != NULL) {
898
                                    operand = (uintptr_t)new_code | 1;
899
                                }
900
                                else {
901
                                    operand = 0;
902
                                }
903
                                ADD_TO_TRACE(uop, oparg, operand, target);
904
                                code = new_code;
905
                                func = new_func;
906
                                instr = _PyCode_CODE(code);
907
                                DPRINTF(2,
908
                                    "Continuing in %s (%s:%d) at byte offset %d\n",
909
                                    PyUnicode_AsUTF8(code->co_qualname),
910
                                    PyUnicode_AsUTF8(code->co_filename),
911
                                    code->co_firstlineno,
912
                                    2 * INSTR_IP(instr, code));
913
                                goto top;
914
                            }
915
                            DPRINTF(2, "Bail, new_code == NULL\n");
916
                            OPT_STAT_INC(unknown_callee);
917
                            return 0;
918
                        }
919
920
                        if (uop == _BINARY_OP_INPLACE_ADD_UNICODE) {
921
                            assert(i + 1 == nuops);
922
                            _Py_CODEUNIT *next_instr = instr + 1 + _PyOpcode_Caches[_PyOpcode_Deopt[opcode]];
923
                            assert(next_instr->op.code == STORE_FAST);
924
                            operand = next_instr->op.arg;
925
                            // Skip the STORE_FAST:
926
                            instr++;
927
                        }
928
929
                        // All other instructions
930
                        ADD_TO_TRACE(uop, oparg, operand, target);
931
                    }
932
                    break;
933
                }
934
                DPRINTF(2, "Unsupported opcode %s\n", _PyOpcode_OpName[opcode]);
935
                OPT_UNSUPPORTED_OPCODE(opcode);
936
                goto done;  // Break out of loop
937
            }  // End default
938
939
        }  // End switch (opcode)
940
941
        instr++;
942
        // Add cache size for opcode
943
        instr += _PyOpcode_Caches[_PyOpcode_Deopt[opcode]];
944
945
        if (opcode == CALL_LIST_APPEND) {
946
            assert(instr->op.code == POP_TOP);
947
            instr++;
948
        }
949
    top:
950
        // Jump here after _PUSH_FRAME or likely branches.
951
        first = false;
952
    }  // End for (;;)
953
954
done:
955
    while (trace_stack_depth > 0) {
956
        TRACE_STACK_POP();
957
    }
958
    assert(code == initial_code);
959
    // Skip short traces where we can't even translate a single instruction:
960
    if (first) {
961
        OPT_STAT_INC(trace_too_short);
962
        DPRINTF(2,
963
                "No trace for %s (%s:%d) at byte offset %d (no progress)\n",
964
                PyUnicode_AsUTF8(code->co_qualname),
965
                PyUnicode_AsUTF8(code->co_filename),
966
                code->co_firstlineno,
967
                2 * INSTR_IP(initial_instr, code));
968
        return 0;
969
    }
970
    if (!is_terminator(&trace[trace_length-1])) {
971
        /* Allow space for _EXIT_TRACE */
972
        max_length += 2;
973
        ADD_TO_TRACE(_EXIT_TRACE, 0, 0, target);
974
    }
975
    DPRINTF(1,
976
            "Created a proto-trace for %s (%s:%d) at byte offset %d -- length %d\n",
977
            PyUnicode_AsUTF8(code->co_qualname),
978
            PyUnicode_AsUTF8(code->co_filename),
979
            code->co_firstlineno,
980
            2 * INSTR_IP(initial_instr, code),
981
            trace_length);
982
    OPT_HIST(trace_length, trace_length_hist);
983
    return trace_length;
984
}
985
986
#undef RESERVE
987
#undef RESERVE_RAW
988
#undef INSTR_IP
989
#undef ADD_TO_TRACE
990
#undef DPRINTF
991
992
#define UNSET_BIT(array, bit) (array[(bit)>>5] &= ~(1<<((bit)&31)))
993
#define SET_BIT(array, bit) (array[(bit)>>5] |= (1<<((bit)&31)))
994
#define BIT_IS_SET(array, bit) (array[(bit)>>5] & (1<<((bit)&31)))
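These macros treat an array of 32-bit words as one flat bit vector: bit >> 5 selects the word and bit & 31 selects the bit within it. A small self-contained check of that arithmetic (the local array here is illustrative, not the bitmap the macros are actually used on):

#include <assert.h>
#include <stdint.h>

#define SET_BIT(array, bit)    (array[(bit)>>5] |= (1<<((bit)&31)))
#define BIT_IS_SET(array, bit) (array[(bit)>>5] & (1<<((bit)&31)))

int main(void)
{
    uint32_t bits[8] = {0};      /* 8 * 32 = 256 bits */
    SET_BIT(bits, 100);          /* word 3 (100 >> 5), bit 4 (100 & 31) */
    assert(bits[3] == (1u << 4));
    assert(BIT_IS_SET(bits, 100));
    assert(!BIT_IS_SET(bits, 101));
    return 0;
}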
995
996
/* Count the number of side exits (_EXIT_TRACE uops) in the buffer.
997
*/
998
static int
999
count_exits(_PyUOpInstruction *buffer, int length)
1000
{
1001
    int exit_count = 0;
1002
    for (int i = 0; i < length; i++) {
1003
        int opcode = buffer[i].opcode;
1004
        if (opcode == _EXIT_TRACE) {
1005
            exit_count++;
1006
        }
1007
    }
1008
    return exit_count;
1009
}
1010
1011
static void make_exit(_PyUOpInstruction *inst, int opcode, int target)
1012
{
1013
    inst->opcode = opcode;
1014
    inst->oparg = 0;
1015
    inst->operand0 = 0;
1016
    inst->format = UOP_FORMAT_TARGET;
1017
    inst->target = target;
1018
#ifdef Py_STATS
1019
    inst->execution_count = 0;
1020
#endif
1021
}
1022
1023
/* Convert implicit exits, errors and deopts
1024
 * into explicit ones. */
1025
static int
1026
prepare_for_execution(_PyUOpInstruction *buffer, int length)
1027
{
1028
    int32_t current_jump = -1;
1029
    int32_t current_jump_target = -1;
1030
    int32_t current_error = -1;
1031
    int32_t current_error_target = -1;
1032
    int32_t current_popped = -1;
1033
    int32_t current_exit_op = -1;
1034
    /* Leaving in NOPs slows down the interpreter and messes up the stats */
1035
    _PyUOpInstruction *copy_to = &buffer[0];
1036
    for (int i = 0; i < length; i++) {
1037
        _PyUOpInstruction *inst = &buffer[i];
1038
        if (inst->opcode != _NOP) {
1039
            if (copy_to != inst) {
1040
                *copy_to = *inst;
1041
            }
1042
            copy_to++;
1043
        }
1044
    }
1045
    length = (int)(copy_to - buffer);
1046
    int next_spare = length;
1047
    for (int i = 0; i < length; i++) {
1048
        _PyUOpInstruction *inst = &buffer[i];
1049
        int opcode = inst->opcode;
1050
        int32_t target = (int32_t)uop_get_target(inst);
1051
        uint16_t exit_flags = _PyUop_Flags[opcode] & (HAS_EXIT_FLAG | HAS_DEOPT_FLAG | HAS_PERIODIC_FLAG);
1052
        if (exit_flags) {
1053
            uint16_t exit_op = _EXIT_TRACE;
1054
            if (exit_flags & HAS_DEOPT_FLAG) {
1055
                exit_op = _DEOPT;
1056
            }
1057
            else if (exit_flags & HAS_PERIODIC_FLAG) {
1058
                exit_op = _HANDLE_PENDING_AND_DEOPT;
1059
            }
1060
            int32_t jump_target = target;
1061
            if (is_for_iter_test[opcode]) {
1062
                /* Target the POP_ITER immediately after the END_FOR,
1063
                 * leaving only the iterator on the stack. */
1064
                int extended_arg = inst->oparg > 255;
1065
                int32_t next_inst = target + 1 + INLINE_CACHE_ENTRIES_FOR_ITER + extended_arg;
1066
                jump_target = next_inst + inst->oparg + 1;
1067
            }
1068
            if (jump_target != current_jump_target || current_exit_op != exit_op) {
1069
                make_exit(&buffer[next_spare], exit_op, jump_target);
1070
                current_exit_op = exit_op;
1071
                current_jump_target = jump_target;
1072
                current_jump = next_spare;
1073
                next_spare++;
1074
            }
1075
            buffer[i].jump_target = current_jump;
1076
            buffer[i].format = UOP_FORMAT_JUMP;
1077
        }
1078
        if (_PyUop_Flags[opcode] & HAS_ERROR_FLAG) {
1079
            int popped = (_PyUop_Flags[opcode] & HAS_ERROR_NO_POP_FLAG) ?
1080
                0 : _PyUop_num_popped(opcode, inst->oparg);
1081
            if (target != current_error_target || popped != current_popped) {
1082
                current_popped = popped;
1083
                current_error = next_spare;
1084
                current_error_target = target;
1085
                make_exit(&buffer[next_spare], _ERROR_POP_N, 0);
1086
                buffer[next_spare].operand0 = target;
1087
                next_spare++;
1088
            }
1089
            buffer[i].error_target = current_error;
1090
            if (buffer[i].format == UOP_FORMAT_TARGET) {
1091
                buffer[i].format = UOP_FORMAT_JUMP;
1092
                buffer[i].jump_target = 0;
1093
            }
1094
        }
1095
        if (opcode == _JUMP_TO_TOP) {
1096
            assert(buffer[0].opcode == _START_EXECUTOR);
1097
            buffer[i].format = UOP_FORMAT_JUMP;
1098
            buffer[i].jump_target = 1;
1099
        }
1100
    }
1101
    return next_spare;
1102
}
1103
1104
/* Executor side exits */
1105
1106
static _PyExecutorObject *
1107
allocate_executor(int exit_count, int length)
1108
{
1109
    int size = exit_count*sizeof(_PyExitData) + length*sizeof(_PyUOpInstruction);
1110
    _PyExecutorObject *res = PyObject_GC_NewVar(_PyExecutorObject, &_PyUOpExecutor_Type, size);
1111
    if (res == NULL) {
1112
        return NULL;
1113
    }
1114
    res->trace = (_PyUOpInstruction *)(res->exits + exit_count);
1115
    res->code_size = length;
1116
    res->exit_count = exit_count;
1117
    return res;
1118
}
1119
1120
#ifdef Py_DEBUG
1121
1122
#define CHECK(PRED) \
1123
if (!(PRED)) { \
1124
    printf(#PRED " at %d\n", i); \
1125
    assert(0); \
1126
}
1127
1128
static int
1129
target_unused(int opcode)
1130
{
1131
    return (_PyUop_Flags[opcode] & (HAS_ERROR_FLAG | HAS_EXIT_FLAG | HAS_DEOPT_FLAG)) == 0;
1132
}
1133
1134
static void
1135
sanity_check(_PyExecutorObject *executor)
1136
{
1137
    for (uint32_t i = 0; i < executor->exit_count; i++) {
1138
        _PyExitData *exit = &executor->exits[i];
1139
        CHECK(exit->target < (1 << 25));
1140
    }
1141
    bool ended = false;
1142
    uint32_t i = 0;
1143
    CHECK(executor->trace[0].opcode == _START_EXECUTOR || executor->trace[0].opcode == _COLD_EXIT);
1144
    for (; i < executor->code_size; i++) {
1145
        const _PyUOpInstruction *inst = &executor->trace[i];
1146
        uint16_t opcode = inst->opcode;
1147
        CHECK(opcode <= MAX_UOP_ID);
1148
        CHECK(_PyOpcode_uop_name[opcode] != NULL);
1149
        switch(inst->format) {
1150
            case UOP_FORMAT_TARGET:
1151
                CHECK(target_unused(opcode));
1152
                break;
1153
            case UOP_FORMAT_JUMP:
1154
                CHECK(inst->jump_target < executor->code_size);
1155
                break;
1156
        }
1157
        if (_PyUop_Flags[opcode] & HAS_ERROR_FLAG) {
1158
            CHECK(inst->format == UOP_FORMAT_JUMP);
1159
            CHECK(inst->error_target < executor->code_size);
1160
        }
1161
        if (is_terminator(inst)) {
1162
            ended = true;
1163
            i++;
1164
            break;
1165
        }
1166
    }
1167
    CHECK(ended);
1168
    for (; i < executor->code_size; i++) {
1169
        const _PyUOpInstruction *inst = &executor->trace[i];
1170
        uint16_t opcode = inst->opcode;
1171
        CHECK(
1172
            opcode == _DEOPT ||
1173
            opcode == _HANDLE_PENDING_AND_DEOPT ||
1174
            opcode == _EXIT_TRACE ||
1175
            opcode == _ERROR_POP_N);
1176
    }
1177
}
1178
1179
#undef CHECK
1180
#endif
1181
1182
/* Makes an executor from a buffer of uops.
1183
 * NOPs have already been removed by prepare_for_execution(), so the
1184
 * buffer is copied verbatim; the copy walks backwards so that each
1185
 * _EXIT_TRACE can be wired up to its exit stub.
1186
 */
1187
static _PyExecutorObject *
1188
make_executor_from_uops(_PyUOpInstruction *buffer, int length, const _PyBloomFilter *dependencies)
1189
{
1190
    int exit_count = count_exits(buffer, length);
1191
    _PyExecutorObject *executor = allocate_executor(exit_count, length);
1192
    if (executor == NULL) {
1193
        return NULL;
1194
    }
1195
1196
    /* Initialize exits */
1197
    _PyExecutorObject *cold = _PyExecutor_GetColdExecutor();
1198
    for (int i = 0; i < exit_count; i++) {
1199
        executor->exits[i].index = i;
1200
        executor->exits[i].temperature = initial_temperature_backoff_counter();
1201
        executor->exits[i].executor = cold;
1202
    }
1203
    int next_exit = exit_count-1;
1204
    _PyUOpInstruction *dest = (_PyUOpInstruction *)&executor->trace[length];
1205
    assert(buffer[0].opcode == _START_EXECUTOR);
1206
    buffer[0].operand0 = (uint64_t)executor;
1207
    for (int i = length-1; i >= 0; i--) {
1208
        int opcode = buffer[i].opcode;
1209
        dest--;
1210
        *dest = buffer[i];
1211
        assert(opcode != _POP_JUMP_IF_FALSE && opcode != _POP_JUMP_IF_TRUE);
1212
        if (opcode == _EXIT_TRACE) {
1213
            _PyExitData *exit = &executor->exits[next_exit];
1214
            exit->target = buffer[i].target;
1215
            dest->operand0 = (uint64_t)exit;
1216
            next_exit--;
1217
        }
1218
    }
1219
    assert(next_exit == -1);
1220
    assert(dest == executor->trace);
1221
    assert(dest->opcode == _START_EXECUTOR);
1222
    _Py_ExecutorInit(executor, dependencies);
1223
#ifdef Py_DEBUG
1224
    char *python_lltrace = Py_GETENV("PYTHON_LLTRACE");
1225
    int lltrace = 0;
1226
    if (python_lltrace != NULL && *python_lltrace >= '0') {
1227
        lltrace = *python_lltrace - '0';  // TODO: Parse an int and all that
1228
    }
1229
    if (lltrace >= 2) {
1230
        printf("Optimized trace (length %d):\n", length);
1231
        for (int i = 0; i < length; i++) {
1232
            printf("%4d OPTIMIZED: ", i);
1233
            _PyUOpPrint(&executor->trace[i]);
1234
            printf("\n");
1235
        }
1236
    }
1237
    sanity_check(executor);
1238
#endif
1239
#ifdef _Py_JIT
1240
    executor->jit_code = NULL;
1241
    executor->jit_size = 0;
1242
    // This is initialized to true so we can prevent the executor
1243
    // from being immediately detected as cold and invalidated.
1244
    executor->vm_data.warm = true;
1245
    if (_PyJIT_Compile(executor, executor->trace, length)) {
1246
        Py_DECREF(executor);
1247
        return NULL;
1248
    }
1249
#endif
1250
    _PyObject_GC_TRACK(executor);
1251
    return executor;
1252
}
1253
1254
#ifdef Py_STATS
1255
/* Returns the effective trace length.
1256
 * Ignores NOPs and trailing exit and error handling. */
1257
int effective_trace_length(_PyUOpInstruction *buffer, int length)
1258
{
1259
    int nop_count = 0;
1260
    for (int i = 0; i < length; i++) {
1261
        int opcode = buffer[i].opcode;
1262
        if (opcode == _NOP) {
1263
            nop_count++;
1264
        }
1265
        if (is_terminator(&buffer[i])) {
1266
            return i+1-nop_count;
1267
        }
1268
    }
1269
    Py_FatalError("No terminating instruction");
1270
    Py_UNREACHABLE();
1271
}
1272
#endif
1273
1274
static int
1275
uop_optimize(
1276
    _PyInterpreterFrame *frame,
1277
    _Py_CODEUNIT *instr,
1278
    _PyExecutorObject **exec_ptr,
1279
    int curr_stackentries,
1280
    bool progress_needed)
1281
{
1282
    _PyBloomFilter dependencies;
1283
    _Py_BloomFilter_Init(&dependencies);
1284
    _PyUOpInstruction buffer[UOP_MAX_TRACE_LENGTH];
1285
    OPT_STAT_INC(attempts);
1286
    int length = translate_bytecode_to_trace(frame, instr, buffer, UOP_MAX_TRACE_LENGTH, &dependencies, progress_needed);
1287
    if (length <= 0) {
1288
        // Error or nothing translated
1289
        return length;
1290
    }
1291
    assert(length < UOP_MAX_TRACE_LENGTH);
1292
    OPT_STAT_INC(traces_created);
1293
    char *env_var = Py_GETENV("PYTHON_UOPS_OPTIMIZE");
1294
    if (env_var == NULL || *env_var == '\0' || *env_var > '0') {
1295
        length = _Py_uop_analyze_and_optimize(frame, buffer,
1296
                                           length,
1297
                                           curr_stackentries, &dependencies);
1298
        if (length <= 0) {
1299
            return length;
1300
        }
1301
    }
1302
    assert(length < UOP_MAX_TRACE_LENGTH);
1303
    assert(length >= 1);
1304
    /* Fix up */
1305
    for (int pc = 0; pc < length; pc++) {
1306
        int opcode = buffer[pc].opcode;
1307
        int oparg = buffer[pc].oparg;
1308
        if (oparg < _PyUop_Replication[opcode].stop && oparg >= _PyUop_Replication[opcode].start) {
1309
            buffer[pc].opcode = opcode + oparg + 1 - _PyUop_Replication[opcode].start;
1310
            assert(strncmp(_PyOpcode_uop_name[buffer[pc].opcode], _PyOpcode_uop_name[opcode], strlen(_PyOpcode_uop_name[opcode])) == 0);
1311
        }
1312
        else if (is_terminator(&buffer[pc])) {
1313
            break;
1314
        }
1315
        assert(_PyOpcode_uop_name[buffer[pc].opcode]);
1316
    }
1317
    OPT_HIST(effective_trace_length(buffer, length), optimized_trace_length_hist);
1318
    length = prepare_for_execution(buffer, length);
1319
    assert(length <= UOP_MAX_TRACE_LENGTH);
1320
    _PyExecutorObject *executor = make_executor_from_uops(buffer, length,  &dependencies);
1321
    if (executor == NULL) {
1322
        return -1;
1323
    }
1324
    assert(length <= UOP_MAX_TRACE_LENGTH);
1325
    *exec_ptr = executor;
1326
    return 1;
1327
}
1328
1329
1330
/*****************************************
1331
 *        Executor management
1332
 ****************************************/
1333
1334
/* We use a bloomfilter with k = 6, m = 256
1335
 * The choice of k and the following constants
1336
 * could do with a more rigorous analysis,
1337
 * but here is a simple analysis:
1338
 *
1339
 * We want to keep the false positive rate low.
1340
 * For n = 5 (a trace depends on 5 objects),
1341
 * we expect 30 bits set, giving a false positive
1342
 * rate of (30/256)**6 == 2.5e-6 which is plenty
1343
 * good enough.
1344
 *
1345
 * However with n = 10 we expect 60 bits set (worst case),
1346
 * giving a false positive of (60/256)**6 == 0.0001
1347
 *
1348
 * We choose k = 6, rather than a higher number as
1349
 * it means the false positive rate grows slower for high n.
1350
 *
1351
 * n = 5, k = 6 => fp = 2.6e-6
1352
 * n = 5, k = 8 => fp = 3.5e-7
1353
 * n = 10, k = 6 => fp = 1.6e-4
1354
 * n = 10, k = 8 => fp = 0.9e-4
1355
 * n = 15, k = 6 => fp = 0.18%
1356
 * n = 15, k = 8 => fp = 0.23%
1357
 * n = 20, k = 6 => fp = 1.1%
1358
 * n = 20, k = 8 => fp = 2.3%
1359
 *
1360
 * The above analysis assumes perfect hash functions,
1361
 * but those don't exist, so the real false positive
1362
 * rates may be worse.
1363
 */
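A quick way to reproduce the rates quoted above: with n dependencies, k = 6 hash bits each and m = 256 bits, at most n*k distinct bits are set, and the false positive rate is roughly (set_bits/m)**k. A standalone back-of-the-envelope check (compile with -lm):

#include <math.h>
#include <stdio.h>

int main(void)
{
    const double m = 256.0, k = 6.0;
    for (int n = 5; n <= 20; n += 5) {
        double set_bits = n * k;             /* worst case: no two hash bits coincide */
        double fp = pow(set_bits / m, k);
        printf("n = %2d  ->  fp ~= %g\n", n, fp);   /* roughly 2.6e-06, 1.7e-04, 0.0019, 0.011 */
    }
    return 0;
}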
1364
1365
#define K 6
1366
1367
#define SEED 20221211
1368
1369
/* TO DO -- Use more modern hash functions with better distribution of bits */
1370
static uint64_t
1371
address_to_hash(void *ptr) {
1372
    assert(ptr != NULL);
1373
    uint64_t uhash = SEED;
1374
    uintptr_t addr = (uintptr_t)ptr;
1375
    for (int i = 0; i < SIZEOF_VOID_P; i++) {
1376
        uhash ^= addr & 255;
1377
        uhash *= (uint64_t)PyHASH_MULTIPLIER;
1378
        addr >>= 8;
1379
    }
1380
    return uhash;
1381
}
1382
1383
void
1384
_Py_BloomFilter_Init(_PyBloomFilter *bloom)
1385
{
1386
    for (int i = 0; i < _Py_BLOOM_FILTER_WORDS; i++) {
1387
        bloom->bits[i] = 0;
1388
    }
1389
}
1390
1391
/* We want K hash functions that each set 1 bit.
1392
 * A hash function that sets 1 bit in M bits can be trivially
1393
 * derived from a log2(M) bit hash function.
1394
 * So we extract 8 (log2(256)) bits at a time from
1395
 * the 64bit hash. */
1396
void
1397
_Py_BloomFilter_Add(_PyBloomFilter *bloom, void *ptr)
1398
{
1399
    uint64_t hash = address_to_hash(ptr);
1400
    assert(K <= 8);
1401
    for (int i = 0; i < K; i++) {
1402
        uint8_t bits = hash & 255;
1403
        bloom->bits[bits >> 5] |= (1 << (bits&31));
1404
        hash >>= 8;
1405
    }
1406
}
1407
1408
static bool
1409
bloom_filter_may_contain(_PyBloomFilter *bloom, _PyBloomFilter *hashes)
1410
{
1411
    for (int i = 0; i < _Py_BLOOM_FILTER_WORDS; i++) {
1412
        if ((bloom->bits[i] & hashes->bits[i]) != hashes->bits[i]) {
1413
            return false;
1414
        }
1415
    }
1416
    return true;
1417
}
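/* Illustrative sketch only -- the helper name is hypothetical and not part
 * of optimizer.c. It shows how the three routines above combine for a
 * dependency check (this mirrors _Py_Executors_InvalidateDependency below). */
static bool
executor_may_depend_on(_PyExecutorObject *executor, void *obj)
{
    _PyBloomFilter obj_filter;
    _Py_BloomFilter_Init(&obj_filter);
    _Py_BloomFilter_Add(&obj_filter, obj);
    /* True iff every bit set for obj is also set in the executor's filter. */
    return bloom_filter_may_contain(&executor->vm_data.bloom, &obj_filter);
}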
1418
1419
static void
1420
link_executor(_PyExecutorObject *executor)
1421
{
1422
    PyInterpreterState *interp = _PyInterpreterState_GET();
1423
    _PyExecutorLinkListNode *links = &executor->vm_data.links;
1424
    _PyExecutorObject *head = interp->executor_list_head;
1425
    if (head == NULL) {
1426
        interp->executor_list_head = executor;
1427
        links->previous = NULL;
1428
        links->next = NULL;
1429
    }
1430
    else {
1431
        assert(head->vm_data.links.previous == NULL);
1432
        links->previous = NULL;
1433
        links->next = head;
1434
        head->vm_data.links.previous = executor;
1435
        interp->executor_list_head = executor;
1436
    }
1437
    executor->vm_data.linked = true;
1438
    /* executor_list_head must be first in list */
1439
    assert(interp->executor_list_head->vm_data.links.previous == NULL);
1440
}
1441
1442
static void
1443
unlink_executor(_PyExecutorObject *executor)
1444
{
1445
    if (!executor->vm_data.linked) {
1446
        return;
1447
    }
1448
    _PyExecutorLinkListNode *links = &executor->vm_data.links;
1449
    assert(executor->vm_data.valid);
1450
    _PyExecutorObject *next = links->next;
1451
    _PyExecutorObject *prev = links->previous;
1452
    if (next != NULL) {
1453
        next->vm_data.links.previous = prev;
1454
    }
1455
    if (prev != NULL) {
1456
        prev->vm_data.links.next = next;
1457
    }
1458
    else {
1459
        // prev == NULL implies that executor is the list head
1460
        PyInterpreterState *interp = PyInterpreterState_Get();
1461
        assert(interp->executor_list_head == executor);
1462
        interp->executor_list_head = next;
1463
    }
1464
    executor->vm_data.linked = false;
1465
}
1466
1467
/* This must be called by optimizers before using the executor */
1468
void
1469
_Py_ExecutorInit(_PyExecutorObject *executor, const _PyBloomFilter *dependency_set)
1470
{
1471
    executor->vm_data.valid = true;
1472
    for (int i = 0; i < _Py_BLOOM_FILTER_WORDS; i++) {
1473
        executor->vm_data.bloom.bits[i] = dependency_set->bits[i];
1474
    }
1475
    link_executor(executor);
1476
}
1477
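/* Return the interpreter's shared "cold" executor, creating it on first use.
 * Its trace is a single _COLD_EXIT uop and it is made immortal; cleared side
 * exits point at it (see _PyExecutor_ClearExit and executor_clear below). */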
1478
_PyExecutorObject *
1479
_PyExecutor_GetColdExecutor(void)
1480
{
1481
    PyInterpreterState *interp = _PyInterpreterState_GET();
1482
    if (interp->cold_executor != NULL) {
1483
        return interp->cold_executor;
1484
    }
1485
    _PyExecutorObject *cold = allocate_executor(0, 1);
1486
    if (cold == NULL) {
1487
        Py_FatalError("Cannot allocate core JIT code");
1488
    }
1489
    ((_PyUOpInstruction *)cold->trace)->opcode = _COLD_EXIT;
1490
#ifdef _Py_JIT
1491
    cold->jit_code = NULL;
1492
    cold->jit_size = 0;
1493
    // This is initialized to true so we can prevent the executor
1494
    // from being immediately detected as cold and invalidated.
1495
    cold->vm_data.warm = true;
1496
    if (_PyJIT_Compile(cold, cold->trace, 1)) {
1497
        Py_DECREF(cold);
1498
        Py_FatalError("Cannot allocate core JIT code");
1499
    }
1500
#endif
1501
    _Py_SetImmortal((PyObject *)cold);
1502
    interp->cold_executor = cold;
1503
    return cold;
1504
}
1505
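/* Reset a side exit so that it targets the cold executor, dropping the
 * reference to whatever executor it previously pointed at. */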
1506
void
1507
_PyExecutor_ClearExit(_PyExitData *exit)
1508
{
1509
    if (exit == NULL) {
1510
        return;
1511
    }
1512
    _PyExecutorObject *old = exit->executor;
1513
    exit->executor = _PyExecutor_GetColdExecutor();
1514
    Py_DECREF(old);
1515
}
1516
1517
/* Detaches the executor from the code object (if any) that
1518
 * holds a reference to it */
1519
void
1520
_Py_ExecutorDetach(_PyExecutorObject *executor)
1521
{
1522
    PyCodeObject *code = executor->vm_data.code;
1523
    if (code == NULL) {
1524
        return;
1525
    }
1526
    _Py_CODEUNIT *instruction = &_PyCode_CODE(code)[executor->vm_data.index];
1527
    assert(instruction->op.code == ENTER_EXECUTOR);
1528
    int index = instruction->op.arg;
1529
    assert(code->co_executors->executors[index] == executor);
1530
    instruction->op.code = executor->vm_data.opcode;
1531
    instruction->op.arg = executor->vm_data.oparg;
1532
    executor->vm_data.code = NULL;
1533
    code->co_executors->executors[index] = NULL;
1534
    Py_DECREF(executor);
1535
}
1536
1537
static int
1538
executor_clear(PyObject *op)
1539
{
1540
    _PyExecutorObject *executor = _PyExecutorObject_CAST(op);
1541
    if (!executor->vm_data.valid) {
1542
        return 0;
1543
    }
1544
    assert(executor->vm_data.valid == 1);
1545
    unlink_executor(executor);
1546
    executor->vm_data.valid = 0;
1547
1548
    /* It is possible for an executor to form a reference
1549
     * cycle with itself, so decref'ing a side exit could
1550
     * free the executor unless we hold a strong reference to it
1551
     */
1552
    _PyExecutorObject *cold = _PyExecutor_GetColdExecutor();
1553
    Py_INCREF(executor);
1554
    for (uint32_t i = 0; i < executor->exit_count; i++) {
1555
        executor->exits[i].temperature = initial_unreachable_backoff_counter();
1556
        _PyExecutorObject *e = executor->exits[i].executor;
1557
        executor->exits[i].executor = cold;
1558
        Py_DECREF(e);
1559
    }
1560
    _Py_ExecutorDetach(executor);
1561
    Py_DECREF(executor);
1562
    return 0;
1563
}
1564
1565
void
1566
_Py_Executor_DependsOn(_PyExecutorObject *executor, void *obj)
1567
{
1568
    assert(executor->vm_data.valid);
1569
    _Py_BloomFilter_Add(&executor->vm_data.bloom, obj);
1570
}
1571
1572
/* Invalidate all executors that depend on `obj`
1573
 * May cause other executors to be invalidated as well
1574
 */
1575
void
1576
_Py_Executors_InvalidateDependency(PyInterpreterState *interp, void *obj, int is_invalidation)
1577
{
1578
    _PyBloomFilter obj_filter;
1579
    _Py_BloomFilter_Init(&obj_filter);
1580
    _Py_BloomFilter_Add(&obj_filter, obj);
1581
    /* Walk the list of executors */
1582
    /* TO DO -- Use a tree to avoid traversing as many objects */
1583
    PyObject *invalidate = PyList_New(0);
1584
    if (invalidate == NULL) {
1585
        goto error;
1586
    }
1587
    /* Clearing an executor can deallocate others, so we need to make a list of
1588
     * executors to invalidate first */
1589
    for (_PyExecutorObject *exec = interp->executor_list_head; exec != NULL;) {
1590
        assert(exec->vm_data.valid);
1591
        _PyExecutorObject *next = exec->vm_data.links.next;
1592
        if (bloom_filter_may_contain(&exec->vm_data.bloom, &obj_filter) &&
1593
            PyList_Append(invalidate, (PyObject *)exec))
1594
        {
1595
            goto error;
1596
        }
1597
        exec = next;
1598
    }
1599
    for (Py_ssize_t i = 0; i < PyList_GET_SIZE(invalidate); i++) {
1600
        PyObject *exec = PyList_GET_ITEM(invalidate, i);
1601
        executor_clear(exec);
1602
        if (is_invalidation) {
1603
            OPT_STAT_INC(executors_invalidated);
1604
        }
1605
    }
1606
    Py_DECREF(invalidate);
1607
    return;
1608
error:
1609
    PyErr_Clear();
1610
    Py_XDECREF(invalidate);
1611
    // If we're truly out of memory, wiping out everything is a fine fallback:
1612
    _Py_Executors_InvalidateAll(interp, is_invalidation);
1613
}
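/* Hedged usage note: the trigger below is an assumption, not taken from this
 * file. When an object that traces may depend on is mutated -- for example an
 * object recorded via _Py_Executor_DependsOn -- the VM is expected to call
 * something like:
 *
 *     _Py_Executors_InvalidateDependency(interp, (void *)obj, 1);
 *
 * Only executors whose bloom filter may contain `obj` are cleared. */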
1614
1615
/* Invalidate all executors */
1616
void
1617
_Py_Executors_InvalidateAll(PyInterpreterState *interp, int is_invalidation)
1618
{
1619
    while (interp->executor_list_head) {
1620
        _PyExecutorObject *executor = interp->executor_list_head;
1621
        assert(executor->vm_data.valid == 1 && executor->vm_data.linked == 1);
1622
        if (executor->vm_data.code) {
1623
            // Clear the entire code object so its co_executors array can be freed:
1624
            _PyCode_Clear_Executors(executor->vm_data.code);
1625
        }
1626
        else {
1627
            executor_clear((PyObject *)executor);
1628
        }
1629
        if (is_invalidation) {
1630
            OPT_STAT_INC(executors_invalidated);
1631
        }
1632
    }
1633
}
1634
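/* Invalidate executors that have not been marked warm since the previous
 * sweep, and reset the warm flag on the rest so that the next sweep can
 * detect them. */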
1635
void
1636
_Py_Executors_InvalidateCold(PyInterpreterState *interp)
1637
{
1638
    /* Walk the list of executors */
1639
    /* TO DO -- Use a tree to avoid traversing as many objects */
1640
    PyObject *invalidate = PyList_New(0);
1641
    if (invalidate == NULL) {
1642
        goto error;
1643
    }
1644
1645
    /* Clearing an executor can deallocate others, so we need to make a list of
1646
     * executors to invalidate first */
1647
    for (_PyExecutorObject *exec = interp->executor_list_head; exec != NULL;) {
1648
        assert(exec->vm_data.valid);
1649
        _PyExecutorObject *next = exec->vm_data.links.next;
1650
1651
        if (!exec->vm_data.warm && PyList_Append(invalidate, (PyObject *)exec) < 0) {
1652
            goto error;
1653
        }
1654
        else {
1655
            exec->vm_data.warm = false;
1656
        }
1657
1658
        exec = next;
1659
    }
1660
    for (Py_ssize_t i = 0; i < PyList_GET_SIZE(invalidate); i++) {
1661
        PyObject *exec = PyList_GET_ITEM(invalidate, i);
1662
        executor_clear(exec);
1663
    }
1664
    Py_DECREF(invalidate);
1665
    return;
1666
error:
1667
    PyErr_Clear();
1668
    Py_XDECREF(invalidate);
1669
    // If we're truly out of memory, wiping out everything is a fine fallback
1670
    _Py_Executors_InvalidateAll(interp, 0);
1671
}
1672
1673
static void
1674
write_str(PyObject *str, FILE *out)
1675
{
1676
    // Encode the Unicode object as UTF-8
1677
    PyObject *encoded_obj = PyUnicode_AsEncodedString(str, "utf8", "strict");
1678
    if (encoded_obj == NULL) {
1679
        PyErr_Clear();
1680
        return;
1681
    }
1682
    const char *encoded_str = PyBytes_AsString(encoded_obj);
1683
    Py_ssize_t encoded_size = PyBytes_Size(encoded_obj);
1684
    fwrite(encoded_str, 1, encoded_size, out);
1685
    Py_DECREF(encoded_obj);
1686
}
1687
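/* Return the source line of the ENTER_EXECUTOR instruction that refers to
 * `executor` in `code`, or -1 if there is no such instruction. */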
1688
static int
1689
find_line_number(PyCodeObject *code, _PyExecutorObject *executor)
1690
{
1691
    int code_len = (int)Py_SIZE(code);
1692
    for (int i = 0; i < code_len; i++) {
1693
        _Py_CODEUNIT *instr = &_PyCode_CODE(code)[i];
1694
        int opcode = instr->op.code;
1695
        if (opcode == ENTER_EXECUTOR) {
1696
            _PyExecutorObject *exec = code->co_executors->executors[instr->op.arg];
1697
            if (exec == executor) {
1698
                return PyCode_Addr2Line(code, i*2);
1699
            }
1700
        }
1701
        i += _PyOpcode_Caches[_Py_GetBaseCodeUnit(code, i).op.code];
1702
    }
1703
    return -1;
1704
}
1705
1706
/* Writes the node and outgoing edges for a single tracelet in graphviz format.
1707
 * Each tracelet is presented as a table of the uops it contains.
1708
 * If Py_STATS is enabled, execution counts are included.
1709
 *
1710
 * https://graphviz.readthedocs.io/en/stable/manual.html
1711
 * https://graphviz.org/gallery/
1712
 */
1713
static void
1714
executor_to_gv(_PyExecutorObject *executor, FILE *out)
1715
{
1716
    PyCodeObject *code = executor->vm_data.code;
1717
    fprintf(out, "executor_%p [\n", executor);
1718
    fprintf(out, "    shape = none\n");
1719
1720
    /* Write the HTML table for the uops */
1721
    fprintf(out, "    label = <<table border=\"0\" cellspacing=\"0\">\n");
1722
    fprintf(out, "        <tr><td port=\"start\" border=\"1\" ><b>Executor</b></td></tr>\n");
1723
    if (code == NULL) {
1724
        fprintf(out, "        <tr><td border=\"1\" >No code object</td></tr>\n");
1725
    }
1726
    else {
1727
        fprintf(out, "        <tr><td  border=\"1\" >");
1728
        write_str(code->co_qualname, out);
1729
        int line = find_line_number(code, executor);
1730
        fprintf(out, ": %d</td></tr>\n", line);
1731
    }
1732
    for (uint32_t i = 0; i < executor->code_size; i++) {
1733
        /* Write row for uop.
1734
         * The `port` is a marker so that outgoing edges can
1735
         * be placed correctly. If a row is marked `port="i17"`,
1736
         * then the outgoing edge is `{EXEC_NAME}:i17 -> {TARGET}`
1737
         * https://graphviz.readthedocs.io/en/stable/manual.html#node-ports-compass
1738
         */
1739
        _PyUOpInstruction const *inst = &executor->trace[i];
1740
        const char *opname = _PyOpcode_uop_name[inst->opcode];
1741
#ifdef Py_STATS
1742
        fprintf(out, "        <tr><td port=\"i%d\" border=\"1\" >%s -- %" PRIu64 "</td></tr>\n", i, opname, inst->execution_count);
1743
#else
1744
        fprintf(out, "        <tr><td port=\"i%d\" border=\"1\" >%s</td></tr>\n", i, opname);
1745
#endif
1746
        if (inst->opcode == _EXIT_TRACE || inst->opcode == _JUMP_TO_TOP) {
1747
            break;
1748
        }
1749
    }
1750
    fprintf(out, "    </table>>\n");
1751
    fprintf(out, "]\n\n");
1752
1753
    /* Write all the outgoing edges */
1754
    for (uint32_t i = 0; i < executor->code_size; i++) {
1755
        _PyUOpInstruction const *inst = &executor->trace[i];
1756
        uint16_t flags = _PyUop_Flags[inst->opcode];
1757
        _PyExitData *exit = NULL;
1758
        if (inst->opcode == _EXIT_TRACE) {
1759
            exit = (_PyExitData *)inst->operand0;
1760
        }
1761
        else if (flags & HAS_EXIT_FLAG) {
1762
            assert(inst->format == UOP_FORMAT_JUMP);
1763
            _PyUOpInstruction const *exit_inst = &executor->trace[inst->jump_target];
1764
            assert(exit_inst->opcode == _EXIT_TRACE);
1765
            exit = (_PyExitData *)exit_inst->operand0;
1766
        }
1767
        if (exit != NULL && exit->executor != NULL) {
1768
            fprintf(out, "executor_%p:i%d -> executor_%p:start\n", executor, i, exit->executor);
1769
        }
1770
        if (inst->opcode == _EXIT_TRACE || inst->opcode == _JUMP_TO_TOP) {
1771
            break;
1772
        }
1773
    }
1774
}
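/* Hypothetical example of the output emitted above for one tracelet (the
 * addresses, qualname, line number and uop names are made up for
 * illustration):
 *
 *   executor_0x7f12340000 [
 *       shape = none
 *       label = <<table border="0" cellspacing="0">
 *           <tr><td port="start" border="1" ><b>Executor</b></td></tr>
 *           <tr><td  border="1" >my_func: 17</td></tr>
 *           <tr><td port="i0" border="1" >_SOME_UOP</td></tr>
 *           <tr><td port="i1" border="1" >_EXIT_TRACE</td></tr>
 *       </table>>
 *   ]
 *
 *   executor_0x7f12340000:i1 -> executor_0x7f56780000:start
 */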
1775
1776
/* Write the graph of all the live tracelets in graphviz format. */
1777
int
1778
_PyDumpExecutors(FILE *out)
1779
{
1780
    fprintf(out, "digraph ideal {\n\n");
1781
    fprintf(out, "    rankdir = \"LR\"\n\n");
1782
    PyInterpreterState *interp = PyInterpreterState_Get();
1783
    for (_PyExecutorObject *exec = interp->executor_list_head; exec != NULL;) {
1784
        executor_to_gv(exec, out);
1785
        exec = exec->vm_data.links.next;
1786
    }
1787
    fprintf(out, "}\n\n");
1788
    return 0;
1789
}
1790
1791
#else
1792
1793
int
1794
_PyDumpExecutors(FILE *out)
1795
0
{
1796
0
    PyErr_SetString(PyExc_NotImplementedError, "No JIT available");
1797
0
    return -1;
1798
0
}
1799
1800
void
1801
_PyExecutor_Free(struct _PyExecutorObject *self)
1802
0
{
1803
    /* This should never be called */
1804
0
    Py_UNREACHABLE();
1805
0
}
1806
1807
#endif /* _Py_TIER2 */