Coverage Report

Created: 2025-10-10 06:33

/src/cpython/Python/optimizer.c
Line
Source
1
#include "Python.h"
2
3
#ifdef _Py_TIER2
4
5
#include "opcode.h"
6
#include "pycore_interp.h"
7
#include "pycore_backoff.h"
8
#include "pycore_bitutils.h"        // _Py_popcount32()
9
#include "pycore_code.h"            // _Py_GetBaseCodeUnit
10
#include "pycore_function.h"        // _PyFunction_LookupByVersion()
11
#include "pycore_interpframe.h"
12
#include "pycore_object.h"          // _PyObject_GC_UNTRACK()
13
#include "pycore_opcode_metadata.h" // _PyOpcode_OpName[]
14
#include "pycore_opcode_utils.h"  // MAX_REAL_OPCODE
15
#include "pycore_optimizer.h"     // _Py_uop_analyze_and_optimize()
16
#include "pycore_pystate.h"       // _PyInterpreterState_GET()
17
#include "pycore_tuple.h" // _PyTuple_FromArraySteal
18
#include "pycore_unicodeobject.h" // _PyUnicode_FromASCII
19
#include "pycore_uop_ids.h"
20
#include "pycore_jit.h"
21
#include <stdbool.h>
22
#include <stdint.h>
23
#include <stddef.h>
24
25
#define NEED_OPCODE_METADATA
26
#include "pycore_uop_metadata.h" // Uop tables
27
#undef NEED_OPCODE_METADATA
28
29
#define MAX_EXECUTORS_SIZE 256
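/* ENTER_EXECUTOR stores the executor's index in its 8-bit oparg
 * (see get_index_for_executor() and insert_executor() below), so a
 * single code object can hold at most 256 executors. */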
30
31
#define _PyExecutorObject_CAST(op)  ((_PyExecutorObject *)(op))
32
33
static bool
34
has_space_for_executor(PyCodeObject *code, _Py_CODEUNIT *instr)
35
{
36
    if (instr->op.code == ENTER_EXECUTOR) {
37
        return true;
38
    }
39
    if (code->co_executors == NULL) {
40
        return true;
41
    }
42
    return code->co_executors->size < MAX_EXECUTORS_SIZE;
43
}
44
45
static int32_t
46
get_index_for_executor(PyCodeObject *code, _Py_CODEUNIT *instr)
47
{
48
    if (instr->op.code == ENTER_EXECUTOR) {
49
        return instr->op.arg;
50
    }
51
    _PyExecutorArray *old = code->co_executors;
52
    int size = 0;
53
    int capacity = 0;
54
    if (old != NULL) {
55
        size = old->size;
56
        capacity = old->capacity;
57
        assert(size < MAX_EXECUTORS_SIZE);
58
    }
59
    assert(size <= capacity);
60
    if (size == capacity) {
61
        /* Array is full. Grow array */
62
        int new_capacity = capacity ? capacity * 2 : 4;
63
        _PyExecutorArray *new = PyMem_Realloc(
64
            old,
65
            offsetof(_PyExecutorArray, executors) +
66
            new_capacity * sizeof(_PyExecutorObject *));
67
        if (new == NULL) {
68
            return -1;
69
        }
70
        new->capacity = new_capacity;
71
        new->size = size;
72
        code->co_executors = new;
73
    }
74
    assert(size < code->co_executors->capacity);
75
    return size;
76
}
77
78
static void
79
insert_executor(PyCodeObject *code, _Py_CODEUNIT *instr, int index, _PyExecutorObject *executor)
80
{
81
    Py_INCREF(executor);
82
    if (instr->op.code == ENTER_EXECUTOR) {
83
        assert(index == instr->op.arg);
84
        _Py_ExecutorDetach(code->co_executors->executors[index]);
85
    }
86
    else {
87
        assert(code->co_executors->size == index);
88
        assert(code->co_executors->capacity > index);
89
        code->co_executors->size++;
90
    }
91
    executor->vm_data.opcode = instr->op.code;
92
    executor->vm_data.oparg = instr->op.arg;
93
    executor->vm_data.code = code;
94
    executor->vm_data.index = (int)(instr - _PyCode_CODE(code));
95
    code->co_executors->executors[index] = executor;
96
    assert(index < MAX_EXECUTORS_SIZE);
97
    instr->op.code = ENTER_EXECUTOR;
98
    instr->op.arg = index;
99
}
100
101
static _PyExecutorObject *
102
make_executor_from_uops(_PyUOpInstruction *buffer, int length, const _PyBloomFilter *dependencies);
103
104
static int
105
uop_optimize(_PyInterpreterFrame *frame, _Py_CODEUNIT *instr,
106
             _PyExecutorObject **exec_ptr, int curr_stackentries,
107
             bool progress_needed);
108
109
/* Returns 1 if optimized, 0 if not optimized, and -1 for an error.
110
 * If optimized, *executor_ptr contains a new reference to the executor
111
 */
112
// gh-137573: inlining this function causes stack overflows
113
Py_NO_INLINE int
114
_PyOptimizer_Optimize(
115
    _PyInterpreterFrame *frame, _Py_CODEUNIT *start,
116
    _PyExecutorObject **executor_ptr, int chain_depth)
117
{
118
    _PyStackRef *stack_pointer = frame->stackpointer;
119
    PyInterpreterState *interp = _PyInterpreterState_GET();
120
    assert(interp->jit);
121
    assert(!interp->compiling);
122
#ifndef Py_GIL_DISABLED
123
    interp->compiling = true;
124
    // The first executor in a chain and the MAX_CHAIN_DEPTH'th executor *must*
125
    // make progress in order to avoid infinite loops or excessively-long
126
    // side-exit chains. We can only insert the executor into the bytecode if
127
    // this is true, since a deopt won't infinitely re-enter the executor:
128
    chain_depth %= MAX_CHAIN_DEPTH;
129
    bool progress_needed = chain_depth == 0;
130
    PyCodeObject *code = _PyFrame_GetCode(frame);
131
    assert(PyCode_Check(code));
132
    if (progress_needed && !has_space_for_executor(code, start)) {
133
        interp->compiling = false;
134
        return 0;
135
    }
136
    int err = uop_optimize(frame, start, executor_ptr, (int)(stack_pointer - _PyFrame_Stackbase(frame)), progress_needed);
137
    if (err <= 0) {
138
        interp->compiling = false;
139
        return err;
140
    }
141
    assert(*executor_ptr != NULL);
142
    if (progress_needed) {
143
        int index = get_index_for_executor(code, start);
144
        if (index < 0) {
145
            /* Out of memory. Don't raise and assume that the
146
             * error will show up elsewhere.
147
             *
148
             * If an optimizer has already produced an executor,
149
             * it might get confused by the executor disappearing,
150
             * but there is not much we can do about that here. */
151
            Py_DECREF(*executor_ptr);
152
            interp->compiling = false;
153
            return 0;
154
        }
155
        insert_executor(code, start, index, *executor_ptr);
156
    }
157
    else {
158
        (*executor_ptr)->vm_data.code = NULL;
159
    }
160
    (*executor_ptr)->vm_data.chain_depth = chain_depth;
161
    assert((*executor_ptr)->vm_data.valid);
162
    interp->compiling = false;
163
    return 1;
164
#else
165
    return 0;
166
#endif
167
}
168
169
static _PyExecutorObject *
170
get_executor_lock_held(PyCodeObject *code, int offset)
171
{
172
    int code_len = (int)Py_SIZE(code);
173
    for (int i = 0 ; i < code_len;) {
174
        if (_PyCode_CODE(code)[i].op.code == ENTER_EXECUTOR && i*2 == offset) {
175
            int oparg = _PyCode_CODE(code)[i].op.arg;
176
            _PyExecutorObject *res = code->co_executors->executors[oparg];
177
            Py_INCREF(res);
178
            return res;
179
        }
180
        i += _PyInstruction_GetLength(code, i);
181
    }
182
    PyErr_SetString(PyExc_ValueError, "no executor at given byte offset");
183
    return NULL;
184
}
185
186
_PyExecutorObject *
187
_Py_GetExecutor(PyCodeObject *code, int offset)
188
{
189
    _PyExecutorObject *executor;
190
    Py_BEGIN_CRITICAL_SECTION(code);
191
    executor = get_executor_lock_held(code, offset);
192
    Py_END_CRITICAL_SECTION();
193
    return executor;
194
}
195
196
static PyObject *
197
is_valid(PyObject *self, PyObject *Py_UNUSED(ignored))
198
{
199
    return PyBool_FromLong(((_PyExecutorObject *)self)->vm_data.valid);
200
}
201
202
static PyObject *
203
get_opcode(PyObject *self, PyObject *Py_UNUSED(ignored))
204
{
205
    return PyLong_FromUnsignedLong(((_PyExecutorObject *)self)->vm_data.opcode);
206
}
207
208
static PyObject *
209
get_oparg(PyObject *self, PyObject *Py_UNUSED(ignored))
210
{
211
    return PyLong_FromUnsignedLong(((_PyExecutorObject *)self)->vm_data.oparg);
212
}
213
214
///////////////////// Experimental UOp Optimizer /////////////////////
215
216
static int executor_clear(PyObject *executor);
217
static void unlink_executor(_PyExecutorObject *executor);
218
219
220
void
221
_PyExecutor_Free(_PyExecutorObject *self)
222
{
223
#ifdef _Py_JIT
224
    _PyJIT_Free(self);
225
#endif
226
    PyObject_GC_Del(self);
227
}
228
229
void
230
_Py_ClearExecutorDeletionList(PyInterpreterState *interp)
231
{
232
    _PyRuntimeState *runtime = &_PyRuntime;
233
    HEAD_LOCK(runtime);
234
    PyThreadState* ts = PyInterpreterState_ThreadHead(interp);
235
    HEAD_UNLOCK(runtime);
236
    while (ts) {
237
        _PyExecutorObject *current = (_PyExecutorObject *)ts->current_executor;
238
        if (current != NULL) {
239
            /* Anything in this list will be unlinked, so we can reuse the
240
             * linked field as a reachability marker. */
241
            current->vm_data.linked = 1;
242
        }
243
        HEAD_LOCK(runtime);
244
        ts = PyThreadState_Next(ts);
245
        HEAD_UNLOCK(runtime);
246
    }
247
    _PyExecutorObject **prev_to_next_ptr = &interp->executor_deletion_list_head;
248
    _PyExecutorObject *exec = *prev_to_next_ptr;
249
    while (exec != NULL) {
250
        if (exec->vm_data.linked) {
251
            // This executor is currently executing
252
            exec->vm_data.linked = 0;
253
            prev_to_next_ptr = &exec->vm_data.links.next;
254
        }
255
        else {
256
            *prev_to_next_ptr = exec->vm_data.links.next;
257
            _PyExecutor_Free(exec);
258
        }
259
        exec = *prev_to_next_ptr;
260
    }
261
    interp->executor_deletion_list_remaining_capacity = EXECUTOR_DELETE_LIST_MAX;
262
}
263
264
static void
265
add_to_pending_deletion_list(_PyExecutorObject *self)
266
{
267
    PyInterpreterState *interp = PyInterpreterState_Get();
268
    self->vm_data.links.next = interp->executor_deletion_list_head;
269
    interp->executor_deletion_list_head = self;
270
    if (interp->executor_deletion_list_remaining_capacity > 0) {
271
        interp->executor_deletion_list_remaining_capacity--;
272
    }
273
    else {
274
        _Py_ClearExecutorDeletionList(interp);
275
    }
276
}
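/* Freeing is deferred rather than done directly in uop_dealloc(): another
 * thread may still be executing this executor while it is being deopted
 * from, so it is queued here and only released by
 * _Py_ClearExecutorDeletionList() once no thread's current_executor refers
 * to it (intent inferred from the reachability marking above). */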
277
278
static void
279
uop_dealloc(PyObject *op) {
280
    _PyExecutorObject *self = _PyExecutorObject_CAST(op);
281
    _PyObject_GC_UNTRACK(self);
282
    assert(self->vm_data.code == NULL);
283
    unlink_executor(self);
284
    // Once unlinked it becomes impossible to invalidate an executor, so do it here.
285
    self->vm_data.valid = 0;
286
    add_to_pending_deletion_list(self);
287
}
288
289
const char *
290
_PyUOpName(int index)
291
{
292
    if (index < 0 || index > MAX_UOP_ID) {
293
        return NULL;
294
    }
295
    return _PyOpcode_uop_name[index];
296
}
297
298
#ifdef Py_DEBUG
299
void
300
_PyUOpPrint(const _PyUOpInstruction *uop)
301
{
302
    const char *name = _PyUOpName(uop->opcode);
303
    if (name == NULL) {
304
        printf("<uop %d>", uop->opcode);
305
    }
306
    else {
307
        printf("%s", name);
308
    }
309
    switch(uop->format) {
310
        case UOP_FORMAT_TARGET:
311
            printf(" (%d, target=%d, operand0=%#" PRIx64 ", operand1=%#" PRIx64,
312
                uop->oparg,
313
                uop->target,
314
                (uint64_t)uop->operand0,
315
                (uint64_t)uop->operand1);
316
            break;
317
        case UOP_FORMAT_JUMP:
318
            printf(" (%d, jump_target=%d, operand0=%#" PRIx64 ", operand1=%#" PRIx64,
319
                uop->oparg,
320
                uop->jump_target,
321
                (uint64_t)uop->operand0,
322
                (uint64_t)uop->operand1);
323
            break;
324
        default:
325
            printf(" (%d, Unknown format)", uop->oparg);
326
    }
327
    if (_PyUop_Flags[uop->opcode] & HAS_ERROR_FLAG) {
328
        printf(", error_target=%d", uop->error_target);
329
    }
330
331
    printf(")");
332
}
333
#endif
334
335
static Py_ssize_t
336
uop_len(PyObject *op)
337
{
338
    _PyExecutorObject *self = _PyExecutorObject_CAST(op);
339
    return self->code_size;
340
}
341
342
static PyObject *
343
uop_item(PyObject *op, Py_ssize_t index)
344
{
345
    _PyExecutorObject *self = _PyExecutorObject_CAST(op);
346
    Py_ssize_t len = uop_len(op);
347
    if (index < 0 || index >= len) {
348
        PyErr_SetNone(PyExc_IndexError);
349
        return NULL;
350
    }
351
    const char *name = _PyUOpName(self->trace[index].opcode);
352
    if (name == NULL) {
353
        name = "<nil>";
354
    }
355
    PyObject *oname = _PyUnicode_FromASCII(name, strlen(name));
356
    if (oname == NULL) {
357
        return NULL;
358
    }
359
    PyObject *oparg = PyLong_FromUnsignedLong(self->trace[index].oparg);
360
    if (oparg == NULL) {
361
        Py_DECREF(oname);
362
        return NULL;
363
    }
364
    PyObject *target = PyLong_FromUnsignedLong(self->trace[index].target);
365
    if (target == NULL) {
366
        Py_DECREF(oparg);
367
        Py_DECREF(oname);
368
        return NULL;
369
    }
370
    PyObject *operand = PyLong_FromUnsignedLongLong(self->trace[index].operand0);
371
    if (operand == NULL) {
372
        Py_DECREF(target);
373
        Py_DECREF(oparg);
374
        Py_DECREF(oname);
375
        return NULL;
376
    }
377
    PyObject *args[4] = { oname, oparg, target, operand };
378
    return _PyTuple_FromArraySteal(args, 4);
379
}
380
381
PySequenceMethods uop_as_sequence = {
382
    .sq_length = uop_len,
383
    .sq_item = uop_item,
384
};
385
386
static int
387
executor_traverse(PyObject *o, visitproc visit, void *arg)
388
{
389
    _PyExecutorObject *executor = _PyExecutorObject_CAST(o);
390
    for (uint32_t i = 0; i < executor->exit_count; i++) {
391
        Py_VISIT(executor->exits[i].executor);
392
    }
393
    return 0;
394
}
395
396
static PyObject *
397
get_jit_code(PyObject *self, PyObject *Py_UNUSED(ignored))
398
{
399
#ifndef _Py_JIT
400
    PyErr_SetString(PyExc_RuntimeError, "JIT support not enabled.");
401
    return NULL;
402
#else
403
    _PyExecutorObject *executor = _PyExecutorObject_CAST(self);
404
    if (executor->jit_code == NULL || executor->jit_size == 0) {
405
        Py_RETURN_NONE;
406
    }
407
    return PyBytes_FromStringAndSize(executor->jit_code, executor->jit_size);
408
#endif
409
}
410
411
static PyMethodDef uop_executor_methods[] = {
412
    { "is_valid", is_valid, METH_NOARGS, NULL },
413
    { "get_jit_code", get_jit_code, METH_NOARGS, NULL},
414
    { "get_opcode", get_opcode, METH_NOARGS, NULL },
415
    { "get_oparg", get_oparg, METH_NOARGS, NULL },
416
    { NULL, NULL },
417
};
418
419
static int
420
executor_is_gc(PyObject *o)
421
{
422
    return !_Py_IsImmortal(o);
423
}
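/* Immortal executors (presumably the shared cold-exit executor returned by
 * _PyExecutor_GetColdExecutor()) are never deallocated, so they are excluded
 * from GC. */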
424
425
PyTypeObject _PyUOpExecutor_Type = {
426
    PyVarObject_HEAD_INIT(&PyType_Type, 0)
427
    .tp_name = "uop_executor",
428
    .tp_basicsize = offsetof(_PyExecutorObject, exits),
429
    .tp_itemsize = 1,
430
    .tp_flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_DISALLOW_INSTANTIATION | Py_TPFLAGS_HAVE_GC,
431
    .tp_dealloc = uop_dealloc,
432
    .tp_as_sequence = &uop_as_sequence,
433
    .tp_methods = uop_executor_methods,
434
    .tp_traverse = executor_traverse,
435
    .tp_clear = executor_clear,
436
    .tp_is_gc = executor_is_gc,
437
};
438
439
/* TO DO -- Generate these tables */
440
static const uint16_t
441
_PyUOp_Replacements[MAX_UOP_ID + 1] = {
442
    [_ITER_JUMP_RANGE] = _GUARD_NOT_EXHAUSTED_RANGE,
443
    [_ITER_JUMP_LIST] = _GUARD_NOT_EXHAUSTED_LIST,
444
    [_ITER_JUMP_TUPLE] = _GUARD_NOT_EXHAUSTED_TUPLE,
445
    [_FOR_ITER] = _FOR_ITER_TIER_TWO,
446
    [_ITER_NEXT_LIST] = _ITER_NEXT_LIST_TIER_TWO,
447
    [_CHECK_PERIODIC_AT_END] = _TIER2_RESUME_CHECK,
448
};
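/* Tier 1 uops that cannot appear in a trace as-is are swapped for their
 * tier 2 counterparts (e.g. _FOR_ITER becomes _FOR_ITER_TIER_TWO) in the
 * OPARG_REPLACED case of translate_bytecode_to_trace() below. */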
449
450
static const uint8_t
451
is_for_iter_test[MAX_UOP_ID + 1] = {
452
    [_GUARD_NOT_EXHAUSTED_RANGE] = 1,
453
    [_GUARD_NOT_EXHAUSTED_LIST] = 1,
454
    [_GUARD_NOT_EXHAUSTED_TUPLE] = 1,
455
    [_FOR_ITER_TIER_TWO] = 1,
456
};
457
458
static const uint16_t
459
BRANCH_TO_GUARD[4][2] = {
460
    [POP_JUMP_IF_FALSE - POP_JUMP_IF_FALSE][0] = _GUARD_IS_TRUE_POP,
461
    [POP_JUMP_IF_FALSE - POP_JUMP_IF_FALSE][1] = _GUARD_IS_FALSE_POP,
462
    [POP_JUMP_IF_TRUE - POP_JUMP_IF_FALSE][0] = _GUARD_IS_FALSE_POP,
463
    [POP_JUMP_IF_TRUE - POP_JUMP_IF_FALSE][1] = _GUARD_IS_TRUE_POP,
464
    [POP_JUMP_IF_NONE - POP_JUMP_IF_FALSE][0] = _GUARD_IS_NOT_NONE_POP,
465
    [POP_JUMP_IF_NONE - POP_JUMP_IF_FALSE][1] = _GUARD_IS_NONE_POP,
466
    [POP_JUMP_IF_NOT_NONE - POP_JUMP_IF_FALSE][0] = _GUARD_IS_NONE_POP,
467
    [POP_JUMP_IF_NOT_NONE - POP_JUMP_IF_FALSE][1] = _GUARD_IS_NOT_NONE_POP,
468
};
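/* Indexed as [branch opcode - POP_JUMP_IF_FALSE][jump_likely]. For example,
 * POP_JUMP_IF_TRUE with jump_likely == 1 selects _GUARD_IS_TRUE_POP: the
 * trace follows the taken branch, and the guard deopts if the popped value
 * turns out not to be true. */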
469
470
471
#define CONFIDENCE_RANGE 1000
472
#define CONFIDENCE_CUTOFF 333
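/* Tracing starts with confidence == CONFIDENCE_RANGE. At each conditional
 * branch the confidence is scaled by the observed frequency of the direction
 * the trace follows (bitcount/16 or (16 - bitcount)/16, taken from the
 * 16-bit branch history in the instruction's cache entry). Once it drops
 * below CONFIDENCE_CUTOFF, tracing stops. */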
473
474
#ifdef Py_DEBUG
475
#define DPRINTF(level, ...) \
476
    if (lltrace >= (level)) { printf(__VA_ARGS__); }
477
#else
478
#define DPRINTF(level, ...)
479
#endif
480
481
482
static inline int
483
add_to_trace(
484
    _PyUOpInstruction *trace,
485
    int trace_length,
486
    uint16_t opcode,
487
    uint16_t oparg,
488
    uint64_t operand,
489
    uint32_t target)
490
{
491
    trace[trace_length].opcode = opcode;
492
    trace[trace_length].format = UOP_FORMAT_TARGET;
493
    trace[trace_length].target = target;
494
    trace[trace_length].oparg = oparg;
495
    trace[trace_length].operand0 = operand;
496
#ifdef Py_STATS
497
    trace[trace_length].execution_count = 0;
498
#endif
499
    return trace_length + 1;
500
}
501
502
#ifdef Py_DEBUG
503
#define ADD_TO_TRACE(OPCODE, OPARG, OPERAND, TARGET) \
504
    assert(trace_length < max_length); \
505
    trace_length = add_to_trace(trace, trace_length, (OPCODE), (OPARG), (OPERAND), (TARGET)); \
506
    if (lltrace >= 2) { \
507
        printf("%4d ADD_TO_TRACE: ", trace_length); \
508
        _PyUOpPrint(&trace[trace_length-1]); \
509
        printf("\n"); \
510
    }
511
#else
512
#define ADD_TO_TRACE(OPCODE, OPARG, OPERAND, TARGET) \
513
    assert(trace_length < max_length); \
514
    trace_length = add_to_trace(trace, trace_length, (OPCODE), (OPARG), (OPERAND), (TARGET));
515
#endif
516
517
#define INSTR_IP(INSTR, CODE) \
518
    ((uint32_t)((INSTR) - ((_Py_CODEUNIT *)(CODE)->co_code_adaptive)))
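// Index of INSTR within CODE, in code units; callers multiply by 2
// (sizeof(_Py_CODEUNIT)) when reporting byte offsets.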
519
520
// Reserve space for n uops
521
#define RESERVE_RAW(n, opname) \
522
    if (trace_length + (n) > max_length) { \
523
        DPRINTF(2, "No room for %s (need %d, got %d)\n", \
524
                (opname), (n), max_length - trace_length); \
525
        OPT_STAT_INC(trace_too_long); \
526
        goto done; \
527
    }
528
529
// Reserve space for N uops, plus 3 for _SET_IP, _CHECK_VALIDITY and _EXIT_TRACE
530
#define RESERVE(needed) RESERVE_RAW((needed) + 3, _PyUOpName(opcode))
531
532
// Trace stack operations (used by _PUSH_FRAME, _RETURN_VALUE)
533
#define TRACE_STACK_PUSH() \
534
    if (trace_stack_depth >= TRACE_STACK_SIZE) { \
535
        DPRINTF(2, "Trace stack overflow\n"); \
536
        OPT_STAT_INC(trace_stack_overflow); \
537
        return 0; \
538
    } \
539
    assert(func == NULL || func->func_code == (PyObject *)code); \
540
    trace_stack[trace_stack_depth].func = func; \
541
    trace_stack[trace_stack_depth].code = code; \
542
    trace_stack[trace_stack_depth].instr = instr; \
543
    trace_stack_depth++;
544
#define TRACE_STACK_POP() \
545
    if (trace_stack_depth <= 0) { \
546
        Py_FatalError("Trace stack underflow\n"); \
547
    } \
548
    trace_stack_depth--; \
549
    func = trace_stack[trace_stack_depth].func; \
550
    code = trace_stack[trace_stack_depth].code; \
551
    assert(func == NULL || func->func_code == (PyObject *)code); \
552
    instr = trace_stack[trace_stack_depth].instr;
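/* The trace stack mirrors the frames that the projected trace will push and
 * pop (_PUSH_FRAME, _RETURN_VALUE, etc.), so that code, func and instr can
 * be restored when the trace returns to a caller. */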
553
554
/* Returns the length of the trace on success,
555
 * 0 if it failed to produce a worthwhile trace,
556
 * and -1 on an error.
557
 */
558
static int
559
translate_bytecode_to_trace(
560
    _PyInterpreterFrame *frame,
561
    _Py_CODEUNIT *instr,
562
    _PyUOpInstruction *trace,
563
    int buffer_size,
564
    _PyBloomFilter *dependencies, bool progress_needed)
565
{
566
    bool first = true;
567
    PyCodeObject *code = _PyFrame_GetCode(frame);
568
    PyFunctionObject *func = _PyFrame_GetFunction(frame);
569
    assert(PyFunction_Check(func));
570
    PyCodeObject *initial_code = code;
571
    _Py_BloomFilter_Add(dependencies, initial_code);
572
    _Py_CODEUNIT *initial_instr = instr;
573
    int trace_length = 0;
574
    // Leave space for possible trailing _EXIT_TRACE
575
    int max_length = buffer_size-2;
576
    struct {
577
        PyFunctionObject *func;
578
        PyCodeObject *code;
579
        _Py_CODEUNIT *instr;
580
    } trace_stack[TRACE_STACK_SIZE];
581
    int trace_stack_depth = 0;
582
    int confidence = CONFIDENCE_RANGE;  // Adjusted by branch instructions
583
    bool jump_seen = false;
584
585
#ifdef Py_DEBUG
586
    char *python_lltrace = Py_GETENV("PYTHON_LLTRACE");
587
    int lltrace = 0;
588
    if (python_lltrace != NULL && *python_lltrace >= '0') {
589
        lltrace = *python_lltrace - '0';  // TODO: Parse an int and all that
590
    }
591
#endif
592
593
    DPRINTF(2,
594
            "Optimizing %s (%s:%d) at byte offset %d\n",
595
            PyUnicode_AsUTF8(code->co_qualname),
596
            PyUnicode_AsUTF8(code->co_filename),
597
            code->co_firstlineno,
598
            2 * INSTR_IP(initial_instr, code));
599
    ADD_TO_TRACE(_START_EXECUTOR, 0, (uintptr_t)instr, INSTR_IP(instr, code));
600
    ADD_TO_TRACE(_MAKE_WARM, 0, 0, 0);
601
    uint32_t target = 0;
602
603
    for (;;) {
604
        target = INSTR_IP(instr, code);
605
        // One for possible _DEOPT, one because _CHECK_VALIDITY itself might _DEOPT
606
        max_length-=2;
607
        uint32_t opcode = instr->op.code;
608
        uint32_t oparg = instr->op.arg;
609
610
        if (!first && instr == initial_instr) {
611
            // We have looped around to the start:
612
            RESERVE(1);
613
            ADD_TO_TRACE(_JUMP_TO_TOP, 0, 0, 0);
614
            goto done;
615
        }
616
617
        DPRINTF(2, "%d: %s(%d)\n", target, _PyOpcode_OpName[opcode], oparg);
618
619
        if (opcode == EXTENDED_ARG) {
620
            instr++;
621
            opcode = instr->op.code;
622
            oparg = (oparg << 8) | instr->op.arg;
623
            if (opcode == EXTENDED_ARG) {
624
                instr--;
625
                goto done;
626
            }
627
        }
628
        if (opcode == ENTER_EXECUTOR) {
629
            // We have a couple of options here. We *could* peek "underneath"
630
            // this executor and continue tracing, which could give us a longer,
631
            // more optimizable trace (at the expense of lots of duplicated
632
            // tier two code). Instead, we choose to just end here and stitch to
633
            // the other trace, which allows side-exit traces to rejoin the
634
            // "main" trace periodically (and also helps protect us against
635
            // pathological behavior where the amount of tier two code explodes
636
            // for a medium-length, branchy code path). This seems to work
637
            // better in practice, but in the future we could be smarter about
638
            // what we do here:
639
            goto done;
640
        }
641
        assert(opcode != ENTER_EXECUTOR && opcode != EXTENDED_ARG);
642
        RESERVE_RAW(2, "_CHECK_VALIDITY");
643
        ADD_TO_TRACE(_CHECK_VALIDITY, 0, 0, target);
644
        if (!OPCODE_HAS_NO_SAVE_IP(opcode)) {
645
            RESERVE_RAW(2, "_SET_IP");
646
            ADD_TO_TRACE(_SET_IP, 0, (uintptr_t)instr, target);
647
        }
648
649
        /* Special case the first instruction,
650
         * so that we can guarantee forward progress */
651
        if (first && progress_needed) {
652
            assert(first);
653
            if (OPCODE_HAS_EXIT(opcode) || OPCODE_HAS_DEOPT(opcode)) {
654
                opcode = _PyOpcode_Deopt[opcode];
655
            }
656
            assert(!OPCODE_HAS_EXIT(opcode));
657
            assert(!OPCODE_HAS_DEOPT(opcode));
658
        }
659
660
        if (OPCODE_HAS_EXIT(opcode)) {
661
            // Make space for side exit and final _EXIT_TRACE:
662
            RESERVE_RAW(2, "_EXIT_TRACE");
663
            max_length--;
664
        }
665
        if (OPCODE_HAS_ERROR(opcode)) {
666
            // Make space for error stub and final _EXIT_TRACE:
667
            RESERVE_RAW(2, "_ERROR_POP_N");
668
            max_length--;
669
        }
670
        switch (opcode) {
671
            case POP_JUMP_IF_NONE:
672
            case POP_JUMP_IF_NOT_NONE:
673
            case POP_JUMP_IF_FALSE:
674
            case POP_JUMP_IF_TRUE:
675
            {
676
                RESERVE(1);
677
                int counter = instr[1].cache;
678
                int bitcount = _Py_popcount32(counter);
679
                int jump_likely = bitcount > 8;
680
                /* If bitcount is 8 (half the jumps were taken), adjust confidence by 50%.
681
                   For values in between, adjust proportionally. */
682
                if (jump_likely) {
683
                    confidence = confidence * bitcount / 16;
684
                }
685
                else {
686
                    confidence = confidence * (16 - bitcount) / 16;
687
                }
688
                uint32_t uopcode = BRANCH_TO_GUARD[opcode - POP_JUMP_IF_FALSE][jump_likely];
689
                DPRINTF(2, "%d: %s(%d): counter=%04x, bitcount=%d, likely=%d, confidence=%d, uopcode=%s\n",
690
                        target, _PyOpcode_OpName[opcode], oparg,
691
                        counter, bitcount, jump_likely, confidence, _PyUOpName(uopcode));
692
                if (confidence < CONFIDENCE_CUTOFF) {
693
                    DPRINTF(2, "Confidence too low (%d < %d)\n", confidence, CONFIDENCE_CUTOFF);
694
                    OPT_STAT_INC(low_confidence);
695
                    goto done;
696
                }
697
                _Py_CODEUNIT *next_instr = instr + 1 + _PyOpcode_Caches[_PyOpcode_Deopt[opcode]];
698
                _Py_CODEUNIT *target_instr = next_instr + oparg;
699
                if (jump_likely) {
700
                    DPRINTF(2, "Jump likely (%04x = %d bits), continue at byte offset %d\n",
701
                            instr[1].cache, bitcount, 2 * INSTR_IP(target_instr, code));
702
                    instr = target_instr;
703
                    ADD_TO_TRACE(uopcode, 0, 0, INSTR_IP(next_instr, code));
704
                    goto top;
705
                }
706
                ADD_TO_TRACE(uopcode, 0, 0, INSTR_IP(target_instr, code));
707
                break;
708
            }
709
710
            case JUMP_BACKWARD:
711
            case JUMP_BACKWARD_JIT:
712
                ADD_TO_TRACE(_CHECK_PERIODIC, 0, 0, target);
713
                _Py_FALLTHROUGH;
714
            case JUMP_BACKWARD_NO_INTERRUPT:
715
            {
716
                instr += 1 + _PyOpcode_Caches[_PyOpcode_Deopt[opcode]] - (int)oparg;
717
                if (jump_seen) {
718
                    OPT_STAT_INC(inner_loop);
719
                    DPRINTF(2, "JUMP_BACKWARD not to top ends trace\n");
720
                    goto done;
721
                }
722
                jump_seen = true;
723
                goto top;
724
            }
725
726
            case JUMP_FORWARD:
727
            {
728
                RESERVE(0);
729
                // This will emit two _SET_IP instructions; leave it to the optimizer
730
                instr += oparg;
731
                break;
732
            }
733
734
            case RESUME:
735
                /* Use a special tier 2 version of RESUME_CHECK to allow traces to
736
                 *  start with RESUME_CHECK */
737
                ADD_TO_TRACE(_TIER2_RESUME_CHECK, 0, 0, target);
738
                break;
739
740
            default:
741
            {
742
                const struct opcode_macro_expansion *expansion = &_PyOpcode_macro_expansion[opcode];
743
                if (expansion->nuops > 0) {
744
                    // Reserve space for nuops (+ _SET_IP + _EXIT_TRACE)
745
                    int nuops = expansion->nuops;
746
                    RESERVE(nuops + 1); /* One extra for exit */
747
                    int16_t last_op = expansion->uops[nuops-1].uop;
748
                    if (last_op == _RETURN_VALUE || last_op == _RETURN_GENERATOR || last_op == _YIELD_VALUE) {
749
                        // Check for trace stack underflow now:
750
                        // We can't bail e.g. in the middle of
751
                        // LOAD_CONST + _RETURN_VALUE.
752
                        if (trace_stack_depth == 0) {
753
                            DPRINTF(2, "Trace stack underflow\n");
754
                            OPT_STAT_INC(trace_stack_underflow);
755
                            return 0;
756
                        }
757
                    }
758
                    uint32_t orig_oparg = oparg;  // For OPARG_TOP/BOTTOM
759
                    for (int i = 0; i < nuops; i++) {
760
                        oparg = orig_oparg;
761
                        uint32_t uop = expansion->uops[i].uop;
762
                        uint64_t operand = 0;
763
                        // Add one to account for the actual opcode/oparg pair:
764
                        int offset = expansion->uops[i].offset + 1;
765
                        switch (expansion->uops[i].size) {
766
                            case OPARG_SIMPLE:
767
                                assert(opcode != JUMP_BACKWARD_NO_INTERRUPT && opcode != JUMP_BACKWARD);
768
                                break;
769
                            case OPARG_CACHE_1:
770
                                operand = read_u16(&instr[offset].cache);
771
                                break;
772
                            case OPARG_CACHE_2:
773
                                operand = read_u32(&instr[offset].cache);
774
                                break;
775
                            case OPARG_CACHE_4:
776
                                operand = read_u64(&instr[offset].cache);
777
                                break;
778
                            case OPARG_TOP:  // First half of super-instr
779
                                oparg = orig_oparg >> 4;
780
                                break;
781
                            case OPARG_BOTTOM:  // Second half of super-instr
782
                                oparg = orig_oparg & 0xF;
783
                                break;
784
                            case OPARG_SAVE_RETURN_OFFSET:  // op=_SAVE_RETURN_OFFSET; oparg=return_offset
785
                                oparg = offset;
786
                                assert(uop == _SAVE_RETURN_OFFSET);
787
                                break;
788
                            case OPARG_REPLACED:
789
                                uop = _PyUOp_Replacements[uop];
790
                                assert(uop != 0);
791
                                uint32_t next_inst = target + 1 + _PyOpcode_Caches[_PyOpcode_Deopt[opcode]] + (oparg > 255);
792
                                if (uop == _TIER2_RESUME_CHECK) {
793
                                    target = next_inst;
794
                                }
795
#ifdef Py_DEBUG
796
                                else {
797
                                    uint32_t jump_target = next_inst + oparg;
798
                                    assert(_Py_GetBaseCodeUnit(code, jump_target).op.code == END_FOR);
799
                                    assert(_Py_GetBaseCodeUnit(code, jump_target+1).op.code == POP_ITER);
800
                                }
801
#endif
802
                                break;
803
                            case OPERAND1_1:
804
                                assert(trace[trace_length-1].opcode == uop);
805
                                operand = read_u16(&instr[offset].cache);
806
                                trace[trace_length-1].operand1 = operand;
807
                                continue;
808
                            case OPERAND1_2:
809
                                assert(trace[trace_length-1].opcode == uop);
810
                                operand = read_u32(&instr[offset].cache);
811
                                trace[trace_length-1].operand1 = operand;
812
                                continue;
813
                            case OPERAND1_4:
814
                                assert(trace[trace_length-1].opcode == uop);
815
                                operand = read_u64(&instr[offset].cache);
816
                                trace[trace_length-1].operand1 = operand;
817
                                continue;
818
                            default:
819
                                fprintf(stderr,
820
                                        "opcode=%d, oparg=%d; nuops=%d, i=%d; size=%d, offset=%d\n",
821
                                        opcode, oparg, nuops, i,
822
                                        expansion->uops[i].size,
823
                                        expansion->uops[i].offset);
824
                                Py_FatalError("garbled expansion");
825
                        }
826
827
                        if (uop == _RETURN_VALUE || uop == _RETURN_GENERATOR || uop == _YIELD_VALUE) {
828
                            TRACE_STACK_POP();
829
                            /* Set the operand to the function or code object returned to,
830
                             * to assist optimization passes. (See _PUSH_FRAME below.)
831
                             */
832
                            if (func != NULL) {
833
                                operand = (uintptr_t)func;
834
                            }
835
                            else if (code != NULL) {
836
                                operand = (uintptr_t)code | 1;
837
                            }
838
                            else {
839
                                operand = 0;
840
                            }
841
                            ADD_TO_TRACE(uop, oparg, operand, target);
842
                            DPRINTF(2,
843
                                "Returning to %s (%s:%d) at byte offset %d\n",
844
                                PyUnicode_AsUTF8(code->co_qualname),
845
                                PyUnicode_AsUTF8(code->co_filename),
846
                                code->co_firstlineno,
847
                                2 * INSTR_IP(instr, code));
848
                            goto top;
849
                        }
850
851
                        if (uop == _PUSH_FRAME) {
852
                            assert(i + 1 == nuops);
853
                            if (opcode == FOR_ITER_GEN ||
854
                                opcode == LOAD_ATTR_PROPERTY ||
855
                                opcode == BINARY_OP_SUBSCR_GETITEM ||
856
                                opcode == SEND_GEN)
857
                            {
858
                                DPRINTF(2, "Bailing due to dynamic target\n");
859
                                OPT_STAT_INC(unknown_callee);
860
                                return 0;
861
                            }
862
                            assert(_PyOpcode_Deopt[opcode] == CALL || _PyOpcode_Deopt[opcode] == CALL_KW);
863
                            int func_version_offset =
864
                                offsetof(_PyCallCache, func_version)/sizeof(_Py_CODEUNIT)
865
                                // Add one to account for the actual opcode/oparg pair:
866
                                + 1;
867
                            uint32_t func_version = read_u32(&instr[func_version_offset].cache);
868
                            PyCodeObject *new_code = NULL;
869
                            PyFunctionObject *new_func =
870
                                _PyFunction_LookupByVersion(func_version, (PyObject **) &new_code);
871
                            DPRINTF(2, "Function: version=%#x; new_func=%p, new_code=%p\n",
872
                                    (int)func_version, new_func, new_code);
873
                            if (new_code != NULL) {
874
                                if (new_code == code) {
875
                                    // Recursive call, bail (we could be here forever).
876
                                    DPRINTF(2, "Bailing on recursive call to %s (%s:%d)\n",
877
                                            PyUnicode_AsUTF8(new_code->co_qualname),
878
                                            PyUnicode_AsUTF8(new_code->co_filename),
879
                                            new_code->co_firstlineno);
880
                                    OPT_STAT_INC(recursive_call);
881
                                    ADD_TO_TRACE(uop, oparg, 0, target);
882
                                    ADD_TO_TRACE(_EXIT_TRACE, 0, 0, 0);
883
                                    goto done;
884
                                }
885
                                if (new_code->co_version != func_version) {
886
                                    // func.__code__ was updated.
887
                                    // Perhaps it may happen again, so don't bother tracing.
888
                                    // TODO: Reason about this -- is it better to bail or not?
889
                                    DPRINTF(2, "Bailing because co_version != func_version\n");
890
                                    ADD_TO_TRACE(uop, oparg, 0, target);
891
                                    ADD_TO_TRACE(_EXIT_TRACE, 0, 0, 0);
892
                                    goto done;
893
                                }
894
                                // Increment IP to the return address
895
                                instr += _PyOpcode_Caches[_PyOpcode_Deopt[opcode]] + 1;
896
                                TRACE_STACK_PUSH();
897
                                _Py_BloomFilter_Add(dependencies, new_code);
898
                                /* Set the operand to the callee's function or code object,
899
                                 * to assist optimization passes.
900
                                 * We prefer setting it to the function
901
                                 * but if that's not available but the code is available,
902
                                 * use the code, setting the low bit so the optimizer knows.
903
                                 */
904
                                if (new_func != NULL) {
905
                                    operand = (uintptr_t)new_func;
906
                                }
907
                                else if (new_code != NULL) {
908
                                    operand = (uintptr_t)new_code | 1;
909
                                }
910
                                else {
911
                                    operand = 0;
912
                                }
913
                                ADD_TO_TRACE(uop, oparg, operand, target);
914
                                code = new_code;
915
                                func = new_func;
916
                                instr = _PyCode_CODE(code);
917
                                DPRINTF(2,
918
                                    "Continuing in %s (%s:%d) at byte offset %d\n",
919
                                    PyUnicode_AsUTF8(code->co_qualname),
920
                                    PyUnicode_AsUTF8(code->co_filename),
921
                                    code->co_firstlineno,
922
                                    2 * INSTR_IP(instr, code));
923
                                goto top;
924
                            }
925
                            DPRINTF(2, "Bail, new_code == NULL\n");
926
                            OPT_STAT_INC(unknown_callee);
927
                            return 0;
928
                        }
929
930
                        if (uop == _BINARY_OP_INPLACE_ADD_UNICODE) {
931
                            assert(i + 1 == nuops);
932
                            _Py_CODEUNIT *next_instr = instr + 1 + _PyOpcode_Caches[_PyOpcode_Deopt[opcode]];
933
                            assert(next_instr->op.code == STORE_FAST);
934
                            operand = next_instr->op.arg;
935
                            // Skip the STORE_FAST:
936
                            instr++;
937
                        }
938
939
                        // All other instructions
940
                        ADD_TO_TRACE(uop, oparg, operand, target);
941
                    }
942
                    break;
943
                }
944
                DPRINTF(2, "Unsupported opcode %s\n", _PyOpcode_OpName[opcode]);
945
                OPT_UNSUPPORTED_OPCODE(opcode);
946
                goto done;  // Break out of loop
947
            }  // End default
948
949
        }  // End switch (opcode)
950
951
        instr++;
952
        // Add cache size for opcode
953
        instr += _PyOpcode_Caches[_PyOpcode_Deopt[opcode]];
954
955
        if (opcode == CALL_LIST_APPEND) {
956
            assert(instr->op.code == POP_TOP);
957
            instr++;
958
        }
959
    top:
960
        // Jump here after _PUSH_FRAME or likely branches.
961
        first = false;
962
    }  // End for (;;)
963
964
done:
965
    while (trace_stack_depth > 0) {
966
        TRACE_STACK_POP();
967
    }
968
    assert(code == initial_code);
969
    // Skip short traces where we can't even translate a single instruction:
970
    if (first) {
971
        OPT_STAT_INC(trace_too_short);
972
        DPRINTF(2,
973
                "No trace for %s (%s:%d) at byte offset %d (no progress)\n",
974
                PyUnicode_AsUTF8(code->co_qualname),
975
                PyUnicode_AsUTF8(code->co_filename),
976
                code->co_firstlineno,
977
                2 * INSTR_IP(initial_instr, code));
978
        return 0;
979
    }
980
    if (!is_terminator(&trace[trace_length-1])) {
981
        /* Allow space for _EXIT_TRACE */
982
        max_length += 2;
983
        ADD_TO_TRACE(_EXIT_TRACE, 0, 0, target);
984
    }
985
    DPRINTF(1,
986
            "Created a proto-trace for %s (%s:%d) at byte offset %d -- length %d\n",
987
            PyUnicode_AsUTF8(code->co_qualname),
988
            PyUnicode_AsUTF8(code->co_filename),
989
            code->co_firstlineno,
990
            2 * INSTR_IP(initial_instr, code),
991
            trace_length);
992
    OPT_HIST(trace_length, trace_length_hist);
993
    return trace_length;
994
}
995
996
#undef RESERVE
997
#undef RESERVE_RAW
998
#undef INSTR_IP
999
#undef ADD_TO_TRACE
1000
#undef DPRINTF
1001
1002
#define UNSET_BIT(array, bit) (array[(bit)>>5] &= ~(1<<((bit)&31)))
1003
#define SET_BIT(array, bit) (array[(bit)>>5] |= (1<<((bit)&31)))
1004
#define BIT_IS_SET(array, bit) (array[(bit)>>5] & (1<<((bit)&31)))
1005
1006
/* Count the number of exit uops (_EXIT_TRACE) in the buffer
1007
*/
1008
static int
1009
count_exits(_PyUOpInstruction *buffer, int length)
1010
{
1011
    int exit_count = 0;
1012
    for (int i = 0; i < length; i++) {
1013
        int opcode = buffer[i].opcode;
1014
        if (opcode == _EXIT_TRACE) {
1015
            exit_count++;
1016
        }
1017
    }
1018
    return exit_count;
1019
}
1020
1021
static void make_exit(_PyUOpInstruction *inst, int opcode, int target)
1022
{
1023
    inst->opcode = opcode;
1024
    inst->oparg = 0;
1025
    inst->operand0 = 0;
1026
    inst->format = UOP_FORMAT_TARGET;
1027
    inst->target = target;
1028
#ifdef Py_STATS
1029
    inst->execution_count = 0;
1030
#endif
1031
}
1032
1033
/* Convert implicit exits, errors and deopts
1034
 * into explicit ones. */
1035
static int
1036
prepare_for_execution(_PyUOpInstruction *buffer, int length)
1037
{
1038
    int32_t current_jump = -1;
1039
    int32_t current_jump_target = -1;
1040
    int32_t current_error = -1;
1041
    int32_t current_error_target = -1;
1042
    int32_t current_popped = -1;
1043
    int32_t current_exit_op = -1;
1044
    /* Leaving in NOPs slows down the interpreter and messes up the stats */
1045
    _PyUOpInstruction *copy_to = &buffer[0];
1046
    for (int i = 0; i < length; i++) {
1047
        _PyUOpInstruction *inst = &buffer[i];
1048
        if (inst->opcode != _NOP) {
1049
            if (copy_to != inst) {
1050
                *copy_to = *inst;
1051
            }
1052
            copy_to++;
1053
        }
1054
    }
1055
    length = (int)(copy_to - buffer);
1056
    int next_spare = length;
1057
    for (int i = 0; i < length; i++) {
1058
        _PyUOpInstruction *inst = &buffer[i];
1059
        int opcode = inst->opcode;
1060
        int32_t target = (int32_t)uop_get_target(inst);
1061
        uint16_t exit_flags = _PyUop_Flags[opcode] & (HAS_EXIT_FLAG | HAS_DEOPT_FLAG | HAS_PERIODIC_FLAG);
1062
        if (exit_flags) {
1063
            uint16_t exit_op = _EXIT_TRACE;
1064
            if (exit_flags & HAS_DEOPT_FLAG) {
1065
                exit_op = _DEOPT;
1066
            }
1067
            else if (exit_flags & HAS_PERIODIC_FLAG) {
1068
                exit_op = _HANDLE_PENDING_AND_DEOPT;
1069
            }
1070
            int32_t jump_target = target;
1071
            if (is_for_iter_test[opcode]) {
1072
                /* Target the POP_TOP immediately after the END_FOR,
1073
                 * leaving only the iterator on the stack. */
1074
                int extended_arg = inst->oparg > 255;
1075
                int32_t next_inst = target + 1 + INLINE_CACHE_ENTRIES_FOR_ITER + extended_arg;
1076
                jump_target = next_inst + inst->oparg + 1;
1077
            }
1078
            if (jump_target != current_jump_target || current_exit_op != exit_op) {
1079
                make_exit(&buffer[next_spare], exit_op, jump_target);
1080
                current_exit_op = exit_op;
1081
                current_jump_target = jump_target;
1082
                current_jump = next_spare;
1083
                next_spare++;
1084
            }
1085
            buffer[i].jump_target = current_jump;
1086
            buffer[i].format = UOP_FORMAT_JUMP;
1087
        }
1088
        if (_PyUop_Flags[opcode] & HAS_ERROR_FLAG) {
1089
            int popped = (_PyUop_Flags[opcode] & HAS_ERROR_NO_POP_FLAG) ?
1090
                0 : _PyUop_num_popped(opcode, inst->oparg);
1091
            if (target != current_error_target || popped != current_popped) {
1092
                current_popped = popped;
1093
                current_error = next_spare;
1094
                current_error_target = target;
1095
                make_exit(&buffer[next_spare], _ERROR_POP_N, 0);
1096
                buffer[next_spare].operand0 = target;
1097
                next_spare++;
1098
            }
1099
            buffer[i].error_target = current_error;
1100
            if (buffer[i].format == UOP_FORMAT_TARGET) {
1101
                buffer[i].format = UOP_FORMAT_JUMP;
1102
                buffer[i].jump_target = 0;
1103
            }
1104
        }
1105
        if (opcode == _JUMP_TO_TOP) {
1106
            assert(buffer[0].opcode == _START_EXECUTOR);
1107
            buffer[i].format = UOP_FORMAT_JUMP;
1108
            buffer[i].jump_target = 1;
1109
        }
1110
    }
1111
    return next_spare;
1112
}
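/* On return the buffer holds the NOP-free trace followed by the freshly
 * appended exit, deopt and error stubs; the return value is the total number
 * of uops, stubs included. */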
1113
1114
/* Executor side exits */
1115
1116
static _PyExecutorObject *
1117
allocate_executor(int exit_count, int length)
1118
{
1119
    int size = exit_count*sizeof(_PyExitData) + length*sizeof(_PyUOpInstruction);
1120
    _PyExecutorObject *res = PyObject_GC_NewVar(_PyExecutorObject, &_PyUOpExecutor_Type, size);
1121
    if (res == NULL) {
1122
        return NULL;
1123
    }
1124
    res->trace = (_PyUOpInstruction *)(res->exits + exit_count);
1125
    res->code_size = length;
1126
    res->exit_count = exit_count;
1127
    return res;
1128
}
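/* Layout: tp_itemsize is 1, so `size` above is in bytes. The _PyExitData
 * array occupies the variable-length tail of the object and is immediately
 * followed by the trace; res->trace points just past the last exit. */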
1129
1130
#ifdef Py_DEBUG
1131
1132
#define CHECK(PRED) \
1133
if (!(PRED)) { \
1134
    printf(#PRED " at %d\n", i); \
1135
    assert(0); \
1136
}
1137
1138
static int
1139
target_unused(int opcode)
1140
{
1141
    return (_PyUop_Flags[opcode] & (HAS_ERROR_FLAG | HAS_EXIT_FLAG | HAS_DEOPT_FLAG)) == 0;
1142
}
1143
1144
static void
1145
sanity_check(_PyExecutorObject *executor)
1146
{
1147
    for (uint32_t i = 0; i < executor->exit_count; i++) {
1148
        _PyExitData *exit = &executor->exits[i];
1149
        CHECK(exit->target < (1 << 25));
1150
    }
1151
    bool ended = false;
1152
    uint32_t i = 0;
1153
    CHECK(executor->trace[0].opcode == _START_EXECUTOR || executor->trace[0].opcode == _COLD_EXIT);
1154
    for (; i < executor->code_size; i++) {
1155
        const _PyUOpInstruction *inst = &executor->trace[i];
1156
        uint16_t opcode = inst->opcode;
1157
        CHECK(opcode <= MAX_UOP_ID);
1158
        CHECK(_PyOpcode_uop_name[opcode] != NULL);
1159
        switch(inst->format) {
1160
            case UOP_FORMAT_TARGET:
1161
                CHECK(target_unused(opcode));
1162
                break;
1163
            case UOP_FORMAT_JUMP:
1164
                CHECK(inst->jump_target < executor->code_size);
1165
                break;
1166
        }
1167
        if (_PyUop_Flags[opcode] & HAS_ERROR_FLAG) {
1168
            CHECK(inst->format == UOP_FORMAT_JUMP);
1169
            CHECK(inst->error_target < executor->code_size);
1170
        }
1171
        if (is_terminator(inst)) {
1172
            ended = true;
1173
            i++;
1174
            break;
1175
        }
1176
    }
1177
    CHECK(ended);
1178
    for (; i < executor->code_size; i++) {
1179
        const _PyUOpInstruction *inst = &executor->trace[i];
1180
        uint16_t opcode = inst->opcode;
1181
        CHECK(
1182
            opcode == _DEOPT ||
1183
            opcode == _HANDLE_PENDING_AND_DEOPT ||
1184
            opcode == _EXIT_TRACE ||
1185
            opcode == _ERROR_POP_N);
1186
    }
1187
}
1188
1189
#undef CHECK
1190
#endif
1191
1192
/* Makes an executor from a buffer of uops.
1193
 * Account for the buffer having gaps and NOPs by computing a "used"
1194
 * bit vector and only copying the used uops. Here "used" means reachable
1195
 * and not a NOP.
1196
 */
1197
static _PyExecutorObject *
1198
make_executor_from_uops(_PyUOpInstruction *buffer, int length, const _PyBloomFilter *dependencies)
1199
{
1200
    int exit_count = count_exits(buffer, length);
1201
    _PyExecutorObject *executor = allocate_executor(exit_count, length);
1202
    if (executor == NULL) {
1203
        return NULL;
1204
    }
1205
1206
    /* Initialize exits */
1207
    _PyExecutorObject *cold = _PyExecutor_GetColdExecutor();
1208
    for (int i = 0; i < exit_count; i++) {
1209
        executor->exits[i].index = i;
1210
        executor->exits[i].temperature = initial_temperature_backoff_counter();
1211
        executor->exits[i].executor = cold;
1212
    }
1213
    int next_exit = exit_count-1;
1214
    _PyUOpInstruction *dest = (_PyUOpInstruction *)&executor->trace[length];
1215
    assert(buffer[0].opcode == _START_EXECUTOR);
1216
    buffer[0].operand0 = (uint64_t)executor;
1217
    for (int i = length-1; i >= 0; i--) {
1218
        int opcode = buffer[i].opcode;
1219
        dest--;
1220
        *dest = buffer[i];
1221
        assert(opcode != _POP_JUMP_IF_FALSE && opcode != _POP_JUMP_IF_TRUE);
1222
        if (opcode == _EXIT_TRACE) {
1223
            _PyExitData *exit = &executor->exits[next_exit];
1224
            exit->target = buffer[i].target;
1225
            dest->operand0 = (uint64_t)exit;
1226
            next_exit--;
1227
        }
1228
    }
1229
    assert(next_exit == -1);
1230
    assert(dest == executor->trace);
1231
    assert(dest->opcode == _START_EXECUTOR);
1232
    _Py_ExecutorInit(executor, dependencies);
1233
#ifdef Py_DEBUG
1234
    char *python_lltrace = Py_GETENV("PYTHON_LLTRACE");
1235
    int lltrace = 0;
1236
    if (python_lltrace != NULL && *python_lltrace >= '0') {
1237
        lltrace = *python_lltrace - '0';  // TODO: Parse an int and all that
1238
    }
1239
    if (lltrace >= 2) {
1240
        printf("Optimized trace (length %d):\n", length);
1241
        for (int i = 0; i < length; i++) {
1242
            printf("%4d OPTIMIZED: ", i);
1243
            _PyUOpPrint(&executor->trace[i]);
1244
            printf("\n");
1245
        }
1246
    }
1247
    sanity_check(executor);
1248
#endif
1249
#ifdef _Py_JIT
1250
    executor->jit_code = NULL;
1251
    executor->jit_size = 0;
1252
    // This is initialized to true so we can prevent the executor
1253
    // from being immediately detected as cold and invalidated.
1254
    executor->vm_data.warm = true;
1255
    if (_PyJIT_Compile(executor, executor->trace, length)) {
1256
        Py_DECREF(executor);
1257
        return NULL;
1258
    }
1259
#endif
1260
    _PyObject_GC_TRACK(executor);
1261
    return executor;
1262
}
1263
1264
#ifdef Py_STATS
1265
/* Returns the effective trace length.
1266
 * Ignores NOPs and trailing exit and error handling. */
1267
int effective_trace_length(_PyUOpInstruction *buffer, int length)
1268
{
1269
    int nop_count = 0;
1270
    for (int i = 0; i < length; i++) {
1271
        int opcode = buffer[i].opcode;
1272
        if (opcode == _NOP) {
1273
            nop_count++;
1274
        }
1275
        if (is_terminator(&buffer[i])) {
1276
            return i+1-nop_count;
1277
        }
1278
    }
1279
    Py_FatalError("No terminating instruction");
1280
    Py_UNREACHABLE();
1281
}
1282
#endif
1283
1284
static int
1285
uop_optimize(
1286
    _PyInterpreterFrame *frame,
1287
    _Py_CODEUNIT *instr,
1288
    _PyExecutorObject **exec_ptr,
1289
    int curr_stackentries,
1290
    bool progress_needed)
1291
{
1292
    _PyBloomFilter dependencies;
1293
    _Py_BloomFilter_Init(&dependencies);
1294
    PyInterpreterState *interp = _PyInterpreterState_GET();
1295
    if (interp->jit_uop_buffer == NULL) {
1296
        interp->jit_uop_buffer = (_PyUOpInstruction *)_PyObject_VirtualAlloc(UOP_BUFFER_SIZE);
1297
        if (interp->jit_uop_buffer == NULL) {
1298
            return 0;
1299
        }
1300
    }
1301
    _PyUOpInstruction *buffer = interp->jit_uop_buffer;
1302
    OPT_STAT_INC(attempts);
1303
    char *env_var = Py_GETENV("PYTHON_UOPS_OPTIMIZE");
1304
    bool is_noopt = true;
1305
    if (env_var == NULL || *env_var == '\0' || *env_var > '0') {
1306
        is_noopt = false;
1307
    }
1308
    int length = translate_bytecode_to_trace(frame, instr, buffer, UOP_MAX_TRACE_LENGTH, &dependencies, progress_needed);
1309
    if (length <= 0) {
1310
        // Error or nothing translated
1311
        return length;
1312
    }
1313
    assert(length < UOP_MAX_TRACE_LENGTH);
1314
    OPT_STAT_INC(traces_created);
1315
    if (!is_noopt) {
1316
        length = _Py_uop_analyze_and_optimize(frame, buffer,
1317
                                           length,
1318
                                           curr_stackentries, &dependencies);
1319
        if (length <= 0) {
1320
            return length;
1321
        }
1322
    }
1323
    assert(length < UOP_MAX_TRACE_LENGTH);
1324
    assert(length >= 1);
1325
    /* Fix up */
1326
    for (int pc = 0; pc < length; pc++) {
1327
        int opcode = buffer[pc].opcode;
1328
        int oparg = buffer[pc].oparg;
1329
        if (oparg < _PyUop_Replication[opcode].stop && oparg >= _PyUop_Replication[opcode].start) {
1330
            buffer[pc].opcode = opcode + oparg + 1 - _PyUop_Replication[opcode].start;
1331
            assert(strncmp(_PyOpcode_uop_name[buffer[pc].opcode], _PyOpcode_uop_name[opcode], strlen(_PyOpcode_uop_name[opcode])) == 0);
1332
        }
1333
        else if (is_terminator(&buffer[pc])) {
1334
            break;
1335
        }
1336
        assert(_PyOpcode_uop_name[buffer[pc].opcode]);
1337
    }
1338
    OPT_HIST(effective_trace_length(buffer, length), optimized_trace_length_hist);
1339
    length = prepare_for_execution(buffer, length);
1340
    assert(length <= UOP_MAX_TRACE_LENGTH);
1341
    _PyExecutorObject *executor = make_executor_from_uops(buffer, length,  &dependencies);
1342
    if (executor == NULL) {
1343
        return -1;
1344
    }
1345
    assert(length <= UOP_MAX_TRACE_LENGTH);
1346
    *exec_ptr = executor;
1347
    return 1;
1348
}
1349
1350
1351
/*****************************************
1352
 *        Executor management
1353
 ****************************************/
1354
1355
/* We use a bloomfilter with k = 6, m = 256
1356
 * The choice of k and the following constants
1357
 * could do with a more rigorous analysis,
1358
 * but here is a simple analysis:
1359
 *
1360
 * We want to keep the false positive rate low.
1361
 * For n = 5 (a trace depends on 5 objects),
1362
 * we expect 30 bits set, giving a false positive
1363
 * rate of (30/256)**6 == 2.5e-6 which is plenty
1364
 * good enough.
1365
 *
1366
 * However with n = 10 we expect 60 bits set (worst case),
1367
 * giving a false positive of (60/256)**6 == 0.0001
1368
 *
1369
 * We choose k = 6, rather than a higher number as
1370
 * it means the false positive rate grows slower for high n.
1371
 *
1372
 * n = 5, k = 6 => fp = 2.6e-6
1373
 * n = 5, k = 8 => fp = 3.5e-7
1374
 * n = 10, k = 6 => fp = 1.6e-4
1375
 * n = 10, k = 8 => fp = 0.9e-4
1376
 * n = 15, k = 6 => fp = 0.18%
1377
 * n = 15, k = 8 => fp = 0.23%
1378
 * n = 20, k = 6 => fp = 1.1%
1379
 * n = 20, k = 8 => fp = 2.3%
1380
 *
1381
 * The above analysis assumes perfect hash functions,
1382
 * but those don't exist, so the real false positive
1383
 * rates may be worse.
1384
 */
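
The figures above follow from the comment's own simplification: n dependencies set roughly k*n of the m = 256 bits, so an unrelated object matches with probability about (k*n/m)**k. A small standalone sketch that reproduces the quoted numbers (the helper below is illustrative only, not part of optimizer.c):

#include <math.h>
#include <stdio.h>

/* Rough false-positive estimate: assume n dependencies set about k*n of the
 * m filter bits, so an unrelated object hits all k probes with
 * probability (k*n/m)**k. */
static double
estimated_false_positive(int n, int k, int m)
{
    return pow((double)(k * n) / m, k);
}

int main(void)
{
    printf("n=5,  k=6: %.2e\n", estimated_false_positive(5, 6, 256));   /* 2.59e-06, quoted above as 2.6e-6 */
    printf("n=10, k=6: %.2e\n", estimated_false_positive(10, 6, 256));  /* 1.66e-04, quoted above as 1.6e-4 */
    printf("n=20, k=6: %.2e\n", estimated_false_positive(20, 6, 256));  /* 1.06e-02, quoted above as 1.1% */
    return 0;
}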
1385
1386
#define K 6
1387
1388
#define SEED 20221211
1389
1390
/* TO DO -- Use more modern hash functions with better distribution of bits */
1391
static uint64_t
1392
address_to_hash(void *ptr) {
1393
    assert(ptr != NULL);
1394
    uint64_t uhash = SEED;
1395
    uintptr_t addr = (uintptr_t)ptr;
1396
    for (int i = 0; i < SIZEOF_VOID_P; i++) {
1397
        uhash ^= addr & 255;
1398
        uhash *= (uint64_t)PyHASH_MULTIPLIER;
1399
        addr >>= 8;
1400
    }
1401
    return uhash;
1402
}
1403
1404
void
1405
_Py_BloomFilter_Init(_PyBloomFilter *bloom)
1406
{
1407
    for (int i = 0; i < _Py_BLOOM_FILTER_WORDS; i++) {
1408
        bloom->bits[i] = 0;
1409
    }
1410
}
1411
1412
/* We want K hash functions that each set 1 bit.
1413
 * A hash function that sets 1 bit in M bits can be trivially
1414
 * derived from a log2(M) bit hash function.
1415
 * So we extract 8 (log2(256)) bits at a time from
1416
 * the 64-bit hash. */
1417
void
1418
_Py_BloomFilter_Add(_PyBloomFilter *bloom, void *ptr)
1419
{
1420
    uint64_t hash = address_to_hash(ptr);
1421
    assert(K <= 8);
1422
    for (int i = 0; i < K; i++) {
1423
        uint8_t bits = hash & 255;
1424
        bloom->bits[bits >> 5] |= (1 << (bits&31));
1425
        hash >>= 8;
1426
    }
1427
}
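
Each byte sliced off the hash selects one of the filter's 256 bits: the top three bits of the byte pick one of the 32-bit words and the low five bits pick the bit within it (assuming the filter is stored as 8 x 32-bit words, i.e. _Py_BLOOM_FILTER_WORDS == 8). A minimal standalone sketch of just that mapping, using a plain array as a stand-in for bloom->bits:

#include <assert.h>
#include <stdint.h>

int main(void)
{
    uint32_t words[8] = {0};        /* 8 x 32 bits == 256 filter bits */
    uint8_t bits = 157;             /* one byte sliced off the 64-bit hash */

    /* Same mapping as above: high 3 bits pick the word, low 5 bits the bit. */
    words[bits >> 5] |= (1u << (bits & 31));

    assert((bits >> 5) == 4 && (bits & 31) == 29);   /* 157 == 4*32 + 29 */
    assert(words[4] == (1u << 29));
    return 0;
}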
1428
1429
static bool
1430
bloom_filter_may_contain(_PyBloomFilter *bloom, _PyBloomFilter *hashes)
1431
{
1432
    for (int i = 0; i < _Py_BLOOM_FILTER_WORDS; i++) {
1433
        if ((bloom->bits[i] & hashes->bits[i]) != hashes->bits[i]) {
1434
            return false;
1435
        }
1436
    }
1437
    return true;
1438
}
1439
1440
static void
1441
link_executor(_PyExecutorObject *executor)
1442
{
1443
    PyInterpreterState *interp = _PyInterpreterState_GET();
1444
    _PyExecutorLinkListNode *links = &executor->vm_data.links;
1445
    _PyExecutorObject *head = interp->executor_list_head;
1446
    if (head == NULL) {
1447
        interp->executor_list_head = executor;
1448
        links->previous = NULL;
1449
        links->next = NULL;
1450
    }
1451
    else {
1452
        assert(head->vm_data.links.previous == NULL);
1453
        links->previous = NULL;
1454
        links->next = head;
1455
        head->vm_data.links.previous = executor;
1456
        interp->executor_list_head = executor;
1457
    }
1458
    executor->vm_data.linked = true;
1459
    /* executor_list_head must be first in list */
1460
    assert(interp->executor_list_head->vm_data.links.previous == NULL);
1461
}
1462
1463
static void
1464
unlink_executor(_PyExecutorObject *executor)
1465
{
1466
    if (!executor->vm_data.linked) {
1467
        return;
1468
    }
1469
    _PyExecutorLinkListNode *links = &executor->vm_data.links;
1470
    assert(executor->vm_data.valid);
1471
    _PyExecutorObject *next = links->next;
1472
    _PyExecutorObject *prev = links->previous;
1473
    if (next != NULL) {
1474
        next->vm_data.links.previous = prev;
1475
    }
1476
    if (prev != NULL) {
1477
        prev->vm_data.links.next = next;
1478
    }
1479
    else {
1480
        // prev == NULL implies that executor is the list head
1481
        PyInterpreterState *interp = PyInterpreterState_Get();
1482
        assert(interp->executor_list_head == executor);
1483
        interp->executor_list_head = next;
1484
    }
1485
    executor->vm_data.linked = false;
1486
}
1487
1488
/* This must be called by optimizers before using the executor */
1489
void
1490
_Py_ExecutorInit(_PyExecutorObject *executor, const _PyBloomFilter *dependency_set)
1491
{
1492
    executor->vm_data.valid = true;
1493
    for (int i = 0; i < _Py_BLOOM_FILTER_WORDS; i++) {
1494
        executor->vm_data.bloom.bits[i] = dependency_set->bits[i];
1495
    }
1496
    link_executor(executor);
1497
}
1498
1499
_PyExecutorObject *
1500
_PyExecutor_GetColdExecutor(void)
1501
{
1502
    PyInterpreterState *interp = _PyInterpreterState_GET();
1503
    if (interp->cold_executor != NULL) {
1504
        return interp->cold_executor;
1505
    }
1506
    _PyExecutorObject *cold = allocate_executor(0, 1);
1507
    if (cold == NULL) {
1508
        Py_FatalError("Cannot allocate core JIT code");
1509
    }
1510
    ((_PyUOpInstruction *)cold->trace)->opcode = _COLD_EXIT;
1511
#ifdef _Py_JIT
1512
    cold->jit_code = NULL;
1513
    cold->jit_size = 0;
1514
    // This is initialized to true so we can prevent the executor
1515
    // from being immediately detected as cold and invalidated.
1516
    cold->vm_data.warm = true;
1517
    if (_PyJIT_Compile(cold, cold->trace, 1)) {
1518
        Py_DECREF(cold);
1519
        Py_FatalError("Cannot allocate core JIT code");
1520
    }
1521
#endif
1522
    _Py_SetImmortal((PyObject *)cold);
1523
    interp->cold_executor = cold;
1524
    return cold;
1525
}
1526
1527
void
1528
_PyExecutor_ClearExit(_PyExitData *exit)
1529
{
1530
    if (exit == NULL) {
1531
        return;
1532
    }
1533
    _PyExecutorObject *old = exit->executor;
1534
    exit->executor = _PyExecutor_GetColdExecutor();
1535
    Py_DECREF(old);
1536
}
1537
1538
/* Detaches the executor from the code object (if any) that
1539
 * holds a reference to it */
1540
void
1541
_Py_ExecutorDetach(_PyExecutorObject *executor)
1542
{
1543
    PyCodeObject *code = executor->vm_data.code;
1544
    if (code == NULL) {
1545
        return;
1546
    }
1547
    _Py_CODEUNIT *instruction = &_PyCode_CODE(code)[executor->vm_data.index];
1548
    assert(instruction->op.code == ENTER_EXECUTOR);
1549
    int index = instruction->op.arg;
1550
    assert(code->co_executors->executors[index] == executor);
1551
    instruction->op.code = executor->vm_data.opcode;
1552
    instruction->op.arg = executor->vm_data.oparg;
1553
    executor->vm_data.code = NULL;
1554
    code->co_executors->executors[index] = NULL;
1555
    Py_DECREF(executor);
1556
}
1557
1558
static int
1559
executor_clear(PyObject *op)
1560
{
1561
    _PyExecutorObject *executor = _PyExecutorObject_CAST(op);
1562
    if (!executor->vm_data.valid) {
1563
        return 0;
1564
    }
1565
    assert(executor->vm_data.valid == 1);
1566
    unlink_executor(executor);
1567
    executor->vm_data.valid = 0;
1568
1569
    /* It is possible for an executor to form a reference
1570
     * cycle with itself, so decref'ing a side exit could
1571
     * free the executor unless we hold a strong reference to it
1572
     */
1573
    _PyExecutorObject *cold = _PyExecutor_GetColdExecutor();
1574
    Py_INCREF(executor);
1575
    for (uint32_t i = 0; i < executor->exit_count; i++) {
1576
        executor->exits[i].temperature = initial_unreachable_backoff_counter();
1577
        _PyExecutorObject *e = executor->exits[i].executor;
1578
        executor->exits[i].executor = cold;
1579
        Py_DECREF(e);
1580
    }
1581
    _Py_ExecutorDetach(executor);
1582
    Py_DECREF(executor);
1583
    return 0;
1584
}
1585
1586
void
1587
_Py_Executor_DependsOn(_PyExecutorObject *executor, void *obj)
1588
{
1589
    assert(executor->vm_data.valid);
1590
    _Py_BloomFilter_Add(&executor->vm_data.bloom, obj);
1591
}
1592
1593
/* Invalidate all executors that depend on `obj`
1594
 * May cause other executors to be invalidated as well
1595
 */
1596
void
1597
_Py_Executors_InvalidateDependency(PyInterpreterState *interp, void *obj, int is_invalidation)
1598
{
1599
    _PyBloomFilter obj_filter;
1600
    _Py_BloomFilter_Init(&obj_filter);
1601
    _Py_BloomFilter_Add(&obj_filter, obj);
1602
    /* Walk the list of executors */
1603
    /* TO DO -- Use a tree to avoid traversing as many objects */
1604
    PyObject *invalidate = PyList_New(0);
1605
    if (invalidate == NULL) {
1606
        goto error;
1607
    }
1608
    /* Clearing an executor can deallocate others, so we need to make a list of
1609
     * executors to invalidate first */
1610
    for (_PyExecutorObject *exec = interp->executor_list_head; exec != NULL;) {
1611
        assert(exec->vm_data.valid);
1612
        _PyExecutorObject *next = exec->vm_data.links.next;
1613
        if (bloom_filter_may_contain(&exec->vm_data.bloom, &obj_filter) &&
1614
            PyList_Append(invalidate, (PyObject *)exec))
1615
        {
1616
            goto error;
1617
        }
1618
        exec = next;
1619
    }
1620
    for (Py_ssize_t i = 0; i < PyList_GET_SIZE(invalidate); i++) {
1621
        PyObject *exec = PyList_GET_ITEM(invalidate, i);
1622
        executor_clear(exec);
1623
        if (is_invalidation) {
1624
            OPT_STAT_INC(executors_invalidated);
1625
        }
1626
    }
1627
    Py_DECREF(invalidate);
1628
    return;
1629
error:
1630
    PyErr_Clear();
1631
    Py_XDECREF(invalidate);
1632
    // If we're truly out of memory, wiping out everything is a fine fallback:
1633
    _Py_Executors_InvalidateAll(interp, is_invalidation);
1634
}
1635
1636
/* Invalidate all executors */
1637
void
1638
_Py_Executors_InvalidateAll(PyInterpreterState *interp, int is_invalidation)
1639
{
1640
    while (interp->executor_list_head) {
1641
        _PyExecutorObject *executor = interp->executor_list_head;
1642
        assert(executor->vm_data.valid == 1 && executor->vm_data.linked == 1);
1643
        if (executor->vm_data.code) {
1644
            // Clear the entire code object so its co_executors array can be freed:
1645
            _PyCode_Clear_Executors(executor->vm_data.code);
1646
        }
1647
        else {
1648
            executor_clear((PyObject *)executor);
1649
        }
1650
        if (is_invalidation) {
1651
            OPT_STAT_INC(executors_invalidated);
1652
        }
1653
    }
1654
}
1655
1656
void
1657
_Py_Executors_InvalidateCold(PyInterpreterState *interp)
1658
{
1659
    /* Walk the list of executors */
1660
    /* TO DO -- Use a tree to avoid traversing as many objects */
1661
    PyObject *invalidate = PyList_New(0);
1662
    if (invalidate == NULL) {
1663
        goto error;
1664
    }
1665
1666
    /* Clearing an executor can deallocate others, so we need to make a list of
1667
     * executors to invalidate first */
1668
    for (_PyExecutorObject *exec = interp->executor_list_head; exec != NULL;) {
1669
        assert(exec->vm_data.valid);
1670
        _PyExecutorObject *next = exec->vm_data.links.next;
1671
1672
        if (!exec->vm_data.warm && PyList_Append(invalidate, (PyObject *)exec) < 0) {
1673
            goto error;
1674
        }
1675
        else {
1676
            exec->vm_data.warm = false;
1677
        }
1678
1679
        exec = next;
1680
    }
1681
    for (Py_ssize_t i = 0; i < PyList_GET_SIZE(invalidate); i++) {
1682
        PyObject *exec = PyList_GET_ITEM(invalidate, i);
1683
        executor_clear(exec);
1684
    }
1685
    Py_DECREF(invalidate);
1686
    return;
1687
error:
1688
    PyErr_Clear();
1689
    Py_XDECREF(invalidate);
1690
    // If we're truly out of memory, wiping out everything is a fine fallback
1691
    _Py_Executors_InvalidateAll(interp, 0);
1692
}
1693
1694
static void
1695
write_str(PyObject *str, FILE *out)
1696
{
1697
    // Encode the Unicode object as UTF-8
1698
    PyObject *encoded_obj = PyUnicode_AsEncodedString(str, "utf8", "strict");
1699
    if (encoded_obj == NULL) {
1700
        PyErr_Clear();
1701
        return;
1702
    }
1703
    const char *encoded_str = PyBytes_AsString(encoded_obj);
1704
    Py_ssize_t encoded_size = PyBytes_Size(encoded_obj);
1705
    fwrite(encoded_str, 1, encoded_size, out);
1706
    Py_DECREF(encoded_obj);
1707
}
1708
1709
static int
1710
find_line_number(PyCodeObject *code, _PyExecutorObject *executor)
1711
{
1712
    int code_len = (int)Py_SIZE(code);
1713
    for (int i = 0; i < code_len; i++) {
1714
        _Py_CODEUNIT *instr = &_PyCode_CODE(code)[i];
1715
        int opcode = instr->op.code;
1716
        if (opcode == ENTER_EXECUTOR) {
1717
            _PyExecutorObject *exec = code->co_executors->executors[instr->op.arg];
1718
            if (exec == executor) {
1719
                return PyCode_Addr2Line(code, i*2);
1720
            }
1721
        }
1722
        i += _PyOpcode_Caches[_Py_GetBaseCodeUnit(code, i).op.code];
1723
    }
1724
    return -1;
1725
}
1726
1727
/* Writes the node and outgoing edges for a single tracelet in graphviz format.
1728
 * Each tracelet is presented as a table of the uops it contains.
1729
 * If Py_STATS is enabled, execution counts are included.
1730
 *
1731
 * https://graphviz.readthedocs.io/en/stable/manual.html
1732
 * https://graphviz.org/gallery/
1733
 */
1734
static void
1735
executor_to_gv(_PyExecutorObject *executor, FILE *out)
1736
{
1737
    PyCodeObject *code = executor->vm_data.code;
1738
    fprintf(out, "executor_%p [\n", executor);
1739
    fprintf(out, "    shape = none\n");
1740
1741
    /* Write the HTML table for the uops */
1742
    fprintf(out, "    label = <<table border=\"0\" cellspacing=\"0\">\n");
1743
    fprintf(out, "        <tr><td port=\"start\" border=\"1\" ><b>Executor</b></td></tr>\n");
1744
    if (code == NULL) {
1745
        fprintf(out, "        <tr><td border=\"1\" >No code object</td></tr>\n");
1746
    }
1747
    else {
1748
        fprintf(out, "        <tr><td  border=\"1\" >");
1749
        write_str(code->co_qualname, out);
1750
        int line = find_line_number(code, executor);
1751
        fprintf(out, ": %d</td></tr>\n", line);
1752
    }
1753
    for (uint32_t i = 0; i < executor->code_size; i++) {
1754
        /* Write row for uop.
1755
         * The `port` is a marker so that outgoing edges can
1756
         * be placed correctly. If a row is marked `port=i17`,
1757
         * then the outgoing edge is `{EXEC_NAME}:i17 -> {TARGET}`
1758
         * https://graphviz.readthedocs.io/en/stable/manual.html#node-ports-compass
1759
         */
1760
        _PyUOpInstruction const *inst = &executor->trace[i];
1761
        const char *opname = _PyOpcode_uop_name[inst->opcode];
1762
#ifdef Py_STATS
1763
        fprintf(out, "        <tr><td port=\"i%d\" border=\"1\" >%s -- %" PRIu64 "</td></tr>\n", i, opname, inst->execution_count);
1764
#else
1765
        fprintf(out, "        <tr><td port=\"i%d\" border=\"1\" >%s</td></tr>\n", i, opname);
1766
#endif
1767
        if (inst->opcode == _EXIT_TRACE || inst->opcode == _JUMP_TO_TOP) {
1768
            break;
1769
        }
1770
    }
1771
    fprintf(out, "    </table>>\n");
1772
    fprintf(out, "]\n\n");
1773
1774
    /* Write all the outgoing edges */
1775
    for (uint32_t i = 0; i < executor->code_size; i++) {
1776
        _PyUOpInstruction const *inst = &executor->trace[i];
1777
        uint16_t flags = _PyUop_Flags[inst->opcode];
1778
        _PyExitData *exit = NULL;
1779
        if (inst->opcode == _EXIT_TRACE) {
1780
            exit = (_PyExitData *)inst->operand0;
1781
        }
1782
        else if (flags & HAS_EXIT_FLAG) {
1783
            assert(inst->format == UOP_FORMAT_JUMP);
1784
            _PyUOpInstruction const *exit_inst = &executor->trace[inst->jump_target];
1785
            assert(exit_inst->opcode == _EXIT_TRACE);
1786
            exit = (_PyExitData *)exit_inst->operand0;
1787
        }
1788
        if (exit != NULL && exit->executor != NULL) {
1789
            fprintf(out, "executor_%p:i%d -> executor_%p:start\n", executor, i, exit->executor);
1790
        }
1791
        if (inst->opcode == _EXIT_TRACE || inst->opcode == _JUMP_TO_TOP) {
1792
            break;
1793
        }
1794
    }
1795
}
1796
1797
/* Write the graph of all the live tracelets in graphviz format. */
1798
int
1799
_PyDumpExecutors(FILE *out)
1800
{
1801
    fprintf(out, "digraph ideal {\n\n");
1802
    fprintf(out, "    rankdir = \"LR\"\n\n");
1803
    PyInterpreterState *interp = PyInterpreterState_Get();
1804
    for (_PyExecutorObject *exec = interp->executor_list_head; exec != NULL;) {
1805
        executor_to_gv(exec, out);
1806
        exec = exec->vm_data.links.next;
1807
    }
1808
    fprintf(out, "}\n\n");
1809
    return 0;
1810
}
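
As a usage illustration, the graph can be written to a file and then rendered with graphviz's dot tool (for example `dot -Tsvg executors.gv -o executors.svg`). The wrapper below is hypothetical and only sketches how _PyDumpExecutors might be called from code embedded in a CPython build with the tier-2 optimizer; it is not part of optimizer.c:

#include <stdio.h>

extern int _PyDumpExecutors(FILE *out);   /* declared in CPython's internal headers */

/* Hypothetical helper: write the live executor graph to `path` in DOT format. */
int
dump_executors_to_file(const char *path)
{
    FILE *out = fopen(path, "w");
    if (out == NULL) {
        return -1;
    }
    int result = _PyDumpExecutors(out);   /* emits "digraph ideal { ... }" */
    fclose(out);
    return result;
}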
1811
1812
#else
1813
1814
int
1815
_PyDumpExecutors(FILE *out)
1816
0
{
1817
0
    PyErr_SetString(PyExc_NotImplementedError, "No JIT available");
1818
0
    return -1;
1819
0
}
1820
1821
void
1822
_PyExecutor_Free(struct _PyExecutorObject *self)
1823
0
{
1824
    /* This should never be called */
1825
0
    Py_UNREACHABLE();
1826
0
}
1827
1828
#endif /* _Py_TIER2 */