/src/cpython/Python/optimizer.c
Line | Count | Source |
1 | | #include "Python.h" |
2 | | |
3 | | #ifdef _Py_TIER2 |
4 | | |
5 | | #include "opcode.h" |
6 | | #include "pycore_interp.h" |
7 | | #include "pycore_backoff.h" |
8 | | #include "pycore_bitutils.h" // _Py_popcount32() |
9 | | #include "pycore_ceval.h" // _Py_set_eval_breaker_bit |
10 | | #include "pycore_code.h" // _Py_GetBaseCodeUnit |
11 | | #include "pycore_function.h" // _PyFunction_LookupByVersion() |
12 | | #include "pycore_interpframe.h" |
13 | | #include "pycore_object.h" // _PyObject_GC_UNTRACK() |
14 | | #include "pycore_opcode_metadata.h" // _PyOpcode_OpName[] |
15 | | #include "pycore_opcode_utils.h" // MAX_REAL_OPCODE |
16 | | #include "pycore_optimizer.h" // _Py_uop_analyze_and_optimize() |
17 | | #include "pycore_pystate.h" // _PyInterpreterState_GET() |
18 | | #include "pycore_tuple.h" // _PyTuple_FromArraySteal |
19 | | #include "pycore_unicodeobject.h" // _PyUnicode_FromASCII |
20 | | #include "pycore_uop_ids.h" |
21 | | #include "pycore_jit.h" |
22 | | #include <stdbool.h> |
23 | | #include <stdint.h> |
24 | | #include <stddef.h> |
25 | | |
26 | | #define NEED_OPCODE_METADATA |
27 | | #include "pycore_uop_metadata.h" // Uop tables |
28 | | #undef NEED_OPCODE_METADATA |
29 | | |
30 | | #define MAX_EXECUTORS_SIZE 256 |
31 | | |
32 | | #define _PyExecutorObject_CAST(op) ((_PyExecutorObject *)(op)) |
33 | | |
34 | | static bool |
35 | | has_space_for_executor(PyCodeObject *code, _Py_CODEUNIT *instr) |
36 | | { |
37 | | if (instr->op.code == ENTER_EXECUTOR) { |
38 | | return true; |
39 | | } |
40 | | if (code->co_executors == NULL) { |
41 | | return true; |
42 | | } |
43 | | return code->co_executors->size < MAX_EXECUTORS_SIZE; |
44 | | } |
45 | | |
46 | | static int32_t |
47 | | get_index_for_executor(PyCodeObject *code, _Py_CODEUNIT *instr) |
48 | | { |
49 | | if (instr->op.code == ENTER_EXECUTOR) { |
50 | | return instr->op.arg; |
51 | | } |
52 | | _PyExecutorArray *old = code->co_executors; |
53 | | int size = 0; |
54 | | int capacity = 0; |
55 | | if (old != NULL) { |
56 | | size = old->size; |
57 | | capacity = old->capacity; |
58 | | assert(size < MAX_EXECUTORS_SIZE); |
59 | | } |
60 | | assert(size <= capacity); |
61 | | if (size == capacity) { |
62 | | /* Array is full. Grow array */ |
63 | | int new_capacity = capacity ? capacity * 2 : 4; |
64 | | _PyExecutorArray *new = PyMem_Realloc( |
65 | | old, |
66 | | offsetof(_PyExecutorArray, executors) + |
67 | | new_capacity * sizeof(_PyExecutorObject *)); |
68 | | if (new == NULL) { |
69 | | return -1; |
70 | | } |
71 | | new->capacity = new_capacity; |
72 | | new->size = size; |
73 | | code->co_executors = new; |
74 | | } |
75 | | assert(size < code->co_executors->capacity); |
76 | | return size; |
77 | | } |
78 | | |
79 | | static void |
80 | | insert_executor(PyCodeObject *code, _Py_CODEUNIT *instr, int index, _PyExecutorObject *executor) |
81 | | { |
82 | | Py_INCREF(executor); |
83 | | if (instr->op.code == ENTER_EXECUTOR) { |
84 | | assert(index == instr->op.arg); |
85 | | _Py_ExecutorDetach(code->co_executors->executors[index]); |
86 | | } |
87 | | else { |
88 | | assert(code->co_executors->size == index); |
89 | | assert(code->co_executors->capacity > index); |
90 | | code->co_executors->size++; |
91 | | } |
92 | | executor->vm_data.opcode = instr->op.code; |
93 | | executor->vm_data.oparg = instr->op.arg; |
94 | | executor->vm_data.code = code; |
95 | | executor->vm_data.index = (int)(instr - _PyCode_CODE(code)); |
96 | | code->co_executors->executors[index] = executor; |
97 | | assert(index < MAX_EXECUTORS_SIZE); |
98 | | instr->op.code = ENTER_EXECUTOR; |
99 | | instr->op.arg = index; |
100 | | } |
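| | /* A rough before/after sketch of the patch performed above. The original |
| |  * opcode/oparg are stashed in executor->vm_data so _Py_ExecutorDetach() |
| |  * can restore them later, and the instruction itself becomes ENTER_EXECUTOR |
| |  * with the executor-array index as its oparg: |
| |  * |
| |  *   before:  JUMP_BACKWARD   oparg=<jump offset> |
| |  *   after:   ENTER_EXECUTOR  oparg=<index into code->co_executors> |
| |  */ |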
101 | | |
102 | | static _PyExecutorObject * |
103 | | make_executor_from_uops(_PyUOpInstruction *buffer, int length, const _PyBloomFilter *dependencies); |
104 | | |
105 | | static int |
106 | | uop_optimize(_PyInterpreterFrame *frame, _Py_CODEUNIT *instr, |
107 | | _PyExecutorObject **exec_ptr, int curr_stackentries, |
108 | | bool progress_needed); |
109 | | |
110 | | /* Returns 1 if optimized, 0 if not optimized, and -1 for an error. |
111 | | * If optimized, *executor_ptr contains a new reference to the executor |
112 | | */ |
113 | | // gh-137573: inlining this function causes stack overflows |
114 | | Py_NO_INLINE int |
115 | | _PyOptimizer_Optimize( |
116 | | _PyInterpreterFrame *frame, _Py_CODEUNIT *start, |
117 | | _PyExecutorObject **executor_ptr, int chain_depth) |
118 | | { |
119 | | _PyStackRef *stack_pointer = frame->stackpointer; |
120 | | PyInterpreterState *interp = _PyInterpreterState_GET(); |
121 | | assert(interp->jit); |
122 | | assert(!interp->compiling); |
123 | | #ifndef Py_GIL_DISABLED |
124 | | interp->compiling = true; |
125 | | // The first executor in a chain and the MAX_CHAIN_DEPTH'th executor *must* |
126 | | // make progress in order to avoid infinite loops or excessively-long |
127 | | // side-exit chains. We can only insert the executor into the bytecode if |
128 | | // this is true, since a deopt won't infinitely re-enter the executor: |
129 | | chain_depth %= MAX_CHAIN_DEPTH; |
130 | | bool progress_needed = chain_depth == 0; |
131 | | PyCodeObject *code = _PyFrame_GetCode(frame); |
132 | | assert(PyCode_Check(code)); |
133 | | if (progress_needed && !has_space_for_executor(code, start)) { |
134 | | interp->compiling = false; |
135 | | return 0; |
136 | | } |
137 | | int err = uop_optimize(frame, start, executor_ptr, (int)(stack_pointer - _PyFrame_Stackbase(frame)), progress_needed); |
138 | | if (err <= 0) { |
139 | | interp->compiling = false; |
140 | | return err; |
141 | | } |
142 | | assert(*executor_ptr != NULL); |
143 | | if (progress_needed) { |
144 | | int index = get_index_for_executor(code, start); |
145 | | if (index < 0) { |
146 | | /* Out of memory. Don't raise and assume that the |
147 | | * error will show up elsewhere. |
148 | | * |
149 | | * If an optimizer has already produced an executor, |
150 | | * it might get confused by the executor disappearing, |
151 | | * but there is not much we can do about that here. */ |
152 | | Py_DECREF(*executor_ptr); |
153 | | interp->compiling = false; |
154 | | return 0; |
155 | | } |
156 | | insert_executor(code, start, index, *executor_ptr); |
157 | | } |
158 | | else { |
159 | | (*executor_ptr)->vm_data.code = NULL; |
160 | | } |
161 | | (*executor_ptr)->vm_data.chain_depth = chain_depth; |
162 | | assert((*executor_ptr)->vm_data.valid); |
163 | | interp->compiling = false; |
164 | | return 1; |
165 | | #else |
166 | | return 0; |
167 | | #endif |
168 | | } |
169 | | |
170 | | static _PyExecutorObject * |
171 | | get_executor_lock_held(PyCodeObject *code, int offset) |
172 | | { |
173 | | int code_len = (int)Py_SIZE(code); |
174 | | for (int i = 0 ; i < code_len;) { |
175 | | if (_PyCode_CODE(code)[i].op.code == ENTER_EXECUTOR && i*2 == offset) { |
176 | | int oparg = _PyCode_CODE(code)[i].op.arg; |
177 | | _PyExecutorObject *res = code->co_executors->executors[oparg]; |
178 | | Py_INCREF(res); |
179 | | return res; |
180 | | } |
181 | | i += _PyInstruction_GetLength(code, i); |
182 | | } |
183 | | PyErr_SetString(PyExc_ValueError, "no executor at given byte offset"); |
184 | | return NULL; |
185 | | } |
186 | | |
187 | | _PyExecutorObject * |
188 | | _Py_GetExecutor(PyCodeObject *code, int offset) |
189 | | { |
190 | | _PyExecutorObject *executor; |
191 | | Py_BEGIN_CRITICAL_SECTION(code); |
192 | | executor = get_executor_lock_held(code, offset); |
193 | | Py_END_CRITICAL_SECTION(); |
194 | | return executor; |
195 | | } |
196 | | |
197 | | static PyObject * |
198 | | is_valid(PyObject *self, PyObject *Py_UNUSED(ignored)) |
199 | | { |
200 | | return PyBool_FromLong(((_PyExecutorObject *)self)->vm_data.valid); |
201 | | } |
202 | | |
203 | | static PyObject * |
204 | | get_opcode(PyObject *self, PyObject *Py_UNUSED(ignored)) |
205 | | { |
206 | | return PyLong_FromUnsignedLong(((_PyExecutorObject *)self)->vm_data.opcode); |
207 | | } |
208 | | |
209 | | static PyObject * |
210 | | get_oparg(PyObject *self, PyObject *Py_UNUSED(ignored)) |
211 | | { |
212 | | return PyLong_FromUnsignedLong(((_PyExecutorObject *)self)->vm_data.oparg); |
213 | | } |
214 | | |
215 | | ///////////////////// Experimental UOp Optimizer ///////////////////// |
216 | | |
217 | | static int executor_clear(PyObject *executor); |
218 | | static void unlink_executor(_PyExecutorObject *executor); |
219 | | |
220 | | |
221 | | void |
222 | | _PyExecutor_Free(_PyExecutorObject *self) |
223 | | { |
224 | | #ifdef _Py_JIT |
225 | | _PyJIT_Free(self); |
226 | | #endif |
227 | | PyObject_GC_Del(self); |
228 | | } |
229 | | |
230 | | void |
231 | | _Py_ClearExecutorDeletionList(PyInterpreterState *interp) |
232 | | { |
233 | | _PyRuntimeState *runtime = &_PyRuntime; |
234 | | HEAD_LOCK(runtime); |
235 | | PyThreadState* ts = PyInterpreterState_ThreadHead(interp); |
236 | | HEAD_UNLOCK(runtime); |
237 | | while (ts) { |
238 | | _PyExecutorObject *current = (_PyExecutorObject *)ts->current_executor; |
239 | | if (current != NULL) { |
240 | | /* Anything in this list will be unlinked, so we can reuse the |
241 | | * linked field as a reachability marker. */ |
242 | | current->vm_data.linked = 1; |
243 | | } |
244 | | HEAD_LOCK(runtime); |
245 | | ts = PyThreadState_Next(ts); |
246 | | HEAD_UNLOCK(runtime); |
247 | | } |
248 | | _PyExecutorObject **prev_to_next_ptr = &interp->executor_deletion_list_head; |
249 | | _PyExecutorObject *exec = *prev_to_next_ptr; |
250 | | while (exec != NULL) { |
251 | | if (exec->vm_data.linked) { |
252 | | // This executor is currently executing |
253 | | exec->vm_data.linked = 0; |
254 | | prev_to_next_ptr = &exec->vm_data.links.next; |
255 | | } |
256 | | else { |
257 | | *prev_to_next_ptr = exec->vm_data.links.next; |
258 | | _PyExecutor_Free(exec); |
259 | | } |
260 | | exec = *prev_to_next_ptr; |
261 | | } |
262 | | interp->executor_deletion_list_remaining_capacity = EXECUTOR_DELETE_LIST_MAX; |
263 | | } |
264 | | |
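| | /* Why deletion is deferred: a dying executor may still be the code some |
| |  * thread is currently running (ts->current_executor above), so uop_dealloc() |
| |  * parks it on this per-interpreter list instead of freeing it, and |
| |  * _Py_ClearExecutorDeletionList() frees only the entries no thread is |
| |  * executing. */ |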
265 | | static void |
266 | | add_to_pending_deletion_list(_PyExecutorObject *self) |
267 | | { |
268 | | PyInterpreterState *interp = PyInterpreterState_Get(); |
269 | | self->vm_data.links.next = interp->executor_deletion_list_head; |
270 | | interp->executor_deletion_list_head = self; |
271 | | if (interp->executor_deletion_list_remaining_capacity > 0) { |
272 | | interp->executor_deletion_list_remaining_capacity--; |
273 | | } |
274 | | else { |
275 | | _Py_ClearExecutorDeletionList(interp); |
276 | | } |
277 | | } |
278 | | |
279 | | static void |
280 | | uop_dealloc(PyObject *op) { |
281 | | _PyExecutorObject *self = _PyExecutorObject_CAST(op); |
282 | | _PyObject_GC_UNTRACK(self); |
283 | | assert(self->vm_data.code == NULL); |
284 | | unlink_executor(self); |
285 | | // Once unlinked it becomes impossible to invalidate an executor, so do it here. |
286 | | self->vm_data.valid = 0; |
287 | | add_to_pending_deletion_list(self); |
288 | | } |
289 | | |
290 | | const char * |
291 | | _PyUOpName(int index) |
292 | | { |
293 | | if (index < 0 || index > MAX_UOP_ID) { |
294 | | return NULL; |
295 | | } |
296 | | return _PyOpcode_uop_name[index]; |
297 | | } |
298 | | |
299 | | #ifdef Py_DEBUG |
300 | | void |
301 | | _PyUOpPrint(const _PyUOpInstruction *uop) |
302 | | { |
303 | | const char *name = _PyUOpName(uop->opcode); |
304 | | if (name == NULL) { |
305 | | printf("<uop %d>", uop->opcode); |
306 | | } |
307 | | else { |
308 | | printf("%s", name); |
309 | | } |
310 | | switch(uop->format) { |
311 | | case UOP_FORMAT_TARGET: |
312 | | printf(" (%d, target=%d, operand0=%#" PRIx64 ", operand1=%#" PRIx64, |
313 | | uop->oparg, |
314 | | uop->target, |
315 | | (uint64_t)uop->operand0, |
316 | | (uint64_t)uop->operand1); |
317 | | break; |
318 | | case UOP_FORMAT_JUMP: |
319 | | printf(" (%d, jump_target=%d, operand0=%#" PRIx64 ", operand1=%#" PRIx64, |
320 | | uop->oparg, |
321 | | uop->jump_target, |
322 | | (uint64_t)uop->operand0, |
323 | | (uint64_t)uop->operand1); |
324 | | break; |
325 | | default: |
326 | | printf(" (%d, Unknown format)", uop->oparg); |
327 | | } |
328 | | if (_PyUop_Flags[uop->opcode] & HAS_ERROR_FLAG) { |
329 | | printf(", error_target=%d", uop->error_target); |
330 | | } |
331 | | |
332 | | printf(")"); |
333 | | } |
334 | | #endif |
335 | | |
336 | | static Py_ssize_t |
337 | | uop_len(PyObject *op) |
338 | | { |
339 | | _PyExecutorObject *self = _PyExecutorObject_CAST(op); |
340 | | return self->code_size; |
341 | | } |
342 | | |
343 | | static PyObject * |
344 | | uop_item(PyObject *op, Py_ssize_t index) |
345 | | { |
346 | | _PyExecutorObject *self = _PyExecutorObject_CAST(op); |
347 | | Py_ssize_t len = uop_len(op); |
348 | | if (index < 0 || index >= len) { |
349 | | PyErr_SetNone(PyExc_IndexError); |
350 | | return NULL; |
351 | | } |
352 | | const char *name = _PyUOpName(self->trace[index].opcode); |
353 | | if (name == NULL) { |
354 | | name = "<nil>"; |
355 | | } |
356 | | PyObject *oname = _PyUnicode_FromASCII(name, strlen(name)); |
357 | | if (oname == NULL) { |
358 | | return NULL; |
359 | | } |
360 | | PyObject *oparg = PyLong_FromUnsignedLong(self->trace[index].oparg); |
361 | | if (oparg == NULL) { |
362 | | Py_DECREF(oname); |
363 | | return NULL; |
364 | | } |
365 | | PyObject *target = PyLong_FromUnsignedLong(self->trace[index].target); |
366 | | if (target == NULL) { |
367 | | Py_DECREF(oparg); |
368 | | Py_DECREF(oname); |
369 | | return NULL; |
370 | | } |
371 | | PyObject *operand = PyLong_FromUnsignedLongLong(self->trace[index].operand0); |
372 | | if (operand == NULL) { |
373 | | Py_DECREF(target); |
374 | | Py_DECREF(oparg); |
375 | | Py_DECREF(oname); |
376 | | return NULL; |
377 | | } |
378 | | PyObject *args[4] = { oname, oparg, target, operand }; |
379 | | return _PyTuple_FromArraySteal(args, 4); |
380 | | } |
381 | | |
382 | | PySequenceMethods uop_as_sequence = { |
383 | | .sq_length = uop_len, |
384 | | .sq_item = uop_item, |
385 | | }; |
386 | | |
387 | | static int |
388 | | executor_traverse(PyObject *o, visitproc visit, void *arg) |
389 | | { |
390 | | _PyExecutorObject *executor = _PyExecutorObject_CAST(o); |
391 | | for (uint32_t i = 0; i < executor->exit_count; i++) { |
392 | | Py_VISIT(executor->exits[i].executor); |
393 | | } |
394 | | return 0; |
395 | | } |
396 | | |
397 | | static PyObject * |
398 | | get_jit_code(PyObject *self, PyObject *Py_UNUSED(ignored)) |
399 | | { |
400 | | #ifndef _Py_JIT |
401 | | PyErr_SetString(PyExc_RuntimeError, "JIT support not enabled."); |
402 | | return NULL; |
403 | | #else |
404 | | _PyExecutorObject *executor = _PyExecutorObject_CAST(self); |
405 | | if (executor->jit_code == NULL || executor->jit_size == 0) { |
406 | | Py_RETURN_NONE; |
407 | | } |
408 | | return PyBytes_FromStringAndSize(executor->jit_code, executor->jit_size); |
409 | | #endif |
410 | | } |
411 | | |
412 | | static PyMethodDef uop_executor_methods[] = { |
413 | | { "is_valid", is_valid, METH_NOARGS, NULL }, |
414 | | { "get_jit_code", get_jit_code, METH_NOARGS, NULL}, |
415 | | { "get_opcode", get_opcode, METH_NOARGS, NULL }, |
416 | | { "get_oparg", get_oparg, METH_NOARGS, NULL }, |
417 | | { NULL, NULL }, |
418 | | }; |
419 | | |
420 | | static int |
421 | | executor_is_gc(PyObject *o) |
422 | | { |
423 | | return !_Py_IsImmortal(o); |
424 | | } |
425 | | |
426 | | PyTypeObject _PyUOpExecutor_Type = { |
427 | | PyVarObject_HEAD_INIT(&PyType_Type, 0) |
428 | | .tp_name = "uop_executor", |
429 | | .tp_basicsize = offsetof(_PyExecutorObject, exits), |
430 | | .tp_itemsize = 1, |
431 | | .tp_flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_DISALLOW_INSTANTIATION | Py_TPFLAGS_HAVE_GC, |
432 | | .tp_dealloc = uop_dealloc, |
433 | | .tp_as_sequence = &uop_as_sequence, |
434 | | .tp_methods = uop_executor_methods, |
435 | | .tp_traverse = executor_traverse, |
436 | | .tp_clear = executor_clear, |
437 | | .tp_is_gc = executor_is_gc, |
438 | | }; |
439 | | |
440 | | /* TO DO -- Generate these tables */ |
441 | | static const uint16_t |
442 | | _PyUOp_Replacements[MAX_UOP_ID + 1] = { |
443 | | [_ITER_JUMP_RANGE] = _GUARD_NOT_EXHAUSTED_RANGE, |
444 | | [_ITER_JUMP_LIST] = _GUARD_NOT_EXHAUSTED_LIST, |
445 | | [_ITER_JUMP_TUPLE] = _GUARD_NOT_EXHAUSTED_TUPLE, |
446 | | [_FOR_ITER] = _FOR_ITER_TIER_TWO, |
447 | | [_ITER_NEXT_LIST] = _ITER_NEXT_LIST_TIER_TWO, |
448 | | [_CHECK_PERIODIC_AT_END] = _TIER2_RESUME_CHECK, |
449 | | }; |
450 | | |
451 | | static const uint8_t |
452 | | is_for_iter_test[MAX_UOP_ID + 1] = { |
453 | | [_GUARD_NOT_EXHAUSTED_RANGE] = 1, |
454 | | [_GUARD_NOT_EXHAUSTED_LIST] = 1, |
455 | | [_GUARD_NOT_EXHAUSTED_TUPLE] = 1, |
456 | | [_FOR_ITER_TIER_TWO] = 1, |
457 | | }; |
458 | | |
459 | | static const uint16_t |
460 | | BRANCH_TO_GUARD[4][2] = { |
461 | | [POP_JUMP_IF_FALSE - POP_JUMP_IF_FALSE][0] = _GUARD_IS_TRUE_POP, |
462 | | [POP_JUMP_IF_FALSE - POP_JUMP_IF_FALSE][1] = _GUARD_IS_FALSE_POP, |
463 | | [POP_JUMP_IF_TRUE - POP_JUMP_IF_FALSE][0] = _GUARD_IS_FALSE_POP, |
464 | | [POP_JUMP_IF_TRUE - POP_JUMP_IF_FALSE][1] = _GUARD_IS_TRUE_POP, |
465 | | [POP_JUMP_IF_NONE - POP_JUMP_IF_FALSE][0] = _GUARD_IS_NOT_NONE_POP, |
466 | | [POP_JUMP_IF_NONE - POP_JUMP_IF_FALSE][1] = _GUARD_IS_NONE_POP, |
467 | | [POP_JUMP_IF_NOT_NONE - POP_JUMP_IF_FALSE][0] = _GUARD_IS_NONE_POP, |
468 | | [POP_JUMP_IF_NOT_NONE - POP_JUMP_IF_FALSE][1] = _GUARD_IS_NOT_NONE_POP, |
469 | | }; |
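| | /* The table is indexed as BRANCH_TO_GUARD[opcode - POP_JUMP_IF_FALSE][jump_likely]. |
| |  * The chosen guard asserts the outcome we traced, so it side-exits when the |
| |  * branch resolves the other way: e.g. a POP_JUMP_IF_FALSE predicted not to |
| |  * jump becomes _GUARD_IS_TRUE_POP, which exits if the value is in fact false. |
| |  */ |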
470 | | |
471 | | |
472 | | #define CONFIDENCE_RANGE 1000 |
473 | | #define CONFIDENCE_CUTOFF 333 |
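| | /* Rough worked example of the confidence accounting in the conditional-branch |
| |  * case below: confidence starts at CONFIDENCE_RANGE (1000) and is scaled at |
| |  * each traced branch by bitcount/16 (or (16 - bitcount)/16). Three branches |
| |  * that went the predicted way 12 of the last 16 times give 1000 -> 750 -> 562 |
| |  * -> 421, still above CONFIDENCE_CUTOFF (333); two 50/50 branches give |
| |  * 1000 -> 500 -> 250 and end the trace. */ |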
474 | | |
475 | | #ifdef Py_DEBUG |
476 | | #define DPRINTF(level, ...) \ |
477 | | if (lltrace >= (level)) { printf(__VA_ARGS__); } |
478 | | #else |
479 | | #define DPRINTF(level, ...) |
480 | | #endif |
481 | | |
482 | | |
483 | | static inline int |
484 | | add_to_trace( |
485 | | _PyUOpInstruction *trace, |
486 | | int trace_length, |
487 | | uint16_t opcode, |
488 | | uint16_t oparg, |
489 | | uint64_t operand, |
490 | | uint32_t target) |
491 | | { |
492 | | trace[trace_length].opcode = opcode; |
493 | | trace[trace_length].format = UOP_FORMAT_TARGET; |
494 | | trace[trace_length].target = target; |
495 | | trace[trace_length].oparg = oparg; |
496 | | trace[trace_length].operand0 = operand; |
497 | | #ifdef Py_STATS |
498 | | trace[trace_length].execution_count = 0; |
499 | | #endif |
500 | | return trace_length + 1; |
501 | | } |
502 | | |
503 | | #ifdef Py_DEBUG |
504 | | #define ADD_TO_TRACE(OPCODE, OPARG, OPERAND, TARGET) \ |
505 | | assert(trace_length < max_length); \ |
506 | | trace_length = add_to_trace(trace, trace_length, (OPCODE), (OPARG), (OPERAND), (TARGET)); \ |
507 | | if (lltrace >= 2) { \ |
508 | | printf("%4d ADD_TO_TRACE: ", trace_length); \ |
509 | | _PyUOpPrint(&trace[trace_length-1]); \ |
510 | | printf("\n"); \ |
511 | | } |
512 | | #else |
513 | | #define ADD_TO_TRACE(OPCODE, OPARG, OPERAND, TARGET) \ |
514 | | assert(trace_length < max_length); \ |
515 | | trace_length = add_to_trace(trace, trace_length, (OPCODE), (OPARG), (OPERAND), (TARGET)); |
516 | | #endif |
517 | | |
518 | | #define INSTR_IP(INSTR, CODE) \ |
519 | | ((uint32_t)((INSTR) - ((_Py_CODEUNIT *)(CODE)->co_code_adaptive))) |
520 | | |
521 | | // Reserve space for n uops |
522 | | #define RESERVE_RAW(n, opname) \ |
523 | | if (trace_length + (n) > max_length) { \ |
524 | | DPRINTF(2, "No room for %s (need %d, got %d)\n", \ |
525 | | (opname), (n), max_length - trace_length); \ |
526 | | OPT_STAT_INC(trace_too_long); \ |
527 | | goto done; \ |
528 | | } |
529 | | |
530 | | // Reserve space for N uops, plus 3 for _SET_IP, _CHECK_VALIDITY and _EXIT_TRACE |
531 | | #define RESERVE(needed) RESERVE_RAW((needed) + 3, _PyUOpName(opcode)) |
532 | | |
533 | | // Trace stack operations (used by _PUSH_FRAME, _RETURN_VALUE) |
534 | | #define TRACE_STACK_PUSH() \ |
535 | | if (trace_stack_depth >= TRACE_STACK_SIZE) { \ |
536 | | DPRINTF(2, "Trace stack overflow\n"); \ |
537 | | OPT_STAT_INC(trace_stack_overflow); \ |
538 | | return 0; \ |
539 | | } \ |
540 | | assert(func == NULL || func->func_code == (PyObject *)code); \ |
541 | | trace_stack[trace_stack_depth].func = func; \ |
542 | | trace_stack[trace_stack_depth].code = code; \ |
543 | | trace_stack[trace_stack_depth].instr = instr; \ |
544 | | trace_stack_depth++; |
545 | | #define TRACE_STACK_POP() \ |
546 | | if (trace_stack_depth <= 0) { \ |
547 | | Py_FatalError("Trace stack underflow\n"); \ |
548 | | } \ |
549 | | trace_stack_depth--; \ |
550 | | func = trace_stack[trace_stack_depth].func; \ |
551 | | code = trace_stack[trace_stack_depth].code; \ |
552 | | assert(func == NULL || func->func_code == (PyObject *)code); \ |
553 | | instr = trace_stack[trace_stack_depth].instr; |
554 | | |
555 | | /* Returns the length of the trace on success, |
556 | | * 0 if it failed to produce a worthwhile trace, |
557 | | * and -1 on an error. |
558 | | */ |
559 | | static int |
560 | | translate_bytecode_to_trace( |
561 | | _PyInterpreterFrame *frame, |
562 | | _Py_CODEUNIT *instr, |
563 | | _PyUOpInstruction *trace, |
564 | | int buffer_size, |
565 | | _PyBloomFilter *dependencies, bool progress_needed) |
566 | | { |
567 | | bool first = true; |
568 | | PyCodeObject *code = _PyFrame_GetCode(frame); |
569 | | PyFunctionObject *func = _PyFrame_GetFunction(frame); |
570 | | assert(PyFunction_Check(func)); |
571 | | PyCodeObject *initial_code = code; |
572 | | _Py_BloomFilter_Add(dependencies, initial_code); |
573 | | _Py_CODEUNIT *initial_instr = instr; |
574 | | int trace_length = 0; |
575 | | // Leave space for possible trailing _EXIT_TRACE |
576 | | int max_length = buffer_size-2; |
577 | | struct { |
578 | | PyFunctionObject *func; |
579 | | PyCodeObject *code; |
580 | | _Py_CODEUNIT *instr; |
581 | | } trace_stack[TRACE_STACK_SIZE]; |
582 | | int trace_stack_depth = 0; |
583 | | int confidence = CONFIDENCE_RANGE; // Adjusted by branch instructions |
584 | | bool jump_seen = false; |
585 | | |
586 | | #ifdef Py_DEBUG |
587 | | char *python_lltrace = Py_GETENV("PYTHON_LLTRACE"); |
588 | | int lltrace = 0; |
589 | | if (python_lltrace != NULL && *python_lltrace >= '0') { |
590 | | lltrace = *python_lltrace - '0'; // TODO: Parse an int and all that |
591 | | } |
592 | | #endif |
593 | | |
594 | | DPRINTF(2, |
595 | | "Optimizing %s (%s:%d) at byte offset %d\n", |
596 | | PyUnicode_AsUTF8(code->co_qualname), |
597 | | PyUnicode_AsUTF8(code->co_filename), |
598 | | code->co_firstlineno, |
599 | | 2 * INSTR_IP(initial_instr, code)); |
600 | | ADD_TO_TRACE(_START_EXECUTOR, 0, (uintptr_t)instr, INSTR_IP(instr, code)); |
601 | | ADD_TO_TRACE(_MAKE_WARM, 0, 0, 0); |
602 | | uint32_t target = 0; |
603 | | |
604 | | for (;;) { |
605 | | target = INSTR_IP(instr, code); |
606 | | // One for possible _DEOPT, one because _CHECK_VALIDITY itself might _DEOPT |
607 | | max_length-=2; |
608 | | uint32_t opcode = instr->op.code; |
609 | | uint32_t oparg = instr->op.arg; |
610 | | |
611 | | if (!first && instr == initial_instr) { |
612 | | // We have looped around to the start: |
613 | | RESERVE(1); |
614 | | ADD_TO_TRACE(_JUMP_TO_TOP, 0, 0, 0); |
615 | | goto done; |
616 | | } |
617 | | |
618 | | DPRINTF(2, "%d: %s(%d)\n", target, _PyOpcode_OpName[opcode], oparg); |
619 | | |
620 | | if (opcode == EXTENDED_ARG) { |
621 | | instr++; |
622 | | opcode = instr->op.code; |
623 | | oparg = (oparg << 8) | instr->op.arg; |
624 | | if (opcode == EXTENDED_ARG) { |
625 | | instr--; |
626 | | goto done; |
627 | | } |
628 | | } |
629 | | if (opcode == ENTER_EXECUTOR) { |
630 | | // We have a couple of options here. We *could* peek "underneath" |
631 | | // this executor and continue tracing, which could give us a longer, |
632 | | // more optimizable trace (at the expense of lots of duplicated |
633 | | // tier two code). Instead, we choose to just end here and stitch to |
634 | | // the other trace, which allows side-exit traces to rejoin the |
635 | | // "main" trace periodically (and also helps protect us against |
636 | | // pathological behavior where the amount of tier two code explodes |
637 | | // for a medium-length, branchy code path). This seems to work |
638 | | // better in practice, but in the future we could be smarter about |
639 | | // what we do here: |
640 | | goto done; |
641 | | } |
642 | | assert(opcode != ENTER_EXECUTOR && opcode != EXTENDED_ARG); |
643 | | RESERVE_RAW(2, "_CHECK_VALIDITY"); |
644 | | ADD_TO_TRACE(_CHECK_VALIDITY, 0, 0, target); |
645 | | if (!OPCODE_HAS_NO_SAVE_IP(opcode)) { |
646 | | RESERVE_RAW(2, "_SET_IP"); |
647 | | ADD_TO_TRACE(_SET_IP, 0, (uintptr_t)instr, target); |
648 | | } |
649 | | |
650 | | /* Special case the first instruction, |
651 | | * so that we can guarantee forward progress */ |
652 | | if (first && progress_needed) { |
653 | | assert(first); |
654 | | if (OPCODE_HAS_EXIT(opcode) || OPCODE_HAS_DEOPT(opcode)) { |
655 | | opcode = _PyOpcode_Deopt[opcode]; |
656 | | } |
657 | | assert(!OPCODE_HAS_EXIT(opcode)); |
658 | | assert(!OPCODE_HAS_DEOPT(opcode)); |
659 | | } |
660 | | |
661 | | if (OPCODE_HAS_EXIT(opcode)) { |
662 | | // Make space for side exit and final _EXIT_TRACE: |
663 | | RESERVE_RAW(2, "_EXIT_TRACE"); |
664 | | max_length--; |
665 | | } |
666 | | if (OPCODE_HAS_ERROR(opcode)) { |
667 | | // Make space for error stub and final _EXIT_TRACE: |
668 | | RESERVE_RAW(2, "_ERROR_POP_N"); |
669 | | max_length--; |
670 | | } |
671 | | switch (opcode) { |
672 | | case POP_JUMP_IF_NONE: |
673 | | case POP_JUMP_IF_NOT_NONE: |
674 | | case POP_JUMP_IF_FALSE: |
675 | | case POP_JUMP_IF_TRUE: |
676 | | { |
677 | | RESERVE(1); |
678 | | int counter = instr[1].cache; |
679 | | int bitcount = _Py_popcount32(counter); |
680 | | int jump_likely = bitcount > 8; |
681 | | /* If bitcount is 8 (half the jumps were taken), adjust confidence by 50%. |
682 | | For values in between, adjust proportionally. */ |
683 | | if (jump_likely) { |
684 | | confidence = confidence * bitcount / 16; |
685 | | } |
686 | | else { |
687 | | confidence = confidence * (16 - bitcount) / 16; |
688 | | } |
689 | | uint32_t uopcode = BRANCH_TO_GUARD[opcode - POP_JUMP_IF_FALSE][jump_likely]; |
690 | | DPRINTF(2, "%d: %s(%d): counter=%04x, bitcount=%d, likely=%d, confidence=%d, uopcode=%s\n", |
691 | | target, _PyOpcode_OpName[opcode], oparg, |
692 | | counter, bitcount, jump_likely, confidence, _PyUOpName(uopcode)); |
693 | | if (confidence < CONFIDENCE_CUTOFF) { |
694 | | DPRINTF(2, "Confidence too low (%d < %d)\n", confidence, CONFIDENCE_CUTOFF); |
695 | | OPT_STAT_INC(low_confidence); |
696 | | goto done; |
697 | | } |
698 | | _Py_CODEUNIT *next_instr = instr + 1 + _PyOpcode_Caches[_PyOpcode_Deopt[opcode]]; |
699 | | _Py_CODEUNIT *target_instr = next_instr + oparg; |
700 | | if (jump_likely) { |
701 | | DPRINTF(2, "Jump likely (%04x = %d bits), continue at byte offset %d\n", |
702 | | instr[1].cache, bitcount, 2 * INSTR_IP(target_instr, code)); |
703 | | instr = target_instr; |
704 | | ADD_TO_TRACE(uopcode, 0, 0, INSTR_IP(next_instr, code)); |
705 | | goto top; |
706 | | } |
707 | | ADD_TO_TRACE(uopcode, 0, 0, INSTR_IP(target_instr, code)); |
708 | | break; |
709 | | } |
710 | | |
711 | | case JUMP_BACKWARD: |
712 | | case JUMP_BACKWARD_JIT: |
713 | | ADD_TO_TRACE(_CHECK_PERIODIC, 0, 0, target); |
714 | | _Py_FALLTHROUGH; |
715 | | case JUMP_BACKWARD_NO_INTERRUPT: |
716 | | { |
717 | | instr += 1 + _PyOpcode_Caches[_PyOpcode_Deopt[opcode]] - (int)oparg; |
718 | | if (jump_seen) { |
719 | | OPT_STAT_INC(inner_loop); |
720 | | DPRINTF(2, "JUMP_BACKWARD not to top ends trace\n"); |
721 | | goto done; |
722 | | } |
723 | | jump_seen = true; |
724 | | goto top; |
725 | | } |
726 | | |
727 | | case JUMP_FORWARD: |
728 | | { |
729 | | RESERVE(0); |
730 | | // This will emit two _SET_IP instructions; leave it to the optimizer |
731 | | instr += oparg; |
732 | | break; |
733 | | } |
734 | | |
735 | | case RESUME: |
736 | | /* Use a special tier 2 version of RESUME_CHECK to allow traces to |
737 | | * start with RESUME_CHECK */ |
738 | | ADD_TO_TRACE(_TIER2_RESUME_CHECK, 0, 0, target); |
739 | | break; |
740 | | |
741 | | default: |
742 | | { |
743 | | const struct opcode_macro_expansion *expansion = &_PyOpcode_macro_expansion[opcode]; |
744 | | if (expansion->nuops > 0) { |
745 | | // Reserve space for nuops (+ _SET_IP + _EXIT_TRACE) |
746 | | int nuops = expansion->nuops; |
747 | | RESERVE(nuops + 1); /* One extra for exit */ |
748 | | int16_t last_op = expansion->uops[nuops-1].uop; |
749 | | if (last_op == _RETURN_VALUE || last_op == _RETURN_GENERATOR || last_op == _YIELD_VALUE) { |
750 | | // Check for trace stack underflow now: |
751 | | // We can't bail e.g. in the middle of |
752 | | // LOAD_CONST + _RETURN_VALUE. |
753 | | if (trace_stack_depth == 0) { |
754 | | DPRINTF(2, "Trace stack underflow\n"); |
755 | | OPT_STAT_INC(trace_stack_underflow); |
756 | | return 0; |
757 | | } |
758 | | } |
759 | | uint32_t orig_oparg = oparg; // For OPARG_TOP/BOTTOM |
760 | | for (int i = 0; i < nuops; i++) { |
761 | | oparg = orig_oparg; |
762 | | uint32_t uop = expansion->uops[i].uop; |
763 | | uint64_t operand = 0; |
764 | | // Add one to account for the actual opcode/oparg pair: |
765 | | int offset = expansion->uops[i].offset + 1; |
766 | | switch (expansion->uops[i].size) { |
767 | | case OPARG_SIMPLE: |
768 | | assert(opcode != JUMP_BACKWARD_NO_INTERRUPT && opcode != JUMP_BACKWARD); |
769 | | break; |
770 | | case OPARG_CACHE_1: |
771 | | operand = read_u16(&instr[offset].cache); |
772 | | break; |
773 | | case OPARG_CACHE_2: |
774 | | operand = read_u32(&instr[offset].cache); |
775 | | break; |
776 | | case OPARG_CACHE_4: |
777 | | operand = read_u64(&instr[offset].cache); |
778 | | break; |
779 | | case OPARG_TOP: // First half of super-instr |
780 | | oparg = orig_oparg >> 4; |
781 | | break; |
782 | | case OPARG_BOTTOM: // Second half of super-instr |
783 | | oparg = orig_oparg & 0xF; |
784 | | break; |
785 | | case OPARG_SAVE_RETURN_OFFSET: // op=_SAVE_RETURN_OFFSET; oparg=return_offset |
786 | | oparg = offset; |
787 | | assert(uop == _SAVE_RETURN_OFFSET); |
788 | | break; |
789 | | case OPARG_REPLACED: |
790 | | uop = _PyUOp_Replacements[uop]; |
791 | | assert(uop != 0); |
792 | | uint32_t next_inst = target + 1 + _PyOpcode_Caches[_PyOpcode_Deopt[opcode]] + (oparg > 255); |
793 | | if (uop == _TIER2_RESUME_CHECK) { |
794 | | target = next_inst; |
795 | | } |
796 | | #ifdef Py_DEBUG |
797 | | else { |
798 | | uint32_t jump_target = next_inst + oparg; |
799 | | assert(_Py_GetBaseCodeUnit(code, jump_target).op.code == END_FOR); |
800 | | assert(_Py_GetBaseCodeUnit(code, jump_target+1).op.code == POP_ITER); |
801 | | } |
802 | | #endif |
803 | | break; |
804 | | case OPERAND1_1: |
805 | | assert(trace[trace_length-1].opcode == uop); |
806 | | operand = read_u16(&instr[offset].cache); |
807 | | trace[trace_length-1].operand1 = operand; |
808 | | continue; |
809 | | case OPERAND1_2: |
810 | | assert(trace[trace_length-1].opcode == uop); |
811 | | operand = read_u32(&instr[offset].cache); |
812 | | trace[trace_length-1].operand1 = operand; |
813 | | continue; |
814 | | case OPERAND1_4: |
815 | | assert(trace[trace_length-1].opcode == uop); |
816 | | operand = read_u64(&instr[offset].cache); |
817 | | trace[trace_length-1].operand1 = operand; |
818 | | continue; |
819 | | default: |
820 | | fprintf(stderr, |
821 | | "opcode=%d, oparg=%d; nuops=%d, i=%d; size=%d, offset=%d\n", |
822 | | opcode, oparg, nuops, i, |
823 | | expansion->uops[i].size, |
824 | | expansion->uops[i].offset); |
825 | | Py_FatalError("garbled expansion"); |
826 | | } |
827 | | |
828 | | if (uop == _RETURN_VALUE || uop == _RETURN_GENERATOR || uop == _YIELD_VALUE) { |
829 | | TRACE_STACK_POP(); |
830 | | /* Set the operand to the function or code object returned to, |
831 | | * to assist optimization passes. (See _PUSH_FRAME below.) |
832 | | */ |
833 | | if (func != NULL) { |
834 | | operand = (uintptr_t)func; |
835 | | } |
836 | | else if (code != NULL) { |
837 | | operand = (uintptr_t)code | 1; |
838 | | } |
839 | | else { |
840 | | operand = 0; |
841 | | } |
842 | | ADD_TO_TRACE(uop, oparg, operand, target); |
843 | | DPRINTF(2, |
844 | | "Returning to %s (%s:%d) at byte offset %d\n", |
845 | | PyUnicode_AsUTF8(code->co_qualname), |
846 | | PyUnicode_AsUTF8(code->co_filename), |
847 | | code->co_firstlineno, |
848 | | 2 * INSTR_IP(instr, code)); |
849 | | goto top; |
850 | | } |
851 | | |
852 | | if (uop == _PUSH_FRAME) { |
853 | | assert(i + 1 == nuops); |
854 | | if (opcode == FOR_ITER_GEN || |
855 | | opcode == LOAD_ATTR_PROPERTY || |
856 | | opcode == BINARY_OP_SUBSCR_GETITEM || |
857 | | opcode == SEND_GEN) |
858 | | { |
859 | | DPRINTF(2, "Bailing due to dynamic target\n"); |
860 | | OPT_STAT_INC(unknown_callee); |
861 | | return 0; |
862 | | } |
863 | | assert(_PyOpcode_Deopt[opcode] == CALL || _PyOpcode_Deopt[opcode] == CALL_KW); |
864 | | int func_version_offset = |
865 | | offsetof(_PyCallCache, func_version)/sizeof(_Py_CODEUNIT) |
866 | | // Add one to account for the actual opcode/oparg pair: |
867 | | + 1; |
868 | | uint32_t func_version = read_u32(&instr[func_version_offset].cache); |
869 | | PyCodeObject *new_code = NULL; |
870 | | PyFunctionObject *new_func = |
871 | | _PyFunction_LookupByVersion(func_version, (PyObject **) &new_code); |
872 | | DPRINTF(2, "Function: version=%#x; new_func=%p, new_code=%p\n", |
873 | | (int)func_version, new_func, new_code); |
874 | | if (new_code != NULL) { |
875 | | if (new_code == code) { |
876 | | // Recursive call, bail (we could be here forever). |
877 | | DPRINTF(2, "Bailing on recursive call to %s (%s:%d)\n", |
878 | | PyUnicode_AsUTF8(new_code->co_qualname), |
879 | | PyUnicode_AsUTF8(new_code->co_filename), |
880 | | new_code->co_firstlineno); |
881 | | OPT_STAT_INC(recursive_call); |
882 | | ADD_TO_TRACE(uop, oparg, 0, target); |
883 | | ADD_TO_TRACE(_EXIT_TRACE, 0, 0, 0); |
884 | | goto done; |
885 | | } |
886 | | if (new_code->co_version != func_version) { |
887 | | // func.__code__ was updated. |
888 | | // Perhaps it may happen again, so don't bother tracing. |
889 | | // TODO: Reason about this -- is it better to bail or not? |
890 | | DPRINTF(2, "Bailing because co_version != func_version\n"); |
891 | | ADD_TO_TRACE(uop, oparg, 0, target); |
892 | | ADD_TO_TRACE(_EXIT_TRACE, 0, 0, 0); |
893 | | goto done; |
894 | | } |
895 | | // Increment IP to the return address |
896 | | instr += _PyOpcode_Caches[_PyOpcode_Deopt[opcode]] + 1; |
897 | | TRACE_STACK_PUSH(); |
898 | | _Py_BloomFilter_Add(dependencies, new_code); |
899 | | /* Set the operand to the callee's function or code object, |
900 | | * to assist optimization passes. |
901 | | * We prefer setting it to the function |
902 | | * but if that's not available but the code is available, |
903 | | * use the code, setting the low bit so the optimizer knows. |
904 | | */ |
905 | | if (new_func != NULL) { |
906 | | operand = (uintptr_t)new_func; |
907 | | } |
908 | | else if (new_code != NULL) { |
909 | | operand = (uintptr_t)new_code | 1; |
910 | | } |
911 | | else { |
912 | | operand = 0; |
913 | | } |
914 | | ADD_TO_TRACE(uop, oparg, operand, target); |
915 | | code = new_code; |
916 | | func = new_func; |
917 | | instr = _PyCode_CODE(code); |
918 | | DPRINTF(2, |
919 | | "Continuing in %s (%s:%d) at byte offset %d\n", |
920 | | PyUnicode_AsUTF8(code->co_qualname), |
921 | | PyUnicode_AsUTF8(code->co_filename), |
922 | | code->co_firstlineno, |
923 | | 2 * INSTR_IP(instr, code)); |
924 | | goto top; |
925 | | } |
926 | | DPRINTF(2, "Bail, new_code == NULL\n"); |
927 | | OPT_STAT_INC(unknown_callee); |
928 | | return 0; |
929 | | } |
930 | | |
931 | | if (uop == _BINARY_OP_INPLACE_ADD_UNICODE) { |
932 | | assert(i + 1 == nuops); |
933 | | _Py_CODEUNIT *next_instr = instr + 1 + _PyOpcode_Caches[_PyOpcode_Deopt[opcode]]; |
934 | | assert(next_instr->op.code == STORE_FAST); |
935 | | operand = next_instr->op.arg; |
936 | | // Skip the STORE_FAST: |
937 | | instr++; |
938 | | } |
939 | | |
940 | | // All other instructions |
941 | | ADD_TO_TRACE(uop, oparg, operand, target); |
942 | | } |
943 | | break; |
944 | | } |
945 | | DPRINTF(2, "Unsupported opcode %s\n", _PyOpcode_OpName[opcode]); |
946 | | OPT_UNSUPPORTED_OPCODE(opcode); |
947 | | goto done; // Break out of loop |
948 | | } // End default |
949 | | |
950 | | } // End switch (opcode) |
951 | | |
952 | | instr++; |
953 | | // Add cache size for opcode |
954 | | instr += _PyOpcode_Caches[_PyOpcode_Deopt[opcode]]; |
955 | | |
956 | | if (opcode == CALL_LIST_APPEND) { |
957 | | assert(instr->op.code == POP_TOP); |
958 | | instr++; |
959 | | } |
960 | | top: |
961 | | // Jump here after _PUSH_FRAME or likely branches. |
962 | | first = false; |
963 | | } // End for (;;) |
964 | | |
965 | | done: |
966 | | while (trace_stack_depth > 0) { |
967 | | TRACE_STACK_POP(); |
968 | | } |
969 | | assert(code == initial_code); |
970 | | // Skip short traces where we can't even translate a single instruction: |
971 | | if (first) { |
972 | | OPT_STAT_INC(trace_too_short); |
973 | | DPRINTF(2, |
974 | | "No trace for %s (%s:%d) at byte offset %d (no progress)\n", |
975 | | PyUnicode_AsUTF8(code->co_qualname), |
976 | | PyUnicode_AsUTF8(code->co_filename), |
977 | | code->co_firstlineno, |
978 | | 2 * INSTR_IP(initial_instr, code)); |
979 | | return 0; |
980 | | } |
981 | | if (!is_terminator(&trace[trace_length-1])) { |
982 | | /* Allow space for _EXIT_TRACE */ |
983 | | max_length += 2; |
984 | | ADD_TO_TRACE(_EXIT_TRACE, 0, 0, target); |
985 | | } |
986 | | DPRINTF(1, |
987 | | "Created a proto-trace for %s (%s:%d) at byte offset %d -- length %d\n", |
988 | | PyUnicode_AsUTF8(code->co_qualname), |
989 | | PyUnicode_AsUTF8(code->co_filename), |
990 | | code->co_firstlineno, |
991 | | 2 * INSTR_IP(initial_instr, code), |
992 | | trace_length); |
993 | | OPT_HIST(trace_length, trace_length_hist); |
994 | | return trace_length; |
995 | | } |
996 | | |
997 | | #undef RESERVE |
998 | | #undef RESERVE_RAW |
999 | | #undef INSTR_IP |
1000 | | #undef ADD_TO_TRACE |
1001 | | #undef DPRINTF |
1002 | | |
1003 | | #define UNSET_BIT(array, bit) (array[(bit)>>5] &= ~(1<<((bit)&31))) |
1004 | | #define SET_BIT(array, bit) (array[(bit)>>5] |= (1<<((bit)&31))) |
1005 | | #define BIT_IS_SET(array, bit) (array[(bit)>>5] & (1<<((bit)&31))) |
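| | /* These treat `array` as a flat bit vector of 32-bit words: (bit)>>5 selects |
| |  * the word and (bit)&31 the position within it, e.g. bit 70 is word 2, bit 6. */ |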
1006 | | |
1007 | | /* Count the number of side exits (_EXIT_TRACE uops) in the buffer |
1008 | | */ |
1009 | | static int |
1010 | | count_exits(_PyUOpInstruction *buffer, int length) |
1011 | | { |
1012 | | int exit_count = 0; |
1013 | | for (int i = 0; i < length; i++) { |
1014 | | int opcode = buffer[i].opcode; |
1015 | | if (opcode == _EXIT_TRACE) { |
1016 | | exit_count++; |
1017 | | } |
1018 | | } |
1019 | | return exit_count; |
1020 | | } |
1021 | | |
1022 | | static void make_exit(_PyUOpInstruction *inst, int opcode, int target) |
1023 | | { |
1024 | | inst->opcode = opcode; |
1025 | | inst->oparg = 0; |
1026 | | inst->operand0 = 0; |
1027 | | inst->format = UOP_FORMAT_TARGET; |
1028 | | inst->target = target; |
1029 | | #ifdef Py_STATS |
1030 | | inst->execution_count = 0; |
1031 | | #endif |
1032 | | } |
1033 | | |
1034 | | /* Convert implicit exits, errors and deopts |
1035 | | * into explicit ones. */ |
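| | /* Concretely, the pass below first squeezes out _NOPs, then for every uop |
| |  * that can deopt, exit or raise it appends a shared stub (_DEOPT, |
| |  * _HANDLE_PENDING_AND_DEOPT, _EXIT_TRACE or _ERROR_POP_N) in the spare |
| |  * space after the trace and switches the uop to UOP_FORMAT_JUMP with |
| |  * jump_target/error_target pointing at that stub. Consecutive uops with |
| |  * the same target and stub kind share a single stub. */ |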
1036 | | static int |
1037 | | prepare_for_execution(_PyUOpInstruction *buffer, int length) |
1038 | | { |
1039 | | int32_t current_jump = -1; |
1040 | | int32_t current_jump_target = -1; |
1041 | | int32_t current_error = -1; |
1042 | | int32_t current_error_target = -1; |
1043 | | int32_t current_popped = -1; |
1044 | | int32_t current_exit_op = -1; |
1045 | | /* Leaving in NOPs slows down the interpreter and messes up the stats */ |
1046 | | _PyUOpInstruction *copy_to = &buffer[0]; |
1047 | | for (int i = 0; i < length; i++) { |
1048 | | _PyUOpInstruction *inst = &buffer[i]; |
1049 | | if (inst->opcode != _NOP) { |
1050 | | if (copy_to != inst) { |
1051 | | *copy_to = *inst; |
1052 | | } |
1053 | | copy_to++; |
1054 | | } |
1055 | | } |
1056 | | length = (int)(copy_to - buffer); |
1057 | | int next_spare = length; |
1058 | | for (int i = 0; i < length; i++) { |
1059 | | _PyUOpInstruction *inst = &buffer[i]; |
1060 | | int opcode = inst->opcode; |
1061 | | int32_t target = (int32_t)uop_get_target(inst); |
1062 | | uint16_t exit_flags = _PyUop_Flags[opcode] & (HAS_EXIT_FLAG | HAS_DEOPT_FLAG | HAS_PERIODIC_FLAG); |
1063 | | if (exit_flags) { |
1064 | | uint16_t exit_op = _EXIT_TRACE; |
1065 | | if (exit_flags & HAS_DEOPT_FLAG) { |
1066 | | exit_op = _DEOPT; |
1067 | | } |
1068 | | else if (exit_flags & HAS_PERIODIC_FLAG) { |
1069 | | exit_op = _HANDLE_PENDING_AND_DEOPT; |
1070 | | } |
1071 | | int32_t jump_target = target; |
1072 | | if (is_for_iter_test[opcode]) { |
1073 | | /* Target the POP_ITER immediately after the END_FOR, |
1074 | | * leaving only the iterator on the stack. */ |
1075 | | int extended_arg = inst->oparg > 255; |
1076 | | int32_t next_inst = target + 1 + INLINE_CACHE_ENTRIES_FOR_ITER + extended_arg; |
1077 | | jump_target = next_inst + inst->oparg + 1; |
1078 | | } |
1079 | | if (jump_target != current_jump_target || current_exit_op != exit_op) { |
1080 | | make_exit(&buffer[next_spare], exit_op, jump_target); |
1081 | | current_exit_op = exit_op; |
1082 | | current_jump_target = jump_target; |
1083 | | current_jump = next_spare; |
1084 | | next_spare++; |
1085 | | } |
1086 | | buffer[i].jump_target = current_jump; |
1087 | | buffer[i].format = UOP_FORMAT_JUMP; |
1088 | | } |
1089 | | if (_PyUop_Flags[opcode] & HAS_ERROR_FLAG) { |
1090 | | int popped = (_PyUop_Flags[opcode] & HAS_ERROR_NO_POP_FLAG) ? |
1091 | | 0 : _PyUop_num_popped(opcode, inst->oparg); |
1092 | | if (target != current_error_target || popped != current_popped) { |
1093 | | current_popped = popped; |
1094 | | current_error = next_spare; |
1095 | | current_error_target = target; |
1096 | | make_exit(&buffer[next_spare], _ERROR_POP_N, 0); |
1097 | | buffer[next_spare].operand0 = target; |
1098 | | next_spare++; |
1099 | | } |
1100 | | buffer[i].error_target = current_error; |
1101 | | if (buffer[i].format == UOP_FORMAT_TARGET) { |
1102 | | buffer[i].format = UOP_FORMAT_JUMP; |
1103 | | buffer[i].jump_target = 0; |
1104 | | } |
1105 | | } |
1106 | | if (opcode == _JUMP_TO_TOP) { |
1107 | | assert(buffer[0].opcode == _START_EXECUTOR); |
1108 | | buffer[i].format = UOP_FORMAT_JUMP; |
1109 | | buffer[i].jump_target = 1; |
1110 | | } |
1111 | | } |
1112 | | return next_spare; |
1113 | | } |
1114 | | |
1115 | | /* Executor side exits */ |
1116 | | |
1117 | | static _PyExecutorObject * |
1118 | | allocate_executor(int exit_count, int length) |
1119 | | { |
1120 | | int size = exit_count*sizeof(_PyExitData) + length*sizeof(_PyUOpInstruction); |
1121 | | _PyExecutorObject *res = PyObject_GC_NewVar(_PyExecutorObject, &_PyUOpExecutor_Type, size); |
1122 | | if (res == NULL) { |
1123 | | return NULL; |
1124 | | } |
1125 | | res->trace = (_PyUOpInstruction *)(res->exits + exit_count); |
1126 | | res->code_size = length; |
1127 | | res->exit_count = exit_count; |
1128 | | return res; |
1129 | | } |
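| | /* Memory layout note: the variable-length tail of the object holds the |
| |  * exit_count _PyExitData entries first, immediately followed by the |
| |  * `length` uop instructions; res->trace points just past the exits. */ |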
1130 | | |
1131 | | #ifdef Py_DEBUG |
1132 | | |
1133 | | #define CHECK(PRED) \ |
1134 | | if (!(PRED)) { \ |
1135 | | printf(#PRED " at %d\n", i); \ |
1136 | | assert(0); \ |
1137 | | } |
1138 | | |
1139 | | static int |
1140 | | target_unused(int opcode) |
1141 | | { |
1142 | | return (_PyUop_Flags[opcode] & (HAS_ERROR_FLAG | HAS_EXIT_FLAG | HAS_DEOPT_FLAG)) == 0; |
1143 | | } |
1144 | | |
1145 | | static void |
1146 | | sanity_check(_PyExecutorObject *executor) |
1147 | | { |
1148 | | for (uint32_t i = 0; i < executor->exit_count; i++) { |
1149 | | _PyExitData *exit = &executor->exits[i]; |
1150 | | CHECK(exit->target < (1 << 25)); |
1151 | | } |
1152 | | bool ended = false; |
1153 | | uint32_t i = 0; |
1154 | | CHECK(executor->trace[0].opcode == _START_EXECUTOR || executor->trace[0].opcode == _COLD_EXIT); |
1155 | | for (; i < executor->code_size; i++) { |
1156 | | const _PyUOpInstruction *inst = &executor->trace[i]; |
1157 | | uint16_t opcode = inst->opcode; |
1158 | | CHECK(opcode <= MAX_UOP_ID); |
1159 | | CHECK(_PyOpcode_uop_name[opcode] != NULL); |
1160 | | switch(inst->format) { |
1161 | | case UOP_FORMAT_TARGET: |
1162 | | CHECK(target_unused(opcode)); |
1163 | | break; |
1164 | | case UOP_FORMAT_JUMP: |
1165 | | CHECK(inst->jump_target < executor->code_size); |
1166 | | break; |
1167 | | } |
1168 | | if (_PyUop_Flags[opcode] & HAS_ERROR_FLAG) { |
1169 | | CHECK(inst->format == UOP_FORMAT_JUMP); |
1170 | | CHECK(inst->error_target < executor->code_size); |
1171 | | } |
1172 | | if (is_terminator(inst)) { |
1173 | | ended = true; |
1174 | | i++; |
1175 | | break; |
1176 | | } |
1177 | | } |
1178 | | CHECK(ended); |
1179 | | for (; i < executor->code_size; i++) { |
1180 | | const _PyUOpInstruction *inst = &executor->trace[i]; |
1181 | | uint16_t opcode = inst->opcode; |
1182 | | CHECK( |
1183 | | opcode == _DEOPT || |
1184 | | opcode == _HANDLE_PENDING_AND_DEOPT || |
1185 | | opcode == _EXIT_TRACE || |
1186 | | opcode == _ERROR_POP_N); |
1187 | | } |
1188 | | } |
1189 | | |
1190 | | #undef CHECK |
1191 | | #endif |
1192 | | |
1193 | | /* Makes an executor from a buffer of uops. |
1194 | | * By this point prepare_for_execution() has already removed NOPs and |
1195 | | * appended the exit/error stubs, so the uops are copied as-is (in reverse) |
1196 | | * and a _PyExitData is attached to each _EXIT_TRACE. |
1197 | | */ |
1198 | | static _PyExecutorObject * |
1199 | | make_executor_from_uops(_PyUOpInstruction *buffer, int length, const _PyBloomFilter *dependencies) |
1200 | | { |
1201 | | int exit_count = count_exits(buffer, length); |
1202 | | _PyExecutorObject *executor = allocate_executor(exit_count, length); |
1203 | | if (executor == NULL) { |
1204 | | return NULL; |
1205 | | } |
1206 | | |
1207 | | /* Initialize exits */ |
1208 | | _PyExecutorObject *cold = _PyExecutor_GetColdExecutor(); |
1209 | | for (int i = 0; i < exit_count; i++) { |
1210 | | executor->exits[i].index = i; |
1211 | | executor->exits[i].temperature = initial_temperature_backoff_counter(); |
1212 | | executor->exits[i].executor = cold; |
1213 | | } |
1214 | | int next_exit = exit_count-1; |
1215 | | _PyUOpInstruction *dest = (_PyUOpInstruction *)&executor->trace[length]; |
1216 | | assert(buffer[0].opcode == _START_EXECUTOR); |
1217 | | buffer[0].operand0 = (uint64_t)executor; |
1218 | | for (int i = length-1; i >= 0; i--) { |
1219 | | int opcode = buffer[i].opcode; |
1220 | | dest--; |
1221 | | *dest = buffer[i]; |
1222 | | assert(opcode != _POP_JUMP_IF_FALSE && opcode != _POP_JUMP_IF_TRUE); |
1223 | | if (opcode == _EXIT_TRACE) { |
1224 | | _PyExitData *exit = &executor->exits[next_exit]; |
1225 | | exit->target = buffer[i].target; |
1226 | | dest->operand0 = (uint64_t)exit; |
1227 | | next_exit--; |
1228 | | } |
1229 | | } |
1230 | | assert(next_exit == -1); |
1231 | | assert(dest == executor->trace); |
1232 | | assert(dest->opcode == _START_EXECUTOR); |
1233 | | _Py_ExecutorInit(executor, dependencies); |
1234 | | #ifdef Py_DEBUG |
1235 | | char *python_lltrace = Py_GETENV("PYTHON_LLTRACE"); |
1236 | | int lltrace = 0; |
1237 | | if (python_lltrace != NULL && *python_lltrace >= '0') { |
1238 | | lltrace = *python_lltrace - '0'; // TODO: Parse an int and all that |
1239 | | } |
1240 | | if (lltrace >= 2) { |
1241 | | printf("Optimized trace (length %d):\n", length); |
1242 | | for (int i = 0; i < length; i++) { |
1243 | | printf("%4d OPTIMIZED: ", i); |
1244 | | _PyUOpPrint(&executor->trace[i]); |
1245 | | printf("\n"); |
1246 | | } |
1247 | | } |
1248 | | sanity_check(executor); |
1249 | | #endif |
1250 | | #ifdef _Py_JIT |
1251 | | executor->jit_code = NULL; |
1252 | | executor->jit_size = 0; |
1253 | | // This is initialized to true so we can prevent the executor |
1254 | | // from being immediately detected as cold and invalidated. |
1255 | | executor->vm_data.warm = true; |
1256 | | if (_PyJIT_Compile(executor, executor->trace, length)) { |
1257 | | Py_DECREF(executor); |
1258 | | return NULL; |
1259 | | } |
1260 | | #endif |
1261 | | _PyObject_GC_TRACK(executor); |
1262 | | return executor; |
1263 | | } |
1264 | | |
1265 | | #ifdef Py_STATS |
1266 | | /* Returns the effective trace length. |
1267 | | * Ignores NOPs and trailing exit and error handling. */ |
1268 | | int effective_trace_length(_PyUOpInstruction *buffer, int length) |
1269 | | { |
1270 | | int nop_count = 0; |
1271 | | for (int i = 0; i < length; i++) { |
1272 | | int opcode = buffer[i].opcode; |
1273 | | if (opcode == _NOP) { |
1274 | | nop_count++; |
1275 | | } |
1276 | | if (is_terminator(&buffer[i])) { |
1277 | | return i+1-nop_count; |
1278 | | } |
1279 | | } |
1280 | | Py_FatalError("No terminating instruction"); |
1281 | | Py_UNREACHABLE(); |
1282 | | } |
1283 | | #endif |
1284 | | |
1285 | | static int |
1286 | | uop_optimize( |
1287 | | _PyInterpreterFrame *frame, |
1288 | | _Py_CODEUNIT *instr, |
1289 | | _PyExecutorObject **exec_ptr, |
1290 | | int curr_stackentries, |
1291 | | bool progress_needed) |
1292 | | { |
1293 | | _PyBloomFilter dependencies; |
1294 | | _Py_BloomFilter_Init(&dependencies); |
1295 | | PyInterpreterState *interp = _PyInterpreterState_GET(); |
1296 | | if (interp->jit_uop_buffer == NULL) { |
1297 | | interp->jit_uop_buffer = (_PyUOpInstruction *)_PyObject_VirtualAlloc(UOP_BUFFER_SIZE); |
1298 | | if (interp->jit_uop_buffer == NULL) { |
1299 | | return 0; |
1300 | | } |
1301 | | } |
1302 | | _PyUOpInstruction *buffer = interp->jit_uop_buffer; |
1303 | | OPT_STAT_INC(attempts); |
1304 | | char *env_var = Py_GETENV("PYTHON_UOPS_OPTIMIZE"); |
1305 | | bool is_noopt = true; |
1306 | | if (env_var == NULL || *env_var == '\0' || *env_var > '0') { |
1307 | | is_noopt = false; |
1308 | | } |
1309 | | int length = translate_bytecode_to_trace(frame, instr, buffer, UOP_MAX_TRACE_LENGTH, &dependencies, progress_needed); |
1310 | | if (length <= 0) { |
1311 | | // Error or nothing translated |
1312 | | return length; |
1313 | | } |
1314 | | assert(length < UOP_MAX_TRACE_LENGTH); |
1315 | | OPT_STAT_INC(traces_created); |
1316 | | if (!is_noopt) { |
1317 | | length = _Py_uop_analyze_and_optimize(frame, buffer, |
1318 | | length, |
1319 | | curr_stackentries, &dependencies); |
1320 | | if (length <= 0) { |
1321 | | return length; |
1322 | | } |
1323 | | } |
1324 | | assert(length < UOP_MAX_TRACE_LENGTH); |
1325 | | assert(length >= 1); |
1326 | | /* Fix up */ |
1327 | | for (int pc = 0; pc < length; pc++) { |
1328 | | int opcode = buffer[pc].opcode; |
1329 | | int oparg = buffer[pc].oparg; |
1330 | | if (oparg < _PyUop_Replication[opcode].stop && oparg >= _PyUop_Replication[opcode].start) { |
1331 | | buffer[pc].opcode = opcode + oparg + 1 - _PyUop_Replication[opcode].start; |
1332 | | assert(strncmp(_PyOpcode_uop_name[buffer[pc].opcode], _PyOpcode_uop_name[opcode], strlen(_PyOpcode_uop_name[opcode])) == 0); |
1333 | | } |
1334 | | else if (is_terminator(&buffer[pc])) { |
1335 | | break; |
1336 | | } |
1337 | | assert(_PyOpcode_uop_name[buffer[pc].opcode]); |
1338 | | } |
1339 | | OPT_HIST(effective_trace_length(buffer, length), optimized_trace_length_hist); |
1340 | | length = prepare_for_execution(buffer, length); |
1341 | | assert(length <= UOP_MAX_TRACE_LENGTH); |
1342 | | _PyExecutorObject *executor = make_executor_from_uops(buffer, length, &dependencies); |
1343 | | if (executor == NULL) { |
1344 | | return -1; |
1345 | | } |
1346 | | assert(length <= UOP_MAX_TRACE_LENGTH); |
1347 | | |
1348 | | // Check executor coldness |
1349 | | PyThreadState *tstate = PyThreadState_Get(); |
1350 | | // It's okay if this ends up going negative. |
1351 | | if (--tstate->interp->executor_creation_counter == 0) { |
1352 | | _Py_set_eval_breaker_bit(tstate, _PY_EVAL_JIT_INVALIDATE_COLD_BIT); |
1353 | | } |
1354 | | |
1355 | | *exec_ptr = executor; |
1356 | | return 1; |
1357 | | } |
1358 | | |
1359 | | |
1360 | | /***************************************** |
1361 | | * Executor management |
1362 | | ****************************************/ |
1363 | | |
1364 | | /* We use a bloomfilter with k = 6, m = 256 |
1365 | | * The choice of k and the following constants |
1366 | | * could do with a more rigorous analysis, |
1367 | | * but here is a simple analysis: |
1368 | | * |
1369 | | * We want to keep the false positive rate low. |
1370 | | * For n = 5 (a trace depends on 5 objects), |
1371 | | * we expect 30 bits set, giving a false positive |
1372 | | * rate of (30/256)**6 == 2.6e-6 which is plenty |
1373 | | * good enough. |
1374 | | * |
1375 | | * However with n = 10 we expect 60 bits set (worst case), |
1376 | | * giving a false positive of (60/256)**6 == 0.0001 |
1377 | | * |
1378 | | * We choose k = 6, rather than a higher number as |
1379 | | * it means the false positive rate grows slower for high n. |
1380 | | * |
1381 | | * n = 5, k = 6 => fp = 2.6e-6 |
1382 | | * n = 5, k = 8 => fp = 3.5e-7 |
1383 | | * n = 10, k = 6 => fp = 1.6e-4 |
1384 | | * n = 10, k = 8 => fp = 0.9e-4 |
1385 | | * n = 15, k = 6 => fp = 0.18% |
1386 | | * n = 15, k = 8 => fp = 0.23% |
1387 | | * n = 20, k = 6 => fp = 1.1% |
1388 | | * n = 20, k = 8 => fp = 2.3% |
1389 | | * |
1390 | | * The above analysis assumes perfect hash functions, |
1391 | | * but those don't exist, so the real false positive |
1392 | | * rates may be worse. |
1393 | | */ |
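| | /* In formula form (the standard Bloom-filter estimate): with k = 6 probe |
| |  * bits per object and m = 256 bits, a filter with b bits set reports an |
| |  * unrelated object as present with probability roughly (b/256)**6, which |
| |  * for the 30 set bits of the n = 5 case gives (30/256)**6 ~= 2.6e-6. */ |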
1394 | | |
1395 | | #define K 6 |
1396 | | |
1397 | | #define SEED 20221211 |
1398 | | |
1399 | | /* TO DO -- Use more modern hash functions with better distribution of bits */ |
1400 | | static uint64_t |
1401 | | address_to_hash(void *ptr) { |
1402 | | assert(ptr != NULL); |
1403 | | uint64_t uhash = SEED; |
1404 | | uintptr_t addr = (uintptr_t)ptr; |
1405 | | for (int i = 0; i < SIZEOF_VOID_P; i++) { |
1406 | | uhash ^= addr & 255; |
1407 | | uhash *= (uint64_t)PyHASH_MULTIPLIER; |
1408 | | addr >>= 8; |
1409 | | } |
1410 | | return uhash; |
1411 | | } |
1412 | | |
1413 | | void |
1414 | | _Py_BloomFilter_Init(_PyBloomFilter *bloom) |
1415 | | { |
1416 | | for (int i = 0; i < _Py_BLOOM_FILTER_WORDS; i++) { |
1417 | | bloom->bits[i] = 0; |
1418 | | } |
1419 | | } |
1420 | | |
1421 | | /* We want K hash functions that each set 1 bit. |
1422 | | * A hash function that sets 1 bit in M bits can be trivially |
1423 | | * derived from a log2(M) bit hash function. |
1424 | | * So we extract 8 (log2(256)) bits at a time from |
1425 | | * the 64bit hash. */ |
1426 | | void |
1427 | | _Py_BloomFilter_Add(_PyBloomFilter *bloom, void *ptr) |
1428 | | { |
1429 | | uint64_t hash = address_to_hash(ptr); |
1430 | | assert(K <= 8); |
1431 | | for (int i = 0; i < K; i++) { |
1432 | | uint8_t bits = hash & 255; |
1433 | | bloom->bits[bits >> 5] |= (1 << (bits&31)); |
1434 | | hash >>= 8; |
1435 | | } |
1436 | | } |
1437 | | |
1438 | | static bool |
1439 | | bloom_filter_may_contain(_PyBloomFilter *bloom, _PyBloomFilter *hashes) |
1440 | | { |
1441 | | for (int i = 0; i < _Py_BLOOM_FILTER_WORDS; i++) { |
1442 | | if ((bloom->bits[i] & hashes->bits[i]) != hashes->bits[i]) { |
1443 | | return false; |
1444 | | } |
1445 | | } |
1446 | | return true; |
1447 | | } |
1448 | | |
1449 | | static void |
1450 | | link_executor(_PyExecutorObject *executor) |
1451 | | { |
1452 | | PyInterpreterState *interp = _PyInterpreterState_GET(); |
1453 | | _PyExecutorLinkListNode *links = &executor->vm_data.links; |
1454 | | _PyExecutorObject *head = interp->executor_list_head; |
1455 | | if (head == NULL) { |
1456 | | interp->executor_list_head = executor; |
1457 | | links->previous = NULL; |
1458 | | links->next = NULL; |
1459 | | } |
1460 | | else { |
1461 | | assert(head->vm_data.links.previous == NULL); |
1462 | | links->previous = NULL; |
1463 | | links->next = head; |
1464 | | head->vm_data.links.previous = executor; |
1465 | | interp->executor_list_head = executor; |
1466 | | } |
1467 | | executor->vm_data.linked = true; |
1468 | | /* executor_list_head must be first in list */ |
1469 | | assert(interp->executor_list_head->vm_data.links.previous == NULL); |
1470 | | } |
1471 | | |
1472 | | static void |
1473 | | unlink_executor(_PyExecutorObject *executor) |
1474 | | { |
1475 | | if (!executor->vm_data.linked) { |
1476 | | return; |
1477 | | } |
1478 | | _PyExecutorLinkListNode *links = &executor->vm_data.links; |
1479 | | assert(executor->vm_data.valid); |
1480 | | _PyExecutorObject *next = links->next; |
1481 | | _PyExecutorObject *prev = links->previous; |
1482 | | if (next != NULL) { |
1483 | | next->vm_data.links.previous = prev; |
1484 | | } |
1485 | | if (prev != NULL) { |
1486 | | prev->vm_data.links.next = next; |
1487 | | } |
1488 | | else { |
1489 | | // prev == NULL implies that executor is the list head |
1490 | | PyInterpreterState *interp = PyInterpreterState_Get(); |
1491 | | assert(interp->executor_list_head == executor); |
1492 | | interp->executor_list_head = next; |
1493 | | } |
1494 | | executor->vm_data.linked = false; |
1495 | | } |
1496 | | |
1497 | | /* This must be called by optimizers before using the executor */ |
1498 | | void |
1499 | | _Py_ExecutorInit(_PyExecutorObject *executor, const _PyBloomFilter *dependency_set) |
1500 | | { |
1501 | | executor->vm_data.valid = true; |
1502 | | for (int i = 0; i < _Py_BLOOM_FILTER_WORDS; i++) { |
1503 | | executor->vm_data.bloom.bits[i] = dependency_set->bits[i]; |
1504 | | } |
1505 | | link_executor(executor); |
1506 | | } |
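/* Illustrative sketch, not part of the build: the calling pattern an
 * optimizer is expected to follow per the comment above. The helper name is
 * hypothetical; the real call sites are in the optimizer's trace-creation
 * code. */
#if 0
static void
example_publish_executor(_PyExecutorObject *executor, PyCodeObject *code)
{
    _PyBloomFilter dependencies;
    _Py_BloomFilter_Init(&dependencies);
    /* Record everything the trace specializes on while it is being built. */
    _Py_BloomFilter_Add(&dependencies, code);
    /* Mark the executor valid, copy the filter in, and link the executor
     * into the interpreter's executor list. */
    _Py_ExecutorInit(executor, &dependencies);
    /* Further dependencies can still be recorded afterwards. */
    _Py_Executor_DependsOn(executor, code);
}
#endif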
1507 | | |
1508 | | _PyExecutorObject * |
1509 | | _PyExecutor_GetColdExecutor(void) |
1510 | | { |
1511 | | PyInterpreterState *interp = _PyInterpreterState_GET(); |
1512 | | if (interp->cold_executor != NULL) { |
1513 | | return interp->cold_executor; |
1514 | | } |
1515 | | _PyExecutorObject *cold = allocate_executor(0, 1); |
1516 | | if (cold == NULL) { |
1517 | | Py_FatalError("Cannot allocate core JIT code"); |
1518 | | } |
1519 | | ((_PyUOpInstruction *)cold->trace)->opcode = _COLD_EXIT; |
1520 | | #ifdef _Py_JIT |
1521 | | cold->jit_code = NULL; |
1522 | | cold->jit_size = 0; |
1523 | | // This is initialized to true so we can prevent the executor |
1524 | | // from being immediately detected as cold and invalidated. |
1525 | | cold->vm_data.warm = true; |
1526 | | if (_PyJIT_Compile(cold, cold->trace, 1)) { |
1527 | | Py_DECREF(cold); |
1528 | | Py_FatalError("Cannot allocate core JIT code"); |
1529 | | } |
1530 | | #endif |
1531 | | _Py_SetImmortal((PyObject *)cold); |
1532 | | interp->cold_executor = cold; |
1533 | | return cold; |
1534 | | } |
1535 | | |
1536 | | void |
1537 | | _PyExecutor_ClearExit(_PyExitData *exit) |
1538 | | { |
1539 | | if (exit == NULL) { |
1540 | | return; |
1541 | | } |
1542 | | _PyExecutorObject *old = exit->executor; |
1543 | | exit->executor = _PyExecutor_GetColdExecutor(); |
1544 | | Py_DECREF(old); |
1545 | | } |
1546 | | |
1547 | | /* Detaches the executor from the code object (if any) that |
1548 | | * holds a reference to it */ |
1549 | | void |
1550 | | _Py_ExecutorDetach(_PyExecutorObject *executor) |
1551 | | { |
1552 | | PyCodeObject *code = executor->vm_data.code; |
1553 | | if (code == NULL) { |
1554 | | return; |
1555 | | } |
1556 | | _Py_CODEUNIT *instruction = &_PyCode_CODE(code)[executor->vm_data.index]; |
1557 | | assert(instruction->op.code == ENTER_EXECUTOR); |
1558 | | int index = instruction->op.arg; |
1559 | | assert(code->co_executors->executors[index] == executor); |
1560 | | instruction->op.code = executor->vm_data.opcode; |
1561 | | instruction->op.arg = executor->vm_data.oparg; |
1562 | | executor->vm_data.code = NULL; |
1563 | | code->co_executors->executors[index] = NULL; |
1564 | | Py_DECREF(executor); |
1565 | | } |
1566 | | |
1567 | | static int |
1568 | | executor_clear(PyObject *op) |
1569 | | { |
1570 | | _PyExecutorObject *executor = _PyExecutorObject_CAST(op); |
1571 | | if (!executor->vm_data.valid) { |
1572 | | return 0; |
1573 | | } |
1574 | | assert(executor->vm_data.valid == 1); |
1575 | | unlink_executor(executor); |
1576 | | executor->vm_data.valid = 0; |
1577 | | |
1578 | | /* It is possible for an executor to form a reference |
1579 | | * cycle with itself, so decref'ing a side exit could |
1580 | | * free the executor unless we hold a strong reference to it |
1581 | | */ |
1582 | | _PyExecutorObject *cold = _PyExecutor_GetColdExecutor(); |
1583 | | Py_INCREF(executor); |
1584 | | for (uint32_t i = 0; i < executor->exit_count; i++) { |
1585 | | executor->exits[i].temperature = initial_unreachable_backoff_counter(); |
1586 | | _PyExecutorObject *e = executor->exits[i].executor; |
1587 | | executor->exits[i].executor = cold; |
1588 | | Py_DECREF(e); |
1589 | | } |
1590 | | _Py_ExecutorDetach(executor); |
1591 | | Py_DECREF(executor); |
1592 | | return 0; |
1593 | | } |
1594 | | |
1595 | | void |
1596 | | _Py_Executor_DependsOn(_PyExecutorObject *executor, void *obj) |
1597 | | { |
1598 | | assert(executor->vm_data.valid); |
1599 | | _Py_BloomFilter_Add(&executor->vm_data.bloom, obj); |
1600 | | } |
1601 | | |
1602 | | /* Invalidate all executors that depend on `obj` |
1603 | | * May cause other executors to be invalidated as well |
1604 | | */ |
1605 | | void |
1606 | | _Py_Executors_InvalidateDependency(PyInterpreterState *interp, void *obj, int is_invalidation) |
1607 | | { |
1608 | | _PyBloomFilter obj_filter; |
1609 | | _Py_BloomFilter_Init(&obj_filter); |
1610 | | _Py_BloomFilter_Add(&obj_filter, obj); |
1611 | | /* Walk the list of executors */ |
1612 | | /* TO DO -- Use a tree to avoid traversing as many objects */ |
1613 | | PyObject *invalidate = PyList_New(0); |
1614 | | if (invalidate == NULL) { |
1615 | | goto error; |
1616 | | } |
1617 | | /* Clearing an executor can deallocate others, so we need to make a list of |
1618 | | * executors to invalidate first */ |
1619 | | for (_PyExecutorObject *exec = interp->executor_list_head; exec != NULL;) { |
1620 | | assert(exec->vm_data.valid); |
1621 | | _PyExecutorObject *next = exec->vm_data.links.next; |
1622 | | if (bloom_filter_may_contain(&exec->vm_data.bloom, &obj_filter) && |
1623 | | PyList_Append(invalidate, (PyObject *)exec)) |
1624 | | { |
1625 | | goto error; |
1626 | | } |
1627 | | exec = next; |
1628 | | } |
1629 | | for (Py_ssize_t i = 0; i < PyList_GET_SIZE(invalidate); i++) { |
1630 | | PyObject *exec = PyList_GET_ITEM(invalidate, i); |
1631 | | executor_clear(exec); |
1632 | | if (is_invalidation) { |
1633 | | OPT_STAT_INC(executors_invalidated); |
1634 | | } |
1635 | | } |
1636 | | Py_DECREF(invalidate); |
1637 | | return; |
1638 | | error: |
1639 | | PyErr_Clear(); |
1640 | | Py_XDECREF(invalidate); |
1641 | | // If we're truly out of memory, wiping out everything is a fine fallback: |
1642 | | _Py_Executors_InvalidateAll(interp, is_invalidation); |
1643 | | } |
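/* Illustrative sketch, not part of the build: a hypothetical call site.
 * When an object that executors may have specialized on is mutated,
 * dependent executors are located via their bloom filters and cleared. */
#if 0
static void
example_invalidate_on_mutation(PyObject *mutated)
{
    PyInterpreterState *interp = _PyInterpreterState_GET();
    /* Pass 1 so the clears are counted as invalidations in the stats. */
    _Py_Executors_InvalidateDependency(interp, mutated, 1);
}
#endif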
1644 | | |
1645 | | /* Invalidate all executors */ |
1646 | | void |
1647 | | _Py_Executors_InvalidateAll(PyInterpreterState *interp, int is_invalidation) |
1648 | | { |
1649 | | while (interp->executor_list_head) { |
1650 | | _PyExecutorObject *executor = interp->executor_list_head; |
1651 | | assert(executor->vm_data.valid == 1 && executor->vm_data.linked == 1); |
1652 | | if (executor->vm_data.code) { |
1653 | | // Clear the entire code object so its co_executors array can be freed:
1654 | | _PyCode_Clear_Executors(executor->vm_data.code); |
1655 | | } |
1656 | | else { |
1657 | | executor_clear((PyObject *)executor); |
1658 | | } |
1659 | | if (is_invalidation) { |
1660 | | OPT_STAT_INC(executors_invalidated); |
1661 | | } |
1662 | | } |
1663 | | } |
1664 | | |
1665 | | void |
1666 | | _Py_Executors_InvalidateCold(PyInterpreterState *interp) |
1667 | | { |
1668 | | /* Walk the list of executors */ |
1669 | | /* TO DO -- Use a tree to avoid traversing as many objects */ |
1670 | | PyObject *invalidate = PyList_New(0); |
1671 | | if (invalidate == NULL) { |
1672 | | goto error; |
1673 | | } |
1674 | | |
1675 | | /* Clearing an executor can deallocate others, so we need to make a list of |
1676 | | * executors to invalidate first */ |
1677 | | for (_PyExecutorObject *exec = interp->executor_list_head; exec != NULL;) { |
1678 | | assert(exec->vm_data.valid); |
1679 | | _PyExecutorObject *next = exec->vm_data.links.next; |
1680 | | |
1681 | | if (!exec->vm_data.warm && PyList_Append(invalidate, (PyObject *)exec) < 0) { |
1682 | | goto error; |
1683 | | } |
1684 | | else { |
1685 | | exec->vm_data.warm = false; |
1686 | | } |
1687 | | |
1688 | | exec = next; |
1689 | | } |
1690 | | for (Py_ssize_t i = 0; i < PyList_GET_SIZE(invalidate); i++) { |
1691 | | PyObject *exec = PyList_GET_ITEM(invalidate, i); |
1692 | | executor_clear(exec); |
1693 | | } |
1694 | | Py_DECREF(invalidate); |
1695 | | return; |
1696 | | error: |
1697 | | PyErr_Clear(); |
1698 | | Py_XDECREF(invalidate); |
1699 | | // If we're truly out of memory, wiping out everything is a fine fallback |
1700 | | _Py_Executors_InvalidateAll(interp, 0); |
1701 | | } |
1702 | | |
1703 | | static void |
1704 | | write_str(PyObject *str, FILE *out) |
1705 | | { |
1706 | | // Encode the Unicode object as UTF-8 before writing it to the stream
1707 | | PyObject *encoded_obj = PyUnicode_AsEncodedString(str, "utf8", "strict"); |
1708 | | if (encoded_obj == NULL) { |
1709 | | PyErr_Clear(); |
1710 | | return; |
1711 | | } |
1712 | | const char *encoded_str = PyBytes_AsString(encoded_obj); |
1713 | | Py_ssize_t encoded_size = PyBytes_Size(encoded_obj); |
1714 | | fwrite(encoded_str, 1, encoded_size, out); |
1715 | | Py_DECREF(encoded_obj); |
1716 | | } |
1717 | | |
1718 | | static int |
1719 | | find_line_number(PyCodeObject *code, _PyExecutorObject *executor) |
1720 | | { |
1721 | | int code_len = (int)Py_SIZE(code); |
1722 | | for (int i = 0; i < code_len; i++) { |
1723 | | _Py_CODEUNIT *instr = &_PyCode_CODE(code)[i]; |
1724 | | int opcode = instr->op.code; |
1725 | | if (opcode == ENTER_EXECUTOR) { |
1726 | | _PyExecutorObject *exec = code->co_executors->executors[instr->op.arg]; |
1727 | | if (exec == executor) { |
1728 | | return PyCode_Addr2Line(code, i*2); |
1729 | | } |
1730 | | } |
1731 | | i += _PyOpcode_Caches[_Py_GetBaseCodeUnit(code, i).op.code]; |
1732 | | } |
1733 | | return -1; |
1734 | | } |
1735 | | |
1736 | | /* Writes the node and outgoing edges for a single tracelet in graphviz format. |
1737 | | * Each tracelet is presented as a table of the uops it contains. |
1738 | | * If Py_STATS is enabled, execution counts are included. |
1739 | | * |
1740 | | * https://graphviz.readthedocs.io/en/stable/manual.html |
1741 | | * https://graphviz.org/gallery/ |
1742 | | */ |
1743 | | static void |
1744 | | executor_to_gv(_PyExecutorObject *executor, FILE *out) |
1745 | | { |
1746 | | PyCodeObject *code = executor->vm_data.code; |
1747 | | fprintf(out, "executor_%p [\n", executor); |
1748 | | fprintf(out, " shape = none\n"); |
1749 | | |
1750 | | /* Write the HTML table for the uops */ |
1751 | | fprintf(out, " label = <<table border=\"0\" cellspacing=\"0\">\n"); |
1752 | | fprintf(out, " <tr><td port=\"start\" border=\"1\" ><b>Executor</b></td></tr>\n"); |
1753 | | if (code == NULL) { |
1754 | | fprintf(out, " <tr><td border=\"1\" >No code object</td></tr>\n"); |
1755 | | } |
1756 | | else { |
1757 | | fprintf(out, " <tr><td border=\"1\" >"); |
1758 | | write_str(code->co_qualname, out); |
1759 | | int line = find_line_number(code, executor); |
1760 | | fprintf(out, ": %d</td></tr>\n", line); |
1761 | | } |
1762 | | for (uint32_t i = 0; i < executor->code_size; i++) { |
1763 | | /* Write row for uop. |
1764 | | * The `port` is a marker so that outgoing edges can |
1765 | | * be attached to the right row. If a row is marked `port="i17"`,
1766 | | * then the outgoing edge written below is `executor_{ADDR}:i17 -> {TARGET}:start`.
1767 | | * https://graphviz.readthedocs.io/en/stable/manual.html#node-ports-compass |
1768 | | */ |
1769 | | _PyUOpInstruction const *inst = &executor->trace[i]; |
1770 | | const char *opname = _PyOpcode_uop_name[inst->opcode]; |
1771 | | #ifdef Py_STATS |
1772 | | fprintf(out, " <tr><td port=\"i%d\" border=\"1\" >%s -- %" PRIu64 "</td></tr>\n", i, opname, inst->execution_count); |
1773 | | #else |
1774 | | fprintf(out, " <tr><td port=\"i%d\" border=\"1\" >%s</td></tr>\n", i, opname); |
1775 | | #endif |
1776 | | if (inst->opcode == _EXIT_TRACE || inst->opcode == _JUMP_TO_TOP) { |
1777 | | break; |
1778 | | } |
1779 | | } |
1780 | | fprintf(out, " </table>>\n"); |
1781 | | fprintf(out, "]\n\n"); |
1782 | | |
1783 | | /* Write all the outgoing edges */ |
1784 | | for (uint32_t i = 0; i < executor->code_size; i++) { |
1785 | | _PyUOpInstruction const *inst = &executor->trace[i]; |
1786 | | uint16_t flags = _PyUop_Flags[inst->opcode]; |
1787 | | _PyExitData *exit = NULL; |
1788 | | if (inst->opcode == _EXIT_TRACE) { |
1789 | | exit = (_PyExitData *)inst->operand0; |
1790 | | } |
1791 | | else if (flags & HAS_EXIT_FLAG) { |
1792 | | assert(inst->format == UOP_FORMAT_JUMP); |
1793 | | _PyUOpInstruction const *exit_inst = &executor->trace[inst->jump_target]; |
1794 | | assert(exit_inst->opcode == _EXIT_TRACE); |
1795 | | exit = (_PyExitData *)exit_inst->operand0; |
1796 | | } |
1797 | | if (exit != NULL && exit->executor != NULL) { |
1798 | | fprintf(out, "executor_%p:i%d -> executor_%p:start\n", executor, i, exit->executor); |
1799 | | } |
1800 | | if (inst->opcode == _EXIT_TRACE || inst->opcode == _JUMP_TO_TOP) { |
1801 | | break; |
1802 | | } |
1803 | | } |
1804 | | } |
1805 | | |
1806 | | /* Write the graph of all the live tracelets in graphviz format. */ |
1807 | | int |
1808 | | _PyDumpExecutors(FILE *out) |
1809 | | { |
1810 | | fprintf(out, "digraph ideal {\n\n"); |
1811 | | fprintf(out, " rankdir = \"LR\"\n\n"); |
1812 | | PyInterpreterState *interp = PyInterpreterState_Get(); |
1813 | | for (_PyExecutorObject *exec = interp->executor_list_head; exec != NULL;) { |
1814 | | executor_to_gv(exec, out); |
1815 | | exec = exec->vm_data.links.next; |
1816 | | } |
1817 | | fprintf(out, "}\n\n"); |
1818 | | return 0; |
1819 | | } |
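/* Illustrative sketch, not part of the build: dumping the executor graph to
 * a file of the caller's choosing (the path is hypothetical). The output is
 * a DOT digraph with one HTML-table node per tracelet and one edge per
 * populated side exit; render it with e.g. `dot -Tsvg executors.dot`. */
#if 0
static int
example_dump_executors(void)
{
    FILE *out = fopen("executors.dot", "w");
    if (out == NULL) {
        return -1;
    }
    int res = _PyDumpExecutors(out);
    fclose(out);
    return res;
}
#endif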
1820 | | |
1821 | | #else |
1822 | | |
1823 | | int |
1824 | | _PyDumpExecutors(FILE *out) |
1825 | 0 | { |
1826 | 0 | PyErr_SetString(PyExc_NotImplementedError, "No JIT available"); |
1827 | 0 | return -1; |
1828 | 0 | } |
1829 | | |
1830 | | void |
1831 | | _PyExecutor_Free(struct _PyExecutorObject *self) |
1832 | 0 | { |
1833 | | /* This should never be called */ |
1834 | 0 | Py_UNREACHABLE(); |
1835 | 0 | } |
1836 | | |
1837 | | #endif /* _Py_TIER2 */ |