/src/cpython/Python/optimizer.c
Line | Count | Source
1 | | #include "Python.h" |
2 | | |
3 | | #ifdef _Py_TIER2 |
4 | | |
5 | | #include "opcode.h" |
6 | | #include "pycore_interp.h" |
7 | | #include "pycore_backoff.h" |
8 | | #include "pycore_bitutils.h" // _Py_popcount32() |
9 | | #include "pycore_code.h" // _Py_GetBaseCodeUnit |
10 | | #include "pycore_function.h" // _PyFunction_LookupByVersion() |
11 | | #include "pycore_interpframe.h" |
12 | | #include "pycore_object.h" // _PyObject_GC_UNTRACK() |
13 | | #include "pycore_opcode_metadata.h" // _PyOpcode_OpName[] |
14 | | #include "pycore_opcode_utils.h" // MAX_REAL_OPCODE |
15 | | #include "pycore_optimizer.h" // _Py_uop_analyze_and_optimize() |
16 | | #include "pycore_pystate.h" // _PyInterpreterState_GET() |
17 | | #include "pycore_tuple.h" // _PyTuple_FromArraySteal |
18 | | #include "pycore_unicodeobject.h" // _PyUnicode_FromASCII |
19 | | #include "pycore_uop_ids.h" |
20 | | #include "pycore_jit.h" |
21 | | #include <stdbool.h> |
22 | | #include <stdint.h> |
23 | | #include <stddef.h> |
24 | | |
25 | | #define NEED_OPCODE_METADATA |
26 | | #include "pycore_uop_metadata.h" // Uop tables |
27 | | #undef NEED_OPCODE_METADATA |
28 | | |
29 | | #define MAX_EXECUTORS_SIZE 256 |
30 | | |
31 | | #define _PyExecutorObject_CAST(op) ((_PyExecutorObject *)(op)) |
32 | | |
33 | | static bool |
34 | | has_space_for_executor(PyCodeObject *code, _Py_CODEUNIT *instr) |
35 | | { |
36 | | if (instr->op.code == ENTER_EXECUTOR) { |
37 | | return true; |
38 | | } |
39 | | if (code->co_executors == NULL) { |
40 | | return true; |
41 | | } |
42 | | return code->co_executors->size < MAX_EXECUTORS_SIZE; |
43 | | } |
44 | | |
45 | | static int32_t |
46 | | get_index_for_executor(PyCodeObject *code, _Py_CODEUNIT *instr) |
47 | | { |
48 | | if (instr->op.code == ENTER_EXECUTOR) { |
49 | | return instr->op.arg; |
50 | | } |
51 | | _PyExecutorArray *old = code->co_executors; |
52 | | int size = 0; |
53 | | int capacity = 0; |
54 | | if (old != NULL) { |
55 | | size = old->size; |
56 | | capacity = old->capacity; |
57 | | assert(size < MAX_EXECUTORS_SIZE); |
58 | | } |
59 | | assert(size <= capacity); |
60 | | if (size == capacity) { |
61 | | /* Array is full. Grow array */ |
62 | | int new_capacity = capacity ? capacity * 2 : 4; |
63 | | _PyExecutorArray *new = PyMem_Realloc( |
64 | | old, |
65 | | offsetof(_PyExecutorArray, executors) + |
66 | | new_capacity * sizeof(_PyExecutorObject *)); |
67 | | if (new == NULL) { |
68 | | return -1; |
69 | | } |
70 | | new->capacity = new_capacity; |
71 | | new->size = size; |
72 | | code->co_executors = new; |
73 | | } |
74 | | assert(size < code->co_executors->capacity); |
75 | | return size; |
76 | | } |
77 | | |
78 | | static void |
79 | | insert_executor(PyCodeObject *code, _Py_CODEUNIT *instr, int index, _PyExecutorObject *executor) |
80 | | { |
81 | | Py_INCREF(executor); |
82 | | if (instr->op.code == ENTER_EXECUTOR) { |
83 | | assert(index == instr->op.arg); |
84 | | _Py_ExecutorDetach(code->co_executors->executors[index]); |
85 | | } |
86 | | else { |
87 | | assert(code->co_executors->size == index); |
88 | | assert(code->co_executors->capacity > index); |
89 | | code->co_executors->size++; |
90 | | } |
91 | | executor->vm_data.opcode = instr->op.code; |
92 | | executor->vm_data.oparg = instr->op.arg; |
93 | | executor->vm_data.code = code; |
94 | | executor->vm_data.index = (int)(instr - _PyCode_CODE(code)); |
95 | | code->co_executors->executors[index] = executor; |
96 | | assert(index < MAX_EXECUTORS_SIZE); |
97 | | instr->op.code = ENTER_EXECUTOR; |
98 | | instr->op.arg = index; |
99 | | } |
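| |
| | /* Editor's note: insert_executor saves the original opcode/oparg (and the
| |  * owning code object and instruction index) in executor->vm_data, then
| |  * rewrites the bytecode slot to ENTER_EXECUTOR with the executor's index
| |  * as its oparg. _Py_ExecutorDetach (further down in this file) uses that
| |  * saved state to restore the original instruction. */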
100 | | |
101 | | static _PyExecutorObject * |
102 | | make_executor_from_uops(_PyUOpInstruction *buffer, int length, const _PyBloomFilter *dependencies); |
103 | | |
104 | | static int |
105 | | uop_optimize(_PyInterpreterFrame *frame, _Py_CODEUNIT *instr, |
106 | | _PyExecutorObject **exec_ptr, int curr_stackentries, |
107 | | bool progress_needed); |
108 | | |
109 | | /* Returns 1 if optimized, 0 if not optimized, and -1 for an error. |
110 | | * If optimized, *executor_ptr contains a new reference to the executor |
111 | | */ |
112 | | int |
113 | | _PyOptimizer_Optimize( |
114 | | _PyInterpreterFrame *frame, _Py_CODEUNIT *start, |
115 | | _PyExecutorObject **executor_ptr, int chain_depth) |
116 | | { |
117 | | _PyStackRef *stack_pointer = frame->stackpointer; |
118 | | assert(_PyInterpreterState_GET()->jit); |
119 | | // The first executor in a chain and the MAX_CHAIN_DEPTH'th executor *must* |
120 | | // make progress in order to avoid infinite loops or excessively long
121 | | // side-exit chains. We can only insert the executor into the bytecode if |
122 | | // this is true, since a deopt won't infinitely re-enter the executor: |
123 | | chain_depth %= MAX_CHAIN_DEPTH; |
124 | | bool progress_needed = chain_depth == 0; |
125 | | PyCodeObject *code = _PyFrame_GetCode(frame); |
126 | | assert(PyCode_Check(code)); |
127 | | if (progress_needed && !has_space_for_executor(code, start)) { |
128 | | return 0; |
129 | | } |
130 | | int err = uop_optimize(frame, start, executor_ptr, (int)(stack_pointer - _PyFrame_Stackbase(frame)), progress_needed); |
131 | | if (err <= 0) { |
132 | | return err; |
133 | | } |
134 | | assert(*executor_ptr != NULL); |
135 | | if (progress_needed) { |
136 | | int index = get_index_for_executor(code, start); |
137 | | if (index < 0) { |
138 | | /* Out of memory. Don't raise; assume that the
139 | | * error will show up elsewhere. |
140 | | * |
141 | | * If an optimizer has already produced an executor, |
142 | | * it might get confused by the executor disappearing, |
143 | | * but there is not much we can do about that here. */ |
144 | | Py_DECREF(*executor_ptr); |
145 | | return 0; |
146 | | } |
147 | | insert_executor(code, start, index, *executor_ptr); |
148 | | } |
149 | | else { |
150 | | (*executor_ptr)->vm_data.code = NULL; |
151 | | } |
152 | | (*executor_ptr)->vm_data.chain_depth = chain_depth; |
153 | | assert((*executor_ptr)->vm_data.valid); |
154 | | return 1; |
155 | | } |
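| |
| | /* Editor's sketch (not real ceval code) of the calling convention documented
| |  * above, assuming the caller already holds `frame` and `start`: -1 means an
| |  * exception should be set, 0 means "not optimized, nothing to clean up", and
| |  * 1 means *executor_ptr holds a new reference the caller must release. */
| | #if 0
| | _PyExecutorObject *executor = NULL;
| | int optimized = _PyOptimizer_Optimize(frame, start, &executor, 0);
| | if (optimized < 0) {
| |     goto error;                 /* an exception should already be set */
| | }
| | if (optimized > 0) {
| |     /* ... run or cache the executor ... */
| |     Py_DECREF(executor);        /* we own a new reference */
| | }
| | #endif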
156 | | |
157 | | static _PyExecutorObject * |
158 | | get_executor_lock_held(PyCodeObject *code, int offset) |
159 | | { |
160 | | int code_len = (int)Py_SIZE(code); |
161 | | for (int i = 0 ; i < code_len;) { |
162 | | if (_PyCode_CODE(code)[i].op.code == ENTER_EXECUTOR && i*2 == offset) { |
163 | | int oparg = _PyCode_CODE(code)[i].op.arg; |
164 | | _PyExecutorObject *res = code->co_executors->executors[oparg]; |
165 | | Py_INCREF(res); |
166 | | return res; |
167 | | } |
168 | | i += _PyInstruction_GetLength(code, i); |
169 | | } |
170 | | PyErr_SetString(PyExc_ValueError, "no executor at given byte offset"); |
171 | | return NULL; |
172 | | } |
173 | | |
174 | | _PyExecutorObject * |
175 | | _Py_GetExecutor(PyCodeObject *code, int offset) |
176 | | { |
177 | | _PyExecutorObject *executor; |
178 | | Py_BEGIN_CRITICAL_SECTION(code); |
179 | | executor = get_executor_lock_held(code, offset); |
180 | | Py_END_CRITICAL_SECTION(); |
181 | | return executor; |
182 | | } |
183 | | |
184 | | static PyObject * |
185 | | is_valid(PyObject *self, PyObject *Py_UNUSED(ignored)) |
186 | | { |
187 | | return PyBool_FromLong(((_PyExecutorObject *)self)->vm_data.valid); |
188 | | } |
189 | | |
190 | | static PyObject * |
191 | | get_opcode(PyObject *self, PyObject *Py_UNUSED(ignored)) |
192 | | { |
193 | | return PyLong_FromUnsignedLong(((_PyExecutorObject *)self)->vm_data.opcode); |
194 | | } |
195 | | |
196 | | static PyObject * |
197 | | get_oparg(PyObject *self, PyObject *Py_UNUSED(ignored)) |
198 | | { |
199 | | return PyLong_FromUnsignedLong(((_PyExecutorObject *)self)->vm_data.oparg); |
200 | | } |
201 | | |
202 | | ///////////////////// Experimental UOp Optimizer ///////////////////// |
203 | | |
204 | | static int executor_clear(PyObject *executor); |
205 | | static void unlink_executor(_PyExecutorObject *executor); |
206 | | |
207 | | |
208 | | static void |
209 | | free_executor(_PyExecutorObject *self) |
210 | | { |
211 | | #ifdef _Py_JIT |
212 | | _PyJIT_Free(self); |
213 | | #endif |
214 | | PyObject_GC_Del(self); |
215 | | } |
216 | | |
217 | | void |
218 | | _Py_ClearExecutorDeletionList(PyInterpreterState *interp) |
219 | | { |
220 | | _PyRuntimeState *runtime = &_PyRuntime; |
221 | | HEAD_LOCK(runtime); |
222 | | PyThreadState* ts = PyInterpreterState_ThreadHead(interp); |
223 | | HEAD_UNLOCK(runtime); |
224 | | while (ts) { |
225 | | _PyExecutorObject *current = (_PyExecutorObject *)ts->current_executor; |
226 | | if (current != NULL) { |
227 | | /* Anything in this list will be unlinked, so we can reuse the |
228 | | * linked field as a reachability marker. */ |
229 | | current->vm_data.linked = 1; |
230 | | } |
231 | | HEAD_LOCK(runtime); |
232 | | ts = PyThreadState_Next(ts); |
233 | | HEAD_UNLOCK(runtime); |
234 | | } |
235 | | _PyExecutorObject **prev_to_next_ptr = &interp->executor_deletion_list_head; |
236 | | _PyExecutorObject *exec = *prev_to_next_ptr; |
237 | | while (exec != NULL) { |
238 | | if (exec->vm_data.linked) { |
239 | | // This executor is currently executing |
240 | | exec->vm_data.linked = 0; |
241 | | prev_to_next_ptr = &exec->vm_data.links.next; |
242 | | } |
243 | | else { |
244 | | *prev_to_next_ptr = exec->vm_data.links.next; |
245 | | free_executor(exec); |
246 | | } |
247 | | exec = *prev_to_next_ptr; |
248 | | } |
249 | | interp->executor_deletion_list_remaining_capacity = EXECUTOR_DELETE_LIST_MAX; |
250 | | } |
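| |
| | /* Editor's note: the sweep above is two-phase. First, every thread's
| |  * currently-running executor is marked reachable via the reused
| |  * vm_data.linked bit; then the pending-deletion list is walked, freeing
| |  * everything unmarked and keeping marked executors queued for a later
| |  * sweep. This is what lets uop_dealloc defer the actual free safely. */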
251 | | |
252 | | static void |
253 | | add_to_pending_deletion_list(_PyExecutorObject *self) |
254 | | { |
255 | | PyInterpreterState *interp = PyInterpreterState_Get(); |
256 | | self->vm_data.links.next = interp->executor_deletion_list_head; |
257 | | interp->executor_deletion_list_head = self; |
258 | | if (interp->executor_deletion_list_remaining_capacity > 0) { |
259 | | interp->executor_deletion_list_remaining_capacity--; |
260 | | } |
261 | | else { |
262 | | _Py_ClearExecutorDeletionList(interp); |
263 | | } |
264 | | } |
265 | | |
266 | | static void |
267 | | uop_dealloc(PyObject *op) { |
268 | | _PyExecutorObject *self = _PyExecutorObject_CAST(op); |
269 | | _PyObject_GC_UNTRACK(self); |
270 | | assert(self->vm_data.code == NULL); |
271 | | unlink_executor(self); |
272 | | // Once unlinked, an executor can no longer be invalidated, so mark it invalid here.
273 | | self->vm_data.valid = 0; |
274 | | add_to_pending_deletion_list(self); |
275 | | } |
276 | | |
277 | | const char * |
278 | | _PyUOpName(int index) |
279 | | { |
280 | | if (index < 0 || index > MAX_UOP_ID) { |
281 | | return NULL; |
282 | | } |
283 | | return _PyOpcode_uop_name[index]; |
284 | | } |
285 | | |
286 | | #ifdef Py_DEBUG |
287 | | void |
288 | | _PyUOpPrint(const _PyUOpInstruction *uop) |
289 | | { |
290 | | const char *name = _PyUOpName(uop->opcode); |
291 | | if (name == NULL) { |
292 | | printf("<uop %d>", uop->opcode); |
293 | | } |
294 | | else { |
295 | | printf("%s", name); |
296 | | } |
297 | | switch(uop->format) { |
298 | | case UOP_FORMAT_TARGET: |
299 | | printf(" (%d, target=%d, operand0=%#" PRIx64 ", operand1=%#" PRIx64, |
300 | | uop->oparg, |
301 | | uop->target, |
302 | | (uint64_t)uop->operand0, |
303 | | (uint64_t)uop->operand1); |
304 | | break; |
305 | | case UOP_FORMAT_JUMP: |
306 | | printf(" (%d, jump_target=%d, operand0=%#" PRIx64 ", operand1=%#" PRIx64, |
307 | | uop->oparg, |
308 | | uop->jump_target, |
309 | | (uint64_t)uop->operand0, |
310 | | (uint64_t)uop->operand1); |
311 | | break; |
312 | | default: |
313 | | printf(" (%d, Unknown format)", uop->oparg); |
314 | | } |
315 | | if (_PyUop_Flags[uop->opcode] & HAS_ERROR_FLAG) { |
316 | | printf(", error_target=%d", uop->error_target); |
317 | | } |
318 | | |
319 | | printf(")"); |
320 | | } |
321 | | #endif |
322 | | |
323 | | static Py_ssize_t |
324 | | uop_len(PyObject *op) |
325 | | { |
326 | | _PyExecutorObject *self = _PyExecutorObject_CAST(op); |
327 | | return self->code_size; |
328 | | } |
329 | | |
330 | | static PyObject * |
331 | | uop_item(PyObject *op, Py_ssize_t index) |
332 | | { |
333 | | _PyExecutorObject *self = _PyExecutorObject_CAST(op); |
334 | | Py_ssize_t len = uop_len(op); |
335 | | if (index < 0 || index >= len) { |
336 | | PyErr_SetNone(PyExc_IndexError); |
337 | | return NULL; |
338 | | } |
339 | | const char *name = _PyUOpName(self->trace[index].opcode); |
340 | | if (name == NULL) { |
341 | | name = "<nil>"; |
342 | | } |
343 | | PyObject *oname = _PyUnicode_FromASCII(name, strlen(name)); |
344 | | if (oname == NULL) { |
345 | | return NULL; |
346 | | } |
347 | | PyObject *oparg = PyLong_FromUnsignedLong(self->trace[index].oparg); |
348 | | if (oparg == NULL) { |
349 | | Py_DECREF(oname); |
350 | | return NULL; |
351 | | } |
352 | | PyObject *target = PyLong_FromUnsignedLong(self->trace[index].target); |
353 | | if (target == NULL) {
354 | | Py_DECREF(oparg); |
355 | | Py_DECREF(oname); |
356 | | return NULL; |
357 | | } |
358 | | PyObject *operand = PyLong_FromUnsignedLongLong(self->trace[index].operand0); |
359 | | if (operand == NULL) { |
360 | | Py_DECREF(target); |
361 | | Py_DECREF(oparg); |
362 | | Py_DECREF(oname); |
363 | | return NULL; |
364 | | } |
365 | | PyObject *args[4] = { oname, oparg, target, operand }; |
366 | | return _PyTuple_FromArraySteal(args, 4); |
367 | | } |
368 | | |
369 | | PySequenceMethods uop_as_sequence = { |
370 | | .sq_length = uop_len, |
371 | | .sq_item = uop_item, |
372 | | }; |
373 | | |
374 | | static int |
375 | | executor_traverse(PyObject *o, visitproc visit, void *arg) |
376 | | { |
377 | | _PyExecutorObject *executor = _PyExecutorObject_CAST(o); |
378 | | for (uint32_t i = 0; i < executor->exit_count; i++) { |
379 | | Py_VISIT(executor->exits[i].executor); |
380 | | } |
381 | | return 0; |
382 | | } |
383 | | |
384 | | static PyObject * |
385 | | get_jit_code(PyObject *self, PyObject *Py_UNUSED(ignored)) |
386 | | { |
387 | | #ifndef _Py_JIT |
388 | | PyErr_SetString(PyExc_RuntimeError, "JIT support not enabled."); |
389 | | return NULL; |
390 | | #else |
391 | | _PyExecutorObject *executor = _PyExecutorObject_CAST(self); |
392 | | if (executor->jit_code == NULL || executor->jit_size == 0) { |
393 | | Py_RETURN_NONE; |
394 | | } |
395 | | return PyBytes_FromStringAndSize(executor->jit_code, executor->jit_size); |
396 | | #endif |
397 | | } |
398 | | |
399 | | static PyMethodDef uop_executor_methods[] = { |
400 | | { "is_valid", is_valid, METH_NOARGS, NULL }, |
401 | | { "get_jit_code", get_jit_code, METH_NOARGS, NULL}, |
402 | | { "get_opcode", get_opcode, METH_NOARGS, NULL }, |
403 | | { "get_oparg", get_oparg, METH_NOARGS, NULL }, |
404 | | { NULL, NULL }, |
405 | | }; |
406 | | |
407 | | static int |
408 | | executor_is_gc(PyObject *o) |
409 | | { |
410 | | return !_Py_IsImmortal(o); |
411 | | } |
412 | | |
413 | | PyTypeObject _PyUOpExecutor_Type = { |
414 | | PyVarObject_HEAD_INIT(&PyType_Type, 0) |
415 | | .tp_name = "uop_executor", |
416 | | .tp_basicsize = offsetof(_PyExecutorObject, exits), |
417 | | .tp_itemsize = 1, |
418 | | .tp_flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_DISALLOW_INSTANTIATION | Py_TPFLAGS_HAVE_GC, |
419 | | .tp_dealloc = uop_dealloc, |
420 | | .tp_as_sequence = &uop_as_sequence, |
421 | | .tp_methods = uop_executor_methods, |
422 | | .tp_traverse = executor_traverse, |
423 | | .tp_clear = executor_clear, |
424 | | .tp_is_gc = executor_is_gc, |
425 | | }; |
426 | | |
427 | | /* TO DO -- Generate these tables */ |
428 | | static const uint16_t |
429 | | _PyUOp_Replacements[MAX_UOP_ID + 1] = { |
430 | | [_ITER_JUMP_RANGE] = _GUARD_NOT_EXHAUSTED_RANGE, |
431 | | [_ITER_JUMP_LIST] = _GUARD_NOT_EXHAUSTED_LIST, |
432 | | [_ITER_JUMP_TUPLE] = _GUARD_NOT_EXHAUSTED_TUPLE, |
433 | | [_FOR_ITER] = _FOR_ITER_TIER_TWO, |
434 | | [_ITER_NEXT_LIST] = _ITER_NEXT_LIST_TIER_TWO, |
435 | | }; |
436 | | |
437 | | static const uint8_t |
438 | | is_for_iter_test[MAX_UOP_ID + 1] = { |
439 | | [_GUARD_NOT_EXHAUSTED_RANGE] = 1, |
440 | | [_GUARD_NOT_EXHAUSTED_LIST] = 1, |
441 | | [_GUARD_NOT_EXHAUSTED_TUPLE] = 1, |
442 | | [_FOR_ITER_TIER_TWO] = 1, |
443 | | }; |
444 | | |
445 | | static const uint16_t |
446 | | BRANCH_TO_GUARD[4][2] = { |
447 | | [POP_JUMP_IF_FALSE - POP_JUMP_IF_FALSE][0] = _GUARD_IS_TRUE_POP, |
448 | | [POP_JUMP_IF_FALSE - POP_JUMP_IF_FALSE][1] = _GUARD_IS_FALSE_POP, |
449 | | [POP_JUMP_IF_TRUE - POP_JUMP_IF_FALSE][0] = _GUARD_IS_FALSE_POP, |
450 | | [POP_JUMP_IF_TRUE - POP_JUMP_IF_FALSE][1] = _GUARD_IS_TRUE_POP, |
451 | | [POP_JUMP_IF_NONE - POP_JUMP_IF_FALSE][0] = _GUARD_IS_NOT_NONE_POP, |
452 | | [POP_JUMP_IF_NONE - POP_JUMP_IF_FALSE][1] = _GUARD_IS_NONE_POP, |
453 | | [POP_JUMP_IF_NOT_NONE - POP_JUMP_IF_FALSE][0] = _GUARD_IS_NONE_POP, |
454 | | [POP_JUMP_IF_NOT_NONE - POP_JUMP_IF_FALSE][1] = _GUARD_IS_NOT_NONE_POP, |
455 | | }; |
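| |
| | /* For example, BRANCH_TO_GUARD[POP_JUMP_IF_TRUE - POP_JUMP_IF_FALSE][1] is
| |  * _GUARD_IS_TRUE_POP: when the projected trace assumes the jump is taken,
| |  * the guard keeps executing while the condition stays true and side-exits
| |  * otherwise. */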
456 | | |
457 | | |
458 | | #define CONFIDENCE_RANGE 1000 |
459 | | #define CONFIDENCE_CUTOFF 333 |
460 | | |
461 | | #ifdef Py_DEBUG |
462 | | #define DPRINTF(level, ...) \ |
463 | | if (lltrace >= (level)) { printf(__VA_ARGS__); } |
464 | | #else |
465 | | #define DPRINTF(level, ...) |
466 | | #endif |
467 | | |
468 | | |
469 | | static inline int |
470 | | add_to_trace( |
471 | | _PyUOpInstruction *trace, |
472 | | int trace_length, |
473 | | uint16_t opcode, |
474 | | uint16_t oparg, |
475 | | uint64_t operand, |
476 | | uint32_t target) |
477 | | { |
478 | | trace[trace_length].opcode = opcode; |
479 | | trace[trace_length].format = UOP_FORMAT_TARGET; |
480 | | trace[trace_length].target = target; |
481 | | trace[trace_length].oparg = oparg; |
482 | | trace[trace_length].operand0 = operand; |
483 | | #ifdef Py_STATS |
484 | | trace[trace_length].execution_count = 0; |
485 | | #endif |
486 | | return trace_length + 1; |
487 | | } |
488 | | |
489 | | #ifdef Py_DEBUG |
490 | | #define ADD_TO_TRACE(OPCODE, OPARG, OPERAND, TARGET) \ |
491 | | assert(trace_length < max_length); \ |
492 | | trace_length = add_to_trace(trace, trace_length, (OPCODE), (OPARG), (OPERAND), (TARGET)); \ |
493 | | if (lltrace >= 2) { \ |
494 | | printf("%4d ADD_TO_TRACE: ", trace_length); \ |
495 | | _PyUOpPrint(&trace[trace_length-1]); \ |
496 | | printf("\n"); \ |
497 | | } |
498 | | #else |
499 | | #define ADD_TO_TRACE(OPCODE, OPARG, OPERAND, TARGET) \ |
500 | | assert(trace_length < max_length); \ |
501 | | trace_length = add_to_trace(trace, trace_length, (OPCODE), (OPARG), (OPERAND), (TARGET)); |
502 | | #endif |
503 | | |
504 | | #define INSTR_IP(INSTR, CODE) \ |
505 | | ((uint32_t)((INSTR) - ((_Py_CODEUNIT *)(CODE)->co_code_adaptive))) |
506 | | |
507 | | // Reserve space for n uops |
508 | | #define RESERVE_RAW(n, opname) \ |
509 | | if (trace_length + (n) > max_length) { \ |
510 | | DPRINTF(2, "No room for %s (need %d, got %d)\n", \ |
511 | | (opname), (n), max_length - trace_length); \ |
512 | | OPT_STAT_INC(trace_too_long); \ |
513 | | goto done; \ |
514 | | } |
515 | | |
516 | | // Reserve space for N uops, plus 3 for _SET_IP, _CHECK_VALIDITY and _EXIT_TRACE |
517 | | #define RESERVE(needed) RESERVE_RAW((needed) + 3, _PyUOpName(opcode)) |
518 | | |
519 | | // Trace stack operations (used by _PUSH_FRAME, _RETURN_VALUE) |
520 | | #define TRACE_STACK_PUSH() \ |
521 | | if (trace_stack_depth >= TRACE_STACK_SIZE) { \ |
522 | | DPRINTF(2, "Trace stack overflow\n"); \ |
523 | | OPT_STAT_INC(trace_stack_overflow); \ |
524 | | return 0; \ |
525 | | } \ |
526 | | assert(func == NULL || func->func_code == (PyObject *)code); \ |
527 | | trace_stack[trace_stack_depth].func = func; \ |
528 | | trace_stack[trace_stack_depth].code = code; \ |
529 | | trace_stack[trace_stack_depth].instr = instr; \ |
530 | | trace_stack_depth++; |
531 | | #define TRACE_STACK_POP() \ |
532 | | if (trace_stack_depth <= 0) { \ |
533 | | Py_FatalError("Trace stack underflow\n"); \ |
534 | | } \ |
535 | | trace_stack_depth--; \ |
536 | | func = trace_stack[trace_stack_depth].func; \ |
537 | | code = trace_stack[trace_stack_depth].code; \ |
538 | | assert(func == NULL || func->func_code == (PyObject *)code); \ |
539 | | instr = trace_stack[trace_stack_depth].instr; |
540 | | |
541 | | /* Returns the length of the trace on success, |
542 | | * 0 if it failed to produce a worthwhile trace, |
543 | | * and -1 on an error. |
544 | | */ |
545 | | static int |
546 | | translate_bytecode_to_trace( |
547 | | _PyInterpreterFrame *frame, |
548 | | _Py_CODEUNIT *instr, |
549 | | _PyUOpInstruction *trace, |
550 | | int buffer_size, |
551 | | _PyBloomFilter *dependencies, bool progress_needed) |
552 | | { |
553 | | bool first = true; |
554 | | PyCodeObject *code = _PyFrame_GetCode(frame); |
555 | | PyFunctionObject *func = _PyFrame_GetFunction(frame); |
556 | | assert(PyFunction_Check(func)); |
557 | | PyCodeObject *initial_code = code; |
558 | | _Py_BloomFilter_Add(dependencies, initial_code); |
559 | | _Py_CODEUNIT *initial_instr = instr; |
560 | | int trace_length = 0; |
561 | | // Leave space for possible trailing _EXIT_TRACE |
562 | | int max_length = buffer_size-2; |
563 | | struct { |
564 | | PyFunctionObject *func; |
565 | | PyCodeObject *code; |
566 | | _Py_CODEUNIT *instr; |
567 | | } trace_stack[TRACE_STACK_SIZE]; |
568 | | int trace_stack_depth = 0; |
569 | | int confidence = CONFIDENCE_RANGE; // Adjusted by branch instructions |
570 | | bool jump_seen = false; |
571 | | |
572 | | #ifdef Py_DEBUG |
573 | | char *python_lltrace = Py_GETENV("PYTHON_LLTRACE"); |
574 | | int lltrace = 0; |
575 | | if (python_lltrace != NULL && *python_lltrace >= '0') { |
576 | | lltrace = *python_lltrace - '0'; // TODO: Parse an int and all that |
577 | | } |
578 | | #endif |
579 | | |
580 | | DPRINTF(2, |
581 | | "Optimizing %s (%s:%d) at byte offset %d\n", |
582 | | PyUnicode_AsUTF8(code->co_qualname), |
583 | | PyUnicode_AsUTF8(code->co_filename), |
584 | | code->co_firstlineno, |
585 | | 2 * INSTR_IP(initial_instr, code)); |
586 | | ADD_TO_TRACE(_START_EXECUTOR, 0, (uintptr_t)instr, INSTR_IP(instr, code)); |
587 | | ADD_TO_TRACE(_MAKE_WARM, 0, 0, 0); |
588 | | uint32_t target = 0; |
589 | | |
590 | | for (;;) { |
591 | | target = INSTR_IP(instr, code); |
592 | | // Need space for _DEOPT |
593 | | max_length--; |
594 | | |
595 | | uint32_t opcode = instr->op.code; |
596 | | uint32_t oparg = instr->op.arg; |
597 | | |
598 | | if (!first && instr == initial_instr) { |
599 | | // We have looped around to the start: |
600 | | RESERVE(1); |
601 | | ADD_TO_TRACE(_JUMP_TO_TOP, 0, 0, 0); |
602 | | goto done; |
603 | | } |
604 | | |
605 | | DPRINTF(2, "%d: %s(%d)\n", target, _PyOpcode_OpName[opcode], oparg); |
606 | | |
607 | | if (opcode == EXTENDED_ARG) { |
608 | | instr++; |
609 | | opcode = instr->op.code; |
610 | | oparg = (oparg << 8) | instr->op.arg; |
611 | | if (opcode == EXTENDED_ARG) { |
612 | | instr--; |
613 | | goto done; |
614 | | } |
615 | | } |
616 | | if (opcode == ENTER_EXECUTOR) { |
617 | | // We have a couple of options here. We *could* peek "underneath" |
618 | | // this executor and continue tracing, which could give us a longer, |
619 | | // more optimizeable trace (at the expense of lots of duplicated |
620 | | // tier two code). Instead, we choose to just end here and stitch to |
621 | | // the other trace, which allows a side-exit traces to rejoin the |
622 | | // "main" trace periodically (and also helps protect us against |
623 | | // pathological behavior where the amount of tier two code explodes |
624 | | // for a medium-length, branchy code path). This seems to work |
625 | | // better in practice, but in the future we could be smarter about |
626 | | // what we do here: |
627 | | goto done; |
628 | | } |
629 | | assert(opcode != ENTER_EXECUTOR && opcode != EXTENDED_ARG); |
630 | | RESERVE_RAW(2, "_CHECK_VALIDITY"); |
631 | | ADD_TO_TRACE(_CHECK_VALIDITY, 0, 0, target); |
632 | | if (!OPCODE_HAS_NO_SAVE_IP(opcode)) { |
633 | | RESERVE_RAW(2, "_SET_IP"); |
634 | | ADD_TO_TRACE(_SET_IP, 0, (uintptr_t)instr, target); |
635 | | } |
636 | | |
637 | | /* Special case the first instruction, |
638 | | * so that we can guarantee forward progress */ |
639 | | if (first && progress_needed) { |
640 | | assert(first); |
641 | | if (OPCODE_HAS_EXIT(opcode) || OPCODE_HAS_DEOPT(opcode)) { |
642 | | opcode = _PyOpcode_Deopt[opcode]; |
643 | | } |
644 | | assert(!OPCODE_HAS_EXIT(opcode)); |
645 | | assert(!OPCODE_HAS_DEOPT(opcode)); |
646 | | } |
647 | | |
648 | | if (OPCODE_HAS_EXIT(opcode)) { |
649 | | // Make space for side exit and final _EXIT_TRACE: |
650 | | RESERVE_RAW(2, "_EXIT_TRACE"); |
651 | | max_length--; |
652 | | } |
653 | | if (OPCODE_HAS_ERROR(opcode)) { |
654 | | // Make space for error stub and final _EXIT_TRACE: |
655 | | RESERVE_RAW(2, "_ERROR_POP_N"); |
656 | | max_length--; |
657 | | } |
658 | | switch (opcode) { |
659 | | case POP_JUMP_IF_NONE: |
660 | | case POP_JUMP_IF_NOT_NONE: |
661 | | case POP_JUMP_IF_FALSE: |
662 | | case POP_JUMP_IF_TRUE: |
663 | | { |
664 | | RESERVE(1); |
665 | | int counter = instr[1].cache; |
666 | | int bitcount = _Py_popcount32(counter); |
667 | | int jump_likely = bitcount > 8; |
668 | | /* Scale confidence by branch consistency: if half the recorded jumps were
669 | | taken (bitcount == 8) confidence is halved; more lopsided counts reduce it less. */
670 | | if (jump_likely) { |
671 | | confidence = confidence * bitcount / 16; |
672 | | } |
673 | | else { |
674 | | confidence = confidence * (16 - bitcount) / 16; |
675 | | } |
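| | /* Worked example (editor's note): starting from CONFIDENCE_RANGE (1000), a
| |  * branch taken in 12 of the last 16 recorded outcomes scales confidence to
| |  * 1000*12/16 = 750; two more such branches give 562 and then 421, still
| |  * above CONFIDENCE_CUTOFF (333), while a fourth drops to 315 and ends the
| |  * trace. */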
676 | | uint32_t uopcode = BRANCH_TO_GUARD[opcode - POP_JUMP_IF_FALSE][jump_likely]; |
677 | | DPRINTF(2, "%d: %s(%d): counter=%04x, bitcount=%d, likely=%d, confidence=%d, uopcode=%s\n", |
678 | | target, _PyOpcode_OpName[opcode], oparg, |
679 | | counter, bitcount, jump_likely, confidence, _PyUOpName(uopcode)); |
680 | | if (confidence < CONFIDENCE_CUTOFF) { |
681 | | DPRINTF(2, "Confidence too low (%d < %d)\n", confidence, CONFIDENCE_CUTOFF); |
682 | | OPT_STAT_INC(low_confidence); |
683 | | goto done; |
684 | | } |
685 | | _Py_CODEUNIT *next_instr = instr + 1 + _PyOpcode_Caches[_PyOpcode_Deopt[opcode]]; |
686 | | _Py_CODEUNIT *target_instr = next_instr + oparg; |
687 | | if (jump_likely) { |
688 | | DPRINTF(2, "Jump likely (%04x = %d bits), continue at byte offset %d\n", |
689 | | instr[1].cache, bitcount, 2 * INSTR_IP(target_instr, code)); |
690 | | instr = target_instr; |
691 | | ADD_TO_TRACE(uopcode, 0, 0, INSTR_IP(next_instr, code)); |
692 | | goto top; |
693 | | } |
694 | | ADD_TO_TRACE(uopcode, 0, 0, INSTR_IP(target_instr, code)); |
695 | | break; |
696 | | } |
697 | | |
698 | | case JUMP_BACKWARD: |
699 | | case JUMP_BACKWARD_JIT: |
700 | | ADD_TO_TRACE(_CHECK_PERIODIC, 0, 0, target); |
701 | | _Py_FALLTHROUGH; |
702 | | case JUMP_BACKWARD_NO_INTERRUPT: |
703 | | { |
704 | | instr += 1 + _PyOpcode_Caches[_PyOpcode_Deopt[opcode]] - (int)oparg; |
705 | | if (jump_seen) { |
706 | | OPT_STAT_INC(inner_loop); |
707 | | DPRINTF(2, "JUMP_BACKWARD not to top ends trace\n"); |
708 | | goto done; |
709 | | } |
710 | | jump_seen = true; |
711 | | goto top; |
712 | | } |
713 | | |
714 | | case JUMP_FORWARD: |
715 | | { |
716 | | RESERVE(0); |
717 | | // This will emit two _SET_IP instructions; leave it to the optimizer |
718 | | instr += oparg; |
719 | | break; |
720 | | } |
721 | | |
722 | | case RESUME: |
723 | | /* Use a special tier 2 version of RESUME_CHECK to allow traces to |
724 | | * start with RESUME_CHECK */ |
725 | | ADD_TO_TRACE(_TIER2_RESUME_CHECK, 0, 0, target); |
726 | | break; |
727 | | |
728 | | default: |
729 | | { |
730 | | const struct opcode_macro_expansion *expansion = &_PyOpcode_macro_expansion[opcode]; |
731 | | if (expansion->nuops > 0) { |
732 | | // Reserve space for nuops (+ _SET_IP + _EXIT_TRACE) |
733 | | int nuops = expansion->nuops; |
734 | | RESERVE(nuops + 1); /* One extra for exit */ |
735 | | int16_t last_op = expansion->uops[nuops-1].uop; |
736 | | if (last_op == _RETURN_VALUE || last_op == _RETURN_GENERATOR || last_op == _YIELD_VALUE) { |
737 | | // Check for trace stack underflow now: |
738 | | // We can't bail e.g. in the middle of |
739 | | // LOAD_CONST + _RETURN_VALUE. |
740 | | if (trace_stack_depth == 0) { |
741 | | DPRINTF(2, "Trace stack underflow\n"); |
742 | | OPT_STAT_INC(trace_stack_underflow); |
743 | | return 0; |
744 | | } |
745 | | } |
746 | | uint32_t orig_oparg = oparg; // For OPARG_TOP/BOTTOM |
747 | | for (int i = 0; i < nuops; i++) { |
748 | | oparg = orig_oparg; |
749 | | uint32_t uop = expansion->uops[i].uop; |
750 | | uint64_t operand = 0; |
751 | | // Add one to account for the actual opcode/oparg pair: |
752 | | int offset = expansion->uops[i].offset + 1; |
753 | | switch (expansion->uops[i].size) { |
754 | | case OPARG_SIMPLE: |
755 | | assert(opcode != JUMP_BACKWARD_NO_INTERRUPT && opcode != JUMP_BACKWARD); |
756 | | break; |
757 | | case OPARG_CACHE_1: |
758 | | operand = read_u16(&instr[offset].cache); |
759 | | break; |
760 | | case OPARG_CACHE_2: |
761 | | operand = read_u32(&instr[offset].cache); |
762 | | break; |
763 | | case OPARG_CACHE_4: |
764 | | operand = read_u64(&instr[offset].cache); |
765 | | break; |
766 | | case OPARG_TOP: // First half of super-instr |
767 | | oparg = orig_oparg >> 4; |
768 | | break; |
769 | | case OPARG_BOTTOM: // Second half of super-instr |
770 | | oparg = orig_oparg & 0xF; |
771 | | break; |
772 | | case OPARG_SAVE_RETURN_OFFSET: // op=_SAVE_RETURN_OFFSET; oparg=return_offset |
773 | | oparg = offset; |
774 | | assert(uop == _SAVE_RETURN_OFFSET); |
775 | | break; |
776 | | case OPARG_REPLACED: |
777 | | uop = _PyUOp_Replacements[uop]; |
778 | | assert(uop != 0); |
779 | | #ifdef Py_DEBUG |
780 | | { |
781 | | uint32_t next_inst = target + 1 + INLINE_CACHE_ENTRIES_FOR_ITER + (oparg > 255); |
782 | | uint32_t jump_target = next_inst + oparg; |
783 | | assert(_Py_GetBaseCodeUnit(code, jump_target).op.code == END_FOR); |
784 | | assert(_Py_GetBaseCodeUnit(code, jump_target+1).op.code == POP_ITER); |
785 | | } |
786 | | #endif |
787 | | break; |
788 | | case OPERAND1_1: |
789 | | assert(trace[trace_length-1].opcode == uop); |
790 | | operand = read_u16(&instr[offset].cache); |
791 | | trace[trace_length-1].operand1 = operand; |
792 | | continue; |
793 | | case OPERAND1_2: |
794 | | assert(trace[trace_length-1].opcode == uop); |
795 | | operand = read_u32(&instr[offset].cache); |
796 | | trace[trace_length-1].operand1 = operand; |
797 | | continue; |
798 | | case OPERAND1_4: |
799 | | assert(trace[trace_length-1].opcode == uop); |
800 | | operand = read_u64(&instr[offset].cache); |
801 | | trace[trace_length-1].operand1 = operand; |
802 | | continue; |
803 | | default: |
804 | | fprintf(stderr, |
805 | | "opcode=%d, oparg=%d; nuops=%d, i=%d; size=%d, offset=%d\n", |
806 | | opcode, oparg, nuops, i, |
807 | | expansion->uops[i].size, |
808 | | expansion->uops[i].offset); |
809 | | Py_FatalError("garbled expansion"); |
810 | | } |
811 | | |
812 | | if (uop == _RETURN_VALUE || uop == _RETURN_GENERATOR || uop == _YIELD_VALUE) { |
813 | | TRACE_STACK_POP(); |
814 | | /* Set the operand to the function or code object returned to, |
815 | | * to assist optimization passes. (See _PUSH_FRAME below.) |
816 | | */ |
817 | | if (func != NULL) { |
818 | | operand = (uintptr_t)func; |
819 | | } |
820 | | else if (code != NULL) { |
821 | | operand = (uintptr_t)code | 1; |
822 | | } |
823 | | else { |
824 | | operand = 0; |
825 | | } |
826 | | ADD_TO_TRACE(uop, oparg, operand, target); |
827 | | DPRINTF(2, |
828 | | "Returning to %s (%s:%d) at byte offset %d\n", |
829 | | PyUnicode_AsUTF8(code->co_qualname), |
830 | | PyUnicode_AsUTF8(code->co_filename), |
831 | | code->co_firstlineno, |
832 | | 2 * INSTR_IP(instr, code)); |
833 | | goto top; |
834 | | } |
835 | | |
836 | | if (uop == _PUSH_FRAME) { |
837 | | assert(i + 1 == nuops); |
838 | | if (opcode == FOR_ITER_GEN || |
839 | | opcode == LOAD_ATTR_PROPERTY || |
840 | | opcode == BINARY_OP_SUBSCR_GETITEM || |
841 | | opcode == SEND_GEN) |
842 | | { |
843 | | DPRINTF(2, "Bailing due to dynamic target\n"); |
844 | | OPT_STAT_INC(unknown_callee); |
845 | | return 0; |
846 | | } |
847 | | assert(_PyOpcode_Deopt[opcode] == CALL || _PyOpcode_Deopt[opcode] == CALL_KW); |
848 | | int func_version_offset = |
849 | | offsetof(_PyCallCache, func_version)/sizeof(_Py_CODEUNIT) |
850 | | // Add one to account for the actual opcode/oparg pair: |
851 | | + 1; |
852 | | uint32_t func_version = read_u32(&instr[func_version_offset].cache); |
853 | | PyCodeObject *new_code = NULL; |
854 | | PyFunctionObject *new_func = |
855 | | _PyFunction_LookupByVersion(func_version, (PyObject **) &new_code); |
856 | | DPRINTF(2, "Function: version=%#x; new_func=%p, new_code=%p\n", |
857 | | (int)func_version, new_func, new_code); |
858 | | if (new_code != NULL) { |
859 | | if (new_code == code) { |
860 | | // Recursive call, bail (we could be here forever). |
861 | | DPRINTF(2, "Bailing on recursive call to %s (%s:%d)\n", |
862 | | PyUnicode_AsUTF8(new_code->co_qualname), |
863 | | PyUnicode_AsUTF8(new_code->co_filename), |
864 | | new_code->co_firstlineno); |
865 | | OPT_STAT_INC(recursive_call); |
866 | | ADD_TO_TRACE(uop, oparg, 0, target); |
867 | | ADD_TO_TRACE(_EXIT_TRACE, 0, 0, 0); |
868 | | goto done; |
869 | | } |
870 | | if (new_code->co_version != func_version) { |
871 | | // func.__code__ was updated. |
872 | | // It may happen again, so don't bother tracing.
873 | | // TODO: Reason about this -- is it better to bail or not? |
874 | | DPRINTF(2, "Bailing because co_version != func_version\n"); |
875 | | ADD_TO_TRACE(uop, oparg, 0, target); |
876 | | ADD_TO_TRACE(_EXIT_TRACE, 0, 0, 0); |
877 | | goto done; |
878 | | } |
879 | | // Increment IP to the return address |
880 | | instr += _PyOpcode_Caches[_PyOpcode_Deopt[opcode]] + 1; |
881 | | TRACE_STACK_PUSH(); |
882 | | _Py_BloomFilter_Add(dependencies, new_code); |
883 | | /* Set the operand to the callee's function or code object, |
884 | | * to assist optimization passes. |
885 | | * We prefer setting it to the function (for remove_globals()) |
886 | | * but if that's not available and the code is,
887 | | * use the code, setting the low bit so the optimizer knows. |
888 | | */ |
889 | | if (new_func != NULL) { |
890 | | operand = (uintptr_t)new_func; |
891 | | } |
892 | | else if (new_code != NULL) { |
893 | | operand = (uintptr_t)new_code | 1; |
894 | | } |
895 | | else { |
896 | | operand = 0; |
897 | | } |
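| | /* Editor's sketch of how a later pass can read the tagged operand back
| |  * (illustrative only):
| |  *     if (operand & 1)  -> (PyCodeObject *)(operand & ~(uintptr_t)1)
| |  *     else if (operand) -> (PyFunctionObject *)operand
| |  *     else              -> no callee information available
| |  */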
898 | | ADD_TO_TRACE(uop, oparg, operand, target); |
899 | | code = new_code; |
900 | | func = new_func; |
901 | | instr = _PyCode_CODE(code); |
902 | | DPRINTF(2, |
903 | | "Continuing in %s (%s:%d) at byte offset %d\n", |
904 | | PyUnicode_AsUTF8(code->co_qualname), |
905 | | PyUnicode_AsUTF8(code->co_filename), |
906 | | code->co_firstlineno, |
907 | | 2 * INSTR_IP(instr, code)); |
908 | | goto top; |
909 | | } |
910 | | DPRINTF(2, "Bail, new_code == NULL\n"); |
911 | | OPT_STAT_INC(unknown_callee); |
912 | | return 0; |
913 | | } |
914 | | |
915 | | if (uop == _BINARY_OP_INPLACE_ADD_UNICODE) { |
916 | | assert(i + 1 == nuops); |
917 | | _Py_CODEUNIT *next_instr = instr + 1 + _PyOpcode_Caches[_PyOpcode_Deopt[opcode]]; |
918 | | assert(next_instr->op.code == STORE_FAST); |
919 | | operand = next_instr->op.arg; |
920 | | // Skip the STORE_FAST: |
921 | | instr++; |
922 | | } |
923 | | |
924 | | // All other instructions |
925 | | ADD_TO_TRACE(uop, oparg, operand, target); |
926 | | } |
927 | | break; |
928 | | } |
929 | | DPRINTF(2, "Unsupported opcode %s\n", _PyOpcode_OpName[opcode]); |
930 | | OPT_UNSUPPORTED_OPCODE(opcode); |
931 | | goto done; // Break out of loop |
932 | | } // End default |
933 | | |
934 | | } // End switch (opcode) |
935 | | |
936 | | instr++; |
937 | | // Add cache size for opcode |
938 | | instr += _PyOpcode_Caches[_PyOpcode_Deopt[opcode]]; |
939 | | |
940 | | if (opcode == CALL_LIST_APPEND) { |
941 | | assert(instr->op.code == POP_TOP); |
942 | | instr++; |
943 | | } |
944 | | top: |
945 | | // Jump here after _PUSH_FRAME or likely branches. |
946 | | first = false; |
947 | | } // End for (;;) |
948 | | |
949 | | done: |
950 | | while (trace_stack_depth > 0) { |
951 | | TRACE_STACK_POP(); |
952 | | } |
953 | | assert(code == initial_code); |
954 | | // Skip short traces where we can't even translate a single instruction: |
955 | | if (first) { |
956 | | OPT_STAT_INC(trace_too_short); |
957 | | DPRINTF(2, |
958 | | "No trace for %s (%s:%d) at byte offset %d (no progress)\n", |
959 | | PyUnicode_AsUTF8(code->co_qualname), |
960 | | PyUnicode_AsUTF8(code->co_filename), |
961 | | code->co_firstlineno, |
962 | | 2 * INSTR_IP(initial_instr, code)); |
963 | | return 0; |
964 | | } |
965 | | if (!is_terminator(&trace[trace_length-1])) { |
966 | | /* Allow space for _EXIT_TRACE */ |
967 | | max_length += 2; |
968 | | ADD_TO_TRACE(_EXIT_TRACE, 0, 0, target); |
969 | | } |
970 | | DPRINTF(1, |
971 | | "Created a proto-trace for %s (%s:%d) at byte offset %d -- length %d\n", |
972 | | PyUnicode_AsUTF8(code->co_qualname), |
973 | | PyUnicode_AsUTF8(code->co_filename), |
974 | | code->co_firstlineno, |
975 | | 2 * INSTR_IP(initial_instr, code), |
976 | | trace_length); |
977 | | OPT_HIST(trace_length, trace_length_hist); |
978 | | return trace_length; |
979 | | } |
980 | | |
981 | | #undef RESERVE |
982 | | #undef RESERVE_RAW |
983 | | #undef INSTR_IP |
984 | | #undef ADD_TO_TRACE |
985 | | #undef DPRINTF |
986 | | |
987 | | #define UNSET_BIT(array, bit) (array[(bit)>>5] &= ~(1<<((bit)&31))) |
988 | | #define SET_BIT(array, bit) (array[(bit)>>5] |= (1<<((bit)&31))) |
989 | | #define BIT_IS_SET(array, bit) (array[(bit)>>5] & (1<<((bit)&31))) |
990 | | |
991 | | /* Count the number of side exits (_EXIT_TRACE uops) in the buffer
992 | | */
993 | | static int |
994 | | count_exits(_PyUOpInstruction *buffer, int length) |
995 | | { |
996 | | int exit_count = 0; |
997 | | for (int i = 0; i < length; i++) { |
998 | | int opcode = buffer[i].opcode; |
999 | | if (opcode == _EXIT_TRACE) { |
1000 | | exit_count++; |
1001 | | } |
1002 | | } |
1003 | | return exit_count; |
1004 | | } |
1005 | | |
1006 | | static void make_exit(_PyUOpInstruction *inst, int opcode, int target) |
1007 | | { |
1008 | | inst->opcode = opcode; |
1009 | | inst->oparg = 0; |
1010 | | inst->operand0 = 0; |
1011 | | inst->format = UOP_FORMAT_TARGET; |
1012 | | inst->target = target; |
1013 | | #ifdef Py_STATS |
1014 | | inst->execution_count = 0; |
1015 | | #endif |
1016 | | } |
1017 | | |
1018 | | /* Convert implicit exits, errors and deopts |
1019 | | * into explicit ones. */ |
1020 | | static int |
1021 | | prepare_for_execution(_PyUOpInstruction *buffer, int length) |
1022 | | { |
1023 | | int32_t current_jump = -1; |
1024 | | int32_t current_jump_target = -1; |
1025 | | int32_t current_error = -1; |
1026 | | int32_t current_error_target = -1; |
1027 | | int32_t current_popped = -1; |
1028 | | int32_t current_exit_op = -1; |
1029 | | /* Leaving in NOPs slows down the interpreter and messes up the stats */ |
1030 | | _PyUOpInstruction *copy_to = &buffer[0]; |
1031 | | for (int i = 0; i < length; i++) { |
1032 | | _PyUOpInstruction *inst = &buffer[i]; |
1033 | | if (inst->opcode != _NOP) { |
1034 | | if (copy_to != inst) { |
1035 | | *copy_to = *inst; |
1036 | | } |
1037 | | copy_to++; |
1038 | | } |
1039 | | } |
1040 | | length = (int)(copy_to - buffer); |
1041 | | int next_spare = length; |
1042 | | for (int i = 0; i < length; i++) { |
1043 | | _PyUOpInstruction *inst = &buffer[i]; |
1044 | | int opcode = inst->opcode; |
1045 | | int32_t target = (int32_t)uop_get_target(inst); |
1046 | | if (_PyUop_Flags[opcode] & (HAS_EXIT_FLAG | HAS_DEOPT_FLAG)) { |
1047 | | uint16_t exit_op = (_PyUop_Flags[opcode] & HAS_EXIT_FLAG) ? |
1048 | | _EXIT_TRACE : _DEOPT; |
1049 | | int32_t jump_target = target; |
1050 | | if (is_for_iter_test[opcode]) { |
1051 | | /* Target the POP_ITER immediately after the END_FOR,
1052 | | * leaving only the iterator on the stack. */
1053 | | int extended_arg = inst->oparg > 255; |
1054 | | int32_t next_inst = target + 1 + INLINE_CACHE_ENTRIES_FOR_ITER + extended_arg; |
1055 | | jump_target = next_inst + inst->oparg + 1; |
1056 | | } |
1057 | | if (jump_target != current_jump_target || current_exit_op != exit_op) { |
1058 | | make_exit(&buffer[next_spare], exit_op, jump_target); |
1059 | | current_exit_op = exit_op; |
1060 | | current_jump_target = jump_target; |
1061 | | current_jump = next_spare; |
1062 | | next_spare++; |
1063 | | } |
1064 | | buffer[i].jump_target = current_jump; |
1065 | | buffer[i].format = UOP_FORMAT_JUMP; |
1066 | | } |
1067 | | if (_PyUop_Flags[opcode] & HAS_ERROR_FLAG) { |
1068 | | int popped = (_PyUop_Flags[opcode] & HAS_ERROR_NO_POP_FLAG) ? |
1069 | | 0 : _PyUop_num_popped(opcode, inst->oparg); |
1070 | | if (target != current_error_target || popped != current_popped) { |
1071 | | current_popped = popped; |
1072 | | current_error = next_spare; |
1073 | | current_error_target = target; |
1074 | | make_exit(&buffer[next_spare], _ERROR_POP_N, 0); |
1075 | | buffer[next_spare].operand0 = target; |
1076 | | next_spare++; |
1077 | | } |
1078 | | buffer[i].error_target = current_error; |
1079 | | if (buffer[i].format == UOP_FORMAT_TARGET) { |
1080 | | buffer[i].format = UOP_FORMAT_JUMP; |
1081 | | buffer[i].jump_target = 0; |
1082 | | } |
1083 | | } |
1084 | | if (opcode == _JUMP_TO_TOP) { |
1085 | | assert(buffer[0].opcode == _START_EXECUTOR); |
1086 | | buffer[i].format = UOP_FORMAT_JUMP; |
1087 | | buffer[i].jump_target = 1; |
1088 | | } |
1089 | | } |
1090 | | return next_spare; |
1091 | | } |
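| |
| | /* Editor's example of the transformation above (schematic): for a buffer
| |  *     _START_EXECUTOR; <guard with the DEOPT flag>; _NOP; _EXIT_TRACE
| |  * the _NOP is squeezed out, a _DEOPT stub is appended in the spare space
| |  * past the trace, and the guard is rewritten to UOP_FORMAT_JUMP with its
| |  * jump_target pointing at that stub. Consecutive instructions that share
| |  * the same target and stub kind also share a single stub. */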
1092 | | |
1093 | | /* Executor side exits */ |
1094 | | |
1095 | | static _PyExecutorObject * |
1096 | | allocate_executor(int exit_count, int length) |
1097 | | { |
1098 | | int size = exit_count*sizeof(_PyExitData) + length*sizeof(_PyUOpInstruction); |
1099 | | _PyExecutorObject *res = PyObject_GC_NewVar(_PyExecutorObject, &_PyUOpExecutor_Type, size); |
1100 | | if (res == NULL) { |
1101 | | return NULL; |
1102 | | } |
1103 | | res->trace = (_PyUOpInstruction *)(res->exits + exit_count); |
1104 | | res->code_size = length; |
1105 | | res->exit_count = exit_count; |
1106 | | return res; |
1107 | | } |
1108 | | |
1109 | | #ifdef Py_DEBUG |
1110 | | |
1111 | | #define CHECK(PRED) \ |
1112 | | if (!(PRED)) { \ |
1113 | | printf(#PRED " at %d\n", i); \ |
1114 | | assert(0); \ |
1115 | | } |
1116 | | |
1117 | | static int |
1118 | | target_unused(int opcode) |
1119 | | { |
1120 | | return (_PyUop_Flags[opcode] & (HAS_ERROR_FLAG | HAS_EXIT_FLAG | HAS_DEOPT_FLAG)) == 0; |
1121 | | } |
1122 | | |
1123 | | static void |
1124 | | sanity_check(_PyExecutorObject *executor) |
1125 | | { |
1126 | | for (uint32_t i = 0; i < executor->exit_count; i++) { |
1127 | | _PyExitData *exit = &executor->exits[i]; |
1128 | | CHECK(exit->target < (1 << 25)); |
1129 | | } |
1130 | | bool ended = false; |
1131 | | uint32_t i = 0; |
1132 | | CHECK(executor->trace[0].opcode == _START_EXECUTOR); |
1133 | | for (; i < executor->code_size; i++) { |
1134 | | const _PyUOpInstruction *inst = &executor->trace[i]; |
1135 | | uint16_t opcode = inst->opcode; |
1136 | | CHECK(opcode <= MAX_UOP_ID); |
1137 | | CHECK(_PyOpcode_uop_name[opcode] != NULL); |
1138 | | switch(inst->format) { |
1139 | | case UOP_FORMAT_TARGET: |
1140 | | CHECK(target_unused(opcode)); |
1141 | | break; |
1142 | | case UOP_FORMAT_JUMP: |
1143 | | CHECK(inst->jump_target < executor->code_size); |
1144 | | break; |
1145 | | } |
1146 | | if (_PyUop_Flags[opcode] & HAS_ERROR_FLAG) { |
1147 | | CHECK(inst->format == UOP_FORMAT_JUMP); |
1148 | | CHECK(inst->error_target < executor->code_size); |
1149 | | } |
1150 | | if (is_terminator(inst)) { |
1151 | | ended = true; |
1152 | | i++; |
1153 | | break; |
1154 | | } |
1155 | | } |
1156 | | CHECK(ended); |
1157 | | for (; i < executor->code_size; i++) { |
1158 | | const _PyUOpInstruction *inst = &executor->trace[i]; |
1159 | | uint16_t opcode = inst->opcode; |
1160 | | CHECK( |
1161 | | opcode == _DEOPT || |
1162 | | opcode == _EXIT_TRACE || |
1163 | | opcode == _ERROR_POP_N); |
1164 | | } |
1165 | | } |
1166 | | |
1167 | | #undef CHECK |
1168 | | #endif |
1169 | | |
1170 | | /* Makes an executor from a buffer of uops.
1171 | | * The buffer has already been compacted by prepare_for_execution (NOPs
1172 | | * removed, exit/error stubs appended), so all `length` uops are copied
1173 | | * and each _EXIT_TRACE is wired to its _PyExitData as the trace is copied.
1174 | | */
1175 | | static _PyExecutorObject * |
1176 | | make_executor_from_uops(_PyUOpInstruction *buffer, int length, const _PyBloomFilter *dependencies) |
1177 | | { |
1178 | | int exit_count = count_exits(buffer, length); |
1179 | | _PyExecutorObject *executor = allocate_executor(exit_count, length); |
1180 | | if (executor == NULL) { |
1181 | | return NULL; |
1182 | | } |
1183 | | |
1184 | | /* Initialize exits */ |
1185 | | for (int i = 0; i < exit_count; i++) { |
1186 | | executor->exits[i].executor = NULL; |
1187 | | executor->exits[i].temperature = initial_temperature_backoff_counter(); |
1188 | | } |
1189 | | int next_exit = exit_count-1; |
1190 | | _PyUOpInstruction *dest = (_PyUOpInstruction *)&executor->trace[length]; |
1191 | | assert(buffer[0].opcode == _START_EXECUTOR); |
1192 | | buffer[0].operand0 = (uint64_t)executor; |
1193 | | for (int i = length-1; i >= 0; i--) { |
1194 | | int opcode = buffer[i].opcode; |
1195 | | dest--; |
1196 | | *dest = buffer[i]; |
1197 | | assert(opcode != _POP_JUMP_IF_FALSE && opcode != _POP_JUMP_IF_TRUE); |
1198 | | if (opcode == _EXIT_TRACE) { |
1199 | | _PyExitData *exit = &executor->exits[next_exit]; |
1200 | | exit->target = buffer[i].target; |
1201 | | dest->operand0 = (uint64_t)exit; |
1202 | | next_exit--; |
1203 | | } |
1204 | | } |
1205 | | assert(next_exit == -1); |
1206 | | assert(dest == executor->trace); |
1207 | | assert(dest->opcode == _START_EXECUTOR); |
1208 | | _Py_ExecutorInit(executor, dependencies); |
1209 | | #ifdef Py_DEBUG |
1210 | | char *python_lltrace = Py_GETENV("PYTHON_LLTRACE"); |
1211 | | int lltrace = 0; |
1212 | | if (python_lltrace != NULL && *python_lltrace >= '0') { |
1213 | | lltrace = *python_lltrace - '0'; // TODO: Parse an int and all that |
1214 | | } |
1215 | | if (lltrace >= 2) { |
1216 | | printf("Optimized trace (length %d):\n", length); |
1217 | | for (int i = 0; i < length; i++) { |
1218 | | printf("%4d OPTIMIZED: ", i); |
1219 | | _PyUOpPrint(&executor->trace[i]); |
1220 | | printf("\n"); |
1221 | | } |
1222 | | } |
1223 | | sanity_check(executor); |
1224 | | #endif |
1225 | | #ifdef _Py_JIT |
1226 | | executor->jit_code = NULL; |
1227 | | executor->jit_side_entry = NULL; |
1228 | | executor->jit_size = 0; |
1229 | | // This is initialized to true so we can prevent the executor |
1230 | | // from being immediately detected as cold and invalidated. |
1231 | | executor->vm_data.warm = true; |
1232 | | if (_PyJIT_Compile(executor, executor->trace, length)) { |
1233 | | Py_DECREF(executor); |
1234 | | return NULL; |
1235 | | } |
1236 | | #endif |
1237 | | _PyObject_GC_TRACK(executor); |
1238 | | return executor; |
1239 | | } |
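| |
| | /* Editor's note on the layout built above: the variable-size tail of the
| |  * executor holds the _PyExitData array first, with the uop trace placed
| |  * immediately after it (allocate_executor sets trace = exits + exit_count).
| |  * Copying the buffer backwards pairs each _EXIT_TRACE with the next free
| |  * exit slot and stores a pointer to that _PyExitData in its operand0. */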
1240 | | |
1241 | | #ifdef Py_STATS |
1242 | | /* Returns the effective trace length. |
1243 | | * Ignores NOPs and trailing exit and error handling. */
1244 | | int effective_trace_length(_PyUOpInstruction *buffer, int length) |
1245 | | { |
1246 | | int nop_count = 0; |
1247 | | for (int i = 0; i < length; i++) { |
1248 | | int opcode = buffer[i].opcode; |
1249 | | if (opcode == _NOP) { |
1250 | | nop_count++; |
1251 | | } |
1252 | | if (is_terminator(&buffer[i])) { |
1253 | | return i+1-nop_count; |
1254 | | } |
1255 | | } |
1256 | | Py_FatalError("No terminating instruction"); |
1257 | | Py_UNREACHABLE(); |
1258 | | } |
1259 | | #endif |
1260 | | |
1261 | | static int |
1262 | | uop_optimize( |
1263 | | _PyInterpreterFrame *frame, |
1264 | | _Py_CODEUNIT *instr, |
1265 | | _PyExecutorObject **exec_ptr, |
1266 | | int curr_stackentries, |
1267 | | bool progress_needed) |
1268 | | { |
1269 | | _PyBloomFilter dependencies; |
1270 | | _Py_BloomFilter_Init(&dependencies); |
1271 | | _PyUOpInstruction buffer[UOP_MAX_TRACE_LENGTH]; |
1272 | | OPT_STAT_INC(attempts); |
1273 | | int length = translate_bytecode_to_trace(frame, instr, buffer, UOP_MAX_TRACE_LENGTH, &dependencies, progress_needed); |
1274 | | if (length <= 0) { |
1275 | | // Error or nothing translated |
1276 | | return length; |
1277 | | } |
1278 | | assert(length < UOP_MAX_TRACE_LENGTH); |
1279 | | OPT_STAT_INC(traces_created); |
1280 | | char *env_var = Py_GETENV("PYTHON_UOPS_OPTIMIZE"); |
1281 | | if (env_var == NULL || *env_var == '\0' || *env_var > '0') { |
1282 | | length = _Py_uop_analyze_and_optimize(frame, buffer, |
1283 | | length, |
1284 | | curr_stackentries, &dependencies); |
1285 | | if (length <= 0) { |
1286 | | return length; |
1287 | | } |
1288 | | } |
1289 | | assert(length < UOP_MAX_TRACE_LENGTH); |
1290 | | assert(length >= 1); |
1291 | | /* Fix up: replace replicated uops with the variant specialized for their oparg */
1292 | | for (int pc = 0; pc < length; pc++) { |
1293 | | int opcode = buffer[pc].opcode; |
1294 | | int oparg = buffer[pc].oparg; |
1295 | | if (oparg < _PyUop_Replication[opcode].stop && oparg >= _PyUop_Replication[opcode].start) { |
1296 | | buffer[pc].opcode = opcode + oparg + 1 - _PyUop_Replication[opcode].start; |
1297 | | assert(strncmp(_PyOpcode_uop_name[buffer[pc].opcode], _PyOpcode_uop_name[opcode], strlen(_PyOpcode_uop_name[opcode])) == 0); |
1298 | | } |
1299 | | else if (is_terminator(&buffer[pc])) { |
1300 | | break; |
1301 | | } |
1302 | | assert(_PyOpcode_uop_name[buffer[pc].opcode]); |
1303 | | } |
1304 | | OPT_HIST(effective_trace_length(buffer, length), optimized_trace_length_hist); |
1305 | | length = prepare_for_execution(buffer, length); |
1306 | | assert(length <= UOP_MAX_TRACE_LENGTH); |
1307 | | _PyExecutorObject *executor = make_executor_from_uops(buffer, length, &dependencies); |
1308 | | if (executor == NULL) { |
1309 | | return -1; |
1310 | | } |
1311 | | assert(length <= UOP_MAX_TRACE_LENGTH); |
1312 | | *exec_ptr = executor; |
1313 | | return 1; |
1314 | | } |
1315 | | |
1316 | | |
1317 | | /***************************************** |
1318 | | * Executor management |
1319 | | ****************************************/ |
1320 | | |
1321 | | /* We use a bloomfilter with k = 6, m = 256 |
1322 | | * The choice of k and the following constants |
1323 | | * could do with a more rigorous analysis, |
1324 | | * but here is a simple analysis: |
1325 | | * |
1326 | | * We want to keep the false positive rate low. |
1327 | | * For n = 5 (a trace depends on 5 objects), |
1328 | | * we expect 30 bits set, giving a false positive |
1329 | | * rate of (30/256)**6 == 2.5e-6 which is plenty |
1330 | | * good enough. |
1331 | | * |
1332 | | * However with n = 10 we expect 60 bits set (worst case), |
1333 | | * giving a false positive of (60/256)**6 == 0.0001 |
1334 | | * |
1335 | | * We choose k = 6, rather than a higher number as |
1336 | | * it means the false positive rate grows slower for high n. |
1337 | | * |
1338 | | * n = 5, k = 6 => fp = 2.6e-6 |
1339 | | * n = 5, k = 8 => fp = 3.5e-7 |
1340 | | * n = 10, k = 6 => fp = 1.6e-4 |
1341 | | * n = 10, k = 8 => fp = 0.9e-4 |
1342 | | * n = 15, k = 6 => fp = 0.18% |
1343 | | * n = 15, k = 8 => fp = 0.23% |
1344 | | * n = 20, k = 6 => fp = 1.1% |
1345 | | * n = 20, k = 8 => fp = 2.3% |
1346 | | * |
1347 | | * The above analysis assumes perfect hash functions, |
1348 | | * but those don't exist, so the real false positive |
1349 | | * rates may be worse. |
1350 | | */ |
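| |
| | /* A minimal sketch (editor's addition, not part of CPython) of the
| |  * arithmetic behind the numbers above, assuming the worst case in which
| |  * every one of the n*k set bits is distinct (m = 256, k = 6):
| |  *     fp(n) ~= ((n*k) / 256) ** k
| |  *     fp(5)  = (30/256)**6 ~= 2.5e-6
| |  *     fp(10) = (60/256)**6 ~= 1.6e-4
| |  */
| | #if 0   /* illustration only */
| | #include <math.h>
| | static double
| | bloom_fp_estimate(int n_dependencies)
| | {
| |     double bits_set = (double)(n_dependencies * 6);   /* k = 6, m = 256 */
| |     return pow(bits_set / 256.0, 6.0);
| | }
| | #endif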
1351 | | |
1352 | | #define K 6 |
1353 | | |
1354 | | #define SEED 20221211 |
1355 | | |
1356 | | /* TO DO -- Use more modern hash functions with better distribution of bits */ |
1357 | | static uint64_t |
1358 | | address_to_hash(void *ptr) { |
1359 | | assert(ptr != NULL); |
1360 | | uint64_t uhash = SEED; |
1361 | | uintptr_t addr = (uintptr_t)ptr; |
1362 | | for (int i = 0; i < SIZEOF_VOID_P; i++) { |
1363 | | uhash ^= addr & 255; |
1364 | | uhash *= (uint64_t)PyHASH_MULTIPLIER; |
1365 | | addr >>= 8; |
1366 | | } |
1367 | | return uhash; |
1368 | | } |
1369 | | |
1370 | | void |
1371 | | _Py_BloomFilter_Init(_PyBloomFilter *bloom) |
1372 | | { |
1373 | | for (int i = 0; i < _Py_BLOOM_FILTER_WORDS; i++) { |
1374 | | bloom->bits[i] = 0; |
1375 | | } |
1376 | | } |
1377 | | |
1378 | | /* We want K hash functions that each set 1 bit. |
1379 | | * A hash function that sets 1 bit in M bits can be trivially |
1380 | | * derived from a log2(M) bit hash function. |
1381 | | * So we extract 8 (log2(256)) bits at a time from |
1382 | | * the 64bit hash. */ |
1383 | | void |
1384 | | _Py_BloomFilter_Add(_PyBloomFilter *bloom, void *ptr) |
1385 | | { |
1386 | | uint64_t hash = address_to_hash(ptr); |
1387 | | assert(K <= 8); |
1388 | | for (int i = 0; i < K; i++) { |
1389 | | uint8_t bits = hash & 255; |
1390 | | bloom->bits[bits >> 5] |= (1 << (bits&31)); |
1391 | | hash >>= 8; |
1392 | | } |
1393 | | } |
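| |
| | /* Worked example (editor's note): if one byte extracted from the hash is
| |  * 156 (0x9C), then bits >> 5 == 4 and bits & 31 == 28, so bit 28 of
| |  * bloom->bits[4] is set. Six such bytes set at most six bits in total. */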
1394 | | |
1395 | | static bool |
1396 | | bloom_filter_may_contain(_PyBloomFilter *bloom, _PyBloomFilter *hashes) |
1397 | | { |
1398 | | for (int i = 0; i < _Py_BLOOM_FILTER_WORDS; i++) { |
1399 | | if ((bloom->bits[i] & hashes->bits[i]) != hashes->bits[i]) { |
1400 | | return false; |
1401 | | } |
1402 | | } |
1403 | | return true; |
1404 | | } |
1405 | | |
1406 | | static void |
1407 | | link_executor(_PyExecutorObject *executor) |
1408 | | { |
1409 | | PyInterpreterState *interp = _PyInterpreterState_GET(); |
1410 | | _PyExecutorLinkListNode *links = &executor->vm_data.links; |
1411 | | _PyExecutorObject *head = interp->executor_list_head; |
1412 | | if (head == NULL) { |
1413 | | interp->executor_list_head = executor; |
1414 | | links->previous = NULL; |
1415 | | links->next = NULL; |
1416 | | } |
1417 | | else { |
1418 | | assert(head->vm_data.links.previous == NULL); |
1419 | | links->previous = NULL; |
1420 | | links->next = head; |
1421 | | head->vm_data.links.previous = executor; |
1422 | | interp->executor_list_head = executor; |
1423 | | } |
1424 | | executor->vm_data.linked = true; |
1425 | | /* executor_list_head must be first in list */ |
1426 | | assert(interp->executor_list_head->vm_data.links.previous == NULL); |
1427 | | } |
1428 | | |
1429 | | static void |
1430 | | unlink_executor(_PyExecutorObject *executor) |
1431 | | { |
1432 | | if (!executor->vm_data.linked) { |
1433 | | return; |
1434 | | } |
1435 | | _PyExecutorLinkListNode *links = &executor->vm_data.links; |
1436 | | assert(executor->vm_data.valid); |
1437 | | _PyExecutorObject *next = links->next; |
1438 | | _PyExecutorObject *prev = links->previous; |
1439 | | if (next != NULL) { |
1440 | | next->vm_data.links.previous = prev; |
1441 | | } |
1442 | | if (prev != NULL) { |
1443 | | prev->vm_data.links.next = next; |
1444 | | } |
1445 | | else { |
1446 | | // prev == NULL implies that executor is the list head |
1447 | | PyInterpreterState *interp = PyInterpreterState_Get(); |
1448 | | assert(interp->executor_list_head == executor); |
1449 | | interp->executor_list_head = next; |
1450 | | } |
1451 | | executor->vm_data.linked = false; |
1452 | | } |
1453 | | |
1454 | | /* This must be called by optimizers before using the executor */ |
1455 | | void |
1456 | | _Py_ExecutorInit(_PyExecutorObject *executor, const _PyBloomFilter *dependency_set) |
1457 | | { |
1458 | | executor->vm_data.valid = true; |
1459 | | for (int i = 0; i < _Py_BLOOM_FILTER_WORDS; i++) { |
1460 | | executor->vm_data.bloom.bits[i] = dependency_set->bits[i]; |
1461 | | } |
1462 | | link_executor(executor); |
1463 | | } |
1464 | | |
1465 | | /* Detaches the executor from the code object (if any) that |
1466 | | * holds a reference to it */ |
1467 | | void |
1468 | | _Py_ExecutorDetach(_PyExecutorObject *executor) |
1469 | | { |
1470 | | PyCodeObject *code = executor->vm_data.code; |
1471 | | if (code == NULL) { |
1472 | | return; |
1473 | | } |
1474 | | _Py_CODEUNIT *instruction = &_PyCode_CODE(code)[executor->vm_data.index]; |
1475 | | assert(instruction->op.code == ENTER_EXECUTOR); |
1476 | | int index = instruction->op.arg; |
1477 | | assert(code->co_executors->executors[index] == executor); |
1478 | | instruction->op.code = executor->vm_data.opcode; |
1479 | | instruction->op.arg = executor->vm_data.oparg; |
1480 | | executor->vm_data.code = NULL; |
1481 | | code->co_executors->executors[index] = NULL; |
1482 | | Py_DECREF(executor); |
1483 | | } |
1484 | | |
1485 | | static int |
1486 | | executor_clear(PyObject *op) |
1487 | | { |
1488 | | _PyExecutorObject *executor = _PyExecutorObject_CAST(op); |
1489 | | if (!executor->vm_data.valid) { |
1490 | | return 0; |
1491 | | } |
1492 | | assert(executor->vm_data.valid == 1); |
1493 | | unlink_executor(executor); |
1494 | | executor->vm_data.valid = 0; |
1495 | | /* It is possible for an executor to form a reference |
1496 | | * cycle with itself, so decref'ing a side exit could |
1497 | | * free the executor unless we hold a strong reference to it |
1498 | | */ |
1499 | | Py_INCREF(executor); |
1500 | | for (uint32_t i = 0; i < executor->exit_count; i++) { |
1501 | | executor->exits[i].temperature = initial_unreachable_backoff_counter(); |
1502 | | Py_CLEAR(executor->exits[i].executor); |
1503 | | } |
1504 | | _Py_ExecutorDetach(executor); |
1505 | | Py_DECREF(executor); |
1506 | | return 0; |
1507 | | } |
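| | /* Example of the cycle the INCREF/DECREF pair above guards against: a side |
| | * exit may target the executor that owns it (a self-loop), so Py_CLEAR() on |
| | * that exit could drop the last reference while the loop is still running. |
| | * Holding a temporary strong reference keeps the executor alive until |
| | * clearing and detaching have finished. |
| | */ |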
1508 | | |
1509 | | void |
1510 | | _Py_Executor_DependsOn(_PyExecutorObject *executor, void *obj) |
1511 | | { |
1512 | | assert(executor->vm_data.valid); |
1513 | | _Py_BloomFilter_Add(&executor->vm_data.bloom, obj); |
1514 | | } |
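| | /* Hedged usage note: dependencies discovered after initialization can be |
| | * recorded the same way, e.g. (illustrative only): |
| | * |
| | * _Py_Executor_DependsOn(executor, (void *)func); // invalidated if `func` changes |
| | */ |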
1515 | | |
1516 | | /* Invalidate all executors that depend on `obj` |
1517 | | * May cause other executors to be invalidated as well |
1518 | | */ |
1519 | | void |
1520 | | _Py_Executors_InvalidateDependency(PyInterpreterState *interp, void *obj, int is_invalidation) |
1521 | | { |
1522 | | _PyBloomFilter obj_filter; |
1523 | | _Py_BloomFilter_Init(&obj_filter); |
1524 | | _Py_BloomFilter_Add(&obj_filter, obj); |
1525 | | /* Walk the list of executors */ |
1526 | | /* TO DO -- Use a tree to avoid traversing as many objects */ |
1527 | | PyObject *invalidate = PyList_New(0); |
1528 | | if (invalidate == NULL) { |
1529 | | goto error; |
1530 | | } |
1531 | | /* Clearing an executor can deallocate others, so we need to make a list of |
1532 | | * executors to invalidate first */ |
1533 | | for (_PyExecutorObject *exec = interp->executor_list_head; exec != NULL;) { |
1534 | | assert(exec->vm_data.valid); |
1535 | | _PyExecutorObject *next = exec->vm_data.links.next; |
1536 | | if (bloom_filter_may_contain(&exec->vm_data.bloom, &obj_filter) && |
1537 | | PyList_Append(invalidate, (PyObject *)exec)) |
1538 | | { |
1539 | | goto error; |
1540 | | } |
1541 | | exec = next; |
1542 | | } |
1543 | | for (Py_ssize_t i = 0; i < PyList_GET_SIZE(invalidate); i++) { |
1544 | | PyObject *exec = PyList_GET_ITEM(invalidate, i); |
1545 | | executor_clear(exec); |
1546 | | if (is_invalidation) { |
1547 | | OPT_STAT_INC(executors_invalidated); |
1548 | | } |
1549 | | } |
1550 | | Py_DECREF(invalidate); |
1551 | | return; |
1552 | | error: |
1553 | | PyErr_Clear(); |
1554 | | Py_XDECREF(invalidate); |
1555 | | // If we're truly out of memory, wiping out everything is a fine fallback: |
1556 | | _Py_Executors_InvalidateAll(interp, is_invalidation); |
1557 | | } |
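| | /* Illustrative call pattern (the concrete call sites live elsewhere in the |
| | * runtime): when an object recorded via _Py_Executor_DependsOn changes, a |
| | * call like the following clears every executor whose filter may contain it: |
| | * |
| | * _Py_Executors_InvalidateDependency(interp, (void *)changed_obj, 1); |
| | * |
| | * Bloom filters admit false positives but never false negatives, so some |
| | * unrelated executors may be cleared, but no dependent executor is missed. |
| | */ |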
1558 | | |
1559 | | /* Invalidate all executors */ |
1560 | | void |
1561 | | _Py_Executors_InvalidateAll(PyInterpreterState *interp, int is_invalidation) |
1562 | | { |
1563 | | while (interp->executor_list_head) { |
1564 | | _PyExecutorObject *executor = interp->executor_list_head; |
1565 | | assert(executor->vm_data.valid == 1 && executor->vm_data.linked == 1); |
1566 | | if (executor->vm_data.code) { |
1567 | | // Clear the entire code object so that its co_executors array can be freed: |
1568 | | _PyCode_Clear_Executors(executor->vm_data.code); |
1569 | | } |
1570 | | else { |
1571 | | executor_clear((PyObject *)executor); |
1572 | | } |
1573 | | if (is_invalidation) { |
1574 | | OPT_STAT_INC(executors_invalidated); |
1575 | | } |
1576 | | } |
1577 | | } |
1578 | | |
1579 | | void |
1580 | | _Py_Executors_InvalidateCold(PyInterpreterState *interp) |
1581 | | { |
1582 | | /* Walk the list of executors */ |
1583 | | /* TO DO -- Use a tree to avoid traversing as many objects */ |
1584 | | PyObject *invalidate = PyList_New(0); |
1585 | | if (invalidate == NULL) { |
1586 | | goto error; |
1587 | | } |
1588 | | |
1589 | | /* Clearing an executor can deallocate others, so we need to make a list of |
1590 | | * executors to invalidate first */ |
1591 | | for (_PyExecutorObject *exec = interp->executor_list_head; exec != NULL;) { |
1592 | | assert(exec->vm_data.valid); |
1593 | | _PyExecutorObject *next = exec->vm_data.links.next; |
1594 | | |
1595 | | if (!exec->vm_data.warm && PyList_Append(invalidate, (PyObject *)exec) < 0) { |
1596 | | goto error; |
1597 | | } |
1598 | | else { |
1599 | | exec->vm_data.warm = false; |
1600 | | } |
1601 | | |
1602 | | exec = next; |
1603 | | } |
1604 | | for (Py_ssize_t i = 0; i < PyList_GET_SIZE(invalidate); i++) { |
1605 | | PyObject *exec = PyList_GET_ITEM(invalidate, i); |
1606 | | executor_clear(exec); |
1607 | | } |
1608 | | Py_DECREF(invalidate); |
1609 | | return; |
1610 | | error: |
1611 | | PyErr_Clear(); |
1612 | | Py_XDECREF(invalidate); |
1613 | | // If we're truly out of memory, wiping out everything is a fine fallback |
1614 | | _Py_Executors_InvalidateAll(interp, 0); |
1615 | | } |
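| | /* Sketch of the intended sweep behaviour, inferred from the code above: the |
| | * `warm` bit is presumably set when an executor runs, so each call here |
| | * invalidates executors that have not run since the previous sweep and |
| | * clears the bit on the survivors: |
| | * |
| | * sweep N: warm == false -> invalidated; warm == true -> warm = false |
| | * sweep N+1: warm still false (never ran again) -> invalidated |
| | */ |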
1616 | | |
1617 | | static void |
1618 | | write_str(PyObject *str, FILE *out) |
1619 | | { |
1620 | | // Encode the Unicode object as UTF-8 before writing it out |
1621 | | PyObject *encoded_obj = PyUnicode_AsEncodedString(str, "utf8", "strict"); |
1622 | | if (encoded_obj == NULL) { |
1623 | | PyErr_Clear(); |
1624 | | return; |
1625 | | } |
1626 | | const char *encoded_str = PyBytes_AsString(encoded_obj); |
1627 | | Py_ssize_t encoded_size = PyBytes_Size(encoded_obj); |
1628 | | fwrite(encoded_str, 1, encoded_size, out); |
1629 | | Py_DECREF(encoded_obj); |
1630 | | } |
1631 | | |
1632 | | static int |
1633 | | find_line_number(PyCodeObject *code, _PyExecutorObject *executor) |
1634 | | { |
1635 | | int code_len = (int)Py_SIZE(code); |
1636 | | for (int i = 0; i < code_len; i++) { |
1637 | | _Py_CODEUNIT *instr = &_PyCode_CODE(code)[i]; |
1638 | | int opcode = instr->op.code; |
1639 | | if (opcode == ENTER_EXECUTOR) { |
1640 | | _PyExecutorObject *exec = code->co_executors->executors[instr->op.arg]; |
1641 | | if (exec == executor) { |
1642 | | return PyCode_Addr2Line(code, i*2); |
1643 | | } |
1644 | | } |
1645 | | i += _PyOpcode_Caches[_Py_GetBaseCodeUnit(code, i).op.code]; |
1646 | | } |
1647 | | return -1; |
1648 | | } |
1649 | | |
1650 | | /* Writes the node and outgoing edges for a single tracelet in graphviz format. |
1651 | | * Each tracelet is presented as a table of the uops it contains. |
1652 | | * If Py_STATS is enabled, execution counts are included. |
1653 | | * |
1654 | | * https://graphviz.readthedocs.io/en/stable/manual.html |
1655 | | * https://graphviz.org/gallery/ |
1656 | | */ |
1657 | | static void |
1658 | | executor_to_gv(_PyExecutorObject *executor, FILE *out) |
1659 | | { |
1660 | | PyCodeObject *code = executor->vm_data.code; |
1661 | | fprintf(out, "executor_%p [\n", executor); |
1662 | | fprintf(out, " shape = none\n"); |
1663 | | |
1664 | | /* Write the HTML table for the uops */ |
1665 | | fprintf(out, " label = <<table border=\"0\" cellspacing=\"0\">\n"); |
1666 | | fprintf(out, " <tr><td port=\"start\" border=\"1\" ><b>Executor</b></td></tr>\n"); |
1667 | | if (code == NULL) { |
1668 | | fprintf(out, " <tr><td border=\"1\" >No code object</td></tr>\n"); |
1669 | | } |
1670 | | else { |
1671 | | fprintf(out, " <tr><td border=\"1\" >"); |
1672 | | write_str(code->co_qualname, out); |
1673 | | int line = find_line_number(code, executor); |
1674 | | fprintf(out, ": %d</td></tr>\n", line); |
1675 | | } |
1676 | | for (uint32_t i = 0; i < executor->code_size; i++) { |
1677 | | /* Write row for uop. |
1678 | | * The `port` is a marker so that outgoing edges can |
1679 | | * be placed correctly. If a row is marked `port="i17"`, |
1680 | | * then the outgoing edge is `{EXEC_NAME}:i17 -> {TARGET}:start`. |
1681 | | * https://graphviz.readthedocs.io/en/stable/manual.html#node-ports-compass |
1682 | | */ |
1683 | | _PyUOpInstruction const *inst = &executor->trace[i]; |
1684 | | const char *opname = _PyOpcode_uop_name[inst->opcode]; |
1685 | | #ifdef Py_STATS |
1686 | | fprintf(out, " <tr><td port=\"i%d\" border=\"1\" >%s -- %" PRIu64 "</td></tr>\n", i, opname, inst->execution_count); |
1687 | | #else |
1688 | | fprintf(out, " <tr><td port=\"i%d\" border=\"1\" >%s</td></tr>\n", i, opname); |
1689 | | #endif |
1690 | | if (inst->opcode == _EXIT_TRACE || inst->opcode == _JUMP_TO_TOP) { |
1691 | | break; |
1692 | | } |
1693 | | } |
1694 | | fprintf(out, " </table>>\n"); |
1695 | | fprintf(out, "]\n\n"); |
1696 | | |
1697 | | /* Write all the outgoing edges */ |
1698 | | for (uint32_t i = 0; i < executor->code_size; i++) { |
1699 | | _PyUOpInstruction const *inst = &executor->trace[i]; |
1700 | | uint16_t flags = _PyUop_Flags[inst->opcode]; |
1701 | | _PyExitData *exit = NULL; |
1702 | | if (inst->opcode == _EXIT_TRACE) { |
1703 | | exit = (_PyExitData *)inst->operand0; |
1704 | | } |
1705 | | else if (flags & HAS_EXIT_FLAG) { |
1706 | | assert(inst->format == UOP_FORMAT_JUMP); |
1707 | | _PyUOpInstruction const *exit_inst = &executor->trace[inst->jump_target]; |
1708 | | assert(exit_inst->opcode == _EXIT_TRACE); |
1709 | | exit = (_PyExitData *)exit_inst->operand0; |
1710 | | } |
1711 | | if (exit != NULL && exit->executor != NULL) { |
1712 | | fprintf(out, "executor_%p:i%d -> executor_%p:start\n", executor, i, exit->executor); |
1713 | | } |
1714 | | if (inst->opcode == _EXIT_TRACE || inst->opcode == _JUMP_TO_TOP) { |
1715 | | break; |
1716 | | } |
1717 | | } |
1718 | | } |
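| | /* Illustrative fragment of the DOT text emitted above (addresses, names and |
| | * uops are made up for the example): |
| | * |
| | * executor_0x7f1234 [ |
| | * shape = none |
| | * label = <<table border="0" cellspacing="0"> |
| | * <tr><td port="start" border="1" ><b>Executor</b></td></tr> |
| | * <tr><td border="1" >my_func: 42</td></tr> |
| | * <tr><td port="i0" border="1" >_CHECK_VALIDITY</td></tr> |
| | * <tr><td port="i1" border="1" >_EXIT_TRACE</td></tr> |
| | * </table>> |
| | * ] |
| | * |
| | * executor_0x7f1234:i1 -> executor_0x7f5678:start |
| | */ |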
1719 | | |
1720 | | /* Write the graph of all the live tracelets in graphviz format. */ |
1721 | | int |
1722 | | _PyDumpExecutors(FILE *out) |
1723 | | { |
1724 | | fprintf(out, "digraph ideal {\n\n"); |
1725 | | fprintf(out, " rankdir = \"LR\"\n\n"); |
1726 | | PyInterpreterState *interp = PyInterpreterState_Get(); |
1727 | | for (_PyExecutorObject *exec = interp->executor_list_head; exec != NULL;) { |
1728 | | executor_to_gv(exec, out); |
1729 | | exec = exec->vm_data.links.next; |
1730 | | } |
1731 | | fprintf(out, "}\n\n"); |
1732 | | return 0; |
1733 | | } |
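| | /* The emitted text is ordinary Graphviz input; once written to a file it |
| | * can be rendered with, for example: |
| | * |
| | * dot -Tsvg executors.dot -o executors.svg |
| | */ |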
1734 | | |
1735 | | #else |
1736 | | |
1737 | | int |
1738 | | _PyDumpExecutors(FILE *out) |
1739 | 0 | { |
1740 | 0 | PyErr_SetString(PyExc_NotImplementedError, "No JIT available"); |
1741 | 0 | return -1; |
1742 | 0 | } |
1743 | | |
1744 | | #endif /* _Py_TIER2 */ |