/src/php-src/ext/opcache/jit/ir/ir_emit.c
Line | Count | Source |
1 | | /* |
2 | | * IR - Lightweight JIT Compilation Framework |
3 | | * (Native code generator based on DynAsm) |
4 | | * Copyright (C) 2022 Zend by Perforce. |
5 | | * Authors: Dmitry Stogov <dmitry@php.net> |
6 | | */ |
7 | | |
8 | | #include "ir.h" |
9 | | |
10 | | #if defined(IR_TARGET_X86) || defined(IR_TARGET_X64) |
11 | | # include "ir_x86.h" |
12 | | #elif defined(IR_TARGET_AARCH64) |
13 | | # include "ir_aarch64.h" |
14 | | #else |
15 | | # error "Unknown IR target" |
16 | | #endif |
17 | | |
18 | | #include "ir_private.h" |
19 | | #ifndef _WIN32 |
20 | | # include <dlfcn.h> |
21 | | #else |
22 | | # define WIN32_LEAN_AND_MEAN |
23 | | # include <windows.h> |
24 | | # include <psapi.h> |
25 | | #endif |
26 | | |
27 | | #if defined(__linux__) || defined(__sun) |
28 | | # include <alloca.h> |
29 | | #endif |
30 | | |
31 | | #define DASM_M_GROW(ctx, t, p, sz, need) \ |
32 | 0 | do { \ |
33 | 0 | size_t _sz = (sz), _need = (need); \ |
34 | 0 | if (_sz < _need) { \ |
35 | 0 | size_t _limit = sizeof(t) * DASM_SEC2POS(1); \ |
36 | 0 | if (_need > _limit) { \ |
37 | 0 | Dst_REF->status = DASM_S_NOMEM; \ |
38 | 0 | return; \ |
39 | 0 | } \ |
40 | 0 | if (_sz < 16) _sz = 16; \ |
41 | 0 | while (_sz < _need) _sz += _sz; \ |
42 | 0 | if (_sz > _limit) _sz = _limit; \ |
43 | 0 | (p) = (t *)ir_mem_realloc((p), _sz); \ |
44 | 0 | (sz) = _sz; \ |
45 | 0 | } \ |
46 | 0 | } while(0) |
47 | | |
48 | 0 | #define DASM_M_FREE(ctx, p, sz) ir_mem_free(p) |
49 | | |
50 | | #ifdef IR_DEBUG |
51 | | # define DASM_CHECKS |
52 | | #endif |
53 | | |
54 | | typedef struct _ir_copy { |
55 | | ir_type type; |
56 | | ir_reg from; |
57 | | ir_reg to; |
58 | | } ir_copy; |
59 | | |
60 | | typedef struct _ir_dessa_copy { |
61 | | ir_type type; |
62 | | int32_t from; /* negative - constant ref, [0..IR_REG_NUM) - CPU reg, [IR_REG_NUM...) - virtual reg */ |
63 | | int32_t to; /* [0..IR_REG_NUM) - CPU reg, [IR_REG_NUM...) - virtual reg */ |
64 | | } ir_dessa_copy; |
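The field comments above define a compact operand encoding: a single int32_t covers constants, CPU registers, and virtual registers. A minimal sketch of the corresponding classification, assuming a stand-in IR_REG_NUM (the real constant is target-specific) and an illustrative helper name that is not part of the IR API:

    #include <stdint.h>

    #define IR_REG_NUM 32   /* stand-in; the real value comes from the target header */

    typedef enum { OPERAND_CONST, OPERAND_CPU_REG, OPERAND_VREG } operand_kind;

    /* Classify an ir_dessa_copy operand exactly as the comments above describe. */
    static operand_kind dessa_operand_kind(int32_t ref)
    {
        if (ref < 0) {
            return OPERAND_CONST;      /* negative - constant ref */
        } else if (ref < IR_REG_NUM) {
            return OPERAND_CPU_REG;    /* [0..IR_REG_NUM) - CPU reg */
        } else {
            return OPERAND_VREG;       /* [IR_REG_NUM...) - virtual reg */
        }
    }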
65 | | |
66 | | const ir_proto_t *ir_call_proto(const ir_ctx *ctx, const ir_insn *insn) |
67 | 0 | { |
68 | 0 | if (IR_IS_CONST_REF(insn->op2)) { |
69 | 0 | const ir_insn *func = &ctx->ir_base[insn->op2]; |
70 | |
71 | 0 | if (func->op == IR_FUNC || func->op == IR_FUNC_ADDR) { |
72 | 0 | if (func->proto) { |
73 | 0 | return (const ir_proto_t *)ir_get_str(ctx, func->proto); |
74 | 0 | } |
75 | 0 | } |
76 | 0 | } else if (ctx->ir_base[insn->op2].op == IR_PROTO) { |
77 | 0 | return (const ir_proto_t *)ir_get_str(ctx, ctx->ir_base[insn->op2].op2); |
78 | 0 | } |
79 | 0 | return NULL; |
80 | 0 | } |
81 | | |
82 | | IR_ALWAYS_INLINE uint32_t ir_rule(const ir_ctx *ctx, ir_ref ref) |
83 | 0 | { |
84 | 0 | IR_ASSERT(!IR_IS_CONST_REF(ref)); |
85 | 0 | return ctx->rules[ref]; |
86 | 0 | } |
87 | | |
88 | | IR_ALWAYS_INLINE bool ir_in_same_block(ir_ctx *ctx, ir_ref ref) |
89 | 0 | { |
90 | 0 | return ref > ctx->bb_start; |
91 | 0 | } |
92 | | |
93 | | |
94 | | static ir_reg ir_get_param_reg(const ir_ctx *ctx, ir_ref ref) |
95 | 0 | { |
96 | 0 | ir_use_list *use_list = &ctx->use_lists[1]; |
97 | 0 | int i; |
98 | 0 | ir_ref use, *p; |
99 | 0 | ir_insn *insn; |
100 | 0 | int int_param = 0; |
101 | 0 | int fp_param = 0; |
102 | 0 | const ir_call_conv_dsc *cc = ir_get_call_conv_dsc(ctx->flags); |
103 | |
104 | 0 | for (i = use_list->count, p = &ctx->use_edges[use_list->refs]; i > 0; p++, i--) { |
105 | 0 | use = *p; |
106 | 0 | insn = &ctx->ir_base[use]; |
107 | 0 | if (insn->op == IR_PARAM) { |
108 | 0 | if (IR_IS_TYPE_INT(insn->type)) { |
109 | 0 | if (use == ref) { |
110 | 0 | if (ctx->value_params && ctx->value_params[insn->op3 - 1].align && cc->pass_struct_by_val) { |
111 | | /* struct passed by value on stack */ |
112 | 0 | return IR_REG_NONE; |
113 | 0 | } else if (int_param < cc->int_param_regs_count) { |
114 | 0 | return cc->int_param_regs[int_param]; |
115 | 0 | } else { |
116 | 0 | return IR_REG_NONE; |
117 | 0 | } |
118 | 0 | } else if (ctx->value_params && ctx->value_params[insn->op3 - 1].align && cc->pass_struct_by_val) { |
119 | | /* struct passed by value on stack */ |
120 | 0 | continue; |
121 | 0 | } |
122 | 0 | int_param++; |
123 | 0 | if (cc->shadow_param_regs) { |
124 | 0 | fp_param++; |
125 | 0 | } |
126 | 0 | } else { |
127 | 0 | IR_ASSERT(IR_IS_TYPE_FP(insn->type)); |
128 | 0 | if (use == ref) { |
129 | 0 | if (fp_param < cc->fp_param_regs_count) { |
130 | 0 | return cc->fp_param_regs[fp_param]; |
131 | 0 | } else { |
132 | 0 | return IR_REG_NONE; |
133 | 0 | } |
134 | 0 | } |
135 | 0 | fp_param++; |
136 | 0 | if (cc->shadow_param_regs) { |
137 | 0 | int_param++; |
138 | 0 | } |
139 | 0 | } |
140 | 0 | } |
141 | 0 | } |
142 | 0 | return IR_REG_NONE; |
143 | 0 | } |
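Note how ir_get_param_reg advances int_param and fp_param in lockstep whenever cc->shadow_param_regs is set: in such conventions (Windows x64 is the classic case) each argument consumes both an integer and a floating-point slot. A small self-contained model of just that bookkeeping; an illustrative demo, not the real API:

    #include <stdbool.h>
    #include <stdio.h>

    int main(void)
    {
        const char *args = "ifif";  /* i = integer argument, f = float argument */
        bool shadow = true;         /* as when cc->shadow_param_regs is set */
        int int_param = 0, fp_param = 0;

        for (const char *p = args; *p; p++) {
            if (*p == 'i') {
                printf("arg %d: int -> int slot %d\n", (int)(p - args), int_param);
                int_param++;
                if (shadow) {
                    fp_param++;     /* the paired fp slot is consumed too */
                }
            } else {
                printf("arg %d: fp  -> fp slot %d\n", (int)(p - args), fp_param);
                fp_param++;
                if (shadow) {
                    int_param++;
                }
            }
        }
        return 0;
    }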
144 | | |
145 | | static int ir_get_args_regs(const ir_ctx *ctx, const ir_insn *insn, const ir_call_conv_dsc *cc, int8_t *regs) |
146 | 0 | { |
147 | 0 | int j, n; |
148 | 0 | ir_type type; |
149 | 0 | int int_param = 0; |
150 | 0 | int fp_param = 0; |
151 | 0 | int count = 0; |
152 | |
153 | 0 | n = insn->inputs_count; |
154 | 0 | n = IR_MIN(n, IR_MAX_REG_ARGS + 2); |
155 | 0 | for (j = 3; j <= n; j++) { |
156 | 0 | ir_insn *arg = &ctx->ir_base[ir_insn_op(insn, j)]; |
157 | 0 | type = arg->type; |
158 | 0 | if (IR_IS_TYPE_INT(type)) { |
159 | 0 | if (int_param < cc->int_param_regs_count && arg->op != IR_ARGVAL) { |
160 | 0 | regs[j] = cc->int_param_regs[int_param]; |
161 | 0 | count = j + 1; |
162 | 0 | int_param++; |
163 | 0 | if (cc->shadow_param_regs) { |
164 | 0 | fp_param++; |
165 | 0 | } |
166 | 0 | } else { |
167 | 0 | regs[j] = IR_REG_NONE; |
168 | 0 | } |
169 | 0 | } else { |
170 | 0 | IR_ASSERT(IR_IS_TYPE_FP(type)); |
171 | 0 | if (fp_param < cc->fp_param_regs_count) { |
172 | 0 | regs[j] = cc->fp_param_regs[fp_param]; |
173 | 0 | count = j + 1; |
174 | 0 | fp_param++; |
175 | 0 | if (cc->shadow_param_regs) { |
176 | 0 | int_param++; |
177 | 0 | } |
178 | 0 | } else { |
179 | 0 | regs[j] = IR_REG_NONE; |
180 | 0 | } |
181 | 0 | } |
182 | 0 | } |
183 | 0 | return count; |
184 | 0 | } |
185 | | |
186 | | static bool ir_is_same_mem_var(const ir_ctx *ctx, ir_ref r1, int32_t offset) |
187 | 0 | { |
188 | 0 | ir_live_interval *ival1; |
189 | 0 | int32_t o1; |
190 | |
191 | 0 | if (IR_IS_CONST_REF(r1)) { |
192 | 0 | return 0; |
193 | 0 | } |
194 | | |
195 | 0 | IR_ASSERT(ctx->vregs[r1]); |
196 | 0 | ival1 = ctx->live_intervals[ctx->vregs[r1]]; |
197 | 0 | IR_ASSERT(ival1); |
198 | 0 | o1 = ival1->stack_spill_pos; |
199 | 0 | IR_ASSERT(o1 != -1); |
200 | 0 | return o1 == offset; |
201 | 0 | } |
202 | | |
203 | | void *ir_resolve_sym_name(const char *name) |
204 | 0 | { |
205 | 0 | void *addr; |
206 | |
207 | 0 | #ifndef _WIN32 |
208 | 0 | void *handle = NULL; |
209 | 0 | # ifdef RTLD_DEFAULT |
210 | 0 | handle = RTLD_DEFAULT; |
211 | 0 | # endif |
212 | 0 | addr = dlsym(handle, name); |
213 | | #else |
214 | | HMODULE mods[256]; |
215 | | DWORD cbNeeded; |
216 | | uint32_t i = 0; |
217 | | |
218 | | addr = NULL; |
219 | | |
220 | | EnumProcessModules(GetCurrentProcess(), mods, sizeof(mods), &cbNeeded); |
221 | | |
222 | | while(i < (cbNeeded / sizeof(HMODULE))) { |
223 | | addr = GetProcAddress(mods[i], name); |
224 | | if (addr) { |
225 | | return addr; |
226 | | } |
227 | | i++; |
228 | | } |
229 | | #endif |
230 | 0 | return addr; |
231 | 0 | } |
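On non-Windows targets the function above resolves a name with a single dlsym() over the whole process (using RTLD_DEFAULT where available), while on Windows it probes every loaded module with GetProcAddress. A quick standalone check of the POSIX path, assuming a platform that provides RTLD_DEFAULT (link with -ldl on older glibc):

    #define _GNU_SOURCE
    #include <dlfcn.h>
    #include <stdio.h>

    int main(void)
    {
        void *addr = dlsym(RTLD_DEFAULT, "printf");

        printf("printf resolved to %p\n", addr);
        return 0;
    }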
232 | | |
233 | | #ifdef IR_SNAPSHOT_HANDLER_DCL |
234 | | IR_SNAPSHOT_HANDLER_DCL(); |
235 | | #endif |
236 | | |
237 | | #if defined(IR_TARGET_X86) || defined(IR_TARGET_X64) |
238 | | static void* ir_sym_addr(ir_ctx *ctx, const ir_insn *addr_insn) |
239 | 0 | { |
240 | 0 | const char *name = ir_get_str(ctx, addr_insn->val.name); |
241 | 0 | void *addr = (ctx->loader && ctx->loader->resolve_sym_name) ? |
242 | 0 | ctx->loader->resolve_sym_name(ctx->loader, name, IR_RESOLVE_SYM_SILENT) : |
243 | 0 | ir_resolve_sym_name(name); |
244 | |
245 | 0 | return addr; |
246 | 0 | } |
247 | | #endif |
248 | | |
249 | | static void* ir_sym_val(ir_ctx *ctx, const ir_insn *addr_insn) |
250 | 0 | { |
251 | 0 | const char *name = ir_get_str(ctx, addr_insn->val.name); |
252 | 0 | void *addr = (ctx->loader && ctx->loader->resolve_sym_name) ? |
253 | 0 | ctx->loader->resolve_sym_name(ctx->loader, name, addr_insn->op == IR_FUNC ? IR_RESOLVE_SYM_ADD_THUNK : 0) : |
254 | 0 | ir_resolve_sym_name(name); |
255 | |
256 | 0 | IR_ASSERT(addr); |
257 | 0 | return addr; |
258 | 0 | } |
259 | | |
260 | | static void *ir_call_addr(ir_ctx *ctx, ir_insn *insn, ir_insn *addr_insn) |
261 | 0 | { |
262 | 0 | void *addr; |
263 | |
264 | 0 | IR_ASSERT(addr_insn->type == IR_ADDR); |
265 | 0 | if (addr_insn->op == IR_FUNC) { |
266 | 0 | addr = ir_sym_val(ctx, addr_insn); |
267 | 0 | } else { |
268 | 0 | IR_ASSERT(addr_insn->op == IR_ADDR || addr_insn->op == IR_FUNC_ADDR); |
269 | 0 | addr = (void*)addr_insn->val.addr; |
270 | 0 | } |
271 | 0 | return addr; |
272 | 0 | } |
273 | | |
274 | | static void *ir_jmp_addr(ir_ctx *ctx, ir_insn *insn, ir_insn *addr_insn) |
275 | 0 | { |
276 | 0 | void *addr = ir_call_addr(ctx, insn, addr_insn); |
277 | |
278 | 0 | #ifdef IR_SNAPSHOT_HANDLER |
279 | 0 | if (ctx->ir_base[insn->op1].op == IR_SNAPSHOT) { |
280 | 0 | addr = IR_SNAPSHOT_HANDLER(ctx, insn->op1, &ctx->ir_base[insn->op1], addr); |
281 | 0 | } |
282 | 0 | #endif |
283 | 0 | return addr; |
284 | 0 | } |
285 | | |
286 | | static int8_t ir_get_fused_reg(ir_ctx *ctx, ir_ref root, ir_ref ref_and_op) |
287 | 0 | { |
288 | 0 | if (ctx->fused_regs) { |
289 | 0 | char key[10]; |
290 | 0 | ir_ref val; |
291 | |
292 | 0 | memcpy(key, &root, sizeof(ir_ref)); |
293 | 0 | memcpy(key + 4, &ref_and_op, sizeof(ir_ref)); |
294 | |
295 | 0 | val = ir_strtab_find(ctx->fused_regs, key, 8); |
296 | 0 | if (val) { |
297 | 0 | return val; |
298 | 0 | } |
299 | 0 | } |
300 | 0 | return ((int8_t*)ctx->regs)[ref_and_op]; |
301 | 0 | } |
302 | | |
303 | | #if defined(__GNUC__) |
304 | | # pragma GCC diagnostic push |
305 | | # pragma GCC diagnostic ignored "-Warray-bounds" |
306 | | # pragma GCC diagnostic ignored "-Wimplicit-fallthrough" |
307 | | #endif |
308 | | |
309 | | #if defined(IR_TARGET_X86) || defined(IR_TARGET_X64) |
310 | | # include "dynasm/dasm_proto.h" |
311 | | # include "dynasm/dasm_x86.h" |
312 | | #elif defined(IR_TARGET_AARCH64) |
313 | | # include "dynasm/dasm_proto.h" |
314 | | static int ir_add_veneer(dasm_State *Dst, void *buffer, uint32_t ins, int *b, uint32_t *cp, ptrdiff_t offset); |
315 | | # define DASM_ADD_VENEER ir_add_veneer |
316 | | # include "dynasm/dasm_arm64.h" |
317 | | #else |
318 | | # error "Unknown IR target" |
319 | | #endif |
320 | | |
321 | | #if defined(__GNUC__) |
322 | | # pragma GCC diagnostic pop |
323 | | #endif |
324 | | |
325 | | /* Forward Declarations */ |
326 | | static void ir_emit_osr_entry_loads(ir_ctx *ctx, int b, ir_block *bb); |
327 | | static int ir_parallel_copy(ir_ctx *ctx, ir_copy *copies, int count, ir_reg tmp_reg, ir_reg tmp_fp_reg); |
328 | | static void ir_emit_dessa_moves(ir_ctx *ctx, int b, ir_block *bb); |
329 | | |
330 | | typedef struct _ir_common_backend_data { |
331 | | ir_reg_alloc_data ra_data; |
332 | | dasm_State *dasm_state; |
333 | | ir_bitset emit_constants; |
334 | | } ir_common_backend_data; |
335 | | |
336 | | static int ir_get_const_label(ir_ctx *ctx, ir_ref ref) |
337 | 0 | { |
338 | 0 | ir_common_backend_data *data = ctx->data; |
339 | 0 | int label = ctx->cfg_blocks_count - ref; |
340 | |
341 | 0 | IR_ASSERT(IR_IS_CONST_REF(ref)); |
342 | 0 | ir_bitset_incl(data->emit_constants, -ref); |
343 | 0 | return label; |
344 | 0 | } |
345 | | |
346 | | #if defined(IR_TARGET_X86) || defined(IR_TARGET_X64) |
347 | | # include <ir_emit_x86.h> |
348 | | #elif defined(IR_TARGET_AARCH64) |
349 | | # include <ir_emit_aarch64.h> |
350 | | #else |
351 | | # error "Unknown IR target" |
352 | | #endif |
353 | | |
354 | | static IR_NEVER_INLINE void ir_emit_osr_entry_loads(ir_ctx *ctx, int b, ir_block *bb) |
355 | 0 | { |
356 | 0 | ir_list *list = (ir_list*)ctx->osr_entry_loads; |
357 | 0 | int pos = 0, count, i; |
358 | 0 | ir_ref ref; |
359 | |
360 | 0 | IR_ASSERT(ctx->binding); |
361 | 0 | IR_ASSERT(list); |
362 | 0 | while (1) { |
363 | 0 | i = ir_list_at(list, pos); |
364 | 0 | if (b == i) { |
365 | 0 | break; |
366 | 0 | } |
367 | 0 | IR_ASSERT(i != 0); /* end marker */ |
368 | 0 | pos++; |
369 | 0 | count = ir_list_at(list, pos); |
370 | 0 | pos += count + 1; |
371 | 0 | } |
372 | 0 | pos++; |
373 | 0 | count = ir_list_at(list, pos); |
374 | 0 | pos++; |
375 | |
376 | 0 | for (i = 0; i < count; i++, pos++) { |
377 | 0 | ref = ir_list_at(list, pos); |
378 | 0 | IR_ASSERT(ref >= 0 && ctx->vregs[ref] && ctx->live_intervals[ctx->vregs[ref]]); |
379 | 0 | if (!(ctx->live_intervals[ctx->vregs[ref]]->flags & IR_LIVE_INTERVAL_SPILLED)) { |
380 | | /* not spilled */ |
381 | 0 | ir_reg reg = ctx->live_intervals[ctx->vregs[ref]]->reg; |
382 | 0 | ir_type type = ctx->ir_base[ref].type; |
383 | 0 | int32_t offset = -ir_binding_find(ctx, ref); |
384 | |
385 | 0 | IR_ASSERT(offset > 0); |
386 | 0 | ir_emit_load_mem(ctx, type, reg, IR_MEM_BO(ctx->spill_base, offset)); |
387 | 0 | } else { |
388 | 0 | IR_ASSERT(ctx->live_intervals[ctx->vregs[ref]]->flags & IR_LIVE_INTERVAL_SPILL_SPECIAL); |
389 | 0 | } |
390 | 0 | } |
391 | 0 | } |
392 | | |
393 | | /* |
394 | | * Parallel copy sequentialization algorithm |
395 | | * |
396 | | * The implementation is based on algorithm 1 described in |
397 | | * "Revisiting Out-of-SSA Translation for Correctness, Code Quality and Efficiency", |
398 | | * Benoit Boissinot, Alain Darte, Fabrice Rastello, Benoit Dupont de Dinechin, Christophe Guillon. |
399 | | * 2009 International Symposium on Code Generation and Optimization, Seattle, WA, USA, 2009, |
400 | | * pp. 114-125, doi: 10.1109/CGO.2009.19. |
401 | | */ |
402 | | static int ir_parallel_copy(ir_ctx *ctx, ir_copy *copies, int count, ir_reg tmp_reg, ir_reg tmp_fp_reg) |
403 | 0 | { |
404 | 0 | int i; |
405 | 0 | int8_t *pred, *loc, *types; |
406 | 0 | ir_reg to, from; |
407 | 0 | ir_type type; |
408 | 0 | ir_regset todo, ready, srcs; |
409 | |
410 | 0 | if (count == 1) { |
411 | 0 | to = copies[0].to; |
412 | 0 | from = copies[0].from; |
413 | 0 | IR_ASSERT(from != to); |
414 | 0 | type = copies[0].type; |
415 | 0 | if (IR_IS_TYPE_INT(type)) { |
416 | 0 | ir_emit_mov(ctx, type, to, from); |
417 | 0 | } else { |
418 | 0 | ir_emit_fp_mov(ctx, type, to, from); |
419 | 0 | } |
420 | 0 | return 1; |
421 | 0 | } |
422 | | |
423 | 0 | loc = alloca(IR_REG_NUM * 3 * sizeof(int8_t)); |
424 | 0 | pred = loc + IR_REG_NUM; |
425 | 0 | types = pred + IR_REG_NUM; |
426 | 0 | todo = IR_REGSET_EMPTY; |
427 | 0 | srcs = IR_REGSET_EMPTY; |
428 | |
429 | 0 | for (i = 0; i < count; i++) { |
430 | 0 | from = copies[i].from; |
431 | 0 | to = copies[i].to; |
432 | 0 | IR_ASSERT(from != to); |
433 | 0 | IR_REGSET_INCL(srcs, from); |
434 | 0 | loc[from] = from; |
435 | 0 | pred[to] = from; |
436 | 0 | types[from] = copies[i].type; |
437 | 0 | IR_ASSERT(!IR_REGSET_IN(todo, to)); |
438 | 0 | IR_REGSET_INCL(todo, to); |
439 | 0 | } |
440 | |
441 | 0 | ready = IR_REGSET_DIFFERENCE(todo, srcs); |
442 | |
443 | 0 | if (ready == todo) { |
444 | 0 | for (i = 0; i < count; i++) { |
445 | 0 | from = copies[i].from; |
446 | 0 | to = copies[i].to; |
447 | 0 | IR_ASSERT(from != to); |
448 | 0 | type = copies[i].type; |
449 | 0 | if (IR_IS_TYPE_INT(type)) { |
450 | 0 | ir_emit_mov(ctx, type, to, from); |
451 | 0 | } else { |
452 | 0 | ir_emit_fp_mov(ctx, type, to, from); |
453 | 0 | } |
454 | 0 | } |
455 | 0 | return 1; |
456 | 0 | } |
457 | | |
458 | | /* temporary registers can't be the same as any of the destinations */ |
459 | 0 | IR_ASSERT(tmp_reg == IR_REG_NONE || !IR_REGSET_IN(todo, tmp_reg)); |
460 | 0 | IR_ASSERT(tmp_fp_reg == IR_REG_NONE || !IR_REGSET_IN(todo, tmp_fp_reg)); |
461 | | |
462 | | /* first we resolve all "windmill blades" - trees (this doesn't require temporary registers) */ |
463 | 0 | while (ready != IR_REGSET_EMPTY) { |
464 | 0 | ir_reg r; |
465 | |
466 | 0 | to = ir_regset_pop_first(&ready); |
467 | 0 | from = pred[to]; |
468 | 0 | r = loc[from]; |
469 | 0 | type = types[from]; |
470 | 0 | if (IR_IS_TYPE_INT(type)) { |
471 | 0 | ir_emit_mov_ext(ctx, type, to, r); |
472 | 0 | } else { |
473 | 0 | ir_emit_fp_mov(ctx, type, to, r); |
474 | 0 | } |
475 | 0 | IR_REGSET_EXCL(todo, to); |
476 | 0 | loc[from] = to; |
477 | 0 | if (from == r && IR_REGSET_IN(todo, from)) { |
478 | 0 | IR_REGSET_INCL(ready, from); |
479 | 0 | } |
480 | 0 | } |
481 | 0 | if (todo == IR_REGSET_EMPTY) { |
482 | 0 | return 1; |
483 | 0 | } |
484 | | |
485 | | /* at this point the sources that are the same as temporaries are already moved */ |
486 | 0 | IR_ASSERT(tmp_reg == IR_REG_NONE || !IR_REGSET_IN(srcs, tmp_reg) || pred[loc[tmp_reg]] == tmp_reg); |
487 | 0 | IR_ASSERT(tmp_fp_reg == IR_REG_NONE || !IR_REGSET_IN(srcs, tmp_fp_reg) || pred[loc[tmp_fp_reg]] == tmp_fp_reg); |
488 | | |
489 | | /* now we resolve all "windmill axles" - cycles (this requires temporary registers) */ |
490 | 0 | while (todo != IR_REGSET_EMPTY) { |
491 | 0 | to = ir_regset_pop_first(&todo); |
492 | 0 | from = pred[to]; |
493 | 0 | IR_ASSERT(to != loc[from]); |
494 | 0 | type = types[from]; |
495 | 0 | if (IR_IS_TYPE_INT(type)) { |
496 | 0 | #ifdef IR_HAVE_SWAP_INT |
497 | 0 | if (pred[from] == to) { |
498 | 0 | if (ir_type_size[types[to]] > ir_type_size[type]) { |
499 | 0 | type = types[to]; |
500 | 0 | } |
501 | 0 | ir_emit_swap(ctx, type, to, from); |
502 | 0 | IR_REGSET_EXCL(todo, from); |
503 | 0 | loc[to] = from; |
504 | 0 | loc[from] = to; |
505 | 0 | continue; |
506 | 0 | } |
507 | 0 | #endif |
508 | 0 | IR_ASSERT(tmp_reg != IR_REG_NONE); |
509 | 0 | IR_ASSERT(tmp_reg >= IR_REG_GP_FIRST && tmp_reg <= IR_REG_GP_LAST); |
510 | 0 | ir_emit_mov(ctx, type, tmp_reg, to); |
511 | 0 | loc[to] = tmp_reg; |
512 | 0 | } else { |
513 | | #ifdef IR_HAVE_SWAP_FP |
514 | | if (pred[from] == to && types[to] == type) { |
515 | | ir_emit_swap_fp(ctx, type, to, from); |
516 | | IR_REGSET_EXCL(todo, from); |
517 | | loc[to] = from; |
518 | | loc[from] = to; |
519 | | continue; |
520 | | } |
521 | | #endif |
522 | 0 | IR_ASSERT(tmp_fp_reg != IR_REG_NONE); |
523 | 0 | IR_ASSERT(tmp_fp_reg >= IR_REG_FP_FIRST && tmp_fp_reg <= IR_REG_FP_LAST); |
524 | 0 | ir_emit_fp_mov(ctx, type, tmp_fp_reg, to); |
525 | 0 | loc[to] = tmp_fp_reg; |
526 | 0 | } |
527 | 0 | while (1) { |
528 | 0 | ir_reg r; |
529 | |
530 | 0 | from = pred[to]; |
531 | 0 | r = loc[from]; |
532 | 0 | type = types[from]; |
533 | 0 | if (IR_IS_TYPE_INT(type)) { |
534 | 0 | ir_emit_mov_ext(ctx, type, to, r); |
535 | 0 | } else { |
536 | 0 | ir_emit_fp_mov(ctx, type, to, r); |
537 | 0 | } |
538 | 0 | IR_REGSET_EXCL(todo, to); |
539 | 0 | loc[from] = to; |
540 | 0 | if (from == r && IR_REGSET_IN(todo, from)) { |
541 | 0 | to = from; |
542 | 0 | } else { |
543 | 0 | break; |
544 | 0 | } |
545 | 0 | } |
546 | 0 | } |
547 | | |
548 | 0 | return 1; |
549 | 0 | } |
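To make the phases above concrete, here is a standalone toy sequentializer over plain integer "registers": tree copies (the "windmill blades") are emitted leaf-first, and each remaining cycle (an "axle") is broken through one temporary. It follows Algorithm 1 of the Boissinot et al. paper cited above, but omits the type handling, sign/zero extension, and swap instructions of the real code; all names are illustrative:

    #include <stdio.h>

    enum { N = 8, NONE = -1 };

    static void emit(int to, int from) { printf("r%d = r%d\n", to, from); }

    /* pred[b] = a encodes the parallel copy b <- a (a != b), NONE otherwise.
     * tmp must be a register that no copy reads or writes. */
    static void sequentialize(int pred[N], int tmp)
    {
        int loc[N], ready[N], done[N];
        int nready = 0, remaining = 0, a, b, c;

        for (b = 0; b < N; b++) { loc[b] = NONE; done[b] = 1; }
        for (b = 0; b < N; b++) {
            if (pred[b] != NONE) {
                loc[pred[b]] = pred[b]; /* each source's value starts in itself */
                done[b] = 0;
                remaining++;
            }
        }
        for (b = 0; b < N; b++) {
            if (!done[b] && loc[b] == NONE) {
                ready[nready++] = b;    /* b is nobody's source: a tree leaf */
            }
        }
        while (remaining > 0) {
            while (nready > 0) {        /* emit every copy that is now safe */
                b = ready[--nready];
                a = pred[b];
                c = loc[a];             /* where a's original value lives now */
                emit(b, c);
                loc[a] = b;
                done[b] = 1;
                remaining--;
                if (a == c && !done[a]) {
                    ready[nready++] = a; /* a's value was saved: a is writable */
                }
            }
            if (remaining > 0) {        /* all leftover copies lie on cycles */
                for (b = 0; done[b]; b++);
                emit(tmp, b);           /* break one cycle through tmp */
                loc[b] = tmp;
                ready[nready++] = b;
            }
        }
    }

    int main(void)
    {
        /* One blade (r3 = r1) and one axle (swap r4 and r5); r0 is free as tmp. */
        int pred[N] = { NONE, NONE, NONE, 1, 5, 4, NONE, NONE };

        sequentialize(pred, 0);
        return 0;
    }

Running it prints "r3 = r1", then "r0 = r4", "r4 = r5", "r5 = r0": the tree move comes first, and the cycle is broken through the temporary.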
550 | | |
551 | | static void ir_emit_dessa_move(ir_ctx *ctx, ir_type type, ir_ref to, ir_ref from, ir_reg tmp_reg, ir_reg tmp_fp_reg) |
552 | 0 | { |
553 | 0 | ir_mem mem_from, mem_to; |
554 | |
555 | 0 | IR_ASSERT(from != to); |
556 | 0 | if (to < IR_REG_NUM) { |
557 | 0 | if (IR_IS_CONST_REF(from)) { |
558 | 0 | if (-from < ctx->consts_count) { |
559 | | /* constant reference */ |
560 | 0 | ir_emit_load(ctx, type, to, from); |
561 | 0 | } else { |
562 | | /* local variable address */ |
563 | 0 | ir_load_local_addr(ctx, to, -from - ctx->consts_count); |
564 | 0 | } |
565 | 0 | } else if (from < IR_REG_NUM) { |
566 | 0 | if (IR_IS_TYPE_INT(type)) { |
567 | 0 | ir_emit_mov(ctx, type, to, from); |
568 | 0 | } else { |
569 | 0 | ir_emit_fp_mov(ctx, type, to, from); |
570 | 0 | } |
571 | 0 | } else { |
572 | 0 | mem_from = ir_vreg_spill_slot(ctx, from - IR_REG_NUM); |
573 | 0 | ir_emit_load_mem(ctx, type, to, mem_from); |
574 | 0 | } |
575 | 0 | } else { |
576 | 0 | mem_to = ir_vreg_spill_slot(ctx, to - IR_REG_NUM); |
577 | 0 | if (IR_IS_CONST_REF(from)) { |
578 | 0 | if (-from < ctx->consts_count) { |
579 | | /* constant reference */ |
580 | 0 | #if defined(IR_TARGET_X86) || defined(IR_TARGET_X64) |
581 | 0 | if (IR_IS_TYPE_INT(type) |
582 | 0 | && !IR_IS_SYM_CONST(ctx->ir_base[from].op) |
583 | 0 | && (ir_type_size[type] != 8 || IR_IS_SIGNED_32BIT(ctx->ir_base[from].val.i64))) { |
584 | 0 | ir_emit_store_mem_imm(ctx, type, mem_to, ctx->ir_base[from].val.i32); |
585 | 0 | return; |
586 | 0 | } |
587 | 0 | #endif |
588 | 0 | ir_reg tmp = IR_IS_TYPE_INT(type) ? tmp_reg : tmp_fp_reg; |
589 | 0 | IR_ASSERT(tmp != IR_REG_NONE); |
590 | 0 | ir_emit_load(ctx, type, tmp, from); |
591 | 0 | ir_emit_store_mem(ctx, type, mem_to, tmp); |
592 | 0 | } else { |
593 | | /* local variable address */ |
594 | 0 | IR_ASSERT(IR_IS_TYPE_INT(type)); |
595 | 0 | IR_ASSERT(tmp_reg != IR_REG_NONE); |
596 | 0 | ir_load_local_addr(ctx, tmp_reg, -from - ctx->consts_count); |
597 | 0 | ir_emit_store_mem(ctx, type, mem_to, tmp_reg); |
598 | 0 | } |
599 | 0 | } else if (from < IR_REG_NUM) { |
600 | 0 | ir_emit_store_mem(ctx, type, mem_to, from); |
601 | 0 | } else { |
602 | 0 | mem_from = ir_vreg_spill_slot(ctx, from - IR_REG_NUM); |
603 | 0 | IR_ASSERT(IR_MEM_VAL(mem_to) != IR_MEM_VAL(mem_from)); |
604 | 0 | ir_reg tmp = IR_IS_TYPE_INT(type) ? tmp_reg : tmp_fp_reg; |
605 | 0 | IR_ASSERT(tmp != IR_REG_NONE); |
606 | 0 | ir_emit_load_mem(ctx, type, tmp, mem_from); |
607 | 0 | ir_emit_store_mem(ctx, type, mem_to, tmp); |
608 | 0 | } |
609 | 0 | } |
610 | 0 | } |
611 | | |
612 | | IR_ALWAYS_INLINE void ir_dessa_resolve_cycle(ir_ctx *ctx, int32_t *pred, int32_t *loc, int8_t *types, ir_bitset todo, int32_t to, ir_reg tmp_reg, ir_reg tmp_fp_reg) |
613 | 0 | { |
614 | 0 | ir_ref from; |
615 | 0 | ir_mem tmp_spill_slot; |
616 | 0 | ir_type type; |
617 | |
618 | 0 | IR_MEM_VAL(tmp_spill_slot) = 0; |
619 | 0 | IR_ASSERT(!IR_IS_CONST_REF(to)); |
620 | 0 | from = pred[to]; |
621 | 0 | type = types[from]; |
622 | 0 | IR_ASSERT(!IR_IS_CONST_REF(from)); |
623 | 0 | IR_ASSERT(from != to); |
624 | 0 | IR_ASSERT(loc[from] == from); |
625 | |
626 | 0 | if (IR_IS_TYPE_INT(type)) { |
627 | 0 | #ifdef IR_HAVE_SWAP_INT |
628 | 0 | if (pred[from] == to && to < IR_REG_NUM && from < IR_REG_NUM) { |
629 | | /* a simple cycle from 2 elements */ |
630 | 0 | if (ir_type_size[types[to]] > ir_type_size[type]) { |
631 | 0 | type = types[to]; |
632 | 0 | } |
633 | 0 | ir_emit_swap(ctx, type, to, from); |
634 | 0 | ir_bitset_excl(todo, from); |
635 | 0 | ir_bitset_excl(todo, to); |
636 | 0 | loc[to] = from; |
637 | 0 | loc[from] = to; |
638 | 0 | return; |
639 | 0 | } |
640 | 0 | #endif |
641 | 0 | IR_ASSERT(tmp_reg != IR_REG_NONE); |
642 | 0 | IR_ASSERT(tmp_reg >= IR_REG_GP_FIRST && tmp_reg <= IR_REG_GP_LAST); |
643 | 0 | loc[to] = tmp_reg; |
644 | 0 | if (to < IR_REG_NUM) { |
645 | 0 | ir_emit_mov(ctx, type, tmp_reg, to); |
646 | 0 | } else { |
647 | 0 | ir_emit_load_mem_int(ctx, type, tmp_reg, ir_vreg_spill_slot(ctx, to - IR_REG_NUM)); |
648 | 0 | } |
649 | 0 | } else { |
650 | | #ifdef IR_HAVE_SWAP_FP |
651 | | if (pred[from] == to && to < IR_REG_NUM && from < IR_REG_NUM && types[to] == type) { |
652 | | /* a simple cycle from 2 elements */ |
653 | | ir_emit_swap_fp(ctx, type, to, from); |
654 | | IR_REGSET_EXCL(todo, from); |
655 | | IR_REGSET_EXCL(todo, to); |
656 | | loc[to] = from; |
657 | | loc[from] = to; |
658 | | return; |
659 | | } |
660 | | #endif |
661 | 0 | IR_ASSERT(tmp_fp_reg != IR_REG_NONE); |
662 | 0 | IR_ASSERT(tmp_fp_reg >= IR_REG_FP_FIRST && tmp_fp_reg <= IR_REG_FP_LAST); |
663 | 0 | loc[to] = tmp_fp_reg; |
664 | 0 | if (to < IR_REG_NUM) { |
665 | 0 | ir_emit_fp_mov(ctx, type, tmp_fp_reg, to); |
666 | 0 | } else { |
667 | 0 | ir_emit_load_mem_fp(ctx, type, tmp_fp_reg, ir_vreg_spill_slot(ctx, to - IR_REG_NUM)); |
668 | 0 | } |
669 | 0 | } |
670 | |
672 | 0 | int32_t r; |
673 | |
|
674 | 0 | from = pred[to]; |
675 | 0 | r = loc[from]; |
676 | 0 | type = types[to]; |
677 | |
|
678 | 0 | if (from == r && ir_bitset_in(todo, from)) { |
679 | | /* Memory to memory move inside an isolated or "blocked" cycle requres an additional temporary register */ |
680 | 0 | if (to >= IR_REG_NUM && r >= IR_REG_NUM) { |
681 | 0 | ir_reg tmp = IR_IS_TYPE_INT(type) ? tmp_reg : tmp_fp_reg; |
682 | |
|
683 | 0 | if (!IR_MEM_VAL(tmp_spill_slot)) { |
684 | | /* Free a register, saving it in a temporary spill slot */ |
685 | 0 | tmp_spill_slot = IR_MEM_BO(IR_REG_STACK_POINTER, -16); |
686 | 0 | ir_emit_store_mem(ctx, type, tmp_spill_slot, tmp); |
687 | 0 | } |
688 | 0 | ir_emit_dessa_move(ctx, type, to, r, tmp_reg, tmp_fp_reg); |
689 | 0 | } else { |
690 | 0 | ir_emit_dessa_move(ctx, type, to, r, IR_REG_NONE, IR_REG_NONE); |
691 | 0 | } |
692 | 0 | ir_bitset_excl(todo, to); |
693 | 0 | loc[from] = to; |
694 | 0 | to = from; |
695 | 0 | } else { |
696 | 0 | break; |
697 | 0 | } |
698 | 0 | } |
699 | |
|
700 | 0 | type = types[to]; |
701 | 0 | if (IR_MEM_VAL(tmp_spill_slot)) { |
702 | 0 | ir_emit_load_mem(ctx, type, IR_IS_TYPE_INT(type) ? tmp_reg : tmp_fp_reg, tmp_spill_slot); |
703 | 0 | } |
704 | 0 | ir_emit_dessa_move(ctx, type, to, loc[from], IR_REG_NONE, IR_REG_NONE); |
705 | 0 | ir_bitset_excl(todo, to); |
706 | 0 | loc[from] = to; |
707 | 0 | } |
708 | | |
709 | | static int ir_dessa_parallel_copy(ir_ctx *ctx, ir_dessa_copy *copies, int count, ir_reg tmp_reg, ir_reg tmp_fp_reg) |
710 | 0 | { |
711 | 0 | int i; |
712 | 0 | int32_t *pred, *loc, to, from; |
713 | 0 | int8_t *types; |
714 | 0 | ir_type type; |
715 | 0 | uint32_t len; |
716 | 0 | ir_bitset todo, ready, srcs, visited; |
717 | |
|
718 | 0 | if (count == 1) { |
719 | 0 | to = copies[0].to; |
720 | 0 | from = copies[0].from; |
721 | 0 | IR_ASSERT(from != to); |
722 | 0 | type = copies[0].type; |
723 | 0 | ir_emit_dessa_move(ctx, type, to, from, tmp_reg, tmp_fp_reg); |
724 | 0 | return 1; |
725 | 0 | } |
726 | | |
727 | 0 | len = IR_REG_NUM + ctx->vregs_count + 1; |
728 | 0 | todo = ir_bitset_malloc(len); |
729 | 0 | srcs = ir_bitset_malloc(len); |
730 | 0 | loc = ir_mem_malloc(len * 2 * sizeof(int32_t) + len * sizeof(int8_t)); |
731 | 0 | pred = loc + len; |
732 | 0 | types = (int8_t*)(pred + len); |
733 | |
|
734 | 0 | for (i = 0; i < count; i++) { |
735 | 0 | from = copies[i].from; |
736 | 0 | to = copies[i].to; |
737 | 0 | IR_ASSERT(from != to); |
738 | 0 | if (!IR_IS_CONST_REF(from)) { |
739 | 0 | ir_bitset_incl(srcs, from); |
740 | 0 | loc[from] = from; |
741 | 0 | } |
742 | 0 | pred[to] = from; |
743 | 0 | types[to] = copies[i].type; |
744 | 0 | IR_ASSERT(!ir_bitset_in(todo, to)); |
745 | 0 | ir_bitset_incl(todo, to); |
746 | 0 | } |
747 | | |
748 | | /* temporary registers can't be the same as some of the sources */ |
749 | 0 | IR_ASSERT(tmp_reg == IR_REG_NONE || !ir_bitset_in(srcs, tmp_reg)); |
750 | 0 | IR_ASSERT(tmp_fp_reg == IR_REG_NONE || !ir_bitset_in(srcs, tmp_fp_reg)); |
751 | | |
752 | | /* first we resolve all "windmill blades" - trees, that don't set temporary registers */ |
753 | 0 | ready = ir_bitset_malloc(len); |
754 | 0 | ir_bitset_copy(ready, todo, ir_bitset_len(len)); |
755 | 0 | ir_bitset_difference(ready, srcs, ir_bitset_len(len)); |
756 | 0 | if (tmp_reg != IR_REG_NONE) { |
757 | 0 | ir_bitset_excl(ready, tmp_reg); |
758 | 0 | } |
759 | 0 | if (tmp_fp_reg != IR_REG_NONE) { |
760 | 0 | ir_bitset_excl(ready, tmp_fp_reg); |
761 | 0 | } |
762 | 0 | while ((to = ir_bitset_pop_first(ready, ir_bitset_len(len))) >= 0) { |
763 | 0 | ir_bitset_excl(todo, to); |
764 | 0 | type = types[to]; |
765 | 0 | from = pred[to]; |
766 | 0 | if (IR_IS_CONST_REF(from)) { |
767 | 0 | ir_emit_dessa_move(ctx, type, to, from, tmp_reg, tmp_fp_reg); |
768 | 0 | } else { |
769 | 0 | int32_t r = loc[from]; |
770 | 0 | ir_emit_dessa_move(ctx, type, to, r, tmp_reg, tmp_fp_reg); |
771 | 0 | loc[from] = to; |
772 | 0 | if (from == r && ir_bitset_in(todo, from) && from != tmp_reg && from != tmp_fp_reg) { |
773 | 0 | ir_bitset_incl(ready, from); |
774 | 0 | } |
775 | 0 | } |
776 | 0 | } |
777 | | |
778 | | /* then we resolve all "windmill axles" - cycles (this requres temporary registers) */ |
779 | 0 | visited = ir_bitset_malloc(len); |
780 | 0 | ir_bitset_copy(ready, todo, ir_bitset_len(len)); |
781 | 0 | ir_bitset_intersection(ready, srcs, ir_bitset_len(len)); |
782 | 0 | while ((to = ir_bitset_first(ready, ir_bitset_len(len))) >= 0) { |
783 | 0 | ir_bitset_clear(visited, ir_bitset_len(len)); |
784 | 0 | ir_bitset_incl(visited, to); |
785 | 0 | to = pred[to]; |
786 | 0 | while (!IR_IS_CONST_REF(to) && ir_bitset_in(ready, to)) { |
787 | 0 | to = pred[to]; |
788 | 0 | if (IR_IS_CONST_REF(to)) { |
789 | 0 | break; |
790 | 0 | } else if (ir_bitset_in(visited, to)) { |
791 | | /* We found a cycle. Resolve it. */ |
792 | 0 | ir_bitset_incl(visited, to); |
793 | 0 | ir_dessa_resolve_cycle(ctx, pred, loc, types, todo, to, tmp_reg, tmp_fp_reg); |
794 | 0 | break; |
795 | 0 | } |
796 | 0 | ir_bitset_incl(visited, to); |
797 | 0 | } |
798 | 0 | ir_bitset_difference(ready, visited, ir_bitset_len(len)); |
799 | 0 | } |
800 | | |
801 | | /* finally we resolve remaining "windmill blades" - trees that set temporary registers */ |
802 | 0 | ir_bitset_copy(ready, todo, ir_bitset_len(len)); |
803 | 0 | ir_bitset_difference(ready, srcs, ir_bitset_len(len)); |
804 | 0 | while ((to = ir_bitset_pop_first(ready, ir_bitset_len(len))) >= 0) { |
805 | 0 | ir_bitset_excl(todo, to); |
806 | 0 | type = types[to]; |
807 | 0 | from = pred[to]; |
808 | 0 | if (IR_IS_CONST_REF(from)) { |
809 | 0 | ir_emit_dessa_move(ctx, type, to, from, tmp_reg, tmp_fp_reg); |
810 | 0 | } else { |
811 | 0 | int32_t r = loc[from]; |
812 | 0 | ir_emit_dessa_move(ctx, type, to, r, tmp_reg, tmp_fp_reg); |
813 | 0 | loc[from] = to; |
814 | 0 | if (from == r && ir_bitset_in(todo, from)) { |
815 | 0 | ir_bitset_incl(ready, from); |
816 | 0 | } |
817 | 0 | } |
818 | 0 | } |
819 | |
|
820 | 0 | IR_ASSERT(ir_bitset_empty(todo, ir_bitset_len(len))); |
821 | |
822 | 0 | ir_mem_free(visited); |
823 | 0 | ir_mem_free(ready); |
824 | 0 | ir_mem_free(loc); |
825 | 0 | ir_mem_free(srcs); |
826 | 0 | ir_mem_free(todo); |
827 | 0 | return 1; |
828 | 0 | } |
829 | | |
830 | | static void ir_emit_dessa_moves(ir_ctx *ctx, int b, ir_block *bb) |
831 | 0 | { |
832 | 0 | uint32_t succ, k, n = 0; |
833 | 0 | ir_block *succ_bb; |
834 | 0 | ir_use_list *use_list; |
835 | 0 | ir_ref i, *p; |
836 | 0 | ir_dessa_copy *copies; |
837 | 0 | ir_reg tmp_reg = ctx->regs[bb->end][0]; |
838 | 0 | ir_reg tmp_fp_reg = ctx->regs[bb->end][1]; |
839 | |
840 | 0 | IR_ASSERT(bb->successors_count == 1); |
841 | 0 | succ = ctx->cfg_edges[bb->successors]; |
842 | 0 | succ_bb = &ctx->cfg_blocks[succ]; |
843 | 0 | IR_ASSERT(succ_bb->predecessors_count > 1); |
844 | 0 | use_list = &ctx->use_lists[succ_bb->start]; |
845 | 0 | k = ir_phi_input_number(ctx, succ_bb, b); |
846 | |
847 | 0 | copies = alloca(use_list->count * sizeof(ir_dessa_copy)); |
848 | |
849 | 0 | for (i = use_list->count, p = &ctx->use_edges[use_list->refs]; i > 0; p++, i--) { |
850 | 0 | ir_ref ref = *p; |
851 | 0 | ir_insn *insn = &ctx->ir_base[ref]; |
852 | |
853 | 0 | if (insn->op == IR_PHI) { |
854 | 0 | ir_ref input = ir_insn_op(insn, k); |
855 | 0 | ir_reg src = ir_get_alocated_reg(ctx, ref, k); |
856 | 0 | ir_reg dst = ctx->regs[ref][0]; |
857 | 0 | ir_ref from, to; |
858 | |
859 | 0 | IR_ASSERT(dst == IR_REG_NONE || !IR_REG_SPILLED(dst)); |
860 | 0 | if (IR_IS_CONST_REF(input)) { |
861 | 0 | from = input; |
862 | 0 | } else if (ir_rule(ctx, input) == IR_STATIC_ALLOCA) { |
863 | | /* encode local variable address */ |
864 | 0 | from = -(ctx->consts_count + input); |
865 | 0 | } else { |
866 | 0 | from = (src != IR_REG_NONE && !IR_REG_SPILLED(src)) ? |
867 | 0 | (ir_ref)src : (ir_ref)(IR_REG_NUM + ctx->vregs[input]); |
868 | 0 | } |
869 | 0 | to = (dst != IR_REG_NONE) ? |
870 | 0 | (ir_ref)dst : (ir_ref)(IR_REG_NUM + ctx->vregs[ref]); |
871 | 0 | if (to != from) { |
872 | 0 | if (to >= IR_REG_NUM |
873 | 0 | && from >= IR_REG_NUM |
874 | 0 | && IR_MEM_VAL(ir_vreg_spill_slot(ctx, from - IR_REG_NUM)) == |
875 | 0 | IR_MEM_VAL(ir_vreg_spill_slot(ctx, to - IR_REG_NUM))) { |
876 | | /* It's possible that different virtual registers share the same special spill slot */ |
877 | | // TODO: See ext/opcache/tests/jit/gh11917.phpt failure on Linux 32-bit |
878 | 0 | continue; |
879 | 0 | } |
880 | 0 | copies[n].type = insn->type; |
881 | 0 | copies[n].from = from; |
882 | 0 | copies[n].to = to; |
883 | 0 | n++; |
884 | 0 | } |
885 | 0 | } |
886 | 0 | } |
887 | | |
888 | 0 | if (n > 0) { |
889 | 0 | ir_dessa_parallel_copy(ctx, copies, n, tmp_reg, tmp_fp_reg); |
890 | 0 | } |
891 | 0 | } |
892 | | |
893 | | int ir_match(ir_ctx *ctx) |
894 | 0 | { |
895 | 0 | uint32_t b; |
896 | 0 | ir_ref start, ref, *prev_ref; |
897 | 0 | ir_block *bb; |
898 | 0 | ir_insn *insn; |
899 | 0 | uint32_t entries_count = 0; |
900 | |
901 | 0 | ctx->rules = ir_mem_calloc(ctx->insns_count, sizeof(uint32_t)); |
902 | |
903 | 0 | prev_ref = ctx->prev_ref; |
904 | 0 | if (!prev_ref) { |
905 | 0 | ir_build_prev_refs(ctx); |
906 | 0 | prev_ref = ctx->prev_ref; |
907 | 0 | } |
908 | |
909 | 0 | if (ctx->entries_count) { |
910 | 0 | ctx->entries = ir_mem_malloc(ctx->entries_count * sizeof(ir_ref)); |
911 | 0 | } |
912 | |
913 | 0 | for (b = ctx->cfg_blocks_count, bb = ctx->cfg_blocks + b; b > 0; b--, bb--) { |
914 | 0 | IR_ASSERT(!(bb->flags & IR_BB_UNREACHABLE)); |
915 | 0 | start = bb->start; |
916 | 0 | if (UNEXPECTED(bb->flags & IR_BB_ENTRY)) { |
917 | 0 | IR_ASSERT(entries_count < ctx->entries_count); |
918 | 0 | insn = &ctx->ir_base[start]; |
919 | 0 | IR_ASSERT(insn->op == IR_ENTRY); |
920 | 0 | insn->op3 = entries_count; |
921 | 0 | ctx->entries[entries_count] = b; |
922 | 0 | entries_count++; |
923 | 0 | } |
924 | 0 | ctx->rules[start] = IR_SKIPPED | IR_NOP; |
925 | 0 | if (ctx->ir_base[start].op == IR_BEGIN && ctx->ir_base[start].op2) { |
926 | 0 | ctx->flags2 |= IR_HAS_BLOCK_ADDR; |
927 | 0 | } |
928 | 0 | ref = bb->end; |
929 | 0 | if (bb->successors_count == 1) { |
930 | 0 | insn = &ctx->ir_base[ref]; |
931 | 0 | if (insn->op == IR_END || insn->op == IR_LOOP_END) { |
932 | 0 | if (!ctx->rules[ref]) { |
933 | 0 | ctx->rules[ref] = insn->op; |
934 | 0 | } |
935 | 0 | ref = prev_ref[ref]; |
936 | 0 | if (ref == start && ctx->cfg_edges[bb->successors] != b) { |
937 | 0 | if (EXPECTED(!(bb->flags & IR_BB_ENTRY))) { |
938 | 0 | bb->flags |= IR_BB_EMPTY; |
939 | 0 | } else if (ctx->flags & IR_MERGE_EMPTY_ENTRIES) { |
940 | 0 | bb->flags |= IR_BB_EMPTY; |
941 | 0 | if (ctx->cfg_edges[bb->successors] == b + 1) { |
942 | 0 | (bb + 1)->flags |= IR_BB_PREV_EMPTY_ENTRY; |
943 | 0 | } |
944 | 0 | } |
945 | 0 | continue; |
946 | 0 | } |
947 | 0 | } |
948 | 0 | } |
949 | | |
950 | 0 | ctx->bb_start = start; /* bb_start is used by matcher to avoid fusion of insns from different blocks */ |
951 | |
952 | 0 | while (ref != start) { |
953 | 0 | uint32_t rule = ctx->rules[ref]; |
954 | |
955 | 0 | if (!rule) { |
956 | 0 | ctx->rules[ref] = rule = ir_match_insn(ctx, ref); |
957 | 0 | } |
958 | 0 | ir_match_insn2(ctx, ref, rule); |
959 | 0 | ref = prev_ref[ref]; |
960 | 0 | } |
961 | 0 | } |
962 | | |
963 | 0 | if (ctx->entries_count) { |
964 | 0 | ctx->entries_count = entries_count; |
965 | 0 | if (!entries_count) { |
966 | 0 | ir_mem_free(ctx->entries); |
967 | 0 | ctx->entries = NULL; |
968 | 0 | } |
969 | 0 | } |
970 | |
971 | 0 | return 1; |
972 | 0 | } |
973 | | |
974 | | int32_t ir_get_spill_slot_offset(ir_ctx *ctx, ir_ref ref) |
975 | 0 | { |
976 | 0 | int32_t offset; |
977 | |
978 | 0 | IR_ASSERT(ref >= 0 && ctx->vregs[ref] && ctx->live_intervals[ctx->vregs[ref]]); |
979 | 0 | offset = ctx->live_intervals[ctx->vregs[ref]]->stack_spill_pos; |
980 | 0 | IR_ASSERT(offset != -1); |
981 | 0 | return IR_SPILL_POS_TO_OFFSET(offset); |
982 | 0 | } |
983 | | |
984 | | const ir_call_conv_dsc *ir_get_call_conv_dsc(uint32_t flags) |
985 | 0 | { |
986 | | #ifdef IR_TARGET_X86 |
987 | | if ((flags & IR_CALL_CONV_MASK) == IR_CC_FASTCALL) { |
988 | | return &ir_call_conv_x86_fastcall; |
989 | | } |
990 | | #elif defined(IR_TARGET_X64) |
991 | 0 | switch (flags & IR_CALL_CONV_MASK) { |
992 | 0 | case IR_CC_DEFAULT: return &ir_call_conv_default; |
993 | 0 | case IR_CC_FASTCALL: return &ir_call_conv_default; |
994 | 0 | case IR_CC_PRESERVE_NONE: return &ir_call_conv_x86_64_preserve_none; |
995 | 0 | case IR_CC_X86_64_SYSV: return &ir_call_conv_x86_64_sysv; |
996 | 0 | case IR_CC_X86_64_MS: return &ir_call_conv_x86_64_ms; |
997 | 0 | default: break; |
998 | 0 | } |
999 | | #elif defined(IR_TARGET_AARCH64) |
1000 | | switch (flags & IR_CALL_CONV_MASK) { |
1001 | | case IR_CC_DEFAULT: return &ir_call_conv_default; |
1002 | | case IR_CC_FASTCALL: return &ir_call_conv_default; |
1003 | | case IR_CC_PRESERVE_NONE: return &ir_call_conv_aarch64_preserve_none; |
1004 | | case IR_CC_AARCH64_SYSV: return &ir_call_conv_aarch64_sysv; |
1005 | | case IR_CC_AARCH64_DARWIN: return &ir_call_conv_aarch64_darwin; |
1006 | | default: break; |
1007 | | } |
1008 | | #endif |
1009 | 0 | IR_ASSERT((flags & IR_CALL_CONV_MASK) == IR_CC_DEFAULT || (flags & IR_CALL_CONV_MASK) == IR_CC_BUILTIN); |
1010 | 0 | return &ir_call_conv_default; |
1011 | 0 | } |