/src/capstonenext/arch/AArch64/AArch64Mapping.c
Line | Count | Source |
1 | | /* Capstone Disassembly Engine */ |
2 | | /* By Nguyen Anh Quynh <aquynh@gmail.com>, 2013-2019 */ |
3 | | |
4 | | #ifdef CAPSTONE_HAS_AARCH64 |
5 | | |
6 | | #include <stdio.h> // debug |
7 | | #include <string.h> |
8 | | |
9 | | #include "capstone/aarch64.h" |
10 | | |
11 | | #include "../../cs_simple_types.h" |
12 | | #include "../../Mapping.h" |
13 | | #include "../../MathExtras.h" |
14 | | #include "../../utils.h" |
15 | | |
16 | | #include "AArch64AddressingModes.h" |
17 | | #include "AArch64BaseInfo.h" |
18 | | #include "AArch64DisassemblerExtension.h" |
19 | | #include "AArch64Linkage.h" |
20 | | #include "AArch64Mapping.h" |
21 | | |
22 | 2.12k | #define CHAR(c) #c[0] |
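| | // For example, CHAR(b) stringizes its argument and indexes it: |
| | // it expands to "b"[0], which yields the character 'b'. |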
23 | | |
24 | 3.96k | static float aarch64_exact_fp_to_fp(aarch64_exactfpimm exact) { |
25 | 3.96k | switch (exact) { |
26 | 0 | default: |
27 | 0 | CS_ASSERT(0 && "Not handled."); |
28 | 0 | return 999.0; |
29 | 255 | case AARCH64_EXACTFPIMM_HALF: |
30 | 255 | return 0.5; |
31 | 317 | case AARCH64_EXACTFPIMM_ONE: |
32 | 317 | return 1.0; |
33 | 535 | case AARCH64_EXACTFPIMM_TWO: |
34 | 535 | return 2.0; |
35 | 2.85k | case AARCH64_EXACTFPIMM_ZERO: |
36 | 2.85k | return 0.0; |
37 | 3.96k | } |
38 | 3.96k | } |
39 | | |
40 | | #ifndef CAPSTONE_DIET |
41 | | static const aarch64_reg aarch64_flag_regs[] = { |
42 | | AARCH64_REG_NZCV, |
43 | | }; |
44 | | |
45 | | static const aarch64_sysreg aarch64_flag_sys_regs[] = { |
46 | | AARCH64_SYSREG_NZCV, AARCH64_SYSREG_PMOVSCLR_EL0, |
47 | | AARCH64_SYSREG_PMOVSSET_EL0, AARCH64_SYSREG_SPMOVSCLR_EL0, |
48 | | AARCH64_SYSREG_SPMOVSSET_EL0 |
49 | | }; |
50 | | #endif // CAPSTONE_DIET |
51 | | |
52 | | static AArch64Layout_VectorLayout sme_reg_to_vas(aarch64_reg reg) |
53 | 0 | { |
54 | 0 | switch (reg) { |
55 | 0 | default: |
56 | 0 | return AARCH64LAYOUT_INVALID; |
57 | 0 | case AARCH64_REG_ZAB0: |
58 | 0 | return AARCH64LAYOUT_VL_B; |
59 | 0 | case AARCH64_REG_ZAH0: |
60 | 0 | case AARCH64_REG_ZAH1: |
61 | 0 | return AARCH64LAYOUT_VL_H; |
62 | 0 | case AARCH64_REG_ZAS0: |
63 | 0 | case AARCH64_REG_ZAS1: |
64 | 0 | case AARCH64_REG_ZAS2: |
65 | 0 | case AARCH64_REG_ZAS3: |
66 | 0 | return AARCH64LAYOUT_VL_S; |
67 | 0 | case AARCH64_REG_ZAD0: |
68 | 0 | case AARCH64_REG_ZAD1: |
69 | 0 | case AARCH64_REG_ZAD2: |
70 | 0 | case AARCH64_REG_ZAD3: |
71 | 0 | case AARCH64_REG_ZAD4: |
72 | 0 | case AARCH64_REG_ZAD5: |
73 | 0 | case AARCH64_REG_ZAD6: |
74 | 0 | case AARCH64_REG_ZAD7: |
75 | 0 | return AARCH64LAYOUT_VL_D; |
76 | 0 | case AARCH64_REG_ZAQ0: |
77 | 0 | case AARCH64_REG_ZAQ1: |
78 | 0 | case AARCH64_REG_ZAQ2: |
79 | 0 | case AARCH64_REG_ZAQ3: |
80 | 0 | case AARCH64_REG_ZAQ4: |
81 | 0 | case AARCH64_REG_ZAQ5: |
82 | 0 | case AARCH64_REG_ZAQ6: |
83 | 0 | case AARCH64_REG_ZAQ7: |
84 | 0 | case AARCH64_REG_ZAQ8: |
85 | 0 | case AARCH64_REG_ZAQ9: |
86 | 0 | case AARCH64_REG_ZAQ10: |
87 | 0 | case AARCH64_REG_ZAQ11: |
88 | 0 | case AARCH64_REG_ZAQ12: |
89 | 0 | case AARCH64_REG_ZAQ13: |
90 | 0 | case AARCH64_REG_ZAQ14: |
91 | 0 | case AARCH64_REG_ZAQ15: |
92 | 0 | return AARCH64LAYOUT_VL_Q; |
93 | 0 | case AARCH64_REG_ZA: |
94 | 0 | return AARCH64LAYOUT_VL_COMPLETE; |
95 | 0 | } |
96 | 0 | } |
97 | | |
98 | | void AArch64_init_mri(MCRegisterInfo *MRI) |
99 | 7.73k | { |
100 | 7.73k | MCRegisterInfo_InitMCRegisterInfo( |
101 | 7.73k | MRI, AArch64RegDesc, AARCH64_REG_ENDING, 0, 0, |
102 | 7.73k | AArch64MCRegisterClasses, ARR_SIZE(AArch64MCRegisterClasses), 0, |
103 | 7.73k | 0, AArch64RegDiffLists, 0, AArch64SubRegIdxLists, |
104 | 7.73k | ARR_SIZE(AArch64SubRegIdxLists), 0); |
105 | 7.73k | } |
106 | | |
107 | | |
108 | | /// Sets up a new SME matrix operand at the currently active detail operand. |
109 | | static void setup_sme_operand(MCInst *MI) |
110 | 15.2k | { |
111 | 15.2k | if (!detail_is_set(MI)) |
112 | 0 | return; |
113 | | |
114 | 15.2k | AArch64_get_detail_op(MI, 0)->type = AARCH64_OP_SME; |
115 | 15.2k | AArch64_get_detail_op(MI, 0)->sme.type = AARCH64_SME_OP_INVALID; |
116 | 15.2k | AArch64_get_detail_op(MI, 0)->sme.tile = AARCH64_REG_INVALID; |
117 | 15.2k | AArch64_get_detail_op(MI, 0)->sme.slice_reg = AARCH64_REG_INVALID; |
118 | 15.2k | AArch64_get_detail_op(MI, 0)->sme.slice_offset.imm = AARCH64_SLICE_IMM_INVALID; |
119 | 15.2k | AArch64_get_detail_op(MI, 0)->sme.slice_offset.imm_range.first = AARCH64_SLICE_IMM_RANGE_INVALID; |
120 | 15.2k | AArch64_get_detail_op(MI, 0)->sme.slice_offset.imm_range.offset = AARCH64_SLICE_IMM_RANGE_INVALID; |
121 | 15.2k | } |
122 | | |
123 | | static void setup_pred_operand(MCInst *MI) |
124 | 34.8k | { |
125 | 34.8k | if (!detail_is_set(MI)) |
126 | 0 | return; |
127 | | |
128 | 34.8k | AArch64_get_detail_op(MI, 0)->type = AARCH64_OP_PRED; |
129 | 34.8k | AArch64_get_detail_op(MI, 0)->pred.imm_index = -1; |
130 | 34.8k | } |
131 | | |
132 | | const insn_map aarch64_insns[] = { |
133 | | #include "AArch64GenCSMappingInsn.inc" |
134 | | }; |
135 | | |
136 | | static const name_map insn_alias_mnem_map[] = { |
137 | | #include "AArch64GenCSAliasMnemMap.inc" |
138 | | { AARCH64_INS_ALIAS_CFP, "cfp" }, |
139 | | { AARCH64_INS_ALIAS_DVP, "dvp" }, |
140 | | { AARCH64_INS_ALIAS_COSP, "cosp" }, |
141 | | { AARCH64_INS_ALIAS_CPP, "cpp" }, |
142 | | { AARCH64_INS_ALIAS_IC, "ic" }, |
143 | | { AARCH64_INS_ALIAS_DC, "dc" }, |
144 | | { AARCH64_INS_ALIAS_AT, "at" }, |
145 | | { AARCH64_INS_ALIAS_TLBI, "tlbi" }, |
146 | | { AARCH64_INS_ALIAS_TLBIP, "tlbip" }, |
147 | | { AARCH64_INS_ALIAS_RPRFM, "rprfm" }, |
148 | | { AARCH64_INS_ALIAS_LSL, "lsl" }, |
149 | | { AARCH64_INS_ALIAS_SBFX, "sbfx" }, |
150 | | { AARCH64_INS_ALIAS_UBFX, "ubfx" }, |
151 | | { AARCH64_INS_ALIAS_SBFIZ, "sbfiz" }, |
152 | | { AARCH64_INS_ALIAS_UBFIZ, "ubfiz" }, |
153 | | { AARCH64_INS_ALIAS_BFC, "bfc" }, |
154 | | { AARCH64_INS_ALIAS_BFI, "bfi" }, |
155 | | { AARCH64_INS_ALIAS_BFXIL, "bfxil" }, |
156 | | { AARCH64_INS_ALIAS_END, NULL }, |
157 | | }; |
158 | | |
159 | | static const char *get_custom_reg_alias(unsigned reg) |
160 | 38.6k | { |
161 | 38.6k | switch (reg) { |
162 | 301 | case AARCH64_REG_X29: |
163 | 301 | return "fp"; |
164 | 1.25k | case AARCH64_REG_X30: |
165 | 1.25k | return "lr"; |
166 | 38.6k | } |
167 | 37.0k | return NULL; |
168 | 38.6k | } |
169 | | |
170 | | /// Very annoyingly, LLVM hard-codes the vector layout suffixes into the asm string. |
171 | | /// In this function we check for these cases and add the vector layout/arrangement |
172 | | /// specifier. |
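| | /// For example, for "add v0.8h, v1.8h, v2.8h" each ".8h" suffix is parsed below |
| | /// and the matching register operand gets the H layout, with the element count 8 |
| | /// shifted into the upper bits (num << 8) of the vas value. |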
173 | | void AArch64_add_vas(MCInst *MI, const SStream *OS) |
174 | 181k | { |
175 | 181k | if (!detail_is_set(MI)) { |
176 | 0 | return; |
177 | 0 | } |
178 | | |
179 | 181k | if (AArch64_get_detail(MI)->op_count == 0) { |
180 | 405 | return; |
181 | 405 | } |
182 | 181k | if (MCInst_getOpcode(MI) == AArch64_MUL53HI || MCInst_getOpcode(MI) == AArch64_MUL53LO) { |
183 | | // Proprietary Apple instructions. |
184 | 0 | AArch64_get_detail(MI)->operands[0].vas = AARCH64LAYOUT_VL_2D; |
185 | 0 | AArch64_get_detail(MI)->operands[1].vas = AARCH64LAYOUT_VL_2D; |
186 | 0 | return; |
187 | 0 | } |
188 | | |
189 | | // Search for r".[0-9]{1,2}[bhsdq]\W" |
190 | | // with a poor man's regex. |
191 | 181k | const char *vl_ptr = strchr(OS->buffer, '.'); |
192 | 386k | while (vl_ptr) { |
193 | | // Number after dot? |
194 | 205k | unsigned num = 0; |
195 | 205k | if (strchr("1248", vl_ptr[1])) { |
196 | 41.6k | num = atoi(vl_ptr + 1); |
197 | 41.6k | vl_ptr = num > 9 ? vl_ptr + 3 : vl_ptr + 2; |
198 | 164k | } else { |
199 | 164k | vl_ptr++; |
200 | 164k | } |
201 | | |
202 | | // Layout letter |
203 | 205k | char letter = '\0'; |
204 | 205k | if (strchr("bhsdq", vl_ptr[0])) { |
205 | 198k | letter = vl_ptr[0]; |
206 | 198k | } |
207 | 205k | if (!letter) { |
208 | 7.59k | goto next_dot_continue; |
209 | 7.59k | } |
210 | | |
211 | 198k | AArch64Layout_VectorLayout vl = AARCH64LAYOUT_INVALID; |
212 | 198k | switch (letter) { |
213 | 0 | default: |
214 | 0 | CS_ASSERT_RET(0 && "Unhandled vector layout letter."); |
215 | 0 | return; |
216 | 47.8k | case 'b': |
217 | 47.8k | vl = AARCH64LAYOUT_VL_B; |
218 | 47.8k | break; |
219 | 49.8k | case 'h': |
220 | 49.8k | vl = AARCH64LAYOUT_VL_H; |
221 | 49.8k | break; |
222 | 45.6k | case 's': |
223 | 45.6k | vl = AARCH64LAYOUT_VL_S; |
224 | 45.6k | break; |
225 | 52.1k | case 'd': |
226 | 52.1k | vl = AARCH64LAYOUT_VL_D; |
227 | 52.1k | break; |
228 | 2.53k | case 'q': |
229 | 2.53k | vl = AARCH64LAYOUT_VL_Q; |
230 | 2.53k | break; |
231 | 198k | } |
232 | 198k | vl |= (num << 8); |
233 | | |
234 | | // Determine the op index by searching for trailing commas after the op string |
235 | 198k | uint32_t op_idx = 0; |
236 | 198k | const char *comma_ptr = strchr(OS->buffer, ','); |
237 | 198k | ; |
238 | 436k | while (comma_ptr && comma_ptr < vl_ptr) { |
239 | 238k | ++op_idx; |
240 | 238k | comma_ptr = strchr(comma_ptr + 1, ','); |
241 | 238k | } |
242 | 198k | if (!comma_ptr) { |
243 | | // Last op doesn't have a trailing comma. |
244 | 31.6k | op_idx = AArch64_get_detail(MI)->op_count - 1; |
245 | 31.6k | } |
246 | 198k | if (op_idx >= AArch64_get_detail(MI)->op_count) { |
247 | | // A memory operand with a comma in [base, disp] |
248 | 5.22k | op_idx = AArch64_get_detail(MI)->op_count - 1; |
249 | 5.22k | } |
250 | | |
251 | | // Search for the operand this one belongs to. |
252 | 198k | cs_aarch64_op *op = &AArch64_get_detail(MI)->operands[op_idx]; |
253 | 198k | if ((op->type != AARCH64_OP_REG && |
254 | 198k | op->type != AARCH64_OP_SME) || |
255 | 198k | op->vas != AARCH64LAYOUT_INVALID) { |
256 | 165k | goto next_dot_continue; |
257 | 165k | } |
258 | 32.6k | op->vas = vl; |
259 | | |
260 | 205k | next_dot_continue: |
261 | 205k | vl_ptr = strchr(vl_ptr + 1, '.'); |
262 | 205k | } |
263 | 181k | } |
264 | | |
265 | | const char *AArch64_reg_name(csh handle, unsigned int reg) |
266 | 38.6k | { |
267 | 38.6k | int syntax_opt = ((cs_struct *)(uintptr_t)handle)->syntax; |
268 | 38.6k | const char *alias = get_custom_reg_alias(reg); |
269 | 38.6k | if ((syntax_opt & CS_OPT_SYNTAX_CS_REG_ALIAS) && alias) |
270 | 0 | return alias; |
271 | | |
272 | 38.6k | if (((cs_struct *)(uintptr_t)handle)->syntax & |
273 | 38.6k | CS_OPT_SYNTAX_NOREGNAME) { |
274 | 0 | return AArch64_LLVM_getRegisterName(reg, AArch64_NoRegAltName); |
275 | 0 | } |
276 | | // TODO Add options for the other register names |
277 | 38.6k | return AArch64_LLVM_getRegisterName(reg, AArch64_NoRegAltName); |
278 | 38.6k | } |
279 | | |
280 | | void AArch64_setup_op(cs_aarch64_op *op) |
281 | 2.98M | { |
282 | 2.98M | memset(op, 0, sizeof(cs_aarch64_op)); |
283 | 2.98M | op->type = AARCH64_OP_INVALID; |
284 | 2.98M | op->vector_index = -1; |
285 | 2.98M | } |
286 | | |
287 | | void AArch64_init_cs_detail(MCInst *MI) |
288 | 185k | { |
289 | 185k | if (detail_is_set(MI)) { |
290 | 185k | memset(get_detail(MI), 0, |
291 | 185k | offsetof(cs_detail, aarch64) + sizeof(cs_aarch64)); |
292 | 3.15M | for (int i = 0; i < ARR_SIZE(AArch64_get_detail(MI)->operands); |
293 | 2.97M | i++) |
294 | 2.97M | AArch64_setup_op(&AArch64_get_detail(MI)->operands[i]); |
295 | 185k | AArch64_get_detail(MI)->cc = AArch64CC_Invalid; |
296 | 185k | } |
297 | 185k | } |
298 | | |
299 | | /// Unfortunately, the AArch64 definitions do not indicate in any way |
300 | | /// (the exception being the instruction identifiers) whether a memory access |
301 | | /// is post- or pre-indexed. |
302 | | /// So the only generic way to determine whether the memory access uses |
303 | | /// post-indexed addressing mode is to search for "<membase>], #<memdisp>" in |
304 | | /// @p OS. |
305 | | /// Searching the asm string to determine such a property is enormously ugly |
306 | | /// and wastes resources. |
307 | | /// Sorry, I know and do feel bad about it. But for now it works. |
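| | /// For example, for "ldr x0, [x1], #8" the pattern built below is "x1], #8" |
| | /// (base register name, "], " and the bang immediate), which is then searched |
| | /// for in the printed asm buffer. |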
308 | | static bool AArch64_check_post_index_am(const MCInst *MI, const SStream *OS) |
309 | 181k | { |
310 | 181k | if (AArch64_get_detail(MI)->post_index) { |
311 | 0 | return true; |
312 | 0 | } |
313 | 181k | cs_aarch64_op *memop = NULL; |
314 | 626k | for (int i = 0; i < AArch64_get_detail(MI)->op_count; ++i) { |
315 | 511k | if (AArch64_get_detail(MI)->operands[i].type & CS_OP_MEM) { |
316 | 66.5k | memop = &AArch64_get_detail(MI)->operands[i]; |
317 | 66.5k | break; |
318 | 66.5k | } |
319 | 511k | } |
320 | 181k | if (!memop) |
321 | 115k | return false; |
322 | 66.5k | if (memop->mem.base == AARCH64_REG_INVALID) { |
323 | | // Load/Store from/to label. Has no register base. |
324 | 3.46k | return false; |
325 | 3.46k | } |
326 | 63.0k | const char *membase = AArch64_LLVM_getRegisterName( |
327 | 63.0k | memop->mem.base, AArch64_NoRegAltName); |
328 | 63.0k | int64_t memdisp = memop->mem.disp; |
329 | 63.0k | SStream pattern = { 0 }; |
330 | 63.0k | SStream_concat(&pattern, membase); |
331 | 63.0k | SStream_concat(&pattern, "], "); |
332 | 63.0k | printInt32Bang(&pattern, memdisp); |
333 | 63.0k | return strstr(OS->buffer, pattern.buffer) != NULL; |
334 | 66.5k | } |
335 | | |
336 | | static void AArch64_check_updates_flags(MCInst *MI) |
337 | 181k | { |
338 | 181k | #ifndef CAPSTONE_DIET |
339 | 181k | if (!detail_is_set(MI)) |
340 | 0 | return; |
341 | 181k | cs_detail *detail = get_detail(MI); |
342 | | // Implicitly written registers |
343 | 200k | for (int i = 0; i < detail->regs_write_count; ++i) { |
344 | 27.6k | if (detail->regs_write[i] == 0) |
345 | 0 | break; |
346 | 46.0k | for (int j = 0; j < ARR_SIZE(aarch64_flag_regs); ++j) { |
347 | 27.6k | if (detail->regs_write[i] == aarch64_flag_regs[j]) { |
348 | 9.26k | detail->aarch64.update_flags = true; |
349 | 9.26k | return; |
350 | 9.26k | } |
351 | 27.6k | } |
352 | 27.6k | } |
353 | 668k | for (int i = 0; i < detail->aarch64.op_count; ++i) { |
354 | 496k | if (detail->aarch64.operands[i].type == AARCH64_OP_SYSREG && |
355 | 496k | detail->aarch64.operands[i].sysop.sub_type == |
356 | 4.79k | AARCH64_OP_REG_MSR) { |
357 | 17.3k | for (int j = 0; j < ARR_SIZE(aarch64_flag_sys_regs); |
358 | 14.3k | ++j) |
359 | 14.4k | if (detail->aarch64.operands[i] |
360 | 14.4k | .sysop.reg.sysreg == |
361 | 14.4k | aarch64_flag_sys_regs[j]) { |
362 | 39 | detail->aarch64.update_flags = true; |
363 | 39 | return; |
364 | 39 | } |
365 | 493k | } else if (detail->aarch64.operands[i].type == AARCH64_OP_REG && |
366 | 493k | detail->aarch64.operands[i].access & CS_AC_WRITE) { |
367 | 289k | for (int j = 0; j < ARR_SIZE(aarch64_flag_regs); ++j) |
368 | 144k | if (detail->aarch64.operands[i].reg == |
369 | 144k | aarch64_flag_regs[j]) { |
370 | 0 | detail->aarch64.update_flags = true; |
371 | 0 | return; |
372 | 0 | } |
373 | 144k | } |
374 | 496k | } |
375 | 172k | #endif // CAPSTONE_DIET |
376 | 172k | } |
377 | | |
378 | 151 | static aarch64_shifter id_to_shifter(unsigned Opcode) { |
379 | 151 | switch (Opcode) { |
380 | 0 | default: |
381 | 0 | return AARCH64_SFT_INVALID; |
382 | 23 | case AArch64_RORVXr: |
383 | 33 | case AArch64_RORVWr: |
384 | 33 | return AARCH64_SFT_ROR_REG; |
385 | 29 | case AArch64_LSRVXr: |
386 | 39 | case AArch64_LSRVWr: |
387 | 39 | return AARCH64_SFT_LSR_REG; |
388 | 19 | case AArch64_LSLVXr: |
389 | 29 | case AArch64_LSLVWr: |
390 | 29 | return AARCH64_SFT_LSL_REG; |
391 | 23 | case AArch64_ASRVXr: |
392 | 50 | case AArch64_ASRVWr: |
393 | 50 | return AARCH64_SFT_ASR_REG; |
394 | 151 | } |
395 | 151 | } |
396 | | |
397 | | static void add_non_alias_details(MCInst *MI) |
398 | 156k | { |
399 | 156k | unsigned Opcode = MCInst_getOpcode(MI); |
400 | 156k | switch (Opcode) { |
401 | 149k | default: |
402 | 149k | break; |
403 | 149k | case AArch64_RORVXr: |
404 | 33 | case AArch64_RORVWr: |
405 | 62 | case AArch64_LSRVXr: |
406 | 72 | case AArch64_LSRVWr: |
407 | 91 | case AArch64_LSLVXr: |
408 | 101 | case AArch64_LSLVWr: |
409 | 124 | case AArch64_ASRVXr: |
410 | 151 | case AArch64_ASRVWr: |
411 | 151 | if (AArch64_get_detail(MI)->op_count != 3) { |
412 | 0 | return; |
413 | 0 | } |
414 | 151 | CS_ASSERT_RET(AArch64_get_detail_op(MI, -1)->type == AARCH64_OP_REG); |
415 | | |
416 | | // The shift by register instructions don't set the shift value properly. |
417 | | // Correct it here. |
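| | // For example, for "lsr x0, x1, x2" the third register operand (x2) becomes |
| | // an LSR-by-register shift on the second operand and is removed from the |
| | // operand list. |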
418 | 151 | uint64_t shift = AArch64_get_detail_op(MI, -1)->reg; |
419 | 151 | cs_aarch64_op *op1 = AArch64_get_detail_op(MI, -2); |
420 | 151 | op1->shift.type = id_to_shifter(Opcode); |
421 | 151 | op1->shift.value = shift; |
422 | 151 | AArch64_dec_op_count(MI); |
423 | 151 | break; |
424 | 131 | case AArch64_FCMPDri: |
425 | 362 | case AArch64_FCMPEDri: |
426 | 1.39k | case AArch64_FCMPEHri: |
427 | 1.44k | case AArch64_FCMPESri: |
428 | 2.28k | case AArch64_FCMPHri: |
429 | 2.42k | case AArch64_FCMPSri: |
430 | 2.42k | AArch64_insert_detail_op_reg_at(MI, -1, AARCH64_REG_XZR, |
431 | 2.42k | CS_AC_READ); |
432 | 2.42k | break; |
433 | 115 | case AArch64_CMEQv16i8rz: |
434 | 151 | case AArch64_CMEQv1i64rz: |
435 | 156 | case AArch64_CMEQv2i32rz: |
436 | 164 | case AArch64_CMEQv2i64rz: |
437 | 171 | case AArch64_CMEQv4i16rz: |
438 | 241 | case AArch64_CMEQv4i32rz: |
439 | 328 | case AArch64_CMEQv8i16rz: |
440 | 372 | case AArch64_CMEQv8i8rz: |
441 | 421 | case AArch64_CMGEv16i8rz: |
442 | 439 | case AArch64_CMGEv1i64rz: |
443 | 496 | case AArch64_CMGEv2i32rz: |
444 | 529 | case AArch64_CMGEv2i64rz: |
445 | 543 | case AArch64_CMGEv4i16rz: |
446 | 550 | case AArch64_CMGEv4i32rz: |
447 | 565 | case AArch64_CMGEv8i16rz: |
448 | 973 | case AArch64_CMGEv8i8rz: |
449 | 1.01k | case AArch64_CMGTv16i8rz: |
450 | 1.04k | case AArch64_CMGTv1i64rz: |
451 | 1.04k | case AArch64_CMGTv2i32rz: |
452 | 1.16k | case AArch64_CMGTv2i64rz: |
453 | 1.16k | case AArch64_CMGTv4i16rz: |
454 | 1.19k | case AArch64_CMGTv4i32rz: |
455 | 1.22k | case AArch64_CMGTv8i16rz: |
456 | 1.44k | case AArch64_CMGTv8i8rz: |
457 | 1.47k | case AArch64_CMLEv16i8rz: |
458 | 1.47k | case AArch64_CMLEv1i64rz: |
459 | 1.48k | case AArch64_CMLEv2i32rz: |
460 | 1.49k | case AArch64_CMLEv2i64rz: |
461 | 1.51k | case AArch64_CMLEv4i16rz: |
462 | 1.52k | case AArch64_CMLEv4i32rz: |
463 | 1.71k | case AArch64_CMLEv8i16rz: |
464 | 1.80k | case AArch64_CMLEv8i8rz: |
465 | 1.82k | case AArch64_CMLTv16i8rz: |
466 | 1.83k | case AArch64_CMLTv1i64rz: |
467 | 1.85k | case AArch64_CMLTv2i32rz: |
468 | 1.99k | case AArch64_CMLTv2i64rz: |
469 | 1.99k | case AArch64_CMLTv4i16rz: |
470 | 2.01k | case AArch64_CMLTv4i32rz: |
471 | 2.03k | case AArch64_CMLTv8i16rz: |
472 | 2.04k | case AArch64_CMLTv8i8rz: |
473 | 2.04k | AArch64_insert_detail_op_imm_at(MI, -1, 0); |
474 | 2.04k | break; |
475 | 47 | case AArch64_FCMEQ_PPzZ0_D: |
476 | 106 | case AArch64_FCMEQ_PPzZ0_H: |
477 | 133 | case AArch64_FCMEQ_PPzZ0_S: |
478 | 427 | case AArch64_FCMEQv1i16rz: |
479 | 436 | case AArch64_FCMEQv1i32rz: |
480 | 477 | case AArch64_FCMEQv1i64rz: |
481 | 503 | case AArch64_FCMEQv2i32rz: |
482 | 533 | case AArch64_FCMEQv2i64rz: |
483 | 542 | case AArch64_FCMEQv4i16rz: |
484 | 559 | case AArch64_FCMEQv4i32rz: |
485 | 604 | case AArch64_FCMEQv8i16rz: |
486 | 641 | case AArch64_FCMGE_PPzZ0_D: |
487 | 651 | case AArch64_FCMGE_PPzZ0_H: |
488 | 812 | case AArch64_FCMGE_PPzZ0_S: |
489 | 846 | case AArch64_FCMGEv1i16rz: |
490 | 853 | case AArch64_FCMGEv1i32rz: |
491 | 856 | case AArch64_FCMGEv1i64rz: |
492 | 904 | case AArch64_FCMGEv2i32rz: |
493 | 913 | case AArch64_FCMGEv2i64rz: |
494 | 933 | case AArch64_FCMGEv4i16rz: |
495 | 961 | case AArch64_FCMGEv4i32rz: |
496 | 971 | case AArch64_FCMGEv8i16rz: |
497 | 989 | case AArch64_FCMGT_PPzZ0_D: |
498 | 991 | case AArch64_FCMGT_PPzZ0_H: |
499 | 1.11k | case AArch64_FCMGT_PPzZ0_S: |
500 | 1.14k | case AArch64_FCMGTv1i16rz: |
501 | 1.19k | case AArch64_FCMGTv1i32rz: |
502 | 1.23k | case AArch64_FCMGTv1i64rz: |
503 | 1.30k | case AArch64_FCMGTv2i32rz: |
504 | 1.35k | case AArch64_FCMGTv2i64rz: |
505 | 1.37k | case AArch64_FCMGTv4i16rz: |
506 | 1.40k | case AArch64_FCMGTv4i32rz: |
507 | 1.44k | case AArch64_FCMGTv8i16rz: |
508 | 1.47k | case AArch64_FCMLE_PPzZ0_D: |
509 | 1.48k | case AArch64_FCMLE_PPzZ0_H: |
510 | 1.68k | case AArch64_FCMLE_PPzZ0_S: |
511 | 1.69k | case AArch64_FCMLEv1i16rz: |
512 | 1.70k | case AArch64_FCMLEv1i32rz: |
513 | 1.70k | case AArch64_FCMLEv1i64rz: |
514 | 1.89k | case AArch64_FCMLEv2i32rz: |
515 | 1.91k | case AArch64_FCMLEv2i64rz: |
516 | 1.92k | case AArch64_FCMLEv4i16rz: |
517 | 1.93k | case AArch64_FCMLEv4i32rz: |
518 | 1.94k | case AArch64_FCMLEv8i16rz: |
519 | 1.95k | case AArch64_FCMLT_PPzZ0_D: |
520 | 2.02k | case AArch64_FCMLT_PPzZ0_H: |
521 | 2.02k | case AArch64_FCMLT_PPzZ0_S: |
522 | 2.13k | case AArch64_FCMLTv1i16rz: |
523 | 2.13k | case AArch64_FCMLTv1i32rz: |
524 | 2.19k | case AArch64_FCMLTv1i64rz: |
525 | 2.23k | case AArch64_FCMLTv2i32rz: |
526 | 2.26k | case AArch64_FCMLTv2i64rz: |
527 | 2.28k | case AArch64_FCMLTv4i16rz: |
528 | 2.32k | case AArch64_FCMLTv4i32rz: |
529 | 2.77k | case AArch64_FCMLTv8i16rz: |
530 | 2.79k | case AArch64_FCMNE_PPzZ0_D: |
531 | 2.79k | case AArch64_FCMNE_PPzZ0_H: |
532 | 2.80k | case AArch64_FCMNE_PPzZ0_S: { |
533 | 2.80k | aarch64_sysop sysop = { 0 }; |
534 | 2.80k | sysop.imm.exactfpimm = AARCH64_EXACTFPIMM_ZERO; |
535 | 2.80k | sysop.sub_type = AARCH64_OP_EXACTFPIMM; |
536 | 2.80k | AArch64_insert_detail_op_sys(MI, -1, sysop, AARCH64_OP_SYSIMM); |
537 | 2.80k | break; |
538 | 2.79k | } |
539 | 156k | } |
540 | 156k | } |
541 | | |
542 | | #define ADD_ZA0_S \ |
543 | 305 | { aarch64_op_sme za0_op = { \ |
544 | 305 | .type = AARCH64_SME_OP_TILE, \ |
545 | 305 | .tile = AARCH64_REG_ZAS0, \ |
546 | 305 | .slice_reg = AARCH64_REG_INVALID, \ |
547 | 305 | .slice_offset = { -1 }, \ |
548 | 305 | .has_range_offset = false, \ |
549 | 305 | .is_vertical = false, \ |
550 | 305 | }; \ |
551 | 305 | AArch64_insert_detail_op_sme(MI, -1, za0_op); \ |
552 | 305 | AArch64_get_detail_op(MI, -1)->vas = AARCH64LAYOUT_VL_S; \ |
553 | 305 | AArch64_get_detail_op(MI, -1)->access = CS_AC_WRITE; \ |
554 | 305 | } |
555 | | #define ADD_ZA1_S \ |
556 | 600 | { aarch64_op_sme za1_op = { \ |
557 | 600 | .type = AARCH64_SME_OP_TILE, \ |
558 | 600 | .tile = AARCH64_REG_ZAS1, \ |
559 | 600 | .slice_reg = AARCH64_REG_INVALID, \ |
560 | 600 | .slice_offset = { -1 }, \ |
561 | 600 | .has_range_offset = false, \ |
562 | 600 | .is_vertical = false, \ |
563 | 600 | }; \ |
564 | 600 | AArch64_insert_detail_op_sme(MI, -1, za1_op); \ |
565 | 600 | AArch64_get_detail_op(MI, -1)->vas = AARCH64LAYOUT_VL_S; \ |
566 | 600 | AArch64_get_detail_op(MI, -1)->access = CS_AC_WRITE; \ |
567 | 600 | } |
568 | | #define ADD_ZA2_S \ |
569 | 558 | { aarch64_op_sme za2_op = { \ |
570 | 558 | .type = AARCH64_SME_OP_TILE, \ |
571 | 558 | .tile = AARCH64_REG_ZAS2, \ |
572 | 558 | .slice_reg = AARCH64_REG_INVALID, \ |
573 | 558 | .slice_offset = { -1 }, \ |
574 | 558 | .has_range_offset = false, \ |
575 | 558 | .is_vertical = false, \ |
576 | 558 | }; \ |
577 | 558 | AArch64_insert_detail_op_sme(MI, -1, za2_op); \ |
578 | 558 | AArch64_get_detail_op(MI, -1)->vas = AARCH64LAYOUT_VL_S; \ |
579 | 558 | AArch64_get_detail_op(MI, -1)->access = CS_AC_WRITE; \ |
580 | 558 | } |
581 | | #define ADD_ZA3_S \ |
582 | 291 | { aarch64_op_sme za3_op = { \ |
583 | 291 | .type = AARCH64_SME_OP_TILE, \ |
584 | 291 | .tile = AARCH64_REG_ZAS3, \ |
585 | 291 | .slice_reg = AARCH64_REG_INVALID, \ |
586 | 291 | .slice_offset = { -1 }, \ |
587 | 291 | .has_range_offset = false, \ |
588 | 291 | .is_vertical = false, \ |
589 | 291 | }; \ |
590 | 291 | AArch64_insert_detail_op_sme(MI, -1, za3_op); \ |
591 | 291 | AArch64_get_detail_op(MI, -1)->vas = AARCH64LAYOUT_VL_S; \ |
592 | 291 | AArch64_get_detail_op(MI, -1)->access = CS_AC_WRITE; \ |
593 | 291 | } |
594 | | #define ADD_ZA \ |
595 | 22 | { aarch64_op_sme za_op = \ |
596 | 22 | { \ |
597 | 22 | .type = AARCH64_SME_OP_TILE, \ |
598 | 22 | .tile = AARCH64_REG_ZA, \ |
599 | 22 | .slice_reg = AARCH64_REG_INVALID, \ |
600 | 22 | .slice_offset = { -1 }, \ |
601 | 22 | .has_range_offset = false, \ |
602 | 22 | .is_vertical = false, \ |
603 | 22 | }; \ |
604 | 22 | AArch64_insert_detail_op_sme(MI, -1, za_op); \ |
605 | 22 | AArch64_get_detail_op(MI, -1)->access = CS_AC_WRITE; \ |
606 | 22 | } |
607 | | |
608 | | static void AArch64_add_not_defined_ops(MCInst *MI, const SStream *OS) |
609 | 181k | { |
610 | 181k | if (!detail_is_set(MI)) |
611 | 0 | return; |
612 | | |
613 | 181k | if (!MI->flat_insn->is_alias || !MI->flat_insn->usesAliasDetails) { |
614 | 156k | add_non_alias_details(MI); |
615 | 156k | return; |
616 | 156k | } |
617 | | |
618 | | // Alias details |
619 | 24.8k | switch (MI->flat_insn->alias_id) { |
620 | 21.3k | default: |
621 | 21.3k | return; |
622 | 21.3k | case AARCH64_INS_ALIAS_ROR: |
623 | 47 | if (AArch64_get_detail(MI)->op_count != 3) { |
624 | 0 | return; |
625 | 0 | } |
626 | | // The ROR alias doesn't set the shift value properly. |
627 | | // Correct it here. |
628 | 47 | bool reg_shift = AArch64_get_detail_op(MI, -1)->type == AARCH64_OP_REG; |
629 | 47 | uint64_t shift = reg_shift ? AArch64_get_detail_op(MI, -1)->reg : AArch64_get_detail_op(MI, -1)->imm; |
630 | 47 | cs_aarch64_op *op1 = AArch64_get_detail_op(MI, -2); |
631 | 47 | op1->shift.type = reg_shift ? AARCH64_SFT_ROR_REG : AARCH64_SFT_ROR; |
632 | 47 | op1->shift.value = shift; |
633 | 47 | AArch64_dec_op_count(MI); |
634 | 47 | break; |
635 | 22 | case AARCH64_INS_ALIAS_FMOV: |
636 | 22 | if (AArch64_get_detail_op(MI, -1)->type == AARCH64_OP_FP) { |
637 | 22 | break; |
638 | 22 | } |
639 | 0 | AArch64_insert_detail_op_float_at(MI, -1, 0.0f, CS_AC_READ); |
640 | 0 | break; |
641 | 63 | case AARCH64_INS_ALIAS_LD1: |
642 | 114 | case AARCH64_INS_ALIAS_LD1R: |
643 | 477 | case AARCH64_INS_ALIAS_LD2: |
644 | 656 | case AARCH64_INS_ALIAS_LD2R: |
645 | 856 | case AARCH64_INS_ALIAS_LD3: |
646 | 863 | case AARCH64_INS_ALIAS_LD3R: |
647 | 1.56k | case AARCH64_INS_ALIAS_LD4: |
648 | 1.72k | case AARCH64_INS_ALIAS_LD4R: |
649 | 1.87k | case AARCH64_INS_ALIAS_ST1: |
650 | 2.05k | case AARCH64_INS_ALIAS_ST2: |
651 | 2.06k | case AARCH64_INS_ALIAS_ST3: |
652 | 2.47k | case AARCH64_INS_ALIAS_ST4: { |
653 | | // Add post-index disp |
654 | 2.47k | const char *disp_off = strrchr(OS->buffer, '#'); |
655 | 2.47k | if (!disp_off) |
656 | 0 | return; |
657 | 2.47k | unsigned disp = atoi(disp_off + 1); |
658 | 2.47k | AArch64_get_detail_op(MI, -1)->type = AARCH64_OP_MEM; |
659 | 2.47k | AArch64_get_detail_op(MI, -1)->mem.base = |
660 | 2.47k | AArch64_get_detail_op(MI, -1)->reg; |
661 | 2.47k | AArch64_get_detail_op(MI, -1)->mem.disp = disp; |
662 | 2.47k | AArch64_get_detail(MI)->post_index = true; |
663 | 2.47k | break; |
664 | 2.47k | } |
665 | 2 | case AARCH64_INS_ALIAS_GCSB: |
666 | | // TODO |
667 | | // Only CSYNC is defined in LLVM. So we need to add it. |
668 | | // /* 2825 */ "gcsb dsync\0" |
669 | 2 | break; |
670 | 134 | case AARCH64_INS_ALIAS_SMSTART: |
671 | 205 | case AARCH64_INS_ALIAS_SMSTOP: { |
672 | 205 | const char *disp_off = NULL; |
673 | 205 | disp_off = strstr(OS->buffer, "smstart\tza"); |
674 | 205 | if (disp_off) { |
675 | 130 | aarch64_sysop sysop = { 0 }; |
676 | 130 | sysop.alias.svcr = AARCH64_SVCR_SVCRZA; |
677 | 130 | sysop.sub_type = AARCH64_OP_SVCR; |
678 | 130 | AArch64_insert_detail_op_sys(MI, -1, sysop, |
679 | 130 | AARCH64_OP_SYSALIAS); |
680 | 130 | return; |
681 | 130 | } |
682 | 75 | disp_off = strstr(OS->buffer, "smstart\tsm"); |
683 | 75 | if (disp_off) { |
684 | 4 | aarch64_sysop sysop = { 0 }; |
685 | 4 | sysop.alias.svcr = AARCH64_SVCR_SVCRSM; |
686 | 4 | sysop.sub_type = AARCH64_OP_SVCR; |
687 | 4 | AArch64_insert_detail_op_sys(MI, -1, sysop, |
688 | 4 | AARCH64_OP_SYSALIAS); |
689 | 4 | return; |
690 | 4 | } |
691 | 71 | break; |
692 | 75 | } |
693 | 797 | case AARCH64_INS_ALIAS_ZERO: { |
694 | | // It is ugly, but the hard-coded search patterns do it for now. |
695 | 797 | const char *disp_off = NULL; |
696 | | |
697 | 797 | disp_off = strstr(OS->buffer, "{za}"); |
698 | 797 | if (disp_off) { |
699 | 22 | ADD_ZA; |
700 | 22 | return; |
701 | 22 | } |
702 | 775 | disp_off = strstr(OS->buffer, "{za1.h}"); |
703 | 775 | if (disp_off) { |
704 | 35 | aarch64_op_sme op = |
705 | 35 | { |
706 | 35 | .type = AARCH64_SME_OP_TILE, |
707 | 35 | .tile = AARCH64_REG_ZAH1, |
708 | 35 | .slice_reg = AARCH64_REG_INVALID, |
709 | 35 | .slice_offset = { -1 }, |
710 | 35 | .has_range_offset = false, |
711 | 35 | .is_vertical = false, |
712 | 35 | }; |
713 | 35 | AArch64_insert_detail_op_sme(MI, -1, op); |
714 | 35 | AArch64_get_detail_op(MI, -1)->vas = AARCH64LAYOUT_VL_H; |
715 | 35 | AArch64_get_detail_op(MI, -1)->access = CS_AC_WRITE; |
716 | 35 | return; |
717 | 35 | } |
718 | 740 | disp_off = strstr(OS->buffer, "{za0.h}"); |
719 | 740 | if (disp_off) { |
720 | 9 | aarch64_op_sme op = |
721 | 9 | { |
722 | 9 | .type = AARCH64_SME_OP_TILE, |
723 | 9 | .tile = AARCH64_REG_ZAH0, |
724 | 9 | .slice_reg = AARCH64_REG_INVALID, |
725 | 9 | .slice_offset = { -1 }, |
726 | 9 | .has_range_offset = false, |
727 | 9 | .is_vertical = false, |
728 | 9 | }; |
729 | 9 | AArch64_insert_detail_op_sme(MI, -1, op); |
730 | 9 | AArch64_get_detail_op(MI, -1)->vas = AARCH64LAYOUT_VL_H; |
731 | 9 | AArch64_get_detail_op(MI, -1)->access = CS_AC_WRITE; |
732 | 9 | return; |
733 | 9 | } |
734 | 731 | disp_off = strstr(OS->buffer, "{za0.s}"); |
735 | 731 | if (disp_off) { |
736 | 30 | ADD_ZA0_S; |
737 | 30 | return; |
738 | 30 | } |
739 | 701 | disp_off = strstr(OS->buffer, "{za1.s}"); |
740 | 701 | if (disp_off) { |
741 | 13 | ADD_ZA1_S; |
742 | 13 | return; |
743 | 13 | } |
744 | 688 | disp_off = strstr(OS->buffer, "{za2.s}"); |
745 | 688 | if (disp_off) { |
746 | 7 | ADD_ZA2_S; |
747 | 7 | return; |
748 | 7 | } |
749 | 681 | disp_off = strstr(OS->buffer, "{za3.s}"); |
750 | 681 | if (disp_off) { |
751 | 22 | ADD_ZA3_S; |
752 | 22 | return; |
753 | 22 | } |
754 | 659 | disp_off = strstr(OS->buffer, "{za0.s,za1.s}"); |
755 | 659 | if (disp_off) { |
756 | 45 | ADD_ZA0_S; |
757 | 45 | ADD_ZA1_S; |
758 | 45 | return; |
759 | 45 | } |
760 | 614 | disp_off = strstr(OS->buffer, "{za0.s,za3.s}"); |
761 | 614 | if (disp_off) { |
762 | 53 | ADD_ZA0_S; |
763 | 53 | ADD_ZA3_S; |
764 | 53 | return; |
765 | 53 | } |
766 | 561 | disp_off = strstr(OS->buffer, "{za1.s,za2.s}"); |
767 | 561 | if (disp_off) { |
768 | 183 | ADD_ZA1_S; |
769 | 183 | ADD_ZA2_S; |
770 | 183 | return; |
771 | 183 | } |
772 | 378 | disp_off = strstr(OS->buffer, "{za2.s,za3.s}"); |
773 | 378 | if (disp_off) { |
774 | 14 | ADD_ZA2_S; |
775 | 14 | ADD_ZA3_S; |
776 | 14 | return; |
777 | 14 | } |
778 | 364 | disp_off = strstr(OS->buffer, "{za0.s,za1.s,za2.s}"); |
779 | 364 | if (disp_off) { |
780 | 162 | ADD_ZA0_S; |
781 | 162 | ADD_ZA1_S; |
782 | 162 | ADD_ZA2_S; |
783 | 162 | return; |
784 | 162 | } |
785 | 202 | disp_off = strstr(OS->buffer, "{za0.s,za1.s,za3.s}"); |
786 | 202 | if (disp_off) { |
787 | 10 | ADD_ZA0_S; |
788 | 10 | ADD_ZA1_S; |
789 | 10 | ADD_ZA3_S; |
790 | 10 | return; |
791 | 10 | } |
792 | 192 | disp_off = strstr(OS->buffer, "{za0.s,za2.s,za3.s}"); |
793 | 192 | if (disp_off) { |
794 | 5 | ADD_ZA0_S; |
795 | 5 | ADD_ZA2_S; |
796 | 5 | ADD_ZA3_S; |
797 | 5 | return; |
798 | 5 | } |
799 | 187 | disp_off = strstr(OS->buffer, "{za1.s,za2.s,za3.s}"); |
800 | 187 | if (disp_off) { |
801 | 187 | ADD_ZA1_S; |
802 | 187 | ADD_ZA2_S; |
803 | 187 | ADD_ZA3_S; |
804 | 187 | return; |
805 | 187 | } |
806 | 0 | break; |
807 | 187 | } |
808 | 24.8k | } |
809 | 24.8k | } |
810 | | |
811 | | void AArch64_set_instr_map_data(MCInst *MI) |
812 | 185k | { |
813 | 185k | map_cs_id(MI, aarch64_insns, ARR_SIZE(aarch64_insns)); |
814 | 185k | map_implicit_reads(MI, aarch64_insns); |
815 | 185k | map_implicit_writes(MI, aarch64_insns); |
816 | 185k | map_groups(MI, aarch64_insns); |
817 | 185k | } |
818 | | |
819 | | bool AArch64_getInstruction(csh handle, const uint8_t *code, size_t code_len, |
820 | | MCInst *MI, uint16_t *size, uint64_t address, |
821 | | void *info) |
822 | 185k | { |
823 | 185k | AArch64_init_cs_detail(MI); |
824 | 185k | DecodeStatus Result = AArch64_LLVM_getInstruction(handle, code, code_len, MI, |
825 | 185k | size, address, |
826 | 185k | info); |
827 | 185k | AArch64_set_instr_map_data(MI); |
828 | 185k | if (Result == MCDisassembler_SoftFail) { |
829 | 6.58k | MCInst_setSoftFail(MI); |
830 | 6.58k | } |
831 | 185k | return Result != MCDisassembler_Fail; |
832 | 185k | } |
833 | | |
834 | | /// Patches the register names with Capstone-specific aliases. |
835 | | /// Those are common aliases for registers (e.g. x29 = fp) |
836 | | /// which are not set in LLVM. |
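| | /// For example, "stp x29, x30, [sp, #-16]!" is rewritten in place to |
| | /// "stp fp, lr, [sp, #-16]!". |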
837 | | static void patch_cs_reg_alias(char *asm_str) |
838 | 0 | { |
839 | 0 | bool skip_sub = false; |
840 | 0 | char *x29 = strstr(asm_str, "x29"); |
841 | 0 | if (x29 > asm_str && strstr(asm_str, "0x29") == (x29 - 1)) { |
842 | | // Check for hex prefix |
843 | 0 | skip_sub = true; |
844 | 0 | } |
845 | 0 | while (x29 && !skip_sub) { |
846 | 0 | x29[0] = 'f'; |
847 | 0 | x29[1] = 'p'; |
848 | 0 | memmove(x29 + 2, x29 + 3, strlen(x29 + 3)); |
849 | 0 | asm_str[strlen(asm_str) - 1] = '\0'; |
850 | 0 | x29 = strstr(asm_str, "x29"); |
851 | 0 | } |
852 | 0 | skip_sub = false; |
853 | 0 | char *x30 = strstr(asm_str, "x30"); |
854 | 0 | if (x30 > asm_str && strstr(asm_str, "0x30") == (x30 - 1)) { |
855 | | // Check for hex prefix |
856 | 0 | skip_sub = true; |
857 | 0 | } |
858 | 0 | while (x30 && !skip_sub) { |
859 | 0 | x30[0] = 'l'; |
860 | 0 | x30[1] = 'r'; |
861 | 0 | memmove(x30 + 2, x30 + 3, strlen(x30 + 3)); |
862 | 0 | asm_str[strlen(asm_str) - 1] = '\0'; |
863 | 0 | x30 = strstr(asm_str, "x30"); |
864 | 0 | } |
865 | 0 | } |
866 | | |
867 | | /// Adds groups to the instruction which are not defined in LLVM. |
868 | | static void AArch64_add_cs_groups(MCInst *MI) |
869 | 181k | { |
870 | 181k | unsigned Opcode = MI->flat_insn->id; |
871 | 181k | switch (Opcode) { |
872 | 177k | default: |
873 | 177k | return; |
874 | 177k | case AARCH64_INS_SVC: |
875 | 15 | add_group(MI, AARCH64_GRP_INT); |
876 | 15 | break; |
877 | 34 | case AARCH64_INS_SMC: |
878 | 3.65k | case AARCH64_INS_MSR: |
879 | 4.44k | case AARCH64_INS_MRS: |
880 | 4.44k | add_group(MI, AARCH64_GRP_PRIVILEGE); |
881 | 4.44k | break; |
882 | 39 | case AARCH64_INS_RET: |
883 | 70 | case AARCH64_INS_RETAA: |
884 | 167 | case AARCH64_INS_RETAB: |
885 | 167 | add_group(MI, AARCH64_GRP_RET); |
886 | 167 | break; |
887 | 181k | } |
888 | 181k | } |
889 | | |
890 | 181k | static void AArch64_correct_mem_access(MCInst *MI) { |
891 | 181k | if (!detail_is_set(MI)) |
892 | 0 | return; |
893 | 181k | cs_ac_type access = aarch64_insns[MI->Opcode].suppl_info.aarch64.mem_acc; |
894 | 181k | if (access == CS_AC_INVALID) { |
895 | 120k | return; |
896 | 120k | } |
897 | 129k | for (int i = 0; i < AArch64_get_detail(MI)->op_count; ++i) { |
898 | 128k | if (AArch64_get_detail_op(MI, -i)->type == AARCH64_OP_MEM) { |
899 | 60.4k | AArch64_get_detail_op(MI, -i)->access = access; |
900 | 60.4k | return; |
901 | 60.4k | } |
902 | 128k | } |
903 | 61.5k | } |
904 | | |
905 | | void AArch64_printer(MCInst *MI, SStream *O, void * /* MCRegisterInfo* */ info) |
906 | 181k | { |
907 | 181k | MCRegisterInfo *MRI = (MCRegisterInfo *)info; |
908 | 181k | MI->MRI = MRI; |
909 | 181k | MI->fillDetailOps = detail_is_set(MI); |
910 | 181k | MI->flat_insn->usesAliasDetails = map_use_alias_details(MI); |
911 | 181k | AArch64_LLVM_printInstruction(MI, O, info); |
912 | 181k | if (detail_is_set(MI)) { |
913 | 181k | if (AArch64_get_detail(MI)->is_doing_sme) { |
914 | | // Last operand still needs to be closed. |
915 | 2.58k | AArch64_get_detail(MI)->is_doing_sme = false; |
916 | 2.58k | AArch64_inc_op_count(MI); |
917 | 2.58k | } |
918 | 181k | AArch64_get_detail(MI)->post_index = |
919 | 181k | AArch64_check_post_index_am(MI, O); |
920 | 181k | } |
921 | 181k | AArch64_check_updates_flags(MI); |
922 | 181k | map_set_alias_id(MI, O, insn_alias_mnem_map, |
923 | 181k | ARR_SIZE(insn_alias_mnem_map) - 1); |
924 | 181k | int syntax_opt = MI->csh->syntax; |
925 | 181k | if (syntax_opt & CS_OPT_SYNTAX_CS_REG_ALIAS) |
926 | 0 | patch_cs_reg_alias(O->buffer); |
927 | 181k | AArch64_add_not_defined_ops(MI, O); |
928 | 181k | AArch64_add_cs_groups(MI); |
929 | 181k | AArch64_add_vas(MI, O); |
930 | 181k | AArch64_correct_mem_access(MI); |
931 | 181k | } |
932 | | |
933 | | // given internal insn id, return public instruction info |
934 | | void AArch64_get_insn_id(cs_struct *h, cs_insn *insn, unsigned int id) |
935 | 181k | { |
936 | | // Done after disassembly |
937 | 181k | return; |
938 | 181k | } |
939 | | |
940 | | static const char *const insn_name_maps[] = { |
941 | | #include "AArch64GenCSMappingInsnName.inc" |
942 | | }; |
943 | | |
944 | | const char *AArch64_insn_name(csh handle, unsigned int id) |
945 | 181k | { |
946 | 181k | #ifndef CAPSTONE_DIET |
947 | 181k | if (id < AARCH64_INS_ALIAS_END && id > AARCH64_INS_ALIAS_BEGIN) { |
948 | 0 | if (id - AARCH64_INS_ALIAS_BEGIN >= |
949 | 0 | ARR_SIZE(insn_alias_mnem_map)) |
950 | 0 | return NULL; |
951 | | |
952 | 0 | return insn_alias_mnem_map[id - AARCH64_INS_ALIAS_BEGIN - 1] |
953 | 0 | .name; |
954 | 0 | } |
955 | 181k | if (id >= AARCH64_INS_ENDING) |
956 | 0 | return NULL; |
957 | | |
958 | 181k | if (id < ARR_SIZE(insn_name_maps)) |
959 | 181k | return insn_name_maps[id]; |
960 | | |
961 | | // not found |
962 | 0 | return NULL; |
963 | | #else |
964 | | return NULL; |
965 | | #endif |
966 | 181k | } |
967 | | |
968 | | #ifndef CAPSTONE_DIET |
969 | | static const name_map group_name_maps[] = { |
970 | | // generic groups |
971 | | { AARCH64_GRP_INVALID, NULL }, |
972 | | { AARCH64_GRP_JUMP, "jump" }, |
973 | | { AARCH64_GRP_CALL, "call" }, |
974 | | { AARCH64_GRP_RET, "return" }, |
975 | | { AARCH64_GRP_PRIVILEGE, "privilege" }, |
976 | | { AARCH64_GRP_INT, "int" }, |
977 | | { AARCH64_GRP_BRANCH_RELATIVE, "branch_relative" }, |
978 | | |
979 | | // architecture-specific groups |
980 | | #include "AArch64GenCSFeatureName.inc" |
981 | | }; |
982 | | #endif |
983 | | |
984 | | const char *AArch64_group_name(csh handle, unsigned int id) |
985 | 223k | { |
986 | 223k | #ifndef CAPSTONE_DIET |
987 | 223k | return id2name(group_name_maps, ARR_SIZE(group_name_maps), id); |
988 | | #else |
989 | | return NULL; |
990 | | #endif |
991 | 223k | } |
992 | | |
993 | | // map instruction name to public instruction ID |
994 | | aarch64_insn AArch64_map_insn(const char *name) |
995 | 33.1k | { |
996 | 33.1k | unsigned int i; |
997 | | |
998 | 22.9M | for (i = 1; i < ARR_SIZE(insn_name_maps); i++) { |
999 | 22.9M | if (!strcmp(name, insn_name_maps[i])) |
1000 | 32.9k | return i; |
1001 | 22.9M | } |
1002 | | |
1003 | | // not found |
1004 | 208 | return AARCH64_INS_INVALID; |
1005 | 33.1k | } |
1006 | | |
1007 | | #ifndef CAPSTONE_DIET |
1008 | | |
1009 | | static const map_insn_ops insn_operands[] = { |
1010 | | #include "AArch64GenCSMappingInsnOp.inc" |
1011 | | }; |
1012 | | |
1013 | | void AArch64_reg_access(const cs_insn *insn, cs_regs regs_read, |
1014 | | uint8_t *regs_read_count, cs_regs regs_write, |
1015 | | uint8_t *regs_write_count) |
1016 | 0 | { |
1017 | 0 | uint8_t i; |
1018 | 0 | uint8_t read_count, write_count; |
1019 | 0 | cs_aarch64 *aarch64 = &(insn->detail->aarch64); |
1020 | |
1021 | 0 | read_count = insn->detail->regs_read_count; |
1022 | 0 | write_count = insn->detail->regs_write_count; |
1023 | | |
1024 | | // implicit registers |
1025 | 0 | memcpy(regs_read, insn->detail->regs_read, |
1026 | 0 | read_count * sizeof(insn->detail->regs_read[0])); |
1027 | 0 | memcpy(regs_write, insn->detail->regs_write, |
1028 | 0 | write_count * sizeof(insn->detail->regs_write[0])); |
1029 | | |
1030 | | // explicit registers |
1031 | 0 | for (i = 0; i < aarch64->op_count; i++) { |
1032 | 0 | cs_aarch64_op *op = &(aarch64->operands[i]); |
1033 | 0 | switch ((int)op->type) { |
1034 | 0 | case AARCH64_OP_REG: |
1035 | 0 | if ((op->access & CS_AC_READ) && |
1036 | 0 | !arr_exist(regs_read, read_count, op->reg)) { |
1037 | 0 | regs_read[read_count] = (uint16_t)op->reg; |
1038 | 0 | read_count++; |
1039 | 0 | } |
1040 | 0 | if ((op->access & CS_AC_WRITE) && |
1041 | 0 | !arr_exist(regs_write, write_count, op->reg)) { |
1042 | 0 | regs_write[write_count] = (uint16_t)op->reg; |
1043 | 0 | write_count++; |
1044 | 0 | } |
1045 | 0 | break; |
1046 | 0 | case AARCH64_OP_MEM: |
1047 | | // registers that appear in memory references are always read |
1048 | 0 | if ((op->mem.base != AARCH64_REG_INVALID) && |
1049 | 0 | !arr_exist(regs_read, read_count, op->mem.base)) { |
1050 | 0 | regs_read[read_count] = (uint16_t)op->mem.base; |
1051 | 0 | read_count++; |
1052 | 0 | } |
1053 | 0 | if ((op->mem.index != AARCH64_REG_INVALID) && |
1054 | 0 | !arr_exist(regs_read, read_count, op->mem.index)) { |
1055 | 0 | regs_read[read_count] = (uint16_t)op->mem.index; |
1056 | 0 | read_count++; |
1057 | 0 | } |
1058 | 0 | if ((insn->detail->writeback) && |
1059 | 0 | (op->mem.base != AARCH64_REG_INVALID) && |
1060 | 0 | !arr_exist(regs_write, write_count, op->mem.base)) { |
1061 | 0 | regs_write[write_count] = |
1062 | 0 | (uint16_t)op->mem.base; |
1063 | 0 | write_count++; |
1064 | 0 | } |
1065 | 0 | break; |
1066 | 0 | case AARCH64_OP_SME: |
1067 | 0 | if ((op->access & CS_AC_READ) && |
1068 | 0 | (op->sme.tile != AARCH64_REG_INVALID) && |
1069 | 0 | !arr_exist(regs_read, read_count, op->sme.tile)) { |
1070 | 0 | regs_read[read_count] = (uint16_t)op->sme.tile; |
1071 | 0 | read_count++; |
1072 | 0 | } |
1073 | 0 | if ((op->access & CS_AC_WRITE) && |
1074 | 0 | (op->sme.tile != AARCH64_REG_INVALID) && |
1075 | 0 | !arr_exist(regs_write, write_count, op->sme.tile)) { |
1076 | 0 | regs_write[write_count] = (uint16_t)op->sme.tile; |
1077 | 0 | write_count++; |
1078 | 0 | } |
1079 | 0 | if ((op->sme.slice_reg != AARCH64_REG_INVALID) && |
1080 | 0 | !arr_exist(regs_read, read_count, op->sme.slice_reg)) { |
1081 | 0 | regs_read[read_count] = (uint16_t)op->sme.slice_reg; |
1082 | 0 | read_count++; |
1083 | 0 | } |
1084 | 0 | break; |
1085 | 0 | case AARCH64_OP_PRED: |
1086 | 0 | if ((op->access & CS_AC_READ) && |
1087 | 0 | (op->pred.reg != AARCH64_REG_INVALID) && |
1088 | 0 | !arr_exist(regs_read, read_count, op->pred.reg)) { |
1089 | 0 | regs_read[read_count] = (uint16_t)op->pred.reg; |
1090 | 0 | read_count++; |
1091 | 0 | } |
1092 | 0 | if ((op->access & CS_AC_WRITE) && |
1093 | 0 | (op->pred.reg != AARCH64_REG_INVALID) && |
1094 | 0 | !arr_exist(regs_write, write_count, op->pred.reg)) { |
1095 | 0 | regs_write[write_count] = (uint16_t)op->pred.reg; |
1096 | 0 | write_count++; |
1097 | 0 | } |
1098 | 0 | if ((op->pred.vec_select != AARCH64_REG_INVALID) && |
1099 | 0 | !arr_exist(regs_read, read_count, op->pred.vec_select)) { |
1100 | 0 | regs_read[read_count] = (uint16_t)op->pred.vec_select; |
1101 | 0 | read_count++; |
1102 | 0 | } |
1103 | 0 | break; |
1104 | 0 | default: |
1105 | 0 | break; |
1106 | 0 | } |
1107 | 0 | if (op->shift.type >= AARCH64_SFT_LSL_REG) { |
1108 | 0 | if (!arr_exist(regs_read, read_count, op->shift.value)) { |
1109 | 0 | regs_read[read_count] = (uint16_t)op->shift.value; |
1110 | 0 | read_count++; |
1111 | 0 | } |
1112 | 0 | } |
1113 | 0 | } |
1114 | | |
1115 | 0 | switch (insn->alias_id) { |
1116 | 0 | default: |
1117 | 0 | break; |
1118 | 0 | case AARCH64_INS_ALIAS_RET: |
1119 | 0 | regs_read[read_count] = AARCH64_REG_X30; |
1120 | 0 | read_count++; |
1121 | 0 | break; |
1122 | 0 | } |
1123 | | |
1124 | 0 | *regs_read_count = read_count; |
1125 | 0 | *regs_write_count = write_count; |
1126 | 0 | } |
1127 | | #endif |
1128 | | |
1129 | | static AArch64Layout_VectorLayout get_vl_by_suffix(const char suffix) |
1130 | 117k | { |
1131 | 117k | switch (suffix) { |
1132 | 30.3k | default: |
1133 | 30.3k | return AARCH64LAYOUT_INVALID; |
1134 | 19.8k | case 'b': |
1135 | 19.8k | case 'B': |
1136 | 19.8k | return AARCH64LAYOUT_VL_B; |
1137 | 21.1k | case 'h': |
1138 | 21.1k | case 'H': |
1139 | 21.1k | return AARCH64LAYOUT_VL_H; |
1140 | 21.7k | case 's': |
1141 | 21.7k | case 'S': |
1142 | 21.7k | return AARCH64LAYOUT_VL_S; |
1143 | 22.6k | case 'd': |
1144 | 22.6k | case 'D': |
1145 | 22.6k | return AARCH64LAYOUT_VL_D; |
1146 | 1.28k | case 'q': |
1147 | 1.28k | case 'Q': |
1148 | 1.28k | return AARCH64LAYOUT_VL_Q; |
1149 | 117k | } |
1150 | 117k | } |
1151 | | |
1152 | | static unsigned get_vec_list_num_regs(MCInst *MI, unsigned Reg) |
1153 | 32.5k | { |
1154 | | // Work out how many registers there are in the list (if there is an actual |
1155 | | // list). |
1156 | 32.5k | unsigned NumRegs = 1; |
1157 | 32.5k | if (MCRegisterClass_contains( |
1158 | 32.5k | MCRegisterInfo_getRegClass(MI->MRI, AArch64_DDRegClassID), |
1159 | 32.5k | Reg) || |
1160 | 32.5k | MCRegisterClass_contains( |
1161 | 32.1k | MCRegisterInfo_getRegClass(MI->MRI, AArch64_ZPR2RegClassID), |
1162 | 32.1k | Reg) || |
1163 | 32.5k | MCRegisterClass_contains( |
1164 | 26.7k | MCRegisterInfo_getRegClass(MI->MRI, AArch64_QQRegClassID), |
1165 | 26.7k | Reg) || |
1166 | 32.5k | MCRegisterClass_contains( |
1167 | 23.2k | MCRegisterInfo_getRegClass(MI->MRI, AArch64_PPR2RegClassID), |
1168 | 23.2k | Reg) || |
1169 | 32.5k | MCRegisterClass_contains( |
1170 | 22.8k | MCRegisterInfo_getRegClass(MI->MRI, |
1171 | 22.8k | AArch64_ZPR2StridedRegClassID), |
1172 | 22.8k | Reg)) |
1173 | 10.6k | NumRegs = 2; |
1174 | 21.8k | else if (MCRegisterClass_contains( |
1175 | 21.8k | MCRegisterInfo_getRegClass(MI->MRI, |
1176 | 21.8k | AArch64_DDDRegClassID), |
1177 | 21.8k | Reg) || |
1178 | 21.8k | MCRegisterClass_contains( |
1179 | 21.1k | MCRegisterInfo_getRegClass(MI->MRI, |
1180 | 21.1k | AArch64_ZPR3RegClassID), |
1181 | 21.1k | Reg) || |
1182 | 21.8k | MCRegisterClass_contains( |
1183 | 21.0k | MCRegisterInfo_getRegClass(MI->MRI, |
1184 | 21.0k | AArch64_QQQRegClassID), |
1185 | 21.0k | Reg)) |
1186 | 3.79k | NumRegs = 3; |
1187 | 18.0k | else if (MCRegisterClass_contains( |
1188 | 18.0k | MCRegisterInfo_getRegClass(MI->MRI, |
1189 | 18.0k | AArch64_DDDDRegClassID), |
1190 | 18.0k | Reg) || |
1191 | 18.0k | MCRegisterClass_contains( |
1192 | 17.7k | MCRegisterInfo_getRegClass(MI->MRI, |
1193 | 17.7k | AArch64_ZPR4RegClassID), |
1194 | 17.7k | Reg) || |
1195 | 18.0k | MCRegisterClass_contains( |
1196 | 13.9k | MCRegisterInfo_getRegClass(MI->MRI, |
1197 | 13.9k | AArch64_QQQQRegClassID), |
1198 | 13.9k | Reg) || |
1199 | 18.0k | MCRegisterClass_contains( |
1200 | 9.74k | MCRegisterInfo_getRegClass( |
1201 | 9.74k | MI->MRI, AArch64_ZPR4StridedRegClassID), |
1202 | 9.74k | Reg)) |
1203 | 8.96k | NumRegs = 4; |
1204 | 32.5k | return NumRegs; |
1205 | 32.5k | } |
1206 | | |
1207 | | static unsigned get_vec_list_stride(MCInst *MI, unsigned Reg) |
1208 | 32.5k | { |
1209 | 32.5k | unsigned Stride = 1; |
1210 | 32.5k | if (MCRegisterClass_contains( |
1211 | 32.5k | MCRegisterInfo_getRegClass(MI->MRI, |
1212 | 32.5k | AArch64_ZPR2StridedRegClassID), |
1213 | 32.5k | Reg)) |
1214 | 979 | Stride = 8; |
1215 | 31.5k | else if (MCRegisterClass_contains( |
1216 | 31.5k | MCRegisterInfo_getRegClass( |
1217 | 31.5k | MI->MRI, AArch64_ZPR4StridedRegClassID), |
1218 | 31.5k | Reg)) |
1219 | 629 | Stride = 4; |
1220 | 32.5k | return Stride; |
1221 | 32.5k | } |
1222 | | |
1223 | | static unsigned get_vec_list_first_reg(MCInst *MI, unsigned RegL) |
1224 | 32.5k | { |
1225 | 32.5k | unsigned Reg = RegL; |
1226 | | // Now forget about the list and find out what the first register is. |
1227 | 32.5k | if (MCRegisterInfo_getSubReg(MI->MRI, RegL, AArch64_dsub0)) |
1228 | 1.43k | Reg = MCRegisterInfo_getSubReg(MI->MRI, RegL, AArch64_dsub0); |
1229 | 31.0k | else if (MCRegisterInfo_getSubReg(MI->MRI, RegL, AArch64_qsub0)) |
1230 | 10.6k | Reg = MCRegisterInfo_getSubReg(MI->MRI, RegL, AArch64_qsub0); |
1231 | 20.4k | else if (MCRegisterInfo_getSubReg(MI->MRI, RegL, AArch64_zsub0)) |
1232 | 10.9k | Reg = MCRegisterInfo_getSubReg(MI->MRI, RegL, AArch64_zsub0); |
1233 | 9.46k | else if (MCRegisterInfo_getSubReg(MI->MRI, RegL, AArch64_psub0)) |
1234 | 348 | Reg = MCRegisterInfo_getSubReg(MI->MRI, RegL, AArch64_psub0); |
1235 | | |
1236 | | // If it's a D-reg, we need to promote it to the equivalent Q-reg before |
1237 | | // printing (otherwise getRegisterName fails). |
1238 | 32.5k | if (MCRegisterClass_contains(MCRegisterInfo_getRegClass( |
1239 | 32.5k | MI->MRI, AArch64_FPR64RegClassID), |
1240 | 32.5k | Reg)) { |
1241 | 1.48k | const MCRegisterClass *FPR128RC = MCRegisterInfo_getRegClass( |
1242 | 1.48k | MI->MRI, AArch64_FPR128RegClassID); |
1243 | 1.48k | Reg = MCRegisterInfo_getMatchingSuperReg( |
1244 | 1.48k | MI->MRI, Reg, AArch64_dsub, FPR128RC); |
1245 | 1.48k | } |
1246 | 32.5k | return Reg; |
1247 | 32.5k | } |
1248 | | |
1249 | | static bool is_vector_reg(unsigned Reg) |
1250 | 106k | { |
1251 | 106k | if ((Reg >= AArch64_Q0) && (Reg <= AArch64_Q31)) |
1252 | 39.5k | return true; |
1253 | 67.4k | else if ((Reg >= AArch64_Z0) && (Reg <= AArch64_Z31)) |
1254 | 66.7k | return true; |
1255 | 711 | else if ((Reg >= AArch64_P0) && (Reg <= AArch64_P15)) |
1256 | 711 | return true; |
1257 | 0 | return false; |
1258 | 106k | } |
1259 | | |
1260 | | static unsigned getNextVectorRegister(unsigned Reg, unsigned Stride /* = 1 */) |
1261 | 77.9k | { |
1262 | 184k | while (Stride--) { |
1263 | 106k | if (!is_vector_reg(Reg)) { |
1264 | 0 | CS_ASSERT(0 && "Vector register expected!"); |
1265 | 0 | return 0; |
1266 | 0 | } |
1267 | | // Vector lists can wrap around. |
1268 | 106k | else if (Reg == AArch64_Q31) |
1269 | 1.89k | Reg = AArch64_Q0; |
1270 | | // Vector lists can wrap around. |
1271 | 105k | else if (Reg == AArch64_Z31) |
1272 | 1.03k | Reg = AArch64_Z0; |
1273 | | // Vector lists can wrap around. |
1274 | 104k | else if (Reg == AArch64_P15) |
1275 | 30 | Reg = AArch64_P0; |
1276 | 104k | else |
1277 | | // Assume ordered registers |
1278 | 104k | ++Reg; |
1279 | 106k | } |
1280 | 77.9k | return Reg; |
1281 | 77.9k | } |
1282 | | |
1283 | | static aarch64_extender llvm_to_cs_ext(AArch64_AM_ShiftExtendType ExtType) |
1284 | 9.11k | { |
1285 | 9.11k | switch (ExtType) { |
1286 | 7.32k | default: |
1287 | 7.32k | return AARCH64_EXT_INVALID; |
1288 | 393 | case AArch64_AM_UXTB: |
1289 | 393 | return AARCH64_EXT_UXTB; |
1290 | 384 | case AArch64_AM_UXTH: |
1291 | 384 | return AARCH64_EXT_UXTH; |
1292 | 156 | case AArch64_AM_UXTW: |
1293 | 156 | return AARCH64_EXT_UXTW; |
1294 | 238 | case AArch64_AM_UXTX: |
1295 | 238 | return AARCH64_EXT_UXTX; |
1296 | 246 | case AArch64_AM_SXTB: |
1297 | 246 | return AARCH64_EXT_SXTB; |
1298 | 32 | case AArch64_AM_SXTH: |
1299 | 32 | return AARCH64_EXT_SXTH; |
1300 | 21 | case AArch64_AM_SXTW: |
1301 | 21 | return AARCH64_EXT_SXTW; |
1302 | 321 | case AArch64_AM_SXTX: |
1303 | 321 | return AARCH64_EXT_SXTX; |
1304 | 9.11k | } |
1305 | 9.11k | } |
1306 | | |
1307 | | static aarch64_shifter llvm_to_cs_shift(AArch64_AM_ShiftExtendType ShiftExtType) |
1308 | 7.32k | { |
1309 | 7.32k | switch (ShiftExtType) { |
1310 | 0 | default: |
1311 | 0 | return AARCH64_SFT_INVALID; |
1312 | 3.89k | case AArch64_AM_LSL: |
1313 | 3.89k | return AARCH64_SFT_LSL; |
1314 | 1.00k | case AArch64_AM_LSR: |
1315 | 1.00k | return AARCH64_SFT_LSR; |
1316 | 975 | case AArch64_AM_ASR: |
1317 | 975 | return AARCH64_SFT_ASR; |
1318 | 1.19k | case AArch64_AM_ROR: |
1319 | 1.19k | return AARCH64_SFT_ROR; |
1320 | 258 | case AArch64_AM_MSL: |
1321 | 258 | return AARCH64_SFT_MSL; |
1322 | 7.32k | } |
1323 | 7.32k | } |
1324 | | |
1325 | | /// Initializes or finishes a memory operand of Capstone (depending on \p |
1326 | | /// status). A memory operand in Capstone can be assembled from two LLVM operands, |
1327 | | /// e.g. the base register and the immediate displacement. |
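| | /// For example, for a "[x1, #16]" operand the printer first calls this with |
| | /// status=true to initialize (or re-select) the memory operand, the base and |
| | /// displacement are then filled in, and a final call with status=false closes |
| | /// the operand and advances the operand count. |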
1328 | | void AArch64_set_mem_access(MCInst *MI, bool status) |
1329 | 219k | { |
1330 | 219k | if (!detail_is_set(MI)) |
1331 | 0 | return; |
1332 | 219k | set_doing_mem(MI, status); |
1333 | 219k | if (status) { |
1334 | 109k | if (AArch64_get_detail(MI)->op_count > 0 && |
1335 | 109k | AArch64_get_detail_op(MI, -1)->type == AARCH64_OP_MEM && |
1336 | 109k | AArch64_get_detail_op(MI, -1)->mem.index == |
1337 | 43.0k | AARCH64_REG_INVALID && |
1338 | 109k | AArch64_get_detail_op(MI, -1)->mem.disp == 0) { |
1339 | | // Previous memory operand not done yet. Select it. |
1340 | 42.7k | AArch64_dec_op_count(MI); |
1341 | 42.7k | return; |
1342 | 42.7k | } |
1343 | | |
1344 | | // Init a new one. |
1345 | 66.8k | AArch64_get_detail_op(MI, 0)->type = AARCH64_OP_MEM; |
1346 | 66.8k | AArch64_get_detail_op(MI, 0)->mem.base = AARCH64_REG_INVALID; |
1347 | 66.8k | AArch64_get_detail_op(MI, 0)->mem.index = AARCH64_REG_INVALID; |
1348 | 66.8k | AArch64_get_detail_op(MI, 0)->mem.disp = 0; |
1349 | | |
1350 | 66.8k | #ifndef CAPSTONE_DIET |
1351 | 66.8k | uint8_t access = |
1352 | 66.8k | map_get_op_access(MI, AArch64_get_detail(MI)->op_count); |
1353 | 66.8k | AArch64_get_detail_op(MI, 0)->access = access; |
1354 | 66.8k | #endif |
1355 | 109k | } else { |
1356 | | // done, select the next operand slot |
1357 | 109k | AArch64_inc_op_count(MI); |
1358 | 109k | } |
1359 | 219k | } |
1360 | | |
1361 | | /// Common prefix for all AArch64_add_cs_detail_* functions |
1362 | | static bool add_cs_detail_begin(MCInst *MI, unsigned op_num) |
1363 | 558k | { |
1364 | 558k | if (!detail_is_set(MI) || !map_fill_detail_ops(MI)) |
1365 | 0 | return false; |
1366 | | |
1367 | 558k | if (AArch64_get_detail(MI)->is_doing_sme) { |
1368 | | // Unset the flag if there is no bound operand anymore. |
1369 | 60.8k | if (!(map_get_op_type(MI, op_num) & CS_OP_BOUND)) { |
1370 | 42.7k | AArch64_get_detail(MI)->is_doing_sme = false; |
1371 | 42.7k | AArch64_inc_op_count(MI); |
1372 | 42.7k | } |
1373 | 60.8k | } |
1374 | 558k | return true; |
1375 | 558k | } |
1376 | | |
1377 | | /// Fills cs_detail with the data of the operand. |
1378 | | /// This function handles operands whose original printer function has no |
1379 | | /// special cases. |
1380 | | void AArch64_add_cs_detail_0(MCInst *MI, aarch64_op_group op_group, |
1381 | | unsigned OpNum) |
1382 | 324k | { |
1383 | 324k | if (!add_cs_detail_begin(MI, OpNum)) |
1384 | 0 | return; |
1385 | | |
1386 | | // Fill cs_detail |
1387 | 324k | switch (op_group) { |
1388 | 0 | default: |
1389 | 0 | printf("ERROR: Operand group %d not handled!\n", op_group); |
1390 | 0 | CS_ASSERT_RET(0); |
1391 | 237k | case AArch64_OP_GROUP_Operand: { |
1392 | 237k | cs_op_type primary_op_type = map_get_op_type(MI, OpNum) & |
1393 | 237k | ~(CS_OP_MEM | CS_OP_BOUND); |
1394 | 237k | switch (primary_op_type) { |
1395 | 0 | default: |
1396 | 0 | printf("Unhandled operand type 0x%x\n", |
1397 | 0 | primary_op_type); |
1398 | 0 | CS_ASSERT_RET(0); |
1399 | 203k | case AARCH64_OP_REG: |
1400 | 203k | AArch64_set_detail_op_reg(MI, OpNum, |
1401 | 203k | MCInst_getOpVal(MI, OpNum)); |
1402 | 203k | break; |
1403 | 33.9k | case AARCH64_OP_IMM: |
1404 | 33.9k | AArch64_set_detail_op_imm(MI, OpNum, AARCH64_OP_IMM, |
1405 | 33.9k | MCInst_getOpVal(MI, OpNum)); |
1406 | 33.9k | break; |
1407 | 328 | case AARCH64_OP_FP: { |
1408 | | // printOperand does not handle FP operands. But sometimes it |
1409 | | // is used to print FP operands as a normal immediate. |
1410 | 328 | AArch64_get_detail_op(MI, 0)->type = AARCH64_OP_IMM; |
1411 | 328 | AArch64_get_detail_op(MI, 0)->imm = |
1412 | 328 | MCInst_getOpVal(MI, OpNum); |
1413 | 328 | AArch64_get_detail_op(MI, 0)->access = |
1414 | 328 | map_get_op_access(MI, OpNum); |
1415 | 328 | AArch64_inc_op_count(MI); |
1416 | 328 | break; |
1417 | 0 | } |
1418 | 237k | } |
1419 | 237k | break; |
1420 | 237k | } |
1421 | 237k | case AArch64_OP_GROUP_AddSubImm: { |
1422 | 1.66k | unsigned Val = (MCInst_getOpVal(MI, OpNum) & 0xfff); |
1423 | 1.66k | AArch64_set_detail_op_imm(MI, OpNum, AARCH64_OP_IMM, Val); |
1424 | | // Shift is added in printShifter() |
1425 | 1.66k | break; |
1426 | 237k | } |
1427 | 0 | case AArch64_OP_GROUP_AdrLabel: { |
1428 | 0 | if (MCOperand_isImm(MCInst_getOperand(MI, OpNum))) { |
1429 | 0 | int64_t Offset = MCInst_getOpVal(MI, OpNum); |
1430 | 0 | AArch64_set_detail_op_imm(MI, OpNum, AARCH64_OP_IMM, |
1431 | 0 | (MI->address & -4) + Offset); |
1432 | 0 | } else { |
1433 | | // Expression |
1434 | 0 | AArch64_set_detail_op_imm(MI, OpNum, AARCH64_OP_IMM, |
1435 | 0 | MCOperand_isImm(MCInst_getOperand(MI, OpNum))); |
1436 | 0 | } |
1437 | 0 | break; |
1438 | 237k | } |
1439 | 0 | case AArch64_OP_GROUP_AdrpLabel: { |
1440 | 0 | if (MCOperand_isImm(MCInst_getOperand(MI, OpNum))) { |
1441 | 0 | int64_t Offset = MCInst_getOpVal(MI, OpNum) * 4096; |
1442 | 0 | AArch64_set_detail_op_imm(MI, OpNum, AARCH64_OP_IMM, |
1443 | 0 | (MI->address & -4096) + Offset); |
1444 | 0 | } else { |
1445 | | // Expression |
1446 | 0 | AArch64_set_detail_op_imm(MI, OpNum, AARCH64_OP_IMM, |
1447 | 0 | MCOperand_isImm(MCInst_getOperand(MI, OpNum))); |
1448 | 0 | } |
1449 | 0 | break; |
1450 | 237k | } |
1451 | 3.46k | case AArch64_OP_GROUP_AdrAdrpLabel: { |
1452 | 3.46k | if (!MCOperand_isImm(MCInst_getOperand(MI, OpNum))) { |
1453 | | // Expression |
1454 | 0 | AArch64_set_detail_op_imm(MI, OpNum, AARCH64_OP_IMM, |
1455 | 0 | MCOperand_isImm(MCInst_getOperand(MI, OpNum))); |
1456 | 0 | break; |
1457 | 0 | } |
1458 | 3.46k | int64_t Offset = MCInst_getOpVal(MI, OpNum); |
1459 | 3.46k | uint64_t Address = MI->address; |
1460 | 3.46k | if (MCInst_getOpcode(MI) == AArch64_ADRP) { |
1461 | 2.00k | Offset = Offset * 4096; |
1462 | 2.00k | Address = Address & -4096; |
1463 | 2.00k | } |
1464 | 3.46k | AArch64_set_detail_op_imm(MI, OpNum, AARCH64_OP_IMM, |
1465 | 3.46k | Address + Offset); |
1466 | 3.46k | break; |
1467 | 3.46k | } |
1468 | 8.80k | case AArch64_OP_GROUP_AlignedLabel: { |
1469 | 8.80k | if (MCOperand_isImm(MCInst_getOperand(MI, OpNum))) { |
1470 | 8.73k | int64_t Offset = MCInst_getOpVal(MI, OpNum) * 4; |
1471 | 8.73k | AArch64_set_detail_op_imm(MI, OpNum, AARCH64_OP_IMM, |
1472 | 8.73k | MI->address + Offset); |
1473 | 8.73k | } else { |
1474 | | // Expression |
1475 | 65 | AArch64_set_detail_op_imm(MI, OpNum, AARCH64_OP_IMM, |
1476 | 65 | MCOperand_isImm(MCInst_getOperand(MI, OpNum))); |
1477 | 65 | } |
1478 | 8.80k | break; |
1479 | 3.46k | } |
1480 | 0 | case AArch64_OP_GROUP_AMNoIndex: { |
1481 | 0 | AArch64_set_detail_op_mem(MI, OpNum, |
1482 | 0 | MCInst_getOpVal(MI, OpNum)); |
1483 | 0 | break; |
1484 | 3.46k | } |
1485 | 1.79k | case AArch64_OP_GROUP_ArithExtend: { |
1486 | 1.79k | unsigned Val = MCInst_getOpVal(MI, OpNum); |
1487 | 1.79k | AArch64_AM_ShiftExtendType ExtType = |
1488 | 1.79k | AArch64_AM_getArithExtendType(Val); |
1489 | 1.79k | unsigned ShiftVal = AArch64_AM_getArithShiftValue(Val); |
1490 | | |
1491 | 1.79k | AArch64_get_detail_op(MI, -1)->ext = llvm_to_cs_ext(ExtType); |
1492 | 1.79k | AArch64_get_detail_op(MI, -1)->shift.value = ShiftVal; |
1493 | 1.79k | AArch64_get_detail_op(MI, -1)->shift.type = AARCH64_SFT_LSL; |
1494 | 1.79k | break; |
1495 | 3.46k | } |
1496 | 67 | case AArch64_OP_GROUP_BarriernXSOption: { |
1497 | 67 | unsigned Val = MCInst_getOpVal(MI, OpNum); |
1498 | 67 | aarch64_sysop sysop = { 0 }; |
1499 | 67 | const AArch64DBnXS_DBnXS *DB = |
1500 | 67 | AArch64DBnXS_lookupDBnXSByEncoding(Val); |
1501 | 67 | if (DB) |
1502 | 67 | sysop.imm.dbnxs = (aarch64_dbnxs) DB->SysImm.dbnxs; |
1503 | 0 | else |
1504 | 0 | sysop.imm.raw_val = Val; |
1505 | 67 | sysop.sub_type = AARCH64_OP_DBNXS; |
1506 | 67 | AArch64_set_detail_op_sys(MI, OpNum, sysop, AARCH64_OP_SYSIMM); |
1507 | 67 | break; |
1508 | 3.46k | } |
1509 | 12 | case AArch64_OP_GROUP_AppleSysBarrierOption: { |
1510 | | // Proprietary (Apple) barrier option. We just add the
1511 | | // raw immediate here.
1512 | 12 | unsigned Val = MCOperand_getImm(MCInst_getOperand(MI, OpNum)); |
1513 | 12 | AArch64_set_detail_op_imm(MI, OpNum, AARCH64_OP_IMM, Val); |
1514 | 12 | break; |
1515 | 3.46k | } |
1516 | 113 | case AArch64_OP_GROUP_BarrierOption: { |
1517 | 113 | unsigned Val = MCOperand_getImm(MCInst_getOperand(MI, OpNum)); |
1518 | 113 | unsigned Opcode = MCInst_getOpcode(MI); |
1519 | 113 | aarch64_sysop sysop = { 0 }; |
1520 | | |
1521 | 113 | if (Opcode == AArch64_ISB) { |
1522 | 40 | const AArch64ISB_ISB *ISB = |
1523 | 40 | AArch64ISB_lookupISBByEncoding(Val); |
1524 | 40 | if (ISB) |
1525 | 0 | sysop.alias.isb = (aarch64_isb) ISB->SysAlias.isb; |
1526 | 40 | else |
1527 | 40 | sysop.alias.raw_val = Val; |
1528 | 40 | sysop.sub_type = AARCH64_OP_ISB; |
1529 | 40 | AArch64_set_detail_op_sys(MI, OpNum, sysop, |
1530 | 40 | AARCH64_OP_SYSALIAS); |
1531 | 73 | } else if (Opcode == AArch64_TSB) { |
1532 | 12 | const AArch64TSB_TSB *TSB = |
1533 | 12 | AArch64TSB_lookupTSBByEncoding(Val); |
1534 | 12 | if (TSB) |
1535 | 12 | sysop.alias.tsb = (aarch64_tsb) TSB->SysAlias.tsb; |
1536 | 0 | else |
1537 | 0 | sysop.alias.raw_val = Val; |
1538 | 12 | sysop.sub_type = AARCH64_OP_TSB; |
1539 | 12 | AArch64_set_detail_op_sys(MI, OpNum, sysop, |
1540 | 12 | AARCH64_OP_SYSALIAS); |
1541 | 61 | } else { |
1542 | 61 | const AArch64DB_DB *DB = |
1543 | 61 | AArch64DB_lookupDBByEncoding(Val); |
1544 | 61 | if (DB) |
1545 | 52 | sysop.alias.db = (aarch64_db) DB->SysAlias.db; |
1546 | 9 | else |
1547 | 9 | sysop.alias.raw_val = Val; |
1548 | 61 | sysop.sub_type = AARCH64_OP_DB; |
1549 | 61 | AArch64_set_detail_op_sys(MI, OpNum, sysop, |
1550 | 61 | AARCH64_OP_SYSALIAS); |
1551 | 61 | } |
1552 | 113 | break; |
1553 | 3.46k | } |
1554 | 279 | case AArch64_OP_GROUP_BTIHintOp: { |
1555 | 279 | aarch64_sysop sysop = { 0 }; |
1556 | 279 | unsigned btihintop = MCInst_getOpVal(MI, OpNum) ^ 32; |
1557 | 279 | const AArch64BTIHint_BTI *BTI = |
1558 | 279 | AArch64BTIHint_lookupBTIByEncoding(btihintop); |
1559 | 279 | if (BTI) |
1560 | 279 | sysop.alias.bti = (aarch64_bti) BTI->SysAlias.bti; |
1561 | 0 | else |
1562 | 0 | sysop.alias.raw_val = btihintop; |
1563 | 279 | sysop.sub_type = AARCH64_OP_BTI; |
1564 | 279 | AArch64_set_detail_op_sys(MI, OpNum, sysop, |
1565 | 279 | AARCH64_OP_SYSALIAS); |
1566 | 279 | break; |
1567 | 3.46k | } |
1568 | 1.44k | case AArch64_OP_GROUP_CondCode: { |
1569 | 1.44k | AArch64_get_detail(MI)->cc = MCInst_getOpVal(MI, OpNum); |
1570 | 1.44k | break; |
1571 | 3.46k | } |
1572 | 855 | case AArch64_OP_GROUP_ExtendedRegister: { |
1573 | 855 | AArch64_set_detail_op_reg(MI, OpNum, |
1574 | 855 | MCInst_getOpVal(MI, OpNum)); |
1575 | 855 | break; |
1576 | 3.46k | } |
1577 | 410 | case AArch64_OP_GROUP_FPImmOperand: { |
1578 | 410 | MCOperand *MO = MCInst_getOperand(MI, (OpNum)); |
1579 | 410 | float FPImm = |
1580 | 410 | MCOperand_isDFPImm(MO) ? |
1581 | 0 | BitsToDouble(MCOperand_getImm(MO)) : |
1582 | 410 | AArch64_AM_getFPImmFloat(MCOperand_getImm(MO)); |
1583 | 410 | AArch64_set_detail_op_float(MI, OpNum, FPImm); |
1584 | 410 | break; |
1585 | 3.46k | } |
1586 | 3.09k | case AArch64_OP_GROUP_GPR64as32: { |
1587 | 3.09k | unsigned Reg = MCInst_getOpVal(MI, OpNum); |
1588 | 3.09k | AArch64_set_detail_op_reg(MI, OpNum, getWRegFromXReg(Reg)); |
1589 | 3.09k | break; |
1590 | 3.46k | } |
1591 | 26 | case AArch64_OP_GROUP_GPR64x8: { |
1592 | 26 | unsigned Reg = MCInst_getOpVal(MI, (OpNum)); |
1593 | 26 | Reg = MCRegisterInfo_getSubReg(MI->MRI, Reg, AArch64_x8sub_0); |
1594 | 26 | AArch64_set_detail_op_reg(MI, OpNum, Reg); |
1595 | 26 | break; |
1596 | 3.46k | } |
1597 | 2.62k | case AArch64_OP_GROUP_Imm: |
1598 | 2.67k | case AArch64_OP_GROUP_ImmHex: |
1599 | 2.67k | AArch64_set_detail_op_imm(MI, OpNum, AARCH64_OP_IMM, |
1600 | 2.67k | MCInst_getOpVal(MI, OpNum)); |
1601 | 2.67k | break; |
1602 | 0 | case AArch64_OP_GROUP_ImplicitlyTypedVectorList: |
1603 | | // The TypedVectorList printer implements the logic for implicitly typed operands.
1604 | 0 | AArch64_add_cs_detail_2(MI, AArch64_OP_GROUP_TypedVectorList_0_b, OpNum, |
1605 | 0 | 0, 0); |
1606 | 0 | break; |
1607 | 38 | case AArch64_OP_GROUP_InverseCondCode: { |
1608 | 38 | AArch64CC_CondCode CC = (AArch64CC_CondCode)MCOperand_getImm( |
1609 | 38 | MCInst_getOperand(MI, (OpNum))); |
1610 | 38 | AArch64_get_detail(MI)->cc = AArch64CC_getInvertedCondCode(CC); |
1611 | 38 | break; |
1612 | 2.62k | } |
1613 | 1.55k | case AArch64_OP_GROUP_MatrixTile: { |
1614 | 1.55k | const char *RegName = AArch64_LLVM_getRegisterName( |
1615 | 1.55k | MCInst_getOpVal(MI, OpNum), AArch64_NoRegAltName); |
1616 | 1.55k | const char *Dot = strstr(RegName, "."); |
1617 | 1.55k | AArch64Layout_VectorLayout vas = AARCH64LAYOUT_INVALID; |
1618 | 1.55k | if (!Dot) { |
1619 | | // The matrix dimensions are machine dependent. |
1620 | | // Currently we do not support differentiation of machines. |
1621 | | // So we just indicate the use of the complete matrix. |
1622 | 0 | vas = sme_reg_to_vas(MCInst_getOpVal(MI, OpNum)); |
1623 | 0 | } else |
1624 | 1.55k | vas = get_vl_by_suffix(Dot[1]); |
1625 | 1.55k | AArch64_set_detail_op_sme(MI, OpNum, AARCH64_SME_MATRIX_TILE, |
1626 | 1.55k | vas); |
1627 | 1.55k | break; |
1628 | 2.62k | } |
1629 | 252 | case AArch64_OP_GROUP_MatrixTileList: { |
1630 | 252 | unsigned MaxRegs = 8; |
1631 | 252 | unsigned RegMask = MCInst_getOpVal(MI, (OpNum)); |
1632 | | |
1633 | 2.26k | for (unsigned I = 0; I < MaxRegs; ++I) { |
1634 | 2.01k | unsigned Reg = RegMask & (1 << I); |
1635 | 2.01k | if (Reg == 0) |
1636 | 879 | continue; |
1637 | 1.13k | AArch64_get_detail_op(MI, 0)->is_list_member = true; |
1638 | 1.13k | AArch64_set_detail_op_sme(MI, OpNum, |
1639 | 1.13k | AARCH64_SME_MATRIX_TILE_LIST, |
1640 | 1.13k | AARCH64LAYOUT_VL_D, |
1641 | 1.13k | (int) (AARCH64_REG_ZAD0 + I)); |
1642 | 1.13k | AArch64_inc_op_count(MI); |
1643 | 1.13k | } |
1644 | 252 | AArch64_get_detail(MI)->is_doing_sme = false; |
1645 | 252 | break; |
1646 | 2.62k | } |
1647 | 1.15k | case AArch64_OP_GROUP_MRSSystemRegister: |
1648 | 4.06k | case AArch64_OP_GROUP_MSRSystemRegister: { |
1649 | 4.06k | unsigned Val = MCInst_getOpVal(MI, OpNum); |
1650 | 4.06k | const AArch64SysReg_SysReg *Reg = |
1651 | 4.06k | AArch64SysReg_lookupSysRegByEncoding(Val); |
1652 | 4.06k | bool Read = (op_group == AArch64_OP_GROUP_MRSSystemRegister) ? |
1653 | 1.15k | true : |
1654 | 4.06k | false; |
1655 | | |
1656 | 4.06k | bool isValidSysReg = |
1657 | 4.06k | (Reg && (Read ? Reg->Readable : Reg->Writeable) && |
1658 | 4.06k | AArch64_testFeatureList(MI->csh->mode, |
1659 | 240 | Reg->FeaturesRequired)); |
1660 | | |
1661 | 4.06k | if (Reg && !isValidSysReg) |
1662 | 648 | Reg = AArch64SysReg_lookupSysRegByName(Reg->AltName); |
1663 | 4.06k | aarch64_sysop sysop = { 0 }; |
1664 | | // If Reg is NULL it is a generic system register. |
1665 | 4.06k | if (Reg) |
1666 | 885 | sysop.reg.sysreg = (aarch64_sysreg) Reg->SysReg.sysreg; |
1667 | 3.18k | else { |
1668 | 3.18k | sysop.reg.raw_val = Val; |
1669 | 3.18k | } |
1670 | 4.06k | aarch64_op_type type = |
1671 | 4.06k | (op_group == AArch64_OP_GROUP_MRSSystemRegister) ? |
1672 | 1.15k | AARCH64_OP_REG_MRS : |
1673 | 4.06k | AARCH64_OP_REG_MSR; |
1674 | 4.06k | sysop.sub_type = type; |
1675 | 4.06k | AArch64_set_detail_op_sys(MI, OpNum, sysop, AARCH64_OP_SYSREG); |
1676 | 4.06k | break; |
1677 | 1.15k | } |
1678 | 63 | case AArch64_OP_GROUP_PSBHintOp: { |
1679 | 63 | unsigned psbhintop = MCInst_getOpVal(MI, OpNum); |
1680 | 63 | const AArch64PSBHint_PSB *PSB = |
1681 | 63 | AArch64PSBHint_lookupPSBByEncoding(psbhintop); |
1682 | 63 | aarch64_sysop sysop = { 0 }; |
1683 | 63 | if (PSB) |
1684 | 63 | sysop.alias.psb = (aarch64_psb) PSB->SysAlias.psb; |
1685 | 0 | else |
1686 | 0 | sysop.alias.raw_val = psbhintop; |
1687 | 63 | sysop.sub_type = AARCH64_OP_PSB; |
1688 | 63 | AArch64_set_detail_op_sys(MI, OpNum, sysop, |
1689 | 63 | AARCH64_OP_SYSALIAS); |
1690 | 63 | break; |
1691 | 1.15k | } |
1692 | 279 | case AArch64_OP_GROUP_RPRFMOperand: { |
1693 | 279 | unsigned prfop = MCInst_getOpVal(MI, OpNum); |
1694 | 279 | const AArch64PRFM_PRFM *PRFM = |
1695 | 279 | AArch64PRFM_lookupPRFMByEncoding(prfop); |
1696 | 279 | aarch64_sysop sysop = { 0 }; |
1697 | 279 | if (PRFM) |
1698 | 219 | sysop.alias.prfm = (aarch64_prfm) PRFM->SysAlias.prfm; |
1699 | 60 | else |
1700 | 60 | sysop.alias.raw_val = prfop; |
1701 | 279 | sysop.sub_type = AARCH64_OP_PRFM; |
1702 | 279 | AArch64_set_detail_op_sys(MI, OpNum, sysop, |
1703 | 279 | AARCH64_OP_SYSALIAS); |
1704 | 279 | break; |
1705 | 1.15k | } |
1706 | 4.54k | case AArch64_OP_GROUP_ShiftedRegister: { |
1707 | 4.54k | AArch64_set_detail_op_reg(MI, OpNum, |
1708 | 4.54k | MCInst_getOpVal(MI, OpNum)); |
1709 | | // Shift part is handled in printShifter() |
1710 | 4.54k | break; |
1711 | 1.15k | } |
1712 | 7.32k | case AArch64_OP_GROUP_Shifter: { |
1713 | 7.32k | unsigned Val = MCInst_getOpVal(MI, OpNum); |
1714 | 7.32k | AArch64_AM_ShiftExtendType ShExtType = |
1715 | 7.32k | AArch64_AM_getShiftType(Val); |
1716 | 7.32k | AArch64_get_detail_op(MI, -1)->ext = llvm_to_cs_ext(ShExtType); |
1717 | 7.32k | AArch64_get_detail_op(MI, -1)->shift.type = |
1718 | 7.32k | llvm_to_cs_shift(ShExtType); |
1719 | 7.32k | AArch64_get_detail_op(MI, -1)->shift.value = |
1720 | 7.32k | AArch64_AM_getShiftValue(Val); |
1721 | 7.32k | break; |
1722 | 1.15k | } |
1723 | 812 | case AArch64_OP_GROUP_SIMDType10Operand: { |
1724 | 812 | unsigned RawVal = MCInst_getOpVal(MI, OpNum); |
1725 | 812 | uint64_t Val = AArch64_AM_decodeAdvSIMDModImmType10(RawVal); |
1726 | 812 | AArch64_set_detail_op_imm(MI, OpNum, AARCH64_OP_IMM, Val); |
1727 | 812 | break; |
1728 | 1.15k | } |
1729 | 0 | case AArch64_OP_GROUP_SVCROp: { |
1730 | 0 | unsigned svcrop = MCInst_getOpVal(MI, OpNum); |
1731 | 0 | const AArch64SVCR_SVCR *SVCR = |
1732 | 0 | AArch64SVCR_lookupSVCRByEncoding(svcrop); |
1733 | 0 | aarch64_sysop sysop = { 0 }; |
1734 | 0 | if (SVCR) |
1735 | 0 | sysop.alias.svcr = (aarch64_svcr) SVCR->SysAlias.svcr; |
1736 | 0 | else |
1737 | 0 | sysop.alias.raw_val = svcrop; |
1738 | 0 | sysop.sub_type = AARCH64_OP_SVCR; |
1739 | 0 | AArch64_set_detail_op_sys(MI, OpNum, sysop, |
1740 | 0 | AARCH64_OP_SYSALIAS); |
1741 | 0 | break; |
1742 | 1.15k | } |
1743 | 3.62k | case AArch64_OP_GROUP_SVEPattern: { |
1744 | 3.62k | unsigned Val = MCInst_getOpVal(MI, OpNum); |
1745 | 3.62k | const AArch64SVEPredPattern_SVEPREDPAT *Pat = |
1746 | 3.62k | AArch64SVEPredPattern_lookupSVEPREDPATByEncoding(Val); |
1747 | 3.62k | if (!Pat) { |
1748 | 1.41k | AArch64_set_detail_op_imm(MI, OpNum, AARCH64_OP_IMM, Val); |
1749 | 1.41k | break; |
1750 | 1.41k | } |
1751 | 2.21k | aarch64_sysop sysop = { 0 }; |
1752 | 2.21k | sysop.alias = Pat->SysAlias; |
1753 | 2.21k | sysop.sub_type = AARCH64_OP_SVEPREDPAT; |
1754 | 2.21k | AArch64_set_detail_op_sys(MI, OpNum, sysop, |
1755 | 2.21k | AARCH64_OP_SYSALIAS); |
1756 | 2.21k | break; |
1757 | 3.62k | } |
1758 | 291 | case AArch64_OP_GROUP_SVEVecLenSpecifier: { |
1759 | 291 | unsigned Val = MCInst_getOpVal(MI, OpNum); |
1760 | | // Pattern has only 1 bit |
1761 | 291 | if (Val > 1) |
1762 | 0 | CS_ASSERT_RET(0 && "Invalid vector length specifier"); |
1763 | 291 | const AArch64SVEVecLenSpecifier_SVEVECLENSPECIFIER *Pat = |
1764 | 291 | AArch64SVEVecLenSpecifier_lookupSVEVECLENSPECIFIERByEncoding( |
1765 | 291 | Val); |
1766 | 291 | if (!Pat) |
1767 | 0 | break; |
1768 | 291 | aarch64_sysop sysop = { 0 }; |
1769 | 291 | sysop.alias = Pat->SysAlias; |
1770 | 291 | sysop.sub_type = AARCH64_OP_SVEVECLENSPECIFIER; |
1771 | 291 | AArch64_set_detail_op_sys(MI, OpNum, sysop, |
1772 | 291 | AARCH64_OP_SYSALIAS); |
1773 | 291 | break; |
1774 | 291 | } |
1775 | 5.97k | case AArch64_OP_GROUP_SysCROperand: { |
1776 | 5.97k | uint64_t cimm = MCInst_getOpVal(MI, OpNum); |
1777 | 5.97k | AArch64_set_detail_op_imm(MI, OpNum, AARCH64_OP_CIMM, cimm); |
1778 | 5.97k | break; |
1779 | 291 | } |
1780 | 180 | case AArch64_OP_GROUP_SyspXzrPair: { |
1781 | 180 | unsigned Reg = MCInst_getOpVal(MI, OpNum); |
1782 | 180 | AArch64_set_detail_op_reg(MI, OpNum, Reg); |
1783 | 180 | AArch64_set_detail_op_reg(MI, OpNum, Reg); |
1784 | 180 | break; |
1785 | 291 | } |
1786 | 568 | case AArch64_OP_GROUP_SystemPStateField: { |
1787 | 568 | unsigned Val = MCInst_getOpVal(MI, OpNum); |
1788 | | |
1789 | 568 | aarch64_sysop sysop = { 0 }; |
1790 | 568 | const AArch64PState_PStateImm0_15 *PStateImm15 = |
1791 | 568 | AArch64PState_lookupPStateImm0_15ByEncoding(Val); |
1792 | 568 | const AArch64PState_PStateImm0_1 *PStateImm1 = |
1793 | 568 | AArch64PState_lookupPStateImm0_1ByEncoding(Val); |
1794 | 568 | if (PStateImm15 && |
1795 | 568 | AArch64_testFeatureList(MI->csh->mode, |
1796 | 564 | PStateImm15->FeaturesRequired)) { |
1797 | 564 | sysop.alias = PStateImm15->SysAlias; |
1798 | 564 | sysop.sub_type = AARCH64_OP_PSTATEIMM0_15; |
1799 | 564 | AArch64_set_detail_op_sys(MI, OpNum, sysop, |
1800 | 564 | AARCH64_OP_SYSALIAS); |
1801 | 564 | } else if (PStateImm1 && |
1802 | 4 | AArch64_testFeatureList( |
1803 | 4 | MI->csh->mode, |
1804 | 4 | PStateImm1->FeaturesRequired)) { |
1805 | 4 | sysop.alias = PStateImm1->SysAlias; |
1806 | 4 | sysop.sub_type = AARCH64_OP_PSTATEIMM0_1; |
1807 | 4 | AArch64_set_detail_op_sys(MI, OpNum, sysop, |
1808 | 4 | AARCH64_OP_SYSALIAS); |
1809 | 4 | } else { |
1810 | 0 | AArch64_set_detail_op_imm(MI, OpNum, AARCH64_OP_IMM, |
1811 | 0 | Val); |
1812 | 0 | } |
1813 | 568 | break; |
1814 | 291 | } |
1815 | 32.6k | case AArch64_OP_GROUP_VRegOperand: { |
1816 | 32.6k | unsigned Reg = MCInst_getOpVal(MI, OpNum); |
1817 | 32.6k | AArch64_get_detail_op(MI, 0)->is_vreg = true; |
1818 | 32.6k | AArch64_set_detail_op_reg(MI, OpNum, Reg); |
1819 | 32.6k | break; |
1820 | 291 | } |
1821 | 324k | } |
1822 | 324k | } |
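/* Editorial sketch (not part of the original source): how the detail filled
 * above is typically consumed through the public API. The names below
 * (CS_ARCH_AARCH64, detail->aarch64, AARCH64_OP_REG) are assumed from the
 * capstone-next headers; treat this as an illustration, not a reference.
 *
 *   csh handle;
 *   cs_insn *insn;
 *   cs_open(CS_ARCH_AARCH64, CS_MODE_LITTLE_ENDIAN, &handle);
 *   cs_option(handle, CS_OPT_DETAIL, CS_OPT_ON);
 *   size_t n = cs_disasm(handle, code, code_size, 0x1000, 0, &insn);
 *   for (size_t i = 0; i < n; i++)
 *       for (uint8_t j = 0; j < insn[i].detail->aarch64.op_count; j++)
 *           if (insn[i].detail->aarch64.operands[j].type == AARCH64_OP_REG)
 *               ; // this operand was added by AArch64_set_detail_op_reg()
 *   cs_free(insn, n);
 *   cs_close(&handle);
 */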
1823 | | |
1824 | | /// Fills cs_detail with the data of the operand. |
1825 | | /// This function handles operands whose original printer function is a template
1826 | | /// with one argument. |
1827 | | void AArch64_add_cs_detail_1(MCInst *MI, aarch64_op_group op_group, |
1828 | | unsigned OpNum, uint64_t temp_arg_0) |
1829 | 183k | { |
1830 | 183k | if (!add_cs_detail_begin(MI, OpNum)) |
1831 | 0 | return; |
1832 | 183k | switch (op_group) { |
1833 | 0 | default: |
1834 | 0 | printf("ERROR: Operand group %d not handled!\n", op_group); |
1835 | 0 | CS_ASSERT_RET(0); |
1836 | 60 | case AArch64_OP_GROUP_GPRSeqPairsClassOperand_32: |
1837 | 861 | case AArch64_OP_GROUP_GPRSeqPairsClassOperand_64: { |
1838 | 861 | unsigned size = temp_arg_0; |
1839 | 861 | unsigned Reg = MCInst_getOpVal(MI, (OpNum)); |
1840 | | |
1841 | 861 | unsigned Sube = (size == 32) ? AArch64_sube32 : AArch64_sube64; |
1842 | 861 | unsigned Subo = (size == 32) ? AArch64_subo32 : AArch64_subo64; |
1843 | | |
1844 | 861 | unsigned Even = MCRegisterInfo_getSubReg(MI->MRI, Reg, Sube); |
1845 | 861 | unsigned Odd = MCRegisterInfo_getSubReg(MI->MRI, Reg, Subo); |
1846 | 861 | AArch64_set_detail_op_reg(MI, OpNum, Even); |
1847 | 861 | AArch64_set_detail_op_reg(MI, OpNum, Odd); |
1848 | 861 | break; |
1849 | 60 | } |
1850 | 528 | case AArch64_OP_GROUP_Imm8OptLsl_int16_t: |
1851 | 696 | case AArch64_OP_GROUP_Imm8OptLsl_int32_t: |
1852 | 891 | case AArch64_OP_GROUP_Imm8OptLsl_int64_t: |
1853 | 1.02k | case AArch64_OP_GROUP_Imm8OptLsl_int8_t: |
1854 | 1.39k | case AArch64_OP_GROUP_Imm8OptLsl_uint16_t: |
1855 | 1.99k | case AArch64_OP_GROUP_Imm8OptLsl_uint32_t: |
1856 | 2.33k | case AArch64_OP_GROUP_Imm8OptLsl_uint64_t: |
1857 | 2.38k | case AArch64_OP_GROUP_Imm8OptLsl_uint8_t: { |
1858 | 2.38k | unsigned UnscaledVal = MCInst_getOpVal(MI, (OpNum)); |
1859 | 2.38k | unsigned Shift = MCInst_getOpVal(MI, (OpNum + 1)); |
1860 | | |
1861 | 2.38k | if ((UnscaledVal == 0) && |
1862 | 2.38k | (AArch64_AM_getShiftValue(Shift) != 0)) { |
1863 | 265 | AArch64_set_detail_op_imm(MI, OpNum, AARCH64_OP_IMM, |
1864 | 265 | UnscaledVal); |
1865 | | // Shift is handled in printShifter() |
1866 | 265 | break; |
1867 | 265 | } |
1868 | | |
1869 | 2.12k | #define SCALE_SET(T) \ |
1870 | 2.12k | do { \ |
1871 | 2.12k | T Val; \ |
1872 | 2.12k | if (CHAR(T) == 'i') /* Signed */ \ |
1873 | 2.12k | Val = (int8_t)UnscaledVal * \ |
1874 | 918 | (1 << AArch64_AM_getShiftValue(Shift)); \ |
1875 | 2.12k | else \ |
1876 | 2.12k | Val = (uint8_t)UnscaledVal * \ |
1877 | 1.20k | (1 << AArch64_AM_getShiftValue(Shift)); \ |
1878 | 2.12k | AArch64_set_detail_op_imm(MI, OpNum, AARCH64_OP_IMM, Val); \ |
1879 | 2.12k | } while (0) |
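// Worked example (editorial): for Imm8OptLsl_uint16_t with UnscaledVal = 16
// and a shift encoding of LSL #8, AArch64_AM_getShiftValue(Shift) == 8, so
// SCALE_SET(uint16_t) stores 16 * (1 << 8) == 4096 as the immediate operand.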
1880 | | |
1881 | 2.12k | switch (op_group) { |
1882 | 0 | default: |
1883 | 0 | CS_ASSERT_RET(0 && |
1884 | 0 | "Operand group for Imm8OptLsl not handled."); |
1885 | 505 | case AArch64_OP_GROUP_Imm8OptLsl_int16_t: { |
1886 | 505 | SCALE_SET(int16_t); |
1887 | 505 | break; |
1888 | 0 | } |
1889 | 96 | case AArch64_OP_GROUP_Imm8OptLsl_int32_t: { |
1890 | 96 | SCALE_SET(int32_t); |
1891 | 96 | break; |
1892 | 0 | } |
1893 | 184 | case AArch64_OP_GROUP_Imm8OptLsl_int64_t: { |
1894 | 184 | SCALE_SET(int64_t); |
1895 | 184 | break; |
1896 | 0 | } |
1897 | 133 | case AArch64_OP_GROUP_Imm8OptLsl_int8_t: { |
1898 | 133 | SCALE_SET(int8_t); |
1899 | 133 | break; |
1900 | 0 | } |
1901 | 344 | case AArch64_OP_GROUP_Imm8OptLsl_uint16_t: { |
1902 | 344 | SCALE_SET(uint16_t); |
1903 | 344 | break; |
1904 | 0 | } |
1905 | 531 | case AArch64_OP_GROUP_Imm8OptLsl_uint32_t: { |
1906 | 531 | SCALE_SET(uint32_t); |
1907 | 531 | break; |
1908 | 0 | } |
1909 | 280 | case AArch64_OP_GROUP_Imm8OptLsl_uint64_t: { |
1910 | 280 | SCALE_SET(uint64_t); |
1911 | 280 | break; |
1912 | 0 | } |
1913 | 47 | case AArch64_OP_GROUP_Imm8OptLsl_uint8_t: { |
1914 | 47 | SCALE_SET(uint8_t); |
1915 | 47 | break; |
1916 | 0 | } |
1917 | 2.12k | } |
1918 | 2.12k | break; |
1919 | 2.12k | } |
1920 | 3.79k | case AArch64_OP_GROUP_ImmScale_16: |
1921 | 4.99k | case AArch64_OP_GROUP_ImmScale_2: |
1922 | 5.05k | case AArch64_OP_GROUP_ImmScale_3: |
1923 | 5.09k | case AArch64_OP_GROUP_ImmScale_32: |
1924 | 12.1k | case AArch64_OP_GROUP_ImmScale_4: |
1925 | 17.0k | case AArch64_OP_GROUP_ImmScale_8: { |
1926 | 17.0k | unsigned Scale = temp_arg_0; |
1927 | 17.0k | AArch64_set_detail_op_imm(MI, OpNum, AARCH64_OP_IMM, |
1928 | 17.0k | Scale * MCInst_getOpVal(MI, OpNum)); |
1929 | 17.0k | break; |
1930 | 12.1k | } |
1931 | 655 | case AArch64_OP_GROUP_LogicalImm_int16_t: |
1932 | 2.00k | case AArch64_OP_GROUP_LogicalImm_int32_t: |
1933 | 3.34k | case AArch64_OP_GROUP_LogicalImm_int64_t: |
1934 | 5.35k | case AArch64_OP_GROUP_LogicalImm_int8_t: { |
1935 | 5.35k | unsigned TypeSize = temp_arg_0; |
1936 | 5.35k | uint64_t Val = AArch64_AM_decodeLogicalImmediate( |
1937 | 5.35k | MCInst_getOpVal(MI, OpNum), 8 * TypeSize); |
1938 | 5.35k | AArch64_set_detail_op_imm(MI, OpNum, AARCH64_OP_IMM, Val); |
1939 | 5.35k | break; |
1940 | 3.34k | } |
1941 | 58 | case AArch64_OP_GROUP_Matrix_0: |
1942 | 1.63k | case AArch64_OP_GROUP_Matrix_16: |
1943 | 3.88k | case AArch64_OP_GROUP_Matrix_32: |
1944 | 5.21k | case AArch64_OP_GROUP_Matrix_64: { |
1945 | 5.21k | unsigned EltSize = temp_arg_0; |
1946 | 5.21k | AArch64_set_detail_op_sme(MI, OpNum, AARCH64_SME_MATRIX_TILE, |
1947 | 5.21k | (AArch64Layout_VectorLayout)EltSize); |
1948 | 5.21k | break; |
1949 | 3.88k | } |
1950 | 0 | case AArch64_OP_GROUP_MatrixIndex_0: |
1951 | 5.14k | case AArch64_OP_GROUP_MatrixIndex_1: |
1952 | 5.31k | case AArch64_OP_GROUP_MatrixIndex_8: { |
1953 | 5.31k | unsigned scale = temp_arg_0; |
1954 | 5.31k | if (AArch64_get_detail_op(MI, 0)->type == |
1955 | 5.31k | AARCH64_OP_SME) { |
1956 | | // The index is part of an SME matrix |
1957 | 4.88k | AArch64_set_detail_op_sme(MI, OpNum, |
1958 | 4.88k | AARCH64_SME_MATRIX_SLICE_OFF, |
1959 | 4.88k | AARCH64LAYOUT_INVALID, |
1960 | 4.88k | (uint32_t) (MCInst_getOpVal(MI, OpNum) * scale)); |
1961 | 4.88k | } else if (AArch64_get_detail_op(MI, 0)->type == AARCH64_OP_PRED) { |
1962 | | // The index is part of a predicate |
1963 | 190 | AArch64_set_detail_op_pred(MI, OpNum); |
1964 | 240 | } else { |
1965 | | // The index is used for an SVE2 instruction. |
1966 | 240 | AArch64_set_detail_op_imm(MI, OpNum, AARCH64_OP_IMM, |
1967 | 240 | scale * MCInst_getOpVal(MI, OpNum)); |
1968 | 240 | } |
1969 | 5.31k | break; |
1970 | 5.14k | } |
1971 | 1.21k | case AArch64_OP_GROUP_MatrixTileVector_0: |
1972 | 3.66k | case AArch64_OP_GROUP_MatrixTileVector_1: { |
1973 | 3.66k | bool isVertical = temp_arg_0; |
1974 | 3.66k | const char *RegName = AArch64_LLVM_getRegisterName( |
1975 | 3.66k | MCInst_getOpVal(MI, OpNum), AArch64_NoRegAltName); |
1976 | 3.66k | const char *Dot = strstr(RegName, "."); |
1977 | 3.66k | AArch64Layout_VectorLayout vas = AARCH64LAYOUT_INVALID; |
1978 | 3.66k | if (!Dot) { |
1979 | | // The matrix dimensions are machine dependent.
1980 | | // Currently we do not differentiate between machines,
1981 | | // so we just indicate that the complete matrix is used.
1982 | 0 | vas = sme_reg_to_vas(MCInst_getOpVal(MI, OpNum)); |
1983 | 0 | } else |
1984 | 3.66k | vas = get_vl_by_suffix(Dot[1]); |
1985 | 3.66k | setup_sme_operand(MI); |
1986 | 3.66k | AArch64_set_detail_op_sme(MI, OpNum, AARCH64_SME_MATRIX_TILE, |
1987 | 3.66k | vas); |
1988 | 3.66k | AArch64_get_detail_op(MI, 0)->sme.is_vertical = isVertical; |
1989 | 3.66k | break; |
1990 | 1.21k | } |
1991 | 588 | case AArch64_OP_GROUP_PostIncOperand_1: |
1992 | 685 | case AArch64_OP_GROUP_PostIncOperand_12: |
1993 | 1.61k | case AArch64_OP_GROUP_PostIncOperand_16: |
1994 | 1.79k | case AArch64_OP_GROUP_PostIncOperand_2: |
1995 | 2.85k | case AArch64_OP_GROUP_PostIncOperand_24: |
1996 | 2.97k | case AArch64_OP_GROUP_PostIncOperand_3: |
1997 | 3.38k | case AArch64_OP_GROUP_PostIncOperand_32: |
1998 | 3.83k | case AArch64_OP_GROUP_PostIncOperand_4: |
1999 | 4.26k | case AArch64_OP_GROUP_PostIncOperand_48: |
2000 | 4.76k | case AArch64_OP_GROUP_PostIncOperand_6: |
2001 | 4.78k | case AArch64_OP_GROUP_PostIncOperand_64: |
2002 | 5.79k | case AArch64_OP_GROUP_PostIncOperand_8: { |
2003 | 5.79k | uint64_t Imm = temp_arg_0; |
2004 | 5.79k | unsigned Reg = MCInst_getOpVal(MI, OpNum); |
2005 | 5.79k | if (Reg == AArch64_XZR) { |
2006 | 0 | AArch64_get_detail_op(MI, -1)->mem.disp = Imm; |
2007 | 0 | AArch64_get_detail(MI)->post_index = true; |
2008 | 0 | AArch64_inc_op_count(MI); |
2009 | 0 | } else |
2010 | 5.79k | AArch64_set_detail_op_reg(MI, OpNum, Reg); |
2011 | 5.79k | break; |
2012 | 4.78k | } |
2013 | 3.30k | case AArch64_OP_GROUP_PredicateAsCounter_0: |
2014 | 3.38k | case AArch64_OP_GROUP_PredicateAsCounter_16: |
2015 | 3.42k | case AArch64_OP_GROUP_PredicateAsCounter_32: |
2016 | 3.53k | case AArch64_OP_GROUP_PredicateAsCounter_64: |
2017 | 3.59k | case AArch64_OP_GROUP_PredicateAsCounter_8: { |
2018 | 3.59k | unsigned EltSize = temp_arg_0; |
2019 | 3.59k | AArch64_get_detail_op(MI, 0)->vas = EltSize; |
2020 | 3.59k | AArch64_set_detail_op_reg( |
2021 | 3.59k | MI, OpNum, MCInst_getOpVal(MI, OpNum)); |
2022 | 3.59k | break; |
2023 | 3.53k | } |
2024 | 1.29k | case AArch64_OP_GROUP_PrefetchOp_0: |
2025 | 2.87k | case AArch64_OP_GROUP_PrefetchOp_1: { |
2026 | 2.87k | bool IsSVEPrefetch = (bool)temp_arg_0; |
2027 | 2.87k | unsigned prfop = MCInst_getOpVal(MI, (OpNum)); |
2028 | 2.87k | aarch64_sysop sysop = { 0 }; |
2029 | 2.87k | if (IsSVEPrefetch) { |
2030 | 1.58k | const AArch64SVEPRFM_SVEPRFM *PRFM = |
2031 | 1.58k | AArch64SVEPRFM_lookupSVEPRFMByEncoding(prfop); |
2032 | 1.58k | if (PRFM) { |
2033 | 1.28k | sysop.alias = PRFM->SysAlias; |
2034 | 1.28k | sysop.sub_type = AARCH64_OP_SVEPRFM; |
2035 | 1.28k | AArch64_set_detail_op_sys(MI, OpNum, sysop, |
2036 | 1.28k | AARCH64_OP_SYSALIAS); |
2037 | 1.28k | break; |
2038 | 1.28k | } |
2039 | 1.58k | } else { |
2040 | 1.29k | const AArch64PRFM_PRFM *PRFM = |
2041 | 1.29k | AArch64PRFM_lookupPRFMByEncoding(prfop); |
2042 | 1.29k | if (PRFM && |
2043 | 1.29k | AArch64_testFeatureList(MI->csh->mode, |
2044 | 439 | PRFM->FeaturesRequired)) { |
2045 | 439 | sysop.alias = PRFM->SysAlias; |
2046 | 439 | sysop.sub_type = AARCH64_OP_PRFM; |
2047 | 439 | AArch64_set_detail_op_sys(MI, OpNum, sysop, |
2048 | 439 | AARCH64_OP_SYSALIAS); |
2049 | 439 | break; |
2050 | 439 | } |
2051 | 1.29k | } |
2052 | 1.14k | AArch64_get_detail_op(MI, 0)->type = AARCH64_OP_IMM; |
2053 | 1.14k | AArch64_get_detail_op(MI, 0)->imm = prfop; |
2054 | 1.14k | AArch64_get_detail_op(MI, 0)->access = |
2055 | 1.14k | map_get_op_access(MI, OpNum); |
2056 | 1.14k | AArch64_inc_op_count(MI); |
2057 | 1.14k | break; |
2058 | 2.87k | } |
2059 | 340 | case AArch64_OP_GROUP_SImm_16: |
2060 | 686 | case AArch64_OP_GROUP_SImm_8: { |
2061 | 686 | AArch64_set_detail_op_imm(MI, OpNum, AARCH64_OP_IMM, |
2062 | 686 | MCInst_getOpVal(MI, OpNum)); |
2063 | 686 | break; |
2064 | 340 | } |
2065 | 202 | case AArch64_OP_GROUP_SVELogicalImm_int16_t: |
2066 | 921 | case AArch64_OP_GROUP_SVELogicalImm_int32_t: |
2067 | 1.28k | case AArch64_OP_GROUP_SVELogicalImm_int64_t: { |
2068 | | // General issue: we do not save the element type for
2069 | | // each operand, so we decode with the largest type (64 bit).
2070 | 1.28k | uint64_t Val = MCInst_getOpVal(MI, OpNum); |
2071 | 1.28k | uint64_t DecodedVal = |
2072 | 1.28k | AArch64_AM_decodeLogicalImmediate(Val, 64); |
2073 | 1.28k | AArch64_set_detail_op_imm(MI, OpNum, AARCH64_OP_IMM, |
2074 | 1.28k | DecodedVal); |
2075 | 1.28k | break; |
2076 | 921 | } |
2077 | 27.2k | case AArch64_OP_GROUP_SVERegOp_0: |
2078 | 46.3k | case AArch64_OP_GROUP_SVERegOp_b: |
2079 | 63.3k | case AArch64_OP_GROUP_SVERegOp_d: |
2080 | 84.0k | case AArch64_OP_GROUP_SVERegOp_h: |
2081 | 84.8k | case AArch64_OP_GROUP_SVERegOp_q: |
2082 | 104k | case AArch64_OP_GROUP_SVERegOp_s: { |
2083 | 104k | char Suffix = (char)temp_arg_0; |
2084 | 104k | AArch64_get_detail_op(MI, 0)->vas = get_vl_by_suffix(Suffix); |
2085 | 104k | AArch64_set_detail_op_reg(MI, OpNum, |
2086 | 104k | MCInst_getOpVal(MI, OpNum)); |
2087 | 104k | break; |
2088 | 84.8k | } |
2089 | 2.20k | case AArch64_OP_GROUP_UImm12Offset_1: |
2090 | 2.41k | case AArch64_OP_GROUP_UImm12Offset_16: |
2091 | 3.20k | case AArch64_OP_GROUP_UImm12Offset_2: |
2092 | 3.87k | case AArch64_OP_GROUP_UImm12Offset_4: |
2093 | 4.50k | case AArch64_OP_GROUP_UImm12Offset_8: { |
2094 | | // If the operand is not an immediate, it is an expression; in that case only the raw value is added (scale 1).
2095 | 4.50k | unsigned Scale = MCOperand_isImm(MCInst_getOperand(MI, OpNum)) ? temp_arg_0 : 1; |
2096 | 4.50k | AArch64_set_detail_op_imm(MI, OpNum, AARCH64_OP_IMM, |
2097 | 4.50k | Scale * MCInst_getOpVal(MI, OpNum)); |
2098 | 4.50k | break; |
2099 | 3.87k | } |
2100 | 20.2k | case AArch64_OP_GROUP_VectorIndex_1: |
2101 | 20.2k | case AArch64_OP_GROUP_VectorIndex_8: { |
2102 | 20.2k | CS_ASSERT_RET(AArch64_get_detail(MI)->op_count > 0); |
2103 | 20.2k | unsigned Scale = temp_arg_0; |
2104 | 20.2k | unsigned VIndex = Scale * MCInst_getOpVal(MI, OpNum); |
2105 | | // The index can either be for one operand, or for each operand of a list. |
2106 | 20.2k | if (!AArch64_get_detail_op(MI, -1)->is_list_member) { |
2107 | 9.84k | AArch64_get_detail_op(MI, -1)->vector_index = VIndex; |
2108 | 9.84k | break; |
2109 | 9.84k | } |
2110 | 37.8k | for (int i = AArch64_get_detail(MI)->op_count - 1; i >= 0; |
2111 | 27.4k | --i) { |
2112 | 27.4k | if (!AArch64_get_detail(MI)->operands[i].is_list_member) |
2113 | 0 | break; |
2114 | 27.4k | AArch64_get_detail(MI)->operands[i].vector_index = |
2115 | 27.4k | VIndex; |
2116 | 27.4k | } |
2117 | 10.3k | break; |
2118 | 20.2k | } |
2119 | 11 | case AArch64_OP_GROUP_ZPRasFPR_128: |
2120 | 143 | case AArch64_OP_GROUP_ZPRasFPR_16: |
2121 | 300 | case AArch64_OP_GROUP_ZPRasFPR_32: |
2122 | 681 | case AArch64_OP_GROUP_ZPRasFPR_64: |
2123 | 709 | case AArch64_OP_GROUP_ZPRasFPR_8: { |
2124 | 709 | unsigned Base = AArch64_NoRegister; |
2125 | 709 | unsigned Width = temp_arg_0; |
2126 | 709 | switch (Width) { |
2127 | 28 | case 8: |
2128 | 28 | Base = AArch64_B0; |
2129 | 28 | break; |
2130 | 132 | case 16: |
2131 | 132 | Base = AArch64_H0; |
2132 | 132 | break; |
2133 | 157 | case 32: |
2134 | 157 | Base = AArch64_S0; |
2135 | 157 | break; |
2136 | 381 | case 64: |
2137 | 381 | Base = AArch64_D0; |
2138 | 381 | break; |
2139 | 11 | case 128: |
2140 | 11 | Base = AArch64_Q0; |
2141 | 11 | break; |
2142 | 0 | default: |
2143 | 0 | CS_ASSERT_RET(0 && "Unsupported width"); |
2144 | 709 | } |
2145 | 709 | unsigned Reg = MCInst_getOpVal(MI, (OpNum)); |
2146 | 709 | AArch64_set_detail_op_reg(MI, OpNum, Reg - AArch64_Z0 + Base); |
2147 | 709 | break; |
2148 | 709 | } |
2149 | 183k | } |
2150 | 183k | } |
2151 | | |
2152 | | /// Fills cs_detail with the data of the operand. |
2153 | | /// This function handles operands whose original printer function is a template
2154 | | /// with two arguments. |
2155 | | void AArch64_add_cs_detail_2(MCInst *MI, aarch64_op_group op_group, |
2156 | | unsigned OpNum, uint64_t temp_arg_0, |
2157 | | uint64_t temp_arg_1) |
2158 | 42.0k | { |
2159 | 42.0k | if (!add_cs_detail_begin(MI, OpNum)) |
2160 | 0 | return; |
2161 | 42.0k | switch (op_group) { |
2162 | 0 | default: |
2163 | 0 | printf("ERROR: Operand group %d not handled!\n", op_group); |
2164 | 0 | CS_ASSERT_RET(0); |
2165 | 439 | case AArch64_OP_GROUP_ComplexRotationOp_180_90: |
2166 | 1.43k | case AArch64_OP_GROUP_ComplexRotationOp_90_0: { |
2167 | 1.43k | unsigned Angle = temp_arg_0; |
2168 | 1.43k | unsigned Remainder = temp_arg_1; |
2169 | 1.43k | unsigned Imm = (MCInst_getOpVal(MI, OpNum) * Angle) + Remainder; |
2170 | 1.43k | AArch64_set_detail_op_imm(MI, OpNum, AARCH64_OP_IMM, Imm); |
2171 | 1.43k | break; |
2172 | 439 | } |
2173 | 48 | case AArch64_OP_GROUP_ExactFPImm_AArch64ExactFPImm_half_AArch64ExactFPImm_one: |
2174 | 822 | case AArch64_OP_GROUP_ExactFPImm_AArch64ExactFPImm_half_AArch64ExactFPImm_two: |
2175 | 1.15k | case AArch64_OP_GROUP_ExactFPImm_AArch64ExactFPImm_zero_AArch64ExactFPImm_one: { |
2176 | 1.15k | aarch64_exactfpimm ImmIs0 = temp_arg_0; |
2177 | 1.15k | aarch64_exactfpimm ImmIs1 = temp_arg_1; |
2178 | 1.15k | const AArch64ExactFPImm_ExactFPImm *Imm0Desc = |
2179 | 1.15k | AArch64ExactFPImm_lookupExactFPImmByEnum(ImmIs0); |
2180 | 1.15k | const AArch64ExactFPImm_ExactFPImm *Imm1Desc = |
2181 | 1.15k | AArch64ExactFPImm_lookupExactFPImmByEnum(ImmIs1); |
2182 | 1.15k | unsigned Val = MCInst_getOpVal(MI, (OpNum)); |
2183 | 1.15k | aarch64_sysop sysop = { 0 }; |
2184 | 1.15k | sysop.imm = Val ? Imm1Desc->SysImm : Imm0Desc->SysImm; |
2185 | 1.15k | sysop.sub_type = AARCH64_OP_EXACTFPIMM; |
2186 | 1.15k | AArch64_set_detail_op_sys(MI, OpNum, sysop, AARCH64_OP_SYSIMM); |
2187 | 1.15k | break; |
2188 | 822 | } |
2189 | 2.40k | case AArch64_OP_GROUP_ImmRangeScale_2_1: |
2190 | 3.99k | case AArch64_OP_GROUP_ImmRangeScale_4_3: { |
2191 | 3.99k | uint64_t Scale = temp_arg_0; |
2192 | 3.99k | uint64_t Offset = temp_arg_1; |
2193 | 3.99k | unsigned FirstImm = Scale * MCInst_getOpVal(MI, (OpNum)); |
2194 | 3.99k | AArch64_set_detail_op_imm_range(MI, OpNum, FirstImm, FirstImm + Offset); |
2195 | 3.99k | break; |
2196 | 2.40k | } |
2197 | 7 | case AArch64_OP_GROUP_MemExtend_w_128: |
2198 | 213 | case AArch64_OP_GROUP_MemExtend_w_16: |
2199 | 328 | case AArch64_OP_GROUP_MemExtend_w_32: |
2200 | 404 | case AArch64_OP_GROUP_MemExtend_w_64: |
2201 | 1.31k | case AArch64_OP_GROUP_MemExtend_w_8: |
2202 | 1.34k | case AArch64_OP_GROUP_MemExtend_x_128: |
2203 | 1.95k | case AArch64_OP_GROUP_MemExtend_x_16: |
2204 | 2.13k | case AArch64_OP_GROUP_MemExtend_x_32: |
2205 | 2.21k | case AArch64_OP_GROUP_MemExtend_x_64: |
2206 | 2.90k | case AArch64_OP_GROUP_MemExtend_x_8: { |
2207 | 2.90k | char SrcRegKind = (char)temp_arg_0; |
2208 | 2.90k | unsigned ExtWidth = temp_arg_1; |
2209 | 2.90k | bool SignExtend = MCInst_getOpVal(MI, OpNum); |
2210 | 2.90k | bool DoShift = MCInst_getOpVal(MI, OpNum + 1); |
2211 | 2.90k | AArch64_set_detail_shift_ext(MI, OpNum, SignExtend, DoShift, |
2212 | 2.90k | ExtWidth, SrcRegKind); |
2213 | 2.90k | break; |
2214 | 2.21k | } |
2215 | 6.18k | case AArch64_OP_GROUP_TypedVectorList_0_b: |
2216 | 16.3k | case AArch64_OP_GROUP_TypedVectorList_0_d: |
2217 | 21.8k | case AArch64_OP_GROUP_TypedVectorList_0_h: |
2218 | 22.4k | case AArch64_OP_GROUP_TypedVectorList_0_q: |
2219 | 28.1k | case AArch64_OP_GROUP_TypedVectorList_0_s: |
2220 | 28.1k | case AArch64_OP_GROUP_TypedVectorList_0_0: |
2221 | 29.6k | case AArch64_OP_GROUP_TypedVectorList_16_b: |
2222 | 30.0k | case AArch64_OP_GROUP_TypedVectorList_1_d: |
2223 | 30.8k | case AArch64_OP_GROUP_TypedVectorList_2_d: |
2224 | 31.2k | case AArch64_OP_GROUP_TypedVectorList_2_s: |
2225 | 31.6k | case AArch64_OP_GROUP_TypedVectorList_4_h: |
2226 | 31.9k | case AArch64_OP_GROUP_TypedVectorList_4_s: |
2227 | 32.1k | case AArch64_OP_GROUP_TypedVectorList_8_b: |
2228 | 32.5k | case AArch64_OP_GROUP_TypedVectorList_8_h: { |
2229 | 32.5k | uint8_t NumLanes = (uint8_t)temp_arg_0; |
2230 | 32.5k | char LaneKind = (char)temp_arg_1; |
2231 | 32.5k | uint16_t Pair = ((NumLanes << 8) | LaneKind); |
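// Worked example (editorial): for a ".4h" vector list NumLanes == 4 and
// LaneKind == 'h', so Pair == ((4 << 8) | 'h') and the switch below selects
// AARCH64LAYOUT_VL_4H.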
2232 | | |
2233 | 32.5k | AArch64Layout_VectorLayout vas = AARCH64LAYOUT_INVALID; |
2234 | 32.5k | switch (Pair) { |
2235 | 0 | default: |
2236 | 0 | printf("Typed vector list with NumLanes = %d and LaneKind = %c not handled.\n", |
2237 | 0 | NumLanes, LaneKind); |
2238 | 0 | CS_ASSERT_RET(0); |
2239 | 238 | case ((8 << 8) | 'b'): |
2240 | 238 | vas = AARCH64LAYOUT_VL_8B; |
2241 | 238 | break; |
2242 | 397 | case ((4 << 8) | 'h'): |
2243 | 397 | vas = AARCH64LAYOUT_VL_4H; |
2244 | 397 | break; |
2245 | 427 | case ((2 << 8) | 's'): |
2246 | 427 | vas = AARCH64LAYOUT_VL_2S; |
2247 | 427 | break; |
2248 | 426 | case ((1 << 8) | 'd'): |
2249 | 426 | vas = AARCH64LAYOUT_VL_1D; |
2250 | 426 | break; |
2251 | 1.45k | case ((16 << 8) | 'b'): |
2252 | 1.45k | vas = AARCH64LAYOUT_VL_16B; |
2253 | 1.45k | break; |
2254 | 388 | case ((8 << 8) | 'h'): |
2255 | 388 | vas = AARCH64LAYOUT_VL_8H; |
2256 | 388 | break; |
2257 | 223 | case ((4 << 8) | 's'): |
2258 | 223 | vas = AARCH64LAYOUT_VL_4S; |
2259 | 223 | break; |
2260 | 777 | case ((2 << 8) | 'd'): |
2261 | 777 | vas = AARCH64LAYOUT_VL_2D; |
2262 | 777 | break; |
2263 | 6.18k | case 'b': |
2264 | 6.18k | vas = AARCH64LAYOUT_VL_B; |
2265 | 6.18k | break; |
2266 | 5.43k | case 'h': |
2267 | 5.43k | vas = AARCH64LAYOUT_VL_H; |
2268 | 5.43k | break; |
2269 | 5.65k | case 's': |
2270 | 5.65k | vas = AARCH64LAYOUT_VL_S; |
2271 | 5.65k | break; |
2272 | 10.2k | case 'd': |
2273 | 10.2k | vas = AARCH64LAYOUT_VL_D; |
2274 | 10.2k | break; |
2275 | 648 | case 'q': |
2276 | 648 | vas = AARCH64LAYOUT_VL_Q; |
2277 | 648 | break; |
2278 | 72 | case '0': |
2279 | | // Implicitly Typed register |
2280 | 72 | break; |
2281 | 32.5k | } |
2282 | | |
2283 | 32.5k | unsigned Reg = MCOperand_getReg(MCInst_getOperand(MI, OpNum)); |
2284 | 32.5k | unsigned NumRegs = get_vec_list_num_regs(MI, Reg); |
2285 | 32.5k | unsigned Stride = get_vec_list_stride(MI, Reg); |
2286 | 32.5k | Reg = get_vec_list_first_reg(MI, Reg); |
2287 | | |
2288 | 32.5k | if ((MCRegisterClass_contains( |
2289 | 32.5k | MCRegisterInfo_getRegClass(MI->MRI, |
2290 | 32.5k | AArch64_ZPRRegClassID), |
2291 | 32.5k | Reg) || |
2292 | 32.5k | MCRegisterClass_contains( |
2293 | 15.0k | MCRegisterInfo_getRegClass(MI->MRI, |
2294 | 15.0k | AArch64_PPRRegClassID), |
2295 | 15.0k | Reg)) && |
2296 | 32.5k | NumRegs > 1 && Stride == 1 && |
2297 | 32.5k | Reg < getNextVectorRegister(Reg, NumRegs - 1)) { |
2298 | 9.36k | AArch64_get_detail_op(MI, 0)->is_list_member = true; |
2299 | 9.36k | AArch64_get_detail_op(MI, 0)->vas = vas; |
2300 | 9.36k | AArch64_set_detail_op_reg(MI, OpNum, Reg); |
2301 | 9.36k | if (NumRegs > 1) { |
2302 | | // Add all registers of the list to the details. |
2303 | 25.8k | for (size_t i = 0; i < NumRegs - 1; ++i) { |
2304 | 16.4k | AArch64_get_detail_op(MI, 0)->is_list_member = |
2305 | 16.4k | true; |
2306 | 16.4k | AArch64_get_detail_op(MI, 0)->vas = vas; |
2307 | 16.4k | AArch64_set_detail_op_reg( |
2308 | 16.4k | MI, OpNum, |
2309 | 16.4k | getNextVectorRegister(Reg + i, 1)); |
2310 | 16.4k | } |
2311 | 9.36k | } |
2312 | 23.1k | } else { |
2313 | 74.9k | for (unsigned i = 0; i < NumRegs; |
2314 | 51.7k | ++i, Reg = getNextVectorRegister(Reg, Stride)) { |
2315 | 51.7k | if (!(MCRegisterClass_contains( |
2316 | 51.7k | MCRegisterInfo_getRegClass( |
2317 | 51.7k | MI->MRI, AArch64_ZPRRegClassID), |
2318 | 51.7k | Reg) || |
2319 | 51.7k | MCRegisterClass_contains( |
2320 | 39.5k | MCRegisterInfo_getRegClass( |
2321 | 39.5k | MI->MRI, AArch64_PPRRegClassID), |
2322 | 39.5k | Reg))) { |
2323 | 39.5k | AArch64_get_detail_op(MI, 0)->is_vreg = true; |
2324 | 39.5k | } |
2325 | 51.7k | AArch64_get_detail_op(MI, 0)->is_list_member = |
2326 | 51.7k | true; |
2327 | 51.7k | AArch64_get_detail_op(MI, 0)->vas = vas; |
2328 | 51.7k | AArch64_set_detail_op_reg(MI, OpNum, Reg); |
2329 | 51.7k | } |
2330 | 23.1k | } |
2331 | 32.5k | } |
2332 | 42.0k | } |
2333 | 42.0k | } |
2334 | | |
2335 | | /// Fills cs_detail with the data of the operand. |
2336 | | /// This function handles operands whose original printer function is a template
2337 | | /// with four arguments. |
2338 | | void AArch64_add_cs_detail_4(MCInst *MI, aarch64_op_group op_group, |
2339 | | unsigned OpNum, uint64_t temp_arg_0, |
2340 | | uint64_t temp_arg_1, uint64_t temp_arg_2, |
2341 | | uint64_t temp_arg_3) |
2342 | 7.61k | { |
2343 | 7.61k | if (!add_cs_detail_begin(MI, OpNum)) |
2344 | 0 | return; |
2345 | 7.61k | switch (op_group) { |
2346 | 0 | default: |
2347 | 0 | printf("ERROR: Operand group %d not handled!\n", op_group); |
2348 | 0 | CS_ASSERT_RET(0); |
2349 | 502 | case AArch64_OP_GROUP_RegWithShiftExtend_0_128_x_0: |
2350 | 635 | case AArch64_OP_GROUP_RegWithShiftExtend_0_16_w_d: |
2351 | 751 | case AArch64_OP_GROUP_RegWithShiftExtend_0_16_w_s: |
2352 | 1.21k | case AArch64_OP_GROUP_RegWithShiftExtend_0_16_x_0: |
2353 | 1.32k | case AArch64_OP_GROUP_RegWithShiftExtend_0_16_x_d: |
2354 | 1.34k | case AArch64_OP_GROUP_RegWithShiftExtend_0_16_x_s: |
2355 | 1.59k | case AArch64_OP_GROUP_RegWithShiftExtend_0_32_w_d: |
2356 | 1.70k | case AArch64_OP_GROUP_RegWithShiftExtend_0_32_w_s: |
2357 | 2.08k | case AArch64_OP_GROUP_RegWithShiftExtend_0_32_x_0: |
2358 | 2.23k | case AArch64_OP_GROUP_RegWithShiftExtend_0_32_x_d: |
2359 | 2.29k | case AArch64_OP_GROUP_RegWithShiftExtend_0_32_x_s: |
2360 | 2.52k | case AArch64_OP_GROUP_RegWithShiftExtend_0_64_w_d: |
2361 | 2.55k | case AArch64_OP_GROUP_RegWithShiftExtend_0_64_w_s: |
2362 | 3.40k | case AArch64_OP_GROUP_RegWithShiftExtend_0_64_x_0: |
2363 | 3.47k | case AArch64_OP_GROUP_RegWithShiftExtend_0_64_x_d: |
2364 | 3.48k | case AArch64_OP_GROUP_RegWithShiftExtend_0_64_x_s: |
2365 | 4.44k | case AArch64_OP_GROUP_RegWithShiftExtend_0_8_w_d: |
2366 | 4.60k | case AArch64_OP_GROUP_RegWithShiftExtend_0_8_w_s: |
2367 | 5.51k | case AArch64_OP_GROUP_RegWithShiftExtend_0_8_x_0: |
2368 | 6.41k | case AArch64_OP_GROUP_RegWithShiftExtend_0_8_x_d: |
2369 | 6.42k | case AArch64_OP_GROUP_RegWithShiftExtend_0_8_x_s: |
2370 | 6.54k | case AArch64_OP_GROUP_RegWithShiftExtend_1_16_w_d: |
2371 | 6.60k | case AArch64_OP_GROUP_RegWithShiftExtend_1_16_w_s: |
2372 | 6.82k | case AArch64_OP_GROUP_RegWithShiftExtend_1_32_w_d: |
2373 | 6.87k | case AArch64_OP_GROUP_RegWithShiftExtend_1_32_w_s: |
2374 | 6.97k | case AArch64_OP_GROUP_RegWithShiftExtend_1_64_w_d: |
2375 | 7.03k | case AArch64_OP_GROUP_RegWithShiftExtend_1_64_w_s: |
2376 | 7.44k | case AArch64_OP_GROUP_RegWithShiftExtend_1_8_w_d: |
2377 | 7.61k | case AArch64_OP_GROUP_RegWithShiftExtend_1_8_w_s: { |
2378 | | // signed (s) and unsigned (u) extend |
2379 | 7.61k | bool SignExtend = (bool)temp_arg_0; |
2380 | | // Extend width |
2381 | 7.61k | int ExtWidth = (int)temp_arg_1; |
2382 | | // w = word, x = doubleword |
2383 | 7.61k | char SrcRegKind = (char)temp_arg_2; |
2384 | | // Vector register element/arrangement specifier: |
2385 | | // B = 8bit, H = 16bit, S = 32bit, D = 64bit, Q = 128bit |
2386 | | // No suffix = complete register |
2387 | | // According to: ARM Reference manual supplement, doc number: DDI 0584 |
2388 | 7.61k | char Suffix = (char)temp_arg_3; |
2389 | | |
2390 | | // Register will be added in printOperand() afterwards. Here we only handle |
2391 | | // shift and extend. |
2392 | 7.61k | AArch64_get_detail_op(MI, -1)->vas = get_vl_by_suffix(Suffix); |
2393 | | |
2394 | 7.61k | bool DoShift = ExtWidth != 8; |
2395 | 7.61k | if (!(SignExtend || DoShift || SrcRegKind == 'w')) |
2396 | 1.82k | return; |
2397 | | |
2398 | 5.78k | AArch64_set_detail_shift_ext(MI, OpNum, SignExtend, DoShift, |
2399 | 5.78k | ExtWidth, SrcRegKind); |
2400 | 5.78k | break; |
2401 | 7.61k | } |
2402 | 7.61k | } |
2403 | 7.61k | } |
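/* Editorial note: the template arguments decoded above appear to mirror the
 * suffix of the operand group name, e.g. RegWithShiftExtend_0_32_w_s would
 * mean SignExtend = 0, ExtWidth = 32, SrcRegKind = 'w', Suffix = 's'. In that
 * case the register gets vas = AARCH64LAYOUT_VL_S, DoShift is true
 * (ExtWidth != 8), and AArch64_set_detail_shift_ext() records the
 * extend/shift on it. */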
2404 | | |
2405 | | /// Adds an AArch64 register operand at position OpNum and increases the op_count by
2406 | | /// one. |
2407 | | void AArch64_set_detail_op_reg(MCInst *MI, unsigned OpNum, aarch64_reg Reg) |
2408 | 447k | { |
2409 | 447k | if (!detail_is_set(MI)) |
2410 | 0 | return; |
2411 | 447k | AArch64_check_safe_inc(MI); |
2412 | | |
2413 | 447k | if (Reg == AARCH64_REG_ZA || |
2414 | 447k | (Reg >= AARCH64_REG_ZAB0 && Reg < AARCH64_REG_ZT0)) { |
2415 | | // A tile register should be treated as SME operand. |
2416 | 0 | AArch64_set_detail_op_sme(MI, OpNum, AARCH64_SME_MATRIX_TILE, |
2417 | 0 | sme_reg_to_vas(Reg)); |
2418 | 0 | return; |
2419 | 447k | } else if (((Reg >= AARCH64_REG_P0) && (Reg <= AARCH64_REG_P15)) || |
2420 | 447k | ((Reg >= AARCH64_REG_PN0) && (Reg <= AARCH64_REG_PN15))) { |
2421 | | // SME/SVE predicate register. |
2422 | 35.2k | AArch64_set_detail_op_pred(MI, OpNum); |
2423 | 35.2k | return; |
2424 | 412k | } else if (AArch64_get_detail(MI)->is_doing_sme) { |
2425 | 9.06k | CS_ASSERT_RET(map_get_op_type(MI, OpNum) & CS_OP_BOUND); |
2426 | 9.06k | if (AArch64_get_detail_op(MI, 0)->type == AARCH64_OP_SME) { |
2427 | 8.87k | AArch64_set_detail_op_sme(MI, OpNum, |
2428 | 8.87k | AARCH64_SME_MATRIX_SLICE_REG, |
2429 | 8.87k | AARCH64LAYOUT_INVALID); |
2430 | 8.87k | } else if (AArch64_get_detail_op(MI, 0)->type == AARCH64_OP_PRED) { |
2431 | 190 | AArch64_set_detail_op_pred(MI, OpNum); |
2432 | 190 | } else { |
2433 | 0 | CS_ASSERT_RET(0 && "Unknown SME/SVE operand type");
2434 | 0 | } |
2435 | 9.06k | return; |
2436 | 9.06k | } |
2437 | 403k | if (map_get_op_type(MI, OpNum) & CS_OP_MEM) { |
2438 | 75.8k | AArch64_set_detail_op_mem(MI, OpNum, Reg); |
2439 | 75.8k | return; |
2440 | 75.8k | } |
2441 | | |
2442 | 327k | CS_ASSERT_RET(!(map_get_op_type(MI, OpNum) & CS_OP_BOUND)); |
2443 | 327k | CS_ASSERT_RET(!(map_get_op_type(MI, OpNum) & CS_OP_MEM)); |
2444 | 327k | CS_ASSERT_RET(map_get_op_type(MI, OpNum) == CS_OP_REG); |
2445 | | |
2446 | 327k | AArch64_get_detail_op(MI, 0)->type = AARCH64_OP_REG; |
2447 | 327k | AArch64_get_detail_op(MI, 0)->reg = Reg; |
2448 | 327k | AArch64_get_detail_op(MI, 0)->access = map_get_op_access(MI, OpNum); |
2449 | 327k | AArch64_inc_op_count(MI); |
2450 | 327k | } |
2451 | | |
2452 | | /// Check if the previous operand is a memory operand |
2453 | | /// with only the base register set AND if this base register |
2454 | | /// is write-back. |
2455 | | /// This indicates the following immediate is a post-indexed |
2456 | | /// memory offset. |
2457 | 70.1k | static bool prev_is_membase_wb(MCInst *MI) { |
2458 | 70.1k | return AArch64_get_detail(MI)->op_count > 0 && |
2459 | 70.1k | AArch64_get_detail_op(MI, -1)->type == AARCH64_OP_MEM && |
2460 | 70.1k | AArch64_get_detail_op(MI, -1)->mem.disp == 0 && |
2461 | 70.1k | get_detail(MI)->writeback; |
2462 | 70.1k | } |
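// Example (editorial): for a post-indexed load such as "ldr x0, [x1], #8",
// the base-only memory operand for x1 is marked writeback, so the trailing
// immediate 8 is routed through AArch64_set_detail_op_mem() and stored as a
// memory displacement instead of becoming a separate immediate operand
// (see AArch64_set_detail_op_imm() below).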
2463 | | |
2464 | | /// Adds an immediate AArch64 operand at position OpNum and increases the op_count |
2465 | | /// by one. |
2466 | | void AArch64_set_detail_op_imm(MCInst *MI, unsigned OpNum, |
2467 | | aarch64_op_type ImmType, int64_t Imm) |
2468 | 97.5k | { |
2469 | 97.5k | if (!detail_is_set(MI)) |
2470 | 0 | return; |
2471 | 97.5k | AArch64_check_safe_inc(MI); |
2472 | | |
2473 | 97.5k | if (AArch64_get_detail(MI)->is_doing_sme) { |
2474 | 0 | CS_ASSERT_RET(map_get_op_type(MI, OpNum) & CS_OP_BOUND); |
2475 | 0 | if (AArch64_get_detail_op(MI, 0)->type == AARCH64_OP_SME) { |
2476 | 0 | AArch64_set_detail_op_sme(MI, OpNum, |
2477 | 0 | AARCH64_SME_MATRIX_SLICE_OFF, |
2478 | 0 | AARCH64LAYOUT_INVALID, (uint32_t) 1); |
2479 | 0 | } else if (AArch64_get_detail_op(MI, 0)->type == AARCH64_OP_PRED) { |
2480 | 0 | AArch64_set_detail_op_pred(MI, OpNum); |
2481 | 0 | } else { |
2482 | 0 | CS_ASSERT_RET(0 && "Unknown SME operand type");
2483 | 0 | } |
2484 | 0 | return; |
2485 | 0 | } |
2486 | 97.5k | if (map_get_op_type(MI, OpNum) & CS_OP_MEM || prev_is_membase_wb(MI)) { |
2487 | 33.7k | AArch64_set_detail_op_mem(MI, OpNum, Imm); |
2488 | 33.7k | return; |
2489 | 33.7k | } |
2490 | | |
2491 | 63.7k | CS_ASSERT_RET(!(map_get_op_type(MI, OpNum) & CS_OP_MEM)); |
2492 | 63.7k | CS_ASSERT_RET((map_get_op_type(MI, OpNum) & ~CS_OP_BOUND) == CS_OP_IMM); |
2493 | 63.7k | CS_ASSERT_RET(ImmType == AARCH64_OP_IMM || ImmType == AARCH64_OP_CIMM); |
2494 | | |
2495 | 63.7k | AArch64_get_detail_op(MI, 0)->type = ImmType; |
2496 | 63.7k | AArch64_get_detail_op(MI, 0)->imm = Imm; |
2497 | 63.7k | AArch64_get_detail_op(MI, 0)->access = map_get_op_access(MI, OpNum); |
2498 | 63.7k | AArch64_inc_op_count(MI); |
2499 | 63.7k | } |
2500 | | |
2501 | | void AArch64_set_detail_op_imm_range(MCInst *MI, unsigned OpNum, |
2502 | | uint32_t FirstImm, uint32_t Offset) |
2503 | 3.99k | { |
2504 | 3.99k | if (!detail_is_set(MI)) |
2505 | 0 | return; |
2506 | 3.99k | AArch64_check_safe_inc(MI); |
2507 | | |
2508 | 3.99k | if (AArch64_get_detail(MI)->is_doing_sme) { |
2509 | 3.99k | CS_ASSERT_RET(map_get_op_type(MI, OpNum) & CS_OP_BOUND); |
2510 | 3.99k | if (AArch64_get_detail_op(MI, 0)->type == AARCH64_OP_SME) { |
2511 | 3.99k | AArch64_set_detail_op_sme(MI, OpNum, |
2512 | 3.99k | AARCH64_SME_MATRIX_SLICE_OFF_RANGE, |
2513 | 3.99k | AARCH64LAYOUT_INVALID, (uint32_t) FirstImm, |
2514 | 3.99k | (uint32_t) Offset); |
2515 | 3.99k | } else if (AArch64_get_detail_op(MI, 0)->type == AARCH64_OP_PRED) { |
2516 | 0 | CS_ASSERT_RET(0 && "Unknown SME predicate imm range type");
2517 | 0 | } else { |
2518 | 0 | CS_ASSERT_RET(0 && "Unknown SME operand type");
2519 | 0 | } |
2520 | 3.99k | return; |
2521 | 3.99k | } |
2522 | | |
2523 | 0 | CS_ASSERT_RET(!(map_get_op_type(MI, OpNum) & CS_OP_MEM)); |
2524 | 0 | CS_ASSERT_RET(map_get_op_type(MI, OpNum) == CS_OP_IMM); |
2525 | |
2526 | 0 | AArch64_get_detail_op(MI, 0)->type = AARCH64_OP_IMM_RANGE; |
2527 | 0 | AArch64_get_detail_op(MI, 0)->imm_range.first = FirstImm; |
2528 | 0 | AArch64_get_detail_op(MI, 0)->imm_range.offset = Offset; |
2529 | 0 | AArch64_get_detail_op(MI, 0)->access = map_get_op_access(MI, OpNum); |
2530 | 0 | AArch64_inc_op_count(MI); |
2531 | 0 | } |
2532 | | |
2533 | | /// Adds an AArch64 memory operand at position OpNum. op_count is *not* increased by
2534 | | /// one. This is done by set_mem_access(). |
2535 | | void AArch64_set_detail_op_mem(MCInst *MI, unsigned OpNum, uint64_t Val) |
2536 | 109k | { |
2537 | 109k | if (!detail_is_set(MI)) |
2538 | 0 | return; |
2539 | 109k | AArch64_check_safe_inc(MI); |
2540 | | |
2541 | 109k | AArch64_set_mem_access(MI, true); |
2542 | | |
2543 | 109k | cs_op_type secondary_type = map_get_op_type(MI, OpNum) & ~CS_OP_MEM; |
2544 | 109k | switch (secondary_type) { |
2545 | 0 | default: |
2546 | 0 | CS_ASSERT_RET(0 && "Secondary type not supported yet."); |
2547 | 75.8k | case CS_OP_REG: { |
2548 | 75.8k | bool is_index_reg = AArch64_get_detail_op(MI, 0)->mem.base != |
2549 | 75.8k | AARCH64_REG_INVALID; |
2550 | 75.8k | if (is_index_reg) |
2551 | 12.7k | AArch64_get_detail_op(MI, 0)->mem.index = Val; |
2552 | 63.1k | else { |
2553 | 63.1k | AArch64_get_detail_op(MI, 0)->mem.base = Val; |
2554 | 63.1k | } |
2555 | | |
2556 | 75.8k | if (MCInst_opIsTying(MI, OpNum)) { |
2557 | | // Especially base registers can be writeback registers. |
2558 | | // For this they tie an MC operand which has write |
2559 | | // access. But this one is never processed in the printer |
2560 | | // (because it is never emitted). Therefor it is never |
2561 | | // added to the modified list. |
2562 | | // Here we check for this case and add the memory register |
2563 | | // to the modified list. |
2564 | 17.5k | map_add_implicit_write(MI, MCInst_getOpVal(MI, OpNum)); |
2565 | 17.5k | } |
2566 | 75.8k | break; |
2567 | 0 | } |
2568 | 33.7k | case CS_OP_IMM: { |
2569 | 33.7k | AArch64_get_detail_op(MI, 0)->mem.disp = Val; |
2570 | 33.7k | break; |
2571 | 0 | } |
2572 | 109k | } |
2573 | | |
2574 | 109k | AArch64_get_detail_op(MI, 0)->type = AARCH64_OP_MEM; |
2575 | 109k | AArch64_get_detail_op(MI, 0)->access = map_get_op_access(MI, OpNum); |
2576 | 109k | AArch64_set_mem_access(MI, false); |
2577 | 109k | } |
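/* Editorial sketch: reading the memory operand built here from user code,
 * assuming the usual detail->aarch64 access path of the public API:
 *
 *   cs_aarch64_op *op = &insn->detail->aarch64.operands[j];
 *   if (op->type == AARCH64_OP_MEM) {
 *       // op->mem.base, op->mem.index and op->mem.disp hold the base
 *       // register, index register and displacement filled in above.
 *   }
 */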
2578 | | |
2579 | | /// Adds the shift and sign extend info to the previous operand. |
2580 | | /// op_count is *not* incremented by one. |
2581 | | void AArch64_set_detail_shift_ext(MCInst *MI, unsigned OpNum, bool SignExtend, |
2582 | | bool DoShift, unsigned ExtWidth, |
2583 | | char SrcRegKind) |
2584 | 8.68k | { |
2585 | 8.68k | bool IsLSL = !SignExtend && SrcRegKind == 'x'; |
2586 | 8.68k | if (IsLSL) |
2587 | 3.88k | AArch64_get_detail_op(MI, -1)->shift.type = AARCH64_SFT_LSL; |
2588 | 4.80k | else { |
2589 | 4.80k | aarch64_extender ext = SignExtend ? AARCH64_EXT_SXTB : |
2590 | 4.80k | AARCH64_EXT_UXTB; |
2591 | 4.80k | switch (SrcRegKind) { |
2592 | 0 | default: |
2593 | 0 | CS_ASSERT_RET(0 && "Extender not handled\n"); |
2594 | 0 | case 'b': |
2595 | 0 | ext += 0; |
2596 | 0 | break; |
2597 | 0 | case 'h': |
2598 | 0 | ext += 1; |
2599 | 0 | break; |
2600 | 4.48k | case 'w': |
2601 | 4.48k | ext += 2; |
2602 | 4.48k | break; |
2603 | 326 | case 'x': |
2604 | 326 | ext += 3; |
2605 | 326 | break; |
2606 | 4.80k | } |
2607 | 4.80k | AArch64_get_detail_op(MI, -1)->ext = ext; |
2608 | 4.80k | } |
2609 | 8.68k | if (DoShift || IsLSL) { |
2610 | 5.79k | unsigned ShiftAmount = DoShift ? Log2_32(ExtWidth / 8) : 0; |
2611 | 5.79k | AArch64_get_detail_op(MI, -1)->shift.type = AARCH64_SFT_LSL; |
2612 | 5.79k | AArch64_get_detail_op(MI, -1)->shift.value = ShiftAmount; |
2613 | 5.79k | } |
2614 | 8.68k | } |
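// Worked example (editorial): for SignExtend == true, SrcRegKind == 'w' and
// ExtWidth == 32, ext starts at AARCH64_EXT_SXTB and 'w' adds 2, which is
// AARCH64_EXT_SXTW given the enum layout this code relies on; with DoShift
// set, the shift becomes LSL #Log2_32(32 / 8) == LSL #2 on the same operand.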
2615 | | |
2616 | | /// Transforms the immediate of the operand to a float and stores it. |
2617 | | /// Increments the op_count by one.
2618 | | void AArch64_set_detail_op_float(MCInst *MI, unsigned OpNum, float Val) |
2619 | 410 | { |
2620 | 410 | if (!detail_is_set(MI)) |
2621 | 0 | return; |
2622 | 410 | AArch64_check_safe_inc(MI); |
2623 | | |
2624 | 410 | AArch64_get_detail_op(MI, 0)->type = AARCH64_OP_FP; |
2625 | 410 | AArch64_get_detail_op(MI, 0)->fp = Val; |
2626 | 410 | AArch64_get_detail_op(MI, 0)->access = map_get_op_access(MI, OpNum); |
2627 | 410 | AArch64_inc_op_count(MI); |
2628 | 410 | } |
2629 | | |
2630 | | /// Adds the system operand and increases the op_count by
2631 | | /// one. |
2632 | | void AArch64_set_detail_op_sys(MCInst *MI, unsigned OpNum, aarch64_sysop sys_op, |
2633 | | aarch64_op_type type) |
2634 | 10.8k | { |
2635 | 10.8k | if (!detail_is_set(MI)) |
2636 | 0 | return; |
2637 | 10.8k | AArch64_check_safe_inc(MI); |
2638 | | |
2639 | 10.8k | AArch64_get_detail_op(MI, 0)->type = type; |
2640 | 10.8k | AArch64_get_detail_op(MI, 0)->sysop = sys_op; |
2641 | 10.8k | if (sys_op.sub_type == AARCH64_OP_EXACTFPIMM) { |
2642 | 1.15k | AArch64_get_detail_op(MI, 0)->fp = aarch64_exact_fp_to_fp(sys_op.imm.exactfpimm); |
2643 | 1.15k | } |
2644 | 10.8k | AArch64_inc_op_count(MI); |
2645 | 10.8k | } |
2646 | | |
2647 | 35.6k | void AArch64_set_detail_op_pred(MCInst *MI, unsigned OpNum) { |
2648 | 35.6k | if (!detail_is_set(MI)) |
2649 | 0 | return; |
2650 | 35.6k | AArch64_check_safe_inc(MI); |
2651 | | |
2652 | 35.6k | if (AArch64_get_detail_op(MI, 0)->type == AARCH64_OP_INVALID) { |
2653 | 34.8k | setup_pred_operand(MI); |
2654 | 34.8k | } |
2655 | 35.6k | aarch64_op_pred *p = &AArch64_get_detail_op(MI, 0)->pred; |
2656 | 35.6k | if (p->reg == AARCH64_REG_INVALID) { |
2657 | 34.8k | p->reg = MCInst_getOpVal(MI, OpNum); |
2658 | 34.8k | AArch64_get_detail_op(MI, 0)->access = map_get_op_access(MI, OpNum); |
2659 | 34.8k | AArch64_get_detail(MI)->is_doing_sme = true; |
2660 | 34.8k | return; |
2661 | 34.8k | } else if (p->vec_select == AARCH64_REG_INVALID) { |
2662 | 538 | p->vec_select = MCInst_getOpVal(MI, OpNum); |
2663 | 538 | return; |
2664 | 538 | } else if (p->imm_index == -1) { |
2665 | 190 | p->imm_index = MCInst_getOpVal(MI, OpNum); |
2666 | 190 | return; |
2667 | 190 | } |
2668 | 0 | CS_ASSERT_RET(0 && "Should not be reached."); |
2669 | 0 | } |
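// Editorial note: successive calls for the same predicate operand fill
// pred.reg first, then pred.vec_select, then pred.imm_index; setting
// is_doing_sme keeps the following CS_OP_BOUND operands attached to it.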
2670 | | |
2671 | | /// Adds an SME matrix component to an SME operand.
2672 | | void AArch64_set_detail_op_sme(MCInst *MI, unsigned OpNum, |
2673 | | aarch64_sme_op_part part, |
2674 | | AArch64Layout_VectorLayout vas, ...) |
2675 | 29.3k | { |
2676 | 29.3k | if (!detail_is_set(MI)) |
2677 | 0 | return; |
2678 | 29.3k | AArch64_check_safe_inc(MI); |
2679 | | |
2680 | 29.3k | AArch64_get_detail_op(MI, 0)->type = AARCH64_OP_SME; |
2681 | 29.3k | switch (part) { |
2682 | 0 | default: |
2683 | 0 | printf("Unhandled SME operand part %d\n", part); |
2684 | 0 | CS_ASSERT_RET(0); |
2685 | 1.13k | case AARCH64_SME_MATRIX_TILE_LIST: { |
2686 | 1.13k | setup_sme_operand(MI); |
2687 | 1.13k | va_list args; |
2688 | 1.13k | va_start(args, vas); |
2689 | 1.13k | int Tile = va_arg(args, int); // NOLINT(clang-analyzer-valist.Uninitialized) |
2690 | 1.13k | va_end(args); |
2691 | 1.13k | AArch64_get_detail_op(MI, 0)->sme.type = AARCH64_SME_OP_TILE; |
2692 | 1.13k | AArch64_get_detail_op(MI, 0)->sme.tile = Tile; |
2693 | 1.13k | AArch64_get_detail_op(MI, 0)->vas = vas; |
2694 | 1.13k | AArch64_get_detail_op(MI, 0)->access = map_get_op_access(MI, OpNum); |
2695 | 1.13k | AArch64_get_detail(MI)->is_doing_sme = true; |
2696 | 1.13k | break; |
2697 | 0 | } |
2698 | 10.4k | case AARCH64_SME_MATRIX_TILE: |
2699 | 10.4k | CS_ASSERT_RET(map_get_op_type(MI, OpNum) == CS_OP_REG); |
2700 | | |
2701 | 10.4k | setup_sme_operand(MI); |
2702 | 10.4k | AArch64_get_detail_op(MI, 0)->sme.type = AARCH64_SME_OP_TILE; |
2703 | 10.4k | AArch64_get_detail_op(MI, 0)->sme.tile = |
2704 | 10.4k | MCInst_getOpVal(MI, OpNum); |
2705 | 10.4k | AArch64_get_detail_op(MI, 0)->vas = vas; |
2706 | 10.4k | AArch64_get_detail_op(MI, 0)->access = map_get_op_access(MI, OpNum); |
2707 | 10.4k | AArch64_get_detail(MI)->is_doing_sme = true; |
2708 | 10.4k | break; |
2709 | 8.87k | case AARCH64_SME_MATRIX_SLICE_REG: |
2710 | 8.87k | CS_ASSERT_RET((map_get_op_type(MI, OpNum) & ~(CS_OP_MEM | CS_OP_BOUND)) == CS_OP_REG); |
2711 | 8.87k | CS_ASSERT_RET(AArch64_get_detail_op(MI, 0)->type == AARCH64_OP_SME); |
2712 | | |
2713 | | // SME operand already present. Add the slice to it. |
2714 | 8.87k | AArch64_get_detail_op(MI, 0)->sme.type = |
2715 | 8.87k | AARCH64_SME_OP_TILE_VEC; |
2716 | 8.87k | AArch64_get_detail_op(MI, 0)->sme.slice_reg = |
2717 | 8.87k | MCInst_getOpVal(MI, OpNum); |
2718 | 8.87k | break; |
2719 | 4.88k | case AARCH64_SME_MATRIX_SLICE_OFF: { |
2720 | 4.88k | CS_ASSERT_RET((map_get_op_type(MI, OpNum) & ~(CS_OP_MEM | CS_OP_BOUND)) == CS_OP_IMM); |
2721 | | // Because the slice register was handled before, the current op must already be an SME operand. |
2722 | 4.88k | CS_ASSERT_RET(AArch64_get_detail_op(MI, 0)->type == |
2723 | 4.88k | AARCH64_OP_SME); |
2724 | 4.88k | CS_ASSERT_RET(AArch64_get_detail_op(MI, 0)->sme.slice_offset.imm == |
2725 | 4.88k | AARCH64_SLICE_IMM_INVALID); |
2726 | 4.88k | va_list args; |
2727 | 4.88k | va_start(args, vas); |
2728 | 4.88k | uint16_t offset = va_arg(args, uint32_t); // NOLINT(clang-analyzer-valist.Uninitialized) |
2729 | 4.88k | va_end(args); |
2730 | 4.88k | AArch64_get_detail_op(MI, 0)->sme.slice_offset.imm = |
2731 | 4.88k | offset; |
2732 | 4.88k | break; |
2733 | 0 | } |
2734 | 3.99k | case AARCH64_SME_MATRIX_SLICE_OFF_RANGE: { |
2735 | 3.99k | va_list args; |
2736 | 3.99k | va_start(args, vas); |
2737 | 3.99k | uint8_t First = va_arg(args, uint32_t); // NOLINT(clang-analyzer-valist.Uninitialized) |
2738 | 3.99k | uint8_t Offset = va_arg(args, uint32_t); // NOLINT(clang-analyzer-valist.Uninitialized) |
2739 | 3.99k | va_end(args); |
2740 | 3.99k | AArch64_get_detail_op(MI, 0)->sme.slice_offset.imm_range.first = |
2741 | 3.99k | First; |
2742 | 3.99k | AArch64_get_detail_op(MI, 0)->sme.slice_offset.imm_range.offset = |
2743 | 3.99k | Offset; |
2744 | 3.99k | AArch64_get_detail_op(MI, 0)->sme.has_range_offset = true; |
2745 | 3.99k | break; |
2746 | 0 | } |
2747 | 29.3k | } |
2748 | 29.3k | } |
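A hedged sketch of how the variadic tail is consumed for the parts handled above; the operand numbers, layout, and offset value are illustrative only.

    // Hypothetical sequence for one tile-plus-slice SME operand:
    AArch64_set_detail_op_sme(MI, 1, AARCH64_SME_MATRIX_TILE,
                              AARCH64LAYOUT_VL_S);        // no extra argument
    AArch64_set_detail_op_sme(MI, 2, AARCH64_SME_MATRIX_SLICE_REG,
                              AARCH64LAYOUT_INVALID);     // no extra argument
    AArch64_set_detail_op_sme(MI, 3, AARCH64_SME_MATRIX_SLICE_OFF,
                              AARCH64LAYOUT_INVALID, 2);  // one uint32_t slice offset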
2749 | | |
2750 | | static void insert_op(MCInst *MI, unsigned index, cs_aarch64_op op) |
2751 | 9.23k | { |
2752 | 9.23k | if (!detail_is_set(MI)) { |
2753 | 0 | return; |
2754 | 0 | } |
2755 | | |
2756 | 9.23k | AArch64_check_safe_inc(MI); |
2757 | 9.23k | cs_aarch64_op *ops = AArch64_get_detail(MI)->operands; |
2758 | 9.23k | int i = AArch64_get_detail(MI)->op_count; |
2759 | 9.23k | if (index == -1) { |
2760 | 9.23k | ops[i] = op; |
2761 | 9.23k | AArch64_inc_op_count(MI); |
2762 | 9.23k | return; |
2763 | 9.23k | } |
2764 | 0 | for (; i > 0 && i > index; --i) { |
2765 | 0 | ops[i] = ops[i - 1]; |
2766 | 0 | } |
2767 | 0 | ops[index] = op; |
2768 | 0 | AArch64_inc_op_count(MI); |
2769 | 0 | } |
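A short worked illustration of the shift above (hypothetical operand state, not from the source): with op_count == 3 and ops = { A, B, C }, insert_op(MI, 1, X) moves B and C up and leaves ops = { A, X, B, C } with op_count == 4; passing index == -1 (UINT_MAX after conversion to unsigned) appends instead of shifting.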
2770 | | |
2771 | | /// Inserts a float into the detail operands at @index. |
2772 | | /// If @index == -1, it pushes the operand to the end of the ops array. |
2773 | | /// Already present operands are moved. |
2774 | | void AArch64_insert_detail_op_float_at(MCInst *MI, unsigned index, double val, |
2775 | | cs_ac_type access) |
2776 | 0 | { |
2777 | 0 | if (!detail_is_set(MI)) |
2778 | 0 | return; |
2779 | | |
2780 | 0 | AArch64_check_safe_inc(MI); |
2781 | |
2782 | 0 | cs_aarch64_op op; |
2783 | 0 | AArch64_setup_op(&op); |
2784 | 0 | op.type = AARCH64_OP_FP; |
2785 | 0 | op.fp = val; |
2786 | 0 | op.access = access; |
2787 | |
2788 | 0 | insert_op(MI, index, op); |
2789 | 0 | } |
2790 | | |
2791 | | /// Inserts a register into the detail operands at @index. |
2792 | | /// If @index == -1, it pushes the operand to the end of the ops array. |
2793 | | /// Already present operands are moved. |
2794 | | void AArch64_insert_detail_op_reg_at(MCInst *MI, unsigned index, |
2795 | | aarch64_reg Reg, cs_ac_type access) |
2796 | 2.42k | { |
2797 | 2.42k | if (!detail_is_set(MI)) |
2798 | 0 | return; |
2799 | | |
2800 | 2.42k | AArch64_check_safe_inc(MI); |
2801 | | |
2802 | 2.42k | cs_aarch64_op op; |
2803 | 2.42k | AArch64_setup_op(&op); |
2804 | 2.42k | op.type = AARCH64_OP_REG; |
2805 | 2.42k | op.reg = Reg; |
2806 | 2.42k | op.access = access; |
2807 | | |
2808 | 2.42k | insert_op(MI, index, op); |
2809 | 2.42k | } |
2810 | | |
2811 | | /// Inserts an immediate into the detail operands at @index. |
2812 | | /// If @index == -1, it pushes the operand to the end of the ops array. |
2813 | | /// Already present operands are moved. |
2814 | | void AArch64_insert_detail_op_imm_at(MCInst *MI, unsigned index, int64_t Imm) |
2815 | 2.04k | { |
2816 | 2.04k | if (!detail_is_set(MI)) |
2817 | 0 | return; |
2818 | 2.04k | AArch64_check_safe_inc(MI); |
2819 | | |
2820 | 2.04k | cs_aarch64_op op; |
2821 | 2.04k | AArch64_setup_op(&op); |
2822 | 2.04k | op.type = AARCH64_OP_IMM; |
2823 | 2.04k | op.imm = Imm; |
2824 | 2.04k | op.access = CS_AC_READ; |
2825 | | |
2826 | 2.04k | insert_op(MI, index, op); |
2827 | 2.04k | } |
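A hedged usage sketch for the insert-at helpers above (values are hypothetical): index 0 prepends an operand and shifts the existing ones, while -1 appends.

    AArch64_insert_detail_op_imm_at(MI, 0, 16);  // becomes the first operand
    AArch64_insert_detail_op_imm_at(MI, -1, 16); // pushed to the end of ops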
2828 | | |
2829 | | void AArch64_insert_detail_op_sys(MCInst *MI, unsigned index, aarch64_sysop sys_op, |
2830 | | aarch64_op_type type) |
2831 | 2.94k | { |
2832 | 2.94k | if (!detail_is_set(MI)) |
2833 | 0 | return; |
2834 | 2.94k | AArch64_check_safe_inc(MI); |
2835 | | |
2836 | 2.94k | cs_aarch64_op op; |
2837 | 2.94k | AArch64_setup_op(&op); |
2838 | 2.94k | op.type = type; |
2839 | 2.94k | op.sysop = sys_op; |
2840 | 2.94k | if (op.sysop.sub_type == AARCH64_OP_EXACTFPIMM) { |
2841 | 2.80k | op.fp = aarch64_exact_fp_to_fp(op.sysop.imm.exactfpimm); |
2842 | 2.80k | } |
2843 | 2.94k | insert_op(MI, index, op); |
2844 | 2.94k | } |
2845 | | |
2846 | | |
2847 | | void AArch64_insert_detail_op_sme(MCInst *MI, unsigned index, aarch64_op_sme sme_op) |
2848 | 1.82k | { |
2849 | 1.82k | if (!detail_is_set(MI)) |
2850 | 0 | return; |
2851 | 1.82k | AArch64_check_safe_inc(MI); |
2852 | | |
2853 | 1.82k | cs_aarch64_op op; |
2854 | 1.82k | AArch64_setup_op(&op); |
2855 | 1.82k | op.type = AARCH64_OP_SME; |
2856 | 1.82k | op.sme = sme_op; |
2857 | 1.82k | insert_op(MI, index, op); |
2858 | 1.82k | } |
2859 | | |
2860 | | #endif |