/src/capstonenext/arch/AArch64/AArch64Mapping.c
Line | Count | Source |
1 | | /* Capstone Disassembly Engine */ |
2 | | /* By Nguyen Anh Quynh <aquynh@gmail.com>, 2013-2019 */ |
3 | | |
4 | | #ifdef CAPSTONE_HAS_AARCH64 |
5 | | |
6 | | #include <stdio.h> // debug |
7 | | #include <string.h> |
8 | | |
9 | | #include "capstone/aarch64.h" |
10 | | |
11 | | #include "../../cs_simple_types.h" |
12 | | #include "../../Mapping.h" |
13 | | #include "../../MathExtras.h" |
14 | | #include "../../utils.h" |
15 | | |
16 | | #include "AArch64AddressingModes.h" |
17 | | #include "AArch64BaseInfo.h" |
18 | | #include "AArch64DisassemblerExtension.h" |
19 | | #include "AArch64Linkage.h" |
20 | | #include "AArch64Mapping.h" |
21 | | |
22 | 5.01k | #define CHAR(c) #c[0] |
23 | | |
24 | | static float aarch64_exact_fp_to_fp(aarch64_exactfpimm exact) |
25 | 8.54k | { |
26 | 8.54k | switch (exact) { |
27 | 0 | default: |
28 | 0 | CS_ASSERT(0 && "Not handled."); |
29 | 0 | return 999.0; |
30 | 293 | case AARCH64_EXACTFPIMM_HALF: |
31 | 293 | return 0.5; |
32 | 1.00k | case AARCH64_EXACTFPIMM_ONE: |
33 | 1.00k | return 1.0; |
34 | 380 | case AARCH64_EXACTFPIMM_TWO: |
35 | 380 | return 2.0; |
36 | 6.86k | case AARCH64_EXACTFPIMM_ZERO: |
37 | 6.86k | return 0.0; |
38 | 8.54k | } |
39 | 8.54k | } |
40 | | |
41 | | #ifndef CAPSTONE_DIET |
42 | | static const aarch64_reg aarch64_flag_regs[] = { |
43 | | AARCH64_REG_NZCV, |
44 | | }; |
45 | | |
46 | | static const aarch64_sysreg aarch64_flag_sys_regs[] = { |
47 | | AARCH64_SYSREG_NZCV, AARCH64_SYSREG_PMOVSCLR_EL0, |
48 | | AARCH64_SYSREG_PMOVSSET_EL0, AARCH64_SYSREG_SPMOVSCLR_EL0, |
49 | | AARCH64_SYSREG_SPMOVSSET_EL0 |
50 | | }; |
51 | | #endif // CAPSTONE_DIET |
52 | | |
53 | | static AArch64Layout_VectorLayout sme_reg_to_vas(aarch64_reg reg) |
54 | 0 | { |
55 | 0 | switch (reg) { |
56 | 0 | default: |
57 | 0 | return AARCH64LAYOUT_INVALID; |
58 | 0 | case AARCH64_REG_ZAB0: |
59 | 0 | return AARCH64LAYOUT_VL_B; |
60 | 0 | case AARCH64_REG_ZAH0: |
61 | 0 | case AARCH64_REG_ZAH1: |
62 | 0 | return AARCH64LAYOUT_VL_H; |
63 | 0 | case AARCH64_REG_ZAS0: |
64 | 0 | case AARCH64_REG_ZAS1: |
65 | 0 | case AARCH64_REG_ZAS2: |
66 | 0 | case AARCH64_REG_ZAS3: |
67 | 0 | return AARCH64LAYOUT_VL_S; |
68 | 0 | case AARCH64_REG_ZAD0: |
69 | 0 | case AARCH64_REG_ZAD1: |
70 | 0 | case AARCH64_REG_ZAD2: |
71 | 0 | case AARCH64_REG_ZAD3: |
72 | 0 | case AARCH64_REG_ZAD4: |
73 | 0 | case AARCH64_REG_ZAD5: |
74 | 0 | case AARCH64_REG_ZAD6: |
75 | 0 | case AARCH64_REG_ZAD7: |
76 | 0 | return AARCH64LAYOUT_VL_D; |
77 | 0 | case AARCH64_REG_ZAQ0: |
78 | 0 | case AARCH64_REG_ZAQ1: |
79 | 0 | case AARCH64_REG_ZAQ2: |
80 | 0 | case AARCH64_REG_ZAQ3: |
81 | 0 | case AARCH64_REG_ZAQ4: |
82 | 0 | case AARCH64_REG_ZAQ5: |
83 | 0 | case AARCH64_REG_ZAQ6: |
84 | 0 | case AARCH64_REG_ZAQ7: |
85 | 0 | case AARCH64_REG_ZAQ8: |
86 | 0 | case AARCH64_REG_ZAQ9: |
87 | 0 | case AARCH64_REG_ZAQ10: |
88 | 0 | case AARCH64_REG_ZAQ11: |
89 | 0 | case AARCH64_REG_ZAQ12: |
90 | 0 | case AARCH64_REG_ZAQ13: |
91 | 0 | case AARCH64_REG_ZAQ14: |
92 | 0 | case AARCH64_REG_ZAQ15: |
93 | 0 | return AARCH64LAYOUT_VL_Q; |
94 | 0 | case AARCH64_REG_ZA: |
95 | 0 | return AARCH64LAYOUT_VL_COMPLETE; |
96 | 0 | } |
97 | 0 | } |
98 | | |
99 | | void AArch64_init_mri(MCRegisterInfo *MRI) |
100 | 10.5k | { |
101 | 10.5k | MCRegisterInfo_InitMCRegisterInfo( |
102 | 10.5k | MRI, AArch64RegDesc, AARCH64_REG_ENDING, 0, 0, |
103 | 10.5k | AArch64MCRegisterClasses, ARR_SIZE(AArch64MCRegisterClasses), 0, |
104 | 10.5k | 0, AArch64RegDiffLists, 0, AArch64SubRegIdxLists, |
105 | 10.5k | ARR_SIZE(AArch64SubRegIdxLists), 0); |
106 | 10.5k | } |
107 | | |
108 | | /// Sets up a new SME matrix operand at the currently active detail operand. |
109 | | static void setup_sme_operand(MCInst *MI) |
110 | 31.0k | { |
111 | 31.0k | if (!detail_is_set(MI)) |
112 | 0 | return; |
113 | | |
114 | 31.0k | AArch64_get_detail_op(MI, 0)->type = AARCH64_OP_SME; |
115 | 31.0k | AArch64_get_detail_op(MI, 0)->sme.type = AARCH64_SME_OP_INVALID; |
116 | 31.0k | AArch64_get_detail_op(MI, 0)->sme.tile = AARCH64_REG_INVALID; |
117 | 31.0k | AArch64_get_detail_op(MI, 0)->sme.slice_reg = AARCH64_REG_INVALID; |
118 | 31.0k | AArch64_get_detail_op(MI, 0)->sme.slice_offset.imm = |
119 | 31.0k | AARCH64_SLICE_IMM_INVALID; |
120 | 31.0k | AArch64_get_detail_op(MI, 0)->sme.slice_offset.imm_range.first = |
121 | 31.0k | AARCH64_SLICE_IMM_RANGE_INVALID; |
122 | 31.0k | AArch64_get_detail_op(MI, 0)->sme.slice_offset.imm_range.offset = |
123 | 31.0k | AARCH64_SLICE_IMM_RANGE_INVALID; |
124 | 31.0k | } |
125 | | |
126 | | static void setup_pred_operand(MCInst *MI) |
127 | 73.4k | { |
128 | 73.4k | if (!detail_is_set(MI)) |
129 | 0 | return; |
130 | | |
131 | 73.4k | AArch64_get_detail_op(MI, 0)->type = AARCH64_OP_PRED; |
132 | 73.4k | AArch64_get_detail_op(MI, 0)->pred.imm_index = -1; |
133 | 73.4k | } |
134 | | |
135 | | const insn_map aarch64_insns[] = { |
136 | | #include "AArch64GenCSMappingInsn.inc" |
137 | | }; |
138 | | |
139 | | static const name_map insn_alias_mnem_map[] = { |
140 | | #include "AArch64GenCSAliasMnemMap.inc" |
141 | | { AARCH64_INS_ALIAS_CFP, "cfp" }, |
142 | | { AARCH64_INS_ALIAS_DVP, "dvp" }, |
143 | | { AARCH64_INS_ALIAS_COSP, "cosp" }, |
144 | | { AARCH64_INS_ALIAS_CPP, "cpp" }, |
145 | | { AARCH64_INS_ALIAS_IC, "ic" }, |
146 | | { AARCH64_INS_ALIAS_DC, "dc" }, |
147 | | { AARCH64_INS_ALIAS_AT, "at" }, |
148 | | { AARCH64_INS_ALIAS_TLBI, "tlbi" }, |
149 | | { AARCH64_INS_ALIAS_TLBIP, "tlbip" }, |
150 | | { AARCH64_INS_ALIAS_RPRFM, "rprfm" }, |
151 | | { AARCH64_INS_ALIAS_LSL, "lsl" }, |
152 | | { AARCH64_INS_ALIAS_SBFX, "sbfx" }, |
153 | | { AARCH64_INS_ALIAS_UBFX, "ubfx" }, |
154 | | { AARCH64_INS_ALIAS_SBFIZ, "sbfiz" }, |
155 | | { AARCH64_INS_ALIAS_UBFIZ, "ubfiz" }, |
156 | | { AARCH64_INS_ALIAS_BFC, "bfc" }, |
157 | | { AARCH64_INS_ALIAS_BFI, "bfi" }, |
158 | | { AARCH64_INS_ALIAS_BFXIL, "bfxil" }, |
159 | | { AARCH64_INS_ALIAS_END, NULL }, |
160 | | }; |
161 | | |
162 | | static const char *get_custom_reg_alias(unsigned reg) |
163 | 61.6k | { |
164 | 61.6k | switch (reg) { |
165 | 240 | case AARCH64_REG_X29: |
166 | 240 | return "fp"; |
167 | 1.66k | case AARCH64_REG_X30: |
168 | 1.66k | return "lr"; |
169 | 61.6k | } |
170 | 59.7k | return NULL; |
171 | 61.6k | } |
172 | | |
173 | | /// Very annoyingly, LLVM hard-codes the vector layout suffixes into the asm string. |
174 | | /// In this function we check for these cases and add the vector layout/arrangement |
175 | | /// specifier. |
176 | | void AArch64_add_vas(MCInst *MI, const SStream *OS) |
177 | 290k | { |
178 | 290k | if (!detail_is_set(MI)) { |
179 | 0 | return; |
180 | 0 | } |
181 | | |
182 | 290k | if (AArch64_get_detail(MI)->op_count == 0) { |
183 | 344 | return; |
184 | 344 | } |
185 | 289k | if (MCInst_getOpcode(MI) == AArch64_MUL53HI || |
186 | 289k | MCInst_getOpcode(MI) == AArch64_MUL53LO) { |
187 | | // Proprietary Apple instructions. |
188 | 0 | AArch64_get_detail(MI)->operands[0].vas = AARCH64LAYOUT_VL_2D; |
189 | 0 | AArch64_get_detail(MI)->operands[1].vas = AARCH64LAYOUT_VL_2D; |
190 | 0 | return; |
191 | 0 | } |
192 | | |
193 | | // Search for r".[0-9]{1,2}[bhsdq]\W" |
194 | | // with a poor man's regex |
195 | 289k | const char *vl_ptr = strchr(OS->buffer, '.'); |
196 | 659k | while (vl_ptr) { |
197 | | // Number after dot? |
198 | 369k | unsigned num = 0; |
199 | 369k | if (strchr("1248", vl_ptr[1])) { |
200 | 72.8k | num = atoi(vl_ptr + 1); |
201 | 72.8k | vl_ptr = num > 9 ? vl_ptr + 3 : vl_ptr + 2; |
202 | 296k | } else { |
203 | 296k | vl_ptr++; |
204 | 296k | } |
205 | | |
206 | | // Layout letter |
207 | 369k | char letter = '\0'; |
208 | 369k | if (strchr("bhsdq", vl_ptr[0])) { |
209 | 359k | letter = vl_ptr[0]; |
210 | 359k | } |
211 | 369k | if (!letter) { |
212 | 10.2k | goto next_dot_continue; |
213 | 10.2k | } |
214 | | |
215 | 359k | AArch64Layout_VectorLayout vl = AARCH64LAYOUT_INVALID; |
216 | 359k | switch (letter) { |
217 | 0 | default: |
218 | 0 | CS_ASSERT_RET(0 && "Unhandled vector layout letter."); |
219 | 0 | return; |
220 | 87.6k | case 'b': |
221 | 87.6k | vl = AARCH64LAYOUT_VL_B; |
222 | 87.6k | break; |
223 | 83.0k | case 'h': |
224 | 83.0k | vl = AARCH64LAYOUT_VL_H; |
225 | 83.0k | break; |
226 | 87.9k | case 's': |
227 | 87.9k | vl = AARCH64LAYOUT_VL_S; |
228 | 87.9k | break; |
229 | 95.5k | case 'd': |
230 | 95.5k | vl = AARCH64LAYOUT_VL_D; |
231 | 95.5k | break; |
232 | 5.31k | case 'q': |
233 | 5.31k | vl = AARCH64LAYOUT_VL_Q; |
234 | 5.31k | break; |
235 | 359k | } |
236 | 359k | vl |= (num << 8); |
237 | | |
238 | | // Determine the op index by searching for trailing commas after the op string |
239 | 359k | uint32_t op_idx = 0; |
240 | 359k | const char *comma_ptr = strchr(OS->buffer, ','); |
241 | 359k | ; |
242 | 814k | while (comma_ptr && comma_ptr < vl_ptr) { |
243 | 454k | ++op_idx; |
244 | 454k | comma_ptr = strchr(comma_ptr + 1, ','); |
245 | 454k | } |
246 | 359k | if (!comma_ptr) { |
247 | | // Last op doesn't have a trailing comma. |
248 | 57.3k | op_idx = AArch64_get_detail(MI)->op_count - 1; |
249 | 57.3k | } |
250 | 359k | if (op_idx >= AArch64_get_detail(MI)->op_count) { |
251 | | // A memory operand with a comma in [base, disp] |
252 | 16.6k | op_idx = AArch64_get_detail(MI)->op_count - 1; |
253 | 16.6k | } |
254 | | |
255 | | // Search for the operand this one belongs to. |
256 | 359k | cs_aarch64_op *op = &AArch64_get_detail(MI)->operands[op_idx]; |
257 | 359k | if ((op->type != AARCH64_OP_REG && |
258 | 359k | op->type != AARCH64_OP_SME) || |
259 | 359k | op->vas != AARCH64LAYOUT_INVALID) { |
260 | 296k | goto next_dot_continue; |
261 | 296k | } |
262 | 62.9k | op->vas = vl; |
263 | | |
264 | 369k | next_dot_continue: |
265 | 369k | vl_ptr = strchr(vl_ptr + 1, '.'); |
266 | 369k | } |
267 | 289k | } |
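As a rough illustration of the scan above, the following standalone sketch runs the same ".4s"-style suffix matching over a made-up asm string. The buffer contents and the program itself are assumptions for illustration only; the operand-index bookkeeping of AArch64_add_vas is omitted.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	// Hypothetical printer output; not produced by Capstone here.
	const char *buf = "fadd\tv0.4s, v1.4s, v2.4s";
	const char *vl_ptr = strchr(buf, '.');
	while (vl_ptr) {
		unsigned num = 0;
		if (strchr("1248", vl_ptr[1])) {
			num = atoi(vl_ptr + 1);
			vl_ptr += num > 9 ? 3 : 2;
		} else {
			vl_ptr++;
		}
		if (strchr("bhsdq", vl_ptr[0]))
			printf("layout: %u%c\n", num, vl_ptr[0]); // prints "layout: 4s" three times
		vl_ptr = strchr(vl_ptr + 1, '.');
	}
	return 0;
}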
268 | | |
269 | | const char *AArch64_reg_name(csh handle, unsigned int reg) |
270 | 61.6k | { |
271 | 61.6k | int syntax_opt = ((cs_struct *)(uintptr_t)handle)->syntax; |
272 | 61.6k | const char *alias = get_custom_reg_alias(reg); |
273 | 61.6k | if ((syntax_opt & CS_OPT_SYNTAX_CS_REG_ALIAS) && alias) |
274 | 0 | return alias; |
275 | | |
276 | 61.6k | if (((cs_struct *)(uintptr_t)handle)->syntax & |
277 | 61.6k | CS_OPT_SYNTAX_NOREGNAME) { |
278 | 0 | return AArch64_LLVM_getRegisterName(reg, AArch64_NoRegAltName); |
279 | 0 | } |
280 | | // TODO Add options for the other register names |
281 | 61.6k | return AArch64_LLVM_getRegisterName(reg, AArch64_NoRegAltName); |
282 | 61.6k | } |
283 | | |
284 | | void AArch64_setup_op(cs_aarch64_op *op) |
285 | 4.76M | { |
286 | 4.76M | memset(op, 0, sizeof(cs_aarch64_op)); |
287 | 4.76M | op->type = AARCH64_OP_INVALID; |
288 | 4.76M | op->vector_index = -1; |
289 | 4.76M | } |
290 | | |
291 | | void AArch64_init_cs_detail(MCInst *MI) |
292 | 296k | { |
293 | 296k | if (detail_is_set(MI)) { |
294 | 296k | memset(get_detail(MI), 0, |
295 | 296k | offsetof(cs_detail, aarch64) + sizeof(cs_aarch64)); |
296 | 5.04M | for (int i = 0; i < ARR_SIZE(AArch64_get_detail(MI)->operands); |
297 | 4.74M | i++) |
298 | 4.74M | AArch64_setup_op(&AArch64_get_detail(MI)->operands[i]); |
299 | 296k | AArch64_get_detail(MI)->cc = AArch64CC_Invalid; |
300 | 296k | } |
301 | 296k | } |
302 | | |
303 | | /// Unfortunately, the AArch64 definitions do not indicate in any way |
304 | | /// (the instruction identifiers being the exception) whether a memory access |
305 | | /// is post- or pre-indexed. |
306 | | /// So the only generic way to determine if the memory access is in |
307 | | /// post-indexed addressing mode is to search for "<membase>], #<memdisp>" in |
308 | | /// @p OS. |
309 | | /// Searching the asm string to determine such a property is enormously ugly |
310 | | /// and wastes resources. |
311 | | /// Sorry, I know and do feel bad about it. But for now it works. |
312 | | static bool AArch64_check_post_index_am(const MCInst *MI, const SStream *OS) |
313 | 290k | { |
314 | 290k | if (AArch64_get_detail(MI)->post_index) { |
315 | 0 | return true; |
316 | 0 | } |
317 | 290k | cs_aarch64_op *memop = NULL; |
318 | 1.03M | for (int i = 0; i < AArch64_get_detail(MI)->op_count; ++i) { |
319 | 844k | if (AArch64_get_detail(MI)->operands[i].type & CS_OP_MEM) { |
320 | 102k | memop = &AArch64_get_detail(MI)->operands[i]; |
321 | 102k | break; |
322 | 102k | } |
323 | 844k | } |
324 | 290k | if (!memop) |
325 | 187k | return false; |
326 | 102k | if (memop->mem.base == AARCH64_REG_INVALID) { |
327 | | // Load/Store from/to label. Has no register base. |
328 | 3.61k | return false; |
329 | 3.61k | } |
330 | 98.6k | const char *membase = AArch64_LLVM_getRegisterName( |
331 | 98.6k | memop->mem.base, AArch64_NoRegAltName); |
332 | 98.6k | int64_t memdisp = memop->mem.disp; |
333 | 98.6k | SStream pattern = { 0 }; |
334 | 98.6k | SStream_concat(&pattern, membase); |
335 | 98.6k | SStream_concat(&pattern, "], "); |
336 | 98.6k | printInt32Bang(&pattern, memdisp); |
337 | 98.6k | return strstr(OS->buffer, pattern.buffer) != NULL; |
338 | 102k | } |
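A minimal sketch of the pattern probe described in the comment above, using hand-written strings instead of real printer output (both strings and the fixed base/disp values are assumptions): for the post-indexed form the "x1], #8" pattern is found, for the pre-indexed form it is not.

#include <stdio.h>
#include <string.h>

int main(void)
{
	const char *post = "ldr\tx0, [x1], #8"; // hypothetical post-indexed output
	const char *pre = "ldr\tx0, [x1, #8]!"; // hypothetical pre-indexed output
	char pattern[32];
	// Same shape as the probe built above: "<membase>], #<memdisp>"
	snprintf(pattern, sizeof(pattern), "%s], #%d", "x1", 8);
	printf("post: %d\n", strstr(post, pattern) != NULL); // 1
	printf("pre:  %d\n", strstr(pre, pattern) != NULL);  // 0
	return 0;
}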
339 | | |
340 | | static void AArch64_check_updates_flags(MCInst *MI) |
341 | 290k | { |
342 | 290k | #ifndef CAPSTONE_DIET |
343 | 290k | if (!detail_is_set(MI)) |
344 | 0 | return; |
345 | 290k | cs_detail *detail = get_detail(MI); |
346 | | // Implicitly written registers |
347 | 318k | for (int i = 0; i < detail->regs_write_count; ++i) { |
348 | 41.9k | if (detail->regs_write[i] == 0) |
349 | 0 | break; |
350 | 70.5k | for (int j = 0; j < ARR_SIZE(aarch64_flag_regs); ++j) { |
351 | 41.9k | if (detail->regs_write[i] == aarch64_flag_regs[j]) { |
352 | 13.3k | detail->aarch64.update_flags = true; |
353 | 13.3k | return; |
354 | 13.3k | } |
355 | 41.9k | } |
356 | 41.9k | } |
357 | 1.08M | for (int i = 0; i < detail->aarch64.op_count; ++i) { |
358 | 813k | if (detail->aarch64.operands[i].type == AARCH64_OP_SYSREG && |
359 | 813k | detail->aarch64.operands[i].sysop.sub_type == |
360 | 7.52k | AARCH64_OP_REG_MSR) { |
361 | 26.8k | for (int j = 0; j < ARR_SIZE(aarch64_flag_sys_regs); |
362 | 21.9k | ++j) |
363 | 22.4k | if (detail->aarch64.operands[i] |
364 | 22.4k | .sysop.reg.sysreg == |
365 | 22.4k | aarch64_flag_sys_regs[j]) { |
366 | 494 | detail->aarch64.update_flags = true; |
367 | 494 | return; |
368 | 494 | } |
369 | 808k | } else if (detail->aarch64.operands[i].type == AARCH64_OP_REG && |
370 | 808k | detail->aarch64.operands[i].access & CS_AC_WRITE) { |
371 | 487k | for (int j = 0; j < ARR_SIZE(aarch64_flag_regs); ++j) |
372 | 243k | if (detail->aarch64.operands[i].reg == |
373 | 243k | aarch64_flag_regs[j]) { |
374 | 0 | detail->aarch64.update_flags = true; |
375 | 0 | return; |
376 | 0 | } |
377 | 243k | } |
378 | 813k | } |
379 | 276k | #endif // CAPSTONE_DIET |
380 | 276k | } |
381 | | |
382 | | static aarch64_shifter id_to_shifter(unsigned Opcode) |
383 | 360 | { |
384 | 360 | switch (Opcode) { |
385 | 0 | default: |
386 | 0 | return AARCH64_SFT_INVALID; |
387 | 10 | case AArch64_RORVXr: |
388 | 44 | case AArch64_RORVWr: |
389 | 44 | return AARCH64_SFT_ROR_REG; |
390 | 79 | case AArch64_LSRVXr: |
391 | 97 | case AArch64_LSRVWr: |
392 | 97 | return AARCH64_SFT_LSR_REG; |
393 | 108 | case AArch64_LSLVXr: |
394 | 125 | case AArch64_LSLVWr: |
395 | 125 | return AARCH64_SFT_LSL_REG; |
396 | 73 | case AArch64_ASRVXr: |
397 | 94 | case AArch64_ASRVWr: |
398 | 94 | return AARCH64_SFT_ASR_REG; |
399 | 360 | } |
400 | 360 | } |
401 | | |
402 | | static void add_non_alias_details(MCInst *MI) |
403 | 248k | { |
404 | 248k | unsigned Opcode = MCInst_getOpcode(MI); |
405 | 248k | switch (Opcode) { |
406 | 236k | default: |
407 | 236k | break; |
408 | 236k | case AArch64_RORVXr: |
409 | 44 | case AArch64_RORVWr: |
410 | 123 | case AArch64_LSRVXr: |
411 | 141 | case AArch64_LSRVWr: |
412 | 249 | case AArch64_LSLVXr: |
413 | 266 | case AArch64_LSLVWr: |
414 | 339 | case AArch64_ASRVXr: |
415 | 360 | case AArch64_ASRVWr: |
416 | 360 | if (AArch64_get_detail(MI)->op_count != 3) { |
417 | 0 | return; |
418 | 0 | } |
419 | 360 | CS_ASSERT_RET(AArch64_get_detail_op(MI, -1)->type == |
420 | 360 | AARCH64_OP_REG); |
421 | | |
422 | | // The shift by register instructions don't set the shift value properly. |
423 | | // Correct it here. |
424 | 360 | uint64_t shift = AArch64_get_detail_op(MI, -1)->reg; |
425 | 360 | cs_aarch64_op *op1 = AArch64_get_detail_op(MI, -2); |
426 | 360 | op1->shift.type = id_to_shifter(Opcode); |
427 | 360 | op1->shift.value = shift; |
428 | 360 | AArch64_dec_op_count(MI); |
429 | 360 | break; |
430 | 88 | case AArch64_FCMPDri: |
431 | 179 | case AArch64_FCMPEDri: |
432 | 258 | case AArch64_FCMPEHri: |
433 | 333 | case AArch64_FCMPESri: |
434 | 455 | case AArch64_FCMPHri: |
435 | 465 | case AArch64_FCMPSri: |
436 | 465 | AArch64_insert_detail_op_reg_at(MI, -1, AARCH64_REG_XZR, |
437 | 465 | CS_AC_READ); |
438 | 465 | break; |
439 | 43 | case AArch64_CMEQv16i8rz: |
440 | 63 | case AArch64_CMEQv1i64rz: |
441 | 88 | case AArch64_CMEQv2i32rz: |
442 | 106 | case AArch64_CMEQv2i64rz: |
443 | 391 | case AArch64_CMEQv4i16rz: |
444 | 452 | case AArch64_CMEQv4i32rz: |
445 | 471 | case AArch64_CMEQv8i16rz: |
446 | 485 | case AArch64_CMEQv8i8rz: |
447 | 524 | case AArch64_CMGEv16i8rz: |
448 | 536 | case AArch64_CMGEv1i64rz: |
449 | 558 | case AArch64_CMGEv2i32rz: |
450 | 598 | case AArch64_CMGEv2i64rz: |
451 | 616 | case AArch64_CMGEv4i16rz: |
452 | 655 | case AArch64_CMGEv4i32rz: |
453 | 669 | case AArch64_CMGEv8i16rz: |
454 | 880 | case AArch64_CMGEv8i8rz: |
455 | 1.01k | case AArch64_CMGTv16i8rz: |
456 | 1.15k | case AArch64_CMGTv1i64rz: |
457 | 1.18k | case AArch64_CMGTv2i32rz: |
458 | 2.02k | case AArch64_CMGTv2i64rz: |
459 | 2.06k | case AArch64_CMGTv4i16rz: |
460 | 2.10k | case AArch64_CMGTv4i32rz: |
461 | 2.33k | case AArch64_CMGTv8i16rz: |
462 | 2.57k | case AArch64_CMGTv8i8rz: |
463 | 3.33k | case AArch64_CMLEv16i8rz: |
464 | 3.37k | case AArch64_CMLEv1i64rz: |
465 | 3.40k | case AArch64_CMLEv2i32rz: |
466 | 3.47k | case AArch64_CMLEv2i64rz: |
467 | 3.54k | case AArch64_CMLEv4i16rz: |
468 | 3.78k | case AArch64_CMLEv4i32rz: |
469 | 3.85k | case AArch64_CMLEv8i16rz: |
470 | 3.92k | case AArch64_CMLEv8i8rz: |
471 | 3.96k | case AArch64_CMLTv16i8rz: |
472 | 3.97k | case AArch64_CMLTv1i64rz: |
473 | 4.01k | case AArch64_CMLTv2i32rz: |
474 | 4.98k | case AArch64_CMLTv2i64rz: |
475 | 5.00k | case AArch64_CMLTv4i16rz: |
476 | 5.01k | case AArch64_CMLTv4i32rz: |
477 | 5.09k | case AArch64_CMLTv8i16rz: |
478 | 5.12k | case AArch64_CMLTv8i8rz: |
479 | 5.12k | AArch64_insert_detail_op_imm_at(MI, -1, 0); |
480 | 5.12k | break; |
481 | 367 | case AArch64_FCMEQ_PPzZ0_D: |
482 | 377 | case AArch64_FCMEQ_PPzZ0_H: |
483 | 406 | case AArch64_FCMEQ_PPzZ0_S: |
484 | 867 | case AArch64_FCMEQv1i16rz: |
485 | 917 | case AArch64_FCMEQv1i32rz: |
486 | 1.02k | case AArch64_FCMEQv1i64rz: |
487 | 1.05k | case AArch64_FCMEQv2i32rz: |
488 | 1.12k | case AArch64_FCMEQv2i64rz: |
489 | 1.13k | case AArch64_FCMEQv4i16rz: |
490 | 1.22k | case AArch64_FCMEQv4i32rz: |
491 | 1.29k | case AArch64_FCMEQv8i16rz: |
492 | 1.41k | case AArch64_FCMGE_PPzZ0_D: |
493 | 1.42k | case AArch64_FCMGE_PPzZ0_H: |
494 | 1.46k | case AArch64_FCMGE_PPzZ0_S: |
495 | 2.09k | case AArch64_FCMGEv1i16rz: |
496 | 2.11k | case AArch64_FCMGEv1i32rz: |
497 | 2.12k | case AArch64_FCMGEv1i64rz: |
498 | 3.31k | case AArch64_FCMGEv2i32rz: |
499 | 3.33k | case AArch64_FCMGEv2i64rz: |
500 | 3.41k | case AArch64_FCMGEv4i16rz: |
501 | 3.48k | case AArch64_FCMGEv4i32rz: |
502 | 3.51k | case AArch64_FCMGEv8i16rz: |
503 | 3.56k | case AArch64_FCMGT_PPzZ0_D: |
504 | 3.57k | case AArch64_FCMGT_PPzZ0_H: |
505 | 3.65k | case AArch64_FCMGT_PPzZ0_S: |
506 | 3.67k | case AArch64_FCMGTv1i16rz: |
507 | 3.73k | case AArch64_FCMGTv1i32rz: |
508 | 3.74k | case AArch64_FCMGTv1i64rz: |
509 | 3.92k | case AArch64_FCMGTv2i32rz: |
510 | 4.13k | case AArch64_FCMGTv2i64rz: |
511 | 4.34k | case AArch64_FCMGTv4i16rz: |
512 | 4.40k | case AArch64_FCMGTv4i32rz: |
513 | 4.54k | case AArch64_FCMGTv8i16rz: |
514 | 4.57k | case AArch64_FCMLE_PPzZ0_D: |
515 | 4.59k | case AArch64_FCMLE_PPzZ0_H: |
516 | 4.82k | case AArch64_FCMLE_PPzZ0_S: |
517 | 4.90k | case AArch64_FCMLEv1i16rz: |
518 | 4.96k | case AArch64_FCMLEv1i32rz: |
519 | 4.98k | case AArch64_FCMLEv1i64rz: |
520 | 5.09k | case AArch64_FCMLEv2i32rz: |
521 | 5.12k | case AArch64_FCMLEv2i64rz: |
522 | 5.16k | case AArch64_FCMLEv4i16rz: |
523 | 5.19k | case AArch64_FCMLEv4i32rz: |
524 | 5.20k | case AArch64_FCMLEv8i16rz: |
525 | 5.27k | case AArch64_FCMLT_PPzZ0_D: |
526 | 5.29k | case AArch64_FCMLT_PPzZ0_H: |
527 | 5.31k | case AArch64_FCMLT_PPzZ0_S: |
528 | 5.52k | case AArch64_FCMLTv1i16rz: |
529 | 5.54k | case AArch64_FCMLTv1i32rz: |
530 | 5.55k | case AArch64_FCMLTv1i64rz: |
531 | 5.62k | case AArch64_FCMLTv2i32rz: |
532 | 5.65k | case AArch64_FCMLTv2i64rz: |
533 | 5.72k | case AArch64_FCMLTv4i16rz: |
534 | 5.79k | case AArch64_FCMLTv4i32rz: |
535 | 6.53k | case AArch64_FCMLTv8i16rz: |
536 | 6.55k | case AArch64_FCMNE_PPzZ0_D: |
537 | 6.59k | case AArch64_FCMNE_PPzZ0_H: |
538 | 6.76k | case AArch64_FCMNE_PPzZ0_S: { |
539 | 6.76k | aarch64_sysop sysop = { 0 }; |
540 | 6.76k | sysop.imm.exactfpimm = AARCH64_EXACTFPIMM_ZERO; |
541 | 6.76k | sysop.sub_type = AARCH64_OP_EXACTFPIMM; |
542 | 6.76k | AArch64_insert_detail_op_sys(MI, -1, sysop, AARCH64_OP_SYSIMM); |
543 | 6.76k | break; |
544 | 6.59k | } |
545 | 248k | } |
546 | 248k | } |
547 | | |
548 | | #define ADD_ZA0_S \ |
549 | 231 | { \ |
550 | 231 | aarch64_op_sme za0_op = { \ |
551 | 231 | .type = AARCH64_SME_OP_TILE, \ |
552 | 231 | .tile = AARCH64_REG_ZAS0, \ |
553 | 231 | .slice_reg = AARCH64_REG_INVALID, \ |
554 | 231 | .slice_offset = { -1 }, \ |
555 | 231 | .has_range_offset = false, \ |
556 | 231 | .is_vertical = false, \ |
557 | 231 | }; \ |
558 | 231 | AArch64_insert_detail_op_sme(MI, -1, za0_op); \ |
559 | 231 | AArch64_get_detail_op(MI, -1)->vas = AARCH64LAYOUT_VL_S; \ |
560 | 231 | AArch64_get_detail_op(MI, -1)->access = CS_AC_WRITE; \ |
561 | 231 | } |
562 | | #define ADD_ZA1_S \ |
563 | 931 | { \ |
564 | 931 | aarch64_op_sme za1_op = { \ |
565 | 931 | .type = AARCH64_SME_OP_TILE, \ |
566 | 931 | .tile = AARCH64_REG_ZAS1, \ |
567 | 931 | .slice_reg = AARCH64_REG_INVALID, \ |
568 | 931 | .slice_offset = { -1 }, \ |
569 | 931 | .has_range_offset = false, \ |
570 | 931 | .is_vertical = false, \ |
571 | 931 | }; \ |
572 | 931 | AArch64_insert_detail_op_sme(MI, -1, za1_op); \ |
573 | 931 | AArch64_get_detail_op(MI, -1)->vas = AARCH64LAYOUT_VL_S; \ |
574 | 931 | AArch64_get_detail_op(MI, -1)->access = CS_AC_WRITE; \ |
575 | 931 | } |
576 | | #define ADD_ZA2_S \ |
577 | 1.22k | { \ |
578 | 1.22k | aarch64_op_sme za2_op = { \ |
579 | 1.22k | .type = AARCH64_SME_OP_TILE, \ |
580 | 1.22k | .tile = AARCH64_REG_ZAS2, \ |
581 | 1.22k | .slice_reg = AARCH64_REG_INVALID, \ |
582 | 1.22k | .slice_offset = { -1 }, \ |
583 | 1.22k | .has_range_offset = false, \ |
584 | 1.22k | .is_vertical = false, \ |
585 | 1.22k | }; \ |
586 | 1.22k | AArch64_insert_detail_op_sme(MI, -1, za2_op); \ |
587 | 1.22k | AArch64_get_detail_op(MI, -1)->vas = AARCH64LAYOUT_VL_S; \ |
588 | 1.22k | AArch64_get_detail_op(MI, -1)->access = CS_AC_WRITE; \ |
589 | 1.22k | } |
590 | | #define ADD_ZA3_S \ |
591 | 1.22k | { \ |
592 | 1.22k | aarch64_op_sme za3_op = { \ |
593 | 1.22k | .type = AARCH64_SME_OP_TILE, \ |
594 | 1.22k | .tile = AARCH64_REG_ZAS3, \ |
595 | 1.22k | .slice_reg = AARCH64_REG_INVALID, \ |
596 | 1.22k | .slice_offset = { -1 }, \ |
597 | 1.22k | .has_range_offset = false, \ |
598 | 1.22k | .is_vertical = false, \ |
599 | 1.22k | }; \ |
600 | 1.22k | AArch64_insert_detail_op_sme(MI, -1, za3_op); \ |
601 | 1.22k | AArch64_get_detail_op(MI, -1)->vas = AARCH64LAYOUT_VL_S; \ |
602 | 1.22k | AArch64_get_detail_op(MI, -1)->access = CS_AC_WRITE; \ |
603 | 1.22k | } |
604 | | #define ADD_ZA \ |
605 | 70 | { \ |
606 | 70 | aarch64_op_sme za_op = { \ |
607 | 70 | .type = AARCH64_SME_OP_TILE, \ |
608 | 70 | .tile = AARCH64_REG_ZA, \ |
609 | 70 | .slice_reg = AARCH64_REG_INVALID, \ |
610 | 70 | .slice_offset = { -1 }, \ |
611 | 70 | .has_range_offset = false, \ |
612 | 70 | .is_vertical = false, \ |
613 | 70 | }; \ |
614 | 70 | AArch64_insert_detail_op_sme(MI, -1, za_op); \ |
615 | 70 | AArch64_get_detail_op(MI, -1)->access = CS_AC_WRITE; \ |
616 | 70 | } |
617 | | |
618 | | static void AArch64_add_not_defined_ops(MCInst *MI, const SStream *OS) |
619 | 290k | { |
620 | 290k | if (!detail_is_set(MI)) |
621 | 0 | return; |
622 | | |
623 | 290k | if (!MI->flat_insn->is_alias || !MI->flat_insn->usesAliasDetails) { |
624 | 248k | add_non_alias_details(MI); |
625 | 248k | return; |
626 | 248k | } |
627 | | |
628 | | // Alias details |
629 | 41.3k | switch (MI->flat_insn->alias_id) { |
630 | 35.4k | default: |
631 | 35.4k | return; |
632 | 35.4k | case AARCH64_INS_ALIAS_ROR: |
633 | 348 | if (AArch64_get_detail(MI)->op_count != 3) { |
634 | 0 | return; |
635 | 0 | } |
636 | | // The ROR alias doesn't set the shift value properly. |
637 | | // Correct it here. |
638 | 348 | bool reg_shift = AArch64_get_detail_op(MI, -1)->type == |
639 | 348 | AARCH64_OP_REG; |
640 | 348 | uint64_t shift = reg_shift ? |
641 | 0 | AArch64_get_detail_op(MI, -1)->reg : |
642 | 348 | AArch64_get_detail_op(MI, -1)->imm; |
643 | 348 | cs_aarch64_op *op1 = AArch64_get_detail_op(MI, -2); |
644 | 348 | op1->shift.type = reg_shift ? AARCH64_SFT_ROR_REG : |
645 | 348 | AARCH64_SFT_ROR; |
646 | 348 | op1->shift.value = shift; |
647 | 348 | AArch64_dec_op_count(MI); |
648 | 348 | break; |
649 | 229 | case AARCH64_INS_ALIAS_FMOV: |
650 | 229 | if (AArch64_get_detail_op(MI, -1)->type == AARCH64_OP_FP) { |
651 | 229 | break; |
652 | 229 | } |
653 | 0 | AArch64_insert_detail_op_float_at(MI, -1, 0.0f, CS_AC_READ); |
654 | 0 | break; |
655 | 127 | case AARCH64_INS_ALIAS_LD1: |
656 | 181 | case AARCH64_INS_ALIAS_LD1R: |
657 | 797 | case AARCH64_INS_ALIAS_LD2: |
658 | 997 | case AARCH64_INS_ALIAS_LD2R: |
659 | 1.18k | case AARCH64_INS_ALIAS_LD3: |
660 | 1.20k | case AARCH64_INS_ALIAS_LD3R: |
661 | 2.09k | case AARCH64_INS_ALIAS_LD4: |
662 | 2.30k | case AARCH64_INS_ALIAS_LD4R: |
663 | 2.45k | case AARCH64_INS_ALIAS_ST1: |
664 | 2.60k | case AARCH64_INS_ALIAS_ST2: |
665 | 2.64k | case AARCH64_INS_ALIAS_ST3: |
666 | 3.42k | case AARCH64_INS_ALIAS_ST4: { |
667 | | // Add post-index disp |
668 | 3.42k | const char *disp_off = strrchr(OS->buffer, '#'); |
669 | 3.42k | if (!disp_off) |
670 | 0 | return; |
671 | 3.42k | unsigned disp = atoi(disp_off + 1); |
672 | 3.42k | AArch64_get_detail_op(MI, -1)->type = AARCH64_OP_MEM; |
673 | 3.42k | AArch64_get_detail_op(MI, -1)->mem.base = |
674 | 3.42k | AArch64_get_detail_op(MI, -1)->reg; |
675 | 3.42k | AArch64_get_detail_op(MI, -1)->mem.disp = disp; |
676 | 3.42k | AArch64_get_detail(MI)->post_index = true; |
677 | 3.42k | break; |
678 | 3.42k | } |
679 | 3 | case AARCH64_INS_ALIAS_GCSB: |
680 | | // TODO |
681 | | // Only CSYNC is defined in LLVM. So we need to add it. |
682 | | // /* 2825 */ "gcsb dsync\0" |
683 | 3 | break; |
684 | 273 | case AARCH64_INS_ALIAS_SMSTART: |
685 | 308 | case AARCH64_INS_ALIAS_SMSTOP: { |
686 | 308 | const char *disp_off = NULL; |
687 | 308 | disp_off = strstr(OS->buffer, "smstart\tza"); |
688 | 308 | if (disp_off) { |
689 | 254 | aarch64_sysop sysop = { 0 }; |
690 | 254 | sysop.alias.svcr = AARCH64_SVCR_SVCRZA; |
691 | 254 | sysop.sub_type = AARCH64_OP_SVCR; |
692 | 254 | AArch64_insert_detail_op_sys(MI, -1, sysop, |
693 | 254 | AARCH64_OP_SYSALIAS); |
694 | 254 | return; |
695 | 254 | } |
696 | 54 | disp_off = strstr(OS->buffer, "smstart\tsm"); |
697 | 54 | if (disp_off) { |
698 | 19 | aarch64_sysop sysop = { 0 }; |
699 | 19 | sysop.alias.svcr = AARCH64_SVCR_SVCRSM; |
700 | 19 | sysop.sub_type = AARCH64_OP_SVCR; |
701 | 19 | AArch64_insert_detail_op_sys(MI, -1, sysop, |
702 | 19 | AARCH64_OP_SYSALIAS); |
703 | 19 | return; |
704 | 19 | } |
705 | 35 | break; |
706 | 54 | } |
707 | 1.57k | case AARCH64_INS_ALIAS_ZERO: { |
708 | | // It is ugly, but the hard-coded search patterns do it for now. |
709 | 1.57k | const char *disp_off = NULL; |
710 | | |
711 | 1.57k | disp_off = strstr(OS->buffer, "{za}"); |
712 | 1.57k | if (disp_off) { |
713 | 70 | ADD_ZA; |
714 | 70 | return; |
715 | 70 | } |
716 | 1.50k | disp_off = strstr(OS->buffer, "{za1.h}"); |
717 | 1.50k | if (disp_off) { |
718 | 78 | aarch64_op_sme op = { |
719 | 78 | .type = AARCH64_SME_OP_TILE, |
720 | 78 | .tile = AARCH64_REG_ZAH1, |
721 | 78 | .slice_reg = AARCH64_REG_INVALID, |
722 | 78 | .slice_offset = { -1 }, |
723 | 78 | .has_range_offset = false, |
724 | 78 | .is_vertical = false, |
725 | 78 | }; |
726 | 78 | AArch64_insert_detail_op_sme(MI, -1, op); |
727 | 78 | AArch64_get_detail_op(MI, -1)->vas = AARCH64LAYOUT_VL_H; |
728 | 78 | AArch64_get_detail_op(MI, -1)->access = CS_AC_WRITE; |
729 | 78 | return; |
730 | 78 | } |
731 | 1.42k | disp_off = strstr(OS->buffer, "{za0.h}"); |
732 | 1.42k | if (disp_off) { |
733 | 10 | aarch64_op_sme op = { |
734 | 10 | .type = AARCH64_SME_OP_TILE, |
735 | 10 | .tile = AARCH64_REG_ZAH0, |
736 | 10 | .slice_reg = AARCH64_REG_INVALID, |
737 | 10 | .slice_offset = { -1 }, |
738 | 10 | .has_range_offset = false, |
739 | 10 | .is_vertical = false, |
740 | 10 | }; |
741 | 10 | AArch64_insert_detail_op_sme(MI, -1, op); |
742 | 10 | AArch64_get_detail_op(MI, -1)->vas = AARCH64LAYOUT_VL_H; |
743 | 10 | AArch64_get_detail_op(MI, -1)->access = CS_AC_WRITE; |
744 | 10 | return; |
745 | 10 | } |
746 | 1.41k | disp_off = strstr(OS->buffer, "{za0.s}"); |
747 | 1.41k | if (disp_off) { |
748 | 44 | ADD_ZA0_S; |
749 | 44 | return; |
750 | 44 | } |
751 | 1.37k | disp_off = strstr(OS->buffer, "{za1.s}"); |
752 | 1.37k | if (disp_off) { |
753 | 22 | ADD_ZA1_S; |
754 | 22 | return; |
755 | 22 | } |
756 | 1.35k | disp_off = strstr(OS->buffer, "{za2.s}"); |
757 | 1.35k | if (disp_off) { |
758 | 37 | ADD_ZA2_S; |
759 | 37 | return; |
760 | 37 | } |
761 | 1.31k | disp_off = strstr(OS->buffer, "{za3.s}"); |
762 | 1.31k | if (disp_off) { |
763 | 10 | ADD_ZA3_S; |
764 | 10 | return; |
765 | 10 | } |
766 | 1.30k | disp_off = strstr(OS->buffer, "{za0.s,za1.s}"); |
767 | 1.30k | if (disp_off) { |
768 | 27 | ADD_ZA0_S; |
769 | 27 | ADD_ZA1_S; |
770 | 27 | return; |
771 | 27 | } |
772 | 1.27k | disp_off = strstr(OS->buffer, "{za0.s,za3.s}"); |
773 | 1.27k | if (disp_off) { |
774 | 71 | ADD_ZA0_S; |
775 | 71 | ADD_ZA3_S; |
776 | 71 | return; |
777 | 71 | } |
778 | 1.20k | disp_off = strstr(OS->buffer, "{za1.s,za2.s}"); |
779 | 1.20k | if (disp_off) { |
780 | 43 | ADD_ZA1_S; |
781 | 43 | ADD_ZA2_S; |
782 | 43 | return; |
783 | 43 | } |
784 | 1.16k | disp_off = strstr(OS->buffer, "{za2.s,za3.s}"); |
785 | 1.16k | if (disp_off) { |
786 | 281 | ADD_ZA2_S; |
787 | 281 | ADD_ZA3_S; |
788 | 281 | return; |
789 | 281 | } |
790 | 883 | disp_off = strstr(OS->buffer, "{za0.s,za1.s,za2.s}"); |
791 | 883 | if (disp_off) { |
792 | 25 | ADD_ZA0_S; |
793 | 25 | ADD_ZA1_S; |
794 | 25 | ADD_ZA2_S; |
795 | 25 | return; |
796 | 25 | } |
797 | 858 | disp_off = strstr(OS->buffer, "{za0.s,za1.s,za3.s}"); |
798 | 858 | if (disp_off) { |
799 | 20 | ADD_ZA0_S; |
800 | 20 | ADD_ZA1_S; |
801 | 20 | ADD_ZA3_S; |
802 | 20 | return; |
803 | 20 | } |
804 | 838 | disp_off = strstr(OS->buffer, "{za0.s,za2.s,za3.s}"); |
805 | 838 | if (disp_off) { |
806 | 44 | ADD_ZA0_S; |
807 | 44 | ADD_ZA2_S; |
808 | 44 | ADD_ZA3_S; |
809 | 44 | return; |
810 | 44 | } |
811 | 794 | disp_off = strstr(OS->buffer, "{za1.s,za2.s,za3.s}"); |
812 | 794 | if (disp_off) { |
813 | 794 | ADD_ZA1_S; |
814 | 794 | ADD_ZA2_S; |
815 | 794 | ADD_ZA3_S; |
816 | 794 | return; |
817 | 794 | } |
818 | 0 | break; |
819 | 794 | } |
820 | 41.3k | } |
821 | 41.3k | } |
822 | | |
823 | | void AArch64_set_instr_map_data(MCInst *MI) |
824 | 296k | { |
825 | 296k | map_cs_id(MI, aarch64_insns, ARR_SIZE(aarch64_insns)); |
826 | 296k | map_implicit_reads(MI, aarch64_insns); |
827 | 296k | map_implicit_writes(MI, aarch64_insns); |
828 | 296k | map_groups(MI, aarch64_insns); |
829 | 296k | } |
830 | | |
831 | | bool AArch64_getInstruction(csh handle, const uint8_t *code, size_t code_len, |
832 | | MCInst *MI, uint16_t *size, uint64_t address, |
833 | | void *info) |
834 | 296k | { |
835 | 296k | AArch64_init_cs_detail(MI); |
836 | 296k | DecodeStatus Result = AArch64_LLVM_getInstruction( |
837 | 296k | handle, code, code_len, MI, size, address, info); |
838 | 296k | AArch64_set_instr_map_data(MI); |
839 | 296k | if (Result == MCDisassembler_SoftFail) { |
840 | 6.57k | MCInst_setSoftFail(MI); |
841 | 6.57k | } |
842 | 296k | return Result != MCDisassembler_Fail; |
843 | 296k | } |
844 | | |
845 | | /// Patches the register names with Capstone-specific aliases. |
846 | | /// Those are common aliases for registers (e.g. x29 = fp) |
847 | | /// which are not set in LLVM. |
848 | | static void patch_cs_reg_alias(char *asm_str) |
849 | 0 | { |
850 | 0 | bool skip_sub = false; |
851 | 0 | char *x29 = strstr(asm_str, "x29"); |
852 | 0 | if (x29 > asm_str && strstr(asm_str, "0x29") == (x29 - 1)) { |
853 | | // Check for hex prefix |
854 | 0 | skip_sub = true; |
855 | 0 | } |
856 | 0 | while (x29 && !skip_sub) { |
857 | 0 | x29[0] = 'f'; |
858 | 0 | x29[1] = 'p'; |
859 | 0 | memmove(x29 + 2, x29 + 3, strlen(x29 + 3)); |
860 | 0 | asm_str[strlen(asm_str) - 1] = '\0'; |
861 | 0 | x29 = strstr(asm_str, "x29"); |
862 | 0 | } |
863 | 0 | skip_sub = false; |
864 | 0 | char *x30 = strstr(asm_str, "x30"); |
865 | 0 | if (x30 > asm_str && strstr(asm_str, "0x30") == (x30 - 1)) { |
866 | | // Check for hex prefix |
867 | 0 | skip_sub = true; |
868 | 0 | } |
869 | 0 | while (x30 && !skip_sub) { |
870 | 0 | x30[0] = 'l'; |
871 | 0 | x30[1] = 'r'; |
872 | 0 | memmove(x30 + 2, x30 + 3, strlen(x30 + 3)); |
873 | 0 | asm_str[strlen(asm_str) - 1] = '\0'; |
874 | 0 | x30 = strstr(asm_str, "x30"); |
875 | 0 | } |
876 | 0 | } |
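The sketch below shows the effect of a single x29 substitution on an assumed buffer: "x29" is rewritten to "fp" in place and the string shrinks by one byte, while a literal such as "0x29" would be skipped by the hex-prefix check. It is a simplified model of the routine above, not the routine itself (no loop, no x30 handling).

#include <stdio.h>
#include <string.h>

int main(void)
{
	char buf[] = "stp x29, x30, [sp, #-16]!"; // assumed asm buffer
	char *x29 = strstr(buf, "x29");
	if (x29 && !(x29 > buf && x29[-1] == '0')) { // crude hex-prefix guard
		x29[0] = 'f';
		x29[1] = 'p';
		// Shift the tail left by one, including the terminating NUL.
		memmove(x29 + 2, x29 + 3, strlen(x29 + 3) + 1);
	}
	puts(buf); // "stp fp, x30, [sp, #-16]!"
	return 0;
}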
877 | | |
878 | | /// Adds groups to the instruction which are not defined in LLVM. |
879 | | static void AArch64_add_cs_groups(MCInst *MI) |
880 | 290k | { |
881 | 290k | unsigned Opcode = MI->flat_insn->id; |
882 | 290k | switch (Opcode) { |
883 | 284k | default: |
884 | 284k | return; |
885 | 284k | case AARCH64_INS_SVC: |
886 | 18 | add_group(MI, AARCH64_GRP_INT); |
887 | 18 | break; |
888 | 41 | case AARCH64_INS_SMC: |
889 | 4.91k | case AARCH64_INS_MSR: |
890 | 5.58k | case AARCH64_INS_MRS: |
891 | 5.58k | add_group(MI, AARCH64_GRP_PRIVILEGE); |
892 | 5.58k | break; |
893 | 43 | case AARCH64_INS_RET: |
894 | 62 | case AARCH64_INS_RETAA: |
895 | 72 | case AARCH64_INS_RETAB: |
896 | 72 | add_group(MI, AARCH64_GRP_RET); |
897 | 72 | break; |
898 | 290k | } |
899 | 290k | } |
900 | | |
901 | | static void AArch64_correct_mem_access(MCInst *MI) |
902 | 290k | { |
903 | 290k | if (!detail_is_set(MI)) |
904 | 0 | return; |
905 | 290k | cs_ac_type access = |
906 | 290k | aarch64_insns[MI->Opcode].suppl_info.aarch64.mem_acc; |
907 | 290k | if (access == CS_AC_INVALID) { |
908 | 196k | return; |
909 | 196k | } |
910 | 196k | for (int i = 0; i < AArch64_get_detail(MI)->op_count; ++i) { |
911 | 194k | if (AArch64_get_detail_op(MI, -i)->type == AARCH64_OP_MEM) { |
912 | 91.7k | AArch64_get_detail_op(MI, -i)->access = access; |
913 | 91.7k | return; |
914 | 91.7k | } |
915 | 194k | } |
916 | 93.8k | } |
917 | | |
918 | | void AArch64_printer(MCInst *MI, SStream *O, void * /* MCRegisterInfo* */ info) |
919 | 290k | { |
920 | 290k | MCRegisterInfo *MRI = (MCRegisterInfo *)info; |
921 | 290k | MI->MRI = MRI; |
922 | 290k | MI->fillDetailOps = detail_is_set(MI); |
923 | 290k | MI->flat_insn->usesAliasDetails = map_use_alias_details(MI); |
924 | 290k | AArch64_LLVM_printInstruction(MI, O, info); |
925 | 290k | if (detail_is_set(MI)) { |
926 | 290k | if (AArch64_get_detail(MI)->is_doing_sme) { |
927 | | // Last operand still needs to be closed. |
928 | 5.38k | AArch64_get_detail(MI)->is_doing_sme = false; |
929 | 5.38k | AArch64_inc_op_count(MI); |
930 | 5.38k | } |
931 | 290k | AArch64_get_detail(MI)->post_index = |
932 | 290k | AArch64_check_post_index_am(MI, O); |
933 | 290k | } |
934 | 290k | AArch64_check_updates_flags(MI); |
935 | 290k | map_set_alias_id(MI, O, insn_alias_mnem_map, |
936 | 290k | ARR_SIZE(insn_alias_mnem_map) - 1); |
937 | 290k | int syntax_opt = MI->csh->syntax; |
938 | 290k | if (syntax_opt & CS_OPT_SYNTAX_CS_REG_ALIAS) |
939 | 0 | patch_cs_reg_alias(O->buffer); |
940 | 290k | AArch64_add_not_defined_ops(MI, O); |
941 | 290k | AArch64_add_cs_groups(MI); |
942 | 290k | AArch64_add_vas(MI, O); |
943 | 290k | AArch64_correct_mem_access(MI); |
944 | 290k | } |
945 | | |
946 | | // given internal insn id, return public instruction info |
947 | | void AArch64_get_insn_id(cs_struct *h, cs_insn *insn, unsigned int id) |
948 | 290k | { |
949 | | // Done after disassembly |
950 | 290k | return; |
951 | 290k | } |
952 | | |
953 | | static const char *const insn_name_maps[] = { |
954 | | #include "AArch64GenCSMappingInsnName.inc" |
955 | | }; |
956 | | |
957 | | const char *AArch64_insn_name(csh handle, unsigned int id) |
958 | 290k | { |
959 | 290k | #ifndef CAPSTONE_DIET |
960 | 290k | if (id < AARCH64_INS_ALIAS_END && id > AARCH64_INS_ALIAS_BEGIN) { |
961 | 0 | if (id - AARCH64_INS_ALIAS_BEGIN >= |
962 | 0 | ARR_SIZE(insn_alias_mnem_map)) |
963 | 0 | return NULL; |
964 | | |
965 | 0 | return insn_alias_mnem_map[id - AARCH64_INS_ALIAS_BEGIN - 1] |
966 | 0 | .name; |
967 | 0 | } |
968 | 290k | if (id >= AARCH64_INS_ENDING) |
969 | 0 | return NULL; |
970 | | |
971 | 290k | if (id < ARR_SIZE(insn_name_maps)) |
972 | 290k | return insn_name_maps[id]; |
973 | | |
974 | | // not found |
975 | 0 | return NULL; |
976 | | #else |
977 | | return NULL; |
978 | | #endif |
979 | 290k | } |
980 | | |
981 | | #ifndef CAPSTONE_DIET |
982 | | static const name_map group_name_maps[] = { |
983 | | // generic groups |
984 | | { AARCH64_GRP_INVALID, NULL }, |
985 | | { AARCH64_GRP_JUMP, "jump" }, |
986 | | { AARCH64_GRP_CALL, "call" }, |
987 | | { AARCH64_GRP_RET, "return" }, |
988 | | { AARCH64_GRP_PRIVILEGE, "privilege" }, |
989 | | { AARCH64_GRP_INT, "int" }, |
990 | | { AARCH64_GRP_BRANCH_RELATIVE, "branch_relative" }, |
991 | | |
992 | | // architecture-specific groups |
993 | | #include "AArch64GenCSFeatureName.inc" |
994 | | }; |
995 | | #endif |
996 | | |
997 | | const char *AArch64_group_name(csh handle, unsigned int id) |
998 | 239k | { |
999 | 239k | #ifndef CAPSTONE_DIET |
1000 | 239k | return id2name(group_name_maps, ARR_SIZE(group_name_maps), id); |
1001 | | #else |
1002 | | return NULL; |
1003 | | #endif |
1004 | 239k | } |
1005 | | |
1006 | | // map instruction name to public instruction ID |
1007 | | aarch64_insn AArch64_map_insn(const char *name) |
1008 | 0 | { |
1009 | 0 | unsigned int i; |
1010 | |
1011 | 0 | for (i = 1; i < ARR_SIZE(insn_name_maps); i++) { |
1012 | 0 | if (!strcmp(name, insn_name_maps[i])) |
1013 | 0 | return i; |
1014 | 0 | } |
1015 | | |
1016 | | // not found |
1017 | 0 | return AARCH64_INS_INVALID; |
1018 | 0 | } |
1019 | | |
1020 | | #ifndef CAPSTONE_DIET |
1021 | | |
1022 | | static const map_insn_ops insn_operands[] = { |
1023 | | #include "AArch64GenCSMappingInsnOp.inc" |
1024 | | }; |
1025 | | |
1026 | | void AArch64_reg_access(const cs_insn *insn, cs_regs regs_read, |
1027 | | uint8_t *regs_read_count, cs_regs regs_write, |
1028 | | uint8_t *regs_write_count) |
1029 | 0 | { |
1030 | 0 | uint8_t i; |
1031 | 0 | uint8_t read_count, write_count; |
1032 | 0 | cs_aarch64 *aarch64 = &(insn->detail->aarch64); |
1033 | |
|
1034 | 0 | read_count = insn->detail->regs_read_count; |
1035 | 0 | write_count = insn->detail->regs_write_count; |
1036 | | |
1037 | | // implicit registers |
1038 | 0 | memcpy(regs_read, insn->detail->regs_read, |
1039 | 0 | read_count * sizeof(insn->detail->regs_read[0])); |
1040 | 0 | memcpy(regs_write, insn->detail->regs_write, |
1041 | 0 | write_count * sizeof(insn->detail->regs_write[0])); |
1042 | | |
1043 | | // explicit registers |
1044 | 0 | for (i = 0; i < aarch64->op_count; i++) { |
1045 | 0 | cs_aarch64_op *op = &(aarch64->operands[i]); |
1046 | 0 | switch ((int)op->type) { |
1047 | 0 | case AARCH64_OP_REG: |
1048 | 0 | if ((op->access & CS_AC_READ) && |
1049 | 0 | !arr_exist(regs_read, read_count, op->reg)) { |
1050 | 0 | regs_read[read_count] = (uint16_t)op->reg; |
1051 | 0 | read_count++; |
1052 | 0 | } |
1053 | 0 | if ((op->access & CS_AC_WRITE) && |
1054 | 0 | !arr_exist(regs_write, write_count, op->reg)) { |
1055 | 0 | regs_write[write_count] = (uint16_t)op->reg; |
1056 | 0 | write_count++; |
1057 | 0 | } |
1058 | 0 | break; |
1059 | 0 | case AARCH64_OP_MEM: |
1060 | | // registers appearing in memory references are always read |
1061 | 0 | if ((op->mem.base != AARCH64_REG_INVALID) && |
1062 | 0 | !arr_exist(regs_read, read_count, op->mem.base)) { |
1063 | 0 | regs_read[read_count] = (uint16_t)op->mem.base; |
1064 | 0 | read_count++; |
1065 | 0 | } |
1066 | 0 | if ((op->mem.index != AARCH64_REG_INVALID) && |
1067 | 0 | !arr_exist(regs_read, read_count, op->mem.index)) { |
1068 | 0 | regs_read[read_count] = (uint16_t)op->mem.index; |
1069 | 0 | read_count++; |
1070 | 0 | } |
1071 | 0 | if ((insn->detail->writeback) && |
1072 | 0 | (op->mem.base != AARCH64_REG_INVALID) && |
1073 | 0 | !arr_exist(regs_write, write_count, op->mem.base)) { |
1074 | 0 | regs_write[write_count] = |
1075 | 0 | (uint16_t)op->mem.base; |
1076 | 0 | write_count++; |
1077 | 0 | } |
1078 | 0 | break; |
1079 | 0 | case AARCH64_OP_SME: |
1080 | 0 | if ((op->access & CS_AC_READ) && |
1081 | 0 | (op->sme.tile != AARCH64_REG_INVALID) && |
1082 | 0 | !arr_exist(regs_read, read_count, op->sme.tile)) { |
1083 | 0 | regs_read[read_count] = (uint16_t)op->sme.tile; |
1084 | 0 | read_count++; |
1085 | 0 | } |
1086 | 0 | if ((op->access & CS_AC_WRITE) && |
1087 | 0 | (op->sme.tile != AARCH64_REG_INVALID) && |
1088 | 0 | !arr_exist(regs_write, write_count, op->sme.tile)) { |
1089 | 0 | regs_write[write_count] = |
1090 | 0 | (uint16_t)op->sme.tile; |
1091 | 0 | write_count++; |
1092 | 0 | } |
1093 | 0 | if ((op->sme.slice_reg != AARCH64_REG_INVALID) && |
1094 | 0 | !arr_exist(regs_read, read_count, |
1095 | 0 | op->sme.slice_reg)) { |
1096 | 0 | regs_read[read_count] = |
1097 | 0 | (uint16_t)op->sme.slice_reg; |
1098 | 0 | read_count++; |
1099 | 0 | } |
1100 | 0 | break; |
1101 | 0 | case AARCH64_OP_PRED: |
1102 | 0 | if ((op->access & CS_AC_READ) && |
1103 | 0 | (op->pred.reg != AARCH64_REG_INVALID) && |
1104 | 0 | !arr_exist(regs_read, read_count, op->pred.reg)) { |
1105 | 0 | regs_read[read_count] = (uint16_t)op->pred.reg; |
1106 | 0 | read_count++; |
1107 | 0 | } |
1108 | 0 | if ((op->access & CS_AC_WRITE) && |
1109 | 0 | (op->pred.reg != AARCH64_REG_INVALID) && |
1110 | 0 | !arr_exist(regs_write, write_count, op->pred.reg)) { |
1111 | 0 | regs_write[write_count] = |
1112 | 0 | (uint16_t)op->pred.reg; |
1113 | 0 | write_count++; |
1114 | 0 | } |
1115 | 0 | if ((op->pred.vec_select != AARCH64_REG_INVALID) && |
1116 | 0 | !arr_exist(regs_read, read_count, |
1117 | 0 | op->pred.vec_select)) { |
1118 | 0 | regs_read[read_count] = |
1119 | 0 | (uint16_t)op->pred.vec_select; |
1120 | 0 | read_count++; |
1121 | 0 | } |
1122 | 0 | break; |
1123 | 0 | default: |
1124 | 0 | break; |
1125 | 0 | } |
1126 | 0 | if (op->shift.type >= AARCH64_SFT_LSL_REG) { |
1127 | 0 | if (!arr_exist(regs_read, read_count, |
1128 | 0 | op->shift.value)) { |
1129 | 0 | regs_read[read_count] = |
1130 | 0 | (uint16_t)op->shift.value; |
1131 | 0 | read_count++; |
1132 | 0 | } |
1133 | 0 | } |
1134 | 0 | } |
1135 | | |
1136 | 0 | switch (insn->alias_id) { |
1137 | 0 | default: |
1138 | 0 | break; |
1139 | 0 | case AARCH64_INS_ALIAS_RET: |
1140 | 0 | regs_read[read_count] = AARCH64_REG_X30; |
1141 | 0 | read_count++; |
1142 | 0 | break; |
1143 | 0 | } |
1144 | | |
1145 | 0 | *regs_read_count = read_count; |
1146 | 0 | *regs_write_count = write_count; |
1147 | 0 | } |
1148 | | #endif |
1149 | | |
1150 | | static AArch64Layout_VectorLayout get_vl_by_suffix(const char suffix) |
1151 | 210k | { |
1152 | 210k | switch (suffix) { |
1153 | 62.7k | default: |
1154 | 62.7k | return AARCH64LAYOUT_INVALID; |
1155 | 36.2k | case 'b': |
1156 | 36.2k | case 'B': |
1157 | 36.2k | return AARCH64LAYOUT_VL_B; |
1158 | 32.3k | case 'h': |
1159 | 32.3k | case 'H': |
1160 | 32.3k | return AARCH64LAYOUT_VL_H; |
1161 | 35.4k | case 's': |
1162 | 35.4k | case 'S': |
1163 | 35.4k | return AARCH64LAYOUT_VL_S; |
1164 | 40.9k | case 'd': |
1165 | 40.9k | case 'D': |
1166 | 40.9k | return AARCH64LAYOUT_VL_D; |
1167 | 2.55k | case 'q': |
1168 | 2.55k | case 'Q': |
1169 | 2.55k | return AARCH64LAYOUT_VL_Q; |
1170 | 210k | } |
1171 | 210k | } |
1172 | | |
1173 | | static unsigned get_vec_list_num_regs(MCInst *MI, unsigned Reg) |
1174 | 64.5k | { |
1175 | | // Work out how many registers there are in the list (if there is an actual |
1176 | | // list). |
1177 | 64.5k | unsigned NumRegs = 1; |
1178 | 64.5k | if (MCRegisterClass_contains( |
1179 | 64.5k | MCRegisterInfo_getRegClass(MI->MRI, AArch64_DDRegClassID), |
1180 | 64.5k | Reg) || |
1181 | 64.5k | MCRegisterClass_contains( |
1182 | 64.1k | MCRegisterInfo_getRegClass(MI->MRI, AArch64_ZPR2RegClassID), |
1183 | 64.1k | Reg) || |
1184 | 64.5k | MCRegisterClass_contains( |
1185 | 49.5k | MCRegisterInfo_getRegClass(MI->MRI, AArch64_QQRegClassID), |
1186 | 49.5k | Reg) || |
1187 | 64.5k | MCRegisterClass_contains( |
1188 | 44.9k | MCRegisterInfo_getRegClass(MI->MRI, AArch64_PPR2RegClassID), |
1189 | 44.9k | Reg) || |
1190 | 64.5k | MCRegisterClass_contains( |
1191 | 43.8k | MCRegisterInfo_getRegClass(MI->MRI, |
1192 | 43.8k | AArch64_ZPR2StridedRegClassID), |
1193 | 43.8k | Reg)) |
1194 | 23.7k | NumRegs = 2; |
1195 | 40.8k | else if (MCRegisterClass_contains( |
1196 | 40.8k | MCRegisterInfo_getRegClass(MI->MRI, |
1197 | 40.8k | AArch64_DDDRegClassID), |
1198 | 40.8k | Reg) || |
1199 | 40.8k | MCRegisterClass_contains( |
1200 | 40.2k | MCRegisterInfo_getRegClass(MI->MRI, |
1201 | 40.2k | AArch64_ZPR3RegClassID), |
1202 | 40.2k | Reg) || |
1203 | 40.8k | MCRegisterClass_contains( |
1204 | 40.0k | MCRegisterInfo_getRegClass(MI->MRI, |
1205 | 40.0k | AArch64_QQQRegClassID), |
1206 | 40.0k | Reg)) |
1207 | 5.33k | NumRegs = 3; |
1208 | 35.4k | else if (MCRegisterClass_contains( |
1209 | 35.4k | MCRegisterInfo_getRegClass(MI->MRI, |
1210 | 35.4k | AArch64_DDDDRegClassID), |
1211 | 35.4k | Reg) || |
1212 | 35.4k | MCRegisterClass_contains( |
1213 | 35.1k | MCRegisterInfo_getRegClass(MI->MRI, |
1214 | 35.1k | AArch64_ZPR4RegClassID), |
1215 | 35.1k | Reg) || |
1216 | 35.4k | MCRegisterClass_contains( |
1217 | 26.5k | MCRegisterInfo_getRegClass(MI->MRI, |
1218 | 26.5k | AArch64_QQQQRegClassID), |
1219 | 26.5k | Reg) || |
1220 | 35.4k | MCRegisterClass_contains( |
1221 | 20.9k | MCRegisterInfo_getRegClass( |
1222 | 20.9k | MI->MRI, AArch64_ZPR4StridedRegClassID), |
1223 | 20.9k | Reg)) |
1224 | 15.7k | NumRegs = 4; |
1225 | 64.5k | return NumRegs; |
1226 | 64.5k | } |
1227 | | |
1228 | | static unsigned get_vec_list_stride(MCInst *MI, unsigned Reg) |
1229 | 64.5k | { |
1230 | 64.5k | unsigned Stride = 1; |
1231 | 64.5k | if (MCRegisterClass_contains( |
1232 | 64.5k | MCRegisterInfo_getRegClass(MI->MRI, |
1233 | 64.5k | AArch64_ZPR2StridedRegClassID), |
1234 | 64.5k | Reg)) |
1235 | 3.01k | Stride = 8; |
1236 | 61.5k | else if (MCRegisterClass_contains( |
1237 | 61.5k | MCRegisterInfo_getRegClass( |
1238 | 61.5k | MI->MRI, AArch64_ZPR4StridedRegClassID), |
1239 | 61.5k | Reg)) |
1240 | 1.16k | Stride = 4; |
1241 | 64.5k | return Stride; |
1242 | 64.5k | } |
1243 | | |
1244 | | static unsigned get_vec_list_first_reg(MCInst *MI, unsigned RegL) |
1245 | 64.5k | { |
1246 | 64.5k | unsigned Reg = RegL; |
1247 | | // Now forget about the list and find out what the first register is. |
1248 | 64.5k | if (MCRegisterInfo_getSubReg(MI->MRI, RegL, AArch64_dsub0)) |
1249 | 1.28k | Reg = MCRegisterInfo_getSubReg(MI->MRI, RegL, AArch64_dsub0); |
1250 | 63.2k | else if (MCRegisterInfo_getSubReg(MI->MRI, RegL, AArch64_qsub0)) |
1251 | 14.6k | Reg = MCRegisterInfo_getSubReg(MI->MRI, RegL, AArch64_qsub0); |
1252 | 48.5k | else if (MCRegisterInfo_getSubReg(MI->MRI, RegL, AArch64_zsub0)) |
1253 | 27.6k | Reg = MCRegisterInfo_getSubReg(MI->MRI, RegL, AArch64_zsub0); |
1254 | 20.9k | else if (MCRegisterInfo_getSubReg(MI->MRI, RegL, AArch64_psub0)) |
1255 | 1.14k | Reg = MCRegisterInfo_getSubReg(MI->MRI, RegL, AArch64_psub0); |
1256 | | |
1257 | | // If it's a D-reg, we need to promote it to the equivalent Q-reg before |
1258 | | // printing (otherwise getRegisterName fails). |
1259 | 64.5k | if (MCRegisterClass_contains(MCRegisterInfo_getRegClass( |
1260 | 64.5k | MI->MRI, AArch64_FPR64RegClassID), |
1261 | 64.5k | Reg)) { |
1262 | 1.51k | const MCRegisterClass *FPR128RC = MCRegisterInfo_getRegClass( |
1263 | 1.51k | MI->MRI, AArch64_FPR128RegClassID); |
1264 | 1.51k | Reg = MCRegisterInfo_getMatchingSuperReg( |
1265 | 1.51k | MI->MRI, Reg, AArch64_dsub, FPR128RC); |
1266 | 1.51k | } |
1267 | 64.5k | return Reg; |
1268 | 64.5k | } |
1269 | | |
1270 | | static bool is_vector_reg(unsigned Reg) |
1271 | 220k | { |
1272 | 220k | if ((Reg >= AArch64_Q0) && (Reg <= AArch64_Q31)) |
1273 | 53.3k | return true; |
1274 | 166k | else if ((Reg >= AArch64_Z0) && (Reg <= AArch64_Z31)) |
1275 | 164k | return true; |
1276 | 2.30k | else if ((Reg >= AArch64_P0) && (Reg <= AArch64_P15)) |
1277 | 2.30k | return true; |
1278 | 0 | return false; |
1279 | 220k | } |
1280 | | |
1281 | | static unsigned getNextVectorRegister(unsigned Reg, unsigned Stride /* = 1 */) |
1282 | 146k | { |
1283 | 366k | while (Stride--) { |
1284 | 220k | if (!is_vector_reg(Reg)) { |
1285 | 0 | CS_ASSERT(0 && "Vector register expected!"); |
1286 | 0 | return 0; |
1287 | 0 | } |
1288 | | // Vector lists can wrap around. |
1289 | 220k | else if (Reg == AArch64_Q31) |
1290 | 1.72k | Reg = AArch64_Q0; |
1291 | | // Vector lists can wrap around. |
1292 | 218k | else if (Reg == AArch64_Z31) |
1293 | 2.60k | Reg = AArch64_Z0; |
1294 | | // Vector lists can wrap around. |
1295 | 215k | else if (Reg == AArch64_P15) |
1296 | 42 | Reg = AArch64_P0; |
1297 | 215k | else |
1298 | | // Assume ordered registers |
1299 | 215k | ++Reg; |
1300 | 220k | } |
1301 | 146k | return Reg; |
1302 | 146k | } |
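The wrap-around behaviour noted in the comments can be pictured with plain indices into a 32-entry register file; the helper below is an invented stand-in for the enum-based logic, not Capstone code.

#include <stdio.h>

static unsigned next_vec_reg_index(unsigned reg, unsigned stride)
{
	while (stride--)
		reg = (reg + 1) % 32; // index 31 wraps back to 0, like Q31 -> Q0
	return reg;
}

int main(void)
{
	printf("q%u\n", next_vec_reg_index(31, 1)); // q0
	printf("q%u\n", next_vec_reg_index(30, 4)); // q2
	return 0;
}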
1303 | | |
1304 | | static aarch64_extender llvm_to_cs_ext(AArch64_AM_ShiftExtendType ExtType) |
1305 | 12.8k | { |
1306 | 12.8k | switch (ExtType) { |
1307 | 10.0k | default: |
1308 | 10.0k | return AARCH64_EXT_INVALID; |
1309 | 269 | case AArch64_AM_UXTB: |
1310 | 269 | return AARCH64_EXT_UXTB; |
1311 | 379 | case AArch64_AM_UXTH: |
1312 | 379 | return AARCH64_EXT_UXTH; |
1313 | 545 | case AArch64_AM_UXTW: |
1314 | 545 | return AARCH64_EXT_UXTW; |
1315 | 699 | case AArch64_AM_UXTX: |
1316 | 699 | return AARCH64_EXT_UXTX; |
1317 | 252 | case AArch64_AM_SXTB: |
1318 | 252 | return AARCH64_EXT_SXTB; |
1319 | 334 | case AArch64_AM_SXTH: |
1320 | 334 | return AARCH64_EXT_SXTH; |
1321 | 115 | case AArch64_AM_SXTW: |
1322 | 115 | return AARCH64_EXT_SXTW; |
1323 | 201 | case AArch64_AM_SXTX: |
1324 | 201 | return AARCH64_EXT_SXTX; |
1325 | 12.8k | } |
1326 | 12.8k | } |
1327 | | |
1328 | | static aarch64_shifter llvm_to_cs_shift(AArch64_AM_ShiftExtendType ShiftExtType) |
1329 | 10.0k | { |
1330 | 10.0k | switch (ShiftExtType) { |
1331 | 0 | default: |
1332 | 0 | return AARCH64_SFT_INVALID; |
1333 | 5.19k | case AArch64_AM_LSL: |
1334 | 5.19k | return AARCH64_SFT_LSL; |
1335 | 1.32k | case AArch64_AM_LSR: |
1336 | 1.32k | return AARCH64_SFT_LSR; |
1337 | 1.86k | case AArch64_AM_ASR: |
1338 | 1.86k | return AARCH64_SFT_ASR; |
1339 | 910 | case AArch64_AM_ROR: |
1340 | 910 | return AARCH64_SFT_ROR; |
1341 | 794 | case AArch64_AM_MSL: |
1342 | 794 | return AARCH64_SFT_MSL; |
1343 | 10.0k | } |
1344 | 10.0k | } |
1345 | | |
1346 | | /// Initializes or finishes a memory operand of Capstone (depending on \p |
1347 | | /// status). A memory operand in Capstone can be assembled from two LLVM operands, |
1348 | | /// e.g. the base register and the immediate displacement. |
1349 | | void AArch64_set_mem_access(MCInst *MI, bool status) |
1350 | 346k | { |
1351 | 346k | if (!detail_is_set(MI)) |
1352 | 0 | return; |
1353 | 346k | set_doing_mem(MI, status); |
1354 | 346k | if (status) { |
1355 | 173k | if (AArch64_get_detail(MI)->op_count > 0 && |
1356 | 173k | AArch64_get_detail_op(MI, -1)->type == AARCH64_OP_MEM && |
1357 | 173k | AArch64_get_detail_op(MI, -1)->mem.index == |
1358 | 71.1k | AARCH64_REG_INVALID && |
1359 | 173k | AArch64_get_detail_op(MI, -1)->mem.disp == 0) { |
1360 | | // Previous memory operand not done yet. Select it. |
1361 | 70.3k | AArch64_dec_op_count(MI); |
1362 | 70.3k | return; |
1363 | 70.3k | } |
1364 | | |
1365 | | // Init a new one. |
1366 | 103k | AArch64_get_detail_op(MI, 0)->type = AARCH64_OP_MEM; |
1367 | 103k | AArch64_get_detail_op(MI, 0)->mem.base = AARCH64_REG_INVALID; |
1368 | 103k | AArch64_get_detail_op(MI, 0)->mem.index = AARCH64_REG_INVALID; |
1369 | 103k | AArch64_get_detail_op(MI, 0)->mem.disp = 0; |
1370 | | |
1371 | 103k | #ifndef CAPSTONE_DIET |
1372 | 103k | uint8_t access = |
1373 | 103k | map_get_op_access(MI, AArch64_get_detail(MI)->op_count); |
1374 | 103k | AArch64_get_detail_op(MI, 0)->access = access; |
1375 | 103k | #endif |
1376 | 173k | } else { |
1377 | | // done, select the next operand slot |
1378 | 173k | AArch64_inc_op_count(MI); |
1379 | 173k | } |
1380 | 346k | } |
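To make the two-call protocol concrete, here is a toy model of the open/fill/close sequence the printer goes through; the demo_* names and the struct are invented for illustration and are not the Capstone API.

#include <stdbool.h>
#include <stdio.h>

typedef struct { int base, disp; bool open; } demo_mem_op;

static void demo_set_mem_access(demo_mem_op *op, bool status)
{
	op->open = status; // roughly what set_doing_mem() tracks
	if (!status)       // closing the operand selects the next slot
		printf("mem op done: base=x%d disp=%d\n", op->base, op->disp);
}

int main(void)
{
	demo_mem_op op = { 0 };
	demo_set_mem_access(&op, true);  // printer starts a memory operand
	op.base = 1;                     // base register filled in (x1)
	op.disp = 16;                    // immediate displacement filled in
	demo_set_mem_access(&op, false); // printer finishes the operand
	return 0;
}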
1381 | | |
1382 | | /// Common prefix for all AArch64_add_cs_detail_* functions |
1383 | | static bool add_cs_detail_begin(MCInst *MI, unsigned op_num) |
1384 | 923k | { |
1385 | 923k | if (!detail_is_set(MI) || !map_fill_detail_ops(MI)) |
1386 | 0 | return false; |
1387 | | |
1388 | 923k | if (AArch64_get_detail(MI)->is_doing_sme) { |
1389 | | // Unset the flag if there is no bound operand anymore. |
1390 | 123k | if (!(map_get_op_type(MI, op_num) & CS_OP_BOUND)) { |
1391 | 87.8k | AArch64_get_detail(MI)->is_doing_sme = false; |
1392 | 87.8k | AArch64_inc_op_count(MI); |
1393 | 87.8k | } |
1394 | 123k | } |
1395 | 923k | return true; |
1396 | 923k | } |
1397 | | |
1398 | | /// Fills cs_detail with the data of the operand. |
1399 | | /// This function handles operands whose original printer function has no |
1400 | | /// special cases. |
1401 | | void AArch64_add_cs_detail_0(MCInst *MI, aarch64_op_group op_group, |
1402 | | unsigned OpNum) |
1403 | 508k | { |
1404 | 508k | if (!add_cs_detail_begin(MI, OpNum)) |
1405 | 0 | return; |
1406 | | |
1407 | | // Fill cs_detail |
1408 | 508k | switch (op_group) { |
1409 | 0 | default: |
1410 | 0 | printf("ERROR: Operand group %d not handled!\n", op_group); |
1411 | 0 | CS_ASSERT_RET(0); |
1412 | 365k | case AArch64_OP_GROUP_Operand: { |
1413 | 365k | cs_op_type primary_op_type = map_get_op_type(MI, OpNum) & |
1414 | 365k | ~(CS_OP_MEM | CS_OP_BOUND); |
1415 | 365k | switch (primary_op_type) { |
1416 | 0 | default: |
1417 | 0 | printf("Unhandled operand type 0x%x\n", |
1418 | 0 | primary_op_type); |
1419 | 0 | CS_ASSERT_RET(0); |
1420 | 312k | case AARCH64_OP_REG: |
1421 | 312k | AArch64_set_detail_op_reg(MI, OpNum, |
1422 | 312k | MCInst_getOpVal(MI, OpNum)); |
1423 | 312k | break; |
1424 | 52.2k | case AARCH64_OP_IMM: |
1425 | 52.2k | AArch64_set_detail_op_imm(MI, OpNum, AARCH64_OP_IMM, |
1426 | 52.2k | MCInst_getOpVal(MI, OpNum)); |
1427 | 52.2k | break; |
1428 | 697 | case AARCH64_OP_FP: { |
1429 | | // printOperand does not handle FP operands. But it is sometimes |
1430 | | // used to print FP operands as normal immediates. |
1431 | 697 | AArch64_get_detail_op(MI, 0)->type = AARCH64_OP_IMM; |
1432 | 697 | AArch64_get_detail_op(MI, 0)->imm = |
1433 | 697 | MCInst_getOpVal(MI, OpNum); |
1434 | 697 | AArch64_get_detail_op(MI, 0)->access = |
1435 | 697 | map_get_op_access(MI, OpNum); |
1436 | 697 | AArch64_inc_op_count(MI); |
1437 | 697 | break; |
1438 | 0 | } |
1439 | 365k | } |
1440 | 365k | break; |
1441 | 365k | } |
1442 | 365k | case AArch64_OP_GROUP_AddSubImm: { |
1443 | 2.58k | unsigned Val = (MCInst_getOpVal(MI, OpNum) & 0xfff); |
1444 | 2.58k | AArch64_set_detail_op_imm(MI, OpNum, AARCH64_OP_IMM, Val); |
1445 | | // Shift is added in printShifter() |
1446 | 2.58k | break; |
1447 | 365k | } |
1448 | 0 | case AArch64_OP_GROUP_AdrLabel: { |
1449 | 0 | if (MCOperand_isImm(MCInst_getOperand(MI, OpNum))) { |
1450 | 0 | int64_t Offset = MCInst_getOpVal(MI, OpNum); |
1451 | 0 | AArch64_set_detail_op_imm(MI, OpNum, AARCH64_OP_IMM, |
1452 | 0 | (MI->address & -4) + Offset); |
1453 | 0 | } else { |
1454 | | // Expression |
1455 | 0 | AArch64_set_detail_op_imm( |
1456 | 0 | MI, OpNum, AARCH64_OP_IMM, |
1457 | 0 | MCOperand_isImm(MCInst_getOperand(MI, OpNum))); |
1458 | 0 | } |
1459 | 0 | break; |
1460 | 365k | } |
1461 | 0 | case AArch64_OP_GROUP_AdrpLabel: { |
1462 | 0 | if (MCOperand_isImm(MCInst_getOperand(MI, OpNum))) { |
1463 | 0 | int64_t Offset = MCInst_getOpVal(MI, OpNum) * 4096; |
1464 | 0 | AArch64_set_detail_op_imm(MI, OpNum, AARCH64_OP_IMM, |
1465 | 0 | (MI->address & -4096) + |
1466 | 0 | Offset); |
1467 | 0 | } else { |
1468 | | // Expression |
1469 | 0 | AArch64_set_detail_op_imm( |
1470 | 0 | MI, OpNum, AARCH64_OP_IMM, |
1471 | 0 | MCOperand_isImm(MCInst_getOperand(MI, OpNum))); |
1472 | 0 | } |
1473 | 0 | break; |
1474 | 365k | } |
1475 | 4.16k | case AArch64_OP_GROUP_AdrAdrpLabel: { |
1476 | 4.16k | if (!MCOperand_isImm(MCInst_getOperand(MI, OpNum))) { |
1477 | | // Expression |
1478 | 0 | AArch64_set_detail_op_imm( |
1479 | 0 | MI, OpNum, AARCH64_OP_IMM, |
1480 | 0 | MCOperand_isImm(MCInst_getOperand(MI, OpNum))); |
1481 | 0 | break; |
1482 | 0 | } |
1483 | 4.16k | int64_t Offset = MCInst_getOpVal(MI, OpNum); |
1484 | 4.16k | uint64_t Address = MI->address; |
1485 | 4.16k | if (MCInst_getOpcode(MI) == AArch64_ADRP) { |
1486 | 1.92k | Offset = Offset * 4096; |
1487 | 1.92k | Address = Address & -4096; |
1488 | 1.92k | } |
1489 | 4.16k | AArch64_set_detail_op_imm(MI, OpNum, AARCH64_OP_IMM, |
1490 | 4.16k | Address + Offset); |
1491 | 4.16k | break; |
1492 | 4.16k | } |
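// Worked example for the ADR/ADRP target computation above (illustrative
// values, not from a real trace): an ADRP at address 0x40001234 with an
// encoded immediate of 2 resolves to (0x40001234 & -4096) + 2 * 4096
// = 0x40001000 + 0x2000 = 0x40003000, while a plain ADR at the same
// address with immediate 2 resolves to 0x40001234 + 2 = 0x40001236.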
1493 | 10.3k | case AArch64_OP_GROUP_AlignedLabel: { |
1494 | 10.3k | if (MCOperand_isImm(MCInst_getOperand(MI, OpNum))) { |
1495 | 10.1k | int64_t Offset = MCInst_getOpVal(MI, OpNum) * 4; |
1496 | 10.1k | AArch64_set_detail_op_imm(MI, OpNum, AARCH64_OP_IMM, |
1497 | 10.1k | MI->address + Offset); |
1498 | 10.1k | } else { |
1499 | | // Expression |
1500 | 215 | AArch64_set_detail_op_imm( |
1501 | 215 | MI, OpNum, AARCH64_OP_IMM, |
1502 | 215 | MCOperand_isImm(MCInst_getOperand(MI, OpNum))); |
1503 | 215 | } |
1504 | 10.3k | break; |
1505 | 4.16k | } |
1506 | 0 | case AArch64_OP_GROUP_AMNoIndex: { |
1507 | 0 | AArch64_set_detail_op_mem(MI, OpNum, |
1508 | 0 | MCInst_getOpVal(MI, OpNum)); |
1509 | 0 | break; |
1510 | 4.16k | } |
1511 | 2.79k | case AArch64_OP_GROUP_ArithExtend: { |
1512 | 2.79k | unsigned Val = MCInst_getOpVal(MI, OpNum); |
1513 | 2.79k | AArch64_AM_ShiftExtendType ExtType = |
1514 | 2.79k | AArch64_AM_getArithExtendType(Val); |
1515 | 2.79k | unsigned ShiftVal = AArch64_AM_getArithShiftValue(Val); |
1516 | | |
1517 | 2.79k | AArch64_get_detail_op(MI, -1)->ext = llvm_to_cs_ext(ExtType); |
1518 | 2.79k | AArch64_get_detail_op(MI, -1)->shift.value = ShiftVal; |
1519 | 2.79k | AArch64_get_detail_op(MI, -1)->shift.type = AARCH64_SFT_LSL; |
1520 | 2.79k | break; |
1521 | 4.16k | } |
1522 | 75 | case AArch64_OP_GROUP_BarriernXSOption: { |
1523 | 75 | unsigned Val = MCInst_getOpVal(MI, OpNum); |
1524 | 75 | aarch64_sysop sysop = { 0 }; |
1525 | 75 | const AArch64DBnXS_DBnXS *DB = |
1526 | 75 | AArch64DBnXS_lookupDBnXSByEncoding(Val); |
1527 | 75 | if (DB) |
1528 | 75 | sysop.imm.dbnxs = (aarch64_dbnxs)DB->SysImm.dbnxs; |
1529 | 0 | else |
1530 | 0 | sysop.imm.raw_val = Val; |
1531 | 75 | sysop.sub_type = AARCH64_OP_DBNXS; |
1532 | 75 | AArch64_set_detail_op_sys(MI, OpNum, sysop, AARCH64_OP_SYSIMM); |
1533 | 75 | break; |
1534 | 4.16k | } |
1535 | 171 | case AArch64_OP_GROUP_AppleSysBarrierOption: { |
1536 | | // Apple-proprietary barrier option. We just add the |
1537 | | // raw immediate here. |
1538 | 171 | unsigned Val = MCOperand_getImm(MCInst_getOperand(MI, OpNum)); |
1539 | 171 | AArch64_set_detail_op_imm(MI, OpNum, AARCH64_OP_IMM, Val); |
1540 | 171 | break; |
1541 | 4.16k | } |
1542 | 436 | case AArch64_OP_GROUP_BarrierOption: { |
1543 | 436 | unsigned Val = MCOperand_getImm(MCInst_getOperand(MI, OpNum)); |
1544 | 436 | unsigned Opcode = MCInst_getOpcode(MI); |
1545 | 436 | aarch64_sysop sysop = { 0 }; |
1546 | | |
1547 | 436 | if (Opcode == AArch64_ISB) { |
1548 | 20 | const AArch64ISB_ISB *ISB = |
1549 | 20 | AArch64ISB_lookupISBByEncoding(Val); |
1550 | 20 | if (ISB) |
1551 | 0 | sysop.alias.isb = |
1552 | 0 | (aarch64_isb)ISB->SysAlias.isb; |
1553 | 20 | else |
1554 | 20 | sysop.alias.raw_val = Val; |
1555 | 20 | sysop.sub_type = AARCH64_OP_ISB; |
1556 | 20 | AArch64_set_detail_op_sys(MI, OpNum, sysop, |
1557 | 20 | AARCH64_OP_SYSALIAS); |
1558 | 416 | } else if (Opcode == AArch64_TSB) { |
1559 | 21 | const AArch64TSB_TSB *TSB = |
1560 | 21 | AArch64TSB_lookupTSBByEncoding(Val); |
1561 | 21 | if (TSB) |
1562 | 21 | sysop.alias.tsb = |
1563 | 21 | (aarch64_tsb)TSB->SysAlias.tsb; |
1564 | 0 | else |
1565 | 0 | sysop.alias.raw_val = Val; |
1566 | 21 | sysop.sub_type = AARCH64_OP_TSB; |
1567 | 21 | AArch64_set_detail_op_sys(MI, OpNum, sysop, |
1568 | 21 | AARCH64_OP_SYSALIAS); |
1569 | 395 | } else { |
1570 | 395 | const AArch64DB_DB *DB = |
1571 | 395 | AArch64DB_lookupDBByEncoding(Val); |
1572 | 395 | if (DB) |
1573 | 52 | sysop.alias.db = (aarch64_db)DB->SysAlias.db; |
1574 | 343 | else |
1575 | 343 | sysop.alias.raw_val = Val; |
1576 | 395 | sysop.sub_type = AARCH64_OP_DB; |
1577 | 395 | AArch64_set_detail_op_sys(MI, OpNum, sysop, |
1578 | 395 | AARCH64_OP_SYSALIAS); |
1579 | 395 | } |
1580 | 436 | break; |
1581 | 4.16k | } |
1582 | 379 | case AArch64_OP_GROUP_BTIHintOp: { |
1583 | 379 | aarch64_sysop sysop = { 0 }; |
1584 | 379 | unsigned btihintop = MCInst_getOpVal(MI, OpNum) ^ 32; |
1585 | 379 | const AArch64BTIHint_BTI *BTI = |
1586 | 379 | AArch64BTIHint_lookupBTIByEncoding(btihintop); |
1587 | 379 | if (BTI) |
1588 | 379 | sysop.alias.bti = (aarch64_bti)BTI->SysAlias.bti; |
1589 | 0 | else |
1590 | 0 | sysop.alias.raw_val = btihintop; |
1591 | 379 | sysop.sub_type = AARCH64_OP_BTI; |
1592 | 379 | AArch64_set_detail_op_sys(MI, OpNum, sysop, |
1593 | 379 | AARCH64_OP_SYSALIAS); |
1594 | 379 | break; |
1595 | 4.16k | } |
1596 | 3.03k | case AArch64_OP_GROUP_CondCode: { |
1597 | 3.03k | AArch64_get_detail(MI)->cc = MCInst_getOpVal(MI, OpNum); |
1598 | 3.03k | break; |
1599 | 4.16k | } |
1600 | 2.26k | case AArch64_OP_GROUP_ExtendedRegister: { |
1601 | 2.26k | AArch64_set_detail_op_reg(MI, OpNum, |
1602 | 2.26k | MCInst_getOpVal(MI, OpNum)); |
1603 | 2.26k | break; |
1604 | 4.16k | } |
1605 | 393 | case AArch64_OP_GROUP_FPImmOperand: { |
1606 | 393 | MCOperand *MO = MCInst_getOperand(MI, (OpNum)); |
1607 | 393 | float FPImm = |
1608 | 393 | MCOperand_isDFPImm(MO) ? |
1609 | 0 | BitsToDouble(MCOperand_getImm(MO)) : |
1610 | 393 | AArch64_AM_getFPImmFloat(MCOperand_getImm(MO)); |
1611 | 393 | AArch64_set_detail_op_float(MI, OpNum, FPImm); |
1612 | 393 | break; |
1613 | 4.16k | } |
1614 | 6.93k | case AArch64_OP_GROUP_GPR64as32: { |
1615 | 6.93k | unsigned Reg = MCInst_getOpVal(MI, OpNum); |
1616 | 6.93k | AArch64_set_detail_op_reg(MI, OpNum, getWRegFromXReg(Reg)); |
1617 | 6.93k | break; |
1618 | 4.16k | } |
1619 | 124 | case AArch64_OP_GROUP_GPR64x8: { |
1620 | 124 | unsigned Reg = MCInst_getOpVal(MI, (OpNum)); |
1621 | 124 | Reg = MCRegisterInfo_getSubReg(MI->MRI, Reg, AArch64_x8sub_0); |
1622 | 124 | AArch64_set_detail_op_reg(MI, OpNum, Reg); |
1623 | 124 | break; |
1624 | 4.16k | } |
1625 | 4.23k | case AArch64_OP_GROUP_Imm: |
1626 | 4.29k | case AArch64_OP_GROUP_ImmHex: |
1627 | 4.29k | AArch64_set_detail_op_imm(MI, OpNum, AARCH64_OP_IMM, |
1628 | 4.29k | MCInst_getOpVal(MI, OpNum)); |
1629 | 4.29k | break; |
1630 | 0 | case AArch64_OP_GROUP_ImplicitlyTypedVectorList: |
1631 | | // The TypedVectorList printer implements the logic for implicitly typed operands. |
1632 | 0 | AArch64_add_cs_detail_2( |
1633 | 0 | MI, AArch64_OP_GROUP_TypedVectorList_0_b, OpNum, 0, 0); |
1634 | 0 | break; |
1635 | 78 | case AArch64_OP_GROUP_InverseCondCode: { |
1636 | 78 | AArch64CC_CondCode CC = (AArch64CC_CondCode)MCOperand_getImm( |
1637 | 78 | MCInst_getOperand(MI, (OpNum))); |
1638 | 78 | AArch64_get_detail(MI)->cc = AArch64CC_getInvertedCondCode(CC); |
1639 | 78 | break; |
1640 | 4.23k | } |
1641 | 1.98k | case AArch64_OP_GROUP_MatrixTile: { |
1642 | 1.98k | const char *RegName = AArch64_LLVM_getRegisterName( |
1643 | 1.98k | MCInst_getOpVal(MI, OpNum), AArch64_NoRegAltName); |
1644 | 1.98k | const char *Dot = strstr(RegName, "."); |
1645 | 1.98k | AArch64Layout_VectorLayout vas = AARCH64LAYOUT_INVALID; |
1646 | 1.98k | if (!Dot) { |
1647 | | // The matrix dimensions are machine dependent. |
1648 | | // Currently we do not differentiate between machines, |
1649 | | // so we just indicate the use of the complete matrix. |
1650 | 0 | vas = sme_reg_to_vas(MCInst_getOpVal(MI, OpNum)); |
1651 | 0 | } else |
1652 | 1.98k | vas = get_vl_by_suffix(Dot[1]); |
1653 | 1.98k | AArch64_set_detail_op_sme(MI, OpNum, AARCH64_SME_MATRIX_TILE, |
1654 | 1.98k | vas); |
1655 | 1.98k | break; |
1656 | 4.23k | } |
1657 | 859 | case AArch64_OP_GROUP_MatrixTileList: { |
1658 | 859 | unsigned MaxRegs = 8; |
1659 | 859 | unsigned RegMask = MCInst_getOpVal(MI, (OpNum)); |
1660 | | |
1661 | 7.73k | for (unsigned I = 0; I < MaxRegs; ++I) { |
1662 | 6.87k | unsigned Reg = RegMask & (1 << I); |
1663 | 6.87k | if (Reg == 0) |
1664 | 2.59k | continue; |
1665 | 4.28k | AArch64_get_detail_op(MI, 0)->is_list_member = true; |
1666 | 4.28k | AArch64_set_detail_op_sme(MI, OpNum, |
1667 | 4.28k | AARCH64_SME_MATRIX_TILE_LIST, |
1668 | 4.28k | AARCH64LAYOUT_VL_D, |
1669 | 4.28k | (int)(AARCH64_REG_ZAD0 + I)); |
1670 | 4.28k | AArch64_inc_op_count(MI); |
1671 | 4.28k | } |
1672 | 859 | AArch64_get_detail(MI)->is_doing_sme = false; |
1673 | 859 | break; |
1674 | 4.23k | } |
1675 | 1.18k | case AArch64_OP_GROUP_MRSSystemRegister: |
1676 | 6.05k | case AArch64_OP_GROUP_MSRSystemRegister: { |
1677 | 6.05k | unsigned Val = MCInst_getOpVal(MI, OpNum); |
1678 | 6.05k | const AArch64SysReg_SysReg *Reg = |
1679 | 6.05k | AArch64SysReg_lookupSysRegByEncoding(Val); |
1680 | 6.05k | bool Read = (op_group == AArch64_OP_GROUP_MRSSystemRegister) ? |
1681 | 1.18k | true : |
1682 | 6.05k | false; |
1683 | | |
1684 | 6.05k | bool isValidSysReg = |
1685 | 6.05k | (Reg && (Read ? Reg->Readable : Reg->Writeable) && |
1686 | 6.05k | AArch64_testFeatureList(MI->csh->mode, |
1687 | 798 | Reg->FeaturesRequired)); |
1688 | | |
1689 | 6.05k | if (Reg && !isValidSysReg) |
1690 | 611 | Reg = AArch64SysReg_lookupSysRegByName(Reg->AltName); |
1691 | 6.05k | aarch64_sysop sysop = { 0 }; |
1692 | | // If Reg is NULL it is a generic system register. |
1693 | 6.05k | if (Reg) |
1694 | 1.40k | sysop.reg.sysreg = (aarch64_sysreg)Reg->SysReg.sysreg; |
1695 | 4.65k | else { |
1696 | 4.65k | sysop.reg.raw_val = Val; |
1697 | 4.65k | } |
1698 | 6.05k | aarch64_op_type type = |
1699 | 6.05k | (op_group == AArch64_OP_GROUP_MRSSystemRegister) ? |
1700 | 1.18k | AARCH64_OP_REG_MRS : |
1701 | 6.05k | AARCH64_OP_REG_MSR; |
1702 | 6.05k | sysop.sub_type = type; |
1703 | 6.05k | AArch64_set_detail_op_sys(MI, OpNum, sysop, AARCH64_OP_SYSREG); |
1704 | 6.05k | break; |
1705 | 1.18k | } |
1706 | 197 | case AArch64_OP_GROUP_PSBHintOp: { |
1707 | 197 | unsigned psbhintop = MCInst_getOpVal(MI, OpNum); |
1708 | 197 | const AArch64PSBHint_PSB *PSB = |
1709 | 197 | AArch64PSBHint_lookupPSBByEncoding(psbhintop); |
1710 | 197 | aarch64_sysop sysop = { 0 }; |
1711 | 197 | if (PSB) |
1712 | 197 | sysop.alias.psb = (aarch64_psb)PSB->SysAlias.psb; |
1713 | 0 | else |
1714 | 0 | sysop.alias.raw_val = psbhintop; |
1715 | 197 | sysop.sub_type = AARCH64_OP_PSB; |
1716 | 197 | AArch64_set_detail_op_sys(MI, OpNum, sysop, |
1717 | 197 | AARCH64_OP_SYSALIAS); |
1718 | 197 | break; |
1719 | 1.18k | } |
1720 | 1.51k | case AArch64_OP_GROUP_RPRFMOperand: { |
1721 | 1.51k | unsigned prfop = MCInst_getOpVal(MI, OpNum); |
1722 | 1.51k | const AArch64PRFM_PRFM *PRFM = |
1723 | 1.51k | AArch64PRFM_lookupPRFMByEncoding(prfop); |
1724 | 1.51k | aarch64_sysop sysop = { 0 }; |
1725 | 1.51k | if (PRFM) |
1726 | 1.48k | sysop.alias.prfm = (aarch64_prfm)PRFM->SysAlias.prfm; |
1727 | 27 | else |
1728 | 27 | sysop.alias.raw_val = prfop; |
1729 | 1.51k | sysop.sub_type = AARCH64_OP_PRFM; |
1730 | 1.51k | AArch64_set_detail_op_sys(MI, OpNum, sysop, |
1731 | 1.51k | AARCH64_OP_SYSALIAS); |
1732 | 1.51k | break; |
1733 | 1.18k | } |
1734 | 5.13k | case AArch64_OP_GROUP_ShiftedRegister: { |
1735 | 5.13k | AArch64_set_detail_op_reg(MI, OpNum, |
1736 | 5.13k | MCInst_getOpVal(MI, OpNum)); |
1737 | | // Shift part is handled in printShifter() |
1738 | 5.13k | break; |
1739 | 1.18k | } |
1740 | 10.0k | case AArch64_OP_GROUP_Shifter: { |
1741 | 10.0k | unsigned Val = MCInst_getOpVal(MI, OpNum); |
1742 | 10.0k | AArch64_AM_ShiftExtendType ShExtType = |
1743 | 10.0k | AArch64_AM_getShiftType(Val); |
1744 | 10.0k | AArch64_get_detail_op(MI, -1)->ext = llvm_to_cs_ext(ShExtType); |
1745 | 10.0k | AArch64_get_detail_op(MI, -1)->shift.type = |
1746 | 10.0k | llvm_to_cs_shift(ShExtType); |
1747 | 10.0k | AArch64_get_detail_op(MI, -1)->shift.value = |
1748 | 10.0k | AArch64_AM_getShiftValue(Val); |
1749 | 10.0k | break; |
1750 | 1.18k | } |
1751 | 773 | case AArch64_OP_GROUP_SIMDType10Operand: { |
1752 | 773 | unsigned RawVal = MCInst_getOpVal(MI, OpNum); |
1753 | 773 | uint64_t Val = AArch64_AM_decodeAdvSIMDModImmType10(RawVal); |
1754 | 773 | AArch64_set_detail_op_imm(MI, OpNum, AARCH64_OP_IMM, Val); |
1755 | 773 | break; |
1756 | 1.18k | } |
1757 | 0 | case AArch64_OP_GROUP_SVCROp: { |
1758 | 0 | unsigned svcrop = MCInst_getOpVal(MI, OpNum); |
1759 | 0 | const AArch64SVCR_SVCR *SVCR = |
1760 | 0 | AArch64SVCR_lookupSVCRByEncoding(svcrop); |
1761 | 0 | aarch64_sysop sysop = { 0 }; |
1762 | 0 | if (SVCR) |
1763 | 0 | sysop.alias.svcr = (aarch64_svcr)SVCR->SysAlias.svcr; |
1764 | 0 | else |
1765 | 0 | sysop.alias.raw_val = svcrop; |
1766 | 0 | sysop.sub_type = AARCH64_OP_SVCR; |
1767 | 0 | AArch64_set_detail_op_sys(MI, OpNum, sysop, |
1768 | 0 | AARCH64_OP_SYSALIAS); |
1769 | 0 | break; |
1770 | 1.18k | } |
1771 | 8.53k | case AArch64_OP_GROUP_SVEPattern: { |
1772 | 8.53k | unsigned Val = MCInst_getOpVal(MI, OpNum); |
1773 | 8.53k | const AArch64SVEPredPattern_SVEPREDPAT *Pat = |
1774 | 8.53k | AArch64SVEPredPattern_lookupSVEPREDPATByEncoding(Val); |
1775 | 8.53k | if (!Pat) { |
1776 | 3.39k | AArch64_set_detail_op_imm(MI, OpNum, AARCH64_OP_IMM, |
1777 | 3.39k | Val); |
1778 | 3.39k | break; |
1779 | 3.39k | } |
1780 | 5.13k | aarch64_sysop sysop = { 0 }; |
1781 | 5.13k | sysop.alias = Pat->SysAlias; |
1782 | 5.13k | sysop.sub_type = AARCH64_OP_SVEPREDPAT; |
1783 | 5.13k | AArch64_set_detail_op_sys(MI, OpNum, sysop, |
1784 | 5.13k | AARCH64_OP_SYSALIAS); |
1785 | 5.13k | break; |
1786 | 8.53k | } |
1787 | 893 | case AArch64_OP_GROUP_SVEVecLenSpecifier: { |
1788 | 893 | unsigned Val = MCInst_getOpVal(MI, OpNum); |
1789 | | // Pattern has only 1 bit |
1790 | 893 | if (Val > 1) |
1791 | 0 | CS_ASSERT_RET(0 && "Invalid vector length specifier"); |
1792 | 893 | const AArch64SVEVecLenSpecifier_SVEVECLENSPECIFIER *Pat = |
1793 | 893 | AArch64SVEVecLenSpecifier_lookupSVEVECLENSPECIFIERByEncoding( |
1794 | 893 | Val); |
1795 | 893 | if (!Pat) |
1796 | 0 | break; |
1797 | 893 | aarch64_sysop sysop = { 0 }; |
1798 | 893 | sysop.alias = Pat->SysAlias; |
1799 | 893 | sysop.sub_type = AARCH64_OP_SVEVECLENSPECIFIER; |
1800 | 893 | AArch64_set_detail_op_sys(MI, OpNum, sysop, |
1801 | 893 | AARCH64_OP_SYSALIAS); |
1802 | 893 | break; |
1803 | 893 | } |
1804 | 5.67k | case AArch64_OP_GROUP_SysCROperand: { |
1805 | 5.67k | uint64_t cimm = MCInst_getOpVal(MI, OpNum); |
1806 | 5.67k | AArch64_set_detail_op_imm(MI, OpNum, AARCH64_OP_CIMM, cimm); |
1807 | 5.67k | break; |
1808 | 893 | } |
1809 | 253 | case AArch64_OP_GROUP_SyspXzrPair: { |
1810 | 253 | unsigned Reg = MCInst_getOpVal(MI, OpNum); |
1811 | 253 | AArch64_set_detail_op_reg(MI, OpNum, Reg); |
1812 | 253 | AArch64_set_detail_op_reg(MI, OpNum, Reg); |
1813 | 253 | break; |
1814 | 893 | } |
1815 | 508 | case AArch64_OP_GROUP_SystemPStateField: { |
1816 | 508 | unsigned Val = MCInst_getOpVal(MI, OpNum); |
1817 | | |
1818 | 508 | aarch64_sysop sysop = { 0 }; |
1819 | 508 | const AArch64PState_PStateImm0_15 *PStateImm15 = |
1820 | 508 | AArch64PState_lookupPStateImm0_15ByEncoding(Val); |
1821 | 508 | const AArch64PState_PStateImm0_1 *PStateImm1 = |
1822 | 508 | AArch64PState_lookupPStateImm0_1ByEncoding(Val); |
1823 | 508 | if (PStateImm15 && |
1824 | 508 | AArch64_testFeatureList(MI->csh->mode, |
1825 | 449 | PStateImm15->FeaturesRequired)) { |
1826 | 449 | sysop.alias = PStateImm15->SysAlias; |
1827 | 449 | sysop.sub_type = AARCH64_OP_PSTATEIMM0_15; |
1828 | 449 | AArch64_set_detail_op_sys(MI, OpNum, sysop, |
1829 | 449 | AARCH64_OP_SYSALIAS); |
1830 | 449 | } else if (PStateImm1 && |
1831 | 59 | AArch64_testFeatureList( |
1832 | 59 | MI->csh->mode, |
1833 | 59 | PStateImm1->FeaturesRequired)) { |
1834 | 59 | sysop.alias = PStateImm1->SysAlias; |
1835 | 59 | sysop.sub_type = AARCH64_OP_PSTATEIMM0_1; |
1836 | 59 | AArch64_set_detail_op_sys(MI, OpNum, sysop, |
1837 | 59 | AARCH64_OP_SYSALIAS); |
1838 | 59 | } else { |
1839 | 0 | AArch64_set_detail_op_imm(MI, OpNum, AARCH64_OP_IMM, |
1840 | 0 | Val); |
1841 | 0 | } |
1842 | 508 | break; |
1843 | 893 | } |
1844 | 62.8k | case AArch64_OP_GROUP_VRegOperand: { |
1845 | 62.8k | unsigned Reg = MCInst_getOpVal(MI, OpNum); |
1846 | 62.8k | AArch64_get_detail_op(MI, 0)->is_vreg = true; |
1847 | 62.8k | AArch64_set_detail_op_reg(MI, OpNum, Reg); |
1848 | 62.8k | break; |
1849 | 893 | } |
1850 | 508k | } |
1851 | 508k | } |
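/*
 * Consumer-side sketch (illustrative, not part of this translation unit):
 * once the AArch64_add_cs_detail_* functions have filled cs_detail, a client
 * can read the operands through the public API roughly as below. This assumes
 * the next-branch public names CS_ARCH_AARCH64, cs_aarch64, cs_aarch64_op and
 * insn->detail->aarch64; adjust the names if the installed headers differ.
 */
#include <inttypes.h>
#include <stdio.h>
#include <capstone/capstone.h>

static void print_aarch64_ops(csh handle, const cs_insn *insn)
{
	const cs_aarch64 *aarch64 = &insn->detail->aarch64;
	for (uint8_t i = 0; i < aarch64->op_count; i++) {
		const cs_aarch64_op *op = &aarch64->operands[i];
		switch (op->type) {
		case AARCH64_OP_REG:
			printf("op%u: reg %s\n", i,
			       cs_reg_name(handle, op->reg));
			break;
		case AARCH64_OP_IMM:
			printf("op%u: imm 0x%" PRIx64 "\n", i,
			       (uint64_t)op->imm);
			break;
		case AARCH64_OP_MEM:
			printf("op%u: mem [base %s, disp %" PRId32 "]\n", i,
			       cs_reg_name(handle, op->mem.base),
			       op->mem.disp);
			break;
		default:
			printf("op%u: other operand type %d\n", i, op->type);
			break;
		}
	}
}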
1852 | | |
1853 | | /// Fills cs_detail with the data of the operand. |
1854 | | /// This function handles operands whose original printer function is a template |
1855 | | /// with one argument. |
1856 | | void AArch64_add_cs_detail_1(MCInst *MI, aarch64_op_group op_group, |
1857 | | unsigned OpNum, uint64_t temp_arg_0) |
1858 | 315k | { |
1859 | 315k | if (!add_cs_detail_begin(MI, OpNum)) |
1860 | 0 | return; |
1861 | 315k | switch (op_group) { |
1862 | 0 | default: |
1863 | 0 | printf("ERROR: Operand group %d not handled!\n", op_group); |
1864 | 0 | CS_ASSERT_RET(0); |
1865 | 136 | case AArch64_OP_GROUP_GPRSeqPairsClassOperand_32: |
1866 | 2.04k | case AArch64_OP_GROUP_GPRSeqPairsClassOperand_64: { |
1867 | 2.04k | unsigned size = temp_arg_0; |
1868 | 2.04k | unsigned Reg = MCInst_getOpVal(MI, (OpNum)); |
1869 | | |
1870 | 2.04k | unsigned Sube = (size == 32) ? AArch64_sube32 : AArch64_sube64; |
1871 | 2.04k | unsigned Subo = (size == 32) ? AArch64_subo32 : AArch64_subo64; |
1872 | | |
1873 | 2.04k | unsigned Even = MCRegisterInfo_getSubReg(MI->MRI, Reg, Sube); |
1874 | 2.04k | unsigned Odd = MCRegisterInfo_getSubReg(MI->MRI, Reg, Subo); |
1875 | 2.04k | AArch64_set_detail_op_reg(MI, OpNum, Even); |
1876 | 2.04k | AArch64_set_detail_op_reg(MI, OpNum, Odd); |
1877 | 2.04k | break; |
1878 | 136 | } |
1879 | 1.04k | case AArch64_OP_GROUP_Imm8OptLsl_int16_t: |
1880 | 2.68k | case AArch64_OP_GROUP_Imm8OptLsl_int32_t: |
1881 | 3.19k | case AArch64_OP_GROUP_Imm8OptLsl_int64_t: |
1882 | 3.62k | case AArch64_OP_GROUP_Imm8OptLsl_int8_t: |
1883 | 3.84k | case AArch64_OP_GROUP_Imm8OptLsl_uint16_t: |
1884 | 4.62k | case AArch64_OP_GROUP_Imm8OptLsl_uint32_t: |
1885 | 5.21k | case AArch64_OP_GROUP_Imm8OptLsl_uint64_t: |
1886 | 5.65k | case AArch64_OP_GROUP_Imm8OptLsl_uint8_t: { |
1887 | 5.65k | unsigned UnscaledVal = MCInst_getOpVal(MI, (OpNum)); |
1888 | 5.65k | unsigned Shift = MCInst_getOpVal(MI, (OpNum + 1)); |
1889 | | |
1890 | 5.65k | if ((UnscaledVal == 0) && |
1891 | 5.65k | (AArch64_AM_getShiftValue(Shift) != 0)) { |
1892 | 636 | AArch64_set_detail_op_imm(MI, OpNum, AARCH64_OP_IMM, |
1893 | 636 | UnscaledVal); |
1894 | | // Shift is handled in printShifter() |
1895 | 636 | break; |
1896 | 636 | } |
1897 | | |
1898 | 5.01k | #define SCALE_SET(T) \ |
1899 | 5.01k | do { \ |
1900 | 5.01k | T Val; \ |
1901 | 5.01k | if (CHAR(T) == 'i') /* Signed */ \ |
1902 | 5.01k | Val = (int8_t)UnscaledVal * \ |
1903 | 3.56k | (1 << AArch64_AM_getShiftValue(Shift)); \ |
1904 | 5.01k | else \ |
1905 | 5.01k | Val = (uint8_t)UnscaledVal * \ |
1906 | 1.45k | (1 << AArch64_AM_getShiftValue(Shift)); \ |
1907 | 5.01k | AArch64_set_detail_op_imm(MI, OpNum, AARCH64_OP_IMM, Val); \ |
1908 | 5.01k | } while (0) |
1909 | | |
1910 | 5.01k | switch (op_group) { |
1911 | 0 | default: |
1912 | 0 | CS_ASSERT_RET( |
1913 | 0 | 0 && |
1914 | 0 | "Operand group for Imm8OptLsl not handled."); |
1915 | 1.03k | case AArch64_OP_GROUP_Imm8OptLsl_int16_t: { |
1916 | 1.03k | SCALE_SET(int16_t); |
1917 | 1.03k | break; |
1918 | 0 | } |
1919 | 1.60k | case AArch64_OP_GROUP_Imm8OptLsl_int32_t: { |
1920 | 1.60k | SCALE_SET(int32_t); |
1921 | 1.60k | break; |
1922 | 0 | } |
1923 | 486 | case AArch64_OP_GROUP_Imm8OptLsl_int64_t: { |
1924 | 486 | SCALE_SET(int64_t); |
1925 | 486 | break; |
1926 | 0 | } |
1927 | 432 | case AArch64_OP_GROUP_Imm8OptLsl_int8_t: { |
1928 | 432 | SCALE_SET(int8_t); |
1929 | 432 | break; |
1930 | 0 | } |
1931 | 61 | case AArch64_OP_GROUP_Imm8OptLsl_uint16_t: { |
1932 | 61 | SCALE_SET(uint16_t); |
1933 | 61 | break; |
1934 | 0 | } |
1935 | 751 | case AArch64_OP_GROUP_Imm8OptLsl_uint32_t: { |
1936 | 751 | SCALE_SET(uint32_t); |
1937 | 751 | break; |
1938 | 0 | } |
1939 | 203 | case AArch64_OP_GROUP_Imm8OptLsl_uint64_t: { |
1940 | 203 | SCALE_SET(uint64_t); |
1941 | 203 | break; |
1942 | 0 | } |
1943 | 440 | case AArch64_OP_GROUP_Imm8OptLsl_uint8_t: { |
1944 | 440 | SCALE_SET(uint8_t); |
1945 | 440 | break; |
1946 | 0 | } |
1947 | 5.01k | } |
1948 | 5.01k | break; |
1949 | 5.01k | } |
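// Worked example for the Imm8OptLsl scaling above (illustrative): an encoded
// UnscaledVal of 0xFF with an LSL #8 shift yields
// (int8_t)0xFF * (1 << 8) = -1 * 256 = -256 for the signed variants and
// (uint8_t)0xFF * 256 = 65280 for the unsigned ones.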
1950 | 5.01k | case AArch64_OP_GROUP_ImmScale_16: |
1951 | 6.22k | case AArch64_OP_GROUP_ImmScale_2: |
1952 | 6.36k | case AArch64_OP_GROUP_ImmScale_3: |
1953 | 6.42k | case AArch64_OP_GROUP_ImmScale_32: |
1954 | 14.0k | case AArch64_OP_GROUP_ImmScale_4: |
1955 | 21.0k | case AArch64_OP_GROUP_ImmScale_8: { |
1956 | 21.0k | unsigned Scale = temp_arg_0; |
1957 | 21.0k | AArch64_set_detail_op_imm(MI, OpNum, AARCH64_OP_IMM, |
1958 | 21.0k | Scale * MCInst_getOpVal(MI, OpNum)); |
1959 | 21.0k | break; |
1960 | 14.0k | } |
1961 | 1.17k | case AArch64_OP_GROUP_LogicalImm_int16_t: |
1962 | 3.48k | case AArch64_OP_GROUP_LogicalImm_int32_t: |
1963 | 6.35k | case AArch64_OP_GROUP_LogicalImm_int64_t: |
1964 | 10.0k | case AArch64_OP_GROUP_LogicalImm_int8_t: { |
1965 | 10.0k | unsigned TypeSize = temp_arg_0; |
1966 | 10.0k | uint64_t Val = AArch64_AM_decodeLogicalImmediate( |
1967 | 10.0k | MCInst_getOpVal(MI, OpNum), 8 * TypeSize); |
1968 | 10.0k | AArch64_set_detail_op_imm(MI, OpNum, AARCH64_OP_IMM, Val); |
1969 | 10.0k | break; |
1970 | 6.35k | } |
1971 | 78 | case AArch64_OP_GROUP_Matrix_0: |
1972 | 2.33k | case AArch64_OP_GROUP_Matrix_16: |
1973 | 7.03k | case AArch64_OP_GROUP_Matrix_32: |
1974 | 10.7k | case AArch64_OP_GROUP_Matrix_64: { |
1975 | 10.7k | unsigned EltSize = temp_arg_0; |
1976 | 10.7k | AArch64_set_detail_op_sme(MI, OpNum, AARCH64_SME_MATRIX_TILE, |
1977 | 10.7k | (AArch64Layout_VectorLayout)EltSize); |
1978 | 10.7k | break; |
1979 | 7.03k | } |
1980 | 0 | case AArch64_OP_GROUP_MatrixIndex_0: |
1981 | 11.6k | case AArch64_OP_GROUP_MatrixIndex_1: |
1982 | 12.4k | case AArch64_OP_GROUP_MatrixIndex_8: { |
1983 | 12.4k | unsigned scale = temp_arg_0; |
1984 | 12.4k | if (AArch64_get_detail_op(MI, 0)->type == AARCH64_OP_SME) { |
1985 | | // The index is part of an SME matrix |
1986 | 11.0k | AArch64_set_detail_op_sme( |
1987 | 11.0k | MI, OpNum, AARCH64_SME_MATRIX_SLICE_OFF, |
1988 | 11.0k | AARCH64LAYOUT_INVALID, |
1989 | 11.0k | (uint32_t)(MCInst_getOpVal(MI, OpNum) * scale)); |
1990 | 11.0k | } else if (AArch64_get_detail_op(MI, 0)->type == |
1991 | 1.39k | AARCH64_OP_PRED) { |
1992 | | // The index is part of a predicate |
1993 | 235 | AArch64_set_detail_op_pred(MI, OpNum); |
1994 | 1.16k | } else { |
1995 | | // The index is used for an SVE2 instruction. |
1996 | 1.16k | AArch64_set_detail_op_imm( |
1997 | 1.16k | MI, OpNum, AARCH64_OP_IMM, |
1998 | 1.16k | scale * MCInst_getOpVal(MI, OpNum)); |
1999 | 1.16k | } |
2000 | 12.4k | break; |
2001 | 11.6k | } |
2002 | 3.88k | case AArch64_OP_GROUP_MatrixTileVector_0: |
2003 | 6.98k | case AArch64_OP_GROUP_MatrixTileVector_1: { |
2004 | 6.98k | bool isVertical = temp_arg_0; |
2005 | 6.98k | const char *RegName = AArch64_LLVM_getRegisterName( |
2006 | 6.98k | MCInst_getOpVal(MI, OpNum), AArch64_NoRegAltName); |
2007 | 6.98k | const char *Dot = strstr(RegName, "."); |
2008 | 6.98k | AArch64Layout_VectorLayout vas = AARCH64LAYOUT_INVALID; |
2009 | 6.98k | if (!Dot) { |
2010 | | // The matrix dimensions are machine dependent. |
2011 | | // Currently we do not differentiate between machines, |
2012 | | // so we just indicate the use of the complete matrix. |
2013 | 0 | vas = sme_reg_to_vas(MCInst_getOpVal(MI, OpNum)); |
2014 | 0 | } else |
2015 | 6.98k | vas = get_vl_by_suffix(Dot[1]); |
2016 | 6.98k | setup_sme_operand(MI); |
2017 | 6.98k | AArch64_set_detail_op_sme(MI, OpNum, AARCH64_SME_MATRIX_TILE, |
2018 | 6.98k | vas); |
2019 | 6.98k | AArch64_get_detail_op(MI, 0)->sme.is_vertical = isVertical; |
2020 | 6.98k | break; |
2021 | 3.88k | } |
2022 | 1.32k | case AArch64_OP_GROUP_PostIncOperand_1: |
2023 | 1.44k | case AArch64_OP_GROUP_PostIncOperand_12: |
2024 | 2.41k | case AArch64_OP_GROUP_PostIncOperand_16: |
2025 | 3.11k | case AArch64_OP_GROUP_PostIncOperand_2: |
2026 | 3.94k | case AArch64_OP_GROUP_PostIncOperand_24: |
2027 | 4.60k | case AArch64_OP_GROUP_PostIncOperand_3: |
2028 | 5.19k | case AArch64_OP_GROUP_PostIncOperand_32: |
2029 | 5.68k | case AArch64_OP_GROUP_PostIncOperand_4: |
2030 | 6.05k | case AArch64_OP_GROUP_PostIncOperand_48: |
2031 | 7.36k | case AArch64_OP_GROUP_PostIncOperand_6: |
2032 | 7.45k | case AArch64_OP_GROUP_PostIncOperand_64: |
2033 | 8.40k | case AArch64_OP_GROUP_PostIncOperand_8: { |
2034 | 8.40k | uint64_t Imm = temp_arg_0; |
2035 | 8.40k | unsigned Reg = MCInst_getOpVal(MI, OpNum); |
2036 | 8.40k | if (Reg == AArch64_XZR) { |
2037 | 0 | AArch64_get_detail_op(MI, -1)->mem.disp = Imm; |
2038 | 0 | AArch64_get_detail(MI)->post_index = true; |
2039 | 0 | AArch64_inc_op_count(MI); |
2040 | 0 | } else |
2041 | 8.40k | AArch64_set_detail_op_reg(MI, OpNum, Reg); |
2042 | 8.40k | break; |
2043 | 7.45k | } |
2044 | 7.54k | case AArch64_OP_GROUP_PredicateAsCounter_0: |
2045 | 7.62k | case AArch64_OP_GROUP_PredicateAsCounter_16: |
2046 | 7.66k | case AArch64_OP_GROUP_PredicateAsCounter_32: |
2047 | 8.14k | case AArch64_OP_GROUP_PredicateAsCounter_64: |
2048 | 8.45k | case AArch64_OP_GROUP_PredicateAsCounter_8: { |
2049 | 8.45k | unsigned EltSize = temp_arg_0; |
2050 | 8.45k | AArch64_get_detail_op(MI, 0)->vas = EltSize; |
2051 | 8.45k | AArch64_set_detail_op_reg(MI, OpNum, |
2052 | 8.45k | MCInst_getOpVal(MI, OpNum)); |
2053 | 8.45k | break; |
2054 | 8.14k | } |
2055 | 1.27k | case AArch64_OP_GROUP_PrefetchOp_0: |
2056 | 6.81k | case AArch64_OP_GROUP_PrefetchOp_1: { |
2057 | 6.81k | bool IsSVEPrefetch = (bool)temp_arg_0; |
2058 | 6.81k | unsigned prfop = MCInst_getOpVal(MI, (OpNum)); |
2059 | 6.81k | aarch64_sysop sysop = { 0 }; |
2060 | 6.81k | if (IsSVEPrefetch) { |
2061 | 5.54k | const AArch64SVEPRFM_SVEPRFM *PRFM = |
2062 | 5.54k | AArch64SVEPRFM_lookupSVEPRFMByEncoding(prfop); |
2063 | 5.54k | if (PRFM) { |
2064 | 4.66k | sysop.alias = PRFM->SysAlias; |
2065 | 4.66k | sysop.sub_type = AARCH64_OP_SVEPRFM; |
2066 | 4.66k | AArch64_set_detail_op_sys(MI, OpNum, sysop, |
2067 | 4.66k | AARCH64_OP_SYSALIAS); |
2068 | 4.66k | break; |
2069 | 4.66k | } |
2070 | 5.54k | } else { |
2071 | 1.27k | const AArch64PRFM_PRFM *PRFM = |
2072 | 1.27k | AArch64PRFM_lookupPRFMByEncoding(prfop); |
2073 | 1.27k | if (PRFM && |
2074 | 1.27k | AArch64_testFeatureList(MI->csh->mode, |
2075 | 822 | PRFM->FeaturesRequired)) { |
2076 | 822 | sysop.alias = PRFM->SysAlias; |
2077 | 822 | sysop.sub_type = AARCH64_OP_PRFM; |
2078 | 822 | AArch64_set_detail_op_sys(MI, OpNum, sysop, |
2079 | 822 | AARCH64_OP_SYSALIAS); |
2080 | 822 | break; |
2081 | 822 | } |
2082 | 1.27k | } |
2083 | 1.32k | AArch64_get_detail_op(MI, 0)->type = AARCH64_OP_IMM; |
2084 | 1.32k | AArch64_get_detail_op(MI, 0)->imm = prfop; |
2085 | 1.32k | AArch64_get_detail_op(MI, 0)->access = |
2086 | 1.32k | map_get_op_access(MI, OpNum); |
2087 | 1.32k | AArch64_inc_op_count(MI); |
2088 | 1.32k | break; |
2089 | 6.81k | } |
2090 | 433 | case AArch64_OP_GROUP_SImm_16: |
2091 | 749 | case AArch64_OP_GROUP_SImm_8: { |
2092 | 749 | AArch64_set_detail_op_imm(MI, OpNum, AARCH64_OP_IMM, |
2093 | 749 | MCInst_getOpVal(MI, OpNum)); |
2094 | 749 | break; |
2095 | 433 | } |
2096 | 741 | case AArch64_OP_GROUP_SVELogicalImm_int16_t: |
2097 | 3.41k | case AArch64_OP_GROUP_SVELogicalImm_int32_t: |
2098 | 4.02k | case AArch64_OP_GROUP_SVELogicalImm_int64_t: { |
2099 | | // General issue here: we do not save the operand type |
2100 | | // for each operand, so we choose the largest type. |
2101 | 4.02k | uint64_t Val = MCInst_getOpVal(MI, OpNum); |
2102 | 4.02k | uint64_t DecodedVal = |
2103 | 4.02k | AArch64_AM_decodeLogicalImmediate(Val, 64); |
2104 | 4.02k | AArch64_set_detail_op_imm(MI, OpNum, AARCH64_OP_IMM, |
2105 | 4.02k | DecodedVal); |
2106 | 4.02k | break; |
2107 | 3.41k | } |
2108 | 54.4k | case AArch64_OP_GROUP_SVERegOp_0: |
2109 | 87.6k | case AArch64_OP_GROUP_SVERegOp_b: |
2110 | 115k | case AArch64_OP_GROUP_SVERegOp_d: |
2111 | 147k | case AArch64_OP_GROUP_SVERegOp_h: |
2112 | 149k | case AArch64_OP_GROUP_SVERegOp_q: |
2113 | 179k | case AArch64_OP_GROUP_SVERegOp_s: { |
2114 | 179k | char Suffix = (char)temp_arg_0; |
2115 | 179k | AArch64_get_detail_op(MI, 0)->vas = get_vl_by_suffix(Suffix); |
2116 | 179k | AArch64_set_detail_op_reg(MI, OpNum, |
2117 | 179k | MCInst_getOpVal(MI, OpNum)); |
2118 | 179k | break; |
2119 | 149k | } |
2120 | 1.93k | case AArch64_OP_GROUP_UImm12Offset_1: |
2121 | 2.41k | case AArch64_OP_GROUP_UImm12Offset_16: |
2122 | 3.43k | case AArch64_OP_GROUP_UImm12Offset_2: |
2123 | 4.50k | case AArch64_OP_GROUP_UImm12Offset_4: |
2124 | 5.91k | case AArch64_OP_GROUP_UImm12Offset_8: { |
2125 | | // If the operand is an expression instead, we only add the unscaled immediate. |
2126 | 5.91k | unsigned Scale = MCOperand_isImm(MCInst_getOperand(MI, OpNum)) ? |
2127 | 5.91k | temp_arg_0 : |
2128 | 5.91k | 1; |
2129 | 5.91k | AArch64_set_detail_op_imm(MI, OpNum, AARCH64_OP_IMM, |
2130 | 5.91k | Scale * MCInst_getOpVal(MI, OpNum)); |
2131 | 5.91k | break; |
2132 | 4.50k | } |
2133 | 31.2k | case AArch64_OP_GROUP_VectorIndex_1: |
2134 | 31.2k | case AArch64_OP_GROUP_VectorIndex_8: { |
2135 | 31.2k | CS_ASSERT_RET(AArch64_get_detail(MI)->op_count > 0); |
2136 | 31.2k | unsigned Scale = temp_arg_0; |
2137 | 31.2k | unsigned VIndex = Scale * MCInst_getOpVal(MI, OpNum); |
2138 | | // The index can either be for one operand, or for each operand of a list. |
2139 | 31.2k | if (!AArch64_get_detail_op(MI, -1)->is_list_member) { |
2140 | 16.9k | AArch64_get_detail_op(MI, -1)->vector_index = VIndex; |
2141 | 16.9k | break; |
2142 | 16.9k | } |
2143 | 52.1k | for (int i = AArch64_get_detail(MI)->op_count - 1; i >= 0; |
2144 | 37.7k | --i) { |
2145 | 37.7k | if (!AArch64_get_detail(MI)->operands[i].is_list_member) |
2146 | 0 | break; |
2147 | 37.7k | AArch64_get_detail(MI)->operands[i].vector_index = |
2148 | 37.7k | VIndex; |
2149 | 37.7k | } |
2150 | 14.3k | break; |
2151 | 31.2k | } |
2152 | 20 | case AArch64_OP_GROUP_ZPRasFPR_128: |
2153 | 608 | case AArch64_OP_GROUP_ZPRasFPR_16: |
2154 | 797 | case AArch64_OP_GROUP_ZPRasFPR_32: |
2155 | 1.24k | case AArch64_OP_GROUP_ZPRasFPR_64: |
2156 | 1.35k | case AArch64_OP_GROUP_ZPRasFPR_8: { |
2157 | 1.35k | unsigned Base = AArch64_NoRegister; |
2158 | 1.35k | unsigned Width = temp_arg_0; |
2159 | 1.35k | switch (Width) { |
2160 | 106 | case 8: |
2161 | 106 | Base = AArch64_B0; |
2162 | 106 | break; |
2163 | 588 | case 16: |
2164 | 588 | Base = AArch64_H0; |
2165 | 588 | break; |
2166 | 189 | case 32: |
2167 | 189 | Base = AArch64_S0; |
2168 | 189 | break; |
2169 | 449 | case 64: |
2170 | 449 | Base = AArch64_D0; |
2171 | 449 | break; |
2172 | 20 | case 128: |
2173 | 20 | Base = AArch64_Q0; |
2174 | 20 | break; |
2175 | 0 | default: |
2176 | 0 | CS_ASSERT_RET(0 && "Unsupported width"); |
2177 | 1.35k | } |
2178 | 1.35k | unsigned Reg = MCInst_getOpVal(MI, (OpNum)); |
2179 | 1.35k | AArch64_set_detail_op_reg(MI, OpNum, Reg - AArch64_Z0 + Base); |
2180 | 1.35k | break; |
2181 | 1.35k | } |
2182 | 315k | } |
2183 | 315k | } |
2184 | | |
2185 | | /// Fills cs_detail with the data of the operand. |
2186 | | /// This function handles operands whose original printer function is a template |
2187 | | /// with two arguments. |
2188 | | void AArch64_add_cs_detail_2(MCInst *MI, aarch64_op_group op_group, |
2189 | | unsigned OpNum, uint64_t temp_arg_0, |
2190 | | uint64_t temp_arg_1) |
2191 | 77.5k | { |
2192 | 77.5k | if (!add_cs_detail_begin(MI, OpNum)) |
2193 | 0 | return; |
2194 | 77.5k | switch (op_group) { |
2195 | 0 | default: |
2196 | 0 | printf("ERROR: Operand group %d not handled!\n", op_group); |
2197 | 0 | CS_ASSERT_RET(0); |
2198 | 578 | case AArch64_OP_GROUP_ComplexRotationOp_180_90: |
2199 | 2.39k | case AArch64_OP_GROUP_ComplexRotationOp_90_0: { |
2200 | 2.39k | unsigned Angle = temp_arg_0; |
2201 | 2.39k | unsigned Remainder = temp_arg_1; |
2202 | 2.39k | unsigned Imm = (MCInst_getOpVal(MI, OpNum) * Angle) + Remainder; |
2203 | 2.39k | AArch64_set_detail_op_imm(MI, OpNum, AARCH64_OP_IMM, Imm); |
2204 | 2.39k | break; |
2205 | 578 | } |
2206 | 124 | case AArch64_OP_GROUP_ExactFPImm_AArch64ExactFPImm_half_AArch64ExactFPImm_one: |
2207 | 749 | case AArch64_OP_GROUP_ExactFPImm_AArch64ExactFPImm_half_AArch64ExactFPImm_two: |
2208 | 1.77k | case AArch64_OP_GROUP_ExactFPImm_AArch64ExactFPImm_zero_AArch64ExactFPImm_one: { |
2209 | 1.77k | aarch64_exactfpimm ImmIs0 = temp_arg_0; |
2210 | 1.77k | aarch64_exactfpimm ImmIs1 = temp_arg_1; |
2211 | 1.77k | const AArch64ExactFPImm_ExactFPImm *Imm0Desc = |
2212 | 1.77k | AArch64ExactFPImm_lookupExactFPImmByEnum(ImmIs0); |
2213 | 1.77k | const AArch64ExactFPImm_ExactFPImm *Imm1Desc = |
2214 | 1.77k | AArch64ExactFPImm_lookupExactFPImmByEnum(ImmIs1); |
2215 | 1.77k | unsigned Val = MCInst_getOpVal(MI, (OpNum)); |
2216 | 1.77k | aarch64_sysop sysop = { 0 }; |
2217 | 1.77k | sysop.imm = Val ? Imm1Desc->SysImm : Imm0Desc->SysImm; |
2218 | 1.77k | sysop.sub_type = AARCH64_OP_EXACTFPIMM; |
2219 | 1.77k | AArch64_set_detail_op_sys(MI, OpNum, sysop, AARCH64_OP_SYSIMM); |
2220 | 1.77k | break; |
2221 | 749 | } |
2222 | 3.37k | case AArch64_OP_GROUP_ImmRangeScale_2_1: |
2223 | 6.71k | case AArch64_OP_GROUP_ImmRangeScale_4_3: { |
2224 | 6.71k | uint64_t Scale = temp_arg_0; |
2225 | 6.71k | uint64_t Offset = temp_arg_1; |
2226 | 6.71k | unsigned FirstImm = Scale * MCInst_getOpVal(MI, (OpNum)); |
2227 | 6.71k | AArch64_set_detail_op_imm_range(MI, OpNum, FirstImm, |
2228 | 6.71k | FirstImm + Offset); |
2229 | 6.71k | break; |
2230 | 3.37k | } |
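// Worked example for ImmRangeScale above (illustrative): with Scale = 2 and
// Offset = 1, an encoded value of 3 gives FirstImm = 2 * 3 = 6 and an upper
// bound of FirstImm + Offset = 7, i.e. a range of 6:7.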
2231 | 10 | case AArch64_OP_GROUP_MemExtend_w_128: |
2232 | 327 | case AArch64_OP_GROUP_MemExtend_w_16: |
2233 | 377 | case AArch64_OP_GROUP_MemExtend_w_32: |
2234 | 745 | case AArch64_OP_GROUP_MemExtend_w_64: |
2235 | 1.00k | case AArch64_OP_GROUP_MemExtend_w_8: |
2236 | 1.01k | case AArch64_OP_GROUP_MemExtend_x_128: |
2237 | 1.26k | case AArch64_OP_GROUP_MemExtend_x_16: |
2238 | 1.28k | case AArch64_OP_GROUP_MemExtend_x_32: |
2239 | 1.63k | case AArch64_OP_GROUP_MemExtend_x_64: |
2240 | 2.09k | case AArch64_OP_GROUP_MemExtend_x_8: { |
2241 | 2.09k | char SrcRegKind = (char)temp_arg_0; |
2242 | 2.09k | unsigned ExtWidth = temp_arg_1; |
2243 | 2.09k | bool SignExtend = MCInst_getOpVal(MI, OpNum); |
2244 | 2.09k | bool DoShift = MCInst_getOpVal(MI, OpNum + 1); |
2245 | 2.09k | AArch64_set_detail_shift_ext(MI, OpNum, SignExtend, DoShift, |
2246 | 2.09k | ExtWidth, SrcRegKind); |
2247 | 2.09k | break; |
2248 | 1.63k | } |
2249 | 14.0k | case AArch64_OP_GROUP_TypedVectorList_0_b: |
2250 | 31.2k | case AArch64_OP_GROUP_TypedVectorList_0_d: |
2251 | 44.2k | case AArch64_OP_GROUP_TypedVectorList_0_h: |
2252 | 45.8k | case AArch64_OP_GROUP_TypedVectorList_0_q: |
2253 | 58.5k | case AArch64_OP_GROUP_TypedVectorList_0_s: |
2254 | 58.5k | case AArch64_OP_GROUP_TypedVectorList_0_0: |
2255 | 61.0k | case AArch64_OP_GROUP_TypedVectorList_16_b: |
2256 | 61.1k | case AArch64_OP_GROUP_TypedVectorList_1_d: |
2257 | 61.9k | case AArch64_OP_GROUP_TypedVectorList_2_d: |
2258 | 62.5k | case AArch64_OP_GROUP_TypedVectorList_2_s: |
2259 | 62.7k | case AArch64_OP_GROUP_TypedVectorList_4_h: |
2260 | 62.9k | case AArch64_OP_GROUP_TypedVectorList_4_s: |
2261 | 63.4k | case AArch64_OP_GROUP_TypedVectorList_8_b: |
2262 | 64.5k | case AArch64_OP_GROUP_TypedVectorList_8_h: { |
2263 | 64.5k | uint8_t NumLanes = (uint8_t)temp_arg_0; |
2264 | 64.5k | char LaneKind = (char)temp_arg_1; |
2265 | 64.5k | uint16_t Pair = ((NumLanes << 8) | LaneKind); |
2266 | | |
2267 | 64.5k | AArch64Layout_VectorLayout vas = AARCH64LAYOUT_INVALID; |
2268 | 64.5k | switch (Pair) { |
2269 | 0 | default: |
2270 | 0 | printf("Typed vector list with NumLanes = %d and LaneKind = %c not handled.\n", |
2271 | 0 | NumLanes, LaneKind); |
2272 | 0 | CS_ASSERT_RET(0); |
2273 | 584 | case ((8 << 8) | 'b'): |
2274 | 584 | vas = AARCH64LAYOUT_VL_8B; |
2275 | 584 | break; |
2276 | 147 | case ((4 << 8) | 'h'): |
2277 | 147 | vas = AARCH64LAYOUT_VL_4H; |
2278 | 147 | break; |
2279 | 637 | case ((2 << 8) | 's'): |
2280 | 637 | vas = AARCH64LAYOUT_VL_2S; |
2281 | 637 | break; |
2282 | 151 | case ((1 << 8) | 'd'): |
2283 | 151 | vas = AARCH64LAYOUT_VL_1D; |
2284 | 151 | break; |
2285 | 2.49k | case ((16 << 8) | 'b'): |
2286 | 2.49k | vas = AARCH64LAYOUT_VL_16B; |
2287 | 2.49k | break; |
2288 | 1.06k | case ((8 << 8) | 'h'): |
2289 | 1.06k | vas = AARCH64LAYOUT_VL_8H; |
2290 | 1.06k | break; |
2291 | 205 | case ((4 << 8) | 's'): |
2292 | 205 | vas = AARCH64LAYOUT_VL_4S; |
2293 | 205 | break; |
2294 | 750 | case ((2 << 8) | 'd'): |
2295 | 750 | vas = AARCH64LAYOUT_VL_2D; |
2296 | 750 | break; |
2297 | 14.0k | case 'b': |
2298 | 14.0k | vas = AARCH64LAYOUT_VL_B; |
2299 | 14.0k | break; |
2300 | 13.0k | case 'h': |
2301 | 13.0k | vas = AARCH64LAYOUT_VL_H; |
2302 | 13.0k | break; |
2303 | 12.6k | case 's': |
2304 | 12.6k | vas = AARCH64LAYOUT_VL_S; |
2305 | 12.6k | break; |
2306 | 17.1k | case 'd': |
2307 | 17.1k | vas = AARCH64LAYOUT_VL_D; |
2308 | 17.1k | break; |
2309 | 1.55k | case 'q': |
2310 | 1.55k | vas = AARCH64LAYOUT_VL_Q; |
2311 | 1.55k | break; |
2312 | 28 | case '0': |
2313 | | // Implicitly Typed register |
2314 | 28 | break; |
2315 | 64.5k | } |
2316 | | |
2317 | 64.5k | unsigned Reg = MCOperand_getReg(MCInst_getOperand(MI, OpNum)); |
2318 | 64.5k | unsigned NumRegs = get_vec_list_num_regs(MI, Reg); |
2319 | 64.5k | unsigned Stride = get_vec_list_stride(MI, Reg); |
2320 | 64.5k | Reg = get_vec_list_first_reg(MI, Reg); |
2321 | | |
2322 | 64.5k | if ((MCRegisterClass_contains( |
2323 | 64.5k | MCRegisterInfo_getRegClass(MI->MRI, |
2324 | 64.5k | AArch64_ZPRRegClassID), |
2325 | 64.5k | Reg) || |
2326 | 64.5k | MCRegisterClass_contains( |
2327 | 21.4k | MCRegisterInfo_getRegClass(MI->MRI, |
2328 | 21.4k | AArch64_PPRRegClassID), |
2329 | 21.4k | Reg)) && |
2330 | 64.5k | NumRegs > 1 && Stride == 1 && |
2331 | 64.5k | Reg < getNextVectorRegister(Reg, NumRegs - 1)) { |
2332 | 24.1k | AArch64_get_detail_op(MI, 0)->is_list_member = true; |
2333 | 24.1k | AArch64_get_detail_op(MI, 0)->vas = vas; |
2334 | 24.1k | AArch64_set_detail_op_reg(MI, OpNum, Reg); |
2335 | 24.1k | if (NumRegs > 1) { |
2336 | | // Add all registers of the list to the details. |
2337 | 64.9k | for (size_t i = 0; i < NumRegs - 1; ++i) { |
2338 | 40.7k | AArch64_get_detail_op(MI, 0) |
2339 | 40.7k | ->is_list_member = true; |
2340 | 40.7k | AArch64_get_detail_op(MI, 0)->vas = vas; |
2341 | 40.7k | AArch64_set_detail_op_reg( |
2342 | 40.7k | MI, OpNum, |
2343 | 40.7k | getNextVectorRegister(Reg + i, |
2344 | 40.7k | 1)); |
2345 | 40.7k | } |
2346 | 24.1k | } |
2347 | 40.3k | } else { |
2348 | 121k | for (unsigned i = 0; i < NumRegs; |
2349 | 81.2k | ++i, Reg = getNextVectorRegister(Reg, Stride)) { |
2350 | 81.2k | if (!(MCRegisterClass_contains( |
2351 | 81.2k | MCRegisterInfo_getRegClass( |
2352 | 81.2k | MI->MRI, |
2353 | 81.2k | AArch64_ZPRRegClassID), |
2354 | 81.2k | Reg) || |
2355 | 81.2k | MCRegisterClass_contains( |
2356 | 53.4k | MCRegisterInfo_getRegClass( |
2357 | 53.4k | MI->MRI, |
2358 | 53.4k | AArch64_PPRRegClassID), |
2359 | 53.4k | Reg))) { |
2360 | 53.3k | AArch64_get_detail_op(MI, 0)->is_vreg = |
2361 | 53.3k | true; |
2362 | 53.3k | } |
2363 | 81.2k | AArch64_get_detail_op(MI, 0)->is_list_member = |
2364 | 81.2k | true; |
2365 | 81.2k | AArch64_get_detail_op(MI, 0)->vas = vas; |
2366 | 81.2k | AArch64_set_detail_op_reg(MI, OpNum, Reg); |
2367 | 81.2k | } |
2368 | 40.3k | } |
2369 | 64.5k | } |
2370 | 77.5k | } |
2371 | 77.5k | } |
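/*
 * Illustrative example for the TypedVectorList handling above (register
 * numbers assumed): a list such as { v0.16b, v1.16b } becomes two detail
 * operands, each with is_list_member = true, vas = AARCH64LAYOUT_VL_16B and
 * V0 respectively V1 as register. If a lane index follows (e.g.
 * { v0.b, v1.b }[2]), the VectorIndex case in AArch64_add_cs_detail_1()
 * later applies that index to every member of the list.
 */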
2372 | | |
2373 | | /// Fills cs_detail with the data of the operand. |
2374 | | /// This function handles operands whose original printer function is a template |
2375 | | /// with four arguments. |
2376 | | void AArch64_add_cs_detail_4(MCInst *MI, aarch64_op_group op_group, |
2377 | | unsigned OpNum, uint64_t temp_arg_0, |
2378 | | uint64_t temp_arg_1, uint64_t temp_arg_2, |
2379 | | uint64_t temp_arg_3) |
2380 | 22.1k | { |
2381 | 22.1k | if (!add_cs_detail_begin(MI, OpNum)) |
2382 | 0 | return; |
2383 | 22.1k | switch (op_group) { |
2384 | 0 | default: |
2385 | 0 | printf("ERROR: Operand group %d not handled!\n", op_group); |
2386 | 0 | CS_ASSERT_RET(0); |
2387 | 743 | case AArch64_OP_GROUP_RegWithShiftExtend_0_128_x_0: |
2388 | 928 | case AArch64_OP_GROUP_RegWithShiftExtend_0_16_w_d: |
2389 | 1.07k | case AArch64_OP_GROUP_RegWithShiftExtend_0_16_w_s: |
2390 | 2.72k | case AArch64_OP_GROUP_RegWithShiftExtend_0_16_x_0: |
2391 | 3.18k | case AArch64_OP_GROUP_RegWithShiftExtend_0_16_x_d: |
2392 | 3.26k | case AArch64_OP_GROUP_RegWithShiftExtend_0_16_x_s: |
2393 | 3.90k | case AArch64_OP_GROUP_RegWithShiftExtend_0_32_w_d: |
2394 | 3.94k | case AArch64_OP_GROUP_RegWithShiftExtend_0_32_w_s: |
2395 | 5.47k | case AArch64_OP_GROUP_RegWithShiftExtend_0_32_x_0: |
2396 | 6.06k | case AArch64_OP_GROUP_RegWithShiftExtend_0_32_x_d: |
2397 | 6.48k | case AArch64_OP_GROUP_RegWithShiftExtend_0_32_x_s: |
2398 | 7.36k | case AArch64_OP_GROUP_RegWithShiftExtend_0_64_w_d: |
2399 | 7.40k | case AArch64_OP_GROUP_RegWithShiftExtend_0_64_w_s: |
2400 | 8.39k | case AArch64_OP_GROUP_RegWithShiftExtend_0_64_x_0: |
2401 | 9.29k | case AArch64_OP_GROUP_RegWithShiftExtend_0_64_x_d: |
2402 | 9.35k | case AArch64_OP_GROUP_RegWithShiftExtend_0_64_x_s: |
2403 | 11.6k | case AArch64_OP_GROUP_RegWithShiftExtend_0_8_w_d: |
2404 | 12.3k | case AArch64_OP_GROUP_RegWithShiftExtend_0_8_w_s: |
2405 | 15.6k | case AArch64_OP_GROUP_RegWithShiftExtend_0_8_x_0: |
2406 | 17.4k | case AArch64_OP_GROUP_RegWithShiftExtend_0_8_x_d: |
2407 | 17.5k | case AArch64_OP_GROUP_RegWithShiftExtend_0_8_x_s: |
2408 | 17.9k | case AArch64_OP_GROUP_RegWithShiftExtend_1_16_w_d: |
2409 | 18.1k | case AArch64_OP_GROUP_RegWithShiftExtend_1_16_w_s: |
2410 | 19.1k | case AArch64_OP_GROUP_RegWithShiftExtend_1_32_w_d: |
2411 | 19.4k | case AArch64_OP_GROUP_RegWithShiftExtend_1_32_w_s: |
2412 | 19.7k | case AArch64_OP_GROUP_RegWithShiftExtend_1_64_w_d: |
2413 | 19.7k | case AArch64_OP_GROUP_RegWithShiftExtend_1_64_w_s: |
2414 | 21.1k | case AArch64_OP_GROUP_RegWithShiftExtend_1_8_w_d: |
2415 | 22.1k | case AArch64_OP_GROUP_RegWithShiftExtend_1_8_w_s: { |
2416 | | // signed (s) and unsigned (u) extend |
2417 | 22.1k | bool SignExtend = (bool)temp_arg_0; |
2418 | | // Extend width |
2419 | 22.1k | int ExtWidth = (int)temp_arg_1; |
2420 | | // w = word, x = doubleword |
2421 | 22.1k | char SrcRegKind = (char)temp_arg_2; |
2422 | | // Vector register element/arrangement specifier: |
2423 | | // B = 8bit, H = 16bit, S = 32bit, D = 64bit, Q = 128bit |
2424 | | // No suffix = complete register |
2425 | | // According to: ARM Reference manual supplement, doc number: DDI 0584 |
2426 | 22.1k | char Suffix = (char)temp_arg_3; |
2427 | | |
2428 | | // Register will be added in printOperand() afterwards. Here we only handle |
2429 | | // shift and extend. |
2430 | 22.1k | AArch64_get_detail_op(MI, -1)->vas = get_vl_by_suffix(Suffix); |
2431 | | |
2432 | 22.1k | bool DoShift = ExtWidth != 8; |
2433 | 22.1k | if (!(SignExtend || DoShift || SrcRegKind == 'w')) |
2434 | 5.17k | return; |
2435 | | |
2436 | 16.9k | AArch64_set_detail_shift_ext(MI, OpNum, SignExtend, DoShift, |
2437 | 16.9k | ExtWidth, SrcRegKind); |
2438 | 16.9k | break; |
2439 | 22.1k | } |
2440 | 22.1k | } |
2441 | 22.1k | } |
2442 | | |
2443 | | /// Adds an AArch64 register operand at position OpNum and increases the op_count by |
2444 | | /// one. |
2445 | | void AArch64_set_detail_op_reg(MCInst *MI, unsigned OpNum, aarch64_reg Reg) |
2446 | 749k | { |
2447 | 749k | if (!detail_is_set(MI)) |
2448 | 0 | return; |
2449 | 749k | AArch64_check_safe_inc(MI); |
2450 | | |
2451 | 749k | if (Reg == AARCH64_REG_ZA || |
2452 | 749k | (Reg >= AARCH64_REG_ZAB0 && Reg < AARCH64_REG_ZT0)) { |
2453 | | // A tile register should be treated as SME operand. |
2454 | 0 | AArch64_set_detail_op_sme(MI, OpNum, AARCH64_SME_MATRIX_TILE, |
2455 | 0 | sme_reg_to_vas(Reg)); |
2456 | 0 | return; |
2457 | 749k | } else if (((Reg >= AARCH64_REG_P0) && (Reg <= AARCH64_REG_P15)) || |
2458 | 749k | ((Reg >= AARCH64_REG_PN0) && (Reg <= AARCH64_REG_PN15))) { |
2459 | | // SME/SVE predicate register. |
2460 | 74.6k | AArch64_set_detail_op_pred(MI, OpNum); |
2461 | 74.6k | return; |
2462 | 674k | } else if (AArch64_get_detail(MI)->is_doing_sme) { |
2463 | 18.0k | CS_ASSERT_RET(map_get_op_type(MI, OpNum) & CS_OP_BOUND); |
2464 | 18.0k | if (AArch64_get_detail_op(MI, 0)->type == AARCH64_OP_SME) { |
2465 | 17.7k | AArch64_set_detail_op_sme(MI, OpNum, |
2466 | 17.7k | AARCH64_SME_MATRIX_SLICE_REG, |
2467 | 17.7k | AARCH64LAYOUT_INVALID); |
2468 | 17.7k | } else if (AArch64_get_detail_op(MI, 0)->type == |
2469 | 235 | AARCH64_OP_PRED) { |
2470 | 235 | AArch64_set_detail_op_pred(MI, OpNum); |
2471 | 235 | } else { |
2472 | 0 | CS_ASSERT_RET(0 && "Unknown SME/SVE operand type"); |
2473 | 0 | } |
2474 | 18.0k | return; |
2475 | 18.0k | } |
2476 | 656k | if (map_get_op_type(MI, OpNum) & CS_OP_MEM) { |
2477 | 127k | AArch64_set_detail_op_mem(MI, OpNum, Reg); |
2478 | 127k | return; |
2479 | 127k | } |
2480 | | |
2481 | 529k | CS_ASSERT_RET(!(map_get_op_type(MI, OpNum) & CS_OP_BOUND)); |
2482 | 529k | CS_ASSERT_RET(!(map_get_op_type(MI, OpNum) & CS_OP_MEM)); |
2483 | 529k | CS_ASSERT_RET(map_get_op_type(MI, OpNum) == CS_OP_REG); |
2484 | | |
2485 | 529k | AArch64_get_detail_op(MI, 0)->type = AARCH64_OP_REG; |
2486 | 529k | AArch64_get_detail_op(MI, 0)->reg = Reg; |
2487 | 529k | AArch64_get_detail_op(MI, 0)->access = map_get_op_access(MI, OpNum); |
2488 | 529k | AArch64_inc_op_count(MI); |
2489 | 529k | } |
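// Dispatch summary (descriptive): ZA tile registers are routed to
// AArch64_set_detail_op_sme(), P/PN registers to AArch64_set_detail_op_pred(),
// registers bound to an in-progress SME operand become slice registers,
// registers typed as memory go to AArch64_set_detail_op_mem(), and only the
// remaining plain registers are stored directly as AARCH64_OP_REG.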
2490 | | |
2491 | | /// Check if the previous operand is a memory operand |
2492 | | /// with only the base register set AND if this base register |
2493 | | /// is write-back. |
2494 | | /// This indicates the following immediate is a post-indexed |
2495 | | /// memory offset. |
2496 | | static bool prev_is_membase_wb(MCInst *MI) |
2497 | 103k | { |
2498 | 103k | return AArch64_get_detail(MI)->op_count > 0 && |
2499 | 103k | AArch64_get_detail_op(MI, -1)->type == AARCH64_OP_MEM && |
2500 | 103k | AArch64_get_detail_op(MI, -1)->mem.disp == 0 && |
2501 | 103k | get_detail(MI)->writeback; |
2502 | 103k | } |
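// Example (illustrative): post-indexed forms such as "ldr x0, [x1], #8" print
// the offset after the closing bracket, so the immediate may not be typed as
// a memory operand. This check lets AArch64_set_detail_op_imm() fold such an
// immediate into the just-created memory operand's displacement instead of
// emitting it as a separate immediate operand.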
2503 | | |
2504 | | /// Adds an immediate AArch64 operand at position OpNum and increases the op_count |
2505 | | /// by one. |
2506 | | void AArch64_set_detail_op_imm(MCInst *MI, unsigned OpNum, |
2507 | | aarch64_op_type ImmType, int64_t Imm) |
2508 | 143k | { |
2509 | 143k | if (!detail_is_set(MI)) |
2510 | 0 | return; |
2511 | 143k | AArch64_check_safe_inc(MI); |
2512 | | |
2513 | 143k | if (AArch64_get_detail(MI)->is_doing_sme) { |
2514 | 0 | CS_ASSERT_RET(map_get_op_type(MI, OpNum) & CS_OP_BOUND); |
2515 | 0 | if (AArch64_get_detail_op(MI, 0)->type == AARCH64_OP_SME) { |
2516 | 0 | AArch64_set_detail_op_sme(MI, OpNum, |
2517 | 0 | AARCH64_SME_MATRIX_SLICE_OFF, |
2518 | 0 | AARCH64LAYOUT_INVALID, |
2519 | 0 | (uint32_t)1); |
2520 | 0 | } else if (AArch64_get_detail_op(MI, 0)->type == |
2521 | 0 | AARCH64_OP_PRED) { |
2522 | 0 | AArch64_set_detail_op_pred(MI, OpNum); |
2523 | 0 | } else { |
2524 | 0 | CS_ASSERT_RET(0 && "Unknown SME operand type"); |
2525 | 0 | } |
2526 | 0 | return; |
2527 | 0 | } |
2528 | 143k | if (map_get_op_type(MI, OpNum) & CS_OP_MEM || prev_is_membase_wb(MI)) { |
2529 | 46.3k | AArch64_set_detail_op_mem(MI, OpNum, Imm); |
2530 | 46.3k | return; |
2531 | 46.3k | } |
2532 | | |
2533 | 97.1k | CS_ASSERT_RET(!(map_get_op_type(MI, OpNum) & CS_OP_MEM)); |
2534 | 97.1k | CS_ASSERT_RET((map_get_op_type(MI, OpNum) & ~CS_OP_BOUND) == CS_OP_IMM); |
2535 | 97.1k | CS_ASSERT_RET(ImmType == AARCH64_OP_IMM || ImmType == AARCH64_OP_CIMM); |
2536 | | |
2537 | 97.1k | AArch64_get_detail_op(MI, 0)->type = ImmType; |
2538 | 97.1k | AArch64_get_detail_op(MI, 0)->imm = Imm; |
2539 | 97.1k | AArch64_get_detail_op(MI, 0)->access = map_get_op_access(MI, OpNum); |
2540 | 97.1k | AArch64_inc_op_count(MI); |
2541 | 97.1k | } |
2542 | | |
2543 | | void AArch64_set_detail_op_imm_range(MCInst *MI, unsigned OpNum, |
2544 | | uint32_t FirstImm, uint32_t Offset) |
2545 | 6.71k | { |
2546 | 6.71k | if (!detail_is_set(MI)) |
2547 | 0 | return; |
2548 | 6.71k | AArch64_check_safe_inc(MI); |
2549 | | |
2550 | 6.71k | if (AArch64_get_detail(MI)->is_doing_sme) { |
2551 | 6.71k | CS_ASSERT_RET(map_get_op_type(MI, OpNum) & CS_OP_BOUND); |
2552 | 6.71k | if (AArch64_get_detail_op(MI, 0)->type == AARCH64_OP_SME) { |
2553 | 6.71k | AArch64_set_detail_op_sme( |
2554 | 6.71k | MI, OpNum, AARCH64_SME_MATRIX_SLICE_OFF_RANGE, |
2555 | 6.71k | AARCH64LAYOUT_INVALID, (uint32_t)FirstImm, |
2556 | 6.71k | (uint32_t)Offset); |
2557 | 6.71k | } else if (AArch64_get_detail_op(MI, 0)->type == |
2558 | 0 | AARCH64_OP_PRED) { |
2559 | 0 | CS_ASSERT_RET(0 && |
2560 | 0 | "Unknown SME predicate imm range type"); |
2561 | 0 | } else { |
2562 | 0 | CS_ASSERT_RET(0 && "Unknown SME operand type"); |
2563 | 0 | } |
2564 | 6.71k | return; |
2565 | 6.71k | } |
2566 | | |
2567 | 0 | CS_ASSERT_RET(!(map_get_op_type(MI, OpNum) & CS_OP_MEM)); |
2568 | 0 | CS_ASSERT_RET(map_get_op_type(MI, OpNum) == CS_OP_IMM); |
2569 | |
2570 | 0 | AArch64_get_detail_op(MI, 0)->type = AARCH64_OP_IMM_RANGE; |
2571 | 0 | AArch64_get_detail_op(MI, 0)->imm_range.first = FirstImm; |
2572 | 0 | AArch64_get_detail_op(MI, 0)->imm_range.offset = Offset; |
2573 | 0 | AArch64_get_detail_op(MI, 0)->access = map_get_op_access(MI, OpNum); |
2574 | 0 | AArch64_inc_op_count(MI); |
2575 | 0 | } |
2576 | | |
2577 | | /// Adds an AArch64 memory operand at position OpNum. op_count is *not* increased by |
2578 | | /// one. This is done by set_mem_access(). |
2579 | | void AArch64_set_detail_op_mem(MCInst *MI, unsigned OpNum, uint64_t Val) |
2580 | 173k | { |
2581 | 173k | if (!detail_is_set(MI)) |
2582 | 0 | return; |
2583 | 173k | AArch64_check_safe_inc(MI); |
2584 | | |
2585 | 173k | AArch64_set_mem_access(MI, true); |
2586 | | |
2587 | 173k | cs_op_type secondary_type = map_get_op_type(MI, OpNum) & ~CS_OP_MEM; |
2588 | 173k | switch (secondary_type) { |
2589 | 0 | default: |
2590 | 0 | CS_ASSERT_RET(0 && "Secondary type not supported yet."); |
2591 | 127k | case CS_OP_REG: { |
2592 | 127k | bool is_index_reg = AArch64_get_detail_op(MI, 0)->mem.base != |
2593 | 127k | AARCH64_REG_INVALID; |
2594 | 127k | if (is_index_reg) |
2595 | 28.2k | AArch64_get_detail_op(MI, 0)->mem.index = Val; |
2596 | 98.8k | else { |
2597 | 98.8k | AArch64_get_detail_op(MI, 0)->mem.base = Val; |
2598 | 98.8k | } |
2599 | | |
2600 | 127k | if (MCInst_opIsTying(MI, OpNum)) { |
2601 | | // Base registers in particular can be writeback registers. |
2602 | | // For this they are tied to an MC operand which has write |
2603 | | // access. But that operand is never processed in the printer |
2604 | | // (because it is never emitted). Therefore it is never |
2605 | | // added to the modified list. |
2606 | | // Here we check for this case and add the memory register |
2607 | | // to the modified list. |
2608 | 24.9k | map_add_implicit_write(MI, MCInst_getOpVal(MI, OpNum)); |
2609 | 24.9k | } |
2610 | 127k | break; |
2611 | 0 | } |
2612 | 46.3k | case CS_OP_IMM: { |
2613 | 46.3k | AArch64_get_detail_op(MI, 0)->mem.disp = Val; |
2614 | 46.3k | break; |
2615 | 0 | } |
2616 | 173k | } |
2617 | | |
2618 | 173k | AArch64_get_detail_op(MI, 0)->type = AARCH64_OP_MEM; |
2619 | 173k | AArch64_get_detail_op(MI, 0)->access = map_get_op_access(MI, OpNum); |
2620 | 173k | AArch64_set_mem_access(MI, false); |
2621 | 173k | } |
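// Example (illustrative): for "ldr x0, [x1, x2]" this function is called twice
// for the memory operand. The first register call finds mem.base still unset
// (AARCH64_REG_INVALID) and stores x1 as the base; the second call sees the
// base set and stores x2 as the index. An immediate offset would instead be
// stored in mem.disp through the CS_OP_IMM branch.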
2622 | | |
2623 | | /// Adds the shift and sign extend info to the previous operand. |
2624 | | /// op_count is *not* incremented by one. |
2625 | | void AArch64_set_detail_shift_ext(MCInst *MI, unsigned OpNum, bool SignExtend, |
2626 | | bool DoShift, unsigned ExtWidth, |
2627 | | char SrcRegKind) |
2628 | 19.0k | { |
2629 | 19.0k | bool IsLSL = !SignExtend && SrcRegKind == 'x'; |
2630 | 19.0k | if (IsLSL) |
2631 | 8.14k | AArch64_get_detail_op(MI, -1)->shift.type = AARCH64_SFT_LSL; |
2632 | 10.9k | else { |
2633 | 10.9k | aarch64_extender ext = SignExtend ? AARCH64_EXT_SXTB : |
2634 | 10.9k | AARCH64_EXT_UXTB; |
2635 | 10.9k | switch (SrcRegKind) { |
2636 | 0 | default: |
2637 | 0 | CS_ASSERT_RET(0 && "Extender not handled\n"); |
2638 | 0 | case 'b': |
2639 | 0 | ext += 0; |
2640 | 0 | break; |
2641 | 0 | case 'h': |
2642 | 0 | ext += 1; |
2643 | 0 | break; |
2644 | 10.5k | case 'w': |
2645 | 10.5k | ext += 2; |
2646 | 10.5k | break; |
2647 | 384 | case 'x': |
2648 | 384 | ext += 3; |
2649 | 384 | break; |
2650 | 10.9k | } |
2651 | 10.9k | AArch64_get_detail_op(MI, -1)->ext = ext; |
2652 | 10.9k | } |
2653 | 19.0k | if (DoShift || IsLSL) { |
2654 | 13.1k | unsigned ShiftAmount = DoShift ? Log2_32(ExtWidth / 8) : 0; |
2655 | 13.1k | AArch64_get_detail_op(MI, -1)->shift.type = AARCH64_SFT_LSL; |
2656 | 13.1k | AArch64_get_detail_op(MI, -1)->shift.value = ShiftAmount; |
2657 | 13.1k | } |
2658 | 19.0k | } |
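// Example (illustrative): for an extended-register address like
// [x1, w2, sxtw #3] the caller passes SignExtend = true, SrcRegKind = 'w',
// DoShift = true and ExtWidth = 64, so the previous operand gets
// ext = AARCH64_EXT_SXTW (AARCH64_EXT_SXTB + 2) and an LSL shift of
// Log2_32(64 / 8) = 3.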
2659 | | |
2660 | | /// Transforms the immediate of the operand to a float and stores it. |
2661 | | /// Increments the op_count by one. |
2662 | | void AArch64_set_detail_op_float(MCInst *MI, unsigned OpNum, float Val) |
2663 | 393 | { |
2664 | 393 | if (!detail_is_set(MI)) |
2665 | 0 | return; |
2666 | 393 | AArch64_check_safe_inc(MI); |
2667 | | |
2668 | 393 | AArch64_get_detail_op(MI, 0)->type = AARCH64_OP_FP; |
2669 | 393 | AArch64_get_detail_op(MI, 0)->fp = Val; |
2670 | 393 | AArch64_get_detail_op(MI, 0)->access = map_get_op_access(MI, OpNum); |
2671 | 393 | AArch64_inc_op_count(MI); |
2672 | 393 | } |
2673 | | |
2674 | | /// Adds the system operand and increases the op_count by |
2675 | | /// one. |
2676 | | void AArch64_set_detail_op_sys(MCInst *MI, unsigned OpNum, aarch64_sysop sys_op, |
2677 | | aarch64_op_type type) |
2678 | 22.4k | { |
2679 | 22.4k | if (!detail_is_set(MI)) |
2680 | 0 | return; |
2681 | 22.4k | AArch64_check_safe_inc(MI); |
2682 | | |
2683 | 22.4k | AArch64_get_detail_op(MI, 0)->type = type; |
2684 | 22.4k | AArch64_get_detail_op(MI, 0)->sysop = sys_op; |
2685 | 22.4k | if (sys_op.sub_type == AARCH64_OP_EXACTFPIMM) { |
2686 | 1.77k | AArch64_get_detail_op(MI, 0)->fp = |
2687 | 1.77k | aarch64_exact_fp_to_fp(sys_op.imm.exactfpimm); |
2688 | 1.77k | } |
2689 | 22.4k | AArch64_inc_op_count(MI); |
2690 | 22.4k | } |
2691 | | |
2692 | | void AArch64_set_detail_op_pred(MCInst *MI, unsigned OpNum) |
2693 | 75.0k | { |
2694 | 75.0k | if (!detail_is_set(MI)) |
2695 | 0 | return; |
2696 | 75.0k | AArch64_check_safe_inc(MI); |
2697 | | |
2698 | 75.0k | if (AArch64_get_detail_op(MI, 0)->type == AARCH64_OP_INVALID) { |
2699 | 73.4k | setup_pred_operand(MI); |
2700 | 73.4k | } |
2701 | 75.0k | aarch64_op_pred *p = &AArch64_get_detail_op(MI, 0)->pred; |
2702 | 75.0k | if (p->reg == AARCH64_REG_INVALID) { |
2703 | 73.4k | p->reg = MCInst_getOpVal(MI, OpNum); |
2704 | 73.4k | AArch64_get_detail_op(MI, 0)->access = |
2705 | 73.4k | map_get_op_access(MI, OpNum); |
2706 | 73.4k | AArch64_get_detail(MI)->is_doing_sme = true; |
2707 | 73.4k | return; |
2708 | 73.4k | } else if (p->vec_select == AARCH64_REG_INVALID) { |
2709 | 1.37k | p->vec_select = MCInst_getOpVal(MI, OpNum); |
2710 | 1.37k | return; |
2711 | 1.37k | } else if (p->imm_index == -1) { |
2712 | 235 | p->imm_index = MCInst_getOpVal(MI, OpNum); |
2713 | 235 | return; |
2714 | 235 | } |
2715 | 0 | CS_ASSERT_RET(0 && "Should not be reached."); |
2716 | 0 | } |
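// Note on the fill order above (descriptive): successive calls for the same
// predicate operand populate it incrementally. The first call stores the
// predicate register and sets is_doing_sme, the second stores the
// vector-select register, and a third, when present, stores the immediate
// index.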
2717 | | |
2718 | | /// Adds an SME matrix component to an SME operand. |
2719 | | void AArch64_set_detail_op_sme(MCInst *MI, unsigned OpNum, |
2720 | | aarch64_sme_op_part part, |
2721 | | AArch64Layout_VectorLayout vas, ...) |
2722 | 59.5k | { |
2723 | 59.5k | if (!detail_is_set(MI)) |
2724 | 0 | return; |
2725 | 59.5k | AArch64_check_safe_inc(MI); |
2726 | | |
2727 | 59.5k | AArch64_get_detail_op(MI, 0)->type = AARCH64_OP_SME; |
2728 | 59.5k | switch (part) { |
2729 | 0 | default: |
2730 | 0 | printf("Unhandled SME operand part %d\n", part); |
2731 | 0 | CS_ASSERT_RET(0); |
2732 | 4.28k | case AARCH64_SME_MATRIX_TILE_LIST: { |
2733 | 4.28k | setup_sme_operand(MI); |
2734 | 4.28k | va_list args; |
2735 | 4.28k | va_start(args, vas); |
2736 | | // NOLINTBEGIN(clang-analyzer-valist.Uninitialized) |
2737 | 4.28k | int Tile = va_arg(args, int); |
2738 | | // NOLINTEND(clang-analyzer-valist.Uninitialized) |
2739 | 4.28k | va_end(args); |
2740 | 4.28k | AArch64_get_detail_op(MI, 0)->sme.type = AARCH64_SME_OP_TILE; |
2741 | 4.28k | AArch64_get_detail_op(MI, 0)->sme.tile = Tile; |
2742 | 4.28k | AArch64_get_detail_op(MI, 0)->vas = vas; |
2743 | 4.28k | AArch64_get_detail_op(MI, 0)->access = |
2744 | 4.28k | map_get_op_access(MI, OpNum); |
2745 | 4.28k | AArch64_get_detail(MI)->is_doing_sme = true; |
2746 | 4.28k | break; |
2747 | 0 | } |
2748 | 19.7k | case AARCH64_SME_MATRIX_TILE: |
2749 | 19.7k | CS_ASSERT_RET(map_get_op_type(MI, OpNum) == CS_OP_REG); |
2750 | | |
2751 | 19.7k | setup_sme_operand(MI); |
2752 | 19.7k | AArch64_get_detail_op(MI, 0)->sme.type = AARCH64_SME_OP_TILE; |
2753 | 19.7k | AArch64_get_detail_op(MI, 0)->sme.tile = |
2754 | 19.7k | MCInst_getOpVal(MI, OpNum); |
2755 | 19.7k | AArch64_get_detail_op(MI, 0)->vas = vas; |
2756 | 19.7k | AArch64_get_detail_op(MI, 0)->access = |
2757 | 19.7k | map_get_op_access(MI, OpNum); |
2758 | 19.7k | AArch64_get_detail(MI)->is_doing_sme = true; |
2759 | 19.7k | break; |
2760 | 17.7k | case AARCH64_SME_MATRIX_SLICE_REG: |
2761 | 17.7k | CS_ASSERT_RET((map_get_op_type(MI, OpNum) & |
2762 | 17.7k | ~(CS_OP_MEM | CS_OP_BOUND)) == CS_OP_REG); |
2763 | 17.7k | CS_ASSERT_RET(AArch64_get_detail_op(MI, 0)->type == |
2764 | 17.7k | AARCH64_OP_SME); |
2765 | | |
2766 | | // SME operand already present. Add the slice to it. |
2767 | 17.7k | AArch64_get_detail_op(MI, 0)->sme.type = |
2768 | 17.7k | AARCH64_SME_OP_TILE_VEC; |
2769 | 17.7k | AArch64_get_detail_op(MI, 0)->sme.slice_reg = |
2770 | 17.7k | MCInst_getOpVal(MI, OpNum); |
2771 | 17.7k | break; |
2772 | 11.0k | case AARCH64_SME_MATRIX_SLICE_OFF: { |
2773 | 11.0k | CS_ASSERT_RET((map_get_op_type(MI, OpNum) & |
2774 | 11.0k | ~(CS_OP_MEM | CS_OP_BOUND)) == CS_OP_IMM); |
2775 | | // Because the slice register was handled before, the current op must already be an SME operand. |
2776 | 11.0k | CS_ASSERT_RET(AArch64_get_detail_op(MI, 0)->type == |
2777 | 11.0k | AARCH64_OP_SME); |
2778 | 11.0k | CS_ASSERT_RET( |
2779 | 11.0k | AArch64_get_detail_op(MI, 0)->sme.slice_offset.imm == |
2780 | 11.0k | AARCH64_SLICE_IMM_INVALID); |
2781 | 11.0k | va_list args; |
2782 | 11.0k | va_start(args, vas); |
2783 | | // NOLINTBEGIN(clang-analyzer-valist.Uninitialized) |
2784 | 11.0k | uint16_t offset = va_arg(args, uint32_t); |
2785 | | // NOLINTEND(clang-analyzer-valist.Uninitialized) |
2786 | 11.0k | va_end(args); |
2787 | 11.0k | AArch64_get_detail_op(MI, 0)->sme.slice_offset.imm = offset; |
2788 | 11.0k | break; |
2789 | 0 | } |
2790 | 6.71k | case AARCH64_SME_MATRIX_SLICE_OFF_RANGE: { |
2791 | 6.71k | va_list args; |
2792 | 6.71k | va_start(args, vas); |
2793 | | // NOLINTBEGIN(clang-analyzer-valist.Uninitialized) |
2794 | 6.71k | uint8_t First = va_arg(args, uint32_t); |
2795 | 6.71k | uint8_t Offset = va_arg(args, uint32_t); |
2796 | | // NOLINTEND(clang-analyzer-valist.Uninitialized) |
2797 | 6.71k | va_end(args); |
2798 | 6.71k | AArch64_get_detail_op(MI, 0)->sme.slice_offset.imm_range.first = |
2799 | 6.71k | First; |
2800 | 6.71k | AArch64_get_detail_op(MI, 0)->sme.slice_offset.imm_range.offset = |
2801 | 6.71k | Offset; |
2802 | 6.71k | AArch64_get_detail_op(MI, 0)->sme.has_range_offset = true; |
2803 | 6.71k | break; |
2804 | 0 | } |
2805 | 59.5k | } |
2806 | 59.5k | } |
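// Call-sequence sketch (not part of AArch64Mapping.c; operand numbers, vector
// layout and the slice offset are illustrative): building one AARCH64_OP_SME
// operand for a tile slice such as ZA0H.B[W12, 3].
static inline void example_build_sme_tile_slice(MCInst *MI)
{
	// Tile register operand: creates the SME operand.
	AArch64_set_detail_op_sme(MI, 0, AARCH64_SME_MATRIX_TILE,
				  AARCH64LAYOUT_VL_B);
	// Slice index register (e.g. W12): turns it into a tile-vector access.
	AArch64_set_detail_op_sme(MI, 1, AARCH64_SME_MATRIX_SLICE_REG,
				  AARCH64LAYOUT_INVALID);
	// Constant slice offset, read back above via va_arg(args, uint32_t).
	AArch64_set_detail_op_sme(MI, 2, AARCH64_SME_MATRIX_SLICE_OFF,
				  AARCH64LAYOUT_INVALID, 3U);
}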
2807 | | |
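/// Inserts @op at @index into the detail operands and increments op_count.
/// If @index == -1, the operand is appended after the current last operand.
/// Otherwise the operands from @index onwards are shifted up by one slot
/// before @op is written.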
2808 | | static void insert_op(MCInst *MI, unsigned index, cs_aarch64_op op) |
2809 | 16.3k | { |
2810 | 16.3k | if (!detail_is_set(MI)) { |
2811 | 0 | return; |
2812 | 0 | } |
2813 | | |
2814 | 16.3k | AArch64_check_safe_inc(MI); |
2815 | 16.3k | cs_aarch64_op *ops = AArch64_get_detail(MI)->operands; |
2816 | 16.3k | int i = AArch64_get_detail(MI)->op_count; |
2817 | 16.3k | if (index == -1) { |
2818 | 16.3k | ops[i] = op; |
2819 | 16.3k | AArch64_inc_op_count(MI); |
2820 | 16.3k | return; |
2821 | 16.3k | } |
2822 | 0 | for (; i > 0 && i > index; --i) { |
2823 | 0 | ops[i] = ops[i - 1]; |
2824 | 0 | } |
2825 | 0 | ops[index] = op; |
2826 | 0 | AArch64_inc_op_count(MI); |
2827 | 0 | } |
2828 | | |
2829 | | /// Inserts a float into the detail operands at @index. |
2830 | | /// If @index == -1, it appends the operand to the end of the ops array. |
2831 | | /// Operands already present at or after @index are shifted up by one. |
2832 | | void AArch64_insert_detail_op_float_at(MCInst *MI, unsigned index, double val, |
2833 | | cs_ac_type access) |
2834 | 0 | { |
2835 | 0 | if (!detail_is_set(MI)) |
2836 | 0 | return; |
2837 | | |
2838 | 0 | AArch64_check_safe_inc(MI); |
2839 | |
2840 | 0 | cs_aarch64_op op; |
2841 | 0 | AArch64_setup_op(&op); |
2842 | 0 | op.type = AARCH64_OP_FP; |
2843 | 0 | op.fp = val; |
2844 | 0 | op.access = access; |
2845 | |
2846 | 0 | insert_op(MI, index, op); |
2847 | 0 | } |
2848 | | |
2849 | | /// Inserts a register into the detail operands at @index. |
2850 | | /// If @index == -1, it appends the operand to the end of the ops array. |
2851 | | /// Operands already present at or after @index are shifted up by one. |
2852 | | void AArch64_insert_detail_op_reg_at(MCInst *MI, unsigned index, |
2853 | | aarch64_reg Reg, cs_ac_type access) |
2854 | 465 | { |
2855 | 465 | if (!detail_is_set(MI)) |
2856 | 0 | return; |
2857 | | |
2858 | 465 | AArch64_check_safe_inc(MI); |
2859 | | |
2860 | 465 | cs_aarch64_op op; |
2861 | 465 | AArch64_setup_op(&op); |
2862 | 465 | op.type = AARCH64_OP_REG; |
2863 | 465 | op.reg = Reg; |
2864 | 465 | op.access = access; |
2865 | | |
2866 | 465 | insert_op(MI, index, op); |
2867 | 465 | } |
2868 | | |
2869 | | /// Inserts an immediate into the detail operands at @index. |
2870 | | /// If @index == -1, it appends the operand to the end of the ops array. |
2871 | | /// Operands already present at or after @index are shifted up by one. |
2872 | | void AArch64_insert_detail_op_imm_at(MCInst *MI, unsigned index, int64_t Imm) |
2873 | 5.12k | { |
2874 | 5.12k | if (!detail_is_set(MI)) |
2875 | 0 | return; |
2876 | 5.12k | AArch64_check_safe_inc(MI); |
2877 | | |
2878 | 5.12k | cs_aarch64_op op; |
2879 | 5.12k | AArch64_setup_op(&op); |
2880 | 5.12k | op.type = AARCH64_OP_IMM; |
2881 | 5.12k | op.imm = Imm; |
2882 | 5.12k | op.access = CS_AC_READ; |
2883 | | |
2884 | 5.12k | insert_op(MI, index, op); |
2885 | 5.12k | } |
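// Usage sketch (not part of AArch64Mapping.c; the values are illustrative):
// inserting immediates at a fixed index and at the end.
static inline void example_insert_imm_operands(MCInst *MI)
{
	// Prepend an implicit #0: existing operands are shifted up by one slot.
	AArch64_insert_detail_op_imm_at(MI, 0, 0);
	// Append an immediate after the current last operand (index == -1).
	AArch64_insert_detail_op_imm_at(MI, -1, 16);
}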
2886 | | |
2887 | | void AArch64_insert_detail_op_sys(MCInst *MI, unsigned index, |
2888 | | aarch64_sysop sys_op, aarch64_op_type type) |
2889 | 7.03k | { |
2890 | 7.03k | if (!detail_is_set(MI)) |
2891 | 0 | return; |
2892 | 7.03k | AArch64_check_safe_inc(MI); |
2893 | | |
2894 | 7.03k | cs_aarch64_op op; |
2895 | 7.03k | AArch64_setup_op(&op); |
2896 | 7.03k | op.type = type; |
2897 | 7.03k | op.sysop = sys_op; |
2898 | 7.03k | if (op.sysop.sub_type == AARCH64_OP_EXACTFPIMM) { |
2899 | 6.76k | op.fp = aarch64_exact_fp_to_fp(op.sysop.imm.exactfpimm); |
2900 | 6.76k | } |
2901 | 7.03k | insert_op(MI, index, op); |
2902 | 7.03k | } |
2903 | | |
2904 | | void AArch64_insert_detail_op_sme(MCInst *MI, unsigned index, |
2905 | | aarch64_op_sme sme_op) |
2906 | 3.76k | { |
2907 | 3.76k | if (!detail_is_set(MI)) |
2908 | 0 | return; |
2909 | 3.76k | AArch64_check_safe_inc(MI); |
2910 | | |
2911 | 3.76k | cs_aarch64_op op; |
2912 | 3.76k | AArch64_setup_op(&op); |
2913 | 3.76k | op.type = AARCH64_OP_SME; |
2914 | 3.76k | op.sme = sme_op; |
2915 | 3.76k | insert_op(MI, index, op); |
2916 | 3.76k | } |
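// Usage sketch (not part of AArch64Mapping.c; register and index are
// illustrative): inserting a pre-built whole-tile SME operand. AArch64_setup_op
// is called first, as in the wrappers above, so any SME fields not set here
// keep whatever defaults that helper assigns.
static inline void example_insert_sme_tile(MCInst *MI, unsigned index)
{
	cs_aarch64_op op;
	AArch64_setup_op(&op);
	op.sme.type = AARCH64_SME_OP_TILE;
	op.sme.tile = AARCH64_REG_ZAB0;
	AArch64_insert_detail_op_sme(MI, index, op.sme);
}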
2917 | | |
2918 | | #endif |