/src/binutils-gdb/opcodes/aarch64-opc.c
Line | Count | Source |
1 | | /* aarch64-opc.c -- AArch64 opcode support. |
2 | | Copyright (C) 2009-2024 Free Software Foundation, Inc. |
3 | | Contributed by ARM Ltd. |
4 | | |
5 | | This file is part of the GNU opcodes library. |
6 | | |
7 | | This library is free software; you can redistribute it and/or modify |
8 | | it under the terms of the GNU General Public License as published by |
9 | | the Free Software Foundation; either version 3, or (at your option) |
10 | | any later version. |
11 | | |
12 | | It is distributed in the hope that it will be useful, but WITHOUT |
13 | | ANY WARRANTY; without even the implied warranty of MERCHANTABILITY |
14 | | or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public |
15 | | License for more details. |
16 | | |
17 | | You should have received a copy of the GNU General Public License |
18 | | along with this program; see the file COPYING3. If not, |
19 | | see <http://www.gnu.org/licenses/>. */ |
20 | | |
21 | | #include "sysdep.h" |
22 | | #include <assert.h> |
23 | | #include <stdlib.h> |
24 | | #include <stdio.h> |
25 | | #include <stdint.h> |
26 | | #include <stdarg.h> |
27 | | #include <inttypes.h> |
28 | | |
29 | | #include "opintl.h" |
30 | | #include "libiberty.h" |
31 | | |
32 | | #include "aarch64-opc.h" |
33 | | |
34 | | #ifdef DEBUG_AARCH64 |
35 | | int debug_dump = false; |
36 | | #endif /* DEBUG_AARCH64 */ |
37 | | |
38 | | /* The enumeration strings associated with each value of a 5-bit SVE |
39 | | pattern operand. A null entry indicates a reserved meaning. */ |
40 | | const char *const aarch64_sve_pattern_array[32] = { |
41 | | /* 0-7. */ |
42 | | "pow2", |
43 | | "vl1", |
44 | | "vl2", |
45 | | "vl3", |
46 | | "vl4", |
47 | | "vl5", |
48 | | "vl6", |
49 | | "vl7", |
50 | | /* 8-15. */ |
51 | | "vl8", |
52 | | "vl16", |
53 | | "vl32", |
54 | | "vl64", |
55 | | "vl128", |
56 | | "vl256", |
57 | | 0, |
58 | | 0, |
59 | | /* 16-23. */ |
60 | | 0, |
61 | | 0, |
62 | | 0, |
63 | | 0, |
64 | | 0, |
65 | | 0, |
66 | | 0, |
67 | | 0, |
68 | | /* 24-31. */ |
69 | | 0, |
70 | | 0, |
71 | | 0, |
72 | | 0, |
73 | | 0, |
74 | | "mul4", |
75 | | "mul3", |
76 | | "all" |
77 | | }; |
78 | | |
79 | | /* The enumeration strings associated with each value of a 4-bit SVE |
80 | | prefetch operand. A null entry indicates a reserved meaning. */ |
81 | | const char *const aarch64_sve_prfop_array[16] = { |
82 | | /* 0-7. */ |
83 | | "pldl1keep", |
84 | | "pldl1strm", |
85 | | "pldl2keep", |
86 | | "pldl2strm", |
87 | | "pldl3keep", |
88 | | "pldl3strm", |
89 | | 0, |
90 | | 0, |
91 | | /* 8-15. */ |
92 | | "pstl1keep", |
93 | | "pstl1strm", |
94 | | "pstl2keep", |
95 | | "pstl2strm", |
96 | | "pstl3keep", |
97 | | "pstl3strm", |
98 | | 0, |
99 | | 0 |
100 | | }; |
101 | | |
102 | | /* The enumeration strings associated with each value of a 6-bit RPRFM |
103 | | operation. */ |
104 | | const char *const aarch64_rprfmop_array[64] = { |
105 | | "pldkeep", |
106 | | "pstkeep", |
107 | | 0, |
108 | | 0, |
109 | | "pldstrm", |
110 | | "pststrm" |
111 | | }; |
112 | | |
113 | | /* Vector length multiples for a predicate-as-counter operand. Used in things |
114 | | like AARCH64_OPND_SME_VLxN_10. */ |
115 | | const char *const aarch64_sme_vlxn_array[2] = { |
116 | | "vlx2", |
117 | | "vlx4" |
118 | | }; |
119 | | |
120 | | /* Helper functions to determine which operand is used to encode/decode |
121 | | the size:Q fields for AdvSIMD instructions. */ |
122 | | |
123 | | static inline bool |
124 | | vector_qualifier_p (enum aarch64_opnd_qualifier qualifier) |
125 | 526k | { |
126 | 526k | return (qualifier >= AARCH64_OPND_QLF_V_8B |
127 | 526k | && qualifier <= AARCH64_OPND_QLF_V_1Q); |
128 | 526k | } |
129 | | |
130 | | static inline bool |
131 | | fp_qualifier_p (enum aarch64_opnd_qualifier qualifier) |
132 | 369 | { |
133 | 369 | return (qualifier >= AARCH64_OPND_QLF_S_B |
134 | 369 | && qualifier <= AARCH64_OPND_QLF_S_Q); |
135 | 369 | } |
136 | | |
137 | | enum data_pattern |
138 | | { |
139 | | DP_UNKNOWN, |
140 | | DP_VECTOR_3SAME, |
141 | | DP_VECTOR_LONG, |
142 | | DP_VECTOR_WIDE, |
143 | | DP_VECTOR_ACROSS_LANES, |
144 | | }; |
145 | | |
146 | | static const char significant_operand_index [] = |
147 | | { |
148 | | 0, /* DP_UNKNOWN, by default using operand 0. */ |
149 | | 0, /* DP_VECTOR_3SAME */ |
150 | | 1, /* DP_VECTOR_LONG */ |
151 | | 2, /* DP_VECTOR_WIDE */ |
152 | | 1, /* DP_VECTOR_ACROSS_LANES */ |
153 | | }; |
154 | | |
155 | | /* Given a sequence of qualifiers in QUALIFIERS, determine and return |
156 | | the data pattern. |
157 | | N.B. QUALIFIERS is a possible sequence of qualifiers each of which |
158 | | corresponds to one of a sequence of operands. */ |
159 | | |
160 | | static enum data_pattern |
161 | | get_data_pattern (const aarch64_opnd_qualifier_seq_t qualifiers) |
162 | 224k | { |
163 | 224k | if (vector_qualifier_p (qualifiers[0])) |
164 | 223k | { |
165 | | /* e.g. v.4s, v.4s, v.4s |
166 | | or v.4h, v.4h, v.h[3]. */ |
167 | 223k | if (qualifiers[0] == qualifiers[1] |
168 | 223k | && vector_qualifier_p (qualifiers[2]) |
169 | 223k | && (aarch64_get_qualifier_esize (qualifiers[0]) |
170 | 70.4k | == aarch64_get_qualifier_esize (qualifiers[1])) |
171 | 223k | && (aarch64_get_qualifier_esize (qualifiers[0]) |
172 | 70.4k | == aarch64_get_qualifier_esize (qualifiers[2]))) |
173 | 68.1k | return DP_VECTOR_3SAME; |
174 | | /* e.g. v.8h, v.8b, v.8b. |
175 | | or v.4s, v.4h, v.h[2]. |
176 | | or v.8h, v.16b. */ |
177 | 155k | if (vector_qualifier_p (qualifiers[1]) |
178 | 155k | && aarch64_get_qualifier_esize (qualifiers[0]) != 0 |
179 | 155k | && (aarch64_get_qualifier_esize (qualifiers[0]) |
180 | 102k | == aarch64_get_qualifier_esize (qualifiers[1]) << 1)) |
181 | 40.9k | return DP_VECTOR_LONG; |
182 | | /* e.g. v.8h, v.8h, v.8b. */ |
183 | 114k | if (qualifiers[0] == qualifiers[1] |
184 | 114k | && vector_qualifier_p (qualifiers[2]) |
185 | 114k | && aarch64_get_qualifier_esize (qualifiers[0]) != 0 |
186 | 114k | && (aarch64_get_qualifier_esize (qualifiers[0]) |
187 | 2.30k | == aarch64_get_qualifier_esize (qualifiers[2]) << 1) |
188 | 114k | && (aarch64_get_qualifier_esize (qualifiers[0]) |
189 | 2.30k | == aarch64_get_qualifier_esize (qualifiers[1]))) |
190 | 2.30k | return DP_VECTOR_WIDE; |
191 | 114k | } |
192 | 369 | else if (fp_qualifier_p (qualifiers[0])) |
193 | 369 | { |
194 | | /* e.g. SADDLV <V><d>, <Vn>.<T>. */ |
195 | 369 | if (vector_qualifier_p (qualifiers[1]) |
196 | 369 | && qualifiers[2] == AARCH64_OPND_QLF_NIL) |
197 | 286 | return DP_VECTOR_ACROSS_LANES; |
198 | 369 | } |
199 | | |
200 | 112k | return DP_UNKNOWN; |
201 | 224k | } |
202 | | |
203 | | /* Select the operand to do the encoding/decoding of the 'size:Q' fields in |
204 | | the AdvSIMD instructions. */ |
205 | | /* N.B. it is possible to do some optimization that doesn't call |
206 | | get_data_pattern each time we need to select an operand. We can |
207 | | either buffer the calculated result or statically generate the data; |
208 | | however, it is not obvious that the optimization will bring significant |
209 | | benefit. */ |
210 | | |
211 | | int |
212 | | aarch64_select_operand_for_sizeq_field_coding (const aarch64_opcode *opcode) |
213 | 224k | { |
214 | 224k | return |
215 | 224k | significant_operand_index [get_data_pattern (opcode->qualifiers_list[0])]; |
216 | 224k | } |
217 | | |
218 | | /* Instruction bit-fields. |
219 | | + Keep synced with 'enum aarch64_field_kind'. */ |
220 | | const aarch64_field fields[] = |
221 | | { |
222 | | { 0, 0 }, /* NIL. */ |
223 | | { 8, 4 }, /* CRm: in the system instructions. */ |
224 | | { 10, 2 }, /* CRm_dsb_nxs: 2-bit imm. encoded in CRm<3:2>. */ |
225 | | { 12, 4 }, /* CRn: in the system instructions. */ |
226 | | { 10, 8 }, /* CSSC_imm8. */ |
227 | | { 11, 1 }, /* H: in advsimd scalar x indexed element instructions. */ |
228 | | { 21, 1 }, /* L: in advsimd scalar x indexed element instructions. */ |
229 | | { 0, 5 }, /* LSE128_Rt: Shared input+output operand register. */ |
230 | | { 16, 5 }, /* LSE128_Rt2: Shared input+output operand register 2. */ |
231 | | { 20, 1 }, /* M: in advsimd scalar x indexed element instructions. */ |
232 | | { 22, 1 }, /* N: in logical (immediate) instructions. */ |
233 | | { 30, 1 }, /* Q: in most AdvSIMD instructions. */ |
234 | | { 10, 5 }, /* Ra: in fp instructions. */ |
235 | | { 0, 5 }, /* Rd: in many integer instructions. */ |
236 | | { 16, 5 }, /* Rm: in ld/st reg offset and some integer inst. */ |
237 | | { 5, 5 }, /* Rn: in many integer instructions. */ |
238 | | { 16, 5 }, /* Rs: in load/store exclusive instructions. */ |
239 | | { 0, 5 }, /* Rt: in load/store instructions. */ |
240 | | { 10, 5 }, /* Rt2: in load/store pair instructions. */ |
241 | | { 12, 1 }, /* S: in load/store reg offset instructions. */ |
242 | | { 12, 2 }, /* SM3_imm2: Indexed element SM3 2 bits index immediate. */ |
243 | | { 1, 3 }, /* SME_Pdx2: predicate register, multiple of 2, [3:1]. */ |
244 | | { 13, 3 }, /* SME_Pm: second source scalable predicate register P0-P7. */ |
245 | | { 0, 3 }, /* SME_PNd3: PN0-PN7, bits [2:0]. */ |
246 | | { 5, 3 }, /* SME_PNn3: PN0-PN7, bits [7:5]. */ |
247 | | { 16, 1 }, /* SME_Q: Q class bit, bit 16. */ |
248 | | { 16, 2 }, /* SME_Rm: index base register W12-W15 [17:16]. */ |
249 | | { 13, 2 }, /* SME_Rv: vector select register W12-W15, bits [14:13]. */ |
250 | | { 15, 1 }, /* SME_V: (horizontal / vertical tiles), bit 15. */ |
251 | | { 10, 1 }, /* SME_VL_10: VLx2 or VLx4, bit [10]. */ |
252 | | { 13, 1 }, /* SME_VL_13: VLx2 or VLx4, bit [13]. */ |
253 | | { 0, 2 }, /* SME_ZAda_2b: tile ZA0-ZA3. */ |
254 | | { 0, 3 }, /* SME_ZAda_3b: tile ZA0-ZA7. */ |
255 | | { 1, 4 }, /* SME_Zdn2: Z0-Z31, multiple of 2, bits [4:1]. */ |
256 | | { 2, 3 }, /* SME_Zdn4: Z0-Z31, multiple of 4, bits [4:2]. */ |
257 | | { 16, 4 }, /* SME_Zm: Z0-Z15, bits [19:16]. */ |
258 | | { 17, 4 }, /* SME_Zm2: Z0-Z31, multiple of 2, bits [20:17]. */ |
259 | | { 18, 3 }, /* SME_Zm4: Z0-Z31, multiple of 4, bits [20:18]. */ |
260 | | { 6, 4 }, /* SME_Zn2: Z0-Z31, multiple of 2, bits [9:6]. */ |
261 | | { 7, 3 }, /* SME_Zn4: Z0-Z31, multiple of 4, bits [9:7]. */ |
262 | | { 4, 1 }, /* SME_ZtT: upper bit of Zt, bit [4]. */ |
263 | | { 0, 3 }, /* SME_Zt3: lower 3 bits of Zt, bits [2:0]. */ |
264 | | { 0, 2 }, /* SME_Zt2: lower 2 bits of Zt, bits [1:0]. */ |
265 | | { 23, 1 }, /* SME_i1: immediate field, bit 23. */ |
266 | | { 12, 2 }, /* SME_size_12: bits [13:12]. */ |
267 | | { 22, 2 }, /* SME_size_22: size<1>, size<0> class field, [23:22]. */ |
268 | | { 23, 1 }, /* SME_sz_23: bit [23]. */ |
269 | | { 22, 1 }, /* SME_tszh: immediate and qualifier field, bit 22. */ |
270 | | { 18, 3 }, /* SME_tszl: immediate and qualifier field, bits [20:18]. */ |
271 | | { 0, 8 }, /* SME_zero_mask: list of up to 8 tile names separated by commas [7:0]. */ |
272 | | { 4, 1 }, /* SVE_M_4: Merge/zero select, bit 4. */ |
273 | | { 14, 1 }, /* SVE_M_14: Merge/zero select, bit 14. */ |
274 | | { 16, 1 }, /* SVE_M_16: Merge/zero select, bit 16. */ |
275 | | { 17, 1 }, /* SVE_N: SVE equivalent of N. */ |
276 | | { 0, 4 }, /* SVE_Pd: p0-p15, bits [3,0]. */ |
277 | | { 10, 3 }, /* SVE_Pg3: p0-p7, bits [12,10]. */ |
278 | | { 5, 4 }, /* SVE_Pg4_5: p0-p15, bits [8,5]. */ |
279 | | { 10, 4 }, /* SVE_Pg4_10: p0-p15, bits [13,10]. */ |
280 | | { 16, 4 }, /* SVE_Pg4_16: p0-p15, bits [19,16]. */ |
281 | | { 16, 4 }, /* SVE_Pm: p0-p15, bits [19,16]. */ |
282 | | { 5, 4 }, /* SVE_Pn: p0-p15, bits [8,5]. */ |
283 | | { 0, 4 }, /* SVE_Pt: p0-p15, bits [3,0]. */ |
284 | | { 5, 5 }, /* SVE_Rm: SVE alternative position for Rm. */ |
285 | | { 16, 5 }, /* SVE_Rn: SVE alternative position for Rn. */ |
286 | | { 0, 5 }, /* SVE_Vd: Scalar SIMD&FP register, bits [4,0]. */ |
287 | | { 5, 5 }, /* SVE_Vm: Scalar SIMD&FP register, bits [9,5]. */ |
288 | | { 5, 5 }, /* SVE_Vn: Scalar SIMD&FP register, bits [9,5]. */ |
289 | | { 5, 5 }, /* SVE_Za_5: SVE vector register, bits [9,5]. */ |
290 | | { 16, 5 }, /* SVE_Za_16: SVE vector register, bits [20,16]. */ |
291 | | { 0, 5 }, /* SVE_Zd: SVE vector register. bits [4,0]. */ |
292 | | { 5, 5 }, /* SVE_Zm_5: SVE vector register, bits [9,5]. */ |
293 | | { 16, 5 }, /* SVE_Zm_16: SVE vector register, bits [20,16]. */ |
294 | | { 5, 5 }, /* SVE_Zn: SVE vector register, bits [9,5]. */ |
295 | | { 0, 5 }, /* SVE_Zt: SVE vector register, bits [4,0]. */ |
296 | | { 5, 1 }, /* SVE_i1: single-bit immediate. */ |
297 | | { 20, 1 }, /* SVE_i2h: high bit of 2-bit immediate. */ |
298 | | { 22, 1 }, /* SVE_i3h: high bit of 3-bit immediate. */ |
299 | | { 19, 2 }, /* SVE_i3h2: two high bits of 3-bit immediate, bits [20,19]. */ |
300 | | { 11, 1 }, /* SVE_i3l: low bit of 3-bit immediate. */ |
301 | | { 16, 3 }, /* SVE_imm3: 3-bit immediate field. */ |
302 | | { 16, 4 }, /* SVE_imm4: 4-bit immediate field. */ |
303 | | { 5, 5 }, /* SVE_imm5: 5-bit immediate field. */ |
304 | | { 16, 5 }, /* SVE_imm5b: secondary 5-bit immediate field. */ |
305 | | { 16, 6 }, /* SVE_imm6: 6-bit immediate field. */ |
306 | | { 14, 7 }, /* SVE_imm7: 7-bit immediate field. */ |
307 | | { 5, 8 }, /* SVE_imm8: 8-bit immediate field. */ |
308 | | { 5, 9 }, /* SVE_imm9: 9-bit immediate field. */ |
309 | | { 11, 6 }, /* SVE_immr: SVE equivalent of immr. */ |
310 | | { 5, 6 }, /* SVE_imms: SVE equivalent of imms. */ |
311 | | { 10, 2 }, /* SVE_msz: 2-bit shift amount for ADR. */ |
312 | | { 5, 5 }, /* SVE_pattern: vector pattern enumeration. */ |
313 | | { 0, 4 }, /* SVE_prfop: prefetch operation for SVE PRF[BHWD]. */ |
314 | | { 16, 1 }, /* SVE_rot1: 1-bit rotation amount. */ |
315 | | { 10, 2 }, /* SVE_rot2: 2-bit rotation amount. */ |
316 | | { 10, 1 }, /* SVE_rot3: 1-bit rotation amount at bit 10. */ |
317 | | { 17, 2 }, /* SVE_size: 2-bit element size, bits [18,17]. */ |
318 | | { 22, 1 }, /* SVE_sz: 1-bit element size select. */ |
319 | | { 30, 1 }, /* SVE_sz2: 1-bit element size select. */ |
320 | | { 16, 4 }, /* SVE_tsz: triangular size select. */ |
321 | | { 22, 2 }, /* SVE_tszh: triangular size select high, bits [23,22]. */ |
322 | | { 8, 2 }, /* SVE_tszl_8: triangular size select low, bits [9,8]. */ |
323 | | { 19, 2 }, /* SVE_tszl_19: triangular size select low, bits [20,19]. */ |
324 | | { 14, 1 }, /* SVE_xs_14: UXTW/SXTW select (bit 14). */ |
325 | | { 22, 1 }, /* SVE_xs_22: UXTW/SXTW select (bit 22). */ |
326 | | { 22, 1 }, /* S_imm10: in LDRAA and LDRAB instructions. */ |
327 | | { 16, 3 }, /* abc: a:b:c bits in AdvSIMD modified immediate. */ |
328 | | { 13, 3 }, /* asisdlso_opcode: opcode in advsimd ld/st single element. */ |
329 | | { 19, 5 }, /* b40: in the test bit and branch instructions. */ |
330 | | { 31, 1 }, /* b5: in the test bit and branch instructions. */ |
331 | | { 12, 4 }, /* cmode: in advsimd modified immediate instructions. */ |
332 | | { 12, 4 }, /* cond: condition flags as a source operand. */ |
333 | | { 0, 4 }, /* cond2: condition in truly conditional-executed inst. */ |
334 | | { 5, 5 }, /* defgh: d:e:f:g:h bits in AdvSIMD modified immediate. */ |
335 | | { 21, 2 }, /* hw: in move wide constant instructions. */ |
336 | | { 0, 1 }, /* imm1_0: general immediate in bits [0]. */ |
337 | | { 2, 1 }, /* imm1_2: general immediate in bits [2]. */ |
338 | | { 8, 1 }, /* imm1_8: general immediate in bits [8]. */ |
339 | | { 10, 1 }, /* imm1_10: general immediate in bits [10]. */ |
340 | | { 15, 1 }, /* imm1_15: general immediate in bits [15]. */ |
341 | | { 16, 1 }, /* imm1_16: general immediate in bits [16]. */ |
342 | | { 0, 2 }, /* imm2_0: general immediate in bits [1:0]. */ |
343 | | { 1, 2 }, /* imm2_1: general immediate in bits [2:1]. */ |
344 | | { 8, 2 }, /* imm2_8: general immediate in bits [9:8]. */ |
345 | | { 10, 2 }, /* imm2_10: 2-bit immediate, bits [11:10] */ |
346 | | { 12, 2 }, /* imm2_12: 2-bit immediate, bits [13:12] */ |
347 | | { 15, 2 }, /* imm2_15: 2-bit immediate, bits [16:15] */ |
348 | | { 16, 2 }, /* imm2_16: 2-bit immediate, bits [17:16] */ |
349 | | { 19, 2 }, /* imm2_19: 2-bit immediate, bits [20:19] */ |
350 | | { 0, 3 }, /* imm3_0: general immediate in bits [2:0]. */ |
351 | | { 5, 3 }, /* imm3_5: general immediate in bits [7:5]. */ |
352 | | { 10, 3 }, /* imm3_10: in add/sub extended reg instructions. */ |
353 | | { 12, 3 }, /* imm3_12: general immediate in bits [14:12]. */ |
354 | | { 14, 3 }, /* imm3_14: general immediate in bits [16:14]. */ |
355 | | { 15, 3 }, /* imm3_15: general immediate in bits [17:15]. */ |
356 | | { 0, 4 }, /* imm4_0: in rmif instructions. */ |
357 | | { 5, 4 }, /* imm4_5: in SME instructions. */ |
358 | | { 10, 4 }, /* imm4_10: in adddg/subg instructions. */ |
359 | | { 11, 4 }, /* imm4_11: in advsimd ext and advsimd ins instructions. */ |
360 | | { 14, 4 }, /* imm4_14: general immediate in bits [17:14]. */ |
361 | | { 16, 5 }, /* imm5: in conditional compare (immediate) instructions. */ |
362 | | { 10, 6 }, /* imm6_10: in add/sub reg shifted instructions. */ |
363 | | { 15, 6 }, /* imm6_15: in rmif instructions. */ |
364 | | { 15, 7 }, /* imm7: in load/store pair pre/post index instructions. */ |
365 | | { 13, 8 }, /* imm8: in floating-point scalar move immediate inst. */ |
366 | | { 12, 9 }, /* imm9: in load/store pre/post index instructions. */ |
367 | | { 10, 12 }, /* imm12: in ld/st unsigned imm or add/sub shifted inst. */ |
368 | | { 5, 14 }, /* imm14: in test bit and branch instructions. */ |
369 | | { 0, 16 }, /* imm16_0: in udf instruction. */ |
370 | | { 5, 16 }, /* imm16_5: in exception instructions. */ |
371 | | { 5, 19 }, /* imm19: e.g. in CBZ. */ |
372 | | { 0, 26 }, /* imm26: in unconditional branch instructions. */ |
373 | | { 16, 3 }, /* immb: in advsimd shift by immediate instructions. */ |
374 | | { 19, 4 }, /* immh: in advsimd shift by immediate instructions. */ |
375 | | { 5, 19 }, /* immhi: e.g. in ADRP. */ |
376 | | { 29, 2 }, /* immlo: e.g. in ADRP. */ |
377 | | { 16, 6 }, /* immr: in bitfield and logical immediate instructions. */ |
378 | | { 10, 6 }, /* imms: in bitfield and logical immediate instructions. */ |
379 | | { 11, 1 }, /* index: in ld/st inst deciding the pre/post-index. */ |
380 | | { 24, 1 }, /* index2: in ld/st pair inst deciding the pre/post-index. */ |
381 | | { 30, 2 }, /* ldst_size: size field in ld/st reg offset inst. */ |
382 | | { 13, 2 }, /* len: in advsimd tbl/tbx instructions. */ |
383 | | { 30, 1 }, /* lse_sz: in LSE extension atomic instructions. */ |
384 | | { 0, 4 }, /* nzcv: flag bit specifier, encoded in the "nzcv" field. */ |
385 | | { 29, 1 }, /* op: in AdvSIMD modified immediate instructions. */ |
386 | | { 19, 2 }, /* op0: in the system instructions. */ |
387 | | { 16, 3 }, /* op1: in the system instructions. */ |
388 | | { 5, 3 }, /* op2: in the system instructions. */ |
389 | | { 22, 2 }, /* opc: in load/store reg offset instructions. */ |
390 | | { 23, 1 }, /* opc1: in load/store reg offset instructions. */ |
391 | | { 12, 4 }, /* opcode: in advsimd load/store instructions. */ |
392 | | { 13, 3 }, /* option: in ld/st reg offset + add/sub extended reg inst. */ |
393 | | { 11, 2 }, /* rotate1: FCMLA immediate rotate. */ |
394 | | { 13, 2 }, /* rotate2: Indexed element FCMLA immediate rotate. */ |
395 | | { 12, 1 }, /* rotate3: FCADD immediate rotate. */ |
396 | | { 10, 6 }, /* scale: in the fixed-point scalar to fp converting inst. */ |
397 | | { 31, 1 }, /* sf: in integer data processing instructions. */ |
398 | | { 22, 2 }, /* shift: in add/sub reg/imm shifted instructions. */ |
399 | | { 22, 2 }, /* size: in most AdvSIMD and floating-point instructions. */ |
400 | | { 22, 1 }, /* sz: 1-bit element size select. */ |
401 | | { 22, 2 }, /* type: floating point type field in fp data inst. */ |
402 | | { 10, 2 }, /* vldst_size: size field in the AdvSIMD load/store inst. */ |
403 | | { 5, 3 }, /* off3: immediate offset used to calculate slice number in a |
404 | | ZA tile. */ |
405 | | { 5, 2 }, /* off2: immediate offset used to calculate slice number in |
406 | | a ZA tile. */ |
407 | | { 7, 1 }, /* ZAn_1: name of the 1-bit encoded ZA tile. */ |
408 | | { 5, 1 }, /* ol: immediate offset used to calculate slice number in a ZA |
409 | | tile. */ |
410 | | { 6, 2 }, /* ZAn_2: name of the 2-bit encoded ZA tile. */ |
411 | | { 5, 3 }, /* ZAn_3: name of the 3-bit encoded ZA tile. */ |
412 | | { 6, 1 }, /* ZAn: name of the 1-bit encoded ZA tile. */ |
413 | | { 12, 4 }, /* opc2: in rcpc3 ld/st inst deciding the pre/post-index. */ |
414 | | { 30, 2 }, /* rcpc3_size: in rcpc3 ld/st, field controls Rt/Rt2 width. */ |
415 | | }; |
416 | | |
417 | | enum aarch64_operand_class |
418 | | aarch64_get_operand_class (enum aarch64_opnd type) |
419 | 2.24M | { |
420 | 2.24M | return aarch64_operands[type].op_class; |
421 | 2.24M | } |
422 | | |
423 | | const char * |
424 | | aarch64_get_operand_name (enum aarch64_opnd type) |
425 | 0 | { |
426 | 0 | return aarch64_operands[type].name; |
427 | 0 | } |
428 | | |
429 | | /* Get operand description string. |
430 | | This is usually for diagnostic purposes. */ |
431 | | const char * |
432 | | aarch64_get_operand_desc (enum aarch64_opnd type) |
433 | 0 | { |
434 | 0 | return aarch64_operands[type].desc; |
435 | 0 | } |
436 | | |
437 | | /* Table of all conditional affixes. */ |
438 | | const aarch64_cond aarch64_conds[16] = |
439 | | { |
440 | | {{"eq", "none"}, 0x0}, |
441 | | {{"ne", "any"}, 0x1}, |
442 | | {{"cs", "hs", "nlast"}, 0x2}, |
443 | | {{"cc", "lo", "ul", "last"}, 0x3}, |
444 | | {{"mi", "first"}, 0x4}, |
445 | | {{"pl", "nfrst"}, 0x5}, |
446 | | {{"vs"}, 0x6}, |
447 | | {{"vc"}, 0x7}, |
448 | | {{"hi", "pmore"}, 0x8}, |
449 | | {{"ls", "plast"}, 0x9}, |
450 | | {{"ge", "tcont"}, 0xa}, |
451 | | {{"lt", "tstop"}, 0xb}, |
452 | | {{"gt"}, 0xc}, |
453 | | {{"le"}, 0xd}, |
454 | | {{"al"}, 0xe}, |
455 | | {{"nv"}, 0xf}, |
456 | | }; |
457 | | |
458 | | const aarch64_cond * |
459 | | get_cond_from_value (aarch64_insn value) |
460 | 60.3k | { |
461 | 60.3k | assert (value < 16); |
462 | 60.3k | return &aarch64_conds[(unsigned int) value]; |
463 | 60.3k | } |
464 | | |
465 | | const aarch64_cond * |
466 | | get_inverted_cond (const aarch64_cond *cond) |
467 | 802 | { |
468 | 802 | return &aarch64_conds[cond->value ^ 0x1]; |
469 | 802 | } |
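
  [Illustrative aside, not part of the file above: get_inverted_cond relies on
   the fact that flipping bit 0 of an AArch64 condition value yields its
   logical opposite for the ordinary conditions; the AL/NV pair is the only
   exception, since both execute unconditionally in A64. A minimal standalone
   sketch, reusing the primary names from the aarch64_conds table above:

  #include <stdio.h>

  int
  main (void)
  {
    static const char *const names[16] =
      { "eq", "ne", "cs", "cc", "mi", "pl", "vs", "vc",
        "hi", "ls", "ge", "lt", "gt", "le", "al", "nv" };
    /* Pair each condition value v with its inverse v ^ 1.  */
    for (int v = 0; v < 16; v += 2)
      printf ("%s <-> %s\n", names[v], names[v ^ 1]);
    return 0;
  }
  ]
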
470 | | |
471 | | /* Table describing the operand extension/shifting operators; indexed by |
472 | | enum aarch64_modifier_kind. |
473 | | |
474 | | The value column provides the most common values for encoding modifiers, |
475 | | which enables table-driven encoding/decoding for the modifiers. */ |
476 | | const struct aarch64_name_value_pair aarch64_operand_modifiers [] = |
477 | | { |
478 | | {"none", 0x0}, |
479 | | {"msl", 0x0}, |
480 | | {"ror", 0x3}, |
481 | | {"asr", 0x2}, |
482 | | {"lsr", 0x1}, |
483 | | {"lsl", 0x0}, |
484 | | {"uxtb", 0x0}, |
485 | | {"uxth", 0x1}, |
486 | | {"uxtw", 0x2}, |
487 | | {"uxtx", 0x3}, |
488 | | {"sxtb", 0x4}, |
489 | | {"sxth", 0x5}, |
490 | | {"sxtw", 0x6}, |
491 | | {"sxtx", 0x7}, |
492 | | {"mul", 0x0}, |
493 | | {"mul vl", 0x0}, |
494 | | {NULL, 0}, |
495 | | }; |
496 | | |
497 | | enum aarch64_modifier_kind |
498 | | aarch64_get_operand_modifier (const struct aarch64_name_value_pair *desc) |
499 | 0 | { |
500 | 0 | return desc - aarch64_operand_modifiers; |
501 | 0 | } |
502 | | |
503 | | aarch64_insn |
504 | | aarch64_get_operand_modifier_value (enum aarch64_modifier_kind kind) |
505 | 0 | { |
506 | 0 | return aarch64_operand_modifiers[kind].value; |
507 | 0 | } |
508 | | |
509 | | enum aarch64_modifier_kind |
510 | | aarch64_get_operand_modifier_from_value (aarch64_insn value, |
511 | | bool extend_p) |
512 | 566k | { |
513 | 566k | if (extend_p) |
514 | 78.6k | return AARCH64_MOD_UXTB + value; |
515 | 488k | else |
516 | 488k | return AARCH64_MOD_LSL - value; |
517 | 566k | } |
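
  [Illustrative aside, not part of the file above:
   aarch64_get_operand_modifier_from_value leans on the ordering of
   aarch64_operand_modifiers, recovering extenders as UXTB + value
   (uxtb=0 ... sxtx=7) and shifts as LSL - value (lsl=0, lsr=1, asr=2, ror=3,
   matching the table's value column). The enum below is a stand-in that only
   mirrors the order of that table; the real enum is defined in the opcodes
   headers.

  #include <stdio.h>

  enum mod { MOD_NONE, MOD_MSL, MOD_ROR, MOD_ASR, MOD_LSR, MOD_LSL,
             MOD_UXTB, MOD_UXTH, MOD_UXTW, MOD_UXTX,
             MOD_SXTB, MOD_SXTH, MOD_SXTW, MOD_SXTX };

  static enum mod
  mod_from_value (unsigned value, int extend_p)
  {
    /* Same arithmetic as the function above, on the stand-in enum.  */
    return extend_p ? MOD_UXTB + value : MOD_LSL - value;
  }

  int
  main (void)
  {
    /* Field value 2 decodes to UXTW as an extender and ASR as a shift.  */
    printf ("%d %d\n", mod_from_value (2, 1) == MOD_UXTW,
            mod_from_value (2, 0) == MOD_ASR);
    return 0;
  }
  ]
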
518 | | |
519 | | bool |
520 | | aarch64_extend_operator_p (enum aarch64_modifier_kind kind) |
521 | 41.9k | { |
522 | 41.9k | return kind > AARCH64_MOD_LSL && kind <= AARCH64_MOD_SXTX; |
523 | 41.9k | } |
524 | | |
525 | | static inline bool |
526 | | aarch64_shift_operator_p (enum aarch64_modifier_kind kind) |
527 | 467k | { |
528 | 467k | return kind >= AARCH64_MOD_ROR && kind <= AARCH64_MOD_LSL; |
529 | 467k | } |
530 | | |
531 | | const struct aarch64_name_value_pair aarch64_barrier_options[16] = |
532 | | { |
533 | | { "#0x00", 0x0 }, |
534 | | { "oshld", 0x1 }, |
535 | | { "oshst", 0x2 }, |
536 | | { "osh", 0x3 }, |
537 | | { "#0x04", 0x4 }, |
538 | | { "nshld", 0x5 }, |
539 | | { "nshst", 0x6 }, |
540 | | { "nsh", 0x7 }, |
541 | | { "#0x08", 0x8 }, |
542 | | { "ishld", 0x9 }, |
543 | | { "ishst", 0xa }, |
544 | | { "ish", 0xb }, |
545 | | { "#0x0c", 0xc }, |
546 | | { "ld", 0xd }, |
547 | | { "st", 0xe }, |
548 | | { "sy", 0xf }, |
549 | | }; |
550 | | |
551 | | const struct aarch64_name_value_pair aarch64_barrier_dsb_nxs_options[4] = |
552 | | { /* CRm<3:2> #imm */ |
553 | | { "oshnxs", 16 }, /* 00 16 */ |
554 | | { "nshnxs", 20 }, /* 01 20 */ |
555 | | { "ishnxs", 24 }, /* 10 24 */ |
556 | | { "synxs", 28 }, /* 11 28 */ |
557 | | }; |
558 | | |
559 | | /* Table describing the operands supported by the aliases of the HINT |
560 | | instruction. |
561 | | |
562 | | The name column is the operand that is accepted for the alias. The value |
563 | | column is the hint number of the alias. The list of operands is terminated |
564 | | by NULL in the name column. */ |
565 | | |
566 | | const struct aarch64_name_value_pair aarch64_hint_options[] = |
567 | | { |
568 | | /* BTI. This is also the F_DEFAULT entry for AARCH64_OPND_BTI_TARGET. */ |
569 | | { " ", HINT_ENCODE (HINT_OPD_F_NOPRINT, 0x20) }, |
570 | | { "csync", HINT_OPD_CSYNC }, /* PSB CSYNC. */ |
571 | | { "dsync", HINT_OPD_DSYNC }, /* GCSB DSYNC. */ |
572 | | { "c", HINT_OPD_C }, /* BTI C. */ |
573 | | { "j", HINT_OPD_J }, /* BTI J. */ |
574 | | { "jc", HINT_OPD_JC }, /* BTI JC. */ |
575 | | { NULL, HINT_OPD_NULL }, |
576 | | }; |
577 | | |
578 | | /* op -> op: load = 0 instruction = 1 store = 2 |
579 | | l -> level: 1-3 |
580 | | t -> temporal: temporal (retained) = 0 non-temporal (streaming) = 1 */ |
581 | | #define B(op,l,t) (((op) << 3) | (((l) - 1) << 1) | (t)) |
582 | | const struct aarch64_name_value_pair aarch64_prfops[32] = |
583 | | { |
584 | | { "pldl1keep", B(0, 1, 0) }, |
585 | | { "pldl1strm", B(0, 1, 1) }, |
586 | | { "pldl2keep", B(0, 2, 0) }, |
587 | | { "pldl2strm", B(0, 2, 1) }, |
588 | | { "pldl3keep", B(0, 3, 0) }, |
589 | | { "pldl3strm", B(0, 3, 1) }, |
590 | | { "pldslckeep", B(0, 4, 0) }, |
591 | | { "pldslcstrm", B(0, 4, 1) }, |
592 | | { "plil1keep", B(1, 1, 0) }, |
593 | | { "plil1strm", B(1, 1, 1) }, |
594 | | { "plil2keep", B(1, 2, 0) }, |
595 | | { "plil2strm", B(1, 2, 1) }, |
596 | | { "plil3keep", B(1, 3, 0) }, |
597 | | { "plil3strm", B(1, 3, 1) }, |
598 | | { "plislckeep", B(1, 4, 0) }, |
599 | | { "plislcstrm", B(1, 4, 1) }, |
600 | | { "pstl1keep", B(2, 1, 0) }, |
601 | | { "pstl1strm", B(2, 1, 1) }, |
602 | | { "pstl2keep", B(2, 2, 0) }, |
603 | | { "pstl2strm", B(2, 2, 1) }, |
604 | | { "pstl3keep", B(2, 3, 0) }, |
605 | | { "pstl3strm", B(2, 3, 1) }, |
606 | | { "pstslckeep", B(2, 4, 0) }, |
607 | | { "pstslcstrm", B(2, 4, 1) }, |
608 | | { NULL, 0x18 }, |
609 | | { NULL, 0x19 }, |
610 | | { NULL, 0x1a }, |
611 | | { NULL, 0x1b }, |
612 | | { NULL, 0x1c }, |
613 | | { NULL, 0x1d }, |
614 | | { NULL, 0x1e }, |
615 | | { NULL, 0x1f }, |
616 | | }; |
617 | | #undef B |
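
  [Illustrative aside, not part of the file above: the B(op,l,t) packing used
   for aarch64_prfops places the operation in bits [4:3], the level minus one
   in bits [2:1] and the temporal hint in bit 0, so e.g. pldl1keep encodes as
   0x00 and pstl2strm as 0x13. A minimal check:

  #include <stdio.h>

  #define B(op, l, t) (((op) << 3) | (((l) - 1) << 1) | (t))

  int
  main (void)
  {
    printf ("pldl1keep = 0x%02x\n", B (0, 1, 0));   /* 0x00 */
    printf ("pstl2strm = 0x%02x\n", B (2, 2, 1));   /* 0x13 */
    printf ("plil3keep = 0x%02x\n", B (1, 3, 0));   /* 0x0c */
    return 0;
  }
  ]
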
618 | | |
619 | | /* Utilities on value constraint. */ |
620 | | |
621 | | static inline int |
622 | | value_in_range_p (int64_t value, int low, int high) |
623 | 2.13M | { |
624 | 2.13M | return (value >= low && value <= high) ? 1 : 0; |
625 | 2.13M | } |
626 | | |
627 | | /* Return true if VALUE is a multiple of ALIGN. */ |
628 | | static inline int |
629 | | value_aligned_p (int64_t value, int align) |
630 | 1.51M | { |
631 | 1.51M | return (value % align) == 0; |
632 | 1.51M | } |
633 | | |
634 | | /* Return 1 if VALUE fits in a signed field of WIDTH bits. */ |
635 | | static inline int |
636 | | value_fit_signed_field_p (int64_t value, unsigned width) |
637 | 942k | { |
638 | 942k | assert (width < 32); |
639 | 942k | if (width < sizeof (value) * 8) |
640 | 942k | { |
641 | 942k | int64_t lim = (uint64_t) 1 << (width - 1); |
642 | 942k | if (value >= -lim && value < lim) |
643 | 942k | return 1; |
644 | 942k | } |
645 | 0 | return 0; |
646 | 942k | } |
647 | | |
648 | | /* Return 1 if VALUE fits in an unsigned field of WIDTH bits. */ |
649 | | static inline int |
650 | | value_fit_unsigned_field_p (int64_t value, unsigned width) |
651 | 1.78M | { |
652 | 1.78M | assert (width < 32); |
653 | 1.78M | if (width < sizeof (value) * 8) |
654 | 1.78M | { |
655 | 1.78M | int64_t lim = (uint64_t) 1 << width; |
656 | 1.78M | if (value >= 0 && value < lim) |
657 | 1.78M | return 1; |
658 | 1.78M | } |
659 | 0 | return 0; |
660 | 1.78M | } |
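
  [Illustrative aside, not part of the file above: the two checks above accept
   the usual two's-complement and unsigned ranges for a WIDTH-bit field,
   i.e. [-2^(WIDTH-1), 2^(WIDTH-1)-1] and [0, 2^WIDTH-1]; for example the
   9-bit signed imm9 load/store offset field covers -256..255. A quick sketch
   of those bounds:

  #include <stdint.h>
  #include <stdio.h>

  int
  main (void)
  {
    unsigned width = 9;
    int64_t slim = (int64_t) 1 << (width - 1);
    int64_t ulim = (int64_t) 1 << width;
    printf ("signed %u-bit:   [%lld, %lld]\n", width,
            (long long) -slim, (long long) (slim - 1));   /* [-256, 255] */
    printf ("unsigned %u-bit: [0, %lld]\n", width,
            (long long) (ulim - 1));                      /* [0, 511] */
    return 0;
  }
  ]
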
661 | | |
662 | | /* Return 1 if OPERAND is SP or WSP. */ |
663 | | int |
664 | | aarch64_stack_pointer_p (const aarch64_opnd_info *operand) |
665 | 137k | { |
666 | 137k | return ((aarch64_get_operand_class (operand->type) |
667 | 137k | == AARCH64_OPND_CLASS_INT_REG) |
668 | 137k | && operand_maybe_stack_pointer (aarch64_operands + operand->type) |
669 | 137k | && operand->reg.regno == 31); |
670 | 137k | } |
671 | | |
672 | | /* Return 1 if OPERAND is XZR or WZR. */ |
673 | | int |
674 | | aarch64_zero_register_p (const aarch64_opnd_info *operand) |
675 | 0 | { |
676 | 0 | return ((aarch64_get_operand_class (operand->type) |
677 | 0 | == AARCH64_OPND_CLASS_INT_REG) |
678 | 0 | && !operand_maybe_stack_pointer (aarch64_operands + operand->type) |
679 | 0 | && operand->reg.regno == 31); |
680 | 0 | } |
681 | | |
682 | | /* Return true if the operand *OPERAND, which has the operand code |
683 | | OPERAND->TYPE and has been qualified by OPERAND->QUALIFIER, can also be |
684 | | qualified by the qualifier TARGET. */ |
685 | | |
686 | | static inline int |
687 | | operand_also_qualified_p (const struct aarch64_opnd_info *operand, |
688 | | aarch64_opnd_qualifier_t target) |
689 | 3.60M | { |
690 | 3.60M | switch (operand->qualifier) |
691 | 3.60M | { |
692 | 5.02k | case AARCH64_OPND_QLF_W: |
693 | 5.02k | if (target == AARCH64_OPND_QLF_WSP && aarch64_stack_pointer_p (operand)) |
694 | 215 | return 1; |
695 | 4.80k | break; |
696 | 975k | case AARCH64_OPND_QLF_X: |
697 | 975k | if (target == AARCH64_OPND_QLF_SP && aarch64_stack_pointer_p (operand)) |
698 | 67 | return 1; |
699 | 974k | break; |
700 | 974k | case AARCH64_OPND_QLF_WSP: |
701 | 0 | if (target == AARCH64_OPND_QLF_W |
702 | 0 | && operand_maybe_stack_pointer (aarch64_operands + operand->type)) |
703 | 0 | return 1; |
704 | 0 | break; |
705 | 0 | case AARCH64_OPND_QLF_SP: |
706 | 0 | if (target == AARCH64_OPND_QLF_X |
707 | 0 | && operand_maybe_stack_pointer (aarch64_operands + operand->type)) |
708 | 0 | return 1; |
709 | 0 | break; |
710 | 2.62M | default: |
711 | 2.62M | break; |
712 | 3.60M | } |
713 | | |
714 | 3.60M | return 0; |
715 | 3.60M | } |
716 | | |
717 | | /* Given qualifier sequence list QSEQ_LIST and the known qualifier KNOWN_QLF |
718 | | for operand KNOWN_IDX, return the expected qualifier for operand IDX. |
719 | | |
720 | | Return NIL if more than one expected qualifiers are found. */ |
721 | | |
722 | | aarch64_opnd_qualifier_t |
723 | | aarch64_get_expected_qualifier (const aarch64_opnd_qualifier_seq_t *qseq_list, |
724 | | int idx, |
725 | | const aarch64_opnd_qualifier_t known_qlf, |
726 | | int known_idx) |
727 | 0 | { |
728 | 0 | int i, saved_i; |
729 | | |
730 | | /* Special case. |
731 | | |
732 | | When the known qualifier is NIL, we have to assume that there is only |
733 | | one qualifier sequence in the *QSEQ_LIST and return the corresponding |
734 | | qualifier directly. One scenario is that for instruction |
735 | | PRFM <prfop>, [<Xn|SP>, #:lo12:<symbol>] |
736 | | which has only one possible valid qualifier sequence |
737 | | NIL, S_D |
738 | | the caller may pass NIL in KNOWN_QLF to obtain S_D so that it can |
739 | | determine the correct relocation type (i.e. LDST64_LO12) for PRFM. |
740 | | |
741 | | Because the qualifier NIL has dual roles in the qualifier sequence: |
742 | | it can mean no qualifier for the operand, or the qualifier sequence is |
743 | | not in use (when all qualifiers in the sequence are NILs), we have to |
744 | | handle this special case here. */ |
745 | 0 | if (known_qlf == AARCH64_OPND_NIL) |
746 | 0 | { |
747 | 0 | assert (qseq_list[0][known_idx] == AARCH64_OPND_NIL); |
748 | 0 | return qseq_list[0][idx]; |
749 | 0 | } |
750 | | |
751 | 0 | for (i = 0, saved_i = -1; i < AARCH64_MAX_QLF_SEQ_NUM; ++i) |
752 | 0 | { |
753 | 0 | if (qseq_list[i][known_idx] == known_qlf) |
754 | 0 | { |
755 | 0 | if (saved_i != -1) |
756 | | /* More than one sequence is found to have KNOWN_QLF at |
757 | | KNOWN_IDX. */ |
758 | 0 | return AARCH64_OPND_NIL; |
759 | 0 | saved_i = i; |
760 | 0 | } |
761 | 0 | } |
762 | | |
763 | 0 | return qseq_list[saved_i][idx]; |
764 | 0 | } |
765 | | |
766 | | enum operand_qualifier_kind |
767 | | { |
768 | | OQK_NIL, |
769 | | OQK_OPD_VARIANT, |
770 | | OQK_VALUE_IN_RANGE, |
771 | | OQK_MISC, |
772 | | }; |
773 | | |
774 | | /* Operand qualifier description. */ |
775 | | struct operand_qualifier_data |
776 | | { |
777 | | /* The usage of the three data fields depends on the qualifier kind. */ |
778 | | int data0; |
779 | | int data1; |
780 | | int data2; |
781 | | /* Description. */ |
782 | | const char *desc; |
783 | | /* Kind. */ |
784 | | enum operand_qualifier_kind kind; |
785 | | }; |
786 | | |
787 | | /* Indexed by the operand qualifier enumerators. */ |
788 | | struct operand_qualifier_data aarch64_opnd_qualifiers[] = |
789 | | { |
790 | | {0, 0, 0, "NIL", OQK_NIL}, |
791 | | |
792 | | /* Operand variant qualifiers. |
793 | | First 3 fields: |
794 | | element size, number of elements and common value for encoding. */ |
795 | | |
796 | | {4, 1, 0x0, "w", OQK_OPD_VARIANT}, |
797 | | {8, 1, 0x1, "x", OQK_OPD_VARIANT}, |
798 | | {4, 1, 0x0, "wsp", OQK_OPD_VARIANT}, |
799 | | {8, 1, 0x1, "sp", OQK_OPD_VARIANT}, |
800 | | |
801 | | {1, 1, 0x0, "b", OQK_OPD_VARIANT}, |
802 | | {2, 1, 0x1, "h", OQK_OPD_VARIANT}, |
803 | | {4, 1, 0x2, "s", OQK_OPD_VARIANT}, |
804 | | {8, 1, 0x3, "d", OQK_OPD_VARIANT}, |
805 | | {16, 1, 0x4, "q", OQK_OPD_VARIANT}, |
806 | | {4, 1, 0x0, "4b", OQK_OPD_VARIANT}, |
807 | | {4, 1, 0x0, "2h", OQK_OPD_VARIANT}, |
808 | | |
809 | | {1, 4, 0x0, "4b", OQK_OPD_VARIANT}, |
810 | | {1, 8, 0x0, "8b", OQK_OPD_VARIANT}, |
811 | | {1, 16, 0x1, "16b", OQK_OPD_VARIANT}, |
812 | | {2, 2, 0x0, "2h", OQK_OPD_VARIANT}, |
813 | | {2, 4, 0x2, "4h", OQK_OPD_VARIANT}, |
814 | | {2, 8, 0x3, "8h", OQK_OPD_VARIANT}, |
815 | | {4, 2, 0x4, "2s", OQK_OPD_VARIANT}, |
816 | | {4, 4, 0x5, "4s", OQK_OPD_VARIANT}, |
817 | | {8, 1, 0x6, "1d", OQK_OPD_VARIANT}, |
818 | | {8, 2, 0x7, "2d", OQK_OPD_VARIANT}, |
819 | | {16, 1, 0x8, "1q", OQK_OPD_VARIANT}, |
820 | | |
821 | | {0, 0, 0, "z", OQK_OPD_VARIANT}, |
822 | | {0, 0, 0, "m", OQK_OPD_VARIANT}, |
823 | | |
824 | | /* Qualifier for scaled immediate for Tag granule (stg,st2g,etc). */ |
825 | | {16, 0, 0, "tag", OQK_OPD_VARIANT}, |
826 | | |
827 | | /* Qualifiers constraining the value range. |
828 | | First 3 fields: |
829 | | Lower bound, higher bound, unused. */ |
830 | | |
831 | | {0, 15, 0, "CR", OQK_VALUE_IN_RANGE}, |
832 | | {0, 7, 0, "imm_0_7" , OQK_VALUE_IN_RANGE}, |
833 | | {0, 15, 0, "imm_0_15", OQK_VALUE_IN_RANGE}, |
834 | | {0, 31, 0, "imm_0_31", OQK_VALUE_IN_RANGE}, |
835 | | {0, 63, 0, "imm_0_63", OQK_VALUE_IN_RANGE}, |
836 | | {1, 32, 0, "imm_1_32", OQK_VALUE_IN_RANGE}, |
837 | | {1, 64, 0, "imm_1_64", OQK_VALUE_IN_RANGE}, |
838 | | |
839 | | /* Qualifiers for miscellaneous purpose. |
840 | | First 3 fields: |
841 | | unused, unused and unused. */ |
842 | | |
843 | | {0, 0, 0, "lsl", 0}, |
844 | | {0, 0, 0, "msl", 0}, |
845 | | |
846 | | {0, 0, 0, "retrieving", 0}, |
847 | | }; |
848 | | |
849 | | static inline bool |
850 | | operand_variant_qualifier_p (aarch64_opnd_qualifier_t qualifier) |
851 | 8.93M | { |
852 | 8.93M | return aarch64_opnd_qualifiers[qualifier].kind == OQK_OPD_VARIANT; |
853 | 8.93M | } |
854 | | |
855 | | static inline bool |
856 | | qualifier_value_in_range_constraint_p (aarch64_opnd_qualifier_t qualifier) |
857 | 3.60M | { |
858 | 3.60M | return aarch64_opnd_qualifiers[qualifier].kind == OQK_VALUE_IN_RANGE; |
859 | 3.60M | } |
860 | | |
861 | | const char* |
862 | | aarch64_get_qualifier_name (aarch64_opnd_qualifier_t qualifier) |
863 | 2.72M | { |
864 | 2.72M | return aarch64_opnd_qualifiers[qualifier].desc; |
865 | 2.72M | } |
866 | | |
867 | | /* Given an operand qualifier, return the expected data element size |
868 | | of a qualified operand. */ |
869 | | unsigned char |
870 | | aarch64_get_qualifier_esize (aarch64_opnd_qualifier_t qualifier) |
871 | 6.32M | { |
872 | 6.32M | assert (operand_variant_qualifier_p (qualifier)); |
873 | 6.32M | return aarch64_opnd_qualifiers[qualifier].data0; |
874 | 6.32M | } |
875 | | |
876 | | unsigned char |
877 | | aarch64_get_qualifier_nelem (aarch64_opnd_qualifier_t qualifier) |
878 | 59.4k | { |
879 | 59.4k | assert (operand_variant_qualifier_p (qualifier)); |
880 | 59.4k | return aarch64_opnd_qualifiers[qualifier].data1; |
881 | 59.4k | } |
882 | | |
883 | | aarch64_insn |
884 | | aarch64_get_qualifier_standard_value (aarch64_opnd_qualifier_t qualifier) |
885 | 2.55M | { |
886 | 2.55M | assert (operand_variant_qualifier_p (qualifier)); |
887 | 2.55M | return aarch64_opnd_qualifiers[qualifier].data2; |
888 | 2.55M | } |
889 | | |
890 | | static int |
891 | | get_lower_bound (aarch64_opnd_qualifier_t qualifier) |
892 | 495k | { |
893 | 495k | assert (qualifier_value_in_range_constraint_p (qualifier)); |
894 | 495k | return aarch64_opnd_qualifiers[qualifier].data0; |
895 | 495k | } |
896 | | |
897 | | static int |
898 | | get_upper_bound (aarch64_opnd_qualifier_t qualifier) |
899 | 526k | { |
900 | 526k | assert (qualifier_value_in_range_constraint_p (qualifier)); |
901 | 526k | return aarch64_opnd_qualifiers[qualifier].data1; |
902 | 526k | } |
903 | | |
904 | | #ifdef DEBUG_AARCH64 |
905 | | void |
906 | | aarch64_verbose (const char *str, ...) |
907 | | { |
908 | | va_list ap; |
909 | | va_start (ap, str); |
910 | | printf ("#### "); |
911 | | vprintf (str, ap); |
912 | | printf ("\n"); |
913 | | va_end (ap); |
914 | | } |
915 | | |
916 | | static inline void |
917 | | dump_qualifier_sequence (const aarch64_opnd_qualifier_t *qualifier) |
918 | | { |
919 | | int i; |
920 | | printf ("#### \t"); |
921 | | for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i, ++qualifier) |
922 | | printf ("%s,", aarch64_get_qualifier_name (*qualifier)); |
923 | | printf ("\n"); |
924 | | } |
925 | | |
926 | | static void |
927 | | dump_match_qualifiers (const struct aarch64_opnd_info *opnd, |
928 | | const aarch64_opnd_qualifier_t *qualifier) |
929 | | { |
930 | | int i; |
931 | | aarch64_opnd_qualifier_t curr[AARCH64_MAX_OPND_NUM]; |
932 | | |
933 | | aarch64_verbose ("dump_match_qualifiers:"); |
934 | | for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i) |
935 | | curr[i] = opnd[i].qualifier; |
936 | | dump_qualifier_sequence (curr); |
937 | | aarch64_verbose ("against"); |
938 | | dump_qualifier_sequence (qualifier); |
939 | | } |
940 | | #endif /* DEBUG_AARCH64 */ |
941 | | |
942 | | /* This function checks if the given opcode OPCODE describes a destructive |
943 | | instruction based on the usage of the registers. It does not recognize |
944 | | unary destructive instructions. */ |
945 | | bool |
946 | | aarch64_is_destructive_by_operands (const aarch64_opcode *opcode) |
947 | 27 | { |
948 | 27 | int i = 0; |
949 | 27 | const enum aarch64_opnd *opnds = opcode->operands; |
950 | | |
951 | 27 | if (opnds[0] == AARCH64_OPND_NIL) |
952 | 0 | return false; |
953 | | |
954 | 58 | while (opnds[++i] != AARCH64_OPND_NIL) |
955 | 56 | if (opnds[i] == opnds[0]) |
956 | 25 | return true; |
957 | | |
958 | 2 | return false; |
959 | 27 | } |
960 | | |
961 | | /* TODO improve this: we could have an extra field at runtime to |
962 | | store the number of operands rather than calculating it every time. */ |
963 | | |
964 | | int |
965 | | aarch64_num_of_operands (const aarch64_opcode *opcode) |
966 | 7.24M | { |
967 | 7.24M | int i = 0; |
968 | 7.24M | const enum aarch64_opnd *opnds = opcode->operands; |
969 | 24.6M | while (opnds[i++] != AARCH64_OPND_NIL) |
970 | 17.4M | ; |
971 | 7.24M | --i; |
972 | 7.24M | assert (i >= 0 && i <= AARCH64_MAX_OPND_NUM); |
973 | 7.24M | return i; |
974 | 7.24M | } |
975 | | |
976 | | /* Find the best matched qualifier sequence in *QUALIFIERS_LIST for INST. |
977 | | If succeeds, fill the found sequence in *RET, return 1; otherwise return 0. |
978 | | |
979 | | Store the smallest number of non-matching qualifiers in *INVALID_COUNT. |
980 | | This is always 0 if the function succeeds. |
981 | | |
982 | | N.B. on the entry, it is very likely that only some operands in *INST |
983 | | have had their qualifiers been established. |
984 | | |
985 | | If STOP_AT is not -1, the function will only try to match |
986 | | the qualifier sequence for operands before and including the operand |
987 | | of index STOP_AT; and on success *RET will only be filled with the first |
988 | | (STOP_AT+1) qualifiers. |
989 | | |
990 | | A couple of examples of the matching algorithm: |
991 | | |
992 | | X,W,NIL should match |
993 | | X,W,NIL |
994 | | |
995 | | NIL,NIL should match |
996 | | X ,NIL |
997 | | |
998 | | Apart from serving the main encoding routine, this can also be called |
999 | | during or after the operand decoding. */ |
1000 | | |
1001 | | int |
1002 | | aarch64_find_best_match (const aarch64_inst *inst, |
1003 | | const aarch64_opnd_qualifier_seq_t *qualifiers_list, |
1004 | | int stop_at, aarch64_opnd_qualifier_t *ret, |
1005 | | int *invalid_count) |
1006 | 6.52M | { |
1007 | 6.52M | int i, num_opnds, invalid, min_invalid; |
1008 | 6.52M | const aarch64_opnd_qualifier_t *qualifiers; |
1009 | | |
1010 | 6.52M | num_opnds = aarch64_num_of_operands (inst->opcode); |
1011 | 6.52M | if (num_opnds == 0) |
1012 | 0 | { |
1013 | 0 | DEBUG_TRACE ("SUCCEED: no operand"); |
1014 | 0 | *invalid_count = 0; |
1015 | 0 | return 1; |
1016 | 0 | } |
1017 | | |
1018 | 6.52M | if (stop_at < 0 || stop_at >= num_opnds) |
1019 | 5.70M | stop_at = num_opnds - 1; |
1020 | | |
1021 | | /* For each pattern. */ |
1022 | 6.52M | min_invalid = num_opnds; |
1023 | 9.15M | for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list) |
1024 | 9.15M | { |
1025 | 9.15M | int j; |
1026 | 9.15M | qualifiers = *qualifiers_list; |
1027 | | |
1028 | | /* Start as positive. */ |
1029 | 9.15M | invalid = 0; |
1030 | | |
1031 | 9.15M | DEBUG_TRACE ("%d", i); |
1032 | | #ifdef DEBUG_AARCH64 |
1033 | | if (debug_dump) |
1034 | | dump_match_qualifiers (inst->operands, qualifiers); |
1035 | | #endif |
1036 | | |
1037 | | /* The first entry should be taken literally, even if it's an empty |
1038 | | qualifier sequence. (This matters for strict testing.) In other |
1039 | | positions an empty sequence acts as a terminator. */ |
1040 | 9.15M | if (i > 0 && empty_qualifier_sequence_p (qualifiers)) |
1041 | 47.2k | break; |
1042 | | |
1043 | 31.3M | for (j = 0; j < num_opnds && j <= stop_at; ++j, ++qualifiers) |
1044 | 22.2M | { |
1045 | 22.2M | if (inst->operands[j].qualifier == AARCH64_OPND_QLF_NIL |
1046 | 22.2M | && !(inst->opcode->flags & F_STRICT)) |
1047 | 12.1M | { |
1048 | | /* Either the operand does not have qualifier, or the qualifier |
1049 | | for the operand needs to be deduced from the qualifier |
1050 | | sequence. |
1051 | | In the latter case, any constraint checking related with |
1052 | | the obtained qualifier should be done later in |
1053 | | operand_general_constraint_met_p. */ |
1054 | 12.1M | continue; |
1055 | 12.1M | } |
1056 | 10.1M | else if (*qualifiers != inst->operands[j].qualifier) |
1057 | 3.60M | { |
1058 | | /* Unless the target qualifier can also qualify the operand |
1059 | | (which has already had a non-nil qualifier), non-equal |
1060 | | qualifiers are generally un-matched. */ |
1061 | 3.60M | if (operand_also_qualified_p (inst->operands + j, *qualifiers)) |
1062 | 282 | continue; |
1063 | 3.60M | else |
1064 | 3.60M | invalid += 1; |
1065 | 3.60M | } |
1066 | 6.51M | else |
1067 | 6.51M | continue; /* Equal qualifiers are certainly matched. */ |
1068 | 22.2M | } |
1069 | | |
1070 | 9.11M | if (min_invalid > invalid) |
1071 | 8.10M | min_invalid = invalid; |
1072 | | |
1073 | | /* Qualifiers established. */ |
1074 | 9.11M | if (min_invalid == 0) |
1075 | 6.48M | break; |
1076 | 9.11M | } |
1077 | | |
1078 | 6.52M | *invalid_count = min_invalid; |
1079 | 6.52M | if (min_invalid == 0) |
1080 | 6.48M | { |
1081 | | /* Fill the result in *RET. */ |
1082 | 6.48M | int j; |
1083 | 6.48M | qualifiers = *qualifiers_list; |
1084 | | |
1085 | 6.48M | DEBUG_TRACE ("complete qualifiers using list %d", i); |
1086 | | #ifdef DEBUG_AARCH64 |
1087 | | if (debug_dump) |
1088 | | dump_qualifier_sequence (qualifiers); |
1089 | | #endif |
1090 | | |
1091 | 21.5M | for (j = 0; j <= stop_at; ++j, ++qualifiers) |
1092 | 15.0M | ret[j] = *qualifiers; |
1093 | 36.7M | for (; j < AARCH64_MAX_OPND_NUM; ++j) |
1094 | 30.2M | ret[j] = AARCH64_OPND_QLF_NIL; |
1095 | | |
1096 | 6.48M | DEBUG_TRACE ("SUCCESS"); |
1097 | 6.48M | return 1; |
1098 | 6.48M | } |
1099 | | |
1100 | 47.2k | DEBUG_TRACE ("FAIL"); |
1101 | 47.2k | return 0; |
1102 | 6.52M | } |
1103 | | |
1104 | | /* Operand qualifier matching and resolving. |
1105 | | |
1106 | | Return 1 if the operand qualifier(s) in *INST match one of the qualifier |
1107 | | sequences in INST->OPCODE->qualifiers_list; otherwise return 0. |
1108 | | |
1109 | | Store the smallest number of non-matching qualifiers in *INVALID_COUNT. |
1110 | | This is always 0 if the function succeeds. |
1111 | | |
1112 | | if UPDATE_P, update the qualifier(s) in *INST after the matching |
1113 | | succeeds. */ |
1114 | | |
1115 | | static int |
1116 | | match_operands_qualifier (aarch64_inst *inst, bool update_p, |
1117 | | int *invalid_count) |
1118 | 5.70M | { |
1119 | 5.70M | int i; |
1120 | 5.70M | aarch64_opnd_qualifier_seq_t qualifiers; |
1121 | | |
1122 | 5.70M | if (!aarch64_find_best_match (inst, inst->opcode->qualifiers_list, -1, |
1123 | 5.70M | qualifiers, invalid_count)) |
1124 | 25.8k | { |
1125 | 25.8k | DEBUG_TRACE ("matching FAIL"); |
1126 | 25.8k | return 0; |
1127 | 25.8k | } |
1128 | | |
1129 | | /* Update the qualifiers. */ |
1130 | 5.68M | if (update_p) |
1131 | 18.7M | for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i) |
1132 | 18.7M | { |
1133 | 18.7M | if (inst->opcode->operands[i] == AARCH64_OPND_NIL) |
1134 | 5.68M | break; |
1135 | 13.0M | DEBUG_TRACE_IF (inst->operands[i].qualifier != qualifiers[i], |
1136 | 13.0M | "update %s with %s for operand %d", |
1137 | 13.0M | aarch64_get_qualifier_name (inst->operands[i].qualifier), |
1138 | 13.0M | aarch64_get_qualifier_name (qualifiers[i]), i); |
1139 | 13.0M | inst->operands[i].qualifier = qualifiers[i]; |
1140 | 13.0M | } |
1141 | | |
1142 | 5.68M | DEBUG_TRACE ("matching SUCCESS"); |
1143 | 5.68M | return 1; |
1144 | 5.70M | } |
1145 | | |
1146 | | /* Return TRUE if VALUE is a wide constant that can be moved into a general |
1147 | | register by MOVZ. |
1148 | | |
1149 | | IS32 indicates whether VALUE is a 32-bit immediate or not. |
1150 | | If SHIFT_AMOUNT is not NULL, on the return of TRUE, the logical left shift |
1151 | | amount will be returned in *SHIFT_AMOUNT. */ |
1152 | | |
1153 | | bool |
1154 | | aarch64_wide_constant_p (uint64_t value, int is32, unsigned int *shift_amount) |
1155 | 77.9k | { |
1156 | 77.9k | int amount; |
1157 | | |
1158 | 77.9k | DEBUG_TRACE ("enter with 0x%" PRIx64 "(%" PRIi64 ")", value, value); |
1159 | | |
1160 | 77.9k | if (is32) |
1161 | 26.0k | { |
1162 | | /* Allow all zeros or all ones in top 32-bits, so that |
1163 | | 32-bit constant expressions like ~0x80000000 are |
1164 | | permitted. */ |
1165 | 26.0k | if (value >> 32 != 0 && value >> 32 != 0xffffffff) |
1166 | | /* Immediate out of range. */ |
1167 | 0 | return false; |
1168 | 26.0k | value &= 0xffffffff; |
1169 | 26.0k | } |
1170 | | |
1171 | | /* first, try movz then movn */ |
1172 | 77.9k | amount = -1; |
1173 | 77.9k | if ((value & ((uint64_t) 0xffff << 0)) == value) |
1174 | 31.7k | amount = 0; |
1175 | 46.1k | else if ((value & ((uint64_t) 0xffff << 16)) == value) |
1176 | 8.48k | amount = 16; |
1177 | 37.6k | else if (!is32 && (value & ((uint64_t) 0xffff << 32)) == value) |
1178 | 9.50k | amount = 32; |
1179 | 28.1k | else if (!is32 && (value & ((uint64_t) 0xffff << 48)) == value) |
1180 | 4.04k | amount = 48; |
1181 | | |
1182 | 77.9k | if (amount == -1) |
1183 | 24.1k | { |
1184 | 24.1k | DEBUG_TRACE ("exit false with 0x%" PRIx64 "(%" PRIi64 ")", value, value); |
1185 | 24.1k | return false; |
1186 | 24.1k | } |
1187 | | |
1188 | 53.8k | if (shift_amount != NULL) |
1189 | 0 | *shift_amount = amount; |
1190 | | |
1191 | 53.8k | DEBUG_TRACE ("exit true with amount %d", amount); |
1192 | | |
1193 | 53.8k | return true; |
1194 | 77.9k | } |
1195 | | |
1196 | | /* Build the accepted values for immediate logical SIMD instructions. |
1197 | | |
1198 | | The standard encodings of the immediate value are: |
1199 | | N imms immr SIMD size R S |
1200 | | 1 ssssss rrrrrr 64 UInt(rrrrrr) UInt(ssssss) |
1201 | | 0 0sssss 0rrrrr 32 UInt(rrrrr) UInt(sssss) |
1202 | | 0 10ssss 00rrrr 16 UInt(rrrr) UInt(ssss) |
1203 | | 0 110sss 000rrr 8 UInt(rrr) UInt(sss) |
1204 | | 0 1110ss 0000rr 4 UInt(rr) UInt(ss) |
1205 | | 0 11110s 00000r 2 UInt(r) UInt(s) |
1206 | | where all-ones value of S is reserved. |
1207 | | |
1208 | | Let's call E the SIMD size. |
1209 | | |
1210 | | The immediate value is: S+1 bits '1' rotated to the right by R. |
1211 | | |
1212 | | The total of valid encodings is 64*63 + 32*31 + ... + 2*1 = 5334 |
1213 | | (remember S != E - 1). */ |
1214 | | |
1215 | 225k | #define TOTAL_IMM_NB 5334 |
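
  [Illustrative aside, not part of the file above: the 5334 figure follows
   from each element size E contributing E * (E - 1) encodings, since S ranges
   over [0, E-2] and R over [0, E-1], giving
   2*1 + 4*3 + 8*7 + 16*15 + 32*31 + 64*63 = 5334. A short check:

  #include <stdio.h>

  int
  main (void)
  {
    int total = 0;
    for (unsigned e = 2; e <= 64; e <<= 1)
      total += e * (e - 1);        /* 2*1 + 4*3 + ... + 64*63 */
    printf ("%d\n", total);        /* Prints 5334.  */
    return 0;
  }
  ]
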
1216 | | |
1217 | | typedef struct |
1218 | | { |
1219 | | uint64_t imm; |
1220 | | aarch64_insn encoding; |
1221 | | } simd_imm_encoding; |
1222 | | |
1223 | | static simd_imm_encoding simd_immediates[TOTAL_IMM_NB]; |
1224 | | |
1225 | | static int |
1226 | | simd_imm_encoding_cmp(const void *i1, const void *i2) |
1227 | 2.70M | { |
1228 | 2.70M | const simd_imm_encoding *imm1 = (const simd_imm_encoding *)i1; |
1229 | 2.70M | const simd_imm_encoding *imm2 = (const simd_imm_encoding *)i2; |
1230 | | |
1231 | 2.70M | if (imm1->imm < imm2->imm) |
1232 | 1.37M | return -1; |
1233 | 1.33M | if (imm1->imm > imm2->imm) |
1234 | 1.10M | return +1; |
1235 | 225k | return 0; |
1236 | 1.33M | } |
1237 | | |
1238 | | /* immediate bitfield standard encoding |
1239 | | imm13<12> imm13<5:0> imm13<11:6> SIMD size R S |
1240 | | 1 ssssss rrrrrr 64 rrrrrr ssssss |
1241 | | 0 0sssss 0rrrrr 32 rrrrr sssss |
1242 | | 0 10ssss 00rrrr 16 rrrr ssss |
1243 | | 0 110sss 000rrr 8 rrr sss |
1244 | | 0 1110ss 0000rr 4 rr ss |
1245 | | 0 11110s 00000r 2 r s */ |
1246 | | static inline int |
1247 | | encode_immediate_bitfield (int is64, uint32_t s, uint32_t r) |
1248 | 10.6k | { |
1249 | 10.6k | return (is64 << 12) | (r << 6) | s; |
1250 | 10.6k | } |
1251 | | |
1252 | | static void |
1253 | | build_immediate_table (void) |
1254 | 2 | { |
1255 | 2 | uint32_t log_e, e, s, r, s_mask; |
1256 | 2 | uint64_t mask, imm; |
1257 | 2 | int nb_imms; |
1258 | 2 | int is64; |
1259 | | |
1260 | 2 | nb_imms = 0; |
1261 | 14 | for (log_e = 1; log_e <= 6; log_e++) |
1262 | 12 | { |
1263 | | /* Get element size. */ |
1264 | 12 | e = 1u << log_e; |
1265 | 12 | if (log_e == 6) |
1266 | 2 | { |
1267 | 2 | is64 = 1; |
1268 | 2 | mask = 0xffffffffffffffffull; |
1269 | 2 | s_mask = 0; |
1270 | 2 | } |
1271 | 10 | else |
1272 | 10 | { |
1273 | 10 | is64 = 0; |
1274 | 10 | mask = (1ull << e) - 1; |
1275 | | /* log_e s_mask |
1276 | | 1 ((1 << 4) - 1) << 2 = 111100 |
1277 | | 2 ((1 << 3) - 1) << 3 = 111000 |
1278 | | 3 ((1 << 2) - 1) << 4 = 110000 |
1279 | | 4 ((1 << 1) - 1) << 5 = 100000 |
1280 | | 5 ((1 << 0) - 1) << 6 = 000000 */ |
1281 | 10 | s_mask = ((1u << (5 - log_e)) - 1) << (log_e + 1); |
1282 | 10 | } |
1283 | 252 | for (s = 0; s < e - 1; s++) |
1284 | 10.9k | for (r = 0; r < e; r++) |
1285 | 10.6k | { |
1286 | | /* s+1 consecutive bits to 1 (s < 63) */ |
1287 | 10.6k | imm = (1ull << (s + 1)) - 1; |
1288 | | /* rotate right by r */ |
1289 | 10.6k | if (r != 0) |
1290 | 10.4k | imm = (imm >> r) | ((imm << (e - r)) & mask); |
1291 | | /* replicate the constant depending on SIMD size */ |
1292 | 10.6k | switch (log_e) |
1293 | 10.6k | { |
1294 | 4 | case 1: imm = (imm << 2) | imm; |
1295 | | /* Fall through. */ |
1296 | 28 | case 2: imm = (imm << 4) | imm; |
1297 | | /* Fall through. */ |
1298 | 140 | case 3: imm = (imm << 8) | imm; |
1299 | | /* Fall through. */ |
1300 | 620 | case 4: imm = (imm << 16) | imm; |
1301 | | /* Fall through. */ |
1302 | 2.60k | case 5: imm = (imm << 32) | imm; |
1303 | | /* Fall through. */ |
1304 | 10.6k | case 6: break; |
1305 | 0 | default: abort (); |
1306 | 10.6k | } |
1307 | 10.6k | simd_immediates[nb_imms].imm = imm; |
1308 | 10.6k | simd_immediates[nb_imms].encoding = |
1309 | 10.6k | encode_immediate_bitfield(is64, s | s_mask, r); |
1310 | 10.6k | nb_imms++; |
1311 | 10.6k | } |
1312 | 12 | } |
1313 | 2 | assert (nb_imms == TOTAL_IMM_NB); |
1314 | 2 | qsort(simd_immediates, nb_imms, |
1315 | 2 | sizeof(simd_immediates[0]), simd_imm_encoding_cmp); |
1316 | 2 | } |
1317 | | |
1318 | | /* Return TRUE if VALUE is a valid logical immediate, i.e. bitmask, that can |
1319 | | be accepted by logical (immediate) instructions |
1320 | | e.g. ORR <Xd|SP>, <Xn>, #<imm>. |
1321 | | |
1322 | | ESIZE is the number of bytes in the decoded immediate value. |
1323 | | If ENCODING is not NULL, on the return of TRUE, the standard encoding for |
1324 | | VALUE will be returned in *ENCODING. */ |
1325 | | |
1326 | | bool |
1327 | | aarch64_logical_immediate_p (uint64_t value, int esize, aarch64_insn *encoding) |
1328 | 225k | { |
1329 | 225k | simd_imm_encoding imm_enc; |
1330 | 225k | const simd_imm_encoding *imm_encoding; |
1331 | 225k | static bool initialized = false; |
1332 | 225k | uint64_t upper; |
1333 | 225k | int i; |
1334 | | |
1335 | 225k | DEBUG_TRACE ("enter with 0x%" PRIx64 "(%" PRIi64 "), esize: %d", value, |
1336 | 225k | value, esize); |
1337 | | |
1338 | 225k | if (!initialized) |
1339 | 2 | { |
1340 | 2 | build_immediate_table (); |
1341 | 2 | initialized = true; |
1342 | 2 | } |
1343 | | |
1344 | | /* Allow all zeros or all ones in top bits, so that |
1345 | | constant expressions like ~1 are permitted. */ |
1346 | 225k | upper = (uint64_t) -1 << (esize * 4) << (esize * 4); |
1347 | 225k | if ((value & ~upper) != value && (value | upper) != value) |
1348 | 0 | return false; |
1349 | | |
1350 | | /* Replicate to a full 64-bit value. */ |
1351 | 225k | value &= ~upper; |
1352 | 376k | for (i = esize * 8; i < 64; i *= 2) |
1353 | 151k | value |= (value << i); |
1354 | | |
1355 | 225k | imm_enc.imm = value; |
1356 | 225k | imm_encoding = (const simd_imm_encoding *) |
1357 | 225k | bsearch(&imm_enc, simd_immediates, TOTAL_IMM_NB, |
1358 | 225k | sizeof(simd_immediates[0]), simd_imm_encoding_cmp); |
1359 | 225k | if (imm_encoding == NULL) |
1360 | 0 | { |
1361 | 0 | DEBUG_TRACE ("exit with false"); |
1362 | 0 | return false; |
1363 | 0 | } |
1364 | 225k | if (encoding != NULL) |
1365 | 0 | *encoding = imm_encoding->encoding; |
1366 | 225k | DEBUG_TRACE ("exit with true"); |
1367 | 225k | return true; |
1368 | 225k | } |
1369 | | |
1370 | | /* If 64-bit immediate IMM is in the format of |
1371 | | "aaaaaaaabbbbbbbbccccccccddddddddeeeeeeeeffffffffgggggggghhhhhhhh", |
1372 | | where a, b, c, d, e, f, g and h are independently 0 or 1, return an integer |
1373 | | of value "abcdefgh". Otherwise return -1. */ |
1374 | | int |
1375 | | aarch64_shrink_expanded_imm8 (uint64_t imm) |
1376 | 108 | { |
1377 | 108 | int i, ret; |
1378 | 108 | uint32_t byte; |
1379 | | |
1380 | 108 | ret = 0; |
1381 | 972 | for (i = 0; i < 8; i++) |
1382 | 864 | { |
1383 | 864 | byte = (imm >> (8 * i)) & 0xff; |
1384 | 864 | if (byte == 0xff) |
1385 | 389 | ret |= 1 << i; |
1386 | 475 | else if (byte != 0x00) |
1387 | 0 | return -1; |
1388 | 864 | } |
1389 | 108 | return ret; |
1390 | 108 | } |
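
  [Illustrative aside, not part of the file above: a standalone sketch of the
   inverse mapping, expanding an 8-bit value "abcdefgh" so that each set bit
   becomes a 0xff byte; running aarch64_shrink_expanded_imm8 on the result
   recovers the original 8 bits. The expand_imm8 helper is hypothetical and
   exists only for illustration.

  #include <stdint.h>
  #include <stdio.h>

  static uint64_t
  expand_imm8 (int imm8)
  {
    uint64_t out = 0;
    for (int i = 0; i < 8; i++)
      if (imm8 & (1 << i))
        out |= (uint64_t) 0xff << (8 * i);
    return out;
  }

  int
  main (void)
  {
    /* 0xa5 -> bytes 0, 2, 5 and 7 set to 0xff.  */
    printf ("0x%016llx\n", (unsigned long long) expand_imm8 (0xa5));
    /* Prints 0xff00ff0000ff00ff, which shrinks back to 0xa5.  */
    return 0;
  }
  ]
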
1391 | | |
1392 | | /* Utility inline functions for operand_general_constraint_met_p. */ |
1393 | | |
1394 | | static inline void |
1395 | | set_error (aarch64_operand_error *mismatch_detail, |
1396 | | enum aarch64_operand_error_kind kind, int idx, |
1397 | | const char* error) |
1398 | 0 | { |
1399 | 0 | if (mismatch_detail == NULL) |
1400 | 0 | return; |
1401 | 0 | mismatch_detail->kind = kind; |
1402 | 0 | mismatch_detail->index = idx; |
1403 | 0 | mismatch_detail->error = error; |
1404 | 0 | } |
1405 | | |
1406 | | static inline void |
1407 | | set_syntax_error (aarch64_operand_error *mismatch_detail, int idx, |
1408 | | const char* error) |
1409 | 1.96k | { |
1410 | 1.96k | if (mismatch_detail == NULL) |
1411 | 1.96k | return; |
1412 | 0 | set_error (mismatch_detail, AARCH64_OPDE_SYNTAX_ERROR, idx, error); |
1413 | 0 | } |
1414 | | |
1415 | | static inline void |
1416 | | set_invalid_regno_error (aarch64_operand_error *mismatch_detail, int idx, |
1417 | | const char *prefix, int lower_bound, int upper_bound) |
1418 | 0 | { |
1419 | 0 | if (mismatch_detail == NULL) |
1420 | 0 | return; |
1421 | 0 | set_error (mismatch_detail, AARCH64_OPDE_INVALID_REGNO, idx, NULL); |
1422 | 0 | mismatch_detail->data[0].s = prefix; |
1423 | 0 | mismatch_detail->data[1].i = lower_bound; |
1424 | 0 | mismatch_detail->data[2].i = upper_bound; |
1425 | 0 | } |
1426 | | |
1427 | | static inline void |
1428 | | set_out_of_range_error (aarch64_operand_error *mismatch_detail, |
1429 | | int idx, int lower_bound, int upper_bound, |
1430 | | const char* error) |
1431 | 0 | { |
1432 | 0 | if (mismatch_detail == NULL) |
1433 | 0 | return; |
1434 | 0 | set_error (mismatch_detail, AARCH64_OPDE_OUT_OF_RANGE, idx, error); |
1435 | 0 | mismatch_detail->data[0].i = lower_bound; |
1436 | 0 | mismatch_detail->data[1].i = upper_bound; |
1437 | 0 | } |
1438 | | |
1439 | | static inline void |
1440 | | set_imm_out_of_range_error (aarch64_operand_error *mismatch_detail, |
1441 | | int idx, int lower_bound, int upper_bound) |
1442 | 89.1k | { |
1443 | 89.1k | if (mismatch_detail == NULL) |
1444 | 89.1k | return; |
1445 | 0 | set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound, |
1446 | 0 | _("immediate value")); |
1447 | 0 | } |
1448 | | |
1449 | | static inline void |
1450 | | set_offset_out_of_range_error (aarch64_operand_error *mismatch_detail, |
1451 | | int idx, int lower_bound, int upper_bound) |
1452 | 0 | { |
1453 | 0 | if (mismatch_detail == NULL) |
1454 | 0 | return; |
1455 | 0 | set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound, |
1456 | 0 | _("immediate offset")); |
1457 | 0 | } |
1458 | | |
1459 | | static inline void |
1460 | | set_regno_out_of_range_error (aarch64_operand_error *mismatch_detail, |
1461 | | int idx, int lower_bound, int upper_bound) |
1462 | 0 | { |
1463 | 0 | if (mismatch_detail == NULL) |
1464 | 0 | return; |
1465 | 0 | set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound, |
1466 | 0 | _("register number")); |
1467 | 0 | } |
1468 | | |
1469 | | static inline void |
1470 | | set_elem_idx_out_of_range_error (aarch64_operand_error *mismatch_detail, |
1471 | | int idx, int lower_bound, int upper_bound) |
1472 | 446 | { |
1473 | 446 | if (mismatch_detail == NULL) |
1474 | 446 | return; |
1475 | 0 | set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound, |
1476 | 0 | _("register element index")); |
1477 | 0 | } |
1478 | | |
1479 | | static inline void |
1480 | | set_sft_amount_out_of_range_error (aarch64_operand_error *mismatch_detail, |
1481 | | int idx, int lower_bound, int upper_bound) |
1482 | 77.7k | { |
1483 | 77.7k | if (mismatch_detail == NULL) |
1484 | 77.7k | return; |
1485 | 0 | set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound, |
1486 | 0 | _("shift amount")); |
1487 | 0 | } |
1488 | | |
1489 | | /* Report that the MUL modifier in operand IDX should be in the range |
1490 | | [LOWER_BOUND, UPPER_BOUND]. */ |
1491 | | static inline void |
1492 | | set_multiplier_out_of_range_error (aarch64_operand_error *mismatch_detail, |
1493 | | int idx, int lower_bound, int upper_bound) |
1494 | 0 | { |
1495 | 0 | if (mismatch_detail == NULL) |
1496 | 0 | return; |
1497 | 0 | set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound, |
1498 | 0 | _("multiplier")); |
1499 | 0 | } |
1500 | | |
1501 | | static inline void |
1502 | | set_unaligned_error (aarch64_operand_error *mismatch_detail, int idx, |
1503 | | int alignment) |
1504 | 0 | { |
1505 | 0 | if (mismatch_detail == NULL) |
1506 | 0 | return; |
1507 | 0 | set_error (mismatch_detail, AARCH64_OPDE_UNALIGNED, idx, NULL); |
1508 | 0 | mismatch_detail->data[0].i = alignment; |
1509 | 0 | } |
1510 | | |
1511 | | static inline void |
1512 | | set_reg_list_length_error (aarch64_operand_error *mismatch_detail, int idx, |
1513 | | int expected_num) |
1514 | 440 | { |
1515 | 440 | if (mismatch_detail == NULL) |
1516 | 440 | return; |
1517 | 0 | set_error (mismatch_detail, AARCH64_OPDE_REG_LIST_LENGTH, idx, NULL); |
1518 | 0 | mismatch_detail->data[0].i = 1 << expected_num; |
1519 | 0 | } |
1520 | | |
1521 | | static inline void |
1522 | | set_reg_list_stride_error (aarch64_operand_error *mismatch_detail, int idx, |
1523 | | int expected_num) |
1524 | 0 | { |
1525 | 0 | if (mismatch_detail == NULL) |
1526 | 0 | return; |
1527 | 0 | set_error (mismatch_detail, AARCH64_OPDE_REG_LIST_STRIDE, idx, NULL); |
1528 | 0 | mismatch_detail->data[0].i = 1 << expected_num; |
1529 | 0 | } |
1530 | | |
1531 | | static inline void |
1532 | | set_invalid_vg_size (aarch64_operand_error *mismatch_detail, |
1533 | | int idx, int expected) |
1534 | 0 | { |
1535 | 0 | if (mismatch_detail == NULL) |
1536 | 0 | return; |
1537 | 0 | set_error (mismatch_detail, AARCH64_OPDE_INVALID_VG_SIZE, idx, NULL); |
1538 | 0 | mismatch_detail->data[0].i = expected; |
1539 | 0 | } |
1540 | | |
1541 | | static inline void |
1542 | | set_other_error (aarch64_operand_error *mismatch_detail, int idx, |
1543 | | const char* error) |
1544 | 26.8k | { |
1545 | 26.8k | if (mismatch_detail == NULL) |
1546 | 26.8k | return; |
1547 | 0 | set_error (mismatch_detail, AARCH64_OPDE_OTHER_ERROR, idx, error); |
1548 | 0 | } |
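 | | |
 | | /* A minimal sketch, not part of the original source, of how a consumer |
 | | of libopcodes might turn the error records filled in by the helpers |
 | | above into a message. It relies only on the aarch64_operand_error |
 | | fields used in this file (kind, index, error and data[]); the function |
 | | name and the AARCH64_OPC_EXAMPLES guard are hypothetical. */ |
 | | #ifdef AARCH64_OPC_EXAMPLES |
 | | static void |
 | | print_operand_error_sketch (const aarch64_operand_error *err) |
 | | { |
 | | switch (err->kind) |
 | | { |
 | | case AARCH64_OPDE_OUT_OF_RANGE: |
 | | /* data[0].i and data[1].i hold the lower and upper bounds. */ |
 | | fprintf (stderr, "operand %d: %s must be between %d and %d\n", |
 | | err->index, err->error ? err->error : "value", |
 | | err->data[0].i, err->data[1].i); |
 | | break; |
 | | case AARCH64_OPDE_UNALIGNED: |
 | | /* data[0].i holds the required alignment. */ |
 | | fprintf (stderr, "operand %d: offset must be a multiple of %d\n", |
 | | err->index, err->data[0].i); |
 | | break; |
 | | default: |
 | | fprintf (stderr, "operand %d: %s\n", err->index, |
 | | err->error ? err->error : "invalid operand"); |
 | | break; |
 | | } |
 | | } |
 | | #endif /* AARCH64_OPC_EXAMPLES */ |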
1549 | | |
1550 | | /* Check that indexed register operand OPND has a register in the range |
1551 | | [MIN_REGNO, MAX_REGNO] and an index in the range [MIN_INDEX, MAX_INDEX]. |
1552 | | PREFIX is the register prefix, such as "z" for SVE vector registers. */ |
1553 | | |
1554 | | static bool |
1555 | | check_reglane (const aarch64_opnd_info *opnd, |
1556 | | aarch64_operand_error *mismatch_detail, int idx, |
1557 | | const char *prefix, int min_regno, int max_regno, |
1558 | | int min_index, int max_index) |
1559 | 25.0k | { |
1560 | 25.0k | if (!value_in_range_p (opnd->reglane.regno, min_regno, max_regno)) |
1561 | 0 | { |
1562 | 0 | set_invalid_regno_error (mismatch_detail, idx, prefix, min_regno, |
1563 | 0 | max_regno); |
1564 | 0 | return false; |
1565 | 0 | } |
1566 | 25.0k | if (!value_in_range_p (opnd->reglane.index, min_index, max_index)) |
1567 | 0 | { |
1568 | 0 | set_elem_idx_out_of_range_error (mismatch_detail, idx, min_index, |
1569 | 0 | max_index); |
1570 | 0 | return false; |
1571 | 0 | } |
1572 | 25.0k | return true; |
1573 | 25.0k | } |
1574 | | |
1575 | | /* Check that register list operand OPND has NUM_REGS registers and a |
1576 | | register stride of STRIDE. */ |
1577 | | |
1578 | | static bool |
1579 | | check_reglist (const aarch64_opnd_info *opnd, |
1580 | | aarch64_operand_error *mismatch_detail, int idx, |
1581 | | int num_regs, int stride) |
1582 | 246k | { |
1583 | 246k | if (opnd->reglist.num_regs != num_regs) |
1584 | 440 | { |
1585 | 440 | set_reg_list_length_error (mismatch_detail, idx, num_regs); |
1586 | 440 | return false; |
1587 | 440 | } |
1588 | 246k | if (opnd->reglist.stride != stride) |
1589 | 0 | { |
1590 | 0 | set_reg_list_stride_error (mismatch_detail, idx, stride); |
1591 | 0 | return false; |
1592 | 0 | } |
1593 | 246k | return true; |
1594 | 246k | } |
1595 | | |
1596 | | /* Check that indexed ZA operand OPND has: |
1597 | | |
1598 | | - a selection register in the range [MIN_WREG, MIN_WREG + 3] |
1599 | | |
1600 | | - RANGE_SIZE consecutive immediate offsets. |
1601 | | |
1602 | | - an initial immediate offset that is a multiple of RANGE_SIZE |
1603 | | in the range [0, MAX_VALUE * RANGE_SIZE] |
1604 | | |
1605 | | - a vector group size of GROUP_SIZE. */ |
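 | | /* For example (illustrative): with MIN_WREG = 8, MAX_VALUE = 3 and |
 | | RANGE_SIZE = 2, as used for AARCH64_OPND_SME_ZA_array_off2x2 below, |
 | | the selection register must be one of w8-w11 and the operand must |
 | | name a pair of consecutive offsets starting at 0, 2, 4 or 6. */ |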
1606 | | |
1607 | | static bool |
1608 | | check_za_access (const aarch64_opnd_info *opnd, |
1609 | | aarch64_operand_error *mismatch_detail, int idx, |
1610 | | int min_wreg, int max_value, unsigned int range_size, |
1611 | | int group_size) |
1612 | 54.9k | { |
1613 | 54.9k | if (!value_in_range_p (opnd->indexed_za.index.regno, min_wreg, min_wreg + 3)) |
1614 | 0 | { |
1615 | 0 | if (min_wreg == 12) |
1616 | 0 | set_other_error (mismatch_detail, idx, |
1617 | 0 | _("expected a selection register in the" |
1618 | 0 | " range w12-w15")); |
1619 | 0 | else if (min_wreg == 8) |
1620 | 0 | set_other_error (mismatch_detail, idx, |
1621 | 0 | _("expected a selection register in the" |
1622 | 0 | " range w8-w11")); |
1623 | 0 | else |
1624 | 0 | abort (); |
1625 | 0 | return false; |
1626 | 0 | } |
1627 | | |
1628 | 54.9k | int max_index = max_value * range_size; |
1629 | 54.9k | if (!value_in_range_p (opnd->indexed_za.index.imm, 0, max_index)) |
1630 | 0 | { |
1631 | 0 | set_offset_out_of_range_error (mismatch_detail, idx, 0, max_index); |
1632 | 0 | return false; |
1633 | 0 | } |
1634 | | |
1635 | 54.9k | if ((opnd->indexed_za.index.imm % range_size) != 0) |
1636 | 0 | { |
1637 | 0 | assert (range_size == 2 || range_size == 4); |
1638 | 0 | set_other_error (mismatch_detail, idx, |
1639 | 0 | range_size == 2 |
1640 | 0 | ? _("starting offset is not a multiple of 2") |
1641 | 0 | : _("starting offset is not a multiple of 4")); |
1642 | 0 | return false; |
1643 | 0 | } |
1644 | | |
1645 | 54.9k | if (opnd->indexed_za.index.countm1 != range_size - 1) |
1646 | 0 | { |
1647 | 0 | if (range_size == 1) |
1648 | 0 | set_other_error (mismatch_detail, idx, |
1649 | 0 | _("expected a single offset rather than" |
1650 | 0 | " a range")); |
1651 | 0 | else if (range_size == 2) |
1652 | 0 | set_other_error (mismatch_detail, idx, |
1653 | 0 | _("expected a range of two offsets")); |
1654 | 0 | else if (range_size == 4) |
1655 | 0 | set_other_error (mismatch_detail, idx, |
1656 | 0 | _("expected a range of four offsets")); |
1657 | 0 | else |
1658 | 0 | abort (); |
1659 | 0 | return false; |
1660 | 0 | } |
1661 | | |
1662 | | /* The vector group specifier is optional in assembly code. */ |
1663 | 54.9k | if (opnd->indexed_za.group_size != 0 |
1664 | 54.9k | && opnd->indexed_za.group_size != group_size) |
1665 | 0 | { |
1666 | 0 | set_invalid_vg_size (mismatch_detail, idx, group_size); |
1667 | 0 | return false; |
1668 | 0 | } |
1669 | | |
1670 | 54.9k | return true; |
1671 | 54.9k | } |
1672 | | |
1673 | | /* Given a load/store operation, calculate the size of the transferred data |
1674 | | as the cumulative sum of the qualifier sizes of the operands that precede |
1675 | | the address operand in the OPNDS operand list argument. */ |
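 | | /* For example (illustrative): a load/store of a pair of X registers has |
 | | two 8-byte qualifiers before the address operand, giving 16 bytes. */ |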
1676 | | int |
1677 | | calc_ldst_datasize (const aarch64_opnd_info *opnds) |
1678 | 5.54k | { |
1679 | 5.54k | unsigned num_bytes = 0; /* total number of bytes transferred. */ |
1680 | 5.54k | enum aarch64_operand_class opnd_class; |
1681 | 5.54k | enum aarch64_opnd type; |
1682 | | |
1683 | 16.6k | for (int i = 0; i < AARCH64_MAX_OPND_NUM; i++) |
1684 | 16.6k | { |
1685 | 16.6k | type = opnds[i].type; |
1686 | 16.6k | opnd_class = aarch64_operands[type].op_class; |
1687 | 16.6k | if (opnd_class == AARCH64_OPND_CLASS_ADDRESS) |
1688 | 5.54k | break; |
1689 | 11.0k | num_bytes += aarch64_get_qualifier_esize (opnds[i].qualifier); |
1690 | 11.0k | } |
1691 | 5.54k | return num_bytes; |
1692 | 5.54k | } |
1693 | | |
1694 | | |
1695 | | /* General constraint checking based on operand code. |
1696 | | |
1697 | | Return 1 if OPNDS[IDX] meets the general constraint of operand code TYPE |
1698 | | as the IDXth operand of opcode OPCODE. Otherwise return 0. |
1699 | | |
1700 | | This function has to be called after the qualifiers for all operands |
1701 | | have been resolved. |
1702 | | |
1703 | | A mismatch error message is returned in *MISMATCH_DETAIL upon request, |
1704 | | i.e. when MISMATCH_DETAIL is non-NULL. This avoids generating error |
1705 | | messages during disassembly, where they are not wanted. We avoid |
1706 | | dynamically constructing error message strings |
1707 | | here (i.e. in libopcodes), as it is costly and complicated; instead, we |
1708 | | use a combination of error code, static string and some integer data to |
1709 | | represent an error. */ |
1710 | | |
1711 | | static int |
1712 | | operand_general_constraint_met_p (const aarch64_opnd_info *opnds, int idx, |
1713 | | enum aarch64_opnd type, |
1714 | | const aarch64_opcode *opcode, |
1715 | | aarch64_operand_error *mismatch_detail) |
1716 | 12.9M | { |
1717 | 12.9M | unsigned num, modifiers, shift; |
1718 | 12.9M | unsigned char size; |
1719 | 12.9M | int64_t imm, min_value, max_value; |
1720 | 12.9M | uint64_t uvalue, mask; |
1721 | 12.9M | const aarch64_opnd_info *opnd = opnds + idx; |
1722 | 12.9M | aarch64_opnd_qualifier_t qualifier = opnd->qualifier; |
1723 | 12.9M | int i; |
1724 | | |
1725 | 12.9M | assert (opcode->operands[idx] == opnd->type && opnd->type == type); |
1726 | | |
1727 | 12.9M | switch (aarch64_operands[type].op_class) |
1728 | 12.9M | { |
1729 | 4.55M | case AARCH64_OPND_CLASS_INT_REG: |
1730 | | /* Check for pair of xzr registers. */ |
1731 | 4.55M | if (type == AARCH64_OPND_PAIRREG_OR_XZR |
1732 | 4.55M | && opnds[idx - 1].reg.regno == 0x1f) |
1733 | 24 | { |
1734 | 24 | if (opnds[idx].reg.regno != 0x1f) |
1735 | 0 | { |
1736 | 0 | set_syntax_error (mismatch_detail, idx - 1, |
1737 | 0 | _("second reg in pair should be xzr if first is" |
1738 | 0 | " xzr")); |
1739 | 0 | return 0; |
1740 | 0 | } |
1741 | 24 | } |
1742 | | /* Check pair reg constraints for instructions taking a pair of |
1743 | | consecutively-numbered general-purpose registers. */ |
1744 | 4.55M | else if (type == AARCH64_OPND_PAIRREG |
1745 | 4.55M | || type == AARCH64_OPND_PAIRREG_OR_XZR) |
1746 | 4.57k | { |
1747 | 4.57k | assert (idx == 1 || idx == 2 || idx == 3 || idx == 5); |
1748 | 4.57k | if (opnds[idx - 1].reg.regno % 2 != 0) |
1749 | 1.96k | { |
1750 | 1.96k | set_syntax_error (mismatch_detail, idx - 1, |
1751 | 1.96k | _("reg pair must start from even reg")); |
1752 | 1.96k | return 0; |
1753 | 1.96k | } |
1754 | 2.60k | if (opnds[idx].reg.regno != opnds[idx - 1].reg.regno + 1) |
1755 | 0 | { |
1756 | 0 | set_syntax_error (mismatch_detail, idx, |
1757 | 0 | _("reg pair must be contiguous")); |
1758 | 0 | return 0; |
1759 | 0 | } |
1760 | 2.60k | break; |
1761 | 2.60k | } |
1762 | | |
1763 | | /* <Xt> may be optional in some IC and TLBI instructions. */ |
1764 | 4.54M | if (type == AARCH64_OPND_Rt_SYS) |
1765 | 208 | { |
1766 | 208 | assert (idx == 1 && (aarch64_get_operand_class (opnds[0].type) |
1767 | 208 | == AARCH64_OPND_CLASS_SYSTEM)); |
1768 | 208 | if (opnds[1].present |
1769 | 208 | && !aarch64_sys_ins_reg_has_xt (opnds[0].sysins_op)) |
1770 | 0 | { |
1771 | 0 | set_other_error (mismatch_detail, idx, _("extraneous register")); |
1772 | 0 | return 0; |
1773 | 0 | } |
1774 | 208 | if (!opnds[1].present |
1775 | 208 | && aarch64_sys_ins_reg_has_xt (opnds[0].sysins_op)) |
1776 | 0 | { |
1777 | 0 | set_other_error (mismatch_detail, idx, _("missing register")); |
1778 | 0 | return 0; |
1779 | 0 | } |
1780 | 208 | } |
1781 | 4.54M | switch (qualifier) |
1782 | 4.54M | { |
1783 | 2.42k | case AARCH64_OPND_QLF_WSP: |
1784 | 6.62k | case AARCH64_OPND_QLF_SP: |
1785 | 6.62k | if (!aarch64_stack_pointer_p (opnd)) |
1786 | 4.17k | { |
1787 | 4.17k | set_other_error (mismatch_detail, idx, |
1788 | 4.17k | _("stack pointer register expected")); |
1789 | 4.17k | return 0; |
1790 | 4.17k | } |
1791 | 2.45k | break; |
1792 | 4.53M | default: |
1793 | 4.53M | break; |
1794 | 4.54M | } |
1795 | 4.54M | break; |
1796 | | |
1797 | 4.54M | case AARCH64_OPND_CLASS_SVE_REG: |
1798 | 842k | switch (type) |
1799 | 842k | { |
1800 | 2.52k | case AARCH64_OPND_SVE_Zm3_INDEX: |
1801 | 5.77k | case AARCH64_OPND_SVE_Zm3_22_INDEX: |
1802 | 5.96k | case AARCH64_OPND_SVE_Zm3_19_INDEX: |
1803 | 10.2k | case AARCH64_OPND_SVE_Zm3_11_INDEX: |
1804 | 12.3k | case AARCH64_OPND_SVE_Zm4_11_INDEX: |
1805 | 15.3k | case AARCH64_OPND_SVE_Zm4_INDEX: |
1806 | 15.3k | size = get_operand_fields_width (get_operand_from_code (type)); |
1807 | 15.3k | shift = get_operand_specific_data (&aarch64_operands[type]); |
1808 | 15.3k | if (!check_reglane (opnd, mismatch_detail, idx, |
1809 | 15.3k | "z", 0, (1 << shift) - 1, |
1810 | 15.3k | 0, (1u << (size - shift)) - 1)) |
1811 | 0 | return 0; |
1812 | 15.3k | break; |
1813 | | |
1814 | 15.3k | case AARCH64_OPND_SVE_Zn_INDEX: |
1815 | 817 | size = aarch64_get_qualifier_esize (opnd->qualifier); |
1816 | 817 | if (!check_reglane (opnd, mismatch_detail, idx, "z", 0, 31, |
1817 | 817 | 0, 64 / size - 1)) |
1818 | 0 | return 0; |
1819 | 817 | break; |
1820 | | |
1821 | 817 | case AARCH64_OPND_SVE_Zm_imm4: |
1822 | 6 | if (!check_reglane (opnd, mismatch_detail, idx, "z", 0, 31, 0, 15)) |
1823 | 0 | return 0; |
1824 | 6 | break; |
1825 | | |
1826 | 71 | case AARCH64_OPND_SVE_Zn_5_INDEX: |
1827 | 71 | size = aarch64_get_qualifier_esize (opnd->qualifier); |
1828 | 71 | if (!check_reglane (opnd, mismatch_detail, idx, "z", 0, 31, |
1829 | 71 | 0, 16 / size - 1)) |
1830 | 0 | return 0; |
1831 | 71 | break; |
1832 | | |
1833 | 71 | case AARCH64_OPND_SME_PNn3_INDEX1: |
1834 | 10 | case AARCH64_OPND_SME_PNn3_INDEX2: |
1835 | 10 | size = get_operand_field_width (get_operand_from_code (type), 1); |
1836 | 10 | if (!check_reglane (opnd, mismatch_detail, idx, "pn", 8, 15, |
1837 | 10 | 0, (1 << size) - 1)) |
1838 | 0 | return 0; |
1839 | 10 | break; |
1840 | | |
1841 | 16 | case AARCH64_OPND_SME_Zn_INDEX1_16: |
1842 | 73 | case AARCH64_OPND_SME_Zn_INDEX2_15: |
1843 | 137 | case AARCH64_OPND_SME_Zn_INDEX2_16: |
1844 | 152 | case AARCH64_OPND_SME_Zn_INDEX3_14: |
1845 | 191 | case AARCH64_OPND_SME_Zn_INDEX3_15: |
1846 | 286 | case AARCH64_OPND_SME_Zn_INDEX4_14: |
1847 | 286 | size = get_operand_fields_width (get_operand_from_code (type)) - 5; |
1848 | 286 | if (!check_reglane (opnd, mismatch_detail, idx, "z", 0, 31, |
1849 | 286 | 0, (1 << size) - 1)) |
1850 | 0 | return 0; |
1851 | 286 | break; |
1852 | | |
1853 | 286 | case AARCH64_OPND_SME_Zm_INDEX1: |
1854 | 810 | case AARCH64_OPND_SME_Zm_INDEX2: |
1855 | 1.67k | case AARCH64_OPND_SME_Zm_INDEX3_1: |
1856 | 2.40k | case AARCH64_OPND_SME_Zm_INDEX3_2: |
1857 | 5.20k | case AARCH64_OPND_SME_Zm_INDEX3_10: |
1858 | 5.93k | case AARCH64_OPND_SME_Zm_INDEX4_1: |
1859 | 8.44k | case AARCH64_OPND_SME_Zm_INDEX4_10: |
1860 | 8.44k | size = get_operand_fields_width (get_operand_from_code (type)) - 4; |
1861 | 8.44k | if (!check_reglane (opnd, mismatch_detail, idx, "z", 0, 15, |
1862 | 8.44k | 0, (1 << size) - 1)) |
1863 | 0 | return 0; |
1864 | 8.44k | break; |
1865 | | |
1866 | 8.44k | case AARCH64_OPND_SME_Zm: |
1867 | 2.41k | if (opnd->reg.regno > 15) |
1868 | 0 | { |
1869 | 0 | set_invalid_regno_error (mismatch_detail, idx, "z", 0, 15); |
1870 | 0 | return 0; |
1871 | 0 | } |
1872 | 2.41k | break; |
1873 | | |
1874 | 2.41k | case AARCH64_OPND_SME_PnT_Wm_imm: |
1875 | 1.02k | size = aarch64_get_qualifier_esize (opnd->qualifier); |
1876 | 1.02k | max_value = 16 / size - 1; |
1877 | 1.02k | if (!check_za_access (opnd, mismatch_detail, idx, |
1878 | 1.02k | 12, max_value, 1, 0)) |
1879 | 0 | return 0; |
1880 | 1.02k | break; |
1881 | | |
1882 | 814k | default: |
1883 | 814k | break; |
1884 | 842k | } |
1885 | 842k | break; |
1886 | | |
1887 | 842k | case AARCH64_OPND_CLASS_SVE_REGLIST: |
1888 | 213k | switch (type) |
1889 | 213k | { |
1890 | 248 | case AARCH64_OPND_SME_Pdx2: |
1891 | 10.4k | case AARCH64_OPND_SME_Zdnx2: |
1892 | 14.0k | case AARCH64_OPND_SME_Zdnx4: |
1893 | 14.7k | case AARCH64_OPND_SME_Zmx2: |
1894 | 14.8k | case AARCH64_OPND_SME_Zmx4: |
1895 | 18.0k | case AARCH64_OPND_SME_Znx2: |
1896 | 19.1k | case AARCH64_OPND_SME_Znx4: |
1897 | 20.3k | case AARCH64_OPND_SME_Zt2: |
1898 | 21.1k | case AARCH64_OPND_SME_Zt3: |
1899 | 22.6k | case AARCH64_OPND_SME_Zt4: |
1900 | 22.6k | num = get_operand_specific_data (&aarch64_operands[type]); |
1901 | 22.6k | if (!check_reglist (opnd, mismatch_detail, idx, num, 1)) |
1902 | 0 | return 0; |
1903 | 22.6k | if ((opnd->reglist.first_regno % num) != 0) |
1904 | 1.72k | { |
1905 | 1.72k | set_other_error (mismatch_detail, idx, |
1906 | 1.72k | _("start register out of range")); |
1907 | 1.72k | return 0; |
1908 | 1.72k | } |
1909 | 20.9k | break; |
1910 | | |
1911 | 20.9k | case AARCH64_OPND_SME_Ztx2_STRIDED: |
1912 | 6.52k | case AARCH64_OPND_SME_Ztx4_STRIDED: |
1913 | | /* 2-register lists have a stride of 8 and 4-register lists |
1914 | | have a stride of 4. */ |
1915 | 6.52k | num = get_operand_specific_data (&aarch64_operands[type]); |
1916 | 6.52k | if (!check_reglist (opnd, mismatch_detail, idx, num, 16 / num)) |
1917 | 0 | return 0; |
1918 | 6.52k | num = 16 | (opnd->reglist.stride - 1); |
1919 | 6.52k | if ((opnd->reglist.first_regno & ~num) != 0) |
1920 | 0 | { |
1921 | 0 | set_other_error (mismatch_detail, idx, |
1922 | 0 | _("start register out of range")); |
1923 | 0 | return 0; |
1924 | 0 | } |
1925 | 6.52k | break; |
1926 | | |
1927 | 6.52k | case AARCH64_OPND_SME_PdxN: |
1928 | 4.94k | case AARCH64_OPND_SVE_ZnxN: |
1929 | 184k | case AARCH64_OPND_SVE_ZtxN: |
1930 | 184k | num = get_opcode_dependent_value (opcode); |
1931 | 184k | if (!check_reglist (opnd, mismatch_detail, idx, num, 1)) |
1932 | 0 | return 0; |
1933 | 184k | break; |
1934 | | |
1935 | 184k | default: |
1936 | 0 | abort (); |
1937 | 213k | } |
1938 | 211k | break; |
1939 | | |
1940 | 211k | case AARCH64_OPND_CLASS_ZA_ACCESS: |
1941 | 53.9k | switch (type) |
1942 | 53.9k | { |
1943 | 369 | case AARCH64_OPND_SME_ZA_HV_idx_src: |
1944 | 10.4k | case AARCH64_OPND_SME_ZA_HV_idx_dest: |
1945 | 41.6k | case AARCH64_OPND_SME_ZA_HV_idx_ldstr: |
1946 | 41.6k | size = aarch64_get_qualifier_esize (opnd->qualifier); |
1947 | 41.6k | max_value = 16 / size - 1; |
1948 | 41.6k | if (!check_za_access (opnd, mismatch_detail, idx, 12, max_value, 1, |
1949 | 41.6k | get_opcode_dependent_value (opcode))) |
1950 | 0 | return 0; |
1951 | 41.6k | break; |
1952 | | |
1953 | 41.6k | case AARCH64_OPND_SME_ZA_array_off4: |
1954 | 908 | if (!check_za_access (opnd, mismatch_detail, idx, 12, 15, 1, |
1955 | 908 | get_opcode_dependent_value (opcode))) |
1956 | 0 | return 0; |
1957 | 908 | break; |
1958 | | |
1959 | 2.13k | case AARCH64_OPND_SME_ZA_array_off3_0: |
1960 | 2.13k | case AARCH64_OPND_SME_ZA_array_off3_5: |
1961 | 2.13k | if (!check_za_access (opnd, mismatch_detail, idx, 8, 7, 1, |
1962 | 2.13k | get_opcode_dependent_value (opcode))) |
1963 | 0 | return 0; |
1964 | 2.13k | break; |
1965 | | |
1966 | 2.20k | case AARCH64_OPND_SME_ZA_array_off1x4: |
1967 | 2.20k | if (!check_za_access (opnd, mismatch_detail, idx, 8, 1, 4, |
1968 | 2.20k | get_opcode_dependent_value (opcode))) |
1969 | 0 | return 0; |
1970 | 2.20k | break; |
1971 | | |
1972 | 2.20k | case AARCH64_OPND_SME_ZA_array_off2x2: |
1973 | 1.12k | if (!check_za_access (opnd, mismatch_detail, idx, 8, 3, 2, |
1974 | 1.12k | get_opcode_dependent_value (opcode))) |
1975 | 0 | return 0; |
1976 | 1.12k | break; |
1977 | | |
1978 | 3.70k | case AARCH64_OPND_SME_ZA_array_off2x4: |
1979 | 3.70k | if (!check_za_access (opnd, mismatch_detail, idx, 8, 3, 4, |
1980 | 3.70k | get_opcode_dependent_value (opcode))) |
1981 | 0 | return 0; |
1982 | 3.70k | break; |
1983 | | |
1984 | 3.70k | case AARCH64_OPND_SME_ZA_array_off3x2: |
1985 | 1.94k | if (!check_za_access (opnd, mismatch_detail, idx, 8, 7, 2, |
1986 | 1.94k | get_opcode_dependent_value (opcode))) |
1987 | 0 | return 0; |
1988 | 1.94k | break; |
1989 | | |
1990 | 1.94k | case AARCH64_OPND_SME_ZA_array_vrsb_1: |
1991 | 0 | if (!check_za_access (opnd, mismatch_detail, idx, 12, 7, 2, |
1992 | 0 | get_opcode_dependent_value (opcode))) |
1993 | 0 | return 0; |
1994 | 0 | break; |
1995 | | |
1996 | 0 | case AARCH64_OPND_SME_ZA_array_vrsh_1: |
1997 | 0 | if (!check_za_access (opnd, mismatch_detail, idx, 12, 3, 2, |
1998 | 0 | get_opcode_dependent_value (opcode))) |
1999 | 0 | return 0; |
2000 | 0 | break; |
2001 | | |
2002 | 0 | case AARCH64_OPND_SME_ZA_array_vrss_1: |
2003 | 0 | if (!check_za_access (opnd, mismatch_detail, idx, 12, 1, 2, |
2004 | 0 | get_opcode_dependent_value (opcode))) |
2005 | 0 | return 0; |
2006 | 0 | break; |
2007 | | |
2008 | 0 | case AARCH64_OPND_SME_ZA_array_vrsd_1: |
2009 | 0 | if (!check_za_access (opnd, mismatch_detail, idx, 12, 0, 2, |
2010 | 0 | get_opcode_dependent_value (opcode))) |
2011 | 0 | return 0; |
2012 | 0 | break; |
2013 | | |
2014 | 2 | case AARCH64_OPND_SME_ZA_array_vrsb_2: |
2015 | 2 | if (!check_za_access (opnd, mismatch_detail, idx, 12, 3, 4, |
2016 | 2 | get_opcode_dependent_value (opcode))) |
2017 | 0 | return 0; |
2018 | 2 | break; |
2019 | | |
2020 | 2 | case AARCH64_OPND_SME_ZA_array_vrsh_2: |
2021 | 0 | if (!check_za_access (opnd, mismatch_detail, idx, 12, 1, 4, |
2022 | 0 | get_opcode_dependent_value (opcode))) |
2023 | 0 | return 0; |
2024 | 0 | break; |
2025 | | |
2026 | 0 | case AARCH64_OPND_SME_ZA_array_vrss_2: |
2027 | 1 | case AARCH64_OPND_SME_ZA_array_vrsd_2: |
2028 | 1 | if (!check_za_access (opnd, mismatch_detail, idx, 12, 0, 4, |
2029 | 1 | get_opcode_dependent_value (opcode))) |
2030 | 0 | return 0; |
2031 | 1 | break; |
2032 | | |
2033 | 146 | case AARCH64_OPND_SME_ZA_HV_idx_srcxN: |
2034 | 265 | case AARCH64_OPND_SME_ZA_HV_idx_destxN: |
2035 | 265 | size = aarch64_get_qualifier_esize (opnd->qualifier); |
2036 | 265 | num = get_opcode_dependent_value (opcode); |
2037 | 265 | max_value = 16 / num / size; |
2038 | 265 | if (max_value > 0) |
2039 | 211 | max_value -= 1; |
2040 | 265 | if (!check_za_access (opnd, mismatch_detail, idx, |
2041 | 265 | 12, max_value, num, 0)) |
2042 | 0 | return 0; |
2043 | 265 | break; |
2044 | | |
2045 | 265 | default: |
2046 | 0 | abort (); |
2047 | 53.9k | } |
2048 | 53.9k | break; |
2049 | | |
2050 | 593k | case AARCH64_OPND_CLASS_PRED_REG: |
2051 | 593k | switch (type) |
2052 | 593k | { |
2053 | 931 | case AARCH64_OPND_SME_PNd3: |
2054 | 19.9k | case AARCH64_OPND_SME_PNg3: |
2055 | 19.9k | if (opnd->reg.regno < 8) |
2056 | 0 | { |
2057 | 0 | set_invalid_regno_error (mismatch_detail, idx, "pn", 8, 15); |
2058 | 0 | return 0; |
2059 | 0 | } |
2060 | 19.9k | break; |
2061 | | |
2062 | 573k | default: |
2063 | 573k | if (opnd->reg.regno >= 8 |
2064 | 573k | && get_operand_fields_width (get_operand_from_code (type)) == 3) |
2065 | 0 | { |
2066 | 0 | set_invalid_regno_error (mismatch_detail, idx, "p", 0, 7); |
2067 | 0 | return 0; |
2068 | 0 | } |
2069 | 573k | break; |
2070 | 593k | } |
2071 | 593k | break; |
2072 | | |
2073 | 593k | case AARCH64_OPND_CLASS_COND: |
2074 | 20.3k | if (type == AARCH64_OPND_COND1 |
2075 | 20.3k | && (opnds[idx].cond->value & 0xe) == 0xe) |
2076 | 0 | { |
2077 | 0 | /* Do not allow AL or NV. */ |
2078 | 0 | set_syntax_error (mismatch_detail, idx, NULL); |
2079 | 0 | } |
2080 | 20.3k | break; |
2081 | | |
2082 | 2.28M | case AARCH64_OPND_CLASS_ADDRESS: |
2083 | | /* Check writeback. */ |
2084 | 2.28M | switch (opcode->iclass) |
2085 | 2.28M | { |
2086 | 214k | case ldst_pos: |
2087 | 275k | case ldst_unscaled: |
2088 | 371k | case ldstnapair_offs: |
2089 | 508k | case ldstpair_off: |
2090 | 520k | case ldst_unpriv: |
2091 | 520k | if (opnd->addr.writeback == 1) |
2092 | 0 | { |
2093 | 0 | set_syntax_error (mismatch_detail, idx, |
2094 | 0 | _("unexpected address writeback")); |
2095 | 0 | return 0; |
2096 | 0 | } |
2097 | 520k | break; |
2098 | 520k | case ldst_imm10: |
2099 | 8.69k | if (opnd->addr.writeback == 1 && opnd->addr.preind != 1) |
2100 | 0 | { |
2101 | 0 | set_syntax_error (mismatch_detail, idx, |
2102 | 0 | _("unexpected address writeback")); |
2103 | 0 | return 0; |
2104 | 0 | } |
2105 | 8.69k | break; |
2106 | 40.6k | case ldst_imm9: |
2107 | 183k | case ldstpair_indexed: |
2108 | 190k | case asisdlsep: |
2109 | 207k | case asisdlsop: |
2110 | 207k | if (opnd->addr.writeback == 0) |
2111 | 0 | { |
2112 | 0 | set_syntax_error (mismatch_detail, idx, |
2113 | 0 | _("address writeback expected")); |
2114 | 0 | return 0; |
2115 | 0 | } |
2116 | 207k | break; |
2117 | 207k | case rcpc3: |
2118 | 11.9k | if (opnd->addr.writeback) |
2119 | 341 | if ((type == AARCH64_OPND_RCPC3_ADDR_PREIND_WB |
2120 | 341 | && !opnd->addr.preind) |
2121 | 341 | || (type == AARCH64_OPND_RCPC3_ADDR_POSTIND |
2122 | 341 | && !opnd->addr.postind)) |
2123 | 0 | { |
2124 | 0 | set_syntax_error (mismatch_detail, idx, |
2125 | 0 | _("unexpected address writeback")); |
2126 | 0 | return 0; |
2127 | 0 | } |
2128 | | |
2129 | 11.9k | break; |
2130 | 1.53M | default: |
2131 | 1.53M | assert (opnd->addr.writeback == 0); |
2132 | 1.53M | break; |
2133 | 2.28M | } |
2134 | 2.28M | switch (type) |
2135 | 2.28M | { |
2136 | 354k | case AARCH64_OPND_ADDR_SIMM7: |
2137 | | /* Scaled signed 7-bit immediate offset. */ |
2138 | | /* Get the size of the data element that is accessed, which may be |
2139 | | different from the source register size, |
2140 | | e.g. in strb/ldrb. */ |
2141 | 354k | size = aarch64_get_qualifier_esize (opnd->qualifier); |
2142 | 354k | if (!value_in_range_p (opnd->addr.offset.imm, -64 * size, 63 * size)) |
2143 | 0 | { |
2144 | 0 | set_offset_out_of_range_error (mismatch_detail, idx, |
2145 | 0 | -64 * size, 63 * size); |
2146 | 0 | return 0; |
2147 | 0 | } |
2148 | 354k | if (!value_aligned_p (opnd->addr.offset.imm, size)) |
2149 | 0 | { |
2150 | 0 | set_unaligned_error (mismatch_detail, idx, size); |
2151 | 0 | return 0; |
2152 | 0 | } |
2153 | 354k | break; |
2154 | 354k | case AARCH64_OPND_ADDR_OFFSET: |
2155 | 107k | case AARCH64_OPND_ADDR_SIMM9: |
2156 | | /* Unscaled signed 9-bit immediate offset. */ |
2157 | 107k | if (!value_in_range_p (opnd->addr.offset.imm, -256, 255)) |
2158 | 0 | { |
2159 | 0 | set_offset_out_of_range_error (mismatch_detail, idx, -256, 255); |
2160 | 0 | return 0; |
2161 | 0 | } |
2162 | 107k | break; |
2163 | | |
2164 | 107k | case AARCH64_OPND_ADDR_SIMM9_2: |
2165 | | /* Unscaled signed 9-bit immediate offset, which must be negative |
2166 | | or unaligned. */ |
2167 | 0 | size = aarch64_get_qualifier_esize (qualifier); |
2168 | 0 | if ((value_in_range_p (opnd->addr.offset.imm, 0, 255) |
2169 | 0 | && !value_aligned_p (opnd->addr.offset.imm, size)) |
2170 | 0 | || value_in_range_p (opnd->addr.offset.imm, -256, -1)) |
2171 | 0 | return 1; |
2172 | 0 | set_other_error (mismatch_detail, idx, |
2173 | 0 | _("negative or unaligned offset expected")); |
2174 | 0 | return 0; |
2175 | | |
2176 | 8.69k | case AARCH64_OPND_ADDR_SIMM10: |
2177 | | /* Scaled signed 10-bit immediate offset. */ |
2178 | 8.69k | if (!value_in_range_p (opnd->addr.offset.imm, -4096, 4088)) |
2179 | 0 | { |
2180 | 0 | set_offset_out_of_range_error (mismatch_detail, idx, -4096, 4088); |
2181 | 0 | return 0; |
2182 | 0 | } |
2183 | 8.69k | if (!value_aligned_p (opnd->addr.offset.imm, 8)) |
2184 | 0 | { |
2185 | 0 | set_unaligned_error (mismatch_detail, idx, 8); |
2186 | 0 | return 0; |
2187 | 0 | } |
2188 | 8.69k | break; |
2189 | | |
2190 | 21.0k | case AARCH64_OPND_ADDR_SIMM11: |
2191 | | /* Signed 11-bit immediate offset (multiple of 16). */ |
2192 | 21.0k | if (!value_in_range_p (opnd->addr.offset.imm, -1024, 1008)) |
2193 | 0 | { |
2194 | 0 | set_offset_out_of_range_error (mismatch_detail, idx, -1024, 1008); |
2195 | 0 | return 0; |
2196 | 0 | } |
2197 | | |
2198 | 21.0k | if (!value_aligned_p (opnd->addr.offset.imm, 16)) |
2199 | 0 | { |
2200 | 0 | set_unaligned_error (mismatch_detail, idx, 16); |
2201 | 0 | return 0; |
2202 | 0 | } |
2203 | 21.0k | break; |
2204 | | |
2205 | 21.0k | case AARCH64_OPND_ADDR_SIMM13: |
2206 | | /* Signed 13-bit immediate offset (multiple of 16). */ |
2207 | 6.47k | if (!value_in_range_p (opnd->addr.offset.imm, -4096, 4080)) |
2208 | 0 | { |
2209 | 0 | set_offset_out_of_range_error (mismatch_detail, idx, -4096, 4080); |
2210 | 0 | return 0; |
2211 | 0 | } |
2212 | | |
2213 | 6.47k | if (!value_aligned_p (opnd->addr.offset.imm, 16)) |
2214 | 0 | { |
2215 | 0 | set_unaligned_error (mismatch_detail, idx, 16); |
2216 | 0 | return 0; |
2217 | 0 | } |
2218 | 6.47k | break; |
2219 | | |
2220 | 24.1k | case AARCH64_OPND_SIMD_ADDR_POST: |
2221 | | /* AdvSIMD load/store multiple structures, post-index. */ |
2222 | 24.1k | assert (idx == 1); |
2223 | 24.1k | if (opnd->addr.offset.is_reg) |
2224 | 22.5k | { |
2225 | 22.5k | if (value_in_range_p (opnd->addr.offset.regno, 0, 30)) |
2226 | 22.5k | return 1; |
2227 | 0 | else |
2228 | 0 | { |
2229 | 0 | set_other_error (mismatch_detail, idx, |
2230 | 0 | _("invalid register offset")); |
2231 | 0 | return 0; |
2232 | 0 | } |
2233 | 22.5k | } |
2234 | 1.62k | else |
2235 | 1.62k | { |
2236 | 1.62k | const aarch64_opnd_info *prev = &opnds[idx-1]; |
2237 | 1.62k | unsigned num_bytes; /* total number of bytes transferred. */ |
2238 | | /* The opcode dependent area stores the number of elements in |
2239 | | each structure to be loaded/stored. */ |
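 | | /* Illustrative example: an LD4 of four .4S registers requires a |
 | | post-index amount of 4 registers * 4 elements * 4 bytes = 64. */ |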
2240 | 1.62k | int is_ld1r = get_opcode_dependent_value (opcode) == 1; |
2241 | 1.62k | if (opcode->operands[0] == AARCH64_OPND_LVt_AL) |
2242 | | /* Special handling of loading a single structure to all lanes. */ |
2243 | 163 | num_bytes = (is_ld1r ? 1 : prev->reglist.num_regs) |
2244 | 163 | * aarch64_get_qualifier_esize (prev->qualifier); |
2245 | 1.45k | else |
2246 | 1.45k | num_bytes = prev->reglist.num_regs |
2247 | 1.45k | * aarch64_get_qualifier_esize (prev->qualifier) |
2248 | 1.45k | * aarch64_get_qualifier_nelem (prev->qualifier); |
2249 | 1.62k | if ((int) num_bytes != opnd->addr.offset.imm) |
2250 | 0 | { |
2251 | 0 | set_other_error (mismatch_detail, idx, |
2252 | 0 | _("invalid post-increment amount")); |
2253 | 0 | return 0; |
2254 | 0 | } |
2255 | 1.62k | } |
2256 | 1.62k | break; |
2257 | | |
2258 | 36.4k | case AARCH64_OPND_ADDR_REGOFF: |
2259 | | /* Get the size of the data element that is accessed, which may be |
2260 | | different from the source register size, |
2261 | | e.g. in strb/ldrb. */ |
2262 | 36.4k | size = aarch64_get_qualifier_esize (opnd->qualifier); |
2263 | | /* It is either no shift or shift by the binary logarithm of SIZE. */ |
2264 | 36.4k | if (opnd->shifter.amount != 0 |
2265 | 36.4k | && opnd->shifter.amount != (int)get_logsz (size)) |
2266 | 0 | { |
2267 | 0 | set_other_error (mismatch_detail, idx, |
2268 | 0 | _("invalid shift amount")); |
2269 | 0 | return 0; |
2270 | 0 | } |
2271 | | /* Only UXTW, LSL, SXTW and SXTX are the accepted extending |
2272 | | operators. */ |
2273 | 36.4k | switch (opnd->shifter.kind) |
2274 | 36.4k | { |
2275 | 2.08k | case AARCH64_MOD_UXTW: |
2276 | 10.8k | case AARCH64_MOD_LSL: |
2277 | 13.7k | case AARCH64_MOD_SXTW: |
2278 | 16.4k | case AARCH64_MOD_SXTX: break; |
2279 | 20.0k | default: |
2280 | 20.0k | set_other_error (mismatch_detail, idx, |
2281 | 20.0k | _("invalid extend/shift operator")); |
2282 | 20.0k | return 0; |
2283 | 36.4k | } |
2284 | 16.4k | break; |
2285 | | |
2286 | 214k | case AARCH64_OPND_ADDR_UIMM12: |
2287 | 214k | imm = opnd->addr.offset.imm; |
2288 | | /* Get the size of the data element that is accessed, which may be |
2289 | | different from the source register size, |
2290 | | e.g. in strb/ldrb. */ |
2291 | 214k | size = aarch64_get_qualifier_esize (qualifier); |
2292 | 214k | if (!value_in_range_p (opnd->addr.offset.imm, 0, 4095 * size)) |
2293 | 0 | { |
2294 | 0 | set_offset_out_of_range_error (mismatch_detail, idx, |
2295 | 0 | 0, 4095 * size); |
2296 | 0 | return 0; |
2297 | 0 | } |
2298 | 214k | if (!value_aligned_p (opnd->addr.offset.imm, size)) |
2299 | 0 | { |
2300 | 0 | set_unaligned_error (mismatch_detail, idx, size); |
2301 | 0 | return 0; |
2302 | 0 | } |
2303 | 214k | break; |
2304 | | |
2305 | 214k | case AARCH64_OPND_ADDR_PCREL14: |
2306 | 492k | case AARCH64_OPND_ADDR_PCREL19: |
2307 | 704k | case AARCH64_OPND_ADDR_PCREL21: |
2308 | 931k | case AARCH64_OPND_ADDR_PCREL26: |
2309 | 931k | imm = opnd->imm.value; |
2310 | 931k | if (operand_need_shift_by_two (get_operand_from_code (type))) |
2311 | 719k | { |
2312 | | /* The offset value in a PC-relative branch instruction is always |
2313 | | 4-byte aligned and is encoded without the lowest 2 bits. */ |
2314 | 719k | if (!value_aligned_p (imm, 4)) |
2315 | 0 | { |
2316 | 0 | set_unaligned_error (mismatch_detail, idx, 4); |
2317 | 0 | return 0; |
2318 | 0 | } |
2319 | | /* Right shift by 2 so that we can carry out the following check |
2320 | | canonically. */ |
2321 | 719k | imm >>= 2; |
2322 | 719k | } |
2323 | 931k | size = get_operand_fields_width (get_operand_from_code (type)); |
2324 | 931k | if (!value_fit_signed_field_p (imm, size)) |
2325 | 0 | { |
2326 | 0 | set_other_error (mismatch_detail, idx, |
2327 | 0 | _("immediate out of range")); |
2328 | 0 | return 0; |
2329 | 0 | } |
2330 | 931k | break; |
2331 | | |
2332 | 931k | case AARCH64_OPND_SME_ADDR_RI_U4xVL: |
2333 | 908 | if (!value_in_range_p (opnd->addr.offset.imm, 0, 15)) |
2334 | 0 | { |
2335 | 0 | set_offset_out_of_range_error (mismatch_detail, idx, 0, 15); |
2336 | 0 | return 0; |
2337 | 0 | } |
2338 | 908 | break; |
2339 | | |
2340 | 40.5k | case AARCH64_OPND_SVE_ADDR_RI_S4xVL: |
2341 | 46.2k | case AARCH64_OPND_SVE_ADDR_RI_S4x2xVL: |
2342 | 48.9k | case AARCH64_OPND_SVE_ADDR_RI_S4x3xVL: |
2343 | 53.5k | case AARCH64_OPND_SVE_ADDR_RI_S4x4xVL: |
2344 | 53.5k | min_value = -8; |
2345 | 53.5k | max_value = 7; |
2346 | 58.9k | sve_imm_offset_vl: |
2347 | 58.9k | assert (!opnd->addr.offset.is_reg); |
2348 | 58.9k | assert (opnd->addr.preind); |
2349 | 58.9k | num = 1 + get_operand_specific_data (&aarch64_operands[type]); |
2350 | 58.9k | min_value *= num; |
2351 | 58.9k | max_value *= num; |
2352 | 58.9k | if ((opnd->addr.offset.imm != 0 && !opnd->shifter.operator_present) |
2353 | 58.9k | || (opnd->shifter.operator_present |
2354 | 58.9k | && opnd->shifter.kind != AARCH64_MOD_MUL_VL)) |
2355 | 0 | { |
2356 | 0 | set_other_error (mismatch_detail, idx, |
2357 | 0 | _("invalid addressing mode")); |
2358 | 0 | return 0; |
2359 | 0 | } |
2360 | 58.9k | if (!value_in_range_p (opnd->addr.offset.imm, min_value, max_value)) |
2361 | 0 | { |
2362 | 0 | set_offset_out_of_range_error (mismatch_detail, idx, |
2363 | 0 | min_value, max_value); |
2364 | 0 | return 0; |
2365 | 0 | } |
2366 | 58.9k | if (!value_aligned_p (opnd->addr.offset.imm, num)) |
2367 | 0 | { |
2368 | 0 | set_unaligned_error (mismatch_detail, idx, num); |
2369 | 0 | return 0; |
2370 | 0 | } |
2371 | 58.9k | break; |
2372 | | |
2373 | 58.9k | case AARCH64_OPND_SVE_ADDR_RI_S6xVL: |
2374 | 1.66k | min_value = -32; |
2375 | 1.66k | max_value = 31; |
2376 | 1.66k | goto sve_imm_offset_vl; |
2377 | | |
2378 | 3.74k | case AARCH64_OPND_SVE_ADDR_RI_S9xVL: |
2379 | 3.74k | min_value = -256; |
2380 | 3.74k | max_value = 255; |
2381 | 3.74k | goto sve_imm_offset_vl; |
2382 | | |
2383 | 4.82k | case AARCH64_OPND_SVE_ADDR_RI_U6: |
2384 | 8.23k | case AARCH64_OPND_SVE_ADDR_RI_U6x2: |
2385 | 10.3k | case AARCH64_OPND_SVE_ADDR_RI_U6x4: |
2386 | 11.1k | case AARCH64_OPND_SVE_ADDR_RI_U6x8: |
2387 | 11.1k | min_value = 0; |
2388 | 11.1k | max_value = 63; |
2389 | 24.1k | sve_imm_offset: |
2390 | 24.1k | assert (!opnd->addr.offset.is_reg); |
2391 | 24.1k | assert (opnd->addr.preind); |
2392 | 24.1k | num = 1 << get_operand_specific_data (&aarch64_operands[type]); |
2393 | 24.1k | min_value *= num; |
2394 | 24.1k | max_value *= num; |
2395 | 24.1k | if (opnd->shifter.operator_present |
2396 | 24.1k | || opnd->shifter.amount_present) |
2397 | 0 | { |
2398 | 0 | set_other_error (mismatch_detail, idx, |
2399 | 0 | _("invalid addressing mode")); |
2400 | 0 | return 0; |
2401 | 0 | } |
2402 | 24.1k | if (!value_in_range_p (opnd->addr.offset.imm, min_value, max_value)) |
2403 | 0 | { |
2404 | 0 | set_offset_out_of_range_error (mismatch_detail, idx, |
2405 | 0 | min_value, max_value); |
2406 | 0 | return 0; |
2407 | 0 | } |
2408 | 24.1k | if (!value_aligned_p (opnd->addr.offset.imm, num)) |
2409 | 0 | { |
2410 | 0 | set_unaligned_error (mismatch_detail, idx, num); |
2411 | 0 | return 0; |
2412 | 0 | } |
2413 | 24.1k | break; |
2414 | | |
2415 | 24.1k | case AARCH64_OPND_SVE_ADDR_RI_S4x16: |
2416 | 1.32k | case AARCH64_OPND_SVE_ADDR_RI_S4x32: |
2417 | 1.32k | min_value = -8; |
2418 | 1.32k | max_value = 7; |
2419 | 1.32k | goto sve_imm_offset; |
2420 | | |
2421 | 10.2k | case AARCH64_OPND_SVE_ADDR_ZX: |
2422 | | /* Everything is already ensured by parse_operands or |
2423 | | aarch64_ext_sve_addr_rr_lsl (because this is a very specific |
2424 | | argument type). */ |
2425 | 10.2k | assert (opnd->addr.offset.is_reg); |
2426 | 10.2k | assert (opnd->addr.preind); |
2427 | 10.2k | assert ((aarch64_operands[type].flags & OPD_F_NO_ZR) == 0); |
2428 | 10.2k | assert (opnd->shifter.kind == AARCH64_MOD_LSL); |
2429 | 10.2k | assert (opnd->shifter.operator_present == 0); |
2430 | 10.2k | break; |
2431 | | |
2432 | 10.2k | case AARCH64_OPND_SVE_ADDR_R: |
2433 | 16.0k | case AARCH64_OPND_SVE_ADDR_RR: |
2434 | 23.6k | case AARCH64_OPND_SVE_ADDR_RR_LSL1: |
2435 | 29.1k | case AARCH64_OPND_SVE_ADDR_RR_LSL2: |
2436 | 43.3k | case AARCH64_OPND_SVE_ADDR_RR_LSL3: |
2437 | 52.1k | case AARCH64_OPND_SVE_ADDR_RR_LSL4: |
2438 | 61.8k | case AARCH64_OPND_SVE_ADDR_RX: |
2439 | 68.4k | case AARCH64_OPND_SVE_ADDR_RX_LSL1: |
2440 | 76.4k | case AARCH64_OPND_SVE_ADDR_RX_LSL2: |
2441 | 80.7k | case AARCH64_OPND_SVE_ADDR_RX_LSL3: |
2442 | 95.5k | case AARCH64_OPND_SVE_ADDR_RZ: |
2443 | 97.7k | case AARCH64_OPND_SVE_ADDR_RZ_LSL1: |
2444 | 99.2k | case AARCH64_OPND_SVE_ADDR_RZ_LSL2: |
2445 | 100k | case AARCH64_OPND_SVE_ADDR_RZ_LSL3: |
2446 | 100k | modifiers = 1 << AARCH64_MOD_LSL; |
2447 | 154k | sve_rr_operand: |
2448 | 154k | assert (opnd->addr.offset.is_reg); |
2449 | 154k | assert (opnd->addr.preind); |
2450 | 154k | if ((aarch64_operands[type].flags & OPD_F_NO_ZR) != 0 |
2451 | 154k | && opnd->addr.offset.regno == 31) |
2452 | 0 | { |
2453 | 0 | set_other_error (mismatch_detail, idx, |
2454 | 0 | _("index register xzr is not allowed")); |
2455 | 0 | return 0; |
2456 | 0 | } |
2457 | 154k | if (((1 << opnd->shifter.kind) & modifiers) == 0 |
2458 | 154k | || (opnd->shifter.amount |
2459 | 154k | != get_operand_specific_data (&aarch64_operands[type]))) |
2460 | 0 | { |
2461 | 0 | set_other_error (mismatch_detail, idx, |
2462 | 0 | _("invalid addressing mode")); |
2463 | 0 | return 0; |
2464 | 0 | } |
2465 | 154k | break; |
2466 | | |
2467 | 154k | case AARCH64_OPND_SVE_ADDR_RZ_XTW_14: |
2468 | 34.6k | case AARCH64_OPND_SVE_ADDR_RZ_XTW_22: |
2469 | 35.9k | case AARCH64_OPND_SVE_ADDR_RZ_XTW1_14: |
2470 | 42.2k | case AARCH64_OPND_SVE_ADDR_RZ_XTW1_22: |
2471 | 43.4k | case AARCH64_OPND_SVE_ADDR_RZ_XTW2_14: |
2472 | 48.5k | case AARCH64_OPND_SVE_ADDR_RZ_XTW2_22: |
2473 | 49.1k | case AARCH64_OPND_SVE_ADDR_RZ_XTW3_14: |
2474 | 53.7k | case AARCH64_OPND_SVE_ADDR_RZ_XTW3_22: |
2475 | 53.7k | modifiers = (1 << AARCH64_MOD_SXTW) | (1 << AARCH64_MOD_UXTW); |
2476 | 53.7k | goto sve_rr_operand; |
2477 | | |
2478 | 3.76k | case AARCH64_OPND_SVE_ADDR_ZI_U5: |
2479 | 8.12k | case AARCH64_OPND_SVE_ADDR_ZI_U5x2: |
2480 | 10.6k | case AARCH64_OPND_SVE_ADDR_ZI_U5x4: |
2481 | 11.7k | case AARCH64_OPND_SVE_ADDR_ZI_U5x8: |
2482 | 11.7k | min_value = 0; |
2483 | 11.7k | max_value = 31; |
2484 | 11.7k | goto sve_imm_offset; |
2485 | | |
2486 | 375 | case AARCH64_OPND_SVE_ADDR_ZZ_LSL: |
2487 | 375 | modifiers = 1 << AARCH64_MOD_LSL; |
2488 | 1.05k | sve_zz_operand: |
2489 | 1.05k | assert (opnd->addr.offset.is_reg); |
2490 | 1.05k | assert (opnd->addr.preind); |
2491 | 1.05k | if (((1 << opnd->shifter.kind) & modifiers) == 0 |
2492 | 1.05k | || opnd->shifter.amount < 0 |
2493 | 1.05k | || opnd->shifter.amount > 3) |
2494 | 0 | { |
2495 | 0 | set_other_error (mismatch_detail, idx, |
2496 | 0 | _("invalid addressing mode")); |
2497 | 0 | return 0; |
2498 | 0 | } |
2499 | 1.05k | break; |
2500 | | |
2501 | 1.05k | case AARCH64_OPND_SVE_ADDR_ZZ_SXTW: |
2502 | 157 | modifiers = (1 << AARCH64_MOD_SXTW); |
2503 | 157 | goto sve_zz_operand; |
2504 | | |
2505 | 524 | case AARCH64_OPND_SVE_ADDR_ZZ_UXTW: |
2506 | 524 | modifiers = 1 << AARCH64_MOD_UXTW; |
2507 | 524 | goto sve_zz_operand; |
2508 | | |
2509 | 4.71k | case AARCH64_OPND_RCPC3_ADDR_OPT_PREIND_WB: |
2510 | 5.20k | case AARCH64_OPND_RCPC3_ADDR_OPT_POSTIND: |
2511 | 5.20k | case AARCH64_OPND_RCPC3_ADDR_PREIND_WB: |
2512 | 5.20k | case AARCH64_OPND_RCPC3_ADDR_POSTIND: |
2513 | 5.20k | { |
2514 | 5.20k | int num_bytes = calc_ldst_datasize (opnds); |
2515 | 5.20k | int abs_offset = (type == AARCH64_OPND_RCPC3_ADDR_OPT_PREIND_WB |
2516 | 5.20k | || type == AARCH64_OPND_RCPC3_ADDR_PREIND_WB) |
2517 | 5.20k | ? opnd->addr.offset.imm * -1 |
2518 | 5.20k | : opnd->addr.offset.imm; |
2519 | 5.20k | if ((int) num_bytes != abs_offset |
2520 | 5.20k | && opnd->addr.offset.imm != 0) |
2521 | 0 | { |
2522 | 0 | set_other_error (mismatch_detail, idx, |
2523 | 0 | _("invalid increment amount")); |
2524 | 0 | return 0; |
2525 | 0 | } |
2526 | 5.20k | } |
2527 | 5.20k | break; |
2528 | | |
2529 | 6.73k | case AARCH64_OPND_RCPC3_ADDR_OFFSET: |
2530 | 6.73k | if (!value_in_range_p (opnd->addr.offset.imm, -256, 255)) |
2531 | 0 | { |
2532 | 0 | set_imm_out_of_range_error (mismatch_detail, idx, -256, 255); |
2533 | 0 | return 0; |
2534 | 0 | } |
2535 | | |
2536 | 324k | default: |
2537 | 324k | break; |
2538 | 2.28M | } |
2539 | 2.24M | break; |
2540 | | |
2541 | 2.24M | case AARCH64_OPND_CLASS_SIMD_REGLIST: |
2542 | 49.6k | if (type == AARCH64_OPND_LEt) |
2543 | 21.2k | { |
2544 | | /* Get the upper bound for the element index. */ |
2545 | 21.2k | num = 16 / aarch64_get_qualifier_esize (qualifier) - 1; |
2546 | 21.2k | if (!value_in_range_p (opnd->reglist.index, 0, num)) |
2547 | 0 | { |
2548 | 0 | set_elem_idx_out_of_range_error (mismatch_detail, idx, 0, num); |
2549 | 0 | return 0; |
2550 | 0 | } |
2551 | 21.2k | } |
2552 | | /* The opcode dependent area stores the number of elements in |
2553 | | each structure to be loaded/stored. */ |
2554 | 49.6k | num = get_opcode_dependent_value (opcode); |
2555 | 49.6k | switch (type) |
2556 | 49.6k | { |
2557 | 13.9k | case AARCH64_OPND_LVt: |
2558 | 13.9k | assert (num >= 1 && num <= 4); |
2559 | | /* Except for LD1/ST1, the number of registers must equal the number |
2560 | | of structure elements. */ |
2561 | 13.9k | if (num != 1 && !check_reglist (opnd, mismatch_detail, idx, num, 1)) |
2562 | 440 | return 0; |
2563 | 13.4k | break; |
2564 | 13.4k | case AARCH64_OPND_LVt_AL: |
2565 | 22.6k | case AARCH64_OPND_LEt: |
2566 | 22.6k | assert (num >= 1 && num <= 4); |
2567 | | /* The number of registers should be equal to that of the structure |
2568 | | elements. */ |
2569 | 22.6k | if (!check_reglist (opnd, mismatch_detail, idx, num, 1)) |
2570 | 0 | return 0; |
2571 | 22.6k | break; |
2572 | 22.6k | default: |
2573 | 13.0k | break; |
2574 | 49.6k | } |
2575 | 49.1k | if (opnd->reglist.stride != 1) |
2576 | 0 | { |
2577 | 0 | set_reg_list_stride_error (mismatch_detail, idx, 1); |
2578 | 0 | return 0; |
2579 | 0 | } |
2580 | 49.1k | break; |
2581 | | |
2582 | 2.57M | case AARCH64_OPND_CLASS_IMMEDIATE: |
2583 | | /* Constraint check on immediate operand. */ |
2584 | 2.57M | imm = opnd->imm.value; |
2585 | | /* E.g. imm_0_31 constrains value to be 0..31. */ |
2586 | 2.57M | if (qualifier_value_in_range_constraint_p (qualifier) |
2587 | 2.57M | && !value_in_range_p (imm, get_lower_bound (qualifier), |
2588 | 406k | get_upper_bound (qualifier))) |
2589 | 89.0k | { |
2590 | 89.0k | set_imm_out_of_range_error (mismatch_detail, idx, |
2591 | 89.0k | get_lower_bound (qualifier), |
2592 | 89.0k | get_upper_bound (qualifier)); |
2593 | 89.0k | return 0; |
2594 | 89.0k | } |
2595 | | |
2596 | 2.49M | switch (type) |
2597 | 2.49M | { |
2598 | 350k | case AARCH64_OPND_AIMM: |
2599 | 350k | if (opnd->shifter.kind != AARCH64_MOD_LSL) |
2600 | 0 | { |
2601 | 0 | set_other_error (mismatch_detail, idx, |
2602 | 0 | _("invalid shift operator")); |
2603 | 0 | return 0; |
2604 | 0 | } |
2605 | 350k | if (opnd->shifter.amount != 0 && opnd->shifter.amount != 12) |
2606 | 0 | { |
2607 | 0 | set_other_error (mismatch_detail, idx, |
2608 | 0 | _("shift amount must be 0 or 12")); |
2609 | 0 | return 0; |
2610 | 0 | } |
2611 | 350k | if (!value_fit_unsigned_field_p (opnd->imm.value, 12)) |
2612 | 0 | { |
2613 | 0 | set_other_error (mismatch_detail, idx, |
2614 | 0 | _("immediate out of range")); |
2615 | 0 | return 0; |
2616 | 0 | } |
2617 | 350k | break; |
2618 | | |
2619 | 350k | case AARCH64_OPND_HALF: |
2620 | 102k | assert (idx == 1 && opnds[0].type == AARCH64_OPND_Rd); |
2621 | 102k | if (opnd->shifter.kind != AARCH64_MOD_LSL) |
2622 | 0 | { |
2623 | 0 | set_other_error (mismatch_detail, idx, |
2624 | 0 | _("invalid shift operator")); |
2625 | 0 | return 0; |
2626 | 0 | } |
2627 | 102k | size = aarch64_get_qualifier_esize (opnds[0].qualifier); |
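 | | /* Illustrative: for an X-register destination (SIZE == 8), the checks |
 | | below allow LSL amounts of 0, 16, 32 or 48 only. */ |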
2628 | 102k | if (!value_aligned_p (opnd->shifter.amount, 16)) |
2629 | 0 | { |
2630 | 0 | set_other_error (mismatch_detail, idx, |
2631 | 0 | _("shift amount must be a multiple of 16")); |
2632 | 0 | return 0; |
2633 | 0 | } |
2634 | 102k | if (!value_in_range_p (opnd->shifter.amount, 0, size * 8 - 16)) |
2635 | 16.6k | { |
2636 | 16.6k | set_sft_amount_out_of_range_error (mismatch_detail, idx, |
2637 | 16.6k | 0, size * 8 - 16); |
2638 | 16.6k | return 0; |
2639 | 16.6k | } |
2640 | 85.9k | if (opnd->imm.value < 0) |
2641 | 0 | { |
2642 | 0 | set_other_error (mismatch_detail, idx, |
2643 | 0 | _("negative immediate value not allowed")); |
2644 | 0 | return 0; |
2645 | 0 | } |
2646 | 85.9k | if (!value_fit_unsigned_field_p (opnd->imm.value, 16)) |
2647 | 0 | { |
2648 | 0 | set_other_error (mismatch_detail, idx, |
2649 | 0 | _("immediate out of range")); |
2650 | 0 | return 0; |
2651 | 0 | } |
2652 | 85.9k | break; |
2653 | | |
2654 | 85.9k | case AARCH64_OPND_IMM_MOV: |
2655 | 53.8k | { |
2656 | 53.8k | int esize = aarch64_get_qualifier_esize (opnds[0].qualifier); |
2657 | 53.8k | imm = opnd->imm.value; |
2658 | 53.8k | assert (idx == 1); |
2659 | 53.8k | switch (opcode->op) |
2660 | 53.8k | { |
2661 | 23.5k | case OP_MOV_IMM_WIDEN: |
2662 | 23.5k | imm = ~imm; |
2663 | | /* Fall through. */ |
2664 | 53.5k | case OP_MOV_IMM_WIDE: |
2665 | 53.5k | if (!aarch64_wide_constant_p (imm, esize == 4, NULL)) |
2666 | 0 | { |
2667 | 0 | set_other_error (mismatch_detail, idx, |
2668 | 0 | _("immediate out of range")); |
2669 | 0 | return 0; |
2670 | 0 | } |
2671 | 53.5k | break; |
2672 | 53.5k | case OP_MOV_IMM_LOG: |
2673 | 337 | if (!aarch64_logical_immediate_p (imm, esize, NULL)) |
2674 | 0 | { |
2675 | 0 | set_other_error (mismatch_detail, idx, |
2676 | 0 | _("immediate out of range")); |
2677 | 0 | return 0; |
2678 | 0 | } |
2679 | 337 | break; |
2680 | 337 | default: |
2681 | 0 | assert (0); |
2682 | 0 | return 0; |
2683 | 53.8k | } |
2684 | 53.8k | } |
2685 | 53.8k | break; |
2686 | | |
2687 | 53.8k | case AARCH64_OPND_NZCV: |
2688 | 6.97k | case AARCH64_OPND_CCMP_IMM: |
2689 | 7.88k | case AARCH64_OPND_EXCEPTION: |
2690 | 1.31M | case AARCH64_OPND_UNDEFINED: |
2691 | 1.31M | case AARCH64_OPND_TME_UIMM16: |
2692 | 1.31M | case AARCH64_OPND_UIMM4: |
2693 | 1.32M | case AARCH64_OPND_UIMM4_ADDG: |
2694 | 1.32M | case AARCH64_OPND_UIMM7: |
2695 | 1.32M | case AARCH64_OPND_UIMM3_OP1: |
2696 | 1.32M | case AARCH64_OPND_UIMM3_OP2: |
2697 | 1.32M | case AARCH64_OPND_SVE_UIMM3: |
2698 | 1.34M | case AARCH64_OPND_SVE_UIMM7: |
2699 | 1.34M | case AARCH64_OPND_SVE_UIMM8: |
2700 | 1.35M | case AARCH64_OPND_SVE_UIMM8_53: |
2701 | 1.35M | case AARCH64_OPND_CSSC_UIMM8: |
2702 | 1.35M | size = get_operand_fields_width (get_operand_from_code (type)); |
2703 | 1.35M | assert (size < 32); |
2704 | 1.35M | if (!value_fit_unsigned_field_p (opnd->imm.value, size)) |
2705 | 0 | { |
2706 | 0 | set_imm_out_of_range_error (mismatch_detail, idx, 0, |
2707 | 0 | (1u << size) - 1); |
2708 | 0 | return 0; |
2709 | 0 | } |
2710 | 1.35M | break; |
2711 | | |
2712 | 1.35M | case AARCH64_OPND_UIMM10: |
2713 | | /* Scaled unsigned 10-bit immediate offset. */ |
2714 | 2.28k | if (!value_in_range_p (opnd->imm.value, 0, 1008)) |
2715 | 0 | { |
2716 | 0 | set_imm_out_of_range_error (mismatch_detail, idx, 0, 1008); |
2717 | 0 | return 0; |
2718 | 0 | } |
2719 | | |
2720 | 2.28k | if (!value_aligned_p (opnd->imm.value, 16)) |
2721 | 0 | { |
2722 | 0 | set_unaligned_error (mismatch_detail, idx, 16); |
2723 | 0 | return 0; |
2724 | 0 | } |
2725 | 2.28k | break; |
2726 | | |
2727 | 7.85k | case AARCH64_OPND_SIMM5: |
2728 | 8.32k | case AARCH64_OPND_SVE_SIMM5: |
2729 | 8.55k | case AARCH64_OPND_SVE_SIMM5B: |
2730 | 9.51k | case AARCH64_OPND_SVE_SIMM6: |
2731 | 9.85k | case AARCH64_OPND_SVE_SIMM8: |
2732 | 11.5k | case AARCH64_OPND_CSSC_SIMM8: |
2733 | 11.5k | size = get_operand_fields_width (get_operand_from_code (type)); |
2734 | 11.5k | assert (size < 32); |
2735 | 11.5k | if (!value_fit_signed_field_p (opnd->imm.value, size)) |
2736 | 0 | { |
2737 | 0 | set_imm_out_of_range_error (mismatch_detail, idx, |
2738 | 0 | -(1 << (size - 1)), |
2739 | 0 | (1 << (size - 1)) - 1); |
2740 | 0 | return 0; |
2741 | 0 | } |
2742 | 11.5k | break; |
2743 | | |
2744 | 31.4k | case AARCH64_OPND_WIDTH: |
2745 | 31.4k | assert (idx > 1 && opnds[idx-1].type == AARCH64_OPND_IMM |
2746 | 31.4k | && opnds[0].type == AARCH64_OPND_Rd); |
2747 | 31.4k | size = get_upper_bound (qualifier); |
2748 | 31.4k | if (opnd->imm.value + opnds[idx-1].imm.value > size) |
2749 | | /* lsb+width <= reg.size */ |
2750 | 0 | { |
2751 | 0 | set_imm_out_of_range_error (mismatch_detail, idx, 1, |
2752 | 0 | size - opnds[idx-1].imm.value); |
2753 | 0 | return 0; |
2754 | 0 | } |
2755 | 31.4k | break; |
2756 | | |
2757 | 192k | case AARCH64_OPND_LIMM: |
2758 | 223k | case AARCH64_OPND_SVE_LIMM: |
2759 | 223k | { |
2760 | 223k | int esize = aarch64_get_qualifier_esize (opnds[0].qualifier); |
2761 | 223k | uint64_t uimm = opnd->imm.value; |
2762 | 223k | if (opcode->op == OP_BIC) |
2763 | 0 | uimm = ~uimm; |
2764 | 223k | if (!aarch64_logical_immediate_p (uimm, esize, NULL)) |
2765 | 0 | { |
2766 | 0 | set_other_error (mismatch_detail, idx, |
2767 | 0 | _("immediate out of range")); |
2768 | 0 | return 0; |
2769 | 0 | } |
2770 | 223k | } |
2771 | 223k | break; |
2772 | | |
2773 | 223k | case AARCH64_OPND_IMM0: |
2774 | 318 | case AARCH64_OPND_FPIMM0: |
2775 | 318 | if (opnd->imm.value != 0) |
2776 | 0 | { |
2777 | 0 | set_other_error (mismatch_detail, idx, |
2778 | 0 | _("immediate zero expected")); |
2779 | 0 | return 0; |
2780 | 0 | } |
2781 | 318 | break; |
2782 | | |
2783 | 490 | case AARCH64_OPND_IMM_ROT1: |
2784 | 11.5k | case AARCH64_OPND_IMM_ROT2: |
2785 | 16.1k | case AARCH64_OPND_SVE_IMM_ROT2: |
2786 | 16.1k | if (opnd->imm.value != 0 |
2787 | 16.1k | && opnd->imm.value != 90 |
2788 | 16.1k | && opnd->imm.value != 180 |
2789 | 16.1k | && opnd->imm.value != 270) |
2790 | 0 | { |
2791 | 0 | set_other_error (mismatch_detail, idx, |
2792 | 0 | _("rotate expected to be 0, 90, 180 or 270")); |
2793 | 0 | return 0; |
2794 | 0 | } |
2795 | 16.1k | break; |
2796 | | |
2797 | 16.1k | case AARCH64_OPND_IMM_ROT3: |
2798 | 480 | case AARCH64_OPND_SVE_IMM_ROT1: |
2799 | 500 | case AARCH64_OPND_SVE_IMM_ROT3: |
2800 | 500 | if (opnd->imm.value != 90 && opnd->imm.value != 270) |
2801 | 0 | { |
2802 | 0 | set_other_error (mismatch_detail, idx, |
2803 | 0 | _("rotate expected to be 90 or 270")); |
2804 | 0 | return 0; |
2805 | 0 | } |
2806 | 500 | break; |
2807 | | |
2808 | 500 | case AARCH64_OPND_SHLL_IMM: |
2809 | 22 | assert (idx == 2); |
2810 | 22 | size = 8 * aarch64_get_qualifier_esize (opnds[idx - 1].qualifier); |
2811 | 22 | if (opnd->imm.value != size) |
2812 | 0 | { |
2813 | 0 | set_other_error (mismatch_detail, idx, |
2814 | 0 | _("invalid shift amount")); |
2815 | 0 | return 0; |
2816 | 0 | } |
2817 | 22 | break; |
2818 | | |
2819 | 6.50k | case AARCH64_OPND_IMM_VLSL: |
2820 | 6.50k | size = aarch64_get_qualifier_esize (qualifier); |
2821 | 6.50k | if (!value_in_range_p (opnd->imm.value, 0, size * 8 - 1)) |
2822 | 0 | { |
2823 | 0 | set_imm_out_of_range_error (mismatch_detail, idx, 0, |
2824 | 0 | size * 8 - 1); |
2825 | 0 | return 0; |
2826 | 0 | } |
2827 | 6.50k | break; |
2828 | | |
2829 | 8.86k | case AARCH64_OPND_IMM_VLSR: |
2830 | 8.86k | size = aarch64_get_qualifier_esize (qualifier); |
2831 | 8.86k | if (!value_in_range_p (opnd->imm.value, 1, size * 8)) |
2832 | 0 | { |
2833 | 0 | set_imm_out_of_range_error (mismatch_detail, idx, 1, size * 8); |
2834 | 0 | return 0; |
2835 | 0 | } |
2836 | 8.86k | break; |
2837 | | |
2838 | 8.86k | case AARCH64_OPND_SIMD_IMM: |
2839 | 2.15k | case AARCH64_OPND_SIMD_IMM_SFT: |
2840 | | /* Qualifier check. */ |
2841 | 2.15k | switch (qualifier) |
2842 | 2.15k | { |
2843 | 1.77k | case AARCH64_OPND_QLF_LSL: |
2844 | 1.77k | if (opnd->shifter.kind != AARCH64_MOD_LSL) |
2845 | 0 | { |
2846 | 0 | set_other_error (mismatch_detail, idx, |
2847 | 0 | _("invalid shift operator")); |
2848 | 0 | return 0; |
2849 | 0 | } |
2850 | 1.77k | break; |
2851 | 1.77k | case AARCH64_OPND_QLF_MSL: |
2852 | 273 | if (opnd->shifter.kind != AARCH64_MOD_MSL) |
2853 | 0 | { |
2854 | 0 | set_other_error (mismatch_detail, idx, |
2855 | 0 | _("invalid shift operator")); |
2856 | 0 | return 0; |
2857 | 0 | } |
2858 | 273 | break; |
2859 | 273 | case AARCH64_OPND_QLF_NIL: |
2860 | 108 | if (opnd->shifter.kind != AARCH64_MOD_NONE) |
2861 | 0 | { |
2862 | 0 | set_other_error (mismatch_detail, idx, |
2863 | 0 | _("shift is not permitted")); |
2864 | 0 | return 0; |
2865 | 0 | } |
2866 | 108 | break; |
2867 | 108 | default: |
2868 | 0 | assert (0); |
2869 | 0 | return 0; |
2870 | 2.15k | } |
2871 | | /* Is the immediate valid? */ |
2872 | 2.15k | assert (idx == 1); |
2873 | 2.15k | if (aarch64_get_qualifier_esize (opnds[0].qualifier) != 8) |
2874 | 2.04k | { |
2875 | | /* uimm8 or simm8 */ |
2876 | 2.04k | if (!value_in_range_p (opnd->imm.value, -128, 255)) |
2877 | 0 | { |
2878 | 0 | set_imm_out_of_range_error (mismatch_detail, idx, -128, 255); |
2879 | 0 | return 0; |
2880 | 0 | } |
2881 | 2.04k | } |
2882 | 108 | else if (aarch64_shrink_expanded_imm8 (opnd->imm.value) < 0) |
2883 | 0 | { |
2884 | | /* uimm64 is not |
2885 | | 'aaaaaaaabbbbbbbbccccccccddddddddeeeeeeee |
2886 | | ffffffffgggggggghhhhhhhh'. */ |
2887 | 0 | set_other_error (mismatch_detail, idx, |
2888 | 0 | _("invalid value for immediate")); |
2889 | 0 | return 0; |
2890 | 0 | } |
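 | | /* In the 64-bit element form every byte of the immediate must be
 | | 0x00 or 0xff; e.g. 0x00ff00ff00ff00ff shrinks back to the 8-bit
 | | value 0x55, whereas 0x0123456789abcdef is rejected. */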
2891 | | /* Is the shift amount valid? */ |
2892 | 2.15k | switch (opnd->shifter.kind) |
2893 | 2.15k | { |
2894 | 1.77k | case AARCH64_MOD_LSL: |
2895 | 1.77k | size = aarch64_get_qualifier_esize (opnds[0].qualifier); |
2896 | 1.77k | if (!value_in_range_p (opnd->shifter.amount, 0, (size - 1) * 8)) |
2897 | 0 | { |
2898 | 0 | set_sft_amount_out_of_range_error (mismatch_detail, idx, 0, |
2899 | 0 | (size - 1) * 8); |
2900 | 0 | return 0; |
2901 | 0 | } |
2902 | 1.77k | if (!value_aligned_p (opnd->shifter.amount, 8)) |
2903 | 0 | { |
2904 | 0 | set_unaligned_error (mismatch_detail, idx, 8); |
2905 | 0 | return 0; |
2906 | 0 | } |
2907 | 1.77k | break; |
2908 | 1.77k | case AARCH64_MOD_MSL: |
2909 | | /* Only 8 and 16 are valid shift amounts. */
2910 | 273 | if (opnd->shifter.amount != 8 && opnd->shifter.amount != 16) |
2911 | 0 | { |
2912 | 0 | set_other_error (mismatch_detail, idx, |
2913 | 0 | _("shift amount must be 0 or 16")); |
2914 | 0 | return 0; |
2915 | 0 | } |
2916 | 273 | break; |
2917 | 273 | default: |
2918 | 108 | if (opnd->shifter.kind != AARCH64_MOD_NONE) |
2919 | 0 | { |
2920 | 0 | set_other_error (mismatch_detail, idx, |
2921 | 0 | _("invalid shift operator")); |
2922 | 0 | return 0; |
2923 | 0 | } |
2924 | 108 | break; |
2925 | 2.15k | } |
2926 | 2.15k | break; |
2927 | | |
2928 | 2.15k | case AARCH64_OPND_FPIMM: |
2929 | 733 | case AARCH64_OPND_SIMD_FPIMM: |
2930 | 1.83k | case AARCH64_OPND_SVE_FPIMM8: |
2931 | 1.83k | if (opnd->imm.is_fp == 0) |
2932 | 0 | { |
2933 | 0 | set_other_error (mismatch_detail, idx, |
2934 | 0 | _("floating-point immediate expected")); |
2935 | 0 | return 0; |
2936 | 0 | } |
2937 | | /* The value is expected to be an 8-bit floating-point constant with |
2938 | | sign, 3-bit exponent and normalized 4 bits of precision, encoded |
2939 | | in "a:b:c:d:e:f:g:h" or FLD_imm8 (depending on the type of the |
2940 | | instruction). */ |
2941 | 1.83k | if (!value_in_range_p (opnd->imm.value, 0, 255)) |
2942 | 0 | { |
2943 | 0 | set_other_error (mismatch_detail, idx, |
2944 | 0 | _("immediate out of range")); |
2945 | 0 | return 0; |
2946 | 0 | } |
2947 | 1.83k | if (opnd->shifter.kind != AARCH64_MOD_NONE) |
2948 | 0 | { |
2949 | 0 | set_other_error (mismatch_detail, idx, |
2950 | 0 | _("invalid shift operator")); |
2951 | 0 | return 0; |
2952 | 0 | } |
2953 | 1.83k | break; |
2954 | | |
2955 | 1.83k | case AARCH64_OPND_SVE_AIMM: |
2956 | 1.05k | min_value = 0; |
2957 | 16.5k | sve_aimm: |
2958 | 16.5k | assert (opnd->shifter.kind == AARCH64_MOD_LSL); |
2959 | 16.5k | size = aarch64_get_qualifier_esize (opnds[0].qualifier); |
2960 | 16.5k | mask = ~((uint64_t) -1 << (size * 4) << (size * 4)); |
2961 | 16.5k | uvalue = opnd->imm.value; |
2962 | 16.5k | shift = opnd->shifter.amount; |
2963 | 16.5k | if (size == 1) |
2964 | 3.54k | { |
2965 | 3.54k | if (shift != 0) |
2966 | 16 | { |
2967 | 16 | set_other_error (mismatch_detail, idx, |
2968 | 16 | _("no shift amount allowed for" |
2969 | 16 | " 8-bit constants")); |
2970 | 16 | return 0; |
2971 | 16 | } |
2972 | 3.54k | } |
2973 | 13.0k | else |
2974 | 13.0k | { |
2975 | 13.0k | if (shift != 0 && shift != 8) |
2976 | 0 | { |
2977 | 0 | set_other_error (mismatch_detail, idx, |
2978 | 0 | _("shift amount must be 0 or 8")); |
2979 | 0 | return 0; |
2980 | 0 | } |
2981 | 13.0k | if (shift == 0 && (uvalue & 0xff) == 0) |
2982 | 7.89k | { |
2983 | 7.89k | shift = 8; |
2984 | 7.89k | uvalue = (int64_t) uvalue / 256; |
2985 | 7.89k | } |
2986 | 13.0k | } |
2987 | 16.5k | mask >>= shift; |
2988 | 16.5k | if ((uvalue & mask) != uvalue && (uvalue | ~mask) != uvalue) |
2989 | 652 | { |
2990 | 652 | set_other_error (mismatch_detail, idx, |
2991 | 652 | _("immediate too big for element size")); |
2992 | 652 | return 0; |
2993 | 652 | } |
2994 | 15.8k | uvalue = (uvalue - min_value) & mask; |
2995 | 15.8k | if (uvalue > 0xff) |
2996 | 0 | { |
2997 | 0 | set_other_error (mismatch_detail, idx, |
2998 | 0 | _("invalid arithmetic immediate")); |
2999 | 0 | return 0; |
3000 | 0 | } |
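 | | /* Worked example: for a .h operand, #0x1200 written without a shift
 | | is folded above to #0x12, LSL #8 and accepted; for a .b operand,
 | | #256 does not fit the 8-bit field and is rejected as too big for
 | | the element size. */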
3001 | 15.8k | break; |
3002 | | |
3003 | 15.8k | case AARCH64_OPND_SVE_ASIMM: |
3004 | 15.4k | min_value = -128; |
3005 | 15.4k | goto sve_aimm; |
3006 | | |
3007 | 12 | case AARCH64_OPND_SVE_I1_HALF_ONE: |
3008 | 12 | assert (opnd->imm.is_fp); |
3009 | 12 | if (opnd->imm.value != 0x3f000000 && opnd->imm.value != 0x3f800000) |
3010 | 0 | { |
3011 | 0 | set_other_error (mismatch_detail, idx, |
3012 | 0 | _("floating-point value must be 0.5 or 1.0")); |
3013 | 0 | return 0; |
3014 | 0 | } |
3015 | 12 | break; |
3016 | | |
3017 | 12 | case AARCH64_OPND_SVE_I1_HALF_TWO: |
3018 | 3 | assert (opnd->imm.is_fp); |
3019 | 3 | if (opnd->imm.value != 0x3f000000 && opnd->imm.value != 0x40000000) |
3020 | 0 | { |
3021 | 0 | set_other_error (mismatch_detail, idx, |
3022 | 0 | _("floating-point value must be 0.5 or 2.0")); |
3023 | 0 | return 0; |
3024 | 0 | } |
3025 | 3 | break; |
3026 | | |
3027 | 75 | case AARCH64_OPND_SVE_I1_ZERO_ONE: |
3028 | 75 | assert (opnd->imm.is_fp); |
3029 | 75 | if (opnd->imm.value != 0 && opnd->imm.value != 0x3f800000) |
3030 | 0 | { |
3031 | 0 | set_other_error (mismatch_detail, idx, |
3032 | 0 | _("floating-point value must be 0.0 or 1.0")); |
3033 | 0 | return 0; |
3034 | 0 | } |
3035 | 75 | break; |
3036 | | |
3037 | 75 | case AARCH64_OPND_SVE_INV_LIMM: |
3038 | 0 | { |
3039 | 0 | int esize = aarch64_get_qualifier_esize (opnds[0].qualifier); |
3040 | 0 | uint64_t uimm = ~opnd->imm.value; |
3041 | 0 | if (!aarch64_logical_immediate_p (uimm, esize, NULL)) |
3042 | 0 | { |
3043 | 0 | set_other_error (mismatch_detail, idx, |
3044 | 0 | _("immediate out of range")); |
3045 | 0 | return 0; |
3046 | 0 | } |
3047 | 0 | } |
3048 | 0 | break; |
3049 | | |
3050 | 1.39k | case AARCH64_OPND_SVE_LIMM_MOV: |
3051 | 1.39k | { |
3052 | 1.39k | int esize = aarch64_get_qualifier_esize (opnds[0].qualifier); |
3053 | 1.39k | uint64_t uimm = opnd->imm.value; |
3054 | 1.39k | if (!aarch64_logical_immediate_p (uimm, esize, NULL)) |
3055 | 0 | { |
3056 | 0 | set_other_error (mismatch_detail, idx, |
3057 | 0 | _("immediate out of range")); |
3058 | 0 | return 0; |
3059 | 0 | } |
3060 | 1.39k | if (!aarch64_sve_dupm_mov_immediate_p (uimm, esize)) |
3061 | 0 | { |
3062 | 0 | set_other_error (mismatch_detail, idx, |
3063 | 0 | _("invalid replicated MOV immediate")); |
3064 | 0 | return 0; |
3065 | 0 | } |
3066 | 1.39k | } |
3067 | 1.39k | break; |
3068 | | |
3069 | 3.92k | case AARCH64_OPND_SVE_PATTERN_SCALED: |
3070 | 3.92k | assert (opnd->shifter.kind == AARCH64_MOD_MUL); |
3071 | 3.92k | if (!value_in_range_p (opnd->shifter.amount, 1, 16)) |
3072 | 0 | { |
3073 | 0 | set_multiplier_out_of_range_error (mismatch_detail, idx, 1, 16); |
3074 | 0 | return 0; |
3075 | 0 | } |
3076 | 3.92k | break; |
3077 | | |
3078 | 3.92k | case AARCH64_OPND_SVE_SHLIMM_PRED: |
3079 | 789 | case AARCH64_OPND_SVE_SHLIMM_UNPRED: |
3080 | 1.29k | case AARCH64_OPND_SVE_SHLIMM_UNPRED_22: |
3081 | 1.29k | size = aarch64_get_qualifier_esize (opnds[idx - 1].qualifier); |
3082 | 1.29k | if (!value_in_range_p (opnd->imm.value, 0, 8 * size - 1)) |
3083 | 0 | { |
3084 | 0 | set_imm_out_of_range_error (mismatch_detail, idx, |
3085 | 0 | 0, 8 * size - 1); |
3086 | 0 | return 0; |
3087 | 0 | } |
3088 | 1.29k | break; |
3089 | | |
3090 | 1.29k | case AARCH64_OPND_SME_SHRIMM4: |
3091 | 78 | size = 1 << get_operand_fields_width (get_operand_from_code (type)); |
3092 | 78 | if (!value_in_range_p (opnd->imm.value, 1, size)) |
3093 | 0 | { |
3094 | 0 | set_imm_out_of_range_error (mismatch_detail, idx, 1, size); |
3095 | 0 | return 0; |
3096 | 0 | } |
3097 | 78 | break; |
3098 | | |
3099 | 123 | case AARCH64_OPND_SME_SHRIMM5: |
3100 | 1.09k | case AARCH64_OPND_SVE_SHRIMM_PRED: |
3101 | 2.59k | case AARCH64_OPND_SVE_SHRIMM_UNPRED: |
3102 | 5.49k | case AARCH64_OPND_SVE_SHRIMM_UNPRED_22: |
3103 | 5.49k | num = (type == AARCH64_OPND_SVE_SHRIMM_UNPRED_22) ? 2 : 1; |
3104 | 5.49k | size = aarch64_get_qualifier_esize (opnds[idx - num].qualifier); |
3105 | 5.49k | if (!value_in_range_p (opnd->imm.value, 1, 8 * size)) |
3106 | 0 | { |
3107 | 0 | set_imm_out_of_range_error (mismatch_detail, idx, 1, 8*size); |
3108 | 0 | return 0; |
3109 | 0 | } |
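 | | /* E.g. a shift-right immediate for .b elements must be in [1, 8]
 | | and for .d elements in [1, 64]; #0 is never accepted. */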
3110 | 5.49k | break; |
3111 | | |
3112 | 5.49k | case AARCH64_OPND_SME_ZT0_INDEX: |
3113 | 0 | if (!value_in_range_p (opnd->imm.value, 0, 56)) |
3114 | 0 | { |
3115 | 0 | set_elem_idx_out_of_range_error (mismatch_detail, idx, 0, 56); |
3116 | 0 | return 0; |
3117 | 0 | } |
3118 | 0 | if (opnd->imm.value % 8 != 0) |
3119 | 0 | { |
3120 | 0 | set_other_error (mismatch_detail, idx, |
3121 | 0 | _("byte index must be a multiple of 8")); |
3122 | 0 | return 0; |
3123 | 0 | } |
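 | | /* The accepted indices are therefore 0, 8, 16, ..., 56, each
 | | selecting one of the eight 8-byte granules of the 512-bit ZT0. */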
3124 | 0 | break; |
3125 | | |
3126 | 297k | default: |
3127 | 297k | break; |
3128 | 2.49M | } |
3129 | 2.47M | break; |
3130 | | |
3131 | 2.47M | case AARCH64_OPND_CLASS_SYSTEM: |
3132 | 42.3k | switch (type) |
3133 | 42.3k | { |
3134 | 173 | case AARCH64_OPND_PSTATEFIELD: |
3135 | 392 | for (i = 0; aarch64_pstatefields[i].name; ++i) |
3136 | 392 | if (aarch64_pstatefields[i].value == opnd->pstatefield) |
3137 | 173 | break; |
3138 | 173 | assert (aarch64_pstatefields[i].name); |
3139 | 173 | assert (idx == 0 && opnds[1].type == AARCH64_OPND_UIMM4); |
3140 | 173 | max_value = F_GET_REG_MAX_VALUE (aarch64_pstatefields[i].flags); |
3141 | 173 | if (opnds[1].imm.value < 0 || opnds[1].imm.value > max_value) |
3142 | 101 | { |
3143 | 101 | set_imm_out_of_range_error (mismatch_detail, 1, 0, max_value); |
3144 | 101 | return 0; |
3145 | 101 | } |
3146 | 72 | break; |
3147 | 33.9k | case AARCH64_OPND_PRFOP: |
3148 | 33.9k | if (opcode->iclass == ldst_regoff && opnd->prfop->value >= 24) |
3149 | 197 | { |
3150 | 197 | set_other_error (mismatch_detail, idx, |
3151 | 197 | _("the register-index form of PRFM does" |
3152 | 197 | " not accept opcodes in the range 24-31")); |
3153 | 197 | return 0; |
3154 | 197 | } |
3155 | 33.7k | break; |
3156 | 33.7k | default: |
3157 | 8.26k | break; |
3158 | 42.3k | } |
3159 | 42.0k | break; |
3160 | | |
3161 | 50.5k | case AARCH64_OPND_CLASS_SIMD_ELEMENT: |
3162 | | /* Get the upper bound for the element index. */ |
3163 | 50.5k | if (opcode->op == OP_FCMLA_ELEM) |
3164 | | /* FCMLA index range depends on the vector size of other operands |
3165 | | and is halved because complex numbers take two elements. */
3166 | 5.97k | num = aarch64_get_qualifier_nelem (opnds[0].qualifier) |
3167 | 5.97k | * aarch64_get_qualifier_esize (opnds[0].qualifier) / 2; |
3168 | 44.5k | else |
3169 | 44.5k | num = 16; |
3170 | 50.5k | num = num / aarch64_get_qualifier_esize (qualifier) - 1; |
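 | | /* E.g. FCMLA with 4S vector operands and a .s indexed element gives
 | | num = 4 * 4 / 2 / 4 - 1 = 1, i.e. an index range of 0-1; the
 | | 8H/.h form gives an index range of 0-3. */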
3171 | 50.5k | assert (aarch64_get_qualifier_nelem (qualifier) == 1); |
3172 | | |
3173 | | /* Index out-of-range. */ |
3174 | 50.5k | if (!value_in_range_p (opnd->reglane.index, 0, num)) |
3175 | 446 | { |
3176 | 446 | set_elem_idx_out_of_range_error (mismatch_detail, idx, 0, num); |
3177 | 446 | return 0; |
3178 | 446 | } |
3179 | | /* SMLAL<Q> <Vd>.<Ta>, <Vn>.<Tb>, <Vm>.<Ts>[<index>]. |
3180 | | <Vm> Is the vector register (V0-V31) or (V0-V15), whose |
3181 | | number is encoded in "size:M:Rm": |
3182 | | size <Vm> |
3183 | | 00 RESERVED |
3184 | | 01 0:Rm |
3185 | | 10 M:Rm |
3186 | | 11 RESERVED */ |
3187 | 50.1k | if (type == AARCH64_OPND_Em16 && qualifier == AARCH64_OPND_QLF_S_H |
3188 | 50.1k | && !value_in_range_p (opnd->reglane.regno, 0, 15)) |
3189 | 0 | { |
3190 | 0 | set_regno_out_of_range_error (mismatch_detail, idx, 0, 15); |
3191 | 0 | return 0; |
3192 | 0 | } |
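 | | /* E.g. for the .h form only V0-V15 can be encoded ("0:Rm" above),
 | | so an indexed element such as v16.h[0] is rejected here. */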
3193 | 50.1k | break; |
3194 | | |
3195 | 509k | case AARCH64_OPND_CLASS_MODIFIED_REG: |
3196 | 509k | assert (idx == 1 || idx == 2); |
3197 | 509k | switch (type) |
3198 | 509k | { |
3199 | 41.9k | case AARCH64_OPND_Rm_EXT: |
3200 | 41.9k | if (!aarch64_extend_operator_p (opnd->shifter.kind) |
3201 | 41.9k | && opnd->shifter.kind != AARCH64_MOD_LSL) |
3202 | 0 | { |
3203 | 0 | set_other_error (mismatch_detail, idx, |
3204 | 0 | _("extend operator expected")); |
3205 | 0 | return 0; |
3206 | 0 | } |
3207 | | /* The extend operator is not optional unless at least one of "Rd" or
3208 | | "Rn" is '11111' (i.e. SP), in which case it defaults to LSL. The
3209 | | LSL alias is only valid when "Rd" or "Rn" is '11111', and is
3210 | | preferred in that case. */
3211 | 41.9k | if (!aarch64_stack_pointer_p (opnds + 0) |
3212 | 41.9k | && (idx != 2 || !aarch64_stack_pointer_p (opnds + 1))) |
3213 | 40.4k | { |
3214 | 40.4k | if (!opnd->shifter.operator_present) |
3215 | 0 | { |
3216 | 0 | set_other_error (mismatch_detail, idx, |
3217 | 0 | _("missing extend operator")); |
3218 | 0 | return 0; |
3219 | 0 | } |
3220 | 40.4k | else if (opnd->shifter.kind == AARCH64_MOD_LSL) |
3221 | 0 | { |
3222 | 0 | set_other_error (mismatch_detail, idx, |
3223 | 0 | _("'LSL' operator not allowed")); |
3224 | 0 | return 0; |
3225 | 0 | } |
3226 | 40.4k | } |
3227 | 41.9k | assert (opnd->shifter.operator_present /* Default to LSL. */ |
3228 | 41.9k | || opnd->shifter.kind == AARCH64_MOD_LSL); |
3229 | 41.9k | if (!value_in_range_p (opnd->shifter.amount, 0, 4)) |
3230 | 7.69k | { |
3231 | 7.69k | set_sft_amount_out_of_range_error (mismatch_detail, idx, 0, 4); |
3232 | 7.69k | return 0; |
3233 | 7.69k | } |
3234 | | /* In the 64-bit form, the final register operand is written as Wm |
3235 | | for all but the (possibly omitted) UXTX/LSL and SXTX |
3236 | | operators. |
3237 | | N.B. GAS allows X register to be used with any operator as a |
3238 | | programming convenience. */ |
3239 | 34.2k | if (qualifier == AARCH64_OPND_QLF_X |
3240 | 34.2k | && opnd->shifter.kind != AARCH64_MOD_LSL |
3241 | 34.2k | && opnd->shifter.kind != AARCH64_MOD_UXTX |
3242 | 34.2k | && opnd->shifter.kind != AARCH64_MOD_SXTX) |
3243 | 0 | { |
3244 | 0 | set_other_error (mismatch_detail, idx, _("W register expected")); |
3245 | 0 | return 0; |
3246 | 0 | } |
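 | | /* E.g. something like ADD X0, X1, W2, SXTW #3 satisfies all of the
 | | checks above, whereas an extend amount of #5 is reported as out of
 | | range (0-4). */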
3247 | 34.2k | break; |
3248 | | |
3249 | 467k | case AARCH64_OPND_Rm_SFT: |
3250 | | /* ROR is not available to the shifted register operand in |
3251 | | arithmetic instructions. */ |
3252 | 467k | if (!aarch64_shift_operator_p (opnd->shifter.kind)) |
3253 | 0 | { |
3254 | 0 | set_other_error (mismatch_detail, idx, |
3255 | 0 | _("shift operator expected")); |
3256 | 0 | return 0; |
3257 | 0 | } |
3258 | 467k | if (opnd->shifter.kind == AARCH64_MOD_ROR |
3259 | 467k | && opcode->iclass != log_shift) |
3260 | 0 | { |
3261 | 0 | set_other_error (mismatch_detail, idx, |
3262 | 0 | _("'ROR' operator not allowed")); |
3263 | 0 | return 0; |
3264 | 0 | } |
3265 | 467k | num = qualifier == AARCH64_OPND_QLF_W ? 31 : 63; |
3266 | 467k | if (!value_in_range_p (opnd->shifter.amount, 0, num)) |
3267 | 53.4k | { |
3268 | 53.4k | set_sft_amount_out_of_range_error (mismatch_detail, idx, 0, num); |
3269 | 53.4k | return 0; |
3270 | 53.4k | } |
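 | | /* E.g. ADD W0, W1, W2, LSL #31 is accepted but LSL #32 is out of
 | | range for the W form, and ROR is only accepted by the log_shift
 | | class (ORR, AND and friends). */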
3271 | 413k | break; |
3272 | | |
3273 | 413k | case AARCH64_OPND_Rm_LSL: |
3274 | | /* The parser already restricts the kind of shift to LSL only, so
3275 | | opnd->shifter.kind is known to be AARCH64_MOD_LSL here and another
3276 | | check of the shift kind would be redundant. */
3277 | 881 | if (!value_in_range_p (opnd->shifter.amount, 0, 7)) |
3278 | 0 | { |
3279 | 0 | set_sft_amount_out_of_range_error (mismatch_detail, idx, 0, 7); |
3280 | 0 | return 0; |
3281 | 0 | } |
3282 | 881 | break; |
3283 | | |
3284 | 881 | default: |
3285 | 0 | break; |
3286 | 509k | } |
3287 | 448k | break; |
3288 | | |
3289 | 1.16M | default: |
3290 | 1.16M | break; |
3291 | 12.9M | } |
3292 | | |
3293 | 12.7M | return 1; |
3294 | 12.9M | } |
3295 | | |
3296 | | /* Main entrypoint for the operand constraint checking. |
3297 | | |
3298 | | Return 1 if operands of *INST meet the constraint applied by the operand |
3299 | | codes and operand qualifiers; otherwise return 0 and if MISMATCH_DETAIL is |
3300 | | not NULL, return the detail of the error in *MISMATCH_DETAIL. N.B. when |
3301 | | adding more constraint checking, make sure MISMATCH_DETAIL->KIND is set |
3302 | | with a proper error kind rather than AARCH64_OPDE_NIL (GAS asserts non-NIL |
3303 | | error kind when it is notified that an instruction does not pass the check). |
3304 | | |
3305 | | Un-determined operand qualifiers may get established during the process. */ |
3306 | | |
3307 | | int |
3308 | | aarch64_match_operands_constraint (aarch64_inst *inst, |
3309 | | aarch64_operand_error *mismatch_detail) |
3310 | 5.70M | { |
3311 | 5.70M | int i; |
3312 | | |
3313 | 5.70M | DEBUG_TRACE ("enter"); |
3314 | | |
3315 | 5.70M | i = inst->opcode->tied_operand; |
3316 | | |
3317 | 5.70M | if (i > 0) |
3318 | 55.4k | { |
3319 | | /* Check for tied_operands with specific opcode iclass. */ |
3320 | 55.4k | switch (inst->opcode->iclass) |
3321 | 55.4k | { |
3322 | | /* For SME LDR and STR instructions, #imm must have the same
3323 | | numerical value for both operands. */
3324 | |
3325 | 878 | case sme_ldr: |
3326 | 908 | case sme_str: |
3327 | 908 | assert (inst->operands[0].type == AARCH64_OPND_SME_ZA_array_off4); |
3328 | 908 | assert (inst->operands[1].type == AARCH64_OPND_SME_ADDR_RI_U4xVL); |
3329 | 908 | if (inst->operands[0].indexed_za.index.imm |
3330 | 908 | != inst->operands[1].addr.offset.imm) |
3331 | 0 | { |
3332 | 0 | if (mismatch_detail) |
3333 | 0 | { |
3334 | 0 | mismatch_detail->kind = AARCH64_OPDE_UNTIED_IMMS; |
3335 | 0 | mismatch_detail->index = i; |
3336 | 0 | } |
3337 | 0 | return 0; |
3338 | 0 | } |
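 | | /* E.g. LDR ZA[W12, #7], [X0, #7, MUL VL] is accepted; writing #7
 | | and #6 would be reported as untied immediates because the two
 | | operands are tied. */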
3339 | 908 | break; |
3340 | | |
3341 | 54.5k | default: |
3342 | 54.5k | { |
3343 | | /* Check for cases where a source register needs to be the |
3344 | | same as the destination register. Do this before |
3345 | | matching qualifiers since if an instruction has both |
3346 | | invalid tying and invalid qualifiers, the error about |
3347 | | qualifiers would suggest several alternative instructions |
3348 | | that also have invalid tying. */ |
3349 | 54.5k | enum aarch64_operand_class op_class |
3350 | 54.5k | = aarch64_get_operand_class (inst->operands[0].type); |
3351 | 54.5k | assert (aarch64_get_operand_class (inst->operands[i].type) |
3352 | 54.5k | == op_class); |
3353 | 54.5k | if (op_class == AARCH64_OPND_CLASS_SVE_REGLIST |
3354 | 54.5k | ? ((inst->operands[0].reglist.first_regno |
3355 | 242 | != inst->operands[i].reglist.first_regno) |
3356 | 242 | || (inst->operands[0].reglist.num_regs |
3357 | 242 | != inst->operands[i].reglist.num_regs) |
3358 | 242 | || (inst->operands[0].reglist.stride |
3359 | 242 | != inst->operands[i].reglist.stride)) |
3360 | 54.5k | : (inst->operands[0].reg.regno |
3361 | 54.2k | != inst->operands[i].reg.regno)) |
3362 | 0 | { |
3363 | 0 | if (mismatch_detail) |
3364 | 0 | { |
3365 | 0 | mismatch_detail->kind = AARCH64_OPDE_UNTIED_OPERAND; |
3366 | 0 | mismatch_detail->index = i; |
3367 | 0 | mismatch_detail->error = NULL; |
3368 | 0 | } |
3369 | 0 | return 0; |
3370 | 0 | } |
3371 | 54.5k | break; |
3372 | 54.5k | } |
3373 | 55.4k | } |
3374 | 55.4k | } |
3375 | | |
3376 | | /* Match operands' qualifier. |
3377 | | *INST has already had qualifiers established for some, if not all, of
3378 | | its operands; we need to find out whether these established
3379 | | qualifiers match one of the qualifier sequences in
3380 | | INST->OPCODE->QUALIFIERS_LIST. If yes, we will assign each operand
3381 | | the corresponding qualifier from that sequence.
3382 | | Only basic operand constraint checking is done here; the more thorough
3383 | | constraint checking will be carried out by operand_general_constraint_met_p,
3384 | | which has to be called after this in order to get all of the operands'
3385 | | qualifiers established. */
3386 | 5.70M | int invalid_count; |
3387 | 5.70M | if (match_operands_qualifier (inst, true /* update_p */, |
3388 | 5.70M | &invalid_count) == 0) |
3389 | 25.8k | { |
3390 | 25.8k | DEBUG_TRACE ("FAIL on operand qualifier matching"); |
3391 | 25.8k | if (mismatch_detail) |
3392 | 0 | { |
3393 | | /* Return an error type to indicate that it is a qualifier
3394 | | matching failure; we don't care about which operand as there
3395 | | is enough information in the opcode table to reproduce it. */
3396 | 0 | mismatch_detail->kind = AARCH64_OPDE_INVALID_VARIANT; |
3397 | 0 | mismatch_detail->index = -1; |
3398 | 0 | mismatch_detail->error = NULL; |
3399 | 0 | mismatch_detail->data[0].i = invalid_count; |
3400 | 0 | } |
3401 | 25.8k | return 0; |
3402 | 25.8k | } |
3403 | | |
3404 | | /* Match operands' constraint. */ |
3405 | 18.4M | for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i) |
3406 | 18.4M | { |
3407 | 18.4M | enum aarch64_opnd type = inst->opcode->operands[i]; |
3408 | 18.4M | if (type == AARCH64_OPND_NIL) |
3409 | 5.48M | break; |
3410 | 12.9M | if (inst->operands[i].skip) |
3411 | 0 | { |
3412 | 0 | DEBUG_TRACE ("skip the incomplete operand %d", i); |
3413 | 0 | continue; |
3414 | 0 | } |
3415 | 12.9M | if (operand_general_constraint_met_p (inst->operands, i, type, |
3416 | 12.9M | inst->opcode, mismatch_detail) == 0) |
3417 | 196k | { |
3418 | 196k | DEBUG_TRACE ("FAIL on operand %d", i); |
3419 | 196k | return 0; |
3420 | 196k | } |
3421 | 12.9M | } |
3422 | | |
3423 | 5.48M | DEBUG_TRACE ("PASS"); |
3424 | | |
3425 | 5.48M | return 1; |
3426 | 5.68M | } |
3427 | | |
3428 | | /* Replace INST->OPCODE with OPCODE and return the replaced OPCODE. |
3429 | | Also updates the TYPE of each INST->OPERANDS with the corresponding |
3430 | | value of OPCODE->OPERANDS. |
3431 | | |
3432 | | Note that some operand qualifiers may need to be manually cleared by |
3433 | | the caller before it further calls the aarch64_opcode_encode; by |
3434 | | doing this, it helps the qualifier matching facilities work |
3435 | | properly. */ |
3436 | | |
3437 | | const aarch64_opcode* |
3438 | | aarch64_replace_opcode (aarch64_inst *inst, const aarch64_opcode *opcode) |
3439 | 87.7k | { |
3440 | 87.7k | int i; |
3441 | 87.7k | const aarch64_opcode *old = inst->opcode; |
3442 | | |
3443 | 87.7k | inst->opcode = opcode; |
3444 | | |
3445 | | /* Update the operand types. */ |
3446 | 326k | for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i) |
3447 | 326k | { |
3448 | 326k | inst->operands[i].type = opcode->operands[i]; |
3449 | 326k | if (opcode->operands[i] == AARCH64_OPND_NIL) |
3450 | 87.7k | break; |
3451 | 326k | } |
3452 | | |
3453 | 87.7k | DEBUG_TRACE ("replace %s with %s", old->name, opcode->name); |
3454 | | |
3455 | 87.7k | return old; |
3456 | 87.7k | } |
3457 | | |
3458 | | int |
3459 | | aarch64_operand_index (const enum aarch64_opnd *operands, enum aarch64_opnd operand) |
3460 | 196k | { |
3461 | 196k | int i; |
3462 | 217k | for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i) |
3463 | 217k | if (operands[i] == operand) |
3464 | 194k | return i; |
3465 | 23.1k | else if (operands[i] == AARCH64_OPND_NIL) |
3466 | 2.78k | break; |
3467 | 2.78k | return -1; |
3468 | 196k | } |
3469 | | |
3470 | | /* R0...R30, followed by FOR31. */ |
3471 | | #define BANK(R, FOR31) \ |
3472 | | { R (0), R (1), R (2), R (3), R (4), R (5), R (6), R (7), \ |
3473 | | R (8), R (9), R (10), R (11), R (12), R (13), R (14), R (15), \ |
3474 | | R (16), R (17), R (18), R (19), R (20), R (21), R (22), R (23), \ |
3475 | | R (24), R (25), R (26), R (27), R (28), R (29), R (30), FOR31 } |
3476 | | /* [0][0] 32-bit integer regs with sp Wn |
3477 | | [0][1] 64-bit integer regs with sp Xn sf=1 |
3478 | | [1][0] 32-bit integer regs with #0 Wn |
3479 | | [1][1] 64-bit integer regs with #0 Xn sf=1 */ |
3480 | | static const char *int_reg[2][2][32] = { |
3481 | | #define R32(X) "w" #X |
3482 | | #define R64(X) "x" #X |
3483 | | { BANK (R32, "wsp"), BANK (R64, "sp") }, |
3484 | | { BANK (R32, "wzr"), BANK (R64, "xzr") } |
3485 | | #undef R64 |
3486 | | #undef R32 |
3487 | | }; |
3488 | | |
3489 | | /* Names of the SVE vector registers, first with .S suffixes, |
3490 | | then with .D suffixes. */ |
3491 | | |
3492 | | static const char *sve_reg[2][32] = { |
3493 | | #define ZS(X) "z" #X ".s" |
3494 | | #define ZD(X) "z" #X ".d" |
3495 | | BANK (ZS, ZS (31)), BANK (ZD, ZD (31)) |
3496 | | #undef ZD |
3497 | | #undef ZS |
3498 | | }; |
3499 | | #undef BANK |
3500 | | |
3501 | | /* Return the integer register name. |
3502 | | If SP_REG_P is not 0, R31 is an SP reg; otherwise R31 is the zero reg. */
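 | | /* E.g. regno 5 with an X qualifier yields "x5"; regno 31 yields
 | | "sp" when SP_REG_P is non-zero and "xzr" otherwise. */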
3503 | | |
3504 | | static inline const char * |
3505 | | get_int_reg_name (int regno, aarch64_opnd_qualifier_t qualifier, int sp_reg_p) |
3506 | 3.54M | { |
3507 | 3.54M | const int has_zr = sp_reg_p ? 0 : 1; |
3508 | 3.54M | const int is_64 = aarch64_get_qualifier_esize (qualifier) == 4 ? 0 : 1; |
3509 | 3.54M | return int_reg[has_zr][is_64][regno]; |
3510 | 3.54M | } |
3511 | | |
3512 | | /* Like get_int_reg_name, but IS_64 is always 1. */ |
3513 | | |
3514 | | static inline const char * |
3515 | | get_64bit_int_reg_name (int regno, int sp_reg_p) |
3516 | 1.15M | { |
3517 | 1.15M | const int has_zr = sp_reg_p ? 0 : 1; |
3518 | 1.15M | return int_reg[has_zr][1][regno]; |
3519 | 1.15M | } |
3520 | | |
3521 | | /* Get the name of the integer offset register in OPND, using the shift type |
3522 | | to decide whether it's a word or doubleword. */ |
3523 | | |
3524 | | static inline const char * |
3525 | | get_offset_int_reg_name (const aarch64_opnd_info *opnd) |
3526 | 97.1k | { |
3527 | 97.1k | switch (opnd->shifter.kind) |
3528 | 97.1k | { |
3529 | 2.08k | case AARCH64_MOD_UXTW: |
3530 | 4.99k | case AARCH64_MOD_SXTW: |
3531 | 4.99k | return get_int_reg_name (opnd->addr.offset.regno, AARCH64_OPND_QLF_W, 0); |
3532 | | |
3533 | 89.5k | case AARCH64_MOD_LSL: |
3534 | 92.1k | case AARCH64_MOD_SXTX: |
3535 | 92.1k | return get_int_reg_name (opnd->addr.offset.regno, AARCH64_OPND_QLF_X, 0); |
3536 | | |
3537 | 0 | default: |
3538 | 0 | abort (); |
3539 | 97.1k | } |
3540 | 97.1k | } |
3541 | | |
3542 | | /* Get the name of the SVE vector offset register in OPND, using the operand |
3543 | | qualifier to decide whether the suffix should be .S or .D. */ |
3544 | | |
3545 | | static inline const char * |
3546 | | get_addr_sve_reg_name (int regno, aarch64_opnd_qualifier_t qualifier) |
3547 | 98.1k | { |
3548 | 98.1k | assert (qualifier == AARCH64_OPND_QLF_S_S |
3549 | 98.1k | || qualifier == AARCH64_OPND_QLF_S_D); |
3550 | 98.1k | return sve_reg[qualifier == AARCH64_OPND_QLF_S_D][regno]; |
3551 | 98.1k | } |
3552 | | |
3553 | | /* Types for expanding an encoded 8-bit value to a floating-point value. */ |
3554 | | |
3555 | | typedef union |
3556 | | { |
3557 | | uint64_t i; |
3558 | | double d; |
3559 | | } double_conv_t; |
3560 | | |
3561 | | typedef union |
3562 | | { |
3563 | | uint32_t i; |
3564 | | float f; |
3565 | | } single_conv_t; |
3566 | | |
3567 | | typedef union |
3568 | | { |
3569 | | uint32_t i; |
3570 | | float f; |
3571 | | } half_conv_t; |
3572 | | |
3573 | | /* IMM8 is an 8-bit floating-point constant with sign, 3-bit exponent and |
3574 | | normalized 4 bits of precision, encoded in "a:b:c:d:e:f:g:h" or FLD_imm8 |
3575 | | (depending on the type of the instruction). IMM8 will be expanded to a |
3576 | | single-precision floating-point value (SIZE == 4) or a double-precision |
3577 | | floating-point value (SIZE == 8). A half-precision floating-point value |
3578 | | (SIZE == 2) is expanded to a single-precision floating-point value. The |
3579 | | expanded value is returned. */ |
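 | | /* For example, an IMM8 of 0x70 expands to 0x3f800000 (1.0f) for
 | | SIZE == 4 and to 0x3ff0000000000000 (1.0) for SIZE == 8. */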
3580 | | |
3581 | | static uint64_t |
3582 | | expand_fp_imm (int size, uint32_t imm8) |
3583 | 1.28k | { |
3584 | 1.28k | uint64_t imm = 0; |
3585 | 1.28k | uint32_t imm8_7, imm8_6_0, imm8_6, imm8_6_repl4; |
3586 | | |
3587 | 1.28k | imm8_7 = (imm8 >> 7) & 0x01; /* imm8<7> */ |
3588 | 1.28k | imm8_6_0 = imm8 & 0x7f; /* imm8<6:0> */ |
3589 | 1.28k | imm8_6 = imm8_6_0 >> 6; /* imm8<6> */ |
3590 | 1.28k | imm8_6_repl4 = (imm8_6 << 3) | (imm8_6 << 2) |
3591 | 1.28k | | (imm8_6 << 1) | imm8_6; /* Replicate(imm8<6>,4) */ |
3592 | 1.28k | if (size == 8) |
3593 | 275 | { |
3594 | 275 | imm = (imm8_7 << (63-32)) /* imm8<7> */ |
3595 | 275 | | ((imm8_6 ^ 1) << (62-32)) /* NOT(imm8<6) */ |
3596 | 275 | | (imm8_6_repl4 << (58-32)) | (imm8_6 << (57-32)) |
3597 | 275 | | (imm8_6 << (56-32)) | (imm8_6 << (55-32)) /* Replicate(imm8<6>,7) */ |
3598 | 275 | | (imm8_6_0 << (48-32)); /* imm8<6>:imm8<5:0> */ |
3599 | 275 | imm <<= 32; |
3600 | 275 | } |
3601 | 1.00k | else if (size == 4 || size == 2) |
3602 | 1.00k | { |
3603 | 1.00k | imm = (imm8_7 << 31) /* imm8<7> */ |
3604 | 1.00k | | ((imm8_6 ^ 1) << 30) /* NOT(imm8<6>) */ |
3605 | 1.00k | | (imm8_6_repl4 << 26) /* Replicate(imm8<6>,4) */ |
3606 | 1.00k | | (imm8_6_0 << 19); /* imm8<6>:imm8<5:0> */ |
3607 | 1.00k | } |
3608 | 0 | else |
3609 | 0 | { |
3610 | | /* An unsupported size. */ |
3611 | 0 | assert (0); |
3612 | 0 | } |
3613 | | |
3614 | 1.28k | return imm; |
3615 | 1.28k | } |
3616 | | |
3617 | | /* Return a string based on FMT with the register style applied. */ |
3618 | | |
3619 | | static const char * |
3620 | | style_reg (struct aarch64_styler *styler, const char *fmt, ...) |
3621 | 7.79M | { |
3622 | 7.79M | const char *txt; |
3623 | 7.79M | va_list ap; |
3624 | | |
3625 | 7.79M | va_start (ap, fmt); |
3626 | 7.79M | txt = styler->apply_style (styler, dis_style_register, fmt, ap); |
3627 | 7.79M | va_end (ap); |
3628 | | |
3629 | 7.79M | return txt; |
3630 | 7.79M | } |
3631 | | |
3632 | | /* Return a string based on FMT with the immediate style applied. */ |
3633 | | |
3634 | | static const char * |
3635 | | style_imm (struct aarch64_styler *styler, const char *fmt, ...) |
3636 | 3.38M | { |
3637 | 3.38M | const char *txt; |
3638 | 3.38M | va_list ap; |
3639 | | |
3640 | 3.38M | va_start (ap, fmt); |
3641 | 3.38M | txt = styler->apply_style (styler, dis_style_immediate, fmt, ap); |
3642 | 3.38M | va_end (ap); |
3643 | | |
3644 | 3.38M | return txt; |
3645 | 3.38M | } |
3646 | | |
3647 | | /* Return a string based on FMT with the sub-mnemonic style applied. */ |
3648 | | |
3649 | | static const char * |
3650 | | style_sub_mnem (struct aarch64_styler *styler, const char *fmt, ...) |
3651 | 579k | { |
3652 | 579k | const char *txt; |
3653 | 579k | va_list ap; |
3654 | | |
3655 | 579k | va_start (ap, fmt); |
3656 | 579k | txt = styler->apply_style (styler, dis_style_sub_mnemonic, fmt, ap); |
3657 | 579k | va_end (ap); |
3658 | | |
3659 | 579k | return txt; |
3660 | 579k | } |
3661 | | |
3662 | | /* Return a string based on FMT with the address style applied. */ |
3663 | | |
3664 | | static const char * |
3665 | | style_addr (struct aarch64_styler *styler, const char *fmt, ...) |
3666 | 1.08M | { |
3667 | 1.08M | const char *txt; |
3668 | 1.08M | va_list ap; |
3669 | | |
3670 | 1.08M | va_start (ap, fmt); |
3671 | 1.08M | txt = styler->apply_style (styler, dis_style_address, fmt, ap); |
3672 | 1.08M | va_end (ap); |
3673 | | |
3674 | 1.08M | return txt; |
3675 | 1.08M | } |
3676 | | |
3677 | | /* Produce the string representation of the register list operand *OPND |
3678 | | in the buffer pointed by BUF of size SIZE. PREFIX is the part of |
3679 | | the register name that comes before the register number, such as "v". */ |
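 | | /* E.g. three consecutive 16B registers starting at v1 print as
 | | {v1.16b-v3.16b}, while a two-register list with stride 2 prints
 | | in full, e.g. {z0.s, z2.s}. */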
3680 | | static void |
3681 | | print_register_list (char *buf, size_t size, const aarch64_opnd_info *opnd, |
3682 | | const char *prefix, struct aarch64_styler *styler) |
3683 | 260k | { |
3684 | 260k | const int mask = (prefix[0] == 'p' ? 15 : 31); |
3685 | 260k | const int num_regs = opnd->reglist.num_regs; |
3686 | 260k | const int stride = opnd->reglist.stride; |
3687 | 260k | const int first_reg = opnd->reglist.first_regno; |
3688 | 260k | const int last_reg = (first_reg + (num_regs - 1) * stride) & mask; |
3689 | 260k | const char *qlf_name = aarch64_get_qualifier_name (opnd->qualifier); |
3690 | 260k | char tb[16]; /* Temporary buffer. */ |
3691 | | |
3692 | 260k | assert (opnd->type != AARCH64_OPND_LEt || opnd->reglist.has_index); |
3693 | 260k | assert (num_regs >= 1 && num_regs <= 4); |
3694 | | |
3695 | | /* Prepare the index if any. */ |
3696 | 260k | if (opnd->reglist.has_index) |
3697 | | /* PR 21096: The %100 is to silence a warning about possible truncation. */ |
3698 | 21.2k | snprintf (tb, sizeof (tb), "[%s]", |
3699 | 21.2k | style_imm (styler, "%" PRIi64, (opnd->reglist.index % 100))); |
3700 | 239k | else |
3701 | 239k | tb[0] = '\0'; |
3702 | | |
3703 | | /* The hyphenated form is preferred for disassembly if there is
3704 | | more than one register in the list and the register numbers
3705 | | are monotonically increasing in increments of one. */
3706 | 260k | if (stride == 1 && num_regs > 1 |
3707 | 260k | && ((opnd->type != AARCH64_OPND_SME_Zt2) |
3708 | 68.6k | && (opnd->type != AARCH64_OPND_SME_Zt3) |
3709 | 68.6k | && (opnd->type != AARCH64_OPND_SME_Zt4))) |
3710 | 66.8k | snprintf (buf, size, "{%s-%s}%s", |
3711 | 66.8k | style_reg (styler, "%s%d.%s", prefix, first_reg, qlf_name), |
3712 | 66.8k | style_reg (styler, "%s%d.%s", prefix, last_reg, qlf_name), tb); |
3713 | 193k | else |
3714 | 193k | { |
3715 | 193k | const int reg0 = first_reg; |
3716 | 193k | const int reg1 = (first_reg + stride) & mask; |
3717 | 193k | const int reg2 = (first_reg + stride * 2) & mask; |
3718 | 193k | const int reg3 = (first_reg + stride * 3) & mask; |
3719 | | |
3720 | 193k | switch (num_regs) |
3721 | 193k | { |
3722 | 185k | case 1: |
3723 | 185k | snprintf (buf, size, "{%s}%s", |
3724 | 185k | style_reg (styler, "%s%d.%s", prefix, reg0, qlf_name), |
3725 | 185k | tb); |
3726 | 185k | break; |
3727 | 5.54k | case 2: |
3728 | 5.54k | snprintf (buf, size, "{%s, %s}%s", |
3729 | 5.54k | style_reg (styler, "%s%d.%s", prefix, reg0, qlf_name), |
3730 | 5.54k | style_reg (styler, "%s%d.%s", prefix, reg1, qlf_name), |
3731 | 5.54k | tb); |
3732 | 5.54k | break; |
3733 | 323 | case 3: |
3734 | 323 | snprintf (buf, size, "{%s, %s, %s}%s", |
3735 | 323 | style_reg (styler, "%s%d.%s", prefix, reg0, qlf_name), |
3736 | 323 | style_reg (styler, "%s%d.%s", prefix, reg1, qlf_name), |
3737 | 323 | style_reg (styler, "%s%d.%s", prefix, reg2, qlf_name), |
3738 | 323 | tb); |
3739 | 323 | break; |
3740 | 2.45k | case 4: |
3741 | 2.45k | snprintf (buf, size, "{%s, %s, %s, %s}%s", |
3742 | 2.45k | style_reg (styler, "%s%d.%s", prefix, reg0, qlf_name), |
3743 | 2.45k | style_reg (styler, "%s%d.%s", prefix, reg1, qlf_name), |
3744 | 2.45k | style_reg (styler, "%s%d.%s", prefix, reg2, qlf_name), |
3745 | 2.45k | style_reg (styler, "%s%d.%s", prefix, reg3, qlf_name), |
3746 | 2.45k | tb); |
3747 | 2.45k | break; |
3748 | 193k | } |
3749 | 193k | } |
3750 | 260k | } |
3751 | | |
3752 | | /* Print the register+immediate address in OPND to BUF, which has SIZE |
3753 | | characters. BASE is the name of the base register. */ |
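 | | /* E.g. pre-indexed writeback prints as [x0, #16]!, post-indexed as
 | | [x0], #16, and a MUL VL scaled offset as [x0, #3, mul vl]. */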
3754 | | |
3755 | | static void |
3756 | | print_immediate_offset_address (char *buf, size_t size, |
3757 | | const aarch64_opnd_info *opnd, |
3758 | | const char *base, |
3759 | | struct aarch64_styler *styler) |
3760 | 594k | { |
3761 | 594k | if (opnd->addr.writeback) |
3762 | 189k | { |
3763 | 189k | if (opnd->addr.preind) |
3764 | 94.8k | { |
3765 | 94.8k | if (opnd->type == AARCH64_OPND_ADDR_SIMM10 && !opnd->addr.offset.imm) |
3766 | 78 | snprintf (buf, size, "[%s]!", style_reg (styler, base)); |
3767 | 94.7k | else |
3768 | 94.7k | snprintf (buf, size, "[%s, %s]!", |
3769 | 94.7k | style_reg (styler, base), |
3770 | 94.7k | style_imm (styler, "#%d", opnd->addr.offset.imm)); |
3771 | 94.8k | } |
3772 | 94.2k | else |
3773 | 94.2k | snprintf (buf, size, "[%s], %s", |
3774 | 94.2k | style_reg (styler, base), |
3775 | 94.2k | style_imm (styler, "#%d", opnd->addr.offset.imm)); |
3776 | 189k | } |
3777 | 405k | else |
3778 | 405k | { |
3779 | 405k | if (opnd->shifter.operator_present) |
3780 | 56.0k | { |
3781 | 56.0k | assert (opnd->shifter.kind == AARCH64_MOD_MUL_VL); |
3782 | 56.0k | snprintf (buf, size, "[%s, %s, %s]", |
3783 | 56.0k | style_reg (styler, base), |
3784 | 56.0k | style_imm (styler, "#%d", opnd->addr.offset.imm), |
3785 | 56.0k | style_sub_mnem (styler, "mul vl")); |
3786 | 56.0k | } |
3787 | 348k | else if (opnd->addr.offset.imm) |
3788 | 290k | snprintf (buf, size, "[%s, %s]", |
3789 | 290k | style_reg (styler, base), |
3790 | 290k | style_imm (styler, "#%d", opnd->addr.offset.imm)); |
3791 | 58.4k | else |
3792 | 58.4k | snprintf (buf, size, "[%s]", style_reg (styler, base)); |
3793 | 405k | } |
3794 | 594k | } |
3795 | | |
3796 | | /* Produce the string representation of the register offset address operand |
3797 | | *OPND in the buffer pointed by BUF of size SIZE. BASE and OFFSET are |
3798 | | the names of the base and offset registers. */ |
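 | | /* E.g. [x0, w1, sxtw #2] when an extend and amount are present,
 | | [x0, x1] for LSL with a zero amount, and [x0, x1, lsl #0] for the
 | | 8-bit load/store case where a zero amount is explicitly present. */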
3799 | | static void |
3800 | | print_register_offset_address (char *buf, size_t size, |
3801 | | const aarch64_opnd_info *opnd, |
3802 | | const char *base, const char *offset, |
3803 | | struct aarch64_styler *styler) |
3804 | 182k | { |
3805 | 182k | char tb[32]; /* Temporary buffer. */ |
3806 | 182k | bool print_extend_p = true; |
3807 | 182k | bool print_amount_p = true; |
3808 | 182k | const char *shift_name = aarch64_operand_modifiers[opnd->shifter.kind].name; |
3809 | | |
3810 | 182k | if (!opnd->shifter.amount && (opnd->qualifier != AARCH64_OPND_QLF_S_B |
3811 | 93.9k | || !opnd->shifter.amount_present)) |
3812 | 92.7k | { |
3813 | | /* Do not print the shift/extend amount when the amount is zero and
3814 | | it is not the special case of an 8-bit load/store instruction. */
3815 | 92.7k | print_amount_p = false; |
3816 | | /* Likewise, no need to print the shift operator LSL in such a |
3817 | | situation. */ |
3818 | 92.7k | if (opnd->shifter.kind == AARCH64_MOD_LSL) |
3819 | 54.3k | print_extend_p = false; |
3820 | 92.7k | } |
3821 | | |
3822 | | /* Prepare for the extend/shift. */ |
3823 | 182k | if (print_extend_p) |
3824 | 128k | { |
3825 | 128k | if (print_amount_p) |
3826 | 89.7k | snprintf (tb, sizeof (tb), ", %s %s", |
3827 | 89.7k | style_sub_mnem (styler, shift_name), |
3828 | 89.7k | style_imm (styler, "#%" PRIi64, |
3829 | | /* PR 21096: The %100 is to silence a warning about possible truncation. */ |
3830 | 89.7k | (opnd->shifter.amount % 100))); |
3831 | 38.3k | else |
3832 | 38.3k | snprintf (tb, sizeof (tb), ", %s", |
3833 | 38.3k | style_sub_mnem (styler, shift_name)); |
3834 | 128k | } |
3835 | 54.3k | else |
3836 | 54.3k | tb[0] = '\0'; |
3837 | | |
3838 | 182k | snprintf (buf, size, "[%s, %s%s]", style_reg (styler, base), |
3839 | 182k | style_reg (styler, offset), tb); |
3840 | 182k | } |
3841 | | |
3842 | | /* Print ZA tiles from imm8 in ZERO instruction. |
3843 | | |
3844 | | The preferred disassembly of this instruction uses the shortest list of tile |
3845 | | names that represent the encoded immediate mask. |
3846 | | |
3847 | | For example: |
3848 | | * An all-ones immediate is disassembled as {ZA}. |
3849 | | * An all-zeros immediate is disassembled as an empty list { }. |
3850 | | */ |
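 | | /* Partial masks use the widest tile names whose bits are all set:
 | | e.g. 0x55 prints as {za0.h} and 0x81 prints as {za0.d, za7.d}. */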
3851 | | static void |
3852 | | print_sme_za_list (char *buf, size_t size, int mask, |
3853 | | struct aarch64_styler *styler) |
3854 | 27 | { |
3855 | 27 | const char* zan[] = { "za", "za0.h", "za1.h", "za0.s", |
3856 | 27 | "za1.s", "za2.s", "za3.s", "za0.d", |
3857 | 27 | "za1.d", "za2.d", "za3.d", "za4.d", |
3858 | 27 | "za5.d", "za6.d", "za7.d", " " }; |
3859 | 27 | const int zan_v[] = { 0xff, 0x55, 0xaa, 0x11, |
3860 | 27 | 0x22, 0x44, 0x88, 0x01, |
3861 | 27 | 0x02, 0x04, 0x08, 0x10, |
3862 | 27 | 0x20, 0x40, 0x80, 0x00 }; |
3863 | 27 | int i, k; |
3864 | 27 | const int ZAN_SIZE = sizeof(zan) / sizeof(zan[0]); |
3865 | | |
3866 | 27 | k = snprintf (buf, size, "{"); |
3867 | 207 | for (i = 0; i < ZAN_SIZE; i++) |
3868 | 207 | { |
3869 | 207 | if ((mask & zan_v[i]) == zan_v[i]) |
3870 | 69 | { |
3871 | 69 | mask &= ~zan_v[i]; |
3872 | 69 | if (k > 1) |
3873 | 47 | k += snprintf (buf + k, size - k, ", "); |
3874 | | |
3875 | 69 | k += snprintf (buf + k, size - k, "%s", style_reg (styler, zan[i])); |
3876 | 69 | } |
3877 | 207 | if (mask == 0) |
3878 | 27 | break; |
3879 | 207 | } |
3880 | 27 | snprintf (buf + k, size - k, "}"); |
3881 | 27 | } |
3882 | | |
3883 | | /* Generate the string representation of the operand OPNDS[IDX] for OPCODE |
3884 | | in *BUF. The caller should pass in the maximum size of *BUF in SIZE. |
3885 | | PC, PCREL_P and ADDRESS are used to pass in and return information about |
3886 | | the PC-relative address calculation, where the PC value is passed in |
3887 | | PC. If the operand is PC-relative, *PCREL_P (if PCREL_P non-NULL)
3888 | | will return 1 and *ADDRESS (if ADDRESS non-NULL) will return the |
3889 | | calculated address; otherwise, *PCREL_P (if PCREL_P non-NULL) returns 0. |
3890 | | |
3891 | | The function serves both the disassembler and the assembler diagnostics |
3892 | | issuer, which is the reason why it lives in this file. */ |
3893 | | |
3894 | | void |
3895 | | aarch64_print_operand (char *buf, size_t size, bfd_vma pc, |
3896 | | const aarch64_opcode *opcode, |
3897 | | const aarch64_opnd_info *opnds, int idx, int *pcrel_p, |
3898 | | bfd_vma *address, char** notes, |
3899 | | char *comment, size_t comment_size, |
3900 | | aarch64_feature_set features, |
3901 | | struct aarch64_styler *styler) |
3902 | 10.6M | { |
3903 | 10.6M | unsigned int i, num_conds; |
3904 | 10.6M | const char *name = NULL; |
3905 | 10.6M | const aarch64_opnd_info *opnd = opnds + idx; |
3906 | 10.6M | enum aarch64_modifier_kind kind; |
3907 | 10.6M | uint64_t addr, enum_value; |
3908 | | |
3909 | 10.6M | if (comment != NULL) |
3910 | 10.6M | { |
3911 | 10.6M | assert (comment_size > 0); |
3912 | 10.6M | comment[0] = '\0'; |
3913 | 10.6M | } |
3914 | 0 | else |
3915 | 0 | assert (comment_size == 0); |
3916 | | |
3917 | 10.6M | buf[0] = '\0'; |
3918 | 10.6M | if (pcrel_p) |
3919 | 10.6M | *pcrel_p = 0; |
3920 | | |
3921 | 10.6M | switch (opnd->type) |
3922 | 10.6M | { |
3923 | 948k | case AARCH64_OPND_Rd: |
3924 | 1.42M | case AARCH64_OPND_Rn: |
3925 | 1.48M | case AARCH64_OPND_Rm: |
3926 | 2.40M | case AARCH64_OPND_Rt: |
3927 | 2.58M | case AARCH64_OPND_Rt2: |
3928 | 2.67M | case AARCH64_OPND_Rs: |
3929 | 2.70M | case AARCH64_OPND_Ra: |
3930 | 2.70M | case AARCH64_OPND_Rt_LS64: |
3931 | 2.70M | case AARCH64_OPND_Rt_SYS: |
3932 | 2.70M | case AARCH64_OPND_PAIRREG: |
3933 | 2.70M | case AARCH64_OPND_PAIRREG_OR_XZR: |
3934 | 2.70M | case AARCH64_OPND_SVE_Rm: |
3935 | 2.70M | case AARCH64_OPND_LSE128_Rt: |
3936 | 2.70M | case AARCH64_OPND_LSE128_Rt2: |
3937 | | /* The optionality of <Xt> in e.g. IC <ic_op>{, <Xt>} is determined by
3938 | | the <ic_op>; therefore we use opnd->present to override the
3939 | | generic optionality information. */
3940 | 2.70M | if (opnd->type == AARCH64_OPND_Rt_SYS) |
3941 | 208 | { |
3942 | 208 | if (!opnd->present) |
3943 | 103 | break; |
3944 | 208 | } |
3945 | | /* Omit the operand, e.g. RET. */ |
3946 | 2.70M | else if (optional_operand_p (opcode, idx) |
3947 | 2.70M | && (opnd->reg.regno |
3948 | 4.79k | == get_optional_operand_default_value (opcode))) |
3949 | 3.62k | break; |
3950 | 2.70M | assert (opnd->qualifier == AARCH64_OPND_QLF_W |
3951 | 2.70M | || opnd->qualifier == AARCH64_OPND_QLF_X); |
3952 | 2.70M | snprintf (buf, size, "%s", |
3953 | 2.70M | style_reg (styler, get_int_reg_name (opnd->reg.regno, |
3954 | 2.70M | opnd->qualifier, 0))); |
3955 | 2.70M | break; |
3956 | | |
3957 | 191k | case AARCH64_OPND_Rd_SP: |
3958 | 408k | case AARCH64_OPND_Rn_SP: |
3959 | 414k | case AARCH64_OPND_Rt_SP: |
3960 | 415k | case AARCH64_OPND_SVE_Rn_SP: |
3961 | 416k | case AARCH64_OPND_Rm_SP: |
3962 | 416k | assert (opnd->qualifier == AARCH64_OPND_QLF_W |
3963 | 416k | || opnd->qualifier == AARCH64_OPND_QLF_WSP |
3964 | 416k | || opnd->qualifier == AARCH64_OPND_QLF_X |
3965 | 416k | || opnd->qualifier == AARCH64_OPND_QLF_SP); |
3966 | 416k | snprintf (buf, size, "%s", |
3967 | 416k | style_reg (styler, get_int_reg_name (opnd->reg.regno, |
3968 | 416k | opnd->qualifier, 1))); |
3969 | 416k | break; |
3970 | | |
3971 | 21.0k | case AARCH64_OPND_Rm_EXT: |
3972 | 21.0k | kind = opnd->shifter.kind; |
3973 | 21.0k | assert (idx == 1 || idx == 2); |
3974 | 21.0k | if ((aarch64_stack_pointer_p (opnds) |
3975 | 21.0k | || (idx == 2 && aarch64_stack_pointer_p (opnds + 1))) |
3976 | 21.0k | && ((opnd->qualifier == AARCH64_OPND_QLF_W |
3977 | 750 | && opnds[0].qualifier == AARCH64_OPND_QLF_W |
3978 | 750 | && kind == AARCH64_MOD_UXTW) |
3979 | 750 | || (opnd->qualifier == AARCH64_OPND_QLF_X |
3980 | 643 | && kind == AARCH64_MOD_UXTX))) |
3981 | 170 | { |
3982 | | /* 'LSL' is the preferred form in this case. */ |
3983 | 170 | kind = AARCH64_MOD_LSL; |
3984 | 170 | if (opnd->shifter.amount == 0) |
3985 | 106 | { |
3986 | | /* Shifter omitted. */ |
3987 | 106 | snprintf (buf, size, "%s", |
3988 | 106 | style_reg (styler, |
3989 | 106 | get_int_reg_name (opnd->reg.regno, |
3990 | 106 | opnd->qualifier, 0))); |
3991 | 106 | break; |
3992 | 106 | } |
3993 | 170 | } |
3994 | 20.9k | if (opnd->shifter.amount) |
3995 | 16.3k | snprintf (buf, size, "%s, %s %s", |
3996 | 16.3k | style_reg (styler, get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0)), |
3997 | 16.3k | style_sub_mnem (styler, aarch64_operand_modifiers[kind].name), |
3998 | 16.3k | style_imm (styler, "#%" PRIi64, opnd->shifter.amount)); |
3999 | 4.56k | else |
4000 | 4.56k | snprintf (buf, size, "%s, %s", |
4001 | 4.56k | style_reg (styler, get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0)), |
4002 | 4.56k | style_sub_mnem (styler, aarch64_operand_modifiers[kind].name)); |
4003 | 20.9k | break; |
4004 | | |
4005 | 287k | case AARCH64_OPND_Rm_SFT: |
4006 | 287k | assert (opnd->qualifier == AARCH64_OPND_QLF_W |
4007 | 287k | || opnd->qualifier == AARCH64_OPND_QLF_X); |
4008 | 287k | if (opnd->shifter.amount == 0 && opnd->shifter.kind == AARCH64_MOD_LSL) |
4009 | 56.2k | snprintf (buf, size, "%s", |
4010 | 56.2k | style_reg (styler, get_int_reg_name (opnd->reg.regno, |
4011 | 56.2k | opnd->qualifier, 0))); |
4012 | 231k | else |
4013 | 231k | snprintf (buf, size, "%s, %s %s", |
4014 | 231k | style_reg (styler, get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0)), |
4015 | 231k | style_sub_mnem (styler, aarch64_operand_modifiers[opnd->shifter.kind].name), |
4016 | 231k | style_imm (styler, "#%" PRIi64, opnd->shifter.amount)); |
4017 | 287k | break; |
4018 | | |
4019 | 881 | case AARCH64_OPND_Rm_LSL: |
4020 | 881 | assert (opnd->qualifier == AARCH64_OPND_QLF_X); |
4021 | 881 | assert (opnd->shifter.kind == AARCH64_MOD_LSL); |
4022 | 881 | if (opnd->shifter.amount == 0) |
4023 | 129 | snprintf (buf, size, "%s", |
4024 | 129 | style_reg (styler, get_int_reg_name (opnd->reg.regno, |
4025 | 129 | opnd->qualifier, 0))); |
4026 | 752 | else |
4027 | 752 | snprintf (buf, size, "%s, %s %s", |
4028 | 752 | style_reg (styler, get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0)), |
4029 | 752 | style_sub_mnem (styler, aarch64_operand_modifiers[opnd->shifter.kind].name), |
4030 | 752 | style_imm (styler, "#%" PRIi64, opnd->shifter.amount)); |
4031 | 881 | break; |
4032 | | |
4033 | 28.5k | case AARCH64_OPND_Fd: |
4034 | 59.7k | case AARCH64_OPND_Fn: |
4035 | 87.9k | case AARCH64_OPND_Fm: |
4036 | 109k | case AARCH64_OPND_Fa: |
4037 | 497k | case AARCH64_OPND_Ft: |
4038 | 697k | case AARCH64_OPND_Ft2: |
4039 | 711k | case AARCH64_OPND_Sd: |
4040 | 726k | case AARCH64_OPND_Sn: |
4041 | 730k | case AARCH64_OPND_Sm: |
4042 | 730k | case AARCH64_OPND_SVE_VZn: |
4043 | 732k | case AARCH64_OPND_SVE_Vd: |
4044 | 732k | case AARCH64_OPND_SVE_Vm: |
4045 | 732k | case AARCH64_OPND_SVE_Vn: |
4046 | 732k | snprintf (buf, size, "%s", |
4047 | 732k | style_reg (styler, "%s%d", |
4048 | 732k | aarch64_get_qualifier_name (opnd->qualifier), |
4049 | 732k | opnd->reg.regno)); |
4050 | 732k | break; |
4051 | | |
4052 | 9.56k | case AARCH64_OPND_Va: |
4053 | 173k | case AARCH64_OPND_Vd: |
4054 | 315k | case AARCH64_OPND_Vn: |
4055 | 421k | case AARCH64_OPND_Vm: |
4056 | 421k | snprintf (buf, size, "%s", |
4057 | 421k | style_reg (styler, "v%d.%s", opnd->reg.regno, |
4058 | 421k | aarch64_get_qualifier_name (opnd->qualifier))); |
4059 | 421k | break; |
4060 | | |
4061 | 1.30k | case AARCH64_OPND_Ed: |
4062 | 4.84k | case AARCH64_OPND_En: |
4063 | 15.8k | case AARCH64_OPND_Em: |
4064 | 47.1k | case AARCH64_OPND_Em16: |
4065 | 47.1k | case AARCH64_OPND_SM3_IMM2: |
4066 | 47.1k | snprintf (buf, size, "%s[%s]", |
4067 | 47.1k | style_reg (styler, "v%d.%s", opnd->reglane.regno, |
4068 | 47.1k | aarch64_get_qualifier_name (opnd->qualifier)), |
4069 | 47.1k | style_imm (styler, "%" PRIi64, opnd->reglane.index)); |
4070 | 47.1k | break; |
4071 | | |
4072 | 32 | case AARCH64_OPND_VdD1: |
4073 | 405 | case AARCH64_OPND_VnD1: |
4074 | 405 | snprintf (buf, size, "%s[%s]", |
4075 | 405 | style_reg (styler, "v%d.d", opnd->reg.regno), |
4076 | 405 | style_imm (styler, "1")); |
4077 | 405 | break; |
4078 | | |
4079 | 13.0k | case AARCH64_OPND_LVn: |
4080 | 26.4k | case AARCH64_OPND_LVt: |
4081 | 27.9k | case AARCH64_OPND_LVt_AL: |
4082 | 49.1k | case AARCH64_OPND_LEt: |
4083 | 49.1k | print_register_list (buf, size, opnd, "v", styler); |
4084 | 49.1k | break; |
4085 | | |
4086 | 56.9k | case AARCH64_OPND_SVE_Pd: |
4087 | 461k | case AARCH64_OPND_SVE_Pg3: |
4088 | 461k | case AARCH64_OPND_SVE_Pg4_5: |
4089 | 471k | case AARCH64_OPND_SVE_Pg4_10: |
4090 | 479k | case AARCH64_OPND_SVE_Pg4_16: |
4091 | 482k | case AARCH64_OPND_SVE_Pm: |
4092 | 485k | case AARCH64_OPND_SVE_Pn: |
4093 | 486k | case AARCH64_OPND_SVE_Pt: |
4094 | 527k | case AARCH64_OPND_SME_Pm: |
4095 | 527k | if (opnd->qualifier == AARCH64_OPND_QLF_NIL) |
4096 | 89.1k | snprintf (buf, size, "%s", |
4097 | 89.1k | style_reg (styler, "p%d", opnd->reg.regno)); |
4098 | 437k | else if (opnd->qualifier == AARCH64_OPND_QLF_P_Z |
4099 | 437k | || opnd->qualifier == AARCH64_OPND_QLF_P_M) |
4100 | 376k | snprintf (buf, size, "%s", |
4101 | 376k | style_reg (styler, "p%d/%s", opnd->reg.regno, |
4102 | 376k | aarch64_get_qualifier_name (opnd->qualifier))); |
4103 | 61.8k | else |
4104 | 61.8k | snprintf (buf, size, "%s", |
4105 | 61.8k | style_reg (styler, "p%d.%s", opnd->reg.regno, |
4106 | 61.8k | aarch64_get_qualifier_name (opnd->qualifier))); |
4107 | 527k | break; |
4108 | | |
4109 | 0 | case AARCH64_OPND_SVE_PNd: |
4110 | 0 | case AARCH64_OPND_SVE_PNg4_10: |
4111 | 0 | case AARCH64_OPND_SVE_PNn: |
4112 | 0 | case AARCH64_OPND_SVE_PNt: |
4113 | 931 | case AARCH64_OPND_SME_PNd3: |
4114 | 19.9k | case AARCH64_OPND_SME_PNg3: |
4115 | 19.9k | case AARCH64_OPND_SME_PNn: |
4116 | 19.9k | if (opnd->qualifier == AARCH64_OPND_QLF_NIL) |
4117 | 7.27k | snprintf (buf, size, "%s", |
4118 | 7.27k | style_reg (styler, "pn%d", opnd->reg.regno)); |
4119 | 12.6k | else if (opnd->qualifier == AARCH64_OPND_QLF_P_Z |
4120 | 12.6k | || opnd->qualifier == AARCH64_OPND_QLF_P_M) |
4121 | 11.7k | snprintf (buf, size, "%s", |
4122 | 11.7k | style_reg (styler, "pn%d/%s", opnd->reg.regno, |
4123 | 11.7k | aarch64_get_qualifier_name (opnd->qualifier))); |
4124 | 953 | else |
4125 | 953 | snprintf (buf, size, "%s", |
4126 | 953 | style_reg (styler, "pn%d.%s", opnd->reg.regno, |
4127 | 953 | aarch64_get_qualifier_name (opnd->qualifier))); |
4128 | 19.9k | break; |
4129 | | |
4130 | 248 | case AARCH64_OPND_SME_Pdx2: |
4131 | 252 | case AARCH64_OPND_SME_PdxN: |
4132 | 252 | print_register_list (buf, size, opnd, "p", styler); |
4133 | 252 | break; |
4134 | | |
4135 | 4 | case AARCH64_OPND_SME_PNn3_INDEX1: |
4136 | 10 | case AARCH64_OPND_SME_PNn3_INDEX2: |
4137 | 10 | snprintf (buf, size, "%s[%s]", |
4138 | 10 | style_reg (styler, "pn%d", opnd->reglane.regno), |
4139 | 10 | style_imm (styler, "%" PRIi64, opnd->reglane.index)); |
4140 | 10 | break; |
4141 | | |
4142 | 5.70k | case AARCH64_OPND_SVE_Za_5: |
4143 | 11.1k | case AARCH64_OPND_SVE_Za_16: |
4144 | 246k | case AARCH64_OPND_SVE_Zd: |
4145 | 271k | case AARCH64_OPND_SVE_Zm_5: |
4146 | 446k | case AARCH64_OPND_SVE_Zm_16: |
4147 | 685k | case AARCH64_OPND_SVE_Zn: |
4148 | 688k | case AARCH64_OPND_SVE_Zt: |
4149 | 690k | case AARCH64_OPND_SME_Zm: |
4150 | 690k | if (opnd->qualifier == AARCH64_OPND_QLF_NIL) |
4151 | 2.20k | snprintf (buf, size, "%s", style_reg (styler, "z%d", opnd->reg.regno)); |
4152 | 688k | else |
4153 | 688k | snprintf (buf, size, "%s", |
4154 | 688k | style_reg (styler, "z%d.%s", opnd->reg.regno, |
4155 | 688k | aarch64_get_qualifier_name (opnd->qualifier))); |
4156 | 690k | break; |
4157 | | |
4158 | 4.94k | case AARCH64_OPND_SVE_ZnxN: |
4159 | 184k | case AARCH64_OPND_SVE_ZtxN: |
4160 | 194k | case AARCH64_OPND_SME_Zdnx2: |
4161 | 197k | case AARCH64_OPND_SME_Zdnx4: |
4162 | 198k | case AARCH64_OPND_SME_Zmx2: |
4163 | 198k | case AARCH64_OPND_SME_Zmx4: |
4164 | 201k | case AARCH64_OPND_SME_Znx2: |
4165 | 202k | case AARCH64_OPND_SME_Znx4: |
4166 | 207k | case AARCH64_OPND_SME_Ztx2_STRIDED: |
4167 | 209k | case AARCH64_OPND_SME_Ztx4_STRIDED: |
4168 | 210k | case AARCH64_OPND_SME_Zt2: |
4169 | 210k | case AARCH64_OPND_SME_Zt3: |
4170 | 211k | case AARCH64_OPND_SME_Zt4: |
4171 | 211k | print_register_list (buf, size, opnd, "z", styler); |
4172 | 211k | break; |
4173 | | |
4174 | 2.52k | case AARCH64_OPND_SVE_Zm3_INDEX: |
4175 | 5.77k | case AARCH64_OPND_SVE_Zm3_22_INDEX: |
4176 | 5.96k | case AARCH64_OPND_SVE_Zm3_19_INDEX: |
4177 | 10.2k | case AARCH64_OPND_SVE_Zm3_11_INDEX: |
4178 | 12.3k | case AARCH64_OPND_SVE_Zm4_11_INDEX: |
4179 | 15.3k | case AARCH64_OPND_SVE_Zm4_INDEX: |
4180 | 15.7k | case AARCH64_OPND_SVE_Zn_INDEX: |
4181 | 15.9k | case AARCH64_OPND_SME_Zm_INDEX1: |
4182 | 16.6k | case AARCH64_OPND_SME_Zm_INDEX2: |
4183 | 17.4k | case AARCH64_OPND_SME_Zm_INDEX3_1: |
4184 | 18.1k | case AARCH64_OPND_SME_Zm_INDEX3_2: |
4185 | 21.0k | case AARCH64_OPND_SME_Zm_INDEX3_10: |
4186 | 21.0k | case AARCH64_OPND_SVE_Zn_5_INDEX: |
4187 | 21.7k | case AARCH64_OPND_SME_Zm_INDEX4_1: |
4188 | 24.3k | case AARCH64_OPND_SME_Zm_INDEX4_10: |
4189 | 24.3k | case AARCH64_OPND_SME_Zn_INDEX1_16: |
4190 | 24.3k | case AARCH64_OPND_SME_Zn_INDEX2_15: |
4191 | 24.4k | case AARCH64_OPND_SME_Zn_INDEX2_16: |
4192 | 24.4k | case AARCH64_OPND_SME_Zn_INDEX3_14: |
4193 | 24.5k | case AARCH64_OPND_SME_Zn_INDEX3_15: |
4194 | 24.5k | case AARCH64_OPND_SME_Zn_INDEX4_14: |
4195 | 24.6k | case AARCH64_OPND_SVE_Zm_imm4: |
4196 | 24.6k | snprintf (buf, size, "%s[%s]", |
4197 | 24.6k | (opnd->qualifier == AARCH64_OPND_QLF_NIL |
4198 | 24.6k | ? style_reg (styler, "z%d", opnd->reglane.regno) |
4199 | 24.6k | : style_reg (styler, "z%d.%s", opnd->reglane.regno, |
4200 | 24.3k | aarch64_get_qualifier_name (opnd->qualifier))), |
4201 | 24.6k | style_imm (styler, "%" PRIi64, opnd->reglane.index)); |
4202 | 24.6k | break; |
4203 | | |
4204 | 30.3k | case AARCH64_OPND_SME_ZAda_2b: |
4205 | 40.1k | case AARCH64_OPND_SME_ZAda_3b: |
4206 | 40.1k | snprintf (buf, size, "%s", |
4207 | 40.1k | style_reg (styler, "za%d.%s", opnd->reg.regno, |
4208 | 40.1k | aarch64_get_qualifier_name (opnd->qualifier))); |
4209 | 40.1k | break; |
4210 | | |
4211 | 369 | case AARCH64_OPND_SME_ZA_HV_idx_src: |
4212 | 515 | case AARCH64_OPND_SME_ZA_HV_idx_srcxN: |
4213 | 10.6k | case AARCH64_OPND_SME_ZA_HV_idx_dest: |
4214 | 10.7k | case AARCH64_OPND_SME_ZA_HV_idx_destxN: |
4215 | 41.8k | case AARCH64_OPND_SME_ZA_HV_idx_ldstr: |
4216 | 41.8k | snprintf (buf, size, "%s%s[%s, %s%s%s%s%s]%s", |
4217 | 41.8k | opnd->type == AARCH64_OPND_SME_ZA_HV_idx_ldstr ? "{" : "", |
4218 | 41.8k | style_reg (styler, "za%d%c.%s", |
4219 | 41.8k | opnd->indexed_za.regno, |
4220 | 41.8k | opnd->indexed_za.v == 1 ? 'v' : 'h', |
4221 | 41.8k | aarch64_get_qualifier_name (opnd->qualifier)), |
4222 | 41.8k | style_reg (styler, "w%d", opnd->indexed_za.index.regno), |
4223 | 41.8k | style_imm (styler, "%" PRIi64, opnd->indexed_za.index.imm), |
4224 | 41.8k | opnd->indexed_za.index.countm1 ? ":" : "", |
4225 | 41.8k | (opnd->indexed_za.index.countm1 |
4226 | 41.8k | ? style_imm (styler, "%d", |
4227 | 265 | opnd->indexed_za.index.imm |
4228 | 265 | + opnd->indexed_za.index.countm1) |
4229 | 41.8k | : ""), |
4230 | 41.8k | opnd->indexed_za.group_size ? ", " : "", |
4231 | 41.8k | opnd->indexed_za.group_size == 2 |
4232 | 41.8k | ? style_sub_mnem (styler, "vgx2") |
4233 | 41.8k | : opnd->indexed_za.group_size == 4 |
4234 | 41.8k | ? style_sub_mnem (styler, "vgx4") : "", |
4235 | 41.8k | opnd->type == AARCH64_OPND_SME_ZA_HV_idx_ldstr ? "}" : ""); |
4236 | 41.8k | break; |
4237 | | |
4238 | 27 | case AARCH64_OPND_SME_list_of_64bit_tiles: |
4239 | 27 | print_sme_za_list (buf, size, opnd->imm.value, styler); |
4240 | 27 | break; |
4241 | | |
4242 | 2.20k | case AARCH64_OPND_SME_ZA_array_off1x4: |
4243 | 3.32k | case AARCH64_OPND_SME_ZA_array_off2x2: |
4244 | 7.02k | case AARCH64_OPND_SME_ZA_array_off2x4: |
4245 | 9.16k | case AARCH64_OPND_SME_ZA_array_off3_0: |
4246 | 9.16k | case AARCH64_OPND_SME_ZA_array_off3_5: |
4247 | 11.1k | case AARCH64_OPND_SME_ZA_array_off3x2: |
4248 | 12.0k | case AARCH64_OPND_SME_ZA_array_off4: |
4249 | 12.0k | snprintf (buf, size, "%s[%s, %s%s%s%s%s]", |
4250 | 12.0k | style_reg (styler, "za%s%s", |
4251 | 12.0k | opnd->qualifier == AARCH64_OPND_QLF_NIL ? "" : ".", |
4252 | 12.0k | (opnd->qualifier == AARCH64_OPND_QLF_NIL |
4253 | 12.0k | ? "" |
4254 | 12.0k | : aarch64_get_qualifier_name (opnd->qualifier))), |
4255 | 12.0k | style_reg (styler, "w%d", opnd->indexed_za.index.regno), |
4256 | 12.0k | style_imm (styler, "%" PRIi64, opnd->indexed_za.index.imm), |
4257 | 12.0k | opnd->indexed_za.index.countm1 ? ":" : "", |
4258 | 12.0k | (opnd->indexed_za.index.countm1 |
4259 | 12.0k | ? style_imm (styler, "%d", |
4260 | 8.97k | opnd->indexed_za.index.imm |
4261 | 8.97k | + opnd->indexed_za.index.countm1) |
4262 | 12.0k | : ""), |
4263 | 12.0k | opnd->indexed_za.group_size ? ", " : "", |
4264 | 12.0k | opnd->indexed_za.group_size == 2 |
4265 | 12.0k | ? style_sub_mnem (styler, "vgx2") |
4266 | 12.0k | : opnd->indexed_za.group_size == 4 |
4267 | 8.36k | ? style_sub_mnem (styler, "vgx4") : ""); |
4268 | 12.0k | break; |
4269 | | |
4270 | 0 | case AARCH64_OPND_SME_ZA_array_vrsb_1: |
4271 | 0 | case AARCH64_OPND_SME_ZA_array_vrsh_1: |
4272 | 0 | case AARCH64_OPND_SME_ZA_array_vrss_1: |
4273 | 0 | case AARCH64_OPND_SME_ZA_array_vrsd_1: |
4274 | 2 | case AARCH64_OPND_SME_ZA_array_vrsb_2: |
4275 | 2 | case AARCH64_OPND_SME_ZA_array_vrsh_2: |
4276 | 2 | case AARCH64_OPND_SME_ZA_array_vrss_2: |
4277 | 3 | case AARCH64_OPND_SME_ZA_array_vrsd_2: |
4278 | 3 | snprintf (buf, size, "%s [%s, %s%s%s]", |
4279 | 3 | style_reg (styler, "za%d%c%s%s", |
4280 | 3 | opnd->indexed_za.regno, |
4281 | 3 | opnd->indexed_za.v ? 'v': 'h', |
4282 | 3 | opnd->qualifier == AARCH64_OPND_QLF_NIL ? "" : ".", |
4283 | 3 | (opnd->qualifier == AARCH64_OPND_QLF_NIL |
4284 | 3 | ? "" |
4285 | 3 | : aarch64_get_qualifier_name (opnd->qualifier))), |
4286 | 3 | style_reg (styler, "w%d", opnd->indexed_za.index.regno), |
4287 | 3 | style_imm (styler, "%" PRIi64, opnd->indexed_za.index.imm), |
4288 | 3 | opnd->indexed_za.index.countm1 ? ":" : "", |
4289 | 3 | opnd->indexed_za.index.countm1 ? style_imm (styler, "%d", |
4290 | 3 | opnd->indexed_za.index.imm |
4291 | 3 | + opnd->indexed_za.index.countm1):""); |
4292 | 3 | break; |
4293 | | |
4294 | 4 | case AARCH64_OPND_SME_SM_ZA: |
4295 | 4 | snprintf (buf, size, "%s", |
4296 | 4 | style_reg (styler, opnd->reg.regno == 's' ? "sm" : "za")); |
4297 | 4 | break; |
4298 | | |
4299 | 1.02k | case AARCH64_OPND_SME_PnT_Wm_imm: |
4300 | 1.02k | snprintf (buf, size, "%s[%s, %s]", |
4301 | 1.02k | style_reg (styler, "p%d.%s", opnd->indexed_za.regno, |
4302 | 1.02k | aarch64_get_qualifier_name (opnd->qualifier)), |
4303 | 1.02k | style_reg (styler, "w%d", opnd->indexed_za.index.regno), |
4304 | 1.02k | style_imm (styler, "%" PRIi64, opnd->indexed_za.index.imm)); |
4305 | 1.02k | break; |
4306 | | |
4307 | 22 | case AARCH64_OPND_SME_VLxN_10: |
4308 | 953 | case AARCH64_OPND_SME_VLxN_13: |
4309 | 953 | enum_value = opnd->imm.value; |
4310 | 953 | assert (enum_value < ARRAY_SIZE (aarch64_sme_vlxn_array)); |
4311 | 953 | snprintf (buf, size, "%s", |
4312 | 953 | style_sub_mnem (styler, aarch64_sme_vlxn_array[enum_value])); |
4313 | 953 | break; |
4314 | | |
4315 | 1.71k | case AARCH64_OPND_CRn: |
4316 | 3.43k | case AARCH64_OPND_CRm: |
4317 | 3.43k | snprintf (buf, size, "%s", |
4318 | 3.43k | style_reg (styler, "C%" PRIi64, opnd->imm.value)); |
4319 | 3.43k | break; |
4320 | | |
4321 | 8.88k | case AARCH64_OPND_IDX: |
4322 | 9.22k | case AARCH64_OPND_MASK: |
4323 | 45.3k | case AARCH64_OPND_IMM: |
4324 | 45.7k | case AARCH64_OPND_IMM_2: |
4325 | 77.1k | case AARCH64_OPND_WIDTH: |
4326 | 78.8k | case AARCH64_OPND_UIMM3_OP1: |
4327 | 80.5k | case AARCH64_OPND_UIMM3_OP2: |
4328 | 229k | case AARCH64_OPND_BIT_NUM: |
4329 | 235k | case AARCH64_OPND_IMM_VLSL: |
4330 | 243k | case AARCH64_OPND_IMM_VLSR: |
4331 | 243k | case AARCH64_OPND_SHLL_IMM: |
4332 | 243k | case AARCH64_OPND_IMM0: |
4333 | 243k | case AARCH64_OPND_IMMR: |
4334 | 247k | case AARCH64_OPND_IMMS: |
4335 | 1.55M | case AARCH64_OPND_UNDEFINED: |
4336 | 1.56M | case AARCH64_OPND_FBITS: |
4337 | 1.56M | case AARCH64_OPND_TME_UIMM16: |
4338 | 1.56M | case AARCH64_OPND_SIMM5: |
4339 | 1.56M | case AARCH64_OPND_SME_SHRIMM4: |
4340 | 1.56M | case AARCH64_OPND_SME_SHRIMM5: |
4341 | 1.56M | case AARCH64_OPND_SVE_SHLIMM_PRED: |
4342 | 1.56M | case AARCH64_OPND_SVE_SHLIMM_UNPRED: |
4343 | 1.57M | case AARCH64_OPND_SVE_SHLIMM_UNPRED_22: |
4344 | 1.57M | case AARCH64_OPND_SVE_SHRIMM_PRED: |
4345 | 1.57M | case AARCH64_OPND_SVE_SHRIMM_UNPRED: |
4346 | 1.57M | case AARCH64_OPND_SVE_SHRIMM_UNPRED_22: |
4347 | 1.57M | case AARCH64_OPND_SVE_SIMM5: |
4348 | 1.57M | case AARCH64_OPND_SVE_SIMM5B: |
4349 | 1.57M | case AARCH64_OPND_SVE_SIMM6: |
4350 | 1.57M | case AARCH64_OPND_SVE_SIMM8: |
4351 | 1.57M | case AARCH64_OPND_SVE_UIMM3: |
4352 | 1.59M | case AARCH64_OPND_SVE_UIMM7: |
4353 | 1.59M | case AARCH64_OPND_SVE_UIMM8: |
4354 | 1.59M | case AARCH64_OPND_SVE_UIMM8_53: |
4355 | 1.60M | case AARCH64_OPND_IMM_ROT1: |
4356 | 1.61M | case AARCH64_OPND_IMM_ROT2: |
4357 | 1.61M | case AARCH64_OPND_IMM_ROT3: |
4358 | 1.61M | case AARCH64_OPND_SVE_IMM_ROT1: |
4359 | 1.61M | case AARCH64_OPND_SVE_IMM_ROT2: |
4360 | 1.61M | case AARCH64_OPND_SVE_IMM_ROT3: |
4361 | 1.61M | case AARCH64_OPND_CSSC_SIMM8: |
4362 | 1.61M | case AARCH64_OPND_CSSC_UIMM8: |
4363 | 1.61M | snprintf (buf, size, "%s", |
4364 | 1.61M | style_imm (styler, "#%" PRIi64, opnd->imm.value)); |
4365 | 1.61M | break; |
4366 | | |
4367 | 12 | case AARCH64_OPND_SVE_I1_HALF_ONE: |
4368 | 15 | case AARCH64_OPND_SVE_I1_HALF_TWO: |
4369 | 90 | case AARCH64_OPND_SVE_I1_ZERO_ONE: |
4370 | 90 | { |
4371 | 90 | single_conv_t c; |
4372 | 90 | c.i = opnd->imm.value; |
4373 | 90 | snprintf (buf, size, "%s", style_imm (styler, "#%.1f", c.f)); |
4374 | 90 | break; |
4375 | 15 | } |
4376 | | |
4377 | 81 | case AARCH64_OPND_SVE_PATTERN: |
4378 | 81 | if (optional_operand_p (opcode, idx) |
4379 | 81 | && opnd->imm.value == get_optional_operand_default_value (opcode)) |
4380 | 25 | break; |
4381 | 56 | enum_value = opnd->imm.value; |
4382 | 56 | assert (enum_value < ARRAY_SIZE (aarch64_sve_pattern_array)); |
4383 | 56 | if (aarch64_sve_pattern_array[enum_value]) |
4384 | 1 | snprintf (buf, size, "%s", |
4385 | 1 | style_reg (styler, aarch64_sve_pattern_array[enum_value])); |
4386 | 55 | else |
4387 | 55 | snprintf (buf, size, "%s", |
4388 | 55 | style_imm (styler, "#%" PRIi64, opnd->imm.value)); |
4389 | 56 | break; |
4390 | | |
4391 | 3.92k | case AARCH64_OPND_SVE_PATTERN_SCALED: |
4392 | 3.92k | if (optional_operand_p (opcode, idx) |
4393 | 3.92k | && !opnd->shifter.operator_present |
4394 | 3.92k | && opnd->imm.value == get_optional_operand_default_value (opcode)) |
4395 | 28 | break; |
4396 | 3.89k | enum_value = opnd->imm.value; |
4397 | 3.89k | assert (enum_value < ARRAY_SIZE (aarch64_sve_pattern_array)); |
4398 | 3.89k | if (aarch64_sve_pattern_array[opnd->imm.value]) |
4399 | 2.57k | snprintf (buf, size, "%s", |
4400 | 2.57k | style_reg (styler, |
4401 | 2.57k | aarch64_sve_pattern_array[opnd->imm.value])); |
4402 | 1.31k | else |
4403 | 1.31k | snprintf (buf, size, "%s", |
4404 | 1.31k | style_imm (styler, "#%" PRIi64, opnd->imm.value)); |
4405 | 3.89k | if (opnd->shifter.operator_present) |
4406 | 3.46k | { |
4407 | 3.46k | size_t len = strlen (buf); |
4408 | 3.46k | const char *shift_name |
4409 | 3.46k | = aarch64_operand_modifiers[opnd->shifter.kind].name; |
4410 | 3.46k | snprintf (buf + len, size - len, ", %s %s", |
4411 | 3.46k | style_sub_mnem (styler, shift_name), |
4412 | 3.46k | style_imm (styler, "#%" PRIi64, opnd->shifter.amount)); |
4413 | 3.46k | } |
4414 | 3.89k | break; |
4415 | | |
4416 | 12.9k | case AARCH64_OPND_SVE_PRFOP: |
4417 | 12.9k | enum_value = opnd->imm.value; |
4418 | 12.9k | assert (enum_value < ARRAY_SIZE (aarch64_sve_prfop_array)); |
4419 | 12.9k | if (aarch64_sve_prfop_array[enum_value]) |
4420 | 9.50k | snprintf (buf, size, "%s", |
4421 | 9.50k | style_reg (styler, aarch64_sve_prfop_array[enum_value])); |
4422 | 3.48k | else |
4423 | 3.48k | snprintf (buf, size, "%s", |
4424 | 3.48k | style_imm (styler, "#%" PRIi64, opnd->imm.value)); |
4425 | 12.9k | break; |
4426 | | |
4427 | 53.8k | case AARCH64_OPND_IMM_MOV: |
4428 | 53.8k | switch (aarch64_get_qualifier_esize (opnds[0].qualifier)) |
4429 | 53.8k | { |
4430 | 19.5k | case 4: /* e.g. MOV Wd, #<imm32>. */ |
4431 | 19.5k | { |
4432 | 19.5k | int imm32 = opnd->imm.value; |
4433 | 19.5k | snprintf (buf, size, "%s", |
4434 | 19.5k | style_imm (styler, "#0x%-20x", imm32)); |
4435 | 19.5k | snprintf (comment, comment_size, "#%d", imm32); |
4436 | 19.5k | } |
4437 | 19.5k | break; |
4438 | 34.3k | case 8: /* e.g. MOV Xd, #<imm64>. */ |
4439 | 34.3k | snprintf (buf, size, "%s", style_imm (styler, "#0x%-20" PRIx64, |
4440 | 34.3k | opnd->imm.value)); |
4441 | 34.3k | snprintf (comment, comment_size, "#%" PRIi64, opnd->imm.value); |
4442 | 34.3k | break; |
4443 | 0 | default: |
4444 | 0 | snprintf (buf, size, "<invalid>"); |
4445 | 0 | break; |
4446 | 53.8k | } |
4447 | 53.8k | break; |
4448 | | |
4449 | 53.8k | case AARCH64_OPND_FPIMM0: |
4450 | 285 | snprintf (buf, size, "%s", style_imm (styler, "#0.0")); |
4451 | 285 | break; |
4452 | | |
4453 | 105k | case AARCH64_OPND_LIMM: |
4454 | 295k | case AARCH64_OPND_AIMM: |
4455 | 327k | case AARCH64_OPND_HALF: |
4456 | 327k | case AARCH64_OPND_SVE_INV_LIMM: |
4457 | 342k | case AARCH64_OPND_SVE_LIMM: |
4458 | 343k | case AARCH64_OPND_SVE_LIMM_MOV: |
4459 | 343k | if (opnd->shifter.amount) |
4460 | 90.8k | snprintf (buf, size, "%s, %s %s", |
4461 | 90.8k | style_imm (styler, "#0x%" PRIx64, opnd->imm.value), |
4462 | 90.8k | style_sub_mnem (styler, "lsl"), |
4463 | 90.8k | style_imm (styler, "#%" PRIi64, opnd->shifter.amount)); |
4464 | 253k | else |
4465 | 253k | snprintf (buf, size, "%s", |
4466 | 253k | style_imm (styler, "#0x%" PRIx64, opnd->imm.value)); |
4467 | 343k | break; |
4468 | | |
4469 | 108 | case AARCH64_OPND_SIMD_IMM: |
4470 | 2.15k | case AARCH64_OPND_SIMD_IMM_SFT: |
4471 | 2.15k | if ((! opnd->shifter.amount && opnd->shifter.kind == AARCH64_MOD_LSL) |
4472 | 2.15k | || opnd->shifter.kind == AARCH64_MOD_NONE) |
4473 | 1.07k | snprintf (buf, size, "%s", |
4474 | 1.07k | style_imm (styler, "#0x%" PRIx64, opnd->imm.value)); |
4475 | 1.07k | else |
4476 | 1.07k | snprintf (buf, size, "%s, %s %s", |
4477 | 1.07k | style_imm (styler, "#0x%" PRIx64, opnd->imm.value), |
4478 | 1.07k | style_sub_mnem (styler, aarch64_operand_modifiers[opnd->shifter.kind].name), |
4479 | 1.07k | style_imm (styler, "#%" PRIi64, opnd->shifter.amount)); |
4480 | 2.15k | break; |
4481 | | |
4482 | 869 | case AARCH64_OPND_SVE_AIMM: |
4483 | 8.37k | case AARCH64_OPND_SVE_ASIMM: |
4484 | 8.37k | if (opnd->shifter.amount) |
4485 | 32 | snprintf (buf, size, "%s, %s %s", |
4486 | 32 | style_imm (styler, "#%" PRIi64, opnd->imm.value), |
4487 | 32 | style_sub_mnem (styler, "lsl"), |
4488 | 32 | style_imm (styler, "#%" PRIi64, opnd->shifter.amount)); |
4489 | 8.34k | else |
4490 | 8.34k | snprintf (buf, size, "%s", |
4491 | 8.34k | style_imm (styler, "#%" PRIi64, opnd->imm.value)); |
4492 | 8.37k | break; |
4493 | | |
4494 | 134 | case AARCH64_OPND_FPIMM: |
4495 | 733 | case AARCH64_OPND_SIMD_FPIMM: |
4496 | 1.28k | case AARCH64_OPND_SVE_FPIMM8: |
4497 | 1.28k | switch (aarch64_get_qualifier_esize (opnds[0].qualifier)) |
4498 | 1.28k | { |
4499 | 590 | case 2: /* e.g. FMOV <Hd>, #<imm>. */ |
4500 | 590 | { |
4501 | 590 | half_conv_t c; |
4502 | 590 | c.i = expand_fp_imm (2, opnd->imm.value); |
4503 | 590 | snprintf (buf, size, "%s", style_imm (styler, "#%.18e", c.f)); |
4504 | 590 | } |
4505 | 590 | break; |
4506 | 418 | case 4: /* e.g. FMOV <Vd>.4S, #<imm>. */ |
4507 | 418 | { |
4508 | 418 | single_conv_t c; |
4509 | 418 | c.i = expand_fp_imm (4, opnd->imm.value); |
4510 | 418 | snprintf (buf, size, "%s", style_imm (styler, "#%.18e", c.f)); |
4511 | 418 | } |
4512 | 418 | break; |
4513 | 275 |     case 8: /* e.g. FMOV <Dd>, #<imm>. */
4514 | 275 | { |
4515 | 275 | double_conv_t c; |
4516 | 275 | c.i = expand_fp_imm (8, opnd->imm.value); |
4517 | 275 | snprintf (buf, size, "%s", style_imm (styler, "#%.18e", c.d)); |
4518 | 275 | } |
4519 | 275 | break; |
4520 | 0 | default: |
4521 | 0 | snprintf (buf, size, "<invalid>"); |
4522 | 0 | break; |
4523 | 1.28k | } |
4524 | 1.28k | break; |
4525 | | |
4526 | 1.28k | case AARCH64_OPND_CCMP_IMM: |
4527 | 6.97k | case AARCH64_OPND_NZCV: |
4528 | 7.88k | case AARCH64_OPND_EXCEPTION: |
4529 | 7.95k | case AARCH64_OPND_UIMM4: |
4530 | 10.2k | case AARCH64_OPND_UIMM4_ADDG: |
4531 | 10.2k | case AARCH64_OPND_UIMM7: |
4532 | 12.5k | case AARCH64_OPND_UIMM10: |
4533 | 12.5k | if (optional_operand_p (opcode, idx) |
4534 | 12.5k | && (opnd->imm.value == |
4535 | 205 | (int64_t) get_optional_operand_default_value (opcode))) |
4536 | | /* Omit the operand, e.g. DCPS1. */ |
4537 | 0 | break; |
4538 | 12.5k | snprintf (buf, size, "%s", |
4539 | 12.5k | style_imm (styler, "#0x%x", (unsigned int) opnd->imm.value)); |
4540 | 12.5k | break; |
4541 | | |
4542 | 14.9k | case AARCH64_OPND_COND: |
4543 | 15.7k | case AARCH64_OPND_COND1: |
4544 | 15.7k | snprintf (buf, size, "%s", |
4545 | 15.7k | style_sub_mnem (styler, opnd->cond->names[0])); |
4546 | 15.7k | num_conds = ARRAY_SIZE (opnd->cond->names); |
4547 | 29.8k | for (i = 1; i < num_conds && opnd->cond->names[i]; ++i) |
4548 | 14.0k | { |
4549 | 14.0k | size_t len = comment != NULL ? strlen (comment) : 0; |
4550 | 14.0k | if (i == 1) |
4551 | 10.7k | snprintf (comment + len, comment_size - len, "%s = %s", |
4552 | 10.7k | opnd->cond->names[0], opnd->cond->names[i]); |
4553 | 3.22k | else |
4554 | 3.22k | snprintf (comment + len, comment_size - len, ", %s", |
4555 | 3.22k | opnd->cond->names[i]); |
4556 | 14.0k | } |
4557 | 15.7k | break; |
4558 | | |
4559 | 150k | case AARCH64_OPND_ADDR_ADRP: |
4560 | 150k | addr = ((pc + AARCH64_PCREL_OFFSET) & ~(uint64_t)0xfff) |
4561 | 150k | + opnd->imm.value; |
4562 | 150k | if (pcrel_p) |
4563 | 150k | *pcrel_p = 1; |
4564 | 150k | if (address) |
4565 | 150k | *address = addr; |
4566 | |   /* This is not necessary during disassembly, as print_address_func
4567 | |      in the disassemble_info will take care of the printing.  But some
4568 | |      other callers may still be interested in getting the string in *STR,
4569 | | so here we do snprintf regardless. */ |
4570 | 150k | snprintf (buf, size, "%s", style_addr (styler, "#0x%" PRIx64 , addr)); |
4571 | 150k | break; |
4572 | | |
4573 | 148k | case AARCH64_OPND_ADDR_PCREL14: |
4574 | 492k | case AARCH64_OPND_ADDR_PCREL19: |
4575 | 704k | case AARCH64_OPND_ADDR_PCREL21: |
4576 | 931k | case AARCH64_OPND_ADDR_PCREL26: |
4577 | 931k | addr = pc + AARCH64_PCREL_OFFSET + opnd->imm.value; |
4578 | 931k | if (pcrel_p) |
4579 | 931k | *pcrel_p = 1; |
4580 | 931k | if (address) |
4581 | 931k | *address = addr; |
4582 | |   /* This is not necessary during disassembly, as print_address_func
4583 | |      in the disassemble_info will take care of the printing.  But some
4584 | |      other callers may still be interested in getting the string in *STR,
4585 | | so here we do snprintf regardless. */ |
4586 | 931k | snprintf (buf, size, "%s", style_addr (styler, "#0x%" PRIx64, addr)); |
4587 | 931k | break; |
4588 | | |
4589 | 142k | case AARCH64_OPND_ADDR_SIMPLE: |
4590 | 154k | case AARCH64_OPND_SIMD_ADDR_SIMPLE: |
4591 | 178k | case AARCH64_OPND_SIMD_ADDR_POST: |
4592 | 178k | name = get_64bit_int_reg_name (opnd->addr.base_regno, 1); |
4593 | 178k | if (opnd->type == AARCH64_OPND_SIMD_ADDR_POST) |
4594 | 24.1k | { |
4595 | 24.1k | if (opnd->addr.offset.is_reg) |
4596 | 22.5k | snprintf (buf, size, "[%s], %s", |
4597 | 22.5k | style_reg (styler, name), |
4598 | 22.5k | style_reg (styler, "x%d", opnd->addr.offset.regno)); |
4599 | 1.62k | else |
4600 | 1.62k | snprintf (buf, size, "[%s], %s", |
4601 | 1.62k | style_reg (styler, name), |
4602 | 1.62k | style_imm (styler, "#%d", opnd->addr.offset.imm)); |
4603 | 24.1k | } |
4604 | 154k | else |
4605 | 154k | snprintf (buf, size, "[%s]", style_reg (styler, name)); |
4606 | 178k | break; |
4607 | | |
4608 | 16.4k | case AARCH64_OPND_ADDR_REGOFF: |
4609 | 16.4k | case AARCH64_OPND_SVE_ADDR_R: |
4610 | 32.5k | case AARCH64_OPND_SVE_ADDR_RR: |
4611 | 40.0k | case AARCH64_OPND_SVE_ADDR_RR_LSL1: |
4612 | 45.5k | case AARCH64_OPND_SVE_ADDR_RR_LSL2: |
4613 | 59.7k | case AARCH64_OPND_SVE_ADDR_RR_LSL3: |
4614 | 68.5k | case AARCH64_OPND_SVE_ADDR_RR_LSL4: |
4615 | 78.3k | case AARCH64_OPND_SVE_ADDR_RX: |
4616 | 84.8k | case AARCH64_OPND_SVE_ADDR_RX_LSL1: |
4617 | 92.8k | case AARCH64_OPND_SVE_ADDR_RX_LSL2: |
4618 | 97.1k | case AARCH64_OPND_SVE_ADDR_RX_LSL3: |
4619 | 97.1k | print_register_offset_address |
4620 | 97.1k | (buf, size, opnd, get_64bit_int_reg_name (opnd->addr.base_regno, 1), |
4621 | 97.1k | get_offset_int_reg_name (opnd), styler); |
4622 | 97.1k | break; |
4623 | | |
4624 | 10.2k | case AARCH64_OPND_SVE_ADDR_ZX: |
4625 | 10.2k | print_register_offset_address |
4626 | 10.2k | (buf, size, opnd, |
4627 | 10.2k | get_addr_sve_reg_name (opnd->addr.base_regno, opnd->qualifier), |
4628 | 10.2k | get_64bit_int_reg_name (opnd->addr.offset.regno, 0), styler); |
4629 | 10.2k | break; |
4630 | | |
4631 | 14.8k | case AARCH64_OPND_SVE_ADDR_RZ: |
4632 | 17.0k | case AARCH64_OPND_SVE_ADDR_RZ_LSL1: |
4633 | 18.5k | case AARCH64_OPND_SVE_ADDR_RZ_LSL2: |
4634 | 20.2k | case AARCH64_OPND_SVE_ADDR_RZ_LSL3: |
4635 | 24.9k | case AARCH64_OPND_SVE_ADDR_RZ_XTW_14: |
4636 | 54.8k | case AARCH64_OPND_SVE_ADDR_RZ_XTW_22: |
4637 | 56.1k | case AARCH64_OPND_SVE_ADDR_RZ_XTW1_14: |
4638 | 62.5k | case AARCH64_OPND_SVE_ADDR_RZ_XTW1_22: |
4639 | 63.6k | case AARCH64_OPND_SVE_ADDR_RZ_XTW2_14: |
4640 | 68.7k | case AARCH64_OPND_SVE_ADDR_RZ_XTW2_22: |
4641 | 69.3k | case AARCH64_OPND_SVE_ADDR_RZ_XTW3_14: |
4642 | 74.0k | case AARCH64_OPND_SVE_ADDR_RZ_XTW3_22: |
4643 | 74.0k | print_register_offset_address |
4644 | 74.0k | (buf, size, opnd, get_64bit_int_reg_name (opnd->addr.base_regno, 1), |
4645 | 74.0k | get_addr_sve_reg_name (opnd->addr.offset.regno, opnd->qualifier), |
4646 | 74.0k | styler); |
4647 | 74.0k | break; |
4648 | | |
4649 | 354k | case AARCH64_OPND_ADDR_SIMM7: |
4650 | 448k | case AARCH64_OPND_ADDR_SIMM9: |
4651 | 448k | case AARCH64_OPND_ADDR_SIMM9_2: |
4652 | 457k | case AARCH64_OPND_ADDR_SIMM10: |
4653 | 478k | case AARCH64_OPND_ADDR_SIMM11: |
4654 | 485k | case AARCH64_OPND_ADDR_SIMM13: |
4655 | 491k | case AARCH64_OPND_RCPC3_ADDR_OFFSET: |
4656 | 504k | case AARCH64_OPND_ADDR_OFFSET: |
4657 | 505k | case AARCH64_OPND_RCPC3_ADDR_OPT_POSTIND: |
4658 | 510k | case AARCH64_OPND_RCPC3_ADDR_OPT_PREIND_WB: |
4659 | 510k | case AARCH64_OPND_RCPC3_ADDR_POSTIND: |
4660 | 510k | case AARCH64_OPND_RCPC3_ADDR_PREIND_WB: |
4661 | 511k | case AARCH64_OPND_SME_ADDR_RI_U4xVL: |
4662 | 511k | case AARCH64_OPND_SVE_ADDR_RI_S4x16: |
4663 | 512k | case AARCH64_OPND_SVE_ADDR_RI_S4x32: |
4664 | 552k | case AARCH64_OPND_SVE_ADDR_RI_S4xVL: |
4665 | 558k | case AARCH64_OPND_SVE_ADDR_RI_S4x2xVL: |
4666 | 561k | case AARCH64_OPND_SVE_ADDR_RI_S4x3xVL: |
4667 | 565k | case AARCH64_OPND_SVE_ADDR_RI_S4x4xVL: |
4668 | 567k | case AARCH64_OPND_SVE_ADDR_RI_S6xVL: |
4669 | 571k | case AARCH64_OPND_SVE_ADDR_RI_S9xVL: |
4670 | 576k | case AARCH64_OPND_SVE_ADDR_RI_U6: |
4671 | 579k | case AARCH64_OPND_SVE_ADDR_RI_U6x2: |
4672 | 581k | case AARCH64_OPND_SVE_ADDR_RI_U6x4: |
4673 | 582k | case AARCH64_OPND_SVE_ADDR_RI_U6x8: |
4674 | 582k | print_immediate_offset_address |
4675 | 582k | (buf, size, opnd, get_64bit_int_reg_name (opnd->addr.base_regno, 1), |
4676 | 582k | styler); |
4677 | 582k | break; |
4678 | | |
4679 | 3.76k | case AARCH64_OPND_SVE_ADDR_ZI_U5: |
4680 | 8.12k | case AARCH64_OPND_SVE_ADDR_ZI_U5x2: |
4681 | 10.6k | case AARCH64_OPND_SVE_ADDR_ZI_U5x4: |
4682 | 11.7k | case AARCH64_OPND_SVE_ADDR_ZI_U5x8: |
4683 | 11.7k | print_immediate_offset_address |
4684 | 11.7k | (buf, size, opnd, |
4685 | 11.7k | get_addr_sve_reg_name (opnd->addr.base_regno, opnd->qualifier), |
4686 | 11.7k | styler); |
4687 | 11.7k | break; |
4688 | | |
4689 | 375 | case AARCH64_OPND_SVE_ADDR_ZZ_LSL: |
4690 | 532 | case AARCH64_OPND_SVE_ADDR_ZZ_SXTW: |
4691 | 1.05k | case AARCH64_OPND_SVE_ADDR_ZZ_UXTW: |
4692 | 1.05k | print_register_offset_address |
4693 | 1.05k | (buf, size, opnd, |
4694 | 1.05k | get_addr_sve_reg_name (opnd->addr.base_regno, opnd->qualifier), |
4695 | 1.05k | get_addr_sve_reg_name (opnd->addr.offset.regno, opnd->qualifier), |
4696 | 1.05k | styler); |
4697 | 1.05k | break; |
4698 | | |
4699 | 214k | case AARCH64_OPND_ADDR_UIMM12: |
4700 | 214k | name = get_64bit_int_reg_name (opnd->addr.base_regno, 1); |
4701 | 214k | if (opnd->addr.offset.imm) |
4702 | 196k | snprintf (buf, size, "[%s, %s]", |
4703 | 196k | style_reg (styler, name), |
4704 | 196k | style_imm (styler, "#%d", opnd->addr.offset.imm)); |
4705 | 17.6k | else |
4706 | 17.6k | snprintf (buf, size, "[%s]", style_reg (styler, name)); |
4707 | 214k | break; |
4708 | | |
4709 | 5.36k | case AARCH64_OPND_SYSREG: |
4710 | 6.33k | case AARCH64_OPND_SYSREG128: |
4711 | 7.09M | for (i = 0; aarch64_sys_regs[i].name; ++i) |
4712 | 7.08M | { |
4713 | 7.08M | const aarch64_sys_reg *sr = aarch64_sys_regs + i; |
4714 | | |
4715 | 7.08M | bool exact_match |
4716 | 7.08M | = (!(sr->flags & (F_REG_READ | F_REG_WRITE)) |
4717 | 7.08M | || (sr->flags & opnd->sysreg.flags) == opnd->sysreg.flags) |
4718 | 7.08M | && AARCH64_CPU_HAS_ALL_FEATURES (features, sr->features); |
4719 | | |
4720 | |     /* Try to find an exact match, but if that fails, return the first
4721 | | partial match that was found. */ |
4722 | 7.08M | if (aarch64_sys_regs[i].value == opnd->sysreg.value |
4723 | 7.08M | && ! aarch64_sys_reg_deprecated_p (aarch64_sys_regs[i].flags) |
4724 | 7.08M | && ! aarch64_sys_reg_alias_p (aarch64_sys_regs[i].flags) |
4725 | 7.08M | && (name == NULL || exact_match)) |
4726 | 289 | { |
4727 | 289 | name = aarch64_sys_regs[i].name; |
4728 | 289 | if (exact_match) |
4729 | 229 | { |
4730 | 229 | if (notes) |
4731 | 229 | *notes = NULL; |
4732 | 229 | break; |
4733 | 229 | } |
4734 | | |
4735 | |     /* If we didn't match exactly, that means the presence of a flag
4736 | | indicates what we didn't want for this instruction. e.g. If |
4737 | | F_REG_READ is there, that means we were looking for a write |
4738 | | register. See aarch64_ext_sysreg. */ |
4739 | 60 | if (aarch64_sys_regs[i].flags & F_REG_WRITE) |
4740 | 3 | *notes = _("reading from a write-only register"); |
4741 | 57 | else if (aarch64_sys_regs[i].flags & F_REG_READ) |
4742 | 46 | *notes = _("writing to a read-only register"); |
4743 | 60 | } |
4744 | 7.08M | } |
4745 | | |
4746 | 6.33k | if (name) |
4747 | 289 | snprintf (buf, size, "%s", style_reg (styler, name)); |
4748 | 6.04k | else |
4749 | 6.04k | { |
4750 | | /* Implementation defined system register. */ |
4751 | 6.04k | unsigned int value = opnd->sysreg.value; |
4752 | 6.04k | snprintf (buf, size, "%s", |
4753 | 6.04k | style_reg (styler, "s%u_%u_c%u_c%u_%u", |
4754 | 6.04k | (value >> 14) & 0x3, (value >> 11) & 0x7, |
4755 | 6.04k | (value >> 7) & 0xf, (value >> 3) & 0xf, |
4756 | 6.04k | value & 0x7)); |
4757 | 6.04k | } |
4758 | 6.33k | break; |
4759 | | |
4760 | 72 | case AARCH64_OPND_PSTATEFIELD: |
4761 | 267 | for (i = 0; aarch64_pstatefields[i].name; ++i) |
4762 | 267 | if (aarch64_pstatefields[i].value == opnd->pstatefield) |
4763 | 72 | { |
4764 | | /* PSTATEFIELD name is encoded partially in CRm[3:1] for SVCRSM, |
4765 | | SVCRZA and SVCRSMZA. */ |
4766 | 72 | uint32_t flags = aarch64_pstatefields[i].flags; |
4767 | 72 | if (flags & F_REG_IN_CRM |
4768 | 72 | && (PSTATE_DECODE_CRM (opnd->sysreg.flags) |
4769 | 0 | != PSTATE_DECODE_CRM (flags))) |
4770 | 0 | continue; |
4771 | 72 | break; |
4772 | 72 | } |
4773 | 72 | assert (aarch64_pstatefields[i].name); |
4774 | 72 | snprintf (buf, size, "%s", |
4775 | 72 | style_reg (styler, aarch64_pstatefields[i].name)); |
4776 | 72 | break; |
4777 | | |
4778 | 4 | case AARCH64_OPND_SYSREG_AT: |
4779 | 44 | case AARCH64_OPND_SYSREG_DC: |
4780 | 148 | case AARCH64_OPND_SYSREG_IC: |
4781 | 251 | case AARCH64_OPND_SYSREG_TLBI: |
4782 | 252 | case AARCH64_OPND_SYSREG_TLBIP: |
4783 | 252 | case AARCH64_OPND_SYSREG_SR: |
4784 | 252 | snprintf (buf, size, "%s", style_reg (styler, opnd->sysins_op->name)); |
4785 | 252 | break; |
4786 | | |
4787 | 55 | case AARCH64_OPND_BARRIER: |
4788 | 169 | case AARCH64_OPND_BARRIER_DSB_NXS: |
4789 | 169 | { |
4790 | 169 | if (opnd->barrier->name[0] == '#') |
4791 | 42 | snprintf (buf, size, "%s", style_imm (styler, opnd->barrier->name)); |
4792 | 127 | else |
4793 | 127 | snprintf (buf, size, "%s", |
4794 | 127 | style_sub_mnem (styler, opnd->barrier->name)); |
4795 | 169 | } |
4796 | 169 | break; |
4797 | | |
4798 | 132 | case AARCH64_OPND_BARRIER_ISB: |
4799 | | /* Operand can be omitted, e.g. in DCPS1. */ |
4800 | 132 | if (! optional_operand_p (opcode, idx) |
4801 | 132 | || (opnd->barrier->value |
4802 | 132 | != get_optional_operand_default_value (opcode))) |
4803 | 20 | snprintf (buf, size, "%s", |
4804 | 20 | style_imm (styler, "#0x%x", opnd->barrier->value)); |
4805 | 132 | break; |
4806 | | |
4807 | 33.5k | case AARCH64_OPND_PRFOP: |
4808 | 33.5k | if (opnd->prfop->name != NULL) |
4809 | 24.2k | snprintf (buf, size, "%s", style_sub_mnem (styler, opnd->prfop->name)); |
4810 | 9.35k | else |
4811 | 9.35k | snprintf (buf, size, "%s", style_imm (styler, "#0x%02x", |
4812 | 9.35k | opnd->prfop->value)); |
4813 | 33.5k | break; |
4814 | | |
4815 | 93 | case AARCH64_OPND_RPRFMOP: |
4816 | 93 | enum_value = opnd->imm.value; |
4817 | 93 | if (enum_value < ARRAY_SIZE (aarch64_rprfmop_array) |
4818 | 93 | && aarch64_rprfmop_array[enum_value]) |
4819 | 4 | snprintf (buf, size, "%s", |
4820 | 4 | style_reg (styler, aarch64_rprfmop_array[enum_value])); |
4821 | 89 | else |
4822 | 89 | snprintf (buf, size, "%s", |
4823 | 89 | style_imm (styler, "#%" PRIi64, opnd->imm.value)); |
4824 | 93 | break; |
4825 | | |
4826 | 141 | case AARCH64_OPND_BARRIER_PSB: |
4827 | 141 | snprintf (buf, size, "%s", style_sub_mnem (styler, "csync")); |
4828 | 141 | break; |
4829 | | |
4830 | 0 | case AARCH64_OPND_X16: |
4831 | 0 | snprintf (buf, size, "%s", style_reg (styler, "x16")); |
4832 | 0 | break; |
4833 | | |
4834 | 296 | case AARCH64_OPND_SME_ZT0: |
4835 | 296 | snprintf (buf, size, "%s", style_reg (styler, "zt0")); |
4836 | 296 | break; |
4837 | | |
4838 | 0 | case AARCH64_OPND_SME_ZT0_INDEX: |
4839 | 0 | snprintf (buf, size, "%s[%s]", style_reg (styler, "zt0"), |
4840 | 0 | style_imm (styler, "%d", (int) opnd->imm.value)); |
4841 | 0 | break; |
4842 | | |
4843 | 0 | case AARCH64_OPND_SME_ZT0_LIST: |
4844 | 0 | snprintf (buf, size, "{%s}", style_reg (styler, "zt0")); |
4845 | 0 | break; |
4846 | | |
4847 | 0 | case AARCH64_OPND_BARRIER_GCSB: |
4848 | 0 | snprintf (buf, size, "%s", style_sub_mnem (styler, "dsync")); |
4849 | 0 | break; |
4850 | | |
4851 | 240 | case AARCH64_OPND_BTI_TARGET: |
4852 | 240 | if ((HINT_FLAG (opnd->hint_option->value) & HINT_OPD_F_NOPRINT) == 0) |
4853 | 240 | snprintf (buf, size, "%s", |
4854 | 240 | style_sub_mnem (styler, opnd->hint_option->name)); |
4855 | 240 | break; |
4856 | | |
4857 | 5.54k | case AARCH64_OPND_MOPS_ADDR_Rd: |
4858 | 10.4k | case AARCH64_OPND_MOPS_ADDR_Rs: |
4859 | 10.4k | snprintf (buf, size, "[%s]!", |
4860 | 10.4k | style_reg (styler, |
4861 | 10.4k | get_int_reg_name (opnd->reg.regno, |
4862 | 10.4k | AARCH64_OPND_QLF_X, 0))); |
4863 | 10.4k | break; |
4864 | | |
4865 | 5.54k | case AARCH64_OPND_MOPS_WB_Rn: |
4866 | 5.54k | snprintf (buf, size, "%s!", |
4867 | 5.54k | style_reg (styler, get_int_reg_name (opnd->reg.regno, |
4868 | 5.54k | AARCH64_OPND_QLF_X, 0))); |
4869 | 5.54k | break; |
4870 | | |
4871 | 0 | default: |
4872 | 0 | snprintf (buf, size, "<invalid>"); |
4873 | 0 | break; |
4874 | 10.6M | } |
4875 | 10.6M | } |
4876 | | |
4877 | | #define CPENC(op0,op1,crn,crm,op2) \ |
4878 | | ((((op0) << 19) | ((op1) << 16) | ((crn) << 12) | ((crm) << 8) | ((op2) << 5)) >> 5) |
4879 | | /* for 3.9.3 Instructions for Accessing Special Purpose Registers */ |
4880 | | #define CPEN_(op1,crm,op2) CPENC(3,(op1),4,(crm),(op2)) |
4881 | | /* for 3.9.10 System Instructions */ |
4882 | | #define CPENS(op1,crn,crm,op2) CPENC(1,(op1),(crn),(crm),(op2)) |
4883 | | |
4884 | | #define C0 0 |
4885 | | #define C1 1 |
4886 | | #define C2 2 |
4887 | | #define C3 3 |
4888 | | #define C4 4 |
4889 | | #define C5 5 |
4890 | | #define C6 6 |
4891 | | #define C7 7 |
4892 | | #define C8 8 |
4893 | | #define C9 9 |
4894 | | #define C10 10 |
4895 | | #define C11 11 |
4896 | | #define C12 12 |
4897 | | #define C13 13 |
4898 | | #define C14 14 |
4899 | | #define C15 15 |
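
/* Standalone sketch (not part of binutils): it copies the CPENC packing
   above and shows how the resulting value is unpacked by the
   s<op0>_<op1>_c<crn>_c<crm>_<op2> fallback that the operand printer uses
   for implementation-defined system registers.  The field values
   (3, 3, 13, 0, 2 -- the TPIDR_EL0 encoding) are only an example.  */
#include <assert.h>
#include <stdio.h>

#define DEMO_CPENC(op0,op1,crn,crm,op2) \
  ((((op0) << 19) | ((op1) << 16) | ((crn) << 12) | ((crm) << 8) | ((op2) << 5)) >> 5)

int
main (void)
{
  unsigned int value = DEMO_CPENC (3, 3, 13, 0, 2);

  /* After the >> 5 the fields sit at op0 [15:14], op1 [13:11], CRn [10:7],
     CRm [6:3] and op2 [2:0], which is exactly what the fallback printing
     shifts back out.  */
  assert (value == 0xde82);
  printf ("s%u_%u_c%u_c%u_%u\n",
          (value >> 14) & 0x3, (value >> 11) & 0x7,
          (value >> 7) & 0xf, (value >> 3) & 0xf, value & 0x7);
  /* Prints: s3_3_c13_c0_2  */
  return 0;
}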
4900 | | |
4901 | | /* TODO there is one more issue that needs to be resolved:
4902 | | 1. handle cpu-implementation-defined system registers. |
4903 | | |
4904 | | Note that the F_REG_{READ,WRITE} flags mean read-only and write-only |
4905 | | respectively. If neither of these are set then the register is read-write. */ |
4906 | | const aarch64_sys_reg aarch64_sys_regs [] = |
4907 | | { |
4908 | | #define SYSREG(name, encoding, flags, features) \ |
4909 | | { name, encoding, flags, features }, |
4910 | | #include "aarch64-sys-regs.def" |
4911 | | { 0, CPENC (0,0,0,0,0), 0, AARCH64_NO_FEATURES } |
4912 | | #undef SYSREG |
4913 | | }; |
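
/* Minimal, self-contained sketch of the X-macro technique used to build
   the table above: a .def file supplies one SYSREG(...) line per register
   and the including file decides what each line expands to.  The names
   below (DEMO_REG_LIST, demo_regs, foo_el1/bar_el1) are hypothetical; the
   real entries come from aarch64-sys-regs.def.  */
#include <stdio.h>

struct demo_reg { const char *name; unsigned int encoding; };

/* Stand-in for the .def file contents.  */
#define DEMO_REG_LIST \
  SYSREG ("foo_el1", 0x1234) \
  SYSREG ("bar_el1", 0x5678)

static const struct demo_reg demo_regs[] =
{
#define SYSREG(name, encoding) { name, encoding },
  DEMO_REG_LIST
#undef SYSREG
  { 0, 0 }   /* Null-name sentinel, as in aarch64_sys_regs.  */
};

int
main (void)
{
  /* Walk the table the same way the operand printer does: stop at the
     null-name sentinel.  */
  for (int i = 0; demo_regs[i].name; i++)
    printf ("%s -> 0x%x\n", demo_regs[i].name, demo_regs[i].encoding);
  return 0;
}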
4914 | | |
4915 | | bool |
4916 | | aarch64_sys_reg_deprecated_p (const uint32_t reg_flags) |
4917 | 289 | { |
4918 | 289 | return (reg_flags & F_DEPRECATED) != 0; |
4919 | 289 | } |
4920 | | |
4921 | | bool |
4922 | | aarch64_sys_reg_128bit_p (const uint32_t reg_flags) |
4923 | 0 | { |
4924 | 0 | return (reg_flags & F_REG_128) != 0; |
4925 | 0 | } |
4926 | | |
4927 | | bool |
4928 | | aarch64_sys_reg_alias_p (const uint32_t reg_flags) |
4929 | 289 | { |
4930 | 289 | return (reg_flags & F_REG_ALIAS) != 0; |
4931 | 289 | } |
4932 | | |
4933 | | /* The CPENC below is fairly misleading: the fields
4934 | |    here are not in CPENC form.  They are in op2:op1 form.  The fields are encoded
4935 | |    by ins_pstatefield, which just shifts the value by the width of the fields
4936 | |    in a loop.  So if you CPENC them, only the first value will be set; the rest
4937 | |    are masked out to 0.  As an example, op2 = 3, op1 = 2: CPENC would produce a
4938 | | value of 0b110000000001000000 (0x30040) while what you want is |
4939 | | 0b011010 (0x1a). */ |
4940 | | const aarch64_sys_reg aarch64_pstatefields [] = |
4941 | | { |
4942 | | { "spsel", 0x05, F_REG_MAX_VALUE (1), AARCH64_NO_FEATURES }, |
4943 | | { "daifset", 0x1e, F_REG_MAX_VALUE (15), AARCH64_NO_FEATURES }, |
4944 | | { "daifclr", 0x1f, F_REG_MAX_VALUE (15), AARCH64_NO_FEATURES }, |
4945 | | { "pan", 0x04, F_REG_MAX_VALUE (1) | F_ARCHEXT, AARCH64_FEATURE (PAN) }, |
4946 | | { "uao", 0x03, F_REG_MAX_VALUE (1) | F_ARCHEXT, AARCH64_FEATURE (V8_2A) }, |
4947 | | { "ssbs", 0x19, F_REG_MAX_VALUE (1) | F_ARCHEXT, AARCH64_FEATURE (SSBS) }, |
4948 | | { "dit", 0x1a, F_REG_MAX_VALUE (1) | F_ARCHEXT, AARCH64_FEATURE (V8_4A) }, |
4949 | | { "tco", 0x1c, F_REG_MAX_VALUE (1) | F_ARCHEXT, AARCH64_FEATURE (MEMTAG) }, |
4950 | | { "svcrsm", 0x1b, PSTATE_ENCODE_CRM_AND_IMM (0x2,0x1) | F_REG_MAX_VALUE (1) |
4951 | | | F_ARCHEXT, AARCH64_FEATURE (SME) }, |
4952 | | { "svcrza", 0x1b, PSTATE_ENCODE_CRM_AND_IMM (0x4,0x1) | F_REG_MAX_VALUE (1) |
4953 | | | F_ARCHEXT, AARCH64_FEATURE (SME) }, |
4954 | | { "svcrsmza", 0x1b, PSTATE_ENCODE_CRM_AND_IMM (0x6,0x1) | F_REG_MAX_VALUE (1) |
4955 | | | F_ARCHEXT, AARCH64_FEATURE (SME) }, |
4956 | | { "allint", 0x08, F_REG_MAX_VALUE (1) | F_ARCHEXT, AARCH64_FEATURE (V8_8A) }, |
4957 | | { 0, CPENC (0,0,0,0,0), 0, AARCH64_NO_FEATURES }, |
4958 | | }; |
4959 | | |
4960 | | bool |
4961 | | aarch64_pstatefield_supported_p (const aarch64_feature_set features, |
4962 | | const aarch64_sys_reg *reg) |
4963 | 0 | { |
4964 | 0 | if (!(reg->flags & F_ARCHEXT)) |
4965 | 0 | return true; |
4966 | | |
4967 | 0 | return AARCH64_CPU_HAS_ALL_FEATURES (features, reg->features); |
4968 | 0 | } |
4969 | | |
4970 | | const aarch64_sys_ins_reg aarch64_sys_regs_ic[] = |
4971 | | { |
4972 | | { "ialluis", CPENS(0,C7,C1,0), 0, AARCH64_NO_FEATURES }, |
4973 | | { "iallu", CPENS(0,C7,C5,0), 0, AARCH64_NO_FEATURES }, |
4974 | | { "ivau", CPENS (3, C7, C5, 1), F_HASXT, AARCH64_NO_FEATURES }, |
4975 | | { 0, CPENS(0,0,0,0), 0, AARCH64_NO_FEATURES } |
4976 | | }; |
4977 | | |
4978 | | const aarch64_sys_ins_reg aarch64_sys_regs_dc[] = |
4979 | | { |
4980 | | { "zva", CPENS (3, C7, C4, 1), F_HASXT, AARCH64_NO_FEATURES }, |
4981 | | { "gva", CPENS (3, C7, C4, 3), F_HASXT | F_ARCHEXT, AARCH64_FEATURE (MEMTAG) }, |
4982 | | { "gzva", CPENS (3, C7, C4, 4), F_HASXT | F_ARCHEXT, AARCH64_FEATURE (MEMTAG) }, |
4983 | | { "ivac", CPENS (0, C7, C6, 1), F_HASXT, AARCH64_NO_FEATURES }, |
4984 | | { "igvac", CPENS (0, C7, C6, 3), F_HASXT | F_ARCHEXT, AARCH64_FEATURE (MEMTAG) }, |
4985 | | { "igsw", CPENS (0, C7, C6, 4), F_HASXT | F_ARCHEXT, AARCH64_FEATURE (MEMTAG) }, |
4986 | | { "isw", CPENS (0, C7, C6, 2), F_HASXT, AARCH64_NO_FEATURES }, |
4987 | | { "igdvac", CPENS (0, C7, C6, 5), F_HASXT | F_ARCHEXT, AARCH64_FEATURE (MEMTAG) }, |
4988 | | { "igdsw", CPENS (0, C7, C6, 6), F_HASXT | F_ARCHEXT, AARCH64_FEATURE (MEMTAG) }, |
4989 | | { "cvac", CPENS (3, C7, C10, 1), F_HASXT, AARCH64_NO_FEATURES }, |
4990 | | { "cgvac", CPENS (3, C7, C10, 3), F_HASXT | F_ARCHEXT, AARCH64_FEATURE (MEMTAG) }, |
4991 | | { "cgdvac", CPENS (3, C7, C10, 5), F_HASXT | F_ARCHEXT, AARCH64_FEATURE (MEMTAG) }, |
4992 | | { "csw", CPENS (0, C7, C10, 2), F_HASXT, AARCH64_NO_FEATURES }, |
4993 | | { "cgsw", CPENS (0, C7, C10, 4), F_HASXT | F_ARCHEXT, AARCH64_FEATURE (MEMTAG) }, |
4994 | | { "cgdsw", CPENS (0, C7, C10, 6), F_HASXT | F_ARCHEXT, AARCH64_FEATURE (MEMTAG) }, |
4995 | | { "cvau", CPENS (3, C7, C11, 1), F_HASXT, AARCH64_NO_FEATURES }, |
4996 | | { "cvap", CPENS (3, C7, C12, 1), F_HASXT | F_ARCHEXT, AARCH64_FEATURE (V8_2A) }, |
4997 | | { "cgvap", CPENS (3, C7, C12, 3), F_HASXT | F_ARCHEXT, AARCH64_FEATURE (MEMTAG) }, |
4998 | | { "cgdvap", CPENS (3, C7, C12, 5), F_HASXT | F_ARCHEXT, AARCH64_FEATURE (MEMTAG) }, |
4999 | | { "cvadp", CPENS (3, C7, C13, 1), F_HASXT | F_ARCHEXT, AARCH64_FEATURE (CVADP) }, |
5000 | | { "cgvadp", CPENS (3, C7, C13, 3), F_HASXT | F_ARCHEXT, AARCH64_FEATURE (MEMTAG) }, |
5001 | | { "cgdvadp", CPENS (3, C7, C13, 5), F_HASXT | F_ARCHEXT, AARCH64_FEATURE (MEMTAG) }, |
5002 | | { "civac", CPENS (3, C7, C14, 1), F_HASXT, AARCH64_NO_FEATURES }, |
5003 | | { "cigvac", CPENS (3, C7, C14, 3), F_HASXT | F_ARCHEXT, AARCH64_FEATURE (MEMTAG) }, |
5004 | | { "cigdvac", CPENS (3, C7, C14, 5), F_HASXT | F_ARCHEXT, AARCH64_FEATURE (MEMTAG) }, |
5005 | | { "cisw", CPENS (0, C7, C14, 2), F_HASXT, AARCH64_NO_FEATURES }, |
5006 | | { "cigsw", CPENS (0, C7, C14, 4), F_HASXT | F_ARCHEXT, AARCH64_FEATURE (MEMTAG) }, |
5007 | | { "cigdsw", CPENS (0, C7, C14, 6), F_HASXT | F_ARCHEXT, AARCH64_FEATURE (MEMTAG) }, |
5008 | | { "cipapa", CPENS (6, C7, C14, 1), F_HASXT, AARCH64_NO_FEATURES }, |
5009 | | { "cigdpapa", CPENS (6, C7, C14, 5), F_HASXT, AARCH64_NO_FEATURES }, |
5010 | | { 0, CPENS(0,0,0,0), 0, AARCH64_NO_FEATURES } |
5011 | | }; |
5012 | | |
5013 | | const aarch64_sys_ins_reg aarch64_sys_regs_at[] = |
5014 | | { |
5015 | | { "s1e1r", CPENS (0, C7, C8, 0), F_HASXT, AARCH64_NO_FEATURES }, |
5016 | | { "s1e1w", CPENS (0, C7, C8, 1), F_HASXT, AARCH64_NO_FEATURES }, |
5017 | | { "s1e0r", CPENS (0, C7, C8, 2), F_HASXT, AARCH64_NO_FEATURES }, |
5018 | | { "s1e0w", CPENS (0, C7, C8, 3), F_HASXT, AARCH64_NO_FEATURES }, |
5019 | | { "s12e1r", CPENS (4, C7, C8, 4), F_HASXT, AARCH64_NO_FEATURES }, |
5020 | | { "s12e1w", CPENS (4, C7, C8, 5), F_HASXT, AARCH64_NO_FEATURES }, |
5021 | | { "s12e0r", CPENS (4, C7, C8, 6), F_HASXT, AARCH64_NO_FEATURES }, |
5022 | | { "s12e0w", CPENS (4, C7, C8, 7), F_HASXT, AARCH64_NO_FEATURES }, |
5023 | | { "s1e2r", CPENS (4, C7, C8, 0), F_HASXT, AARCH64_NO_FEATURES }, |
5024 | | { "s1e2w", CPENS (4, C7, C8, 1), F_HASXT, AARCH64_NO_FEATURES }, |
5025 | | { "s1e3r", CPENS (6, C7, C8, 0), F_HASXT, AARCH64_NO_FEATURES }, |
5026 | | { "s1e3w", CPENS (6, C7, C8, 1), F_HASXT, AARCH64_NO_FEATURES }, |
5027 | | { "s1e1rp", CPENS (0, C7, C9, 0), F_HASXT | F_ARCHEXT, AARCH64_FEATURE (V8_2A) }, |
5028 | | { "s1e1wp", CPENS (0, C7, C9, 1), F_HASXT | F_ARCHEXT, AARCH64_FEATURE (V8_2A) }, |
5029 | | { "s1e1a", CPENS (0, C7, C9, 2), F_HASXT | F_ARCHEXT, AARCH64_FEATURE (ATS1A) }, |
5030 | | { "s1e2a", CPENS (4, C7, C9, 2), F_HASXT | F_ARCHEXT, AARCH64_FEATURE (ATS1A) }, |
5031 | | { "s1e3a", CPENS (6, C7, C9, 2), F_HASXT | F_ARCHEXT, AARCH64_FEATURE (ATS1A) }, |
5032 | | { 0, CPENS(0,0,0,0), 0, AARCH64_NO_FEATURES } |
5033 | | }; |
5034 | | |
5035 | | const aarch64_sys_ins_reg aarch64_sys_regs_tlbi[] = |
5036 | | { |
5037 | | { "rpaos", CPENS (6, C8, C4, 3), F_HASXT, AARCH64_NO_FEATURES }, |
5038 | | { "rpalos", CPENS (6, C8, C4, 7), F_HASXT, AARCH64_NO_FEATURES }, |
5039 | | { "paallos", CPENS (6, C8, C1, 4), 0, AARCH64_NO_FEATURES }, |
5040 | | { "paall", CPENS (6, C8, C7, 4), 0, AARCH64_NO_FEATURES }, |
5041 | | |
5042 | | #define TLBI_XS_OP(OP, CODE, FLAGS) \ |
5043 | | { OP, CODE, FLAGS, AARCH64_NO_FEATURES }, \ |
5044 | | { OP "nxs", CODE | CPENS (0, C9, 0, 0), FLAGS | F_ARCHEXT, AARCH64_FEATURE (XS) }, |
5045 | | |
5046 | | TLBI_XS_OP ( "vmalle1", CPENS (0, C8, C7, 0), 0) |
5047 | | TLBI_XS_OP ( "vae1", CPENS (0, C8, C7, 1), F_HASXT | F_REG_128) |
5048 | | TLBI_XS_OP ( "aside1", CPENS (0, C8, C7, 2), F_HASXT ) |
5049 | | TLBI_XS_OP ( "vaae1", CPENS (0, C8, C7, 3), F_HASXT | F_REG_128) |
5050 | | TLBI_XS_OP ( "vmalle1is", CPENS (0, C8, C3, 0), 0) |
5051 | | TLBI_XS_OP ( "vae1is", CPENS (0, C8, C3, 1), F_HASXT | F_REG_128) |
5052 | | TLBI_XS_OP ( "aside1is", CPENS (0, C8, C3, 2), F_HASXT ) |
5053 | | TLBI_XS_OP ( "vaae1is", CPENS (0, C8, C3, 3), F_HASXT | F_REG_128) |
5054 | | TLBI_XS_OP ( "ipas2e1is", CPENS (4, C8, C0, 1), F_HASXT | F_REG_128) |
5055 | | TLBI_XS_OP ( "ipas2le1is",CPENS (4, C8, C0, 5), F_HASXT | F_REG_128) |
5056 | | TLBI_XS_OP ( "ipas2e1", CPENS (4, C8, C4, 1), F_HASXT | F_REG_128) |
5057 | | TLBI_XS_OP ( "ipas2le1", CPENS (4, C8, C4, 5), F_HASXT | F_REG_128) |
5058 | | TLBI_XS_OP ( "vae2", CPENS (4, C8, C7, 1), F_HASXT | F_REG_128) |
5059 | | TLBI_XS_OP ( "vae2is", CPENS (4, C8, C3, 1), F_HASXT | F_REG_128) |
5060 | | TLBI_XS_OP ( "vmalls12e1",CPENS (4, C8, C7, 6), 0) |
5061 | | TLBI_XS_OP ( "vmalls12e1is",CPENS(4,C8, C3, 6), 0) |
5062 | | TLBI_XS_OP ( "vae3", CPENS (6, C8, C7, 1), F_HASXT | F_REG_128) |
5063 | | TLBI_XS_OP ( "vae3is", CPENS (6, C8, C3, 1), F_HASXT | F_REG_128) |
5064 | | TLBI_XS_OP ( "alle2", CPENS (4, C8, C7, 0), 0) |
5065 | | TLBI_XS_OP ( "alle2is", CPENS (4, C8, C3, 0), 0) |
5066 | | TLBI_XS_OP ( "alle1", CPENS (4, C8, C7, 4), 0) |
5067 | | TLBI_XS_OP ( "alle1is", CPENS (4, C8, C3, 4), 0) |
5068 | | TLBI_XS_OP ( "alle3", CPENS (6, C8, C7, 0), 0) |
5069 | | TLBI_XS_OP ( "alle3is", CPENS (6, C8, C3, 0), 0) |
5070 | | TLBI_XS_OP ( "vale1is", CPENS (0, C8, C3, 5), F_HASXT | F_REG_128) |
5071 | | TLBI_XS_OP ( "vale2is", CPENS (4, C8, C3, 5), F_HASXT | F_REG_128) |
5072 | | TLBI_XS_OP ( "vale3is", CPENS (6, C8, C3, 5), F_HASXT | F_REG_128) |
5073 | | TLBI_XS_OP ( "vaale1is", CPENS (0, C8, C3, 7), F_HASXT | F_REG_128) |
5074 | | TLBI_XS_OP ( "vale1", CPENS (0, C8, C7, 5), F_HASXT | F_REG_128) |
5075 | | TLBI_XS_OP ( "vale2", CPENS (4, C8, C7, 5), F_HASXT | F_REG_128) |
5076 | | TLBI_XS_OP ( "vale3", CPENS (6, C8, C7, 5), F_HASXT | F_REG_128) |
5077 | | TLBI_XS_OP ( "vaale1", CPENS (0, C8, C7, 7), F_HASXT | F_REG_128) |
5078 | | |
5079 | | #undef TLBI_XS_OP |
5080 | | #define TLBI_XS_OP(OP, CODE, FLAGS) \ |
5081 | | { OP, CODE, FLAGS | F_ARCHEXT, AARCH64_FEATURE (V8_4A) }, \ |
5082 | | { OP "nxs", CODE | CPENS (0, C9, 0, 0), FLAGS | F_ARCHEXT, AARCH64_FEATURE (XS) }, |
5083 | | |
5084 | | TLBI_XS_OP ( "vmalle1os", CPENS (0, C8, C1, 0), 0 ) |
5085 | | TLBI_XS_OP ( "vae1os", CPENS (0, C8, C1, 1), F_HASXT | F_REG_128 ) |
5086 | | TLBI_XS_OP ( "aside1os", CPENS (0, C8, C1, 2), F_HASXT ) |
5087 | | TLBI_XS_OP ( "vaae1os", CPENS (0, C8, C1, 3), F_HASXT | F_REG_128 ) |
5088 | | TLBI_XS_OP ( "vale1os", CPENS (0, C8, C1, 5), F_HASXT | F_REG_128 ) |
5089 | | TLBI_XS_OP ( "vaale1os", CPENS (0, C8, C1, 7), F_HASXT | F_REG_128 ) |
5090 | | TLBI_XS_OP ( "ipas2e1os", CPENS (4, C8, C4, 0), F_HASXT | F_REG_128 ) |
5091 | | TLBI_XS_OP ( "ipas2le1os", CPENS (4, C8, C4, 4), F_HASXT | F_REG_128 ) |
5092 | | TLBI_XS_OP ( "vae2os", CPENS (4, C8, C1, 1), F_HASXT | F_REG_128 ) |
5093 | | TLBI_XS_OP ( "vale2os", CPENS (4, C8, C1, 5), F_HASXT | F_REG_128 ) |
5094 | | TLBI_XS_OP ( "vmalls12e1os", CPENS (4, C8, C1, 6), 0 ) |
5095 | | TLBI_XS_OP ( "vae3os", CPENS (6, C8, C1, 1), F_HASXT | F_REG_128 ) |
5096 | | TLBI_XS_OP ( "vale3os", CPENS (6, C8, C1, 5), F_HASXT | F_REG_128 ) |
5097 | | TLBI_XS_OP ( "alle2os", CPENS (4, C8, C1, 0), 0 ) |
5098 | | TLBI_XS_OP ( "alle1os", CPENS (4, C8, C1, 4), 0 ) |
5099 | | TLBI_XS_OP ( "alle3os", CPENS (6, C8, C1, 0), 0 ) |
5100 | | |
5101 | | TLBI_XS_OP ( "rvae1", CPENS (0, C8, C6, 1), F_HASXT | F_REG_128 ) |
5102 | | TLBI_XS_OP ( "rvaae1", CPENS (0, C8, C6, 3), F_HASXT | F_REG_128 ) |
5103 | | TLBI_XS_OP ( "rvale1", CPENS (0, C8, C6, 5), F_HASXT | F_REG_128 ) |
5104 | | TLBI_XS_OP ( "rvaale1", CPENS (0, C8, C6, 7), F_HASXT | F_REG_128 ) |
5105 | | TLBI_XS_OP ( "rvae1is", CPENS (0, C8, C2, 1), F_HASXT | F_REG_128 ) |
5106 | | TLBI_XS_OP ( "rvaae1is", CPENS (0, C8, C2, 3), F_HASXT | F_REG_128 ) |
5107 | | TLBI_XS_OP ( "rvale1is", CPENS (0, C8, C2, 5), F_HASXT | F_REG_128 ) |
5108 | | TLBI_XS_OP ( "rvaale1is", CPENS (0, C8, C2, 7), F_HASXT | F_REG_128 ) |
5109 | | TLBI_XS_OP ( "rvae1os", CPENS (0, C8, C5, 1), F_HASXT | F_REG_128 ) |
5110 | | TLBI_XS_OP ( "rvaae1os", CPENS (0, C8, C5, 3), F_HASXT | F_REG_128 ) |
5111 | | TLBI_XS_OP ( "rvale1os", CPENS (0, C8, C5, 5), F_HASXT | F_REG_128 ) |
5112 | | TLBI_XS_OP ( "rvaale1os", CPENS (0, C8, C5, 7), F_HASXT | F_REG_128 ) |
5113 | | TLBI_XS_OP ( "ripas2e1is", CPENS (4, C8, C0, 2), F_HASXT | F_REG_128 ) |
5114 | | TLBI_XS_OP ( "ripas2le1is",CPENS (4, C8, C0, 6), F_HASXT | F_REG_128 ) |
5115 | | TLBI_XS_OP ( "ripas2e1", CPENS (4, C8, C4, 2), F_HASXT | F_REG_128 ) |
5116 | | TLBI_XS_OP ( "ripas2le1", CPENS (4, C8, C4, 6), F_HASXT | F_REG_128 ) |
5117 | | TLBI_XS_OP ( "ripas2e1os", CPENS (4, C8, C4, 3), F_HASXT | F_REG_128 ) |
5118 | | TLBI_XS_OP ( "ripas2le1os",CPENS (4, C8, C4, 7), F_HASXT | F_REG_128 ) |
5119 | | TLBI_XS_OP ( "rvae2", CPENS (4, C8, C6, 1), F_HASXT | F_REG_128 ) |
5120 | | TLBI_XS_OP ( "rvale2", CPENS (4, C8, C6, 5), F_HASXT | F_REG_128 ) |
5121 | | TLBI_XS_OP ( "rvae2is", CPENS (4, C8, C2, 1), F_HASXT | F_REG_128 ) |
5122 | | TLBI_XS_OP ( "rvale2is", CPENS (4, C8, C2, 5), F_HASXT | F_REG_128 ) |
5123 | | TLBI_XS_OP ( "rvae2os", CPENS (4, C8, C5, 1), F_HASXT | F_REG_128 ) |
5124 | | TLBI_XS_OP ( "rvale2os", CPENS (4, C8, C5, 5), F_HASXT | F_REG_128 ) |
5125 | | TLBI_XS_OP ( "rvae3", CPENS (6, C8, C6, 1), F_HASXT | F_REG_128 ) |
5126 | | TLBI_XS_OP ( "rvale3", CPENS (6, C8, C6, 5), F_HASXT | F_REG_128 ) |
5127 | | TLBI_XS_OP ( "rvae3is", CPENS (6, C8, C2, 1), F_HASXT | F_REG_128 ) |
5128 | | TLBI_XS_OP ( "rvale3is", CPENS (6, C8, C2, 5), F_HASXT | F_REG_128 ) |
5129 | | TLBI_XS_OP ( "rvae3os", CPENS (6, C8, C5, 1), F_HASXT | F_REG_128 ) |
5130 | | TLBI_XS_OP ( "rvale3os", CPENS (6, C8, C5, 5), F_HASXT | F_REG_128 ) |
5131 | | |
5132 | | #undef TLBI_XS_OP |
5133 | | |
5134 | | { 0, CPENS(0,0,0,0), 0, AARCH64_NO_FEATURES } |
5135 | | }; |
5136 | | |
5137 | | const aarch64_sys_ins_reg aarch64_sys_regs_sr[] = |
5138 | | { |
5139 | | /* RCTX is somewhat unique in that it has different values
5140 | | (op2) based on the instruction in which it is used (cfp/dvp/cpp). |
5141 | | Thus op2 is masked out and instead encoded directly in the |
5142 | | aarch64_opcode_table entries for the respective instructions. */ |
5143 | | { "rctx", CPENS(3,C7,C3,0), F_HASXT | F_ARCHEXT | F_REG_WRITE, AARCH64_FEATURE (PREDRES) }, /* WO */ |
5144 | | { 0, CPENS(0,0,0,0), 0, AARCH64_NO_FEATURES } |
5145 | | }; |
5146 | | |
5147 | | bool |
5148 | | aarch64_sys_ins_reg_has_xt (const aarch64_sys_ins_reg *sys_ins_reg) |
5149 | 416 | { |
5150 | 416 | return (sys_ins_reg->flags & F_HASXT) != 0; |
5151 | 416 | } |
5152 | | |
5153 | | extern bool |
5154 | | aarch64_sys_ins_reg_supported_p (const aarch64_feature_set features, |
5155 | | const char *reg_name, |
5156 | | uint32_t reg_flags, |
5157 | | const aarch64_feature_set *reg_features) |
5158 | 0 | { |
5159 | | /* Armv8-R has no EL3. */ |
5160 | 0 | if (AARCH64_CPU_HAS_FEATURE (features, V8R)) |
5161 | 0 | { |
5162 | 0 | const char *suffix = strrchr (reg_name, '_'); |
5163 | 0 | if (suffix && !strcmp (suffix, "_el3")) |
5164 | 0 | return false; |
5165 | 0 | } |
5166 | | |
5167 | 0 | if (!(reg_flags & F_ARCHEXT)) |
5168 | 0 | return true; |
5169 | | |
5170 | 0 | return AARCH64_CPU_HAS_ALL_FEATURES (features, *reg_features); |
5171 | 0 | } |
5172 | | |
5173 | | #undef C0 |
5174 | | #undef C1 |
5175 | | #undef C2 |
5176 | | #undef C3 |
5177 | | #undef C4 |
5178 | | #undef C5 |
5179 | | #undef C6 |
5180 | | #undef C7 |
5181 | | #undef C8 |
5182 | | #undef C9 |
5183 | | #undef C10 |
5184 | | #undef C11 |
5185 | | #undef C12 |
5186 | | #undef C13 |
5187 | | #undef C14 |
5188 | | #undef C15 |
5189 | | |
5190 | 78.3k | #define BIT(INSN,BT) (((INSN) >> (BT)) & 1) |
5191 | 119k | #define BITS(INSN,HI,LO) (((INSN) >> (LO)) & ((1 << (((HI) - (LO)) + 1)) - 1)) |
5192 | | |
5193 | | static enum err_type |
5194 | | verify_ldpsw (const struct aarch64_inst *inst ATTRIBUTE_UNUSED, |
5195 | | const aarch64_insn insn, bfd_vma pc ATTRIBUTE_UNUSED, |
5196 | | bool encoding ATTRIBUTE_UNUSED, |
5197 | | aarch64_operand_error *mismatch_detail ATTRIBUTE_UNUSED, |
5198 | | aarch64_instr_sequence *insn_sequence ATTRIBUTE_UNUSED) |
5199 | 39.9k | { |
5200 | 39.9k | int t = BITS (insn, 4, 0); |
5201 | 39.9k | int n = BITS (insn, 9, 5); |
5202 | 39.9k | int t2 = BITS (insn, 14, 10); |
5203 | | |
5204 | 39.9k | if (BIT (insn, 23)) |
5205 | 11.0k | { |
5206 | | /* Write back enabled. */ |
5207 | 11.0k | if ((t == n || t2 == n) && n != 31) |
5208 | 1.42k | return ERR_UND; |
5209 | 11.0k | } |
5210 | | |
5211 | 38.4k | if (BIT (insn, 22)) |
5212 | 38.4k | { |
5213 | | /* Load */ |
5214 | 38.4k | if (t == t2) |
5215 | 2.38k | return ERR_UND; |
5216 | 38.4k | } |
5217 | | |
5218 | 36.0k | return ERR_OK; |
5219 | 38.4k | } |
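
/* Standalone sketch of the register-field checks performed by verify_ldpsw
   above.  Only the fields the verifier actually reads are modelled (Rt at
   [4:0], Rn at [9:5], Rt2 at [14:10], the load bit 22 and the writeback
   bit 23); the remaining opcode bits are irrelevant to these checks.  The
   helper names and field settings below are hypothetical.  */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define BIT(INSN,BT)      (((INSN) >> (BT)) & 1)
#define BITS(INSN,HI,LO)  (((INSN) >> (LO)) & ((1 << (((HI) - (LO)) + 1)) - 1))

static bool
ldpsw_fields_ok (uint32_t insn)
{
  int t = BITS (insn, 4, 0);
  int n = BITS (insn, 9, 5);
  int t2 = BITS (insn, 14, 10);

  if (BIT (insn, 23) && (t == n || t2 == n) && n != 31)
    return false;               /* Writeback base overlaps a data register.  */
  if (BIT (insn, 22) && t == t2)
    return false;               /* Load with Rt == Rt2.  */
  return true;
}

int
main (void)
{
  /* Writeback (bit 23), load (bit 22), Rt = 1, Rn = 1, Rt2 = 2:
     rejected because the base register aliases Rt.  */
  uint32_t bad = (1u << 23) | (1u << 22) | (2u << 10) | (1u << 5) | 1u;
  printf ("%s\n", ldpsw_fields_ok (bad) ? "ok" : "undefined");
  return 0;
}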
5220 | | |
5221 | | /* Verifier for vector-by-element instructions with 3 operands, where the
5222 | |    condition `if sz:L == 11 then UNDEFINED` holds.  */
5223 | | |
5224 | | static enum err_type |
5225 | | verify_elem_sd (const struct aarch64_inst *inst, const aarch64_insn insn, |
5226 | | bfd_vma pc ATTRIBUTE_UNUSED, bool encoding, |
5227 | | aarch64_operand_error *mismatch_detail ATTRIBUTE_UNUSED, |
5228 | | aarch64_instr_sequence *insn_sequence ATTRIBUTE_UNUSED) |
5229 | 2.80k | { |
5230 | 2.80k | const aarch64_insn undef_pattern = 0x3; |
5231 | 2.80k | aarch64_insn value; |
5232 | | |
5233 | 2.80k | assert (inst->opcode); |
5234 | 2.80k | assert (inst->opcode->operands[2] == AARCH64_OPND_Em); |
5235 | 2.80k | value = encoding ? inst->value : insn; |
5236 | 2.80k | assert (value); |
5237 | | |
5238 | 2.80k | if (undef_pattern == extract_fields (value, 0, 2, FLD_sz, FLD_L)) |
5239 | 585 | return ERR_UND; |
5240 | | |
5241 | 2.21k | return ERR_OK; |
5242 | 2.80k | } |
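
/* Sketch of the sz:L test in verify_elem_sd using plain shifts.  It
   assumes the usual AdvSIMD vector-by-element layout with sz at bit 22 and
   L at bit 21; the real code extracts the two fields via
   extract_fields (..., FLD_sz, FLD_L) instead of hard-coding positions.  */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool
elem_sd_undefined_p (uint32_t insn)
{
  unsigned int sz_l = (insn >> 21) & 0x3;   /* Concatenated sz:L.  */
  return sz_l == 0x3;                       /* sz:L == 11 => UNDEFINED.  */
}

int
main (void)
{
  /* Only bits 22 and 21 matter for this check.  */
  printf ("%d\n", elem_sd_undefined_p (3u << 21));   /* 1: undefined.  */
  printf ("%d\n", elem_sd_undefined_p (1u << 22));   /* 0: sz=1, L=0.  */
  return 0;
}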
5243 | | |
5244 | | /* Check an instruction that takes three register operands and that |
5245 | | requires the register numbers to be distinct from one another. */ |
5246 | | |
5247 | | static enum err_type |
5248 | | verify_three_different_regs (const struct aarch64_inst *inst, |
5249 | | const aarch64_insn insn ATTRIBUTE_UNUSED, |
5250 | | bfd_vma pc ATTRIBUTE_UNUSED, |
5251 | | bool encoding ATTRIBUTE_UNUSED, |
5252 | | aarch64_operand_error *mismatch_detail |
5253 | | ATTRIBUTE_UNUSED, |
5254 | | aarch64_instr_sequence *insn_sequence |
5255 | | ATTRIBUTE_UNUSED) |
5256 | 6.14k | { |
5257 | 6.14k | int rd, rs, rn; |
5258 | | |
5259 | 6.14k | rd = inst->operands[0].reg.regno; |
5260 | 6.14k | rs = inst->operands[1].reg.regno; |
5261 | 6.14k | rn = inst->operands[2].reg.regno; |
5262 | 6.14k | if (rd == rs || rd == rn || rs == rn) |
5263 | 593 | { |
5264 | 593 | mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR; |
5265 | 593 | mismatch_detail->error |
5266 | 593 | = _("the three register operands must be distinct from one another"); |
5267 | 593 | mismatch_detail->index = -1; |
5268 | 593 | return ERR_UND; |
5269 | 593 | } |
5270 | | |
5271 | 5.54k | return ERR_OK; |
5272 | 6.14k | } |
5273 | | |
5274 | | /* Add INST to the end of INSN_SEQUENCE. */ |
5275 | | |
5276 | | static void |
5277 | | add_insn_to_sequence (const struct aarch64_inst *inst, |
5278 | | aarch64_instr_sequence *insn_sequence) |
5279 | 3.44k | { |
5280 | 3.44k | insn_sequence->instr[insn_sequence->num_added_insns++] = *inst; |
5281 | 3.44k | } |
5282 | | |
5283 | | /* Initialize an instruction sequence insn_sequence with the instruction INST. |
5284 | | If INST is NULL the given insn_sequence is cleared and the sequence is left |
5285 | | uninitialized. */ |
5286 | | |
5287 | | void |
5288 | | init_insn_sequence (const struct aarch64_inst *inst, |
5289 | | aarch64_instr_sequence *insn_sequence) |
5290 | 7.33k | { |
5291 | 7.33k | int num_req_entries = 0; |
5292 | | |
5293 | 7.33k | if (insn_sequence->instr) |
5294 | 3.07k | { |
5295 | 3.07k | XDELETE (insn_sequence->instr); |
5296 | 3.07k | insn_sequence->instr = NULL; |
5297 | 3.07k | } |
5298 | | |
5299 | | /* Handle all the cases here. May need to think of something smarter than |
5300 | | a giant if/else chain if this grows. At that time, a lookup table may be |
5301 | | best. */ |
5302 | 7.33k | if (inst && inst->opcode->constraints & C_SCAN_MOVPRFX) |
5303 | 463 | num_req_entries = 1; |
5304 | 7.33k | if (inst && (inst->opcode->constraints & C_SCAN_MOPS_PME) == C_SCAN_MOPS_P) |
5305 | 2.61k | num_req_entries = 2; |
5306 | | |
5307 | 7.33k | insn_sequence->num_added_insns = 0; |
5308 | 7.33k | insn_sequence->num_allocated_insns = num_req_entries; |
5309 | | |
5310 | 7.33k | if (num_req_entries != 0) |
5311 | 3.07k | { |
5312 | 3.07k | insn_sequence->instr = XCNEWVEC (aarch64_inst, num_req_entries); |
5313 | 3.07k | add_insn_to_sequence (inst, insn_sequence); |
5314 | 3.07k | } |
5315 | 7.33k | } |
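
/* Hedged sketch of how a caller might drive the sequence state through
   verification (the real drivers are in gas and the disassembler).  It
   assumes the declarations of aarch64_inst, aarch64_instr_sequence,
   aarch64_operand_error, init_insn_sequence and verify_constraints are
   visible from the opcodes headers; check_stream itself is a hypothetical
   helper, not a binutils function.  */
#include <string.h>

static void
check_stream (const aarch64_inst *insts, int count,
              aarch64_operand_error *detail)
{
  aarch64_instr_sequence seq;

  /* Start from an empty, closed sequence.  */
  memset (&seq, 0, sizeof seq);
  init_insn_sequence (NULL, &seq);

  for (int i = 0; i < count; i++)
    /* ENCODING=true models the assembly path, where PC is not used.
       F_SCAN instructions open a sequence; later instructions are
       checked against it.  */
    if (verify_constraints (&insts[i], 0, 0, true, detail, &seq) != ERR_OK)
      {
        /* DETAIL now describes the (possibly non-fatal) problem.  */
      }
}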
5316 | | |
5317 | | /* Subroutine of verify_constraints. Check whether the instruction |
5318 | | is part of a MOPS P/M/E sequence and, if so, whether sequencing |
5319 | | expectations are met. Return true if the check passes, otherwise |
5320 | | describe the problem in MISMATCH_DETAIL. |
5321 | | |
5322 | | IS_NEW_SECTION is true if INST is assumed to start a new section. |
5323 | | The other arguments are as for verify_constraints. */ |
5324 | | |
5325 | | static bool |
5326 | | verify_mops_pme_sequence (const struct aarch64_inst *inst, |
5327 | | bool is_new_section, |
5328 | | aarch64_operand_error *mismatch_detail, |
5329 | | aarch64_instr_sequence *insn_sequence) |
5330 | 141k | { |
5331 | 141k | const struct aarch64_opcode *opcode; |
5332 | 141k | const struct aarch64_inst *prev_insn; |
5333 | 141k | int i; |
5334 | | |
5335 | 141k | opcode = inst->opcode; |
5336 | 141k | if (insn_sequence->instr) |
5337 | 3.32k | prev_insn = insn_sequence->instr + (insn_sequence->num_added_insns - 1); |
5338 | 138k | else |
5339 | 138k | prev_insn = NULL; |
5340 | | |
5341 | 141k | if (prev_insn |
5342 | 141k | && (prev_insn->opcode->constraints & C_SCAN_MOPS_PME) |
5343 | 141k | && prev_insn->opcode != opcode - 1) |
5344 | 2.50k | { |
5345 | 2.50k | mismatch_detail->kind = AARCH64_OPDE_EXPECTED_A_AFTER_B; |
5346 | 2.50k | mismatch_detail->error = NULL; |
5347 | 2.50k | mismatch_detail->index = -1; |
5348 | 2.50k | mismatch_detail->data[0].s = prev_insn->opcode[1].name; |
5349 | 2.50k | mismatch_detail->data[1].s = prev_insn->opcode->name; |
5350 | 2.50k | mismatch_detail->non_fatal = true; |
5351 | 2.50k | return false; |
5352 | 2.50k | } |
5353 | | |
5354 | 138k | if (opcode->constraints & C_SCAN_MOPS_PME) |
5355 | 2.93k | { |
5356 | 2.93k | if (is_new_section || !prev_insn || prev_insn->opcode != opcode - 1) |
5357 | 2.56k | { |
5358 | 2.56k | mismatch_detail->kind = AARCH64_OPDE_A_SHOULD_FOLLOW_B; |
5359 | 2.56k | mismatch_detail->error = NULL; |
5360 | 2.56k | mismatch_detail->index = -1; |
5361 | 2.56k | mismatch_detail->data[0].s = opcode->name; |
5362 | 2.56k | mismatch_detail->data[1].s = opcode[-1].name; |
5363 | 2.56k | mismatch_detail->non_fatal = true; |
5364 | 2.56k | return false; |
5365 | 2.56k | } |
5366 | | |
5367 | 637 | for (i = 0; i < 3; ++i) |
5368 | | /* There's no specific requirement for the data register to be |
5369 | | the same between consecutive SET* instructions. */ |
5370 | 637 | if ((opcode->operands[i] == AARCH64_OPND_MOPS_ADDR_Rd |
5371 | 637 | || opcode->operands[i] == AARCH64_OPND_MOPS_ADDR_Rs |
5372 | 637 | || opcode->operands[i] == AARCH64_OPND_MOPS_WB_Rn) |
5373 | 637 | && prev_insn->operands[i].reg.regno != inst->operands[i].reg.regno) |
5374 | 369 | { |
5375 | 369 | mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR; |
5376 | 369 | if (opcode->operands[i] == AARCH64_OPND_MOPS_ADDR_Rd) |
5377 | 228 | mismatch_detail->error = _("destination register differs from " |
5378 | 141 | "preceding instruction"); |
5379 | 141 | else if (opcode->operands[i] == AARCH64_OPND_MOPS_ADDR_Rs) |
5380 | 0 | mismatch_detail->error = _("source register differs from " |
5381 | 141 | "preceding instruction"); |
5382 | 141 | else |
5383 | 141 | mismatch_detail->error = _("size register differs from " |
5384 | 369 | "preceding instruction"); |
5385 | 369 | mismatch_detail->index = i; |
5386 | 369 | mismatch_detail->non_fatal = true; |
5387 | 369 | return false; |
5388 | 369 | } |
5389 | 369 | } |
5390 | | |
5391 | 136k | return true; |
5392 | 138k | } |
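
/* Hedged illustration of a well-formed MOPS sequence: the prologue, main
   and epilogue forms are emitted back to back with the same destination,
   source and size registers, which is what the verifier above checks.
   Assumes GCC/Clang inline-asm syntax and a toolchain/CPU with FEAT_MOPS
   (e.g. -march=armv8.8-a); mops_copy is a hypothetical helper, not a
   binutils function.  */
static inline void
mops_copy (void *dst, const void *src, unsigned long len)
{
  register void *d asm ("x0") = dst;
  register const void *s asm ("x1") = src;
  register unsigned long n asm ("x2") = len;

  /* A single asm statement keeps cpyp/cpym/cpye consecutive.  */
  asm volatile ("cpyp  [%0]!, [%1]!, %2!\n\t"
                "cpym  [%0]!, [%1]!, %2!\n\t"
                "cpye  [%0]!, [%1]!, %2!"
                : "+r" (d), "+r" (s), "+r" (n)
                :
                : "memory", "cc");
}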
5393 | | |
5394 | | /* This function verifies that the instruction INST adheres to its specified |
5395 | | constraints. If it does then ERR_OK is returned, if not then ERR_VFI is |
5396 | | returned and MISMATCH_DETAIL contains the reason why verification failed. |
5397 | | |
5398 | | The function is called both during assembly and disassembly. If assembling |
5399 | |    then ENCODING will be TRUE, else FALSE.  If disassembling, PC will be set
5400 | |    and will contain the PC of the current instruction relative to the section.
5401 | | |
5402 | |    If not ENCODING and PC=0 then we are at the start of a section.  The constraints
5403 | | are verified against the given state insn_sequence which is updated as it |
5404 | | transitions through the verification. */ |
5405 | | |
5406 | | enum err_type |
5407 | | verify_constraints (const struct aarch64_inst *inst, |
5408 | | const aarch64_insn insn ATTRIBUTE_UNUSED, |
5409 | | bfd_vma pc, |
5410 | | bool encoding, |
5411 | | aarch64_operand_error *mismatch_detail, |
5412 | | aarch64_instr_sequence *insn_sequence) |
5413 | 4.90M | { |
5414 | 4.90M | assert (inst); |
5415 | 4.90M | assert (inst->opcode); |
5416 | | |
5417 | 4.90M | const struct aarch64_opcode *opcode = inst->opcode; |
5418 | 4.90M | if (!opcode->constraints && !insn_sequence->instr) |
5419 | 4.76M | return ERR_OK; |
5420 | | |
5421 | 144k | assert (insn_sequence); |
5422 | | |
5423 | 144k | enum err_type res = ERR_OK; |
5424 | | |
5425 | | /* This instruction puts a constraint on the insn_sequence. */ |
5426 | 144k | if (opcode->flags & F_SCAN) |
5427 | 3.07k | { |
5428 | 3.07k | if (insn_sequence->instr) |
5429 | 120 | { |
5430 | 120 | mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR; |
5431 | 120 | mismatch_detail->error = _("instruction opens new dependency " |
5432 | 120 | "sequence without ending previous one"); |
5433 | 120 | mismatch_detail->index = -1; |
5434 | 120 | mismatch_detail->non_fatal = true; |
5435 | 120 | res = ERR_VFI; |
5436 | 120 | } |
5437 | | |
5438 | 3.07k | init_insn_sequence (inst, insn_sequence); |
5439 | 3.07k | return res; |
5440 | 3.07k | } |
5441 | | |
5442 | 141k | bool is_new_section = (!encoding && pc == 0); |
5443 | 141k | if (!verify_mops_pme_sequence (inst, is_new_section, mismatch_detail, |
5444 | 141k | insn_sequence)) |
5445 | 5.43k | { |
5446 | 5.43k | res = ERR_VFI; |
5447 | 5.43k | if ((opcode->constraints & C_SCAN_MOPS_PME) != C_SCAN_MOPS_M) |
5448 | 3.80k | init_insn_sequence (NULL, insn_sequence); |
5449 | 5.43k | } |
5450 | | |
5451 | | /* Verify constraints on an existing sequence. */ |
5452 | 141k | if (insn_sequence->instr) |
5453 | 826 | { |
5454 | 826 | const struct aarch64_opcode* inst_opcode = insn_sequence->instr->opcode; |
5455 | | /* If we're decoding and we hit PC=0 with an open sequence then we haven't |
5456 | | closed a previous one that we should have. */ |
5457 | 826 | if (is_new_section && res == ERR_OK) |
5458 | 0 | { |
5459 | 0 | mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR; |
5460 | 0 | mismatch_detail->error = _("previous `movprfx' sequence not closed"); |
5461 | 0 | mismatch_detail->index = -1; |
5462 | 0 | mismatch_detail->non_fatal = true; |
5463 | 0 | res = ERR_VFI; |
5464 | | /* Reset the sequence. */ |
5465 | 0 | init_insn_sequence (NULL, insn_sequence); |
5466 | 0 | return res; |
5467 | 0 | } |
5468 | | |
5469 | | /* Validate C_SCAN_MOVPRFX constraints. TODO: move this to a lookup table. */
5470 | 826 | if (inst_opcode->constraints & C_SCAN_MOVPRFX) |
5471 | 455 | { |
5472 | | /* Check whether the MOVPRFX SVE instruction is followed by an SVE
5473 | | instruction at all, so that a more specific error message can be given. */
5474 | 455 | if (!opcode->avariant |
5475 | 455 | || (!AARCH64_CPU_HAS_FEATURE (*opcode->avariant, SVE) |
5476 | 455 | && !AARCH64_CPU_HAS_FEATURE (*opcode->avariant, SVE2))) |
5477 | 348 | { |
5478 | 348 | mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR; |
5479 | 348 | mismatch_detail->error = _("SVE instruction expected after " |
5480 | 348 | "`movprfx'"); |
5481 | 348 | mismatch_detail->index = -1; |
5482 | 348 | mismatch_detail->non_fatal = true; |
5483 | 348 | res = ERR_VFI; |
5484 | 348 | goto done; |
5485 | 348 | } |
5486 | | |
5487 | | /* Check to see if the MOVPRFX SVE instruction is followed by an SVE |
5488 | | instruction that is allowed to be used with a MOVPRFX. */ |
5489 | 107 | if (!(opcode->constraints & C_SCAN_MOVPRFX)) |
5490 | 40 | { |
5491 | 40 | mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR; |
5492 | 40 | mismatch_detail->error = _("SVE `movprfx' compatible instruction " |
5493 | 40 | "expected"); |
5494 | 40 | mismatch_detail->index = -1; |
5495 | 40 | mismatch_detail->non_fatal = true; |
5496 | 40 | res = ERR_VFI; |
5497 | 40 | goto done; |
5498 | 40 | } |
5499 | | |
5500 | | /* Next check for usage of the predicate register. */ |
5501 | 67 | aarch64_opnd_info blk_dest = insn_sequence->instr->operands[0]; |
5502 | 67 | aarch64_opnd_info blk_pred, inst_pred; |
5503 | 67 | memset (&blk_pred, 0, sizeof (aarch64_opnd_info)); |
5504 | 67 | memset (&inst_pred, 0, sizeof (aarch64_opnd_info)); |
5505 | 67 | bool predicated = false; |
5506 | 67 | assert (blk_dest.type == AARCH64_OPND_SVE_Zd); |
5507 | | |
5508 | | /* Determine if the movprfx instruction used is predicated or not. */ |
5509 | 67 | if (insn_sequence->instr->operands[1].type == AARCH64_OPND_SVE_Pg3) |
5510 | 67 | { |
5511 | 67 | predicated = true; |
5512 | 67 | blk_pred = insn_sequence->instr->operands[1]; |
5513 | 67 | } |
5514 | | |
5515 | 67 | unsigned char max_elem_size = 0; |
5516 | 67 | unsigned char current_elem_size; |
5517 | 67 | int num_op_used = 0, last_op_usage = 0; |
5518 | 67 | int i, inst_pred_idx = -1; |
5519 | 67 | int num_ops = aarch64_num_of_operands (opcode); |
5520 | 333 | for (i = 0; i < num_ops; i++) |
5521 | 266 | { |
5522 | 266 | aarch64_opnd_info inst_op = inst->operands[i]; |
5523 | 266 | switch (inst_op.type) |
5524 | 266 | { |
5525 | 95 | case AARCH64_OPND_SVE_Zd: |
5526 | 138 | case AARCH64_OPND_SVE_Zm_5: |
5527 | 158 | case AARCH64_OPND_SVE_Zm_16: |
5528 | 179 | case AARCH64_OPND_SVE_Zn: |
5529 | 179 | case AARCH64_OPND_SVE_Zt: |
5530 | 179 | case AARCH64_OPND_SVE_Vm: |
5531 | 179 | case AARCH64_OPND_SVE_Vn: |
5532 | 179 | case AARCH64_OPND_Va: |
5533 | 179 | case AARCH64_OPND_Vn: |
5534 | 179 | case AARCH64_OPND_Vm: |
5535 | 179 | case AARCH64_OPND_Sn: |
5536 | 179 | case AARCH64_OPND_Sm: |
5537 | 179 | if (inst_op.reg.regno == blk_dest.reg.regno) |
5538 | 78 | { |
5539 | 78 | num_op_used++; |
5540 | 78 | last_op_usage = i; |
5541 | 78 | } |
5542 | 179 | current_elem_size |
5543 | 179 | = aarch64_get_qualifier_esize (inst_op.qualifier); |
5544 | 179 | if (current_elem_size > max_elem_size) |
5545 | 67 | max_elem_size = current_elem_size; |
5546 | 179 | break; |
5547 | 0 | case AARCH64_OPND_SVE_Pd: |
5548 | 63 | case AARCH64_OPND_SVE_Pg3: |
5549 | 63 | case AARCH64_OPND_SVE_Pg4_5: |
5550 | 63 | case AARCH64_OPND_SVE_Pg4_10: |
5551 | 64 | case AARCH64_OPND_SVE_Pg4_16: |
5552 | 64 | case AARCH64_OPND_SVE_Pm: |
5553 | 64 | case AARCH64_OPND_SVE_Pn: |
5554 | 64 | case AARCH64_OPND_SVE_Pt: |
5555 | 64 | case AARCH64_OPND_SME_Pm: |
5556 | 64 | inst_pred = inst_op; |
5557 | 64 | inst_pred_idx = i; |
5558 | 64 | break; |
5559 | 23 | default: |
5560 | 23 | break; |
5561 | 266 | } |
5562 | 266 | } |
5563 | | |
5564 | 67 | assert (max_elem_size != 0); |
5565 | 67 | aarch64_opnd_info inst_dest = inst->operands[0]; |
5566 | | /* Determine the size that should be used to compare against the |
5567 | | movprfx size. */ |
5568 | 67 | current_elem_size |
5569 | 67 | = opcode->constraints & C_MAX_ELEM |
5570 | 67 | ? max_elem_size |
5571 | 67 | : aarch64_get_qualifier_esize (inst_dest.qualifier); |
5572 | | |
5573 | | /* If movprfx is predicated do some extra checks. */ |
5574 | 67 | if (predicated) |
5575 | 67 | { |
5576 | | /* The instruction must be predicated. */ |
5577 | 67 | if (inst_pred_idx < 0) |
5578 | 3 | { |
5579 | 3 | mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR; |
5580 | 3 | mismatch_detail->error = _("predicated instruction expected " |
5581 | 3 | "after `movprfx'"); |
5582 | 3 | mismatch_detail->index = -1; |
5583 | 3 | mismatch_detail->non_fatal = true; |
5584 | 3 | res = ERR_VFI; |
5585 | 3 | goto done; |
5586 | 3 | } |
5587 | | |
5588 | | /* The instruction must have a merging predicate. */ |
5589 | 64 | if (inst_pred.qualifier != AARCH64_OPND_QLF_P_M) |
5590 | 1 | { |
5591 | 1 | mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR; |
5592 | 1 | mismatch_detail->error = _("merging predicate expected due " |
5593 | 1 | "to preceding `movprfx'"); |
5594 | 1 | mismatch_detail->index = inst_pred_idx; |
5595 | 1 | mismatch_detail->non_fatal = true; |
5596 | 1 | res = ERR_VFI; |
5597 | 1 | goto done; |
5598 | 1 | } |
5599 | | |
5600 | | /* The same predicate register must be used in the instruction. */
5601 | 63 | if (blk_pred.reg.regno != inst_pred.reg.regno) |
5602 | 36 | { |
5603 | 36 | mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR; |
5604 | 36 | mismatch_detail->error = _("predicate register differs " |
5605 | 36 | "from that in preceding " |
5606 | 36 | "`movprfx'"); |
5607 | 36 | mismatch_detail->index = inst_pred_idx; |
5608 | 36 | mismatch_detail->non_fatal = true; |
5609 | 36 | res = ERR_VFI; |
5610 | 36 | goto done; |
5611 | 36 | } |
5612 | 63 | } |
5613 | | |
5614 | | /* Destructive operations by definition allow one extra use of the
5615 | | same register, as both the destination and a source. */
5616 | 27 | int allowed_usage |
5617 | 27 | = aarch64_is_destructive_by_operands (opcode) ? 2 : 1; |
5618 | | |
5619 | | /* Operand is not used at all. */ |
5620 | 27 | if (num_op_used == 0) |
5621 | 14 | { |
5622 | 14 | mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR; |
5623 | 14 | mismatch_detail->error = _("output register of preceding " |
5624 | 14 | "`movprfx' not used in current " |
5625 | 14 | "instruction"); |
5626 | 14 | mismatch_detail->index = 0; |
5627 | 14 | mismatch_detail->non_fatal = true; |
5628 | 14 | res = ERR_VFI; |
5629 | 14 | goto done; |
5630 | 14 | } |
5631 | | |
5632 | | /* We now know it's used; now determine exactly where it's used. */
5633 | 13 | if (blk_dest.reg.regno != inst_dest.reg.regno) |
5634 | 1 | { |
5635 | 1 | mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR; |
5636 | 1 | mismatch_detail->error = _("output register of preceding " |
5637 | 1 | "`movprfx' expected as output"); |
5638 | 1 | mismatch_detail->index = 0; |
5639 | 1 | mismatch_detail->non_fatal = true; |
5640 | 1 | res = ERR_VFI; |
5641 | 1 | goto done; |
5642 | 1 | } |
5643 | | |
5644 | | /* Operand used more than allowed for the specific opcode type. */ |
5645 | 12 | if (num_op_used > allowed_usage) |
5646 | 12 | { |
5647 | 12 | mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR; |
5648 | 12 | mismatch_detail->error = _("output register of preceding " |
5649 | 12 | "`movprfx' used as input"); |
5650 | 12 | mismatch_detail->index = last_op_usage; |
5651 | 12 | mismatch_detail->non_fatal = true; |
5652 | 12 | res = ERR_VFI; |
5653 | 12 | goto done; |
5654 | 12 | } |
5655 | | |
5656 | | /* Now the only thing left is the qualifier checks. The register
5657 | | must have the same maximum element size. */
5658 | 0 | if (inst_dest.qualifier |
5659 | 0 | && blk_dest.qualifier |
5660 | 0 | && current_elem_size |
5661 | 0 | != aarch64_get_qualifier_esize (blk_dest.qualifier)) |
5662 | 0 | { |
5663 | 0 | mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR; |
5664 | 0 | mismatch_detail->error = _("register size not compatible with " |
5665 | 0 | "previous `movprfx'"); |
5666 | 0 | mismatch_detail->index = 0; |
5667 | 0 | mismatch_detail->non_fatal = true; |
5668 | 0 | res = ERR_VFI; |
5669 | 0 | goto done; |
5670 | 0 | } |
5671 | 0 | } |
5672 | | |
5673 | 826 | done: |
5674 | 826 | if (insn_sequence->num_added_insns == insn_sequence->num_allocated_insns) |
5675 | | /* We've checked the last instruction in the sequence and so |
5676 | | don't need the sequence any more. */ |
5677 | 455 | init_insn_sequence (NULL, insn_sequence); |
5678 | 371 | else |
5679 | 371 | add_insn_to_sequence (inst, insn_sequence); |
5680 | 826 | } |
5681 | | |
5682 | 141k | return res; |
5683 | 141k | } |
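 | |
 | | /* Illustrative sketch only -- a simplified, standalone model of the
 | |    open/verify/close bookkeeping that verify_constraints performs with
 | |    insn_sequence: an F_SCAN instruction (e.g. `movprfx') opens a sequence,
 | |    each following instruction is checked against it, and the sequence is
 | |    dropped once the expected number of instructions has been seen.  The
 | |    toy_* names are invented for this sketch; compile separately. */
 | |
 | | #include <stdbool.h>
 | | #include <stdio.h>
 | |
 | | struct toy_sequence
 | | {
 | |   bool open;            /* True while a sequence is being tracked.  */
 | |   int num_added;        /* Followers checked so far.  */
 | |   int num_allocated;    /* Followers the opening instruction expects.  */
 | | };
 | |
 | | static void
 | | toy_open (struct toy_sequence *seq, int expected)
 | | {
 | |   seq->open = true;
 | |   seq->num_added = 0;
 | |   seq->num_allocated = expected;
 | | }
 | |
 | | /* Return false when the follower violates the (caller-evaluated)
 | |    constraint; close the sequence once it has been fully checked.  */
 | | static bool
 | | toy_step (struct toy_sequence *seq, bool constraint_ok)
 | | {
 | |   if (!seq->open)
 | |     return true;
 | |   if (++seq->num_added >= seq->num_allocated)
 | |     seq->open = false;
 | |   return constraint_ok;
 | | }
 | |
 | | int
 | | main (void)
 | | {
 | |   struct toy_sequence seq = { false, 0, 0 };
 | |   toy_open (&seq, 1);           /* movprfx expects one follower.  */
 | |   printf ("follower accepted: %d\n", toy_step (&seq, true));
 | |   printf ("sequence closed: %d\n", !seq.open);
 | |   return 0;
 | | }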
5684 | | |
5685 | | |
5686 | | /* Return true if VALUE cannot be moved into an SVE register using DUP |
5687 | | (with any element size, not just ESIZE) and if using DUPM would |
5688 | | therefore be OK. ESIZE is the number of bytes in the immediate. */ |
5689 | | |
5690 | | bool |
5691 | | aarch64_sve_dupm_mov_immediate_p (uint64_t uvalue, int esize) |
5692 | 3.75k | { |
5693 | 3.75k | int64_t svalue = uvalue; |
5694 | 3.75k | uint64_t upper = (uint64_t) -1 << (esize * 4) << (esize * 4); |
5695 | | |
5696 | 3.75k | if ((uvalue & ~upper) != uvalue && (uvalue | upper) != uvalue) |
5697 | 0 | return false; |
5698 | 3.75k | if (esize <= 4 || (uint32_t) uvalue == (uint32_t) (uvalue >> 32)) |
5699 | 3.49k | { |
5700 | 3.49k | svalue = (int32_t) uvalue; |
5701 | 3.49k | if (esize <= 2 || (uint16_t) uvalue == (uint16_t) (uvalue >> 16)) |
5702 | 642 | { |
5703 | 642 | svalue = (int16_t) uvalue; |
5704 | 642 | if (esize == 1 || (uint8_t) uvalue == (uint8_t) (uvalue >> 8)) |
5705 | 495 | return false; |
5706 | 642 | } |
5707 | 3.49k | } |
5708 | 3.26k | if ((svalue & 0xff) == 0) |
5709 | 2.06k | svalue /= 256; |
5710 | 3.26k | return svalue < -128 || svalue >= 128; |
5711 | 3.75k | } |
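 | |
 | | /* Worked example (editor's sketch; standalone, it repeats the key steps
 | |    above for one input instead of calling the function).  For
 | |    UVALUE == 0x0000ffff and ESIZE == 4: the value fits in 4 bytes, its two
 | |    16-bit halves are not equal (so it is not a replicated narrower
 | |    pattern), and SVALUE == 0xffff has a non-zero low byte and lies outside
 | |    [-128, 128) -- DUP cannot encode it, so the function returns true and
 | |    DUPM must be used.  Compile separately. */
 | |
 | | #include <stdint.h>
 | | #include <stdio.h>
 | |
 | | int
 | | main (void)
 | | {
 | |   uint64_t uvalue = 0x0000ffff;
 | |   int esize = 4;
 | |   uint64_t upper = (uint64_t) -1 << (esize * 4) << (esize * 4);
 | |   int64_t svalue = (int32_t) uvalue;
 | |
 | |   printf ("fits in %d bytes: %d\n", esize,
 | |           (uvalue & ~upper) == uvalue || (uvalue | upper) == uvalue);
 | |   printf ("replicated 16-bit halves: %d\n",
 | |           (uint16_t) uvalue == (uint16_t) (uvalue >> 16));
 | |   if ((svalue & 0xff) == 0)
 | |     svalue /= 256;
 | |   printf ("needs dupm: %d\n", svalue < -128 || svalue >= 128);
 | |   return 0;
 | | }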
5712 | | |
5713 | | /* Return true if a CPU with the AARCH64_FEATURE_* bits in CPU_VARIANT |
5714 | | supports the instruction described by INST. */ |
5715 | | |
5716 | | bool |
5717 | | aarch64_cpu_supports_inst_p (aarch64_feature_set cpu_variant, |
5718 | | aarch64_inst *inst) |
5719 | 0 | { |
5720 | 0 | if (!inst->opcode->avariant |
5721 | 0 | || !AARCH64_CPU_HAS_ALL_FEATURES (cpu_variant, *inst->opcode->avariant)) |
5722 | 0 | return false; |
5723 | | |
5724 | 0 | if (inst->opcode->iclass == sme_fp_sd |
5725 | 0 | && inst->operands[0].qualifier == AARCH64_OPND_QLF_S_D |
5726 | 0 | && !AARCH64_CPU_HAS_FEATURE (cpu_variant, SME_F64F64)) |
5727 | 0 | return false; |
5728 | | |
5729 | 0 | if (inst->opcode->iclass == sme_int_sd |
5730 | 0 | && inst->operands[0].qualifier == AARCH64_OPND_QLF_S_D |
5731 | 0 | && !AARCH64_CPU_HAS_FEATURE (cpu_variant, SME_I16I64)) |
5732 | 0 | return false; |
5733 | | |
5734 | 0 | return true; |
5735 | 0 | } |
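 | |
 | | /* Illustrative sketch only -- a simplified, standalone model of the
 | |    two-level gate above, using plain bit masks instead of
 | |    aarch64_feature_set: the CPU must have every base feature of the
 | |    opcode, and some qualifier choices (here a stand-in for the .D forms
 | |    of sme_fp_sd/sme_int_sd) demand an extra feature on top.  The TOY_*
 | |    names are invented for this sketch; compile separately. */
 | |
 | | #include <stdbool.h>
 | | #include <stdint.h>
 | | #include <stdio.h>
 | |
 | | #define TOY_FEAT_SME      (1u << 0)
 | | #define TOY_FEAT_F64F64   (1u << 1)   /* Stand-in for SME_F64F64.  */
 | |
 | | static bool
 | | toy_supports (uint32_t cpu, uint32_t opcode_feats, bool needs_f64f64)
 | | {
 | |   if ((cpu & opcode_feats) != opcode_feats)
 | |     return false;           /* A base feature is missing.  */
 | |   if (needs_f64f64 && !(cpu & TOY_FEAT_F64F64))
 | |     return false;           /* The qualifier needs an extra feature.  */
 | |   return true;
 | | }
 | |
 | | int
 | | main (void)
 | | {
 | |   uint32_t cpu = TOY_FEAT_SME;        /* SME, but not FEAT_SME_F64F64.  */
 | |   printf ("%d %d\n",
 | |           toy_supports (cpu, TOY_FEAT_SME, false),   /* 1 */
 | |           toy_supports (cpu, TOY_FEAT_SME, true));   /* 0 */
 | |   return 0;
 | | }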
5736 | | |
5737 | | /* Include the opcode description table as well as the operand description |
5738 | | table. */ |
5739 | | #define VERIFIER(x) verify_##x |
5740 | | #include "aarch64-tbl.h" |