/src/capstonenext/Mapping.c
Line | Count | Source |
1 | | /* Capstone Disassembly Engine */ |
2 | | /* By Nguyen Anh Quynh <aquynh@gmail.com>, 2013-2019 */ |
3 | | /* Rot127 <unisono@quyllur.org>, 2022-2023 */ |
4 | | |
5 | | #include "Mapping.h" |
6 | | #include "capstone/capstone.h" |
7 | | #include "cs_priv.h" |
8 | | #include "utils.h" |
9 | | |
10 | | // Create a cache to map LLVM instruction IDs to capstone instruction IDs, if |
11 | | // the architecture needs this. |
12 | | cs_err populate_insn_map_cache(cs_struct *handle) |
13 | 52.3k | { |
14 | 52.3k | unsigned int i; |
15 | | |
16 | | // If this architecture doesn't use instruction mapping, do nothing |
17 | 52.3k | if (!handle->insn_map || handle->insn_map_size <= 0) |
18 | 49.2k | return CS_ERR_OK; |
19 | | |
20 | | // Since the instruction map is assumed to be stored in ascending |
21 | | // order, we can get the maximum LLVM instruction id just by looking at |
22 | | // the last element. |
23 | 3.13k | unsigned int cache_elements = |
24 | 3.13k | handle->insn_map[handle->insn_map_size - 1].id + 1; |
25 | | |
26 | | // This should not be initialized yet. |
27 | 3.13k | CS_ASSERT(!handle->insn_cache); |
28 | | |
29 | 3.13k | unsigned short *cache = cs_mem_calloc(cache_elements, sizeof(*cache)); |
30 | 3.13k | if (!cache) { |
31 | 0 | handle->errnum = CS_ERR_MEM; |
32 | 0 | return CS_ERR_MEM; |
33 | 0 | } |
34 | 3.13k | handle->insn_cache = cache; |
35 | | |
36 | 19.6M | for (i = 1; i < handle->insn_map_size; ++i) |
37 | 19.6M | handle->insn_cache[handle->insn_map[i].id] = i; |
38 | | |
39 | 3.13k | return CS_ERR_OK; |
40 | 3.13k | } |
41 | | |
42 | | const insn_map *lookup_insn_map(cs_struct *handle, unsigned short id) |
43 | 156k | { |
44 | | // If this is getting called, we need the cache to already be populated |
45 | | // (this should be done when populate_insn_map_cache() gets called). |
46 | 156k | CS_ASSERT(handle->insn_cache); |
47 | 156k | CS_ASSERT(handle->insn_map_size); |
48 | | |
49 | 156k | unsigned short highest_id = |
50 | 156k | handle->insn_map[handle->insn_map_size - 1].id; |
51 | 156k | if (id > highest_id) |
52 | 0 | return NULL; |
53 | | |
54 | 156k | unsigned short i = handle->insn_cache[id]; |
55 | | |
56 | 156k | return &handle->insn_map[i]; |
57 | 156k | } |
58 | | |
59 | | // Gives the id for the given @name if it is saved in @map. |
60 | | // Returns the id or -1 if not found. |
61 | | int name2id(const name_map *map, int max, const char *name) |
62 | 78.6k | { |
63 | 78.6k | CS_ASSERT_RET_VAL(map && name, -1); |
64 | 78.6k | int i; |
65 | | |
66 | 11.2M | for (i = 0; i < max; i++) { |
67 | 11.2M | if (!map[i].name) { |
68 | 4.38k | return -1; |
69 | 4.38k | } |
70 | 11.2M | if (!strcmp(map[i].name, name)) { |
71 | 70.0k | return map[i].id; |
72 | 70.0k | } |
73 | 11.2M | } |
74 | | |
75 | | // nothing match |
76 | 4.15k | return -1; |
77 | 78.6k | } |
78 | | |
79 | | // Gives the name for the given @id if it is saved in @map. |
80 | | // Returns the name or NULL if not found. |
81 | | const char *id2name(const name_map *map, int max, const unsigned int id) |
82 | 2.94M | { |
83 | 2.94M | int i; |
84 | | |
85 | 118M | for (i = 0; i < max; i++) { |
86 | 118M | if (map[i].id == id) { |
87 | 2.92M | return map[i].name; |
88 | 2.92M | } |
89 | 118M | } |
90 | | |
91 | | // nothing match |
92 | 21.0k | return NULL; |
93 | 2.94M | } |
94 | | |
95 | | /// Adds a register to the implicit write register list. |
96 | | /// It will not add the same register twice. |
97 | | void map_add_implicit_write(MCInst *MI, uint32_t Reg) |
98 | 403k | { |
99 | 403k | if (!MI->flat_insn->detail) |
100 | 0 | return; |
101 | | |
102 | 403k | uint16_t *regs_write = MI->flat_insn->detail->regs_write; |
103 | 406k | for (int i = 0; i < MAX_IMPL_W_REGS; ++i) { |
104 | 406k | if (i == MI->flat_insn->detail->regs_write_count) { |
105 | 382k | regs_write[i] = Reg; |
106 | 382k | MI->flat_insn->detail->regs_write_count++; |
107 | 382k | return; |
108 | 382k | } |
109 | 24.4k | if (regs_write[i] == Reg) |
110 | 20.6k | return; |
111 | 24.4k | } |
112 | 403k | } |
113 | | |
114 | | /// Adds a register to the implicit read register list. |
115 | | /// It will not add the same register twice. |
116 | | void map_add_implicit_read(MCInst *MI, uint32_t Reg) |
117 | 180k | { |
118 | 180k | if (!MI->flat_insn->detail) |
119 | 0 | return; |
120 | | |
121 | 180k | uint16_t *regs_read = MI->flat_insn->detail->regs_read; |
122 | 190k | for (int i = 0; i < MAX_IMPL_R_REGS; ++i) { |
123 | 190k | if (i == MI->flat_insn->detail->regs_read_count) { |
124 | 164k | regs_read[i] = Reg; |
125 | 164k | MI->flat_insn->detail->regs_read_count++; |
126 | 164k | return; |
127 | 164k | } |
128 | 25.8k | if (regs_read[i] == Reg) |
129 | 15.5k | return; |
130 | 25.8k | } |
131 | 180k | } |
132 | | |
133 | | /// Removes a register from the implicit write register list. |
void map_remove_implicit_write(MCInst *MI, uint32_t Reg)
{
	if (!MI->flat_insn->detail)
		return;

	uint16_t *regs_write = MI->flat_insn->detail->regs_write;
	bool shorten_list = false;
	for (int i = 0; i < MAX_IMPL_W_REGS; ++i) {
		// Once @Reg was found, shift every following entry one slot
		// to the left to close the gap.
		if (shorten_list) {
			regs_write[i - 1] = regs_write[i];
		}
		// regs_write_count was already decremented when @Reg was
		// found, so the final shift above copies the old last entry
		// before this bound check stops the loop.
		if (i >= MI->flat_insn->detail->regs_write_count)
			return;

		if (regs_write[i] == Reg) {
			MI->flat_insn->detail->regs_write_count--;
			// The register should exist only once in the list.
			CS_ASSERT_RET(!shorten_list);
			shorten_list = true;
		}
	}
}
156 | | |
157 | | /// Copies the implicit read registers of @imap to @MI->flat_insn. |
158 | | /// Already present registers will be preserved. |
159 | | void map_implicit_reads(MCInst *MI, const insn_map *imap) |
160 | 1.86M | { |
161 | 1.86M | #ifndef CAPSTONE_DIET |
162 | 1.86M | if (!MI->flat_insn->detail) |
163 | 0 | return; |
164 | | |
165 | 1.86M | cs_detail *detail = MI->flat_insn->detail; |
166 | 1.86M | unsigned Opcode = MCInst_getOpcode(MI); |
167 | 1.86M | unsigned i = 0; |
168 | 1.86M | uint16_t reg = imap[Opcode].regs_use[i]; |
169 | 2.00M | while (reg != 0) { |
170 | 136k | if (i >= MAX_IMPL_R_REGS || |
171 | 136k | detail->regs_read_count >= MAX_IMPL_R_REGS) { |
172 | 0 | printf("ERROR: Too many implicit read register defined in " |
173 | 0 | "instruction mapping.\n"); |
174 | 0 | return; |
175 | 0 | } |
176 | 136k | detail->regs_read[detail->regs_read_count++] = reg; |
177 | 136k | if (i + 1 < MAX_IMPL_R_REGS) { |
178 | | // Select next one |
179 | 136k | reg = imap[Opcode].regs_use[++i]; |
180 | 136k | } |
181 | 136k | } |
182 | 1.86M | #endif // CAPSTONE_DIET |
183 | 1.86M | } |
184 | | |
185 | | /// Copies the implicit write registers of @imap to @MI->flat_insn. |
186 | | /// Already present registers will be preserved. |
187 | | void map_implicit_writes(MCInst *MI, const insn_map *imap) |
188 | 1.86M | { |
189 | 1.86M | #ifndef CAPSTONE_DIET |
190 | 1.86M | if (!MI->flat_insn->detail) |
191 | 0 | return; |
192 | | |
193 | 1.86M | cs_detail *detail = MI->flat_insn->detail; |
194 | 1.86M | unsigned Opcode = MCInst_getOpcode(MI); |
195 | 1.86M | unsigned i = 0; |
196 | 1.86M | uint16_t reg = imap[Opcode].regs_mod[i]; |
197 | 2.17M | while (reg != 0) { |
198 | 303k | if (i >= MAX_IMPL_W_REGS || |
199 | 303k | detail->regs_write_count >= MAX_IMPL_W_REGS) { |
200 | 0 | printf("ERROR: Too many implicit write register defined in " |
201 | 0 | "instruction mapping.\n"); |
202 | 0 | return; |
203 | 0 | } |
204 | 303k | detail->regs_write[detail->regs_write_count++] = reg; |
205 | 303k | if (i + 1 < MAX_IMPL_W_REGS) { |
206 | | // Select next one |
207 | 303k | reg = imap[Opcode].regs_mod[++i]; |
208 | 303k | } |
209 | 303k | } |
210 | 1.86M | #endif // CAPSTONE_DIET |
211 | 1.86M | } |
212 | | |
213 | | /// Adds a given group to @MI->flat_insn. |
214 | | /// A group is never added twice. |
215 | | void add_group(MCInst *MI, unsigned /* arch_group */ group) |
216 | 190k | { |
217 | 190k | #ifndef CAPSTONE_DIET |
218 | 190k | if (!MI->flat_insn->detail) |
219 | 0 | return; |
220 | | |
221 | 190k | cs_detail *detail = MI->flat_insn->detail; |
222 | 190k | if (detail->groups_count >= MAX_NUM_GROUPS) { |
223 | 0 | printf("ERROR: Too many groups defined.\n"); |
224 | 0 | return; |
225 | 0 | } |
226 | 343k | for (int i = 0; i < detail->groups_count; ++i) { |
227 | 153k | if (detail->groups[i] == group) { |
228 | 401 | return; |
229 | 401 | } |
230 | 153k | } |
231 | 190k | detail->groups[detail->groups_count++] = group; |
232 | 190k | #endif // CAPSTONE_DIET |
233 | 190k | } |
234 | | |
235 | | /// Copies the groups from @imap to @MI->flat_insn. |
236 | | /// Already present groups will be preserved. |
237 | | void map_groups(MCInst *MI, const insn_map *imap) |
238 | 1.86M | { |
239 | 1.86M | #ifndef CAPSTONE_DIET |
240 | 1.86M | if (!MI->flat_insn->detail) |
241 | 0 | return; |
242 | | |
243 | 1.86M | cs_detail *detail = MI->flat_insn->detail; |
244 | 1.86M | unsigned Opcode = MCInst_getOpcode(MI); |
245 | 1.86M | unsigned i = 0; |
246 | 1.86M | uint16_t group = imap[Opcode].groups[i]; |
247 | 4.02M | while (group != 0) { |
248 | 2.15M | if (detail->groups_count >= MAX_NUM_GROUPS) { |
249 | 0 | printf("ERROR: Too many groups defined in instruction mapping.\n"); |
250 | 0 | return; |
251 | 0 | } |
252 | 2.15M | detail->groups[detail->groups_count++] = group; |
253 | 2.15M | group = imap[Opcode].groups[++i]; |
254 | 2.15M | } |
255 | 1.86M | #endif // CAPSTONE_DIET |
256 | 1.86M | } |
257 | | |
258 | | /// Returns the pointer to the supllementary information in |
259 | | /// the instruction mapping table @imap or NULL in case of failure. |
260 | | const void *map_get_suppl_info(MCInst *MI, const insn_map *imap) |
261 | 1.42M | { |
262 | 1.42M | #ifndef CAPSTONE_DIET |
263 | 1.42M | if (!MI->flat_insn->detail) |
264 | 0 | return NULL; |
265 | | |
266 | 1.42M | unsigned Opcode = MCInst_getOpcode(MI); |
267 | 1.42M | return &imap[Opcode].suppl_info; |
268 | | #else |
269 | | return NULL; |
270 | | #endif // CAPSTONE_DIET |
271 | 1.42M | } |
272 | | |
273 | | // Search for the CS instruction id for the given @MC_Opcode in @imap. |
274 | | // return -1 if none is found. |
275 | | unsigned int find_cs_id(unsigned MC_Opcode, const insn_map *imap, |
276 | | unsigned imap_size) |
277 | 1.86M | { |
278 | | // binary searching since the IDs are sorted in order |
279 | 1.86M | unsigned int left, right, m; |
280 | 1.86M | unsigned int max = imap_size; |
281 | | |
282 | 1.86M | right = max - 1; |
283 | | |
284 | 1.86M | if (MC_Opcode < imap[0].id || MC_Opcode > imap[right].id) |
285 | | // not found |
286 | 0 | return -1; |
287 | | |
288 | 1.86M | left = 0; |
289 | | |
290 | 20.9M | while (left <= right) { |
291 | 20.9M | m = (left + right) / 2; |
292 | 20.9M | if (MC_Opcode == imap[m].id) { |
293 | 1.86M | return m; |
294 | 1.86M | } |
295 | | |
296 | 19.0M | if (MC_Opcode < imap[m].id) |
297 | 6.96M | right = m - 1; |
298 | 12.1M | else |
299 | 12.1M | left = m + 1; |
300 | 19.0M | } |
301 | | |
302 | 0 | return -1; |
303 | 1.86M | } |
304 | | |
305 | | /// Sets the Capstone instruction id which maps to the @MI opcode. |
306 | | /// If no mapping is found the function returns and prints an error. |
307 | | void map_cs_id(MCInst *MI, const insn_map *imap, unsigned int imap_size) |
308 | 1.86M | { |
309 | 1.86M | unsigned int i = find_cs_id(MCInst_getOpcode(MI), imap, imap_size); |
310 | 1.86M | if (i != -1) { |
311 | 1.86M | MI->flat_insn->id = imap[i].mapid; |
312 | 1.86M | return; |
313 | 1.86M | } |
314 | 0 | printf("ERROR: Could not find CS id for MCInst opcode: %d\n", |
315 | 0 | MCInst_getOpcode(MI)); |
316 | 0 | return; |
317 | 1.86M | } |
318 | | |
319 | | /// Returns the operand type information from the |
320 | | /// mapping table for instruction operands. |
321 | | /// Only usable by `auto-sync` archs! |
322 | | const cs_op_type mapping_get_op_type(MCInst *MI, unsigned OpNum, |
323 | | const map_insn_ops *insn_ops_map, |
324 | | size_t map_size) |
325 | 14.6M | { |
326 | 14.6M | assert(MI); |
327 | 14.6M | assert(MI->Opcode < map_size); |
328 | 14.6M | assert(OpNum < sizeof(insn_ops_map[MI->Opcode].ops) / |
329 | 14.6M | sizeof(insn_ops_map[MI->Opcode].ops[0])); |
330 | | |
331 | 14.6M | return insn_ops_map[MI->Opcode].ops[OpNum].type; |
332 | 14.6M | } |
333 | | |
334 | | /// Returns the operand access flags from the |
335 | | /// mapping table for instruction operands. |
336 | | /// Only usable by `auto-sync` archs! |
337 | | const cs_ac_type mapping_get_op_access(MCInst *MI, unsigned OpNum, |
338 | | const map_insn_ops *insn_ops_map, |
339 | | size_t map_size) |
340 | 5.65M | { |
341 | 5.65M | assert(MI); |
342 | 5.65M | assert(MI->Opcode < map_size); |
343 | 5.65M | assert(OpNum < sizeof(insn_ops_map[MI->Opcode].ops) / |
344 | 5.65M | sizeof(insn_ops_map[MI->Opcode].ops[0])); |
345 | | |
346 | 5.65M | cs_ac_type access = insn_ops_map[MI->Opcode].ops[OpNum].access; |
347 | 5.65M | if (MCInst_opIsTied(MI, OpNum) || MCInst_opIsTying(MI, OpNum)) |
348 | 412k | access |= (access == CS_AC_READ) ? CS_AC_WRITE : CS_AC_READ; |
349 | 5.65M | return access; |
350 | 5.65M | } |
351 | | |
352 | | /// Returns the operand at detail->arch.operands[op_count + offset] |
353 | | /// Or NULL if detail is not set or the offset would be out of bounds. |
// Expands to cs_<arch>_op *<ARCH>_get_detail_op(MCInst *MI, int offset):
// returns &detail->arch.operands[op_count + offset], or NULL when detail is
// unset or op_count + offset falls outside [0, NUM_<ARCH_UPPER>_OPS).
#define DEFINE_get_detail_op(arch, ARCH, ARCH_UPPER) \
	cs_##arch##_op *ARCH##_get_detail_op(MCInst *MI, int offset) \
	{ \
		if (!MI->flat_insn->detail) \
			return NULL; \
		int OpIdx = MI->flat_insn->detail->arch.op_count + offset; \
		if (OpIdx < 0 || OpIdx >= NUM_##ARCH_UPPER##_OPS) { \
			return NULL; \
		} \
		return &MI->flat_insn->detail->arch.operands[OpIdx]; \
	}

// One accessor per architecture, e.g. ARM_get_detail_op().
DEFINE_get_detail_op(arm, ARM, ARM);
DEFINE_get_detail_op(ppc, PPC, PPC);
DEFINE_get_detail_op(tricore, TriCore, TRICORE);
DEFINE_get_detail_op(aarch64, AArch64, AARCH64);
DEFINE_get_detail_op(alpha, Alpha, ALPHA);
DEFINE_get_detail_op(hppa, HPPA, HPPA);
DEFINE_get_detail_op(loongarch, LoongArch, LOONGARCH);
DEFINE_get_detail_op(mips, Mips, MIPS);
DEFINE_get_detail_op(riscv, RISCV, RISCV);
DEFINE_get_detail_op(systemz, SystemZ, SYSTEMZ);
DEFINE_get_detail_op(xtensa, Xtensa, XTENSA);
DEFINE_get_detail_op(bpf, BPF, BPF);
DEFINE_get_detail_op(arc, ARC, ARC);
DEFINE_get_detail_op(sparc, Sparc, SPARC);
380 | | |
381 | | /// Returns the operand at detail->arch.operands[index] |
382 | | /// Or NULL if detail is not set or the index would be out of bounds. |
// Expands to cs_<arch>_op *<ARCH>_get_detail_op_at(MCInst *MI, int index):
// returns &detail->arch.operands[index], or NULL when detail is unset or
// index falls outside [0, NUM_<ARCH_UPPER>_OPS). Unlike the _get_detail_op
// variant above, the index is absolute, not relative to op_count.
#define DEFINE_get_detail_op_at(arch, ARCH, ARCH_UPPER) \
	cs_##arch##_op *ARCH##_get_detail_op_at(MCInst *MI, int index) \
	{ \
		if (!MI->flat_insn->detail) \
			return NULL; \
		if (index < 0 || index >= NUM_##ARCH_UPPER##_OPS) { \
			return NULL; \
		} \
		return &MI->flat_insn->detail->arch.operands[index]; \
	}

// One accessor per architecture, e.g. RISCV_get_detail_op_at().
DEFINE_get_detail_op_at(arm, ARM, ARM);
DEFINE_get_detail_op_at(ppc, PPC, PPC);
DEFINE_get_detail_op_at(tricore, TriCore, TRICORE);
DEFINE_get_detail_op_at(aarch64, AArch64, AARCH64);
DEFINE_get_detail_op_at(alpha, Alpha, ALPHA);
DEFINE_get_detail_op_at(hppa, HPPA, HPPA);
DEFINE_get_detail_op_at(loongarch, LoongArch, LOONGARCH);
DEFINE_get_detail_op_at(mips, Mips, MIPS);
DEFINE_get_detail_op_at(riscv, RISCV, RISCV);
DEFINE_get_detail_op_at(systemz, SystemZ, SYSTEMZ);
DEFINE_get_detail_op_at(xtensa, Xtensa, XTENSA);
DEFINE_get_detail_op_at(bpf, BPF, BPF);
DEFINE_get_detail_op_at(arc, ARC, ARC);
DEFINE_get_detail_op_at(sparc, Sparc, SPARC);
408 | | |
409 | | /// Returns true if for this architecture the |
410 | | /// alias operands should be filled. |
411 | | /// TODO: Replace this with a proper option. |
412 | | /// So it can be toggled between disas() calls. |
413 | | bool map_use_alias_details(const MCInst *MI) |
414 | 3.13M | { |
415 | 3.13M | assert(MI); |
416 | 3.13M | return (MI->csh->detail_opt & CS_OPT_ON) && |
417 | 3.13M | !(MI->csh->detail_opt & CS_OPT_DETAIL_REAL); |
418 | 3.13M | } |
419 | | |
420 | | /// Sets the setDetailOps flag to @p Val. |
/// If detail == NULL it refuses to set the flag to true.
422 | | void map_set_fill_detail_ops(MCInst *MI, bool Val) |
423 | 3.05M | { |
424 | 3.05M | CS_ASSERT_RET(MI); |
425 | 3.05M | if (!detail_is_set(MI)) { |
426 | 0 | MI->fillDetailOps = false; |
427 | 0 | return; |
428 | 0 | } |
429 | | |
430 | 3.05M | MI->fillDetailOps = Val; |
431 | 3.05M | } |
432 | | |
433 | | /// Sets the instruction alias flags and the given alias id. |
434 | | void map_set_is_alias_insn(MCInst *MI, bool Val, uint64_t Alias) |
435 | 0 | { |
436 | 0 | CS_ASSERT_RET(MI); |
437 | 0 | MI->isAliasInstr = Val; |
438 | 0 | MI->flat_insn->is_alias = Val; |
439 | 0 | MI->flat_insn->alias_id = Alias; |
440 | 0 | } |
441 | | |
442 | | static inline bool char_ends_mnem(const char c, cs_arch arch) |
443 | 366k | { |
444 | 366k | switch (arch) { |
445 | 291k | default: |
446 | 291k | return (!c || c == ' ' || c == '\t' || c == '.'); |
447 | 55.5k | case CS_ARCH_PPC: |
448 | 55.5k | return (!c || c == ' ' || c == '\t'); |
449 | 19.7k | case CS_ARCH_SPARC: |
450 | 19.7k | return (!c || c == ' ' || c == '\t' || c == ','); |
451 | 366k | } |
452 | 366k | } |
453 | | |
454 | | /// Sets an alternative id for some instruction. |
455 | | /// Or -1 if it fails. |
456 | | /// You must add (<ARCH>_INS_ALIAS_BEGIN + 1) to the id to get the real id. |
void map_set_alias_id(MCInst *MI, const SStream *O,
		      const name_map *alias_mnem_id_map, int map_size)
{
	// Only alias instructions carry an alias id.
	if (!MCInst_isAlias(MI))
		return;

	// The mnemonic is parsed out of the already-formatted asm string.
	char alias_mnem[16] = { 0 };
	int i = 0, j = 0;
	const char *asm_str_buf = O->buffer;
	// Skip spaces and tabs
	while (is_blank_char(asm_str_buf[i])) {
		if (!asm_str_buf[i]) {
			// NOTE(review): only reachable if is_blank_char()
			// treats '\0' as blank — confirm; otherwise this
			// branch is dead.
			MI->flat_insn->alias_id = -1;
			return;
		}
		++i;
	}
	// Copy at most sizeof(alias_mnem) - 1 chars, stopping at the
	// architecture-specific mnemonic terminator (see char_ends_mnem()).
	for (; j < sizeof(alias_mnem) - 1; ++j, ++i) {
		if (char_ends_mnem(asm_str_buf[i], MI->csh->arch))
			break;
		alias_mnem[j] = asm_str_buf[i];
	}

	// Resolve the mnemonic; name2id() yields -1 when it is unknown.
	MI->flat_insn->alias_id =
		name2id(alias_mnem_id_map, map_size, alias_mnem);
}
483 | | |
484 | | /// Does a binary search over the given map and searches for @id. |
485 | | /// If @id exists in @map, it sets @found to true and returns |
486 | | /// the value for the @id. |
487 | | /// Otherwise, @found is set to false and it returns UINT64_MAX. |
488 | | /// |
489 | | /// Of course it assumes the map is sorted. |
490 | | uint64_t enum_map_bin_search(const cs_enum_id_map *map, size_t map_len, |
491 | | const char *id, bool *found) |
492 | 0 | { |
493 | 0 | size_t l = 0; |
494 | 0 | size_t r = map_len; |
495 | 0 | size_t id_len = strlen(id); |
496 | |
|
497 | 0 | while (l <= r) { |
498 | 0 | size_t m = (l + r) / 2; |
499 | 0 | size_t j = 0; |
500 | 0 | size_t i = 0; |
501 | 0 | size_t entry_len = strlen(map[m].str); |
502 | |
|
503 | 0 | while (j < entry_len && i < id_len && id[i] == map[m].str[j]) { |
504 | 0 | ++j, ++i; |
505 | 0 | } |
506 | 0 | if (i == id_len && j == entry_len) { |
507 | 0 | *found = true; |
508 | 0 | return map[m].val; |
509 | 0 | } |
510 | | |
511 | 0 | if (id[i] < map[m].str[j]) { |
512 | 0 | r = m - 1; |
513 | 0 | } else if (id[i] > map[m].str[j]) { |
514 | 0 | l = m + 1; |
515 | 0 | } |
516 | 0 | if ((m == 0 && id[i] < map[m].str[j]) || |
517 | 0 | (l + r) / 2 >= map_len) { |
518 | | // Break before we go out of bounds. |
519 | 0 | break; |
520 | 0 | } |
521 | 0 | } |
522 | 0 | *found = false; |
523 | | return UINT64_MAX; |
524 | 0 | } |