Line | Count | Source |
1 | | /* Unicorn Emulator Engine */ |
2 | | /* By Nguyen Anh Quynh <aquynh@gmail.com>, 2015 */ |
3 | | /* Modified for Unicorn Engine by Chen Huitao<chenhuitao@hfmrit.com>, 2020 */ |
4 | | |
5 | | #include "unicorn/unicorn.h" |
6 | | #if defined(UNICORN_HAS_OSXKERNEL) |
7 | | #include <libkern/libkern.h> |
8 | | #else |
9 | | #include <stddef.h> |
10 | | #include <stdio.h> |
11 | | #include <stdlib.h> |
12 | | #endif |
13 | | |
14 | | #include <time.h> // nanosleep |
15 | | #include <string.h> |
16 | | |
17 | | #include "uc_priv.h" |
18 | | |
19 | | // target specific headers |
20 | | #include "qemu/target/m68k/unicorn.h" |
21 | | #include "qemu/target/i386/unicorn.h" |
22 | | #include "qemu/target/arm/unicorn.h" |
23 | | #include "qemu/target/mips/unicorn.h" |
24 | | #include "qemu/target/sparc/unicorn.h" |
25 | | #include "qemu/target/ppc/unicorn.h" |
26 | | #include "qemu/target/riscv/unicorn.h" |
27 | | #include "qemu/target/s390x/unicorn.h" |
28 | | #include "qemu/target/tricore/unicorn.h" |
29 | | |
30 | | #include "qemu/include/tcg/tcg-apple-jit.h" |
31 | | #include "qemu/include/qemu/queue.h" |
32 | | #include "qemu-common.h" |
33 | | |
34 | | static void clear_deleted_hooks(uc_engine *uc); |
35 | | static uc_err uc_snapshot(uc_engine *uc); |
36 | | static uc_err uc_restore_latest_snapshot(uc_engine *uc); |
37 | | |
#if defined(__APPLE__) && defined(HAVE_PTHREAD_JIT_PROTECT) && \
    (defined(__arm__) || defined(__aarch64__))
// On Apple Silicon, JIT pages are either writable or executable per thread.
// These helpers snapshot the thread's executable/writable state on API entry
// and restore it on exit, using uc->nested to handle re-entrant API calls
// (only the outermost save/restore pair touches the real thread state).
static void save_jit_state(uc_engine *uc)
{
    // Only capture the thread state at the outermost nesting level.
    if (!uc->nested) {
        uc->thread_executable_entry = thread_executable();
        uc->current_executable = uc->thread_executable_entry;
    }

    uc->nested += 1;
}

static void restore_jit_state(uc_engine *uc)
{
    // Must be balanced with a prior save_jit_state call.
    assert(uc->nested > 0);
    if (uc->nested == 1) {
        assert_executable(uc->current_executable);
        // Re-apply the protection mode the thread had on entry, but only if
        // it changed while we held it.
        if (uc->current_executable != uc->thread_executable_entry) {
            if (uc->thread_executable_entry) {
                jit_write_protect(true);
            } else {
                jit_write_protect(false);
            }
        }
    }
    uc->nested -= 1;
}
#else
// Non-Apple (or no pthread JIT protection): state tracking is unnecessary.
static void save_jit_state(uc_engine *uc)
{
    (void)uc;
}
static void restore_jit_state(uc_engine *uc)
{
    (void)uc;
}
#endif
75 | | |
76 | | static void *hook_insert(struct list *l, struct hook *h) |
77 | 196k | { |
78 | 196k | void *item = list_insert(l, (void *)h); |
79 | 196k | if (item) { |
80 | 196k | h->refs++; |
81 | 196k | } |
82 | 196k | return item; |
83 | 196k | } |
84 | | |
85 | | static void *hook_append(struct list *l, struct hook *h) |
86 | 0 | { |
87 | 0 | void *item = list_append(l, (void *)h); |
88 | 0 | if (item) { |
89 | 0 | h->refs++; |
90 | 0 | } |
91 | 0 | return item; |
92 | 0 | } |
93 | | |
94 | | static void hook_invalidate_region(void *key, void *data, void *opaq) |
95 | 0 | { |
96 | 0 | uc_engine *uc = (uc_engine *)opaq; |
97 | 0 | HookedRegion *region = (HookedRegion *)key; |
98 | |
|
99 | 0 | uc->uc_invalidate_tb(uc, region->start, region->length); |
100 | 0 | } |
101 | | |
102 | | static void hook_delete(void *data) |
103 | 196k | { |
104 | 196k | struct hook *h = (struct hook *)data; |
105 | | |
106 | 196k | h->refs--; |
107 | | |
108 | 196k | if (h->refs == 0) { |
109 | 196k | g_hash_table_destroy(h->hooked_regions); |
110 | 196k | free(h); |
111 | 196k | } |
112 | 196k | } |
113 | | |
114 | | UNICORN_EXPORT |
115 | | unsigned int uc_version(unsigned int *major, unsigned int *minor) |
116 | 0 | { |
117 | 0 | if (major != NULL && minor != NULL) { |
118 | 0 | *major = UC_API_MAJOR; |
119 | 0 | *minor = UC_API_MINOR; |
120 | 0 | } |
121 | |
|
122 | 0 | return (UC_API_MAJOR << 24) + (UC_API_MINOR << 16) + (UC_API_PATCH << 8) + |
123 | 0 | UC_API_EXTRA; |
124 | 0 | } |
125 | | |
// Placeholder reg_read callback installed by uc_open() before the engine is
// initialized; always fails with UC_ERR_HANDLE. Replaced by the arch-specific
// implementation during engine init.
static uc_err default_reg_read(void *env, int mode, unsigned int regid,
                               void *value, size_t *size)
{
    return UC_ERR_HANDLE;
}
131 | | |
// Placeholder reg_write callback installed by uc_open() before the engine is
// initialized; always fails with UC_ERR_HANDLE. Replaced by the arch-specific
// implementation during engine init.
static uc_err default_reg_write(void *env, int mode, unsigned int regid,
                                const void *value, size_t *size, int *setpc)
{
    return UC_ERR_HANDLE;
}
137 | | |
UNICORN_EXPORT
// Return the last error number recorded on this engine instance.
uc_err uc_errno(uc_engine *uc)
{
    return uc->errnum;
}
143 | | |
144 | | UNICORN_EXPORT |
145 | | const char *uc_strerror(uc_err code) |
146 | 88.3k | { |
147 | 88.3k | switch (code) { |
148 | 0 | default: |
149 | 0 | return "Unknown error code"; |
150 | 0 | case UC_ERR_OK: |
151 | 0 | return "OK (UC_ERR_OK)"; |
152 | 0 | case UC_ERR_NOMEM: |
153 | 0 | return "No memory available or memory not present (UC_ERR_NOMEM)"; |
154 | 0 | case UC_ERR_ARCH: |
155 | 0 | return "Invalid/unsupported architecture (UC_ERR_ARCH)"; |
156 | 0 | case UC_ERR_HANDLE: |
157 | 0 | return "Invalid handle (UC_ERR_HANDLE)"; |
158 | 0 | case UC_ERR_MODE: |
159 | 0 | return "Invalid mode (UC_ERR_MODE)"; |
160 | 0 | case UC_ERR_VERSION: |
161 | 0 | return "Different API version between core & binding (UC_ERR_VERSION)"; |
162 | 18.6k | case UC_ERR_READ_UNMAPPED: |
163 | 18.6k | return "Invalid memory read (UC_ERR_READ_UNMAPPED)"; |
164 | 8.06k | case UC_ERR_WRITE_UNMAPPED: |
165 | 8.06k | return "Invalid memory write (UC_ERR_WRITE_UNMAPPED)"; |
166 | 4.82k | case UC_ERR_FETCH_UNMAPPED: |
167 | 4.82k | return "Invalid memory fetch (UC_ERR_FETCH_UNMAPPED)"; |
168 | 0 | case UC_ERR_HOOK: |
169 | 0 | return "Invalid hook type (UC_ERR_HOOK)"; |
170 | 11.6k | case UC_ERR_INSN_INVALID: |
171 | 11.6k | return "Invalid instruction (UC_ERR_INSN_INVALID)"; |
172 | 0 | case UC_ERR_MAP: |
173 | 0 | return "Invalid memory mapping (UC_ERR_MAP)"; |
174 | 0 | case UC_ERR_WRITE_PROT: |
175 | 0 | return "Write to write-protected memory (UC_ERR_WRITE_PROT)"; |
176 | 0 | case UC_ERR_READ_PROT: |
177 | 0 | return "Read from non-readable memory (UC_ERR_READ_PROT)"; |
178 | 0 | case UC_ERR_FETCH_PROT: |
179 | 0 | return "Fetch from non-executable memory (UC_ERR_FETCH_PROT)"; |
180 | 0 | case UC_ERR_ARG: |
181 | 0 | return "Invalid argument (UC_ERR_ARG)"; |
182 | 124 | case UC_ERR_READ_UNALIGNED: |
183 | 124 | return "Read from unaligned memory (UC_ERR_READ_UNALIGNED)"; |
184 | 92 | case UC_ERR_WRITE_UNALIGNED: |
185 | 92 | return "Write to unaligned memory (UC_ERR_WRITE_UNALIGNED)"; |
186 | 0 | case UC_ERR_FETCH_UNALIGNED: |
187 | 0 | return "Fetch from unaligned memory (UC_ERR_FETCH_UNALIGNED)"; |
188 | 0 | case UC_ERR_RESOURCE: |
189 | 0 | return "Insufficient resource (UC_ERR_RESOURCE)"; |
190 | 44.9k | case UC_ERR_EXCEPTION: |
191 | 44.9k | return "Unhandled CPU exception (UC_ERR_EXCEPTION)"; |
192 | 0 | case UC_ERR_OVERFLOW: |
193 | 0 | return "Provided buffer is too small (UC_ERR_OVERFLOW)"; |
194 | 0 | case UC_ERR_MMU_READ: |
195 | 0 | return "The tlb_fill hook returned false for a read (UC_ERR_MMU_READ)"; |
196 | 0 | case UC_ERR_MMU_WRITE: |
197 | 0 | return "The tlb_fill hook returned false for a write (UC_ERR_MMU_WRITE)"; |
198 | 0 | case UC_ERR_MMU_FETCH: |
199 | 0 | return "The tlb_fill hook returned false for a fetch (UC_ERR_MMU_FETCH)"; |
200 | 88.3k | } |
201 | 88.3k | } |
202 | | |
UNICORN_EXPORT
// Report whether this build of the library was compiled with support for
// `arch`. Each case is compiled in only when the matching UNICORN_HAS_*
// feature macro was defined at build time.
bool uc_arch_supported(uc_arch arch)
{
    switch (arch) {
#ifdef UNICORN_HAS_ARM
    case UC_ARCH_ARM:
        return true;
#endif
#ifdef UNICORN_HAS_ARM64
    case UC_ARCH_ARM64:
        return true;
#endif
#ifdef UNICORN_HAS_M68K
    case UC_ARCH_M68K:
        return true;
#endif
#ifdef UNICORN_HAS_MIPS
    case UC_ARCH_MIPS:
        return true;
#endif
#ifdef UNICORN_HAS_PPC
    case UC_ARCH_PPC:
        return true;
#endif
#ifdef UNICORN_HAS_SPARC
    case UC_ARCH_SPARC:
        return true;
#endif
#ifdef UNICORN_HAS_X86
    case UC_ARCH_X86:
        return true;
#endif
#ifdef UNICORN_HAS_RISCV
    case UC_ARCH_RISCV:
        return true;
#endif
#ifdef UNICORN_HAS_S390X
    case UC_ARCH_S390X:
        return true;
#endif
#ifdef UNICORN_HAS_TRICORE
    case UC_ARCH_TRICORE:
        return true;
#endif
    /* Invalid or disabled arch */
    default:
        return false;
    }
}
252 | | |
// Common prologue for every public API function: save the JIT protection
// state, then lazily initialize the engine on first use. Expands to a
// `return` on init failure, so it must only be used in functions returning
// uc_err (or a compatible int).
#define UC_INIT(uc)                                                            \
    save_jit_state(uc);                                                        \
    if (unlikely(!(uc)->init_done)) {                                          \
        int __init_ret = uc_init_engine(uc);                                   \
        if (unlikely(__init_ret != UC_ERR_OK)) {                               \
            return __init_ret;                                                 \
        }                                                                      \
    }
261 | | |
262 | | static gint uc_exits_cmp(gconstpointer a, gconstpointer b, gpointer user_data) |
263 | 0 | { |
264 | 0 | uint64_t lhs = *((uint64_t *)a); |
265 | 0 | uint64_t rhs = *((uint64_t *)b); |
266 | |
|
267 | 0 | if (lhs < rhs) { |
268 | 0 | return -1; |
269 | 0 | } else if (lhs == rhs) { |
270 | 0 | return 0; |
271 | 0 | } else { |
272 | 0 | return 1; |
273 | 0 | } |
274 | 0 | } |
275 | | |
// Perform the heavyweight, one-time engine initialization deferred from
// uc_open() until first real use (see UC_INIT). Sets up hook lists, the
// exits tree, the QEMU machine, TLB mode, softfloat and register state.
// Returns UC_ERR_HANDLE if already initialized, UC_ERR_RESOURCE if the
// machine cannot be brought up, UC_ERR_OK on success.
static uc_err uc_init_engine(uc_engine *uc)
{
    if (uc->init_done) {
        return UC_ERR_HANDLE;
    }

    // All hook lists share the same refcount-based destructor.
    uc->hooks_to_del.delete_fn = hook_delete;

    for (int i = 0; i < UC_HOOK_MAX; i++) {
        uc->hook[i].delete_fn = hook_delete;
    }

    uc->ctl_exits = g_tree_new_full(uc_exits_cmp, NULL, g_free, NULL);

    if (machine_initialize(uc)) {
        return UC_ERR_RESOURCE;
    }

    // init tlb function
    if (!uc->cpu->cc->tlb_fill) {
        uc->set_tlb(uc, UC_TLB_CPU);
    }

    // init fpu softfloat
    uc->softfloat_initialize();

    if (uc->reg_reset) {
        uc->reg_reset(uc);
    }

    uc->context_content = UC_CTL_CONTEXT_CPU;

    uc->unmapped_regions = g_array_new(false, false, sizeof(MemoryRegion *));

    uc->init_done = true;

    return UC_ERR_OK;
}
314 | | |
UNICORN_EXPORT
// Create a new engine for (arch, mode). Validates the mode bits against the
// per-arch mask, selects the arch-specific init function, and returns the
// new handle through *result. The heavy initialization itself is deferred to
// uc_init_engine() on first API use. Errors: UC_ERR_NOMEM, UC_ERR_MODE
// (invalid mode bits for the arch), UC_ERR_ARCH (unknown/disabled arch).
uc_err uc_open(uc_arch arch, uc_mode mode, uc_engine **result)
{
    struct uc_struct *uc;

    if (arch < UC_ARCH_MAX) {
        uc = calloc(1, sizeof(*uc));
        if (!uc) {
            // memory insufficient
            return UC_ERR_NOMEM;
        }

        /* qemu/exec.c: phys_map_node_reserve() */
        uc->alloc_hint = 16;
        uc->errnum = UC_ERR_OK;
        uc->arch = arch;
        uc->mode = mode;
        // Stub callbacks until uc_init_engine installs the real ones.
        uc->reg_read = default_reg_read;
        uc->reg_write = default_reg_write;

        // uc->ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };
        QLIST_INIT(&uc->ram_list.blocks);

        QTAILQ_INIT(&uc->memory_listeners);

        QTAILQ_INIT(&uc->address_spaces);

        // Per-arch mode validation and init_arch selection. Each case is
        // only compiled in when the matching UNICORN_HAS_* macro is set.
        switch (arch) {
        default:
            break;
#ifdef UNICORN_HAS_M68K
        case UC_ARCH_M68K:
            if ((mode & ~UC_MODE_M68K_MASK) || !(mode & UC_MODE_BIG_ENDIAN)) {
                free(uc);
                return UC_ERR_MODE;
            }
            uc->init_arch = uc_init_m68k;
            break;
#endif
#ifdef UNICORN_HAS_X86
        case UC_ARCH_X86:
            if ((mode & ~UC_MODE_X86_MASK) || (mode & UC_MODE_BIG_ENDIAN) ||
                !(mode & (UC_MODE_16 | UC_MODE_32 | UC_MODE_64))) {
                free(uc);
                return UC_ERR_MODE;
            }
            uc->init_arch = uc_init_x86_64;
            break;
#endif
#ifdef UNICORN_HAS_ARM
        case UC_ARCH_ARM:
            if ((mode & ~UC_MODE_ARM_MASK)) {
                free(uc);
                return UC_ERR_MODE;
            }
            uc->init_arch = uc_init_arm;

            if (mode & UC_MODE_THUMB) {
                uc->thumb = 1;
            }
            break;
#endif
#ifdef UNICORN_HAS_ARM64
        case UC_ARCH_ARM64:
            if (mode & ~UC_MODE_ARM_MASK) {
                free(uc);
                return UC_ERR_MODE;
            }
            uc->init_arch = uc_init_aarch64;
            break;
#endif

#if defined(UNICORN_HAS_MIPS) || defined(UNICORN_HAS_MIPSEL) ||                \
    defined(UNICORN_HAS_MIPS64) || defined(UNICORN_HAS_MIPS64EL)
        case UC_ARCH_MIPS:
            if ((mode & ~UC_MODE_MIPS_MASK) ||
                !(mode & (UC_MODE_MIPS32 | UC_MODE_MIPS64))) {
                free(uc);
                return UC_ERR_MODE;
            }
            // MIPS splits four ways: 32/64-bit crossed with endianness.
            if (mode & UC_MODE_BIG_ENDIAN) {
#ifdef UNICORN_HAS_MIPS
                if (mode & UC_MODE_MIPS32) {
                    uc->init_arch = uc_init_mips;
                }
#endif
#ifdef UNICORN_HAS_MIPS64
                if (mode & UC_MODE_MIPS64) {
                    uc->init_arch = uc_init_mips64;
                }
#endif
            } else { // little endian
#ifdef UNICORN_HAS_MIPSEL
                if (mode & UC_MODE_MIPS32) {
                    uc->init_arch = uc_init_mipsel;
                }
#endif
#ifdef UNICORN_HAS_MIPS64EL
                if (mode & UC_MODE_MIPS64) {
                    uc->init_arch = uc_init_mips64el;
                }
#endif
            }
            break;
#endif

#ifdef UNICORN_HAS_SPARC
        case UC_ARCH_SPARC:
            if ((mode & ~UC_MODE_SPARC_MASK) || !(mode & UC_MODE_BIG_ENDIAN) ||
                !(mode & (UC_MODE_SPARC32 | UC_MODE_SPARC64))) {
                free(uc);
                return UC_ERR_MODE;
            }
            if (mode & UC_MODE_SPARC64) {
                uc->init_arch = uc_init_sparc64;
            } else {
                uc->init_arch = uc_init_sparc;
            }
            break;
#endif
#ifdef UNICORN_HAS_PPC
        case UC_ARCH_PPC:
            if ((mode & ~UC_MODE_PPC_MASK) || !(mode & UC_MODE_BIG_ENDIAN) ||
                !(mode & (UC_MODE_PPC32 | UC_MODE_PPC64))) {
                free(uc);
                return UC_ERR_MODE;
            }
            if (mode & UC_MODE_PPC64) {
                uc->init_arch = uc_init_ppc64;
            } else {
                uc->init_arch = uc_init_ppc;
            }
            break;
#endif
#ifdef UNICORN_HAS_RISCV
        case UC_ARCH_RISCV:
            if ((mode & ~UC_MODE_RISCV_MASK) ||
                !(mode & (UC_MODE_RISCV32 | UC_MODE_RISCV64))) {
                free(uc);
                return UC_ERR_MODE;
            }
            if (mode & UC_MODE_RISCV32) {
                uc->init_arch = uc_init_riscv32;
            } else if (mode & UC_MODE_RISCV64) {
                uc->init_arch = uc_init_riscv64;
            } else {
                free(uc);
                return UC_ERR_MODE;
            }
            break;
#endif
#ifdef UNICORN_HAS_S390X
        case UC_ARCH_S390X:
            if ((mode & ~UC_MODE_S390X_MASK) || !(mode & UC_MODE_BIG_ENDIAN)) {
                free(uc);
                return UC_ERR_MODE;
            }
            uc->init_arch = uc_init_s390x;
            break;
#endif
#ifdef UNICORN_HAS_TRICORE
        case UC_ARCH_TRICORE:
            if ((mode & ~UC_MODE_TRICORE_MASK)) {
                free(uc);
                return UC_ERR_MODE;
            }
            uc->init_arch = uc_init_tricore;
            break;
#endif
        }

        // Arch compiled out, or a MIPS sub-mode with no matching build flag.
        if (uc->init_arch == NULL) {
            free(uc);
            return UC_ERR_ARCH;
        }

        uc->init_done = false;
        uc->cpu_model = INT_MAX; // INT_MAX means the default cpu model.

        *result = uc;

        return UC_ERR_OK;
    } else {
        return UC_ERR_ARCH;
    }
}
501 | | |
UNICORN_EXPORT
// Tear down an engine and free every resource it owns. The destruction
// order matters: translation buffers are flushed first (they hold MMU
// allocations), flatviews are destroyed before the memory regions they may
// still reference, and uc itself is freed last.
uc_err uc_close(uc_engine *uc)
{
    int i;
    MemoryRegion *mr;

    // If lazy init never ran, there is nothing allocated beyond uc itself.
    if (!uc->init_done) {
        free(uc);
        return UC_ERR_OK;
    }

    // Flush all translation buffers or we leak memory allocated by MMU
    uc->tb_flush(uc);

    // Cleanup internally.
    if (uc->release) {
        uc->release(uc->tcg_ctx);
    }
    g_free(uc->tcg_ctx);

    // Cleanup CPU.
    g_free(uc->cpu->cpu_ases);
    g_free(uc->cpu->thread);

    /* cpu */
    qemu_vfree(uc->cpu);

    /* flatviews */
    g_hash_table_destroy(uc->flat_views);

    // During flatviews destruction, we may still access memory regions.
    // So we free them afterwards.
    /* memory */
    mr = &uc->io_mem_unassigned;
    mr->destructor(mr);
    mr = uc->system_io;
    mr->destructor(mr);
    mr = uc->system_memory;
    mr->destructor(mr);
    g_free(uc->system_memory);
    g_free(uc->system_io);
    // NOTE: this size_t i shadows the int i declared above.
    for (size_t i = 0; i < uc->unmapped_regions->len; i++) {
        mr = g_array_index(uc->unmapped_regions, MemoryRegion *, i);
        mr->destructor(mr);
        g_free(mr);
    }
    g_array_free(uc->unmapped_regions, true);

    // Thread relateds.
    if (uc->qemu_thread_data) {
        g_free(uc->qemu_thread_data);
    }

    /* free */
    g_free(uc->init_target_page);

    // Other auxilaries.
    g_free(uc->l1_map);

    if (uc->bounce.buffer) {
        qemu_vfree(uc->bounce.buffer);
    }

    // free hooks and hook lists
    clear_deleted_hooks(uc);

    for (i = 0; i < UC_HOOK_MAX; i++) {
        list_clear(&uc->hook[i]);
    }

    free(uc->mapped_blocks);

    g_tree_destroy(uc->ctl_exits);

    // finally, free uc itself.
    memset(uc, 0, sizeof(*uc));
    free(uc);

    return UC_ERR_OK;
}
582 | | |
583 | | UNICORN_EXPORT |
584 | | uc_err uc_reg_read_batch(uc_engine *uc, int const *regs, void **vals, int count) |
585 | 0 | { |
586 | 0 | UC_INIT(uc); |
587 | 0 | reg_read_t reg_read = uc->reg_read; |
588 | 0 | void *env = uc->cpu->env_ptr; |
589 | 0 | int mode = uc->mode; |
590 | 0 | int i; |
591 | |
|
592 | 0 | for (i = 0; i < count; i++) { |
593 | 0 | unsigned int regid = regs[i]; |
594 | 0 | void *value = vals[i]; |
595 | 0 | size_t size = (size_t)-1; |
596 | 0 | uc_err err = reg_read(env, mode, regid, value, &size); |
597 | 0 | if (err) { |
598 | 0 | restore_jit_state(uc); |
599 | 0 | return err; |
600 | 0 | } |
601 | 0 | } |
602 | | |
603 | 0 | restore_jit_state(uc); |
604 | 0 | return UC_ERR_OK; |
605 | 0 | } |
606 | | |
607 | | UNICORN_EXPORT |
608 | | uc_err uc_reg_write_batch(uc_engine *uc, int const *regs, void *const *vals, |
609 | | int count) |
610 | 0 | { |
611 | 0 | UC_INIT(uc); |
612 | 0 | reg_write_t reg_write = uc->reg_write; |
613 | 0 | void *env = uc->cpu->env_ptr; |
614 | 0 | int mode = uc->mode; |
615 | 0 | int setpc = 0; |
616 | 0 | int i; |
617 | |
|
618 | 0 | for (i = 0; i < count; i++) { |
619 | 0 | unsigned int regid = regs[i]; |
620 | 0 | const void *value = vals[i]; |
621 | 0 | size_t size = (size_t)-1; |
622 | 0 | uc_err err = reg_write(env, mode, regid, value, &size, &setpc); |
623 | 0 | if (err) { |
624 | 0 | restore_jit_state(uc); |
625 | 0 | return err; |
626 | 0 | } |
627 | 0 | } |
628 | 0 | if (setpc) { |
629 | | // force to quit execution and flush TB |
630 | 0 | uc->quit_request = true; |
631 | 0 | break_translation_loop(uc); |
632 | 0 | } |
633 | |
|
634 | 0 | restore_jit_state(uc); |
635 | 0 | return UC_ERR_OK; |
636 | 0 | } |
637 | | |
638 | | UNICORN_EXPORT |
639 | | uc_err uc_reg_read_batch2(uc_engine *uc, int const *regs, void *const *vals, |
640 | | size_t *sizes, int count) |
641 | 0 | { |
642 | 0 | UC_INIT(uc); |
643 | 0 | reg_read_t reg_read = uc->reg_read; |
644 | 0 | void *env = uc->cpu->env_ptr; |
645 | 0 | int mode = uc->mode; |
646 | 0 | int i; |
647 | |
|
648 | 0 | for (i = 0; i < count; i++) { |
649 | 0 | unsigned int regid = regs[i]; |
650 | 0 | void *value = vals[i]; |
651 | 0 | uc_err err = reg_read(env, mode, regid, value, sizes + i); |
652 | 0 | if (err) { |
653 | 0 | restore_jit_state(uc); |
654 | 0 | return err; |
655 | 0 | } |
656 | 0 | } |
657 | | |
658 | 0 | restore_jit_state(uc); |
659 | 0 | return UC_ERR_OK; |
660 | 0 | } |
661 | | |
662 | | UNICORN_EXPORT |
663 | | uc_err uc_reg_write_batch2(uc_engine *uc, int const *regs, |
664 | | const void *const *vals, size_t *sizes, int count) |
665 | 0 | { |
666 | 0 | UC_INIT(uc); |
667 | 0 | reg_write_t reg_write = uc->reg_write; |
668 | 0 | void *env = uc->cpu->env_ptr; |
669 | 0 | int mode = uc->mode; |
670 | 0 | int setpc = 0; |
671 | 0 | int i; |
672 | |
|
673 | 0 | for (i = 0; i < count; i++) { |
674 | 0 | unsigned int regid = regs[i]; |
675 | 0 | const void *value = vals[i]; |
676 | 0 | uc_err err = reg_write(env, mode, regid, value, sizes + i, &setpc); |
677 | 0 | if (err) { |
678 | 0 | restore_jit_state(uc); |
679 | 0 | return err; |
680 | 0 | } |
681 | 0 | } |
682 | 0 | if (setpc) { |
683 | | // force to quit execution and flush TB |
684 | 0 | uc->quit_request = true; |
685 | 0 | break_translation_loop(uc); |
686 | 0 | } |
687 | |
|
688 | 0 | restore_jit_state(uc); |
689 | 0 | return UC_ERR_OK; |
690 | 0 | } |
691 | | |
UNICORN_EXPORT
// Read one register into `value`. The buffer is assumed large enough for the
// register (size is passed as "don't care" to the arch callback).
uc_err uc_reg_read(uc_engine *uc, int regid, void *value)
{
    UC_INIT(uc);
    size_t size = (size_t)-1;
    uc_err err = uc->reg_read(uc->cpu->env_ptr, uc->mode, regid, value, &size);
    restore_jit_state(uc);
    return err;
}
701 | | |
UNICORN_EXPORT
// Write one register from `value`. If the write changed the PC, emulation is
// interrupted and the PC is not re-synced on exit (the caller's value wins).
uc_err uc_reg_write(uc_engine *uc, int regid, const void *value)
{
    UC_INIT(uc);
    int setpc = 0;
    size_t size = (size_t)-1;
    uc_err err =
        uc->reg_write(uc->cpu->env_ptr, uc->mode, regid, value, &size, &setpc);
    if (err) {
        restore_jit_state(uc);
        return err;
    }
    if (setpc) {
        // force to quit execution and flush TB
        uc->quit_request = true;
        uc->skip_sync_pc_on_exit = true;
        break_translation_loop(uc);
    }

    restore_jit_state(uc);
    return UC_ERR_OK;
}
724 | | |
UNICORN_EXPORT
// Read one register with an explicit buffer size; *size is updated to the
// number of bytes actually read by the arch callback.
uc_err uc_reg_read2(uc_engine *uc, int regid, void *value, size_t *size)
{
    UC_INIT(uc);
    uc_err err = uc->reg_read(uc->cpu->env_ptr, uc->mode, regid, value, size);
    restore_jit_state(uc);
    return err;
}
733 | | |
734 | | UNICORN_EXPORT |
735 | | uc_err uc_reg_write2(uc_engine *uc, int regid, const void *value, size_t *size) |
736 | 0 | { |
737 | 0 | UC_INIT(uc); |
738 | 0 | int setpc = 0; |
739 | 0 | uc_err err = |
740 | 0 | uc->reg_write(uc->cpu->env_ptr, uc->mode, regid, value, size, &setpc); |
741 | 0 | if (err) { |
742 | 0 | restore_jit_state(uc); |
743 | 0 | return err; |
744 | 0 | } |
745 | 0 | if (setpc) { |
746 | | // force to quit execution and flush TB |
747 | 0 | uc->quit_request = true; |
748 | 0 | break_translation_loop(uc); |
749 | 0 | } |
750 | |
|
751 | 0 | restore_jit_state(uc); |
752 | 0 | return UC_ERR_OK; |
753 | 0 | } |
754 | | |
755 | | static uint64_t memory_region_len(uc_engine *uc, MemoryRegion *mr, |
756 | | uint64_t address, uint64_t count) |
757 | 393k | { |
758 | 393k | hwaddr end = mr->end; |
759 | 393k | while (mr->container != uc->system_memory) { |
760 | 0 | mr = mr->container; |
761 | 0 | end += mr->addr; |
762 | 0 | } |
763 | 393k | return (uint64_t)MIN(count, end - address); |
764 | 393k | } |
765 | | |
766 | | // check if a memory area is mapped |
767 | | // this is complicated because an area can overlap adjacent blocks |
768 | | static bool check_mem_area(uc_engine *uc, uint64_t address, size_t size) |
769 | 196k | { |
770 | 196k | size_t count = 0, len; |
771 | | |
772 | 393k | while (count < size) { |
773 | 196k | MemoryRegion *mr = uc->memory_mapping(uc, address); |
774 | 196k | if (mr) { |
775 | 196k | len = memory_region_len(uc, mr, address, size - count); |
776 | 196k | count += len; |
777 | 196k | address += len; |
778 | 196k | } else { // this address is not mapped in yet |
779 | 0 | break; |
780 | 0 | } |
781 | 196k | } |
782 | | |
783 | 196k | return (count == size); |
784 | 196k | } |
785 | | |
786 | | uc_err uc_vmem_translate(uc_engine *uc, uint64_t address, uc_prot prot, |
787 | | uint64_t *paddress) |
788 | 0 | { |
789 | 0 | UC_INIT(uc); |
790 | |
|
791 | 0 | if (!(UC_PROT_READ == prot || UC_PROT_WRITE == prot || |
792 | 0 | UC_PROT_EXEC == prot)) { |
793 | 0 | restore_jit_state(uc); |
794 | 0 | return UC_ERR_ARG; |
795 | 0 | } |
796 | | |
797 | | // The sparc mmu doesn't support probe mode |
798 | 0 | if (uc->arch == UC_ARCH_SPARC && uc->cpu->cc->tlb_fill == uc->cpu->cc->tlb_fill_cpu) { |
799 | 0 | restore_jit_state(uc); |
800 | 0 | return UC_ERR_ARG; |
801 | 0 | } |
802 | | |
803 | 0 | if (!uc->virtual_to_physical(uc, address, prot, paddress)) { |
804 | 0 | restore_jit_state(uc); |
805 | 0 | switch (prot) { |
806 | 0 | case UC_PROT_READ: |
807 | 0 | return UC_ERR_READ_PROT; |
808 | 0 | case UC_PROT_WRITE: |
809 | 0 | return UC_ERR_WRITE_PROT; |
810 | 0 | case UC_PROT_EXEC: |
811 | 0 | return UC_ERR_FETCH_PROT; |
812 | 0 | default: |
813 | 0 | return UC_ERR_ARG; |
814 | 0 | } |
815 | 0 | } |
816 | | |
817 | 0 | restore_jit_state(uc); |
818 | 0 | return UC_ERR_OK; |
819 | 0 | } |
820 | | |
UNICORN_EXPORT
// Read `size` bytes from virtual address `address` into `_bytes`, probing
// the MMU with access type `prot`, one page at a time. Returns
// UC_ERR_READ_PROT on any failed page, UC_ERR_ARG for invalid prot /
// oversized requests / SPARC (no MMU probe mode).
uc_err uc_vmem_read(uc_engine *uc, uint64_t address, uc_prot prot,
                    void *_bytes, size_t size)
{
    size_t count = 0, len;
    uint8_t *bytes = _bytes;
    uint64_t align;
    uint64_t pagesize;

    UC_INIT(uc);

    // qemu cpu_physical_memory_rw() size is an int
    if (size > INT_MAX) {
        restore_jit_state(uc);
        return UC_ERR_ARG;
    }

    // The sparc mmu doesn't support probe mode
    if (uc->arch == UC_ARCH_SPARC &&
        uc->cpu->cc->tlb_fill == uc->cpu->cc->tlb_fill_cpu) {
        restore_jit_state(uc);
        return UC_ERR_ARG;
    }

    if (!(UC_PROT_READ == prot || UC_PROT_WRITE == prot ||
          UC_PROT_EXEC == prot)) {
        restore_jit_state(uc);
        return UC_ERR_ARG;
    }

    while (count < size) {
        align = uc->target_page_align;
        pagesize = uc->target_page_size;
        // Clamp this chunk to the end of the current page; assumes
        // target_page_align is the page-offset mask — TODO confirm.
        len = MIN(size - count, (address & ~align) + pagesize - address);
        if (!uc->read_mem_virtual(uc, address, prot, bytes, len)) {
            restore_jit_state(uc);
            return UC_ERR_READ_PROT;
        }
        bytes += len;
        address += len;
        count += len;
    }
    assert(count == size);
    restore_jit_state(uc);
    return UC_ERR_OK;
}
866 | | |
UNICORN_EXPORT
// Write `size` bytes from `_bytes` to virtual address `address`: each page
// is first translated (probing with `prot`) and then written physically via
// uc_mem_write. Any translation or write failure maps to UC_ERR_WRITE_PROT.
uc_err uc_vmem_write(uc_engine *uc, uint64_t address, uc_prot prot,
                     const void *_bytes, size_t size)
{
    size_t count = 0, len;
    const uint8_t *bytes = _bytes;
    uint64_t align;
    uint64_t pagesize;
    uint64_t paddr = 0;

    UC_INIT(uc);

    // qemu cpu_physical_memory_rw() size is an int
    if (size > INT_MAX) {
        restore_jit_state(uc);
        return UC_ERR_ARG;
    }

    // The sparc mmu doesn't support probe mode
    if (uc->arch == UC_ARCH_SPARC &&
        uc->cpu->cc->tlb_fill == uc->cpu->cc->tlb_fill_cpu) {
        restore_jit_state(uc);
        return UC_ERR_ARG;
    }

    if (!(UC_PROT_READ == prot || UC_PROT_WRITE == prot ||
          UC_PROT_EXEC == prot)) {
        restore_jit_state(uc);
        return UC_ERR_ARG;
    }

    while (count < size) {
        align = uc->target_page_align;
        pagesize = uc->target_page_size;
        // Clamp this chunk to the end of the current page; assumes
        // target_page_align is the page-offset mask — TODO confirm.
        len = MIN(size - count, (address & ~align) + pagesize - address);
        if (uc_vmem_translate(uc, address, prot, &paddr) != UC_ERR_OK) {
            restore_jit_state(uc);
            return UC_ERR_WRITE_PROT;
        }
        if (uc_mem_write(uc, paddr, bytes, len) != UC_ERR_OK) {
            restore_jit_state(uc);
            return UC_ERR_WRITE_PROT;
        }
        bytes += len;
        address += len;
        count += len;
    }
    assert(count == size);
    restore_jit_state(uc);
    return UC_ERR_OK;
}
917 | | |
918 | | UNICORN_EXPORT |
919 | | uc_err uc_mem_read(uc_engine *uc, uint64_t address, void *_bytes, uint64_t size) |
920 | 0 | { |
921 | 0 | uint64_t count = 0, len; |
922 | 0 | uint8_t *bytes = _bytes; |
923 | |
|
924 | 0 | UC_INIT(uc); |
925 | |
|
926 | 0 | if (!check_mem_area(uc, address, size)) { |
927 | 0 | restore_jit_state(uc); |
928 | 0 | return UC_ERR_READ_UNMAPPED; |
929 | 0 | } |
930 | | |
931 | | // memory area can overlap adjacent memory blocks |
932 | 0 | while (count < size) { |
933 | 0 | MemoryRegion *mr = uc->memory_mapping(uc, address); |
934 | 0 | if (mr) { |
935 | 0 | len = memory_region_len(uc, mr, address, size - count); |
936 | 0 | if (uc->read_mem(&uc->address_space_memory, address, bytes, len) == |
937 | 0 | false) { |
938 | 0 | break; |
939 | 0 | } |
940 | 0 | count += len; |
941 | 0 | address += len; |
942 | 0 | bytes += len; |
943 | 0 | } else { // this address is not mapped in yet |
944 | 0 | break; |
945 | 0 | } |
946 | 0 | } |
947 | |
|
948 | 0 | if (count == size) { |
949 | 0 | restore_jit_state(uc); |
950 | 0 | return UC_ERR_OK; |
951 | 0 | } else { |
952 | 0 | restore_jit_state(uc); |
953 | 0 | return UC_ERR_READ_UNMAPPED; |
954 | 0 | } |
955 | 0 | } |
956 | | |
957 | | UNICORN_EXPORT |
958 | | uc_err uc_mem_write(uc_engine *uc, uint64_t address, const void *_bytes, |
959 | | uint64_t size) |
960 | 196k | { |
961 | 196k | uint64_t count = 0, len; |
962 | 196k | const uint8_t *bytes = _bytes; |
963 | | |
964 | 196k | UC_INIT(uc); |
965 | | |
966 | 196k | if (!check_mem_area(uc, address, size)) { |
967 | 0 | restore_jit_state(uc); |
968 | 0 | return UC_ERR_WRITE_UNMAPPED; |
969 | 0 | } |
970 | | |
971 | | // memory area can overlap adjacent memory blocks |
972 | 393k | while (count < size) { |
973 | 196k | MemoryRegion *mr = uc->memory_mapping(uc, address); |
974 | 196k | if (mr) { |
975 | 196k | uint32_t operms = mr->perms; |
976 | 196k | uint64_t align = uc->target_page_align; |
977 | 196k | if (!(operms & UC_PROT_WRITE)) { // write protected |
978 | | // but this is not the program accessing memory, so temporarily |
979 | | // mark writable |
980 | 0 | uc->readonly_mem(mr, false); |
981 | 0 | } |
982 | | |
983 | 196k | len = memory_region_len(uc, mr, address, size - count); |
984 | 196k | if (uc->snapshot_level && uc->snapshot_level > mr->priority) { |
985 | 0 | mr = uc->memory_cow(uc, mr, address & ~align, |
986 | 0 | (len + (address & align) + align) & ~align); |
987 | 0 | if (!mr) { |
988 | 0 | return UC_ERR_NOMEM; |
989 | 0 | } |
990 | 0 | } |
991 | 196k | if (uc->write_mem(&uc->address_space_memory, address, bytes, len) == |
992 | 196k | false) { |
993 | 0 | break; |
994 | 0 | } |
995 | | |
996 | 196k | if (!(operms & UC_PROT_WRITE)) { // write protected |
997 | | // now write protect it again |
998 | 0 | uc->readonly_mem(mr, true); |
999 | 0 | } |
1000 | | |
1001 | 196k | count += len; |
1002 | 196k | address += len; |
1003 | 196k | bytes += len; |
1004 | 196k | } else { // this address is not mapped in yet |
1005 | 0 | break; |
1006 | 0 | } |
1007 | 196k | } |
1008 | | |
1009 | 196k | if (count == size) { |
1010 | 196k | restore_jit_state(uc); |
1011 | 196k | return UC_ERR_OK; |
1012 | 196k | } else { |
1013 | 0 | restore_jit_state(uc); |
1014 | 0 | return UC_ERR_WRITE_UNMAPPED; |
1015 | 0 | } |
1016 | 196k | } |
1017 | | |
1018 | 0 | #define TIMEOUT_STEP 2 // microseconds |
1019 | | static void *_timeout_fn(void *arg) |
1020 | 0 | { |
1021 | 0 | struct uc_struct *uc = arg; |
1022 | 0 | int64_t current_time = get_clock(); |
1023 | |
|
1024 | 0 | do { |
1025 | 0 | usleep(TIMEOUT_STEP); |
1026 | | // perhaps emulation is even done before timeout? |
1027 | 0 | if (uc->emulation_done) { |
1028 | 0 | break; |
1029 | 0 | } |
1030 | 0 | } while ((uint64_t)(get_clock() - current_time) < uc->timeout); |
1031 | | |
1032 | | // timeout before emulation is done? |
1033 | 0 | if (!uc->emulation_done) { |
1034 | 0 | uc->timed_out = true; |
1035 | | // force emulation to stop |
1036 | 0 | uc_emu_stop(uc); |
1037 | 0 | } |
1038 | |
|
1039 | 0 | return NULL; |
1040 | 0 | } |
1041 | | |
// Arm the emulation timeout: store @timeout (nanoseconds; the call site in
// uc_emu_start converts from microseconds) and spawn a joinable watchdog
// thread running _timeout_fn. uc_emu_start joins this thread before it
// returns.
static void enable_emu_timer(uc_engine *uc, uint64_t timeout)
{
    uc->timeout = timeout;
    qemu_thread_create(uc, &uc->timer, "timeout", _timeout_fn, uc,
                       QEMU_THREAD_JOINABLE);
}
1048 | | |
1049 | | static void hook_count_cb(struct uc_struct *uc, uint64_t address, uint32_t size, |
1050 | | void *user_data) |
1051 | 422M | { |
1052 | | // count this instruction. ah ah ah. |
1053 | 422M | uc->emu_counter++; |
1054 | | // printf(":: emu counter = %u, at %lx\n", uc->emu_counter, address); |
1055 | | |
1056 | 422M | if (uc->emu_counter > uc->emu_count) { |
1057 | | // printf(":: emu counter = %u, stop emulation\n", uc->emu_counter); |
1058 | 54.9M | uc_emu_stop(uc); |
1059 | 54.9M | } |
1060 | 422M | } |
1061 | | |
1062 | | static void clear_deleted_hooks(uc_engine *uc) |
1063 | 393k | { |
1064 | 393k | struct list_item *cur; |
1065 | 393k | struct hook *hook; |
1066 | 393k | int i; |
1067 | | |
1068 | 393k | for (cur = uc->hooks_to_del.head; |
1069 | 393k | cur != NULL && (hook = (struct hook *)cur->data); cur = cur->next) { |
1070 | 0 | assert(hook->to_delete); |
1071 | 0 | for (i = 0; i < UC_HOOK_MAX; i++) { |
1072 | 0 | if (list_remove(&uc->hook[i], (void *)hook)) { |
1073 | 0 | break; |
1074 | 0 | } |
1075 | 0 | } |
1076 | 0 | } |
1077 | | |
1078 | 393k | list_clear(&uc->hooks_to_del); |
1079 | 393k | } |
1080 | | |
// Start emulation at @begin and run until @until is reached, @timeout
// microseconds elapse (0 = no timeout), or @count instructions have executed
// (0 = unlimited, implemented via hook_count_cb). May be re-entered from a
// hook callback: nesting is tracked in uc->nested_level and per-level exit
// addresses in uc->exits.
UNICORN_EXPORT
uc_err uc_emu_start(uc_engine *uc, uint64_t begin, uint64_t until,
                    uint64_t timeout, size_t count)
{
    uc_err err;

    // reset the counter
    uc->emu_counter = 0;
    uc->invalid_error = UC_ERR_OK;
    uc->emulation_done = false;
    uc->size_recur_mem = 0;
    uc->timed_out = false;
    uc->first_tb = true;

    // Avoid nested uc_emu_start saves wrong jit states.
    if (uc->nested_level == 0) {
        UC_INIT(uc);
    }

    // Advance the nested levels. We must decrease the level count by one when
    // we return from uc_emu_start.
    if (uc->nested_level >= UC_MAX_NESTED_LEVEL) {
        // We can't support so many nested levels.
        return UC_ERR_RESOURCE;
    }
    uc->nested_level++;

    // Seed the program counter for the target architecture. 32-bit targets
    // take only the low dword of @begin.
    uint32_t begin_pc32 = READ_DWORD(begin);
    switch (uc->arch) {
    default:
        break;
#ifdef UNICORN_HAS_M68K
    case UC_ARCH_M68K:
        uc_reg_write(uc, UC_M68K_REG_PC, &begin_pc32);
        break;
#endif
#ifdef UNICORN_HAS_X86
    case UC_ARCH_X86:
        switch (uc->mode) {
        default:
            break;
        case UC_MODE_16: {
            uint16_t ip;
            uint16_t cs;

            uc_reg_read(uc, UC_X86_REG_CS, &cs);
            // compensate for later adding up IP & CS
            ip = begin - cs * 16;
            uc_reg_write(uc, UC_X86_REG_IP, &ip);
            break;
        }
        case UC_MODE_32:
            uc_reg_write(uc, UC_X86_REG_EIP, &begin_pc32);
            break;
        case UC_MODE_64:
            uc_reg_write(uc, UC_X86_REG_RIP, &begin);
            break;
        }
        break;
#endif
#ifdef UNICORN_HAS_ARM
    case UC_ARCH_ARM:
        uc_reg_write(uc, UC_ARM_REG_R15, &begin_pc32);
        break;
#endif
#ifdef UNICORN_HAS_ARM64
    case UC_ARCH_ARM64:
        uc_reg_write(uc, UC_ARM64_REG_PC, &begin);
        break;
#endif
#ifdef UNICORN_HAS_MIPS
    case UC_ARCH_MIPS:
        if (uc->mode & UC_MODE_MIPS64) {
            uc_reg_write(uc, UC_MIPS_REG_PC, &begin);
        } else {
            uc_reg_write(uc, UC_MIPS_REG_PC, &begin_pc32);
        }
        break;
#endif
#ifdef UNICORN_HAS_SPARC
    case UC_ARCH_SPARC:
        // TODO: Sparc/Sparc64
        uc_reg_write(uc, UC_SPARC_REG_PC, &begin);
        break;
#endif
#ifdef UNICORN_HAS_PPC
    case UC_ARCH_PPC:
        if (uc->mode & UC_MODE_PPC64) {
            uc_reg_write(uc, UC_PPC_REG_PC, &begin);
        } else {
            uc_reg_write(uc, UC_PPC_REG_PC, &begin_pc32);
        }
        break;
#endif
#ifdef UNICORN_HAS_RISCV
    case UC_ARCH_RISCV:
        if (uc->mode & UC_MODE_RISCV64) {
            uc_reg_write(uc, UC_RISCV_REG_PC, &begin);
        } else {
            uc_reg_write(uc, UC_RISCV_REG_PC, &begin_pc32);
        }
        break;
#endif
#ifdef UNICORN_HAS_S390X
    case UC_ARCH_S390X:
        uc_reg_write(uc, UC_S390X_REG_PC, &begin);
        break;
#endif
#ifdef UNICORN_HAS_TRICORE
    case UC_ARCH_TRICORE:
        uc_reg_write(uc, UC_TRICORE_REG_PC, &begin_pc32);
        break;
#endif
    }
    uc->skip_sync_pc_on_exit = false;
    uc->stop_request = false;

    uc->emu_count = count;
    // remove count hook if counting isn't necessary
    if (count <= 0 && uc->count_hook != 0) {
        uc_hook_del(uc, uc->count_hook);
        uc->count_hook = 0;

        // In this case, we have to drop all translated blocks.
        uc->tb_flush(uc);
    }
    // set up count hook to count instructions.
    if (count > 0 && uc->count_hook == 0) {
        uc_err err;
        // callback to count instructions must be run before everything else,
        // so instead of appending, we must insert the hook at the begin
        // of the hook list
        uc->hook_insert = 1;
        err = uc_hook_add(uc, &uc->count_hook, UC_HOOK_CODE, hook_count_cb,
                          NULL, 1, 0);
        // restore to append mode for uc_hook_add()
        uc->hook_insert = 0;
        if (err != UC_ERR_OK) {
            uc->nested_level--;
            return err;
        }
    }

    // If UC_CTL_UC_USE_EXITS is set, then the @until param won't have any
    // effect. This is designed for the backward compatibility.
    if (!uc->use_exits) {
        uc->exits[uc->nested_level - 1] = until;
    }

    if (timeout) {
        enable_emu_timer(uc, timeout * 1000); // microseconds -> nanoseconds
    }

    // run the vCPU; returns when emulation stops at this nesting level
    uc->vm_start(uc);

    uc->nested_level--;

    // emulation is done if and only if we exit the outer uc_emu_start
    // or we may lost uc_emu_stop
    if (uc->nested_level == 0) {
        uc->emulation_done = true;

        // remove hooks to delete
        // make sure we delete all hooks at the first level.
        clear_deleted_hooks(uc);

        restore_jit_state(uc);
    }

    if (timeout) {
        // wait for the timer to finish
        qemu_thread_join(&uc->timer);
    }

    // We may be in a nested uc_emu_start and thus clear invalid_error
    // once we are done.
    err = uc->invalid_error;
    uc->invalid_error = 0;
    return err;
}
1261 | | |
// Request that the running emulation stop as soon as possible. Callable from
// within hook callbacks (e.g. hook_count_cb). Sets the stop flag and breaks
// out of the TCG translation loop; returns that operation's status.
UNICORN_EXPORT
uc_err uc_emu_stop(uc_engine *uc)
{
    UC_INIT(uc);
    uc->stop_request = true;
    uc_err err = break_translation_loop(uc);
    restore_jit_state(uc);
    return err;
}
1271 | | |
1272 | | // return target index where a memory region at the address exists, or could be |
1273 | | // inserted |
1274 | | // |
1275 | | // address either is inside the mapping at the returned index, or is in free |
1276 | | // space before the next mapping. |
1277 | | // |
1278 | | // if there is overlap, between regions, ending address will be higher than the |
1279 | | // starting address of the mapping at returned index |
1280 | | static int bsearch_mapped_blocks(const uc_engine *uc, uint64_t address) |
1281 | 393k | { |
1282 | 393k | int left, right, mid; |
1283 | 393k | MemoryRegion *mapping; |
1284 | | |
1285 | 393k | left = 0; |
1286 | 393k | right = uc->mapped_block_count; |
1287 | | |
1288 | 393k | while (left < right) { |
1289 | 0 | mid = left + (right - left) / 2; |
1290 | |
|
1291 | 0 | mapping = uc->mapped_blocks[mid]; |
1292 | |
|
1293 | 0 | if (mapping->end - 1 < address) { |
1294 | 0 | left = mid + 1; |
1295 | 0 | } else if (mapping->addr > address) { |
1296 | 0 | right = mid; |
1297 | 0 | } else { |
1298 | 0 | return mid; |
1299 | 0 | } |
1300 | 0 | } |
1301 | | |
1302 | 393k | return left; |
1303 | 393k | } |
1304 | | |
1305 | | // find if a memory range overlaps with existing mapped regions |
1306 | | static bool memory_overlap(struct uc_struct *uc, uint64_t begin, size_t size) |
1307 | 196k | { |
1308 | 196k | unsigned int i; |
1309 | 196k | uint64_t end = begin + size - 1; |
1310 | | |
1311 | 196k | i = bsearch_mapped_blocks(uc, begin); |
1312 | | |
1313 | | // is this the highest region with no possible overlap? |
1314 | 196k | if (i >= uc->mapped_block_count) |
1315 | 196k | return false; |
1316 | | |
1317 | | // end address overlaps this region? |
1318 | 0 | if (end >= uc->mapped_blocks[i]->addr) |
1319 | 0 | return true; |
1320 | | |
1321 | | // not found |
1322 | | |
1323 | 0 | return false; |
1324 | 0 | } |
1325 | | |
1326 | | // common setup/error checking shared between uc_mem_map and uc_mem_map_ptr |
1327 | | static uc_err mem_map(uc_engine *uc, MemoryRegion *block) |
1328 | 196k | { |
1329 | | |
1330 | 196k | MemoryRegion **regions; |
1331 | 196k | int pos; |
1332 | | |
1333 | 196k | if (block == NULL) { |
1334 | 0 | return UC_ERR_NOMEM; |
1335 | 0 | } |
1336 | | |
1337 | 196k | if ((uc->mapped_block_count & (MEM_BLOCK_INCR - 1)) == 0) { // time to grow |
1338 | 196k | regions = (MemoryRegion **)g_realloc( |
1339 | 196k | uc->mapped_blocks, |
1340 | 196k | sizeof(MemoryRegion *) * (uc->mapped_block_count + MEM_BLOCK_INCR)); |
1341 | 196k | if (regions == NULL) { |
1342 | 0 | return UC_ERR_NOMEM; |
1343 | 0 | } |
1344 | 196k | uc->mapped_blocks = regions; |
1345 | 196k | } |
1346 | | |
1347 | 196k | pos = bsearch_mapped_blocks(uc, block->addr); |
1348 | | |
1349 | | // shift the array right to give space for the new pointer |
1350 | 196k | memmove(&uc->mapped_blocks[pos + 1], &uc->mapped_blocks[pos], |
1351 | 196k | sizeof(MemoryRegion *) * (uc->mapped_block_count - pos)); |
1352 | | |
1353 | 196k | uc->mapped_blocks[pos] = block; |
1354 | 196k | uc->mapped_block_count++; |
1355 | | |
1356 | 196k | return UC_ERR_OK; |
1357 | 196k | } |
1358 | | |
1359 | | static uc_err mem_map_check(uc_engine *uc, uint64_t address, uint64_t size, |
1360 | | uint32_t perms) |
1361 | 196k | { |
1362 | 196k | if (size == 0) { |
1363 | | // invalid memory mapping |
1364 | 0 | return UC_ERR_ARG; |
1365 | 0 | } |
1366 | | |
1367 | | // address cannot wrap around |
1368 | 196k | if (address + size - 1 < address) { |
1369 | 0 | return UC_ERR_ARG; |
1370 | 0 | } |
1371 | | |
1372 | | // address must be aligned to uc->target_page_size |
1373 | 196k | if ((address & uc->target_page_align) != 0) { |
1374 | 0 | return UC_ERR_ARG; |
1375 | 0 | } |
1376 | | |
1377 | | // size must be multiple of uc->target_page_size |
1378 | 196k | if ((size & uc->target_page_align) != 0) { |
1379 | 0 | return UC_ERR_ARG; |
1380 | 0 | } |
1381 | | |
1382 | | // check for only valid permissions |
1383 | 196k | if ((perms & ~UC_PROT_ALL) != 0) { |
1384 | 0 | return UC_ERR_ARG; |
1385 | 0 | } |
1386 | | |
1387 | | // this area overlaps existing mapped regions? |
1388 | 196k | if (memory_overlap(uc, address, size)) { |
1389 | 0 | return UC_ERR_MAP; |
1390 | 0 | } |
1391 | | |
1392 | 196k | return UC_ERR_OK; |
1393 | 196k | } |
1394 | | |
1395 | | UNICORN_EXPORT |
1396 | | uc_err uc_mem_map(uc_engine *uc, uint64_t address, uint64_t size, |
1397 | | uint32_t perms) |
1398 | 196k | { |
1399 | 196k | uc_err res; |
1400 | | |
1401 | 196k | UC_INIT(uc); |
1402 | | |
1403 | 196k | res = mem_map_check(uc, address, size, perms); |
1404 | 196k | if (res) { |
1405 | 0 | restore_jit_state(uc); |
1406 | 0 | return res; |
1407 | 0 | } |
1408 | | |
1409 | 196k | res = mem_map(uc, uc->memory_map(uc, address, size, perms)); |
1410 | 196k | restore_jit_state(uc); |
1411 | 196k | return res; |
1412 | 196k | } |
1413 | | |
1414 | | UNICORN_EXPORT |
1415 | | uc_err uc_mem_map_ptr(uc_engine *uc, uint64_t address, uint64_t size, |
1416 | | uint32_t perms, void *ptr) |
1417 | 0 | { |
1418 | 0 | uc_err res; |
1419 | |
|
1420 | 0 | UC_INIT(uc); |
1421 | |
|
1422 | 0 | if (ptr == NULL) { |
1423 | 0 | restore_jit_state(uc); |
1424 | 0 | return UC_ERR_ARG; |
1425 | 0 | } |
1426 | | |
1427 | 0 | res = mem_map_check(uc, address, size, perms); |
1428 | 0 | if (res) { |
1429 | 0 | restore_jit_state(uc); |
1430 | 0 | return res; |
1431 | 0 | } |
1432 | | |
1433 | 0 | res = mem_map(uc, uc->memory_map_ptr(uc, address, size, perms, ptr)); |
1434 | 0 | restore_jit_state(uc); |
1435 | 0 | return res; |
1436 | 0 | } |
1437 | | |
1438 | | UNICORN_EXPORT |
1439 | | uc_err uc_mmio_map(uc_engine *uc, uint64_t address, uint64_t size, |
1440 | | uc_cb_mmio_read_t read_cb, void *user_data_read, |
1441 | | uc_cb_mmio_write_t write_cb, void *user_data_write) |
1442 | 0 | { |
1443 | 0 | uc_err res; |
1444 | |
|
1445 | 0 | UC_INIT(uc); |
1446 | |
|
1447 | 0 | res = mem_map_check(uc, address, size, UC_PROT_ALL); |
1448 | 0 | if (res) { |
1449 | 0 | restore_jit_state(uc); |
1450 | 0 | return res; |
1451 | 0 | } |
1452 | | |
1453 | | // The callbacks do not need to be checked for NULL here, as their presence |
1454 | | // (or lack thereof) will determine the permissions used. |
1455 | 0 | res = mem_map(uc, uc->memory_map_io(uc, address, size, read_cb, write_cb, |
1456 | 0 | user_data_read, user_data_write)); |
1457 | 0 | restore_jit_state(uc); |
1458 | 0 | return res; |
1459 | 0 | } |
1460 | | |
1461 | | // Create a backup copy of the indicated MemoryRegion. |
1462 | | // Generally used in prepartion for splitting a MemoryRegion. |
1463 | | static uint8_t *copy_region(struct uc_struct *uc, MemoryRegion *mr) |
1464 | 0 | { |
1465 | 0 | uint8_t *block = (uint8_t *)g_malloc0((uint64_t)int128_get64(mr->size)); |
1466 | 0 | if (block != NULL) { |
1467 | 0 | uc_err err = |
1468 | 0 | uc_mem_read(uc, mr->addr, block, (uint64_t)int128_get64(mr->size)); |
1469 | 0 | if (err != UC_ERR_OK) { |
1470 | 0 | free(block); |
1471 | 0 | block = NULL; |
1472 | 0 | } |
1473 | 0 | } |
1474 | |
|
1475 | 0 | return block; |
1476 | 0 | } |
1477 | | |
1478 | | /* |
1479 | | This function is similar to split_region, but for MMIO memory. |
1480 | | |
1481 | | Note this function may be called recursively. |
1482 | | */ |
1483 | | static bool split_mmio_region(struct uc_struct *uc, MemoryRegion *mr, |
1484 | | uint64_t address, uint64_t size, bool do_delete) |
1485 | 0 | { |
1486 | 0 | uint64_t begin, end, chunk_end; |
1487 | 0 | uint64_t l_size, r_size, m_size; |
1488 | 0 | mmio_cbs backup; |
1489 | |
|
1490 | 0 | chunk_end = address + size; |
1491 | | |
1492 | | // This branch also break recursion. |
1493 | 0 | if (address <= mr->addr && chunk_end >= mr->end) { |
1494 | 0 | return true; |
1495 | 0 | } |
1496 | | |
1497 | 0 | if (size == 0) { |
1498 | 0 | return false; |
1499 | 0 | } |
1500 | | |
1501 | 0 | begin = mr->addr; |
1502 | 0 | end = mr->end; |
1503 | |
|
1504 | 0 | memcpy(&backup, mr->opaque, sizeof(mmio_cbs)); |
1505 | | |
1506 | | /* overlapping cases |
1507 | | * |------mr------| |
1508 | | * case 1 |---size--| // Is it possible??? |
1509 | | * case 2 |--size--| |
1510 | | * case 3 |---size--| |
1511 | | */ |
1512 | | |
1513 | | // unmap this region first, then do split it later |
1514 | 0 | if (uc_mem_unmap(uc, mr->addr, (uint64_t)int128_get64(mr->size)) != |
1515 | 0 | UC_ERR_OK) { |
1516 | 0 | return false; |
1517 | 0 | } |
1518 | | |
1519 | | // adjust some things |
1520 | 0 | if (address < begin) { |
1521 | 0 | address = begin; |
1522 | 0 | } |
1523 | 0 | if (chunk_end > end) { |
1524 | 0 | chunk_end = end; |
1525 | 0 | } |
1526 | | |
1527 | | // compute sub region sizes |
1528 | 0 | l_size = (uint64_t)(address - begin); |
1529 | 0 | r_size = (uint64_t)(end - chunk_end); |
1530 | 0 | m_size = (uint64_t)(chunk_end - address); |
1531 | |
|
1532 | 0 | if (l_size > 0) { |
1533 | 0 | if (uc_mmio_map(uc, begin, l_size, backup.read, backup.user_data_read, |
1534 | 0 | backup.write, backup.user_data_write) != UC_ERR_OK) { |
1535 | 0 | return false; |
1536 | 0 | } |
1537 | 0 | } |
1538 | | |
1539 | 0 | if (m_size > 0 && !do_delete) { |
1540 | 0 | if (uc_mmio_map(uc, address, m_size, backup.read, backup.user_data_read, |
1541 | 0 | backup.write, backup.user_data_write) != UC_ERR_OK) { |
1542 | 0 | return false; |
1543 | 0 | } |
1544 | 0 | } |
1545 | | |
1546 | 0 | if (r_size > 0) { |
1547 | 0 | if (uc_mmio_map(uc, chunk_end, r_size, backup.read, |
1548 | 0 | backup.user_data_read, backup.write, |
1549 | 0 | backup.user_data_write) != UC_ERR_OK) { |
1550 | 0 | return false; |
1551 | 0 | } |
1552 | 0 | } |
1553 | | |
1554 | 0 | return true; |
1555 | 0 | } |
1556 | | |
1557 | | /* |
1558 | | Split the given MemoryRegion at the indicated address for the indicated size |
1559 | | this may result in the create of up to 3 spanning sections. If the delete |
1560 | | parameter is true, the no new section will be created to replace the indicate |
1561 | | range. This functions exists to support uc_mem_protect and uc_mem_unmap. |
1562 | | |
1563 | | This is a static function and callers have already done some preliminary |
1564 | | parameter validation. |
1565 | | |
1566 | | The do_delete argument indicates that we are being called to support |
1567 | | uc_mem_unmap. In this case we save some time by choosing NOT to remap |
1568 | | the areas that are intended to get unmapped |
1569 | | */ |
1570 | | // TODO: investigate whether qemu region manipulation functions already offered |
1571 | | // this capability |
1572 | | static bool split_region(struct uc_struct *uc, MemoryRegion *mr, |
1573 | | uint64_t address, uint64_t size, bool do_delete) |
1574 | 0 | { |
1575 | 0 | uint8_t *backup; |
1576 | 0 | uint32_t perms; |
1577 | 0 | uint64_t begin, end, chunk_end; |
1578 | 0 | uint64_t l_size, m_size, r_size; |
1579 | 0 | RAMBlock *block = NULL; |
1580 | 0 | bool prealloc = false; |
1581 | |
|
1582 | 0 | chunk_end = address + size; |
1583 | | |
1584 | | // if this region belongs to area [address, address+size], |
1585 | | // then there is no work to do. |
1586 | 0 | if (address <= mr->addr && chunk_end >= mr->end) { |
1587 | 0 | return true; |
1588 | 0 | } |
1589 | | |
1590 | 0 | if (size == 0) { |
1591 | | // trivial case |
1592 | 0 | return true; |
1593 | 0 | } |
1594 | | |
1595 | 0 | if (address >= mr->end || chunk_end <= mr->addr) { |
1596 | | // impossible case |
1597 | 0 | return false; |
1598 | 0 | } |
1599 | | |
1600 | | // Find the correct and large enough (which contains our target mr) |
1601 | | // to create the content backup. |
1602 | 0 | block = mr->ram_block; |
1603 | |
|
1604 | 0 | if (block == NULL) { |
1605 | 0 | return false; |
1606 | 0 | } |
1607 | | |
1608 | | // RAM_PREALLOC is not defined outside exec.c and I didn't feel like |
1609 | | // moving it |
1610 | 0 | prealloc = !!(block->flags & 1); |
1611 | |
|
1612 | 0 | if (block->flags & 1) { |
1613 | 0 | backup = block->host; |
1614 | 0 | } else { |
1615 | 0 | backup = copy_region(uc, mr); |
1616 | 0 | if (backup == NULL) { |
1617 | 0 | return false; |
1618 | 0 | } |
1619 | 0 | } |
1620 | | |
1621 | | // save the essential information required for the split before mr gets |
1622 | | // deleted |
1623 | 0 | perms = mr->perms; |
1624 | 0 | begin = mr->addr; |
1625 | 0 | end = mr->end; |
1626 | | |
1627 | | // unmap this region first, then do split it later |
1628 | 0 | if (uc_mem_unmap(uc, mr->addr, (uint64_t)int128_get64(mr->size)) != |
1629 | 0 | UC_ERR_OK) { |
1630 | 0 | goto error; |
1631 | 0 | } |
1632 | | |
1633 | | /* overlapping cases |
1634 | | * |------mr------| |
1635 | | * case 1 |---size--| |
1636 | | * case 2 |--size--| |
1637 | | * case 3 |---size--| |
1638 | | */ |
1639 | | |
1640 | | // adjust some things |
1641 | 0 | if (address < begin) { |
1642 | 0 | address = begin; |
1643 | 0 | } |
1644 | 0 | if (chunk_end > end) { |
1645 | 0 | chunk_end = end; |
1646 | 0 | } |
1647 | | |
1648 | | // compute sub region sizes |
1649 | 0 | l_size = (uint64_t)(address - begin); |
1650 | 0 | r_size = (uint64_t)(end - chunk_end); |
1651 | 0 | m_size = (uint64_t)(chunk_end - address); |
1652 | | |
1653 | | // If there are error in any of the below operations, things are too far |
1654 | | // gone at that point to recover. Could try to remap orignal region, but |
1655 | | // these smaller allocation just failed so no guarantee that we can recover |
1656 | | // the original allocation at this point |
1657 | 0 | if (l_size > 0) { |
1658 | 0 | if (!prealloc) { |
1659 | 0 | if (uc_mem_map(uc, begin, l_size, perms) != UC_ERR_OK) { |
1660 | 0 | goto error; |
1661 | 0 | } |
1662 | 0 | if (uc_mem_write(uc, begin, backup, l_size) != UC_ERR_OK) { |
1663 | 0 | goto error; |
1664 | 0 | } |
1665 | 0 | } else { |
1666 | 0 | if (uc_mem_map_ptr(uc, begin, l_size, perms, backup) != UC_ERR_OK) { |
1667 | 0 | goto error; |
1668 | 0 | } |
1669 | 0 | } |
1670 | 0 | } |
1671 | | |
1672 | 0 | if (m_size > 0 && !do_delete) { |
1673 | 0 | if (!prealloc) { |
1674 | 0 | if (uc_mem_map(uc, address, m_size, perms) != UC_ERR_OK) { |
1675 | 0 | goto error; |
1676 | 0 | } |
1677 | 0 | if (uc_mem_write(uc, address, backup + l_size, m_size) != |
1678 | 0 | UC_ERR_OK) { |
1679 | 0 | goto error; |
1680 | 0 | } |
1681 | 0 | } else { |
1682 | 0 | if (uc_mem_map_ptr(uc, address, m_size, perms, backup + l_size) != |
1683 | 0 | UC_ERR_OK) { |
1684 | 0 | goto error; |
1685 | 0 | } |
1686 | 0 | } |
1687 | 0 | } |
1688 | | |
1689 | 0 | if (r_size > 0) { |
1690 | 0 | if (!prealloc) { |
1691 | 0 | if (uc_mem_map(uc, chunk_end, r_size, perms) != UC_ERR_OK) { |
1692 | 0 | goto error; |
1693 | 0 | } |
1694 | 0 | if (uc_mem_write(uc, chunk_end, backup + l_size + m_size, r_size) != |
1695 | 0 | UC_ERR_OK) { |
1696 | 0 | goto error; |
1697 | 0 | } |
1698 | 0 | } else { |
1699 | 0 | if (uc_mem_map_ptr(uc, chunk_end, r_size, perms, |
1700 | 0 | backup + l_size + m_size) != UC_ERR_OK) { |
1701 | 0 | goto error; |
1702 | 0 | } |
1703 | 0 | } |
1704 | 0 | } |
1705 | | |
1706 | 0 | if (!prealloc) { |
1707 | 0 | free(backup); |
1708 | 0 | } |
1709 | 0 | return true; |
1710 | | |
1711 | 0 | error: |
1712 | 0 | if (!prealloc) { |
1713 | 0 | free(backup); |
1714 | 0 | } |
1715 | 0 | return false; |
1716 | 0 | } |
1717 | | |
// Change the permissions of [address, address+size) to @perms. Regions that
// only partially overlap the range are split first (RAM via split_region,
// MMIO via split_mmio_region). If EXEC is being removed from the page the
// guest is currently executing, emulation is stopped so stale translated
// code is not run.
UNICORN_EXPORT
uc_err uc_mem_protect(struct uc_struct *uc, uint64_t address, uint64_t size,
                      uint32_t perms)
{
    MemoryRegion *mr;
    uint64_t addr = address;
    uint64_t pc;
    uint64_t count, len;
    bool remove_exec = false;

    UC_INIT(uc);

    // snapshot and protection can't be mixed
    if (uc->snapshot_level > 0) {
        restore_jit_state(uc);
        return UC_ERR_ARG;
    }

    if (size == 0) {
        // trivial case, no change
        restore_jit_state(uc);
        return UC_ERR_OK;
    }

    // address must be aligned to uc->target_page_size
    if ((address & uc->target_page_align) != 0) {
        restore_jit_state(uc);
        return UC_ERR_ARG;
    }

    // size must be multiple of uc->target_page_size
    if ((size & uc->target_page_align) != 0) {
        restore_jit_state(uc);
        return UC_ERR_ARG;
    }

    // check for only valid permissions
    if ((perms & ~UC_PROT_ALL) != 0) {
        restore_jit_state(uc);
        return UC_ERR_ARG;
    }

    // check that user's entire requested block is mapped
    // TODO check if protected is possible
    // deny after cow
    if (!check_mem_area(uc, address, size)) {
        restore_jit_state(uc);
        return UC_ERR_NOMEM;
    }

    // Now we know entire region is mapped, so change permissions
    // We may need to split regions if this area spans adjacent regions
    addr = address;
    count = 0;
    while (count < size) {
        mr = uc->memory_mapping(uc, addr);
        len = memory_region_len(uc, mr, addr, size - count);
        if (mr->ram) {
            if (!split_region(uc, mr, addr, len, false)) {
                restore_jit_state(uc);
                return UC_ERR_NOMEM;
            }

            // re-fetch: splitting may have replaced the region at addr
            mr = uc->memory_mapping(uc, addr);
            // will this remove EXEC permission?
            if (((mr->perms & UC_PROT_EXEC) != 0) &&
                ((perms & UC_PROT_EXEC) == 0)) {
                remove_exec = true;
            }
            mr->perms = perms;
            uc->readonly_mem(mr, (perms & UC_PROT_WRITE) == 0);

        } else {
            if (!split_mmio_region(uc, mr, addr, len, false)) {
                restore_jit_state(uc);
                return UC_ERR_NOMEM;
            }

            // re-fetch: splitting may have replaced the region at addr
            mr = uc->memory_mapping(uc, addr);
            mr->perms = perms;
        }

        count += len;
        addr += len;
    }

    // if EXEC permission is removed, then quit TB and continue at the same
    // place
    if (remove_exec) {
        pc = uc->get_pc(uc);
        if (pc < address + size && pc >= address) {
            uc->quit_request = true;
            uc_emu_stop(uc);
        }
    }

    restore_jit_state(uc);
    return UC_ERR_OK;
}
1817 | | |
1818 | | static uc_err uc_mem_unmap_snapshot(struct uc_struct *uc, uint64_t address, |
1819 | | uint64_t size, MemoryRegion **ret) |
1820 | 0 | { |
1821 | 0 | MemoryRegion *mr; |
1822 | |
|
1823 | 0 | mr = uc->memory_mapping(uc, address); |
1824 | 0 | while (mr->container != uc->system_memory) { |
1825 | 0 | mr = mr->container; |
1826 | 0 | } |
1827 | |
|
1828 | 0 | if (mr->addr != address || int128_get64(mr->size) != size) { |
1829 | 0 | return UC_ERR_ARG; |
1830 | 0 | } |
1831 | | |
1832 | 0 | if (ret) { |
1833 | 0 | *ret = mr; |
1834 | 0 | } |
1835 | |
|
1836 | 0 | uc->memory_moveout(uc, mr); |
1837 | |
|
1838 | 0 | return UC_ERR_OK; |
1839 | 0 | } |
1840 | | |
// Unmap [address, address+size). The range must be entirely mapped and
// page-aligned. Regions that only partially overlap the range are split
// first (split_region / split_mmio_region with do_delete=true); in snapshot
// mode the whole region is moved out instead of destroyed.
UNICORN_EXPORT
uc_err uc_mem_unmap(struct uc_struct *uc, uint64_t address, uint64_t size)
{
    MemoryRegion *mr;
    uint64_t addr;
    uint64_t count, len;

    UC_INIT(uc);

    if (size == 0) {
        // nothing to unmap
        restore_jit_state(uc);
        return UC_ERR_OK;
    }

    // address must be aligned to uc->target_page_size
    if ((address & uc->target_page_align) != 0) {
        restore_jit_state(uc);
        return UC_ERR_ARG;
    }

    // size must be multiple of uc->target_page_size
    if ((size & uc->target_page_align) != 0) {
        restore_jit_state(uc);
        return UC_ERR_ARG;
    }

    // check that user's entire requested block is mapped
    if (!check_mem_area(uc, address, size)) {
        restore_jit_state(uc);
        return UC_ERR_NOMEM;
    }

    // snapshot mode: regions are moved out wholesale, never split
    if (uc->snapshot_level > 0) {
        uc_err res = uc_mem_unmap_snapshot(uc, address, size, NULL);
        restore_jit_state(uc);
        return res;
    }

    // Now we know entire region is mapped, so do the unmap
    // We may need to split regions if this area spans adjacent regions
    addr = address;
    count = 0;
    while (count < size) {
        mr = uc->memory_mapping(uc, addr);
        len = memory_region_len(uc, mr, addr, size - count);
        if (!mr->ram) {
            if (!split_mmio_region(uc, mr, addr, len, true)) {
                restore_jit_state(uc);
                return UC_ERR_NOMEM;
            }
        } else {
            if (!split_region(uc, mr, addr, len, true)) {
                restore_jit_state(uc);
                return UC_ERR_NOMEM;
            }
        }

        // if we can retrieve the mapping, then no splitting took place
        // so unmap here
        mr = uc->memory_mapping(uc, addr);
        if (mr != NULL) {
            uc->memory_unmap(uc, mr);
        }
        count += len;
        addr += len;
    }

    restore_jit_state(uc);
    return UC_ERR_OK;
}
1912 | | |
1913 | | UNICORN_EXPORT |
1914 | | uc_err uc_hook_add(uc_engine *uc, uc_hook *hh, int type, void *callback, |
1915 | | void *user_data, uint64_t begin, uint64_t end, ...) |
1916 | 196k | { |
1917 | 196k | int ret = UC_ERR_OK; |
1918 | 196k | int i = 0; |
1919 | | |
1920 | 196k | UC_INIT(uc); |
1921 | | |
1922 | 196k | struct hook *hook = calloc(1, sizeof(struct hook)); |
1923 | 196k | if (hook == NULL) { |
1924 | 0 | restore_jit_state(uc); |
1925 | 0 | return UC_ERR_NOMEM; |
1926 | 0 | } |
1927 | | |
1928 | 196k | hook->begin = begin; |
1929 | 196k | hook->end = end; |
1930 | 196k | hook->type = type; |
1931 | 196k | hook->callback = callback; |
1932 | 196k | hook->user_data = user_data; |
1933 | 196k | hook->refs = 0; |
1934 | 196k | hook->to_delete = false; |
1935 | 196k | hook->hooked_regions = g_hash_table_new_full( |
1936 | 196k | hooked_regions_hash, hooked_regions_equal, g_free, NULL); |
1937 | 196k | *hh = (uc_hook)hook; |
1938 | | |
1939 | | // UC_HOOK_INSN has an extra argument for instruction ID |
1940 | 196k | if (type & UC_HOOK_INSN) { |
1941 | 0 | va_list valist; |
1942 | |
|
1943 | 0 | va_start(valist, end); |
1944 | 0 | hook->insn = va_arg(valist, int); |
1945 | 0 | va_end(valist); |
1946 | |
|
1947 | 0 | if (uc->insn_hook_validate) { |
1948 | 0 | if (!uc->insn_hook_validate(hook->insn)) { |
1949 | 0 | free(hook); |
1950 | 0 | restore_jit_state(uc); |
1951 | 0 | return UC_ERR_HOOK; |
1952 | 0 | } |
1953 | 0 | } |
1954 | | |
1955 | 0 | if (uc->hook_insert) { |
1956 | 0 | if (hook_insert(&uc->hook[UC_HOOK_INSN_IDX], hook) == NULL) { |
1957 | 0 | free(hook); |
1958 | 0 | restore_jit_state(uc); |
1959 | 0 | return UC_ERR_NOMEM; |
1960 | 0 | } |
1961 | 0 | } else { |
1962 | 0 | if (hook_append(&uc->hook[UC_HOOK_INSN_IDX], hook) == NULL) { |
1963 | 0 | free(hook); |
1964 | 0 | restore_jit_state(uc); |
1965 | 0 | return UC_ERR_NOMEM; |
1966 | 0 | } |
1967 | 0 | } |
1968 | | |
1969 | 0 | uc->hooks_count[UC_HOOK_INSN_IDX]++; |
1970 | 0 | restore_jit_state(uc); |
1971 | 0 | return UC_ERR_OK; |
1972 | 0 | } |
1973 | | |
1974 | 196k | if (type & UC_HOOK_TCG_OPCODE) { |
1975 | 0 | va_list valist; |
1976 | |
|
1977 | 0 | va_start(valist, end); |
1978 | 0 | hook->op = va_arg(valist, int); |
1979 | 0 | hook->op_flags = va_arg(valist, int); |
1980 | 0 | va_end(valist); |
1981 | |
|
1982 | 0 | if (uc->opcode_hook_invalidate) { |
1983 | 0 | if (!uc->opcode_hook_invalidate(hook->op, hook->op_flags)) { |
1984 | 0 | free(hook); |
1985 | 0 | restore_jit_state(uc); |
1986 | 0 | return UC_ERR_HOOK; |
1987 | 0 | } |
1988 | 0 | } |
1989 | | |
1990 | 0 | if (uc->hook_insert) { |
1991 | 0 | if (hook_insert(&uc->hook[UC_HOOK_TCG_OPCODE_IDX], hook) == NULL) { |
1992 | 0 | free(hook); |
1993 | 0 | restore_jit_state(uc); |
1994 | 0 | return UC_ERR_NOMEM; |
1995 | 0 | } |
1996 | 0 | } else { |
1997 | 0 | if (hook_append(&uc->hook[UC_HOOK_TCG_OPCODE_IDX], hook) == NULL) { |
1998 | 0 | free(hook); |
1999 | 0 | restore_jit_state(uc); |
2000 | 0 | return UC_ERR_NOMEM; |
2001 | 0 | } |
2002 | 0 | } |
2003 | | |
2004 | 0 | uc->hooks_count[UC_HOOK_TCG_OPCODE_IDX]++; |
2005 | 0 | return UC_ERR_OK; |
2006 | 0 | } |
2007 | | |
2008 | 786k | while ((type >> i) > 0) { |
2009 | 589k | if ((type >> i) & 1) { |
2010 | | // TODO: invalid hook error? |
2011 | 196k | if (i < UC_HOOK_MAX) { |
2012 | 196k | if (uc->hook_insert) { |
2013 | 196k | if (hook_insert(&uc->hook[i], hook) == NULL) { |
2014 | 0 | free(hook); |
2015 | 0 | restore_jit_state(uc); |
2016 | 0 | return UC_ERR_NOMEM; |
2017 | 0 | } |
2018 | 196k | } else { |
2019 | 0 | if (hook_append(&uc->hook[i], hook) == NULL) { |
2020 | 0 | free(hook); |
2021 | 0 | restore_jit_state(uc); |
2022 | 0 | return UC_ERR_NOMEM; |
2023 | 0 | } |
2024 | 0 | } |
2025 | 196k | uc->hooks_count[i]++; |
2026 | 196k | } |
2027 | 196k | } |
2028 | 589k | i++; |
2029 | 589k | } |
2030 | | |
2031 | | // we didn't use the hook |
2032 | | // TODO: return an error? |
2033 | 196k | if (hook->refs == 0) { |
2034 | 0 | free(hook); |
2035 | 0 | } |
2036 | | |
2037 | 196k | restore_jit_state(uc); |
2038 | 196k | return ret; |
2039 | 196k | } |
2040 | | |
2041 | | UNICORN_EXPORT |
2042 | | uc_err uc_hook_del(uc_engine *uc, uc_hook hh) |
2043 | 0 | { |
2044 | 0 | int i; |
2045 | 0 | struct hook *hook = (struct hook *)hh; |
2046 | |
|
2047 | 0 | UC_INIT(uc); |
2048 | | |
2049 | | // we can't dereference hook->type if hook is invalid |
2050 | | // so for now we need to iterate over all possible types to remove the hook |
2051 | | // which is less efficient |
2052 | | // an optimization would be to align the hook pointer |
2053 | | // and store the type mask in the hook pointer. |
2054 | 0 | for (i = 0; i < UC_HOOK_MAX; i++) { |
2055 | 0 | if (list_exists(&uc->hook[i], (void *)hook)) { |
2056 | 0 | g_hash_table_foreach(hook->hooked_regions, hook_invalidate_region, |
2057 | 0 | uc); |
2058 | 0 | g_hash_table_remove_all(hook->hooked_regions); |
2059 | 0 | hook->to_delete = true; |
2060 | 0 | uc->hooks_count[i]--; |
2061 | 0 | hook_append(&uc->hooks_to_del, hook); |
2062 | 0 | } |
2063 | 0 | } |
2064 | |
|
2065 | 0 | restore_jit_state(uc); |
2066 | 0 | return UC_ERR_OK; |
2067 | 0 | } |
2068 | | |
2069 | | UNICORN_EXPORT |
2070 | | uc_err uc_hook_set_user_data(uc_engine *uc, uc_hook hh, void *user_data) |
2071 | 0 | { |
2072 | 0 | struct hook *hook = (struct hook *)hh; |
2073 | 0 | if (hook->type == UC_HOOK_BLOCK || hook->type == UC_HOOK_CODE) { |
2074 | 0 | if (uc->nested_level) { |
2075 | 0 | return UC_ERR_ARG; |
2076 | 0 | } |
2077 | 0 | if (hook->end < hook->begin) { |
2078 | 0 | uc->tb_flush(uc); |
2079 | 0 | } else { |
2080 | 0 | uc->uc_invalidate_tb(uc, hook->begin, hook->end - hook->begin); |
2081 | 0 | } |
2082 | 0 | } |
2083 | 0 | hook->user_data = user_data; |
2084 | 0 | return UC_ERR_OK; |
2085 | 0 | } |
2086 | | |
2087 | | // TCG helper |
2088 | | // 2 arguments are enough for most opcodes. Load/Store needs 3 arguments but we |
2089 | | // have memory hooks already. We may exceed the maximum arguments of a tcg |
2090 | | // helper but that's easy to extend. |
2091 | | void helper_uc_traceopcode(struct hook *hook, uint64_t arg1, uint64_t arg2, |
2092 | | uint32_t size, void *handle, uint64_t address); |
2093 | | void helper_uc_traceopcode(struct hook *hook, uint64_t arg1, uint64_t arg2, |
2094 | | uint32_t size, void *handle, uint64_t address) |
2095 | 0 | { |
2096 | 0 | struct uc_struct *uc = handle; |
2097 | |
|
2098 | 0 | if (unlikely(uc->stop_request)) { |
2099 | 0 | return; |
2100 | 0 | } |
2101 | | |
2102 | 0 | if (unlikely(hook->to_delete)) { |
2103 | 0 | return; |
2104 | 0 | } |
2105 | | |
2106 | | // We did all checks in translation time. |
2107 | | // |
2108 | | // This could optimize the case that we have multiple hooks with different |
2109 | | // opcodes and have one callback per opcode. Note that the assumption don't |
2110 | | // hold in most cases for uc_tracecode. |
2111 | | // |
2112 | | // TODO: Shall we have a flag to allow users to control whether updating PC? |
2113 | 0 | JIT_CALLBACK_GUARD(((uc_hook_tcg_op_2)hook->callback)( |
2114 | 0 | uc, address, arg1, arg2, size, hook->user_data)); |
2115 | |
|
2116 | 0 | if (unlikely(uc->stop_request)) { |
2117 | 0 | return; |
2118 | 0 | } |
2119 | 0 | } |
2120 | | |
// TCG helper invoked from generated code for code/block tracing hooks.
// `index` selects the hook list (low bits) and may carry extra flags in its
// high bits; `size` is the instruction/block size (0 signals an invalid
// block/instruction). NOTE(review): callbacks may themselves request a stop;
// the NO_STOP flag (ARM IT blocks) deliberately reverts such requests so the
// whole IT block behaves as a single instruction.
void helper_uc_tracecode(int32_t size, uc_hook_idx index, void *handle,
                         int64_t address);
void helper_uc_tracecode(int32_t size, uc_hook_idx index, void *handle,
                         int64_t address)
{
    struct uc_struct *uc = handle;
    struct list_item *cur;
    struct hook *hook;
    int hook_flags =
        index &
        UC_HOOK_FLAG_MASK; // The index here may contain additional flags. See
                           // the comments of uc_hook_idx for details.
    // bool not_allow_stop = (size & UC_HOOK_FLAG_NO_STOP) || (hook_flags &
    // UC_HOOK_FLAG_NO_STOP);
    bool not_allow_stop = hook_flags & UC_HOOK_FLAG_NO_STOP;

    index = index & UC_HOOK_IDX_MASK;
    // // Like hook index, only low 6 bits of size is used for representing
    // sizes. size = size & UC_HOOK_IDX_MASK;

    // This has been done in tcg code.
    // sync PC in CPUArchState with address
    // if (uc->set_pc) {
    //     uc->set_pc(uc, address);
    // }

    // the last callback may already asked to stop emulation
    if (uc->stop_request && !not_allow_stop) {
        return;
    } else if (not_allow_stop && uc->stop_request) {
        // inside an IT block: undo the stop request and keep going
        revert_uc_emu_stop(uc);
    }

    // Walk every live hook in this list and fire those whose [begin, end]
    // range covers `address`.
    for (cur = uc->hook[index].head;
         cur != NULL && (hook = (struct hook *)cur->data); cur = cur->next) {
        if (hook->to_delete) {
            continue;
        }

        // on invalid block/instruction, call instruction counter (if enable),
        // then quit
        if (size == 0) {
            if (index == UC_HOOK_CODE_IDX && uc->count_hook) {
                // this is the instruction counter (first hook in the list)
                JIT_CALLBACK_GUARD(((uc_cb_hookcode_t)hook->callback)(
                    uc, address, size, hook->user_data));
            }

            return;
        }

        if (HOOK_BOUND_CHECK(hook, (uint64_t)address)) {
            JIT_CALLBACK_GUARD(((uc_cb_hookcode_t)hook->callback)(
                uc, address, size, hook->user_data));
        }

        // the last callback may already asked to stop emulation
        // Unicorn:
        //   In an ARM IT block, we behave like the emulation continues
        //   normally. No check_exit_request is generated and the hooks are
        //   triggered normally. In other words, the whole IT block is treated
        //   as a single instruction.
        if (not_allow_stop && uc->stop_request) {
            revert_uc_emu_stop(uc);
        } else if (!not_allow_stop && uc->stop_request) {
            break;
        }
    }
}
2190 | | |
2191 | | UNICORN_EXPORT |
2192 | | uc_err uc_mem_regions(uc_engine *uc, uc_mem_region **regions, uint32_t *count) |
2193 | 0 | { |
2194 | 0 | uint32_t i; |
2195 | 0 | uc_mem_region *r = NULL; |
2196 | |
|
2197 | 0 | UC_INIT(uc); |
2198 | |
|
2199 | 0 | *count = uc->mapped_block_count; |
2200 | |
|
2201 | 0 | if (*count) { |
2202 | 0 | r = g_malloc0(*count * sizeof(uc_mem_region)); |
2203 | 0 | if (r == NULL) { |
2204 | | // out of memory |
2205 | 0 | restore_jit_state(uc); |
2206 | 0 | return UC_ERR_NOMEM; |
2207 | 0 | } |
2208 | 0 | } |
2209 | | |
2210 | 0 | for (i = 0; i < *count; i++) { |
2211 | 0 | r[i].begin = uc->mapped_blocks[i]->addr; |
2212 | 0 | r[i].end = uc->mapped_blocks[i]->end - 1; |
2213 | 0 | r[i].perms = uc->mapped_blocks[i]->perms; |
2214 | 0 | } |
2215 | |
|
2216 | 0 | *regions = r; |
2217 | |
|
2218 | 0 | restore_jit_state(uc); |
2219 | 0 | return UC_ERR_OK; |
2220 | 0 | } |
2221 | | |
2222 | | UNICORN_EXPORT |
2223 | | uc_err uc_query(uc_engine *uc, uc_query_type type, size_t *result) |
2224 | 0 | { |
2225 | 0 | UC_INIT(uc); |
2226 | |
|
2227 | 0 | switch (type) { |
2228 | 0 | default: |
2229 | 0 | return UC_ERR_ARG; |
2230 | | |
2231 | 0 | case UC_QUERY_PAGE_SIZE: |
2232 | 0 | *result = uc->target_page_size; |
2233 | 0 | break; |
2234 | | |
2235 | 0 | case UC_QUERY_ARCH: |
2236 | 0 | *result = uc->arch; |
2237 | 0 | break; |
2238 | | |
2239 | 0 | case UC_QUERY_MODE: |
2240 | 0 | #ifdef UNICORN_HAS_ARM |
2241 | 0 | if (uc->arch == UC_ARCH_ARM) { |
2242 | 0 | return uc->query(uc, type, result); |
2243 | 0 | } |
2244 | 0 | #endif |
2245 | 0 | *result = uc->mode; |
2246 | 0 | break; |
2247 | | |
2248 | 0 | case UC_QUERY_TIMEOUT: |
2249 | 0 | *result = uc->timed_out; |
2250 | 0 | break; |
2251 | 0 | } |
2252 | | |
2253 | 0 | restore_jit_state(uc); |
2254 | 0 | return UC_ERR_OK; |
2255 | 0 | } |
2256 | | |
2257 | | UNICORN_EXPORT |
2258 | | uc_err uc_context_alloc(uc_engine *uc, uc_context **context) |
2259 | 0 | { |
2260 | 0 | struct uc_context **_context = context; |
2261 | 0 | size_t size = uc_context_size(uc); |
2262 | |
|
2263 | 0 | UC_INIT(uc); |
2264 | |
|
2265 | 0 | *_context = g_malloc(size); |
2266 | 0 | if (*_context) { |
2267 | 0 | (*_context)->context_size = size - sizeof(uc_context); |
2268 | 0 | (*_context)->arch = uc->arch; |
2269 | 0 | (*_context)->mode = uc->mode; |
2270 | 0 | (*_context)->fv = NULL; |
2271 | 0 | restore_jit_state(uc); |
2272 | 0 | return UC_ERR_OK; |
2273 | 0 | } else { |
2274 | 0 | restore_jit_state(uc); |
2275 | 0 | return UC_ERR_NOMEM; |
2276 | 0 | } |
2277 | 0 | } |
2278 | | |
UNICORN_EXPORT
// Release memory handed out by the library (e.g. the array from
// uc_mem_regions). Uses g_free() because the allocations come from glib.
uc_err uc_free(void *mem)
{
    g_free(mem);
    return UC_ERR_OK;
}
2285 | | |
2286 | | UNICORN_EXPORT |
2287 | | size_t uc_context_size(uc_engine *uc) |
2288 | 0 | { |
2289 | 0 | UC_INIT(uc); |
2290 | |
|
2291 | 0 | restore_jit_state(uc); |
2292 | 0 | if (!uc->context_size) { |
2293 | | // return the total size of struct uc_context |
2294 | 0 | return sizeof(uc_context) + uc->cpu_context_size; |
2295 | 0 | } else { |
2296 | 0 | return sizeof(uc_context) + uc->context_size(uc); |
2297 | 0 | } |
2298 | 0 | } |
2299 | | |
2300 | | UNICORN_EXPORT |
2301 | | uc_err uc_context_save(uc_engine *uc, uc_context *context) |
2302 | 0 | { |
2303 | 0 | UC_INIT(uc); |
2304 | 0 | uc_err ret = UC_ERR_OK; |
2305 | |
|
2306 | 0 | if (uc->context_content & UC_CTL_CONTEXT_MEMORY) { |
2307 | 0 | if (!context->fv) { |
2308 | 0 | context->fv = g_malloc0(sizeof(*context->fv)); |
2309 | 0 | } |
2310 | 0 | if (!context->fv) { |
2311 | 0 | return UC_ERR_NOMEM; |
2312 | 0 | } |
2313 | 0 | if (!uc->flatview_copy(uc, context->fv, |
2314 | 0 | uc->address_space_memory.current_map, false)) { |
2315 | 0 | restore_jit_state(uc); |
2316 | 0 | return UC_ERR_NOMEM; |
2317 | 0 | } |
2318 | 0 | ret = uc_snapshot(uc); |
2319 | 0 | if (ret != UC_ERR_OK) { |
2320 | 0 | restore_jit_state(uc); |
2321 | 0 | return ret; |
2322 | 0 | } |
2323 | 0 | context->ramblock_freed = uc->ram_list.freed; |
2324 | 0 | context->last_block = uc->ram_list.last_block; |
2325 | 0 | uc->tcg_flush_tlb(uc); |
2326 | 0 | } |
2327 | | |
2328 | 0 | context->snapshot_level = uc->snapshot_level; |
2329 | |
|
2330 | 0 | if (uc->context_content & UC_CTL_CONTEXT_CPU) { |
2331 | 0 | if (!uc->context_save) { |
2332 | 0 | memcpy(context->data, uc->cpu->env_ptr, context->context_size); |
2333 | 0 | restore_jit_state(uc); |
2334 | 0 | return UC_ERR_OK; |
2335 | 0 | } else { |
2336 | 0 | ret = uc->context_save(uc, context); |
2337 | 0 | restore_jit_state(uc); |
2338 | 0 | return ret; |
2339 | 0 | } |
2340 | 0 | } |
2341 | 0 | restore_jit_state(uc); |
2342 | 0 | return ret; |
2343 | 0 | } |
2344 | | |
// Keep in mind that we don't have a uc_engine when r/w the registers of a
// context, so the accessor pair must be resolved from the arch/mode stored
// in the context itself.
static context_reg_rw_t find_context_reg_rw(uc_arch arch, uc_mode mode)
{
    // We believe that the arch/mode pair is correct.
    context_reg_rw_t rw = {default_reg_read, default_reg_write};
    switch (arch) {
    default:
        break;
#ifdef UNICORN_HAS_M68K
    case UC_ARCH_M68K:
        rw.read = reg_read_m68k;
        rw.write = reg_write_m68k;
        break;
#endif
#ifdef UNICORN_HAS_X86
    case UC_ARCH_X86:
        rw.read = reg_read_x86_64;
        rw.write = reg_write_x86_64;
        break;
#endif
#ifdef UNICORN_HAS_ARM
    case UC_ARCH_ARM:
        rw.read = reg_read_arm;
        rw.write = reg_write_arm;
        break;
#endif
#ifdef UNICORN_HAS_ARM64
    case UC_ARCH_ARM64:
        rw.read = reg_read_aarch64;
        rw.write = reg_write_aarch64;
        break;
#endif

#if defined(UNICORN_HAS_MIPS) || defined(UNICORN_HAS_MIPSEL) ||                \
    defined(UNICORN_HAS_MIPS64) || defined(UNICORN_HAS_MIPS64EL)
    case UC_ARCH_MIPS:
        // MIPS needs both endianness and word size to pick the accessors.
        if (mode & UC_MODE_BIG_ENDIAN) {
#ifdef UNICORN_HAS_MIPS
            if (mode & UC_MODE_MIPS32) {
                rw.read = reg_read_mips;
                rw.write = reg_write_mips;
            }
#endif
#ifdef UNICORN_HAS_MIPS64
            if (mode & UC_MODE_MIPS64) {
                rw.read = reg_read_mips64;
                rw.write = reg_write_mips64;
            }
#endif
        } else { // little endian
#ifdef UNICORN_HAS_MIPSEL
            if (mode & UC_MODE_MIPS32) {
                rw.read = reg_read_mipsel;
                rw.write = reg_write_mipsel;
            }
#endif
#ifdef UNICORN_HAS_MIPS64EL
            if (mode & UC_MODE_MIPS64) {
                rw.read = reg_read_mips64el;
                rw.write = reg_write_mips64el;
            }
#endif
        }
        break;
#endif

#ifdef UNICORN_HAS_SPARC
    case UC_ARCH_SPARC:
        if (mode & UC_MODE_SPARC64) {
            rw.read = reg_read_sparc64;
            rw.write = reg_write_sparc64;
        } else {
            rw.read = reg_read_sparc;
            rw.write = reg_write_sparc;
        }
        break;
#endif
#ifdef UNICORN_HAS_PPC
    case UC_ARCH_PPC:
        if (mode & UC_MODE_PPC64) {
            rw.read = reg_read_ppc64;
            rw.write = reg_write_ppc64;
        } else {
            rw.read = reg_read_ppc;
            rw.write = reg_write_ppc;
        }
        break;
#endif
#ifdef UNICORN_HAS_RISCV
    case UC_ARCH_RISCV:
        if (mode & UC_MODE_RISCV32) {
            rw.read = reg_read_riscv32;
            rw.write = reg_write_riscv32;
        } else if (mode & UC_MODE_RISCV64) {
            rw.read = reg_read_riscv64;
            rw.write = reg_write_riscv64;
        }
        break;
#endif
#ifdef UNICORN_HAS_S390X
    case UC_ARCH_S390X:
        rw.read = reg_read_s390x;
        rw.write = reg_write_s390x;
        break;
#endif
#ifdef UNICORN_HAS_TRICORE
    case UC_ARCH_TRICORE:
        rw.read = reg_read_tricore;
        rw.write = reg_write_tricore;
        break;
#endif
    }

    return rw;
}
2460 | | |
2461 | | UNICORN_EXPORT |
2462 | | uc_err uc_context_reg_write(uc_context *ctx, int regid, const void *value) |
2463 | 0 | { |
2464 | 0 | int setpc = 0; |
2465 | 0 | size_t size = (size_t)-1; |
2466 | 0 | return find_context_reg_rw(ctx->arch, ctx->mode) |
2467 | 0 | .write(ctx->data, ctx->mode, regid, value, &size, &setpc); |
2468 | 0 | } |
2469 | | |
2470 | | UNICORN_EXPORT |
2471 | | uc_err uc_context_reg_read(uc_context *ctx, int regid, void *value) |
2472 | 0 | { |
2473 | 0 | size_t size = (size_t)-1; |
2474 | 0 | return find_context_reg_rw(ctx->arch, ctx->mode) |
2475 | 0 | .read(ctx->data, ctx->mode, regid, value, &size); |
2476 | 0 | } |
2477 | | |
2478 | | UNICORN_EXPORT |
2479 | | uc_err uc_context_reg_write2(uc_context *ctx, int regid, const void *value, |
2480 | | size_t *size) |
2481 | 0 | { |
2482 | 0 | int setpc = 0; |
2483 | 0 | return find_context_reg_rw(ctx->arch, ctx->mode) |
2484 | 0 | .write(ctx->data, ctx->mode, regid, value, size, &setpc); |
2485 | 0 | } |
2486 | | |
2487 | | UNICORN_EXPORT |
2488 | | uc_err uc_context_reg_read2(uc_context *ctx, int regid, void *value, |
2489 | | size_t *size) |
2490 | 0 | { |
2491 | 0 | return find_context_reg_rw(ctx->arch, ctx->mode) |
2492 | 0 | .read(ctx->data, ctx->mode, regid, value, size); |
2493 | 0 | } |
2494 | | |
2495 | | UNICORN_EXPORT |
2496 | | uc_err uc_context_reg_write_batch(uc_context *ctx, int const *regs, |
2497 | | void *const *vals, int count) |
2498 | 0 | { |
2499 | 0 | reg_write_t reg_write = find_context_reg_rw(ctx->arch, ctx->mode).write; |
2500 | 0 | void *env = ctx->data; |
2501 | 0 | int mode = ctx->mode; |
2502 | 0 | int setpc = 0; |
2503 | 0 | int i; |
2504 | |
|
2505 | 0 | for (i = 0; i < count; i++) { |
2506 | 0 | unsigned int regid = regs[i]; |
2507 | 0 | const void *value = vals[i]; |
2508 | 0 | size_t size = (size_t)-1; |
2509 | 0 | uc_err err = reg_write(env, mode, regid, value, &size, &setpc); |
2510 | 0 | if (err) { |
2511 | 0 | return err; |
2512 | 0 | } |
2513 | 0 | } |
2514 | | |
2515 | 0 | return UC_ERR_OK; |
2516 | 0 | } |
2517 | | |
2518 | | UNICORN_EXPORT |
2519 | | uc_err uc_context_reg_read_batch(uc_context *ctx, int const *regs, void **vals, |
2520 | | int count) |
2521 | 0 | { |
2522 | 0 | reg_read_t reg_read = find_context_reg_rw(ctx->arch, ctx->mode).read; |
2523 | 0 | void *env = ctx->data; |
2524 | 0 | int mode = ctx->mode; |
2525 | 0 | int i; |
2526 | |
|
2527 | 0 | for (i = 0; i < count; i++) { |
2528 | 0 | unsigned int regid = regs[i]; |
2529 | 0 | void *value = vals[i]; |
2530 | 0 | size_t size = (size_t)-1; |
2531 | 0 | uc_err err = reg_read(env, mode, regid, value, &size); |
2532 | 0 | if (err) { |
2533 | 0 | return err; |
2534 | 0 | } |
2535 | 0 | } |
2536 | | |
2537 | 0 | return UC_ERR_OK; |
2538 | 0 | } |
2539 | | |
2540 | | UNICORN_EXPORT |
2541 | | uc_err uc_context_reg_write_batch2(uc_context *ctx, int const *regs, |
2542 | | const void *const *vals, size_t *sizes, |
2543 | | int count) |
2544 | 0 | { |
2545 | 0 | reg_write_t reg_write = find_context_reg_rw(ctx->arch, ctx->mode).write; |
2546 | 0 | void *env = ctx->data; |
2547 | 0 | int mode = ctx->mode; |
2548 | 0 | int setpc = 0; |
2549 | 0 | int i; |
2550 | |
|
2551 | 0 | for (i = 0; i < count; i++) { |
2552 | 0 | unsigned int regid = regs[i]; |
2553 | 0 | const void *value = vals[i]; |
2554 | 0 | uc_err err = reg_write(env, mode, regid, value, sizes + i, &setpc); |
2555 | 0 | if (err) { |
2556 | 0 | return err; |
2557 | 0 | } |
2558 | 0 | } |
2559 | | |
2560 | 0 | return UC_ERR_OK; |
2561 | 0 | } |
2562 | | |
2563 | | UNICORN_EXPORT |
2564 | | uc_err uc_context_reg_read_batch2(uc_context *ctx, int const *regs, |
2565 | | void *const *vals, size_t *sizes, int count) |
2566 | 0 | { |
2567 | 0 | reg_read_t reg_read = find_context_reg_rw(ctx->arch, ctx->mode).read; |
2568 | 0 | void *env = ctx->data; |
2569 | 0 | int mode = ctx->mode; |
2570 | 0 | int i; |
2571 | |
|
2572 | 0 | for (i = 0; i < count; i++) { |
2573 | 0 | unsigned int regid = regs[i]; |
2574 | 0 | void *value = vals[i]; |
2575 | 0 | uc_err err = reg_read(env, mode, regid, value, sizes + i); |
2576 | 0 | if (err) { |
2577 | 0 | return err; |
2578 | 0 | } |
2579 | 0 | } |
2580 | | |
2581 | 0 | return UC_ERR_OK; |
2582 | 0 | } |
2583 | | |
2584 | | UNICORN_EXPORT |
2585 | | uc_err uc_context_restore(uc_engine *uc, uc_context *context) |
2586 | 0 | { |
2587 | 0 | UC_INIT(uc); |
2588 | 0 | uc_err ret; |
2589 | |
|
2590 | 0 | if (uc->context_content & UC_CTL_CONTEXT_MEMORY) { |
2591 | 0 | uc->snapshot_level = context->snapshot_level; |
2592 | 0 | if (!uc->flatview_copy(uc, uc->address_space_memory.current_map, |
2593 | 0 | context->fv, true)) { |
2594 | 0 | return UC_ERR_NOMEM; |
2595 | 0 | } |
2596 | 0 | ret = uc_restore_latest_snapshot(uc); |
2597 | 0 | if (ret != UC_ERR_OK) { |
2598 | 0 | restore_jit_state(uc); |
2599 | 0 | return ret; |
2600 | 0 | } |
2601 | 0 | uc_snapshot(uc); |
2602 | 0 | uc->ram_list.freed = context->ramblock_freed; |
2603 | 0 | uc->ram_list.last_block = context->last_block; |
2604 | 0 | uc->tcg_flush_tlb(uc); |
2605 | 0 | } |
2606 | | |
2607 | 0 | if (uc->context_content & UC_CTL_CONTEXT_CPU) { |
2608 | 0 | if (!uc->context_restore) { |
2609 | 0 | memcpy(uc->cpu->env_ptr, context->data, context->context_size); |
2610 | 0 | restore_jit_state(uc); |
2611 | 0 | return UC_ERR_OK; |
2612 | 0 | } else { |
2613 | 0 | ret = uc->context_restore(uc, context); |
2614 | 0 | restore_jit_state(uc); |
2615 | 0 | return ret; |
2616 | 0 | } |
2617 | 0 | } |
2618 | 0 | return UC_ERR_OK; |
2619 | 0 | } |
2620 | | |
2621 | | UNICORN_EXPORT |
2622 | | uc_err uc_context_free(uc_context *context) |
2623 | 0 | { |
2624 | 0 | if (context->fv) { |
2625 | 0 | free(context->fv->ranges); |
2626 | 0 | g_free(context->fv); |
2627 | 0 | } |
2628 | 0 | return uc_free(context); |
2629 | 0 | } |
2630 | | |
// Accumulator passed to uc_read_exit_iter while walking the exits tree:
// `array` receives the exit addresses, `len` counts how many were stored.
typedef struct _uc_ctl_exit_request {
    uint64_t *array;
    size_t len;
} uc_ctl_exit_request;
2635 | | |
2636 | | static inline gboolean uc_read_exit_iter(gpointer key, gpointer val, |
2637 | | gpointer data) |
2638 | 0 | { |
2639 | 0 | uc_ctl_exit_request *req = (uc_ctl_exit_request *)data; |
2640 | |
|
2641 | 0 | req->array[req->len++] = *(uint64_t *)key; |
2642 | |
|
2643 | 0 | return false; |
2644 | 0 | } |
2645 | | |
2646 | | UNICORN_EXPORT |
2647 | | uc_err uc_ctl(uc_engine *uc, uc_control_type control, ...) |
2648 | 0 | { |
2649 | 0 | int rw, type; |
2650 | 0 | uc_err err = UC_ERR_OK; |
2651 | 0 | va_list args; |
2652 | | |
2653 | | // MSVC Would do signed shift on signed integers. |
2654 | 0 | rw = (uint32_t)control >> 30; |
2655 | 0 | type = (control & ((1 << 16) - 1)); |
2656 | 0 | va_start(args, control); |
2657 | |
|
2658 | 0 | switch (type) { |
2659 | 0 | case UC_CTL_UC_MODE: { |
2660 | 0 | if (rw == UC_CTL_IO_READ) { |
2661 | 0 | int *pmode = va_arg(args, int *); |
2662 | 0 | *pmode = uc->mode; |
2663 | 0 | } else { |
2664 | 0 | err = UC_ERR_ARG; |
2665 | 0 | } |
2666 | 0 | break; |
2667 | 0 | } |
2668 | | |
2669 | 0 | case UC_CTL_UC_ARCH: { |
2670 | 0 | if (rw == UC_CTL_IO_READ) { |
2671 | 0 | int *arch = va_arg(args, int *); |
2672 | 0 | *arch = uc->arch; |
2673 | 0 | } else { |
2674 | 0 | err = UC_ERR_ARG; |
2675 | 0 | } |
2676 | 0 | break; |
2677 | 0 | } |
2678 | | |
2679 | 0 | case UC_CTL_UC_TIMEOUT: { |
2680 | 0 | if (rw == UC_CTL_IO_READ) { |
2681 | 0 | uint64_t *arch = va_arg(args, uint64_t *); |
2682 | 0 | *arch = uc->timeout; |
2683 | 0 | } else { |
2684 | 0 | err = UC_ERR_ARG; |
2685 | 0 | } |
2686 | 0 | break; |
2687 | 0 | } |
2688 | | |
2689 | 0 | case UC_CTL_UC_PAGE_SIZE: { |
2690 | 0 | if (rw == UC_CTL_IO_READ) { |
2691 | |
|
2692 | 0 | UC_INIT(uc); |
2693 | |
|
2694 | 0 | uint32_t *page_size = va_arg(args, uint32_t *); |
2695 | 0 | *page_size = uc->target_page_size; |
2696 | |
|
2697 | 0 | restore_jit_state(uc); |
2698 | 0 | } else { |
2699 | 0 | uint32_t page_size = va_arg(args, uint32_t); |
2700 | 0 | int bits = 0; |
2701 | |
|
2702 | 0 | if (uc->init_done) { |
2703 | 0 | err = UC_ERR_ARG; |
2704 | 0 | break; |
2705 | 0 | } |
2706 | | |
2707 | 0 | if (uc->arch != UC_ARCH_ARM && uc->arch != UC_ARCH_ARM64) { |
2708 | 0 | err = UC_ERR_ARG; |
2709 | 0 | break; |
2710 | 0 | } |
2711 | | |
2712 | 0 | if ((page_size & (page_size - 1))) { |
2713 | 0 | err = UC_ERR_ARG; |
2714 | 0 | break; |
2715 | 0 | } |
2716 | | |
2717 | | // Bits is used to calculate the mask |
2718 | 0 | while (page_size > 1) { |
2719 | 0 | bits++; |
2720 | 0 | page_size >>= 1; |
2721 | 0 | } |
2722 | |
|
2723 | 0 | uc->target_bits = bits; |
2724 | |
|
2725 | 0 | err = UC_ERR_OK; |
2726 | 0 | } |
2727 | | |
2728 | 0 | break; |
2729 | 0 | } |
2730 | | |
2731 | 0 | case UC_CTL_UC_USE_EXITS: { |
2732 | 0 | if (rw == UC_CTL_IO_WRITE) { |
2733 | 0 | int use_exits = va_arg(args, int); |
2734 | 0 | uc->use_exits = use_exits; |
2735 | 0 | } else { |
2736 | 0 | err = UC_ERR_ARG; |
2737 | 0 | } |
2738 | 0 | break; |
2739 | 0 | } |
2740 | | |
2741 | 0 | case UC_CTL_UC_EXITS_CNT: { |
2742 | |
|
2743 | 0 | UC_INIT(uc); |
2744 | |
|
2745 | 0 | if (!uc->use_exits) { |
2746 | 0 | err = UC_ERR_ARG; |
2747 | 0 | } else if (rw == UC_CTL_IO_READ) { |
2748 | 0 | size_t *exits_cnt = va_arg(args, size_t *); |
2749 | 0 | *exits_cnt = g_tree_nnodes(uc->ctl_exits); |
2750 | 0 | } else { |
2751 | 0 | err = UC_ERR_ARG; |
2752 | 0 | } |
2753 | |
|
2754 | 0 | restore_jit_state(uc); |
2755 | 0 | break; |
2756 | 0 | } |
2757 | | |
2758 | 0 | case UC_CTL_UC_EXITS: { |
2759 | |
|
2760 | 0 | UC_INIT(uc); |
2761 | |
|
2762 | 0 | if (!uc->use_exits) { |
2763 | 0 | err = UC_ERR_ARG; |
2764 | 0 | } else if (rw == UC_CTL_IO_READ) { |
2765 | 0 | uint64_t *exits = va_arg(args, uint64_t *); |
2766 | 0 | size_t cnt = va_arg(args, size_t); |
2767 | 0 | if (cnt < g_tree_nnodes(uc->ctl_exits)) { |
2768 | 0 | err = UC_ERR_ARG; |
2769 | 0 | } else { |
2770 | 0 | uc_ctl_exit_request req; |
2771 | 0 | req.array = exits; |
2772 | 0 | req.len = 0; |
2773 | |
|
2774 | 0 | g_tree_foreach(uc->ctl_exits, uc_read_exit_iter, (void *)&req); |
2775 | 0 | } |
2776 | 0 | } else if (rw == UC_CTL_IO_WRITE) { |
2777 | 0 | uint64_t *exits = va_arg(args, uint64_t *); |
2778 | 0 | size_t cnt = va_arg(args, size_t); |
2779 | |
|
2780 | 0 | g_tree_remove_all(uc->ctl_exits); |
2781 | |
|
2782 | 0 | for (size_t i = 0; i < cnt; i++) { |
2783 | 0 | uc_add_exit(uc, exits[i]); |
2784 | 0 | } |
2785 | 0 | } else { |
2786 | 0 | err = UC_ERR_ARG; |
2787 | 0 | } |
2788 | |
|
2789 | 0 | restore_jit_state(uc); |
2790 | 0 | break; |
2791 | 0 | } |
2792 | | |
2793 | 0 | case UC_CTL_CPU_MODEL: { |
2794 | 0 | if (rw == UC_CTL_IO_READ) { |
2795 | |
|
2796 | 0 | UC_INIT(uc); |
2797 | |
|
2798 | 0 | int *model = va_arg(args, int *); |
2799 | 0 | *model = uc->cpu_model; |
2800 | |
|
2801 | 0 | save_jit_state(uc); |
2802 | 0 | } else { |
2803 | 0 | int model = va_arg(args, int); |
2804 | |
|
2805 | 0 | if (model < 0 || uc->init_done) { |
2806 | 0 | err = UC_ERR_ARG; |
2807 | 0 | break; |
2808 | 0 | } |
2809 | | |
2810 | 0 | if (uc->arch == UC_ARCH_X86) { |
2811 | 0 | if (model >= UC_CPU_X86_ENDING) { |
2812 | 0 | err = UC_ERR_ARG; |
2813 | 0 | break; |
2814 | 0 | } |
2815 | 0 | } else if (uc->arch == UC_ARCH_ARM) { |
2816 | 0 | if (model >= UC_CPU_ARM_ENDING) { |
2817 | 0 | err = UC_ERR_ARG; |
2818 | 0 | break; |
2819 | 0 | } |
2820 | | |
2821 | 0 | if (uc->mode & UC_MODE_BIG_ENDIAN) { |
2822 | | // These cpu models don't support big endian code access. |
2823 | 0 | if (model <= UC_CPU_ARM_CORTEX_A15 && |
2824 | 0 | model >= UC_CPU_ARM_CORTEX_A7) { |
2825 | 0 | err = UC_ERR_ARG; |
2826 | 0 | break; |
2827 | 0 | } |
2828 | 0 | } |
2829 | 0 | } else if (uc->arch == UC_ARCH_ARM64) { |
2830 | 0 | if (model >= UC_CPU_ARM64_ENDING) { |
2831 | 0 | err = UC_ERR_ARG; |
2832 | 0 | break; |
2833 | 0 | } |
2834 | 0 | } else if (uc->arch == UC_ARCH_MIPS) { |
2835 | 0 | if (uc->mode & UC_MODE_32 && model >= UC_CPU_MIPS32_ENDING) { |
2836 | 0 | err = UC_ERR_ARG; |
2837 | 0 | break; |
2838 | 0 | } |
2839 | | |
2840 | 0 | if (uc->mode & UC_MODE_64 && model >= UC_CPU_MIPS64_ENDING) { |
2841 | 0 | err = UC_ERR_ARG; |
2842 | 0 | break; |
2843 | 0 | } |
2844 | 0 | } else if (uc->arch == UC_ARCH_PPC) { |
2845 | | // UC_MODE_PPC32 == UC_MODE_32 |
2846 | 0 | if (uc->mode & UC_MODE_32 && model >= UC_CPU_PPC32_ENDING) { |
2847 | 0 | err = UC_ERR_ARG; |
2848 | 0 | break; |
2849 | 0 | } |
2850 | | |
2851 | 0 | if (uc->mode & UC_MODE_64 && model >= UC_CPU_PPC64_ENDING) { |
2852 | 0 | err = UC_ERR_ARG; |
2853 | 0 | break; |
2854 | 0 | } |
2855 | 0 | } else if (uc->arch == UC_ARCH_RISCV) { |
2856 | 0 | if (uc->mode & UC_MODE_32 && model >= UC_CPU_RISCV32_ENDING) { |
2857 | 0 | err = UC_ERR_ARG; |
2858 | 0 | break; |
2859 | 0 | } |
2860 | | |
2861 | 0 | if (uc->mode & UC_MODE_64 && model >= UC_CPU_RISCV64_ENDING) { |
2862 | 0 | err = UC_ERR_ARG; |
2863 | 0 | break; |
2864 | 0 | } |
2865 | 0 | } else if (uc->arch == UC_ARCH_S390X) { |
2866 | 0 | if (model >= UC_CPU_S390X_ENDING) { |
2867 | 0 | err = UC_ERR_ARG; |
2868 | 0 | break; |
2869 | 0 | } |
2870 | 0 | } else if (uc->arch == UC_ARCH_SPARC) { |
2871 | 0 | if (uc->mode & UC_MODE_32 && model >= UC_CPU_SPARC32_ENDING) { |
2872 | 0 | err = UC_ERR_ARG; |
2873 | 0 | break; |
2874 | 0 | } |
2875 | 0 | if (uc->mode & UC_MODE_64 && model >= UC_CPU_SPARC64_ENDING) { |
2876 | 0 | err = UC_ERR_ARG; |
2877 | 0 | break; |
2878 | 0 | } |
2879 | 0 | } else if (uc->arch == UC_ARCH_M68K) { |
2880 | 0 | if (model >= UC_CPU_M68K_ENDING) { |
2881 | 0 | err = UC_ERR_ARG; |
2882 | 0 | break; |
2883 | 0 | } |
2884 | 0 | } else { |
2885 | 0 | err = UC_ERR_ARG; |
2886 | 0 | break; |
2887 | 0 | } |
2888 | | |
2889 | 0 | uc->cpu_model = model; |
2890 | |
|
2891 | 0 | err = UC_ERR_OK; |
2892 | 0 | } |
2893 | 0 | break; |
2894 | 0 | } |
2895 | | |
2896 | 0 | case UC_CTL_TB_REQUEST_CACHE: { |
2897 | |
|
2898 | 0 | UC_INIT(uc); |
2899 | |
|
2900 | 0 | if (rw == UC_CTL_IO_READ_WRITE) { |
2901 | 0 | uint64_t addr = va_arg(args, uint64_t); |
2902 | 0 | uc_tb *tb = va_arg(args, uc_tb *); |
2903 | 0 | err = uc->uc_gen_tb(uc, addr, tb); |
2904 | 0 | } else { |
2905 | 0 | err = UC_ERR_ARG; |
2906 | 0 | } |
2907 | |
|
2908 | 0 | restore_jit_state(uc); |
2909 | 0 | break; |
2910 | 0 | } |
2911 | | |
2912 | 0 | case UC_CTL_TB_REMOVE_CACHE: { |
2913 | |
|
2914 | 0 | UC_INIT(uc); |
2915 | |
|
2916 | 0 | if (rw == UC_CTL_IO_WRITE) { |
2917 | 0 | uint64_t addr = va_arg(args, uint64_t); |
2918 | 0 | uint64_t end = va_arg(args, uint64_t); |
2919 | 0 | if (end <= addr) { |
2920 | 0 | err = UC_ERR_ARG; |
2921 | 0 | } else { |
2922 | 0 | uc->uc_invalidate_tb(uc, addr, end - addr); |
2923 | 0 | } |
2924 | 0 | } else { |
2925 | 0 | err = UC_ERR_ARG; |
2926 | 0 | } |
2927 | |
|
2928 | 0 | restore_jit_state(uc); |
2929 | 0 | break; |
2930 | 0 | } |
2931 | | |
2932 | 0 | case UC_CTL_TB_FLUSH: |
2933 | |
|
2934 | 0 | UC_INIT(uc); |
2935 | |
|
2936 | 0 | if (rw == UC_CTL_IO_WRITE) { |
2937 | 0 | uc->tb_flush(uc); |
2938 | 0 | } else { |
2939 | 0 | err = UC_ERR_ARG; |
2940 | 0 | } |
2941 | |
|
2942 | 0 | restore_jit_state(uc); |
2943 | 0 | break; |
2944 | | |
2945 | 0 | case UC_CTL_TLB_FLUSH: |
2946 | |
|
2947 | 0 | UC_INIT(uc); |
2948 | |
|
2949 | 0 | if (rw == UC_CTL_IO_WRITE) { |
2950 | 0 | uc->tcg_flush_tlb(uc); |
2951 | 0 | } else { |
2952 | 0 | err = UC_ERR_ARG; |
2953 | 0 | } |
2954 | |
|
2955 | 0 | restore_jit_state(uc); |
2956 | 0 | break; |
2957 | | |
2958 | 0 | case UC_CTL_TLB_TYPE: { |
2959 | |
|
2960 | 0 | UC_INIT(uc); |
2961 | |
|
2962 | 0 | if (rw == UC_CTL_IO_WRITE) { |
2963 | 0 | int mode = va_arg(args, int); |
2964 | 0 | err = uc->set_tlb(uc, mode); |
2965 | 0 | } else { |
2966 | 0 | err = UC_ERR_ARG; |
2967 | 0 | } |
2968 | |
|
2969 | 0 | restore_jit_state(uc); |
2970 | 0 | break; |
2971 | 0 | } |
2972 | | |
2973 | 0 | case UC_CTL_TCG_BUFFER_SIZE: { |
2974 | 0 | if (rw == UC_CTL_IO_WRITE) { |
2975 | 0 | uint32_t size = va_arg(args, uint32_t); |
2976 | 0 | uc->tcg_buffer_size = size; |
2977 | 0 | } else { |
2978 | |
|
2979 | 0 | UC_INIT(uc); |
2980 | |
|
2981 | 0 | uint32_t *size = va_arg(args, uint32_t *); |
2982 | 0 | *size = uc->tcg_buffer_size; |
2983 | |
|
2984 | 0 | restore_jit_state(uc); |
2985 | 0 | } |
2986 | 0 | break; |
2987 | 0 | } |
2988 | | |
2989 | 0 | case UC_CTL_CONTEXT_MODE: |
2990 | |
|
2991 | 0 | UC_INIT(uc); |
2992 | |
|
2993 | 0 | if (rw == UC_CTL_IO_WRITE) { |
2994 | 0 | int mode = va_arg(args, int); |
2995 | 0 | uc->context_content = mode; |
2996 | 0 | err = UC_ERR_OK; |
2997 | 0 | } else { |
2998 | 0 | err = UC_ERR_ARG; |
2999 | 0 | } |
3000 | |
|
3001 | 0 | restore_jit_state(uc); |
3002 | 0 | break; |
3003 | | |
3004 | 0 | case UC_CTL_PAUTH_SIGN: { |
3005 | |
|
3006 | 0 | UC_INIT(uc); |
3007 | |
|
3008 | 0 | if (rw == UC_CTL_IO_READ_WRITE) { |
3009 | 0 | uint64_t ptr = va_arg(args, uint64_t); |
3010 | 0 | int key = va_arg(args, int); |
3011 | 0 | uint64_t diversifier = va_arg(args, uint64_t); |
3012 | 0 | uint64_t *signed_ptr = va_arg(args, uint64_t *); |
3013 | 0 | if (uc->pauth_sign != NULL) { |
3014 | 0 | err = uc->pauth_sign(uc, ptr, key, diversifier, signed_ptr); |
3015 | 0 | } else { |
3016 | 0 | err = UC_ERR_ARG; |
3017 | 0 | } |
3018 | 0 | } else { |
3019 | 0 | err = UC_ERR_ARG; |
3020 | 0 | } |
3021 | |
|
3022 | 0 | restore_jit_state(uc); |
3023 | 0 | break; |
3024 | 0 | } |
3025 | | |
3026 | 0 | case UC_CTL_PAUTH_STRIP: { |
3027 | |
|
3028 | 0 | UC_INIT(uc); |
3029 | |
|
3030 | 0 | if (rw == UC_CTL_IO_READ_WRITE) { |
3031 | 0 | uint64_t ptr = va_arg(args, uint64_t); |
3032 | 0 | int key = va_arg(args, int); |
3033 | 0 | uint64_t *stripped_ptr = va_arg(args, uint64_t *); |
3034 | 0 | if (uc->pauth_strip != NULL) { |
3035 | 0 | err = uc->pauth_strip(uc, ptr, key, stripped_ptr); |
3036 | 0 | } else { |
3037 | 0 | err = UC_ERR_ARG; |
3038 | 0 | } |
3039 | 0 | } else { |
3040 | 0 | err = UC_ERR_ARG; |
3041 | 0 | } |
3042 | |
|
3043 | 0 | restore_jit_state(uc); |
3044 | 0 | break; |
3045 | 0 | } |
3046 | | |
3047 | 0 | case UC_CTL_PAUTH_AUTH: { |
3048 | |
|
3049 | 0 | UC_INIT(uc); |
3050 | |
|
3051 | 0 | if (rw == UC_CTL_IO_READ_WRITE) { |
3052 | 0 | uint64_t ptr = va_arg(args, uint64_t); |
3053 | 0 | int key = va_arg(args, int); |
3054 | 0 | uint64_t diversifier = va_arg(args, uint64_t); |
3055 | 0 | bool *valid = va_arg(args, bool *); |
3056 | 0 | if (uc->pauth_auth != NULL) { |
3057 | 0 | err = uc->pauth_auth(uc, ptr, key, diversifier, valid); |
3058 | 0 | } else { |
3059 | 0 | err = UC_ERR_ARG; |
3060 | 0 | } |
3061 | 0 | } else { |
3062 | 0 | err = UC_ERR_ARG; |
3063 | 0 | } |
3064 | |
|
3065 | 0 | restore_jit_state(uc); |
3066 | 0 | break; |
3067 | 0 | } |
3068 | 0 | case UC_CTL_INVALID_ADDR: |
3069 | 0 | if (rw == UC_CTL_IO_READ) { |
3070 | 0 | uint64_t *invalid_addr = va_arg(args, uint64_t *); |
3071 | 0 | *invalid_addr = uc->invalid_addr; |
3072 | 0 | } else { |
3073 | 0 | err = UC_ERR_ARG; |
3074 | 0 | } |
3075 | 0 | break; |
3076 | | |
3077 | 0 | default: |
3078 | 0 | err = UC_ERR_ARG; |
3079 | 0 | break; |
3080 | 0 | } |
3081 | | |
3082 | 0 | va_end(args); |
3083 | |
|
3084 | 0 | return err; |
3085 | 0 | } |
3086 | | |
3087 | | static uc_err uc_snapshot(struct uc_struct *uc) |
3088 | 0 | { |
3089 | 0 | if (uc->snapshot_level == INT32_MAX) { |
3090 | 0 | return UC_ERR_RESOURCE; |
3091 | 0 | } |
3092 | 0 | uc->snapshot_level++; |
3093 | 0 | return UC_ERR_OK; |
3094 | 0 | } |
3095 | | |
3096 | | static uc_err uc_restore_latest_snapshot(struct uc_struct *uc) |
3097 | 0 | { |
3098 | 0 | MemoryRegion *subregion, *subregion_next, *mr, *initial_mr; |
3099 | 0 | int level; |
3100 | |
|
3101 | 0 | QTAILQ_FOREACH_SAFE(subregion, &uc->system_memory->subregions, |
3102 | 0 | subregions_link, subregion_next) |
3103 | 0 | { |
3104 | 0 | uc->memory_filter_subregions(subregion, uc->snapshot_level); |
3105 | 0 | if (subregion->priority >= uc->snapshot_level || |
3106 | 0 | (!subregion->terminates && QTAILQ_EMPTY(&subregion->subregions))) { |
3107 | 0 | uc->memory_unmap(uc, subregion); |
3108 | 0 | } |
3109 | 0 | } |
3110 | |
|
3111 | 0 | for (size_t i = uc->unmapped_regions->len; i-- > 0;) { |
3112 | 0 | mr = g_array_index(uc->unmapped_regions, MemoryRegion *, i); |
3113 | | // same dirty hack as in memory_moveout see qemu/softmmu/memory.c |
3114 | 0 | initial_mr = QTAILQ_FIRST(&mr->subregions); |
3115 | 0 | if (!initial_mr) { |
3116 | 0 | initial_mr = mr; |
3117 | 0 | } |
3118 | | /* same dirty hack as in memory_moveout see qemu/softmmu/memory.c */ |
3119 | 0 | level = (intptr_t)mr->container; |
3120 | 0 | mr->container = NULL; |
3121 | |
|
3122 | 0 | if (level < uc->snapshot_level) { |
3123 | 0 | break; |
3124 | 0 | } |
3125 | 0 | if (memory_overlap(uc, mr->addr, int128_get64(mr->size))) { |
3126 | 0 | return UC_ERR_MAP; |
3127 | 0 | } |
3128 | 0 | uc->memory_movein(uc, mr); |
3129 | 0 | uc->memory_filter_subregions(mr, uc->snapshot_level); |
3130 | 0 | if (initial_mr != mr && QTAILQ_EMPTY(&mr->subregions)) { |
3131 | 0 | uc->memory_unmap(uc, subregion); |
3132 | 0 | } |
3133 | 0 | mem_map(uc, initial_mr); |
3134 | 0 | g_array_remove_range(uc->unmapped_regions, i, 1); |
3135 | 0 | } |
3136 | 0 | uc->snapshot_level--; |
3137 | |
|
3138 | 0 | return UC_ERR_OK; |
3139 | 0 | } |
3140 | | |
3141 | | #ifdef UNICORN_TRACER |
3142 | | uc_tracer *get_tracer() |
3143 | | { |
3144 | | static uc_tracer tracer; |
3145 | | return &tracer; |
3146 | | } |
3147 | | |
3148 | | void trace_start(uc_tracer *tracer, trace_loc loc) |
3149 | | { |
3150 | | tracer->starts[loc] = get_clock(); |
3151 | | } |
3152 | | |
3153 | | void trace_end(uc_tracer *tracer, trace_loc loc, const char *fmt, ...) |
3154 | | { |
3155 | | va_list args; |
3156 | | int64_t end = get_clock(); |
3157 | | |
3158 | | va_start(args, fmt); |
3159 | | |
3160 | | vfprintf(stderr, fmt, args); |
3161 | | |
3162 | | va_end(args); |
3163 | | |
3164 | | fprintf(stderr, "%.6fus\n", |
3165 | | (double)(end - tracer->starts[loc]) / (double)(1000)); |
3166 | | } |
3167 | | #endif |