Coverage Report

Created: 2025-07-11 06:41

/src/unicorn/uc.c
Line
Count
Source (jump to first uncovered line)
1
/* Unicorn Emulator Engine */
2
/* By Nguyen Anh Quynh <aquynh@gmail.com>, 2015 */
3
/* Modified for Unicorn Engine by Chen Huitao<chenhuitao@hfmrit.com>, 2020 */
4
5
#include "unicorn/unicorn.h"
6
#if defined(UNICORN_HAS_OSXKERNEL)
7
#include <libkern/libkern.h>
8
#else
9
#include <stddef.h>
10
#include <stdio.h>
11
#include <stdlib.h>
12
#endif
13
14
#include <time.h> // nanosleep
15
#include <string.h>
16
17
#include "uc_priv.h"
18
19
// target specific headers
20
#include "qemu/target/m68k/unicorn.h"
21
#include "qemu/target/i386/unicorn.h"
22
#include "qemu/target/arm/unicorn.h"
23
#include "qemu/target/mips/unicorn.h"
24
#include "qemu/target/sparc/unicorn.h"
25
#include "qemu/target/ppc/unicorn.h"
26
#include "qemu/target/riscv/unicorn.h"
27
#include "qemu/target/s390x/unicorn.h"
28
#include "qemu/target/tricore/unicorn.h"
29
30
#include "qemu/include/tcg/tcg-apple-jit.h"
31
#include "qemu/include/qemu/queue.h"
32
#include "qemu-common.h"
33
34
static void clear_deleted_hooks(uc_engine *uc);
35
static uc_err uc_snapshot(uc_engine *uc);
36
static uc_err uc_restore_latest_snapshot(uc_engine *uc);
37
38
#if defined(__APPLE__) && defined(HAVE_PTHREAD_JIT_PROTECT) &&                 \
39
    (defined(__arm__) || defined(__aarch64__))
40
static void save_jit_state(uc_engine *uc)
41
{
42
    if (!uc->nested) {
43
        uc->thread_executable_entry = thread_executable();
44
        uc->current_executable = uc->thread_executable_entry;
45
    }
46
47
    uc->nested += 1;
48
}
49
50
static void restore_jit_state(uc_engine *uc)
51
{
52
    assert(uc->nested > 0);
53
    if (uc->nested == 1) {
54
        assert_executable(uc->current_executable);
55
        if (uc->current_executable != uc->thread_executable_entry) {
56
            if (uc->thread_executable_entry) {
57
                jit_write_protect(true);
58
            } else {
59
                jit_write_protect(false);
60
            }
61
        }
62
    }
63
    uc->nested -= 1;
64
}
65
#else
66
static void save_jit_state(uc_engine *uc)
67
6.51M
{
68
6.51M
    (void)uc;
69
6.51M
}
70
static void restore_jit_state(uc_engine *uc)
71
6.51M
{
72
6.51M
    (void)uc;
73
6.51M
}
74
#endif
75
76
static void *hook_insert(struct list *l, struct hook *h)
77
206k
{
78
206k
    void *item = list_insert(l, (void *)h);
79
206k
    if (item) {
80
206k
        h->refs++;
81
206k
    }
82
206k
    return item;
83
206k
}
84
85
static void *hook_append(struct list *l, struct hook *h)
86
0
{
87
0
    void *item = list_append(l, (void *)h);
88
0
    if (item) {
89
0
        h->refs++;
90
0
    }
91
0
    return item;
92
0
}
93
94
static void hook_invalidate_region(void *key, void *data, void *opaq)
95
0
{
96
0
    uc_engine *uc = (uc_engine *)opaq;
97
0
    HookedRegion *region = (HookedRegion *)key;
98
99
0
    uc->uc_invalidate_tb(uc, region->start, region->length);
100
0
}
101
102
static void hook_delete(void *data)
103
206k
{
104
206k
    struct hook *h = (struct hook *)data;
105
106
206k
    h->refs--;
107
108
206k
    if (h->refs == 0) {
109
206k
        g_hash_table_destroy(h->hooked_regions);
110
206k
        free(h);
111
206k
    }
112
206k
}
113
114
UNICORN_EXPORT
115
unsigned int uc_version(unsigned int *major, unsigned int *minor)
116
0
{
117
0
    if (major != NULL && minor != NULL) {
118
0
        *major = UC_API_MAJOR;
119
0
        *minor = UC_API_MINOR;
120
0
    }
121
122
0
    return (UC_API_MAJOR << 24) + (UC_API_MINOR << 16) + (UC_API_PATCH << 8) +
123
0
           UC_API_EXTRA;
124
0
}
125
126
static uc_err default_reg_read(void *env, int mode, unsigned int regid,
127
                               void *value, size_t *size)
128
0
{
129
0
    return UC_ERR_HANDLE;
130
0
}
131
132
static uc_err default_reg_write(void *env, int mode, unsigned int regid,
133
                                const void *value, size_t *size, int *setpc)
134
0
{
135
0
    return UC_ERR_HANDLE;
136
0
}
137
138
UNICORN_EXPORT
139
uc_err uc_errno(uc_engine *uc)
140
0
{
141
0
    return uc->errnum;
142
0
}
143
144
UNICORN_EXPORT
145
const char *uc_strerror(uc_err code)
146
91.7k
{
147
91.7k
    switch (code) {
148
0
    default:
149
0
        return "Unknown error code";
150
0
    case UC_ERR_OK:
151
0
        return "OK (UC_ERR_OK)";
152
0
    case UC_ERR_NOMEM:
153
0
        return "No memory available or memory not present (UC_ERR_NOMEM)";
154
0
    case UC_ERR_ARCH:
155
0
        return "Invalid/unsupported architecture (UC_ERR_ARCH)";
156
0
    case UC_ERR_HANDLE:
157
0
        return "Invalid handle (UC_ERR_HANDLE)";
158
0
    case UC_ERR_MODE:
159
0
        return "Invalid mode (UC_ERR_MODE)";
160
0
    case UC_ERR_VERSION:
161
0
        return "Different API version between core & binding (UC_ERR_VERSION)";
162
20.9k
    case UC_ERR_READ_UNMAPPED:
163
20.9k
        return "Invalid memory read (UC_ERR_READ_UNMAPPED)";
164
9.16k
    case UC_ERR_WRITE_UNMAPPED:
165
9.16k
        return "Invalid memory write (UC_ERR_WRITE_UNMAPPED)";
166
4.94k
    case UC_ERR_FETCH_UNMAPPED:
167
4.94k
        return "Invalid memory fetch (UC_ERR_FETCH_UNMAPPED)";
168
0
    case UC_ERR_HOOK:
169
0
        return "Invalid hook type (UC_ERR_HOOK)";
170
10.9k
    case UC_ERR_INSN_INVALID:
171
10.9k
        return "Invalid instruction (UC_ERR_INSN_INVALID)";
172
0
    case UC_ERR_MAP:
173
0
        return "Invalid memory mapping (UC_ERR_MAP)";
174
0
    case UC_ERR_WRITE_PROT:
175
0
        return "Write to write-protected memory (UC_ERR_WRITE_PROT)";
176
0
    case UC_ERR_READ_PROT:
177
0
        return "Read from non-readable memory (UC_ERR_READ_PROT)";
178
0
    case UC_ERR_FETCH_PROT:
179
0
        return "Fetch from non-executable memory (UC_ERR_FETCH_PROT)";
180
0
    case UC_ERR_ARG:
181
0
        return "Invalid argument (UC_ERR_ARG)";
182
166
    case UC_ERR_READ_UNALIGNED:
183
166
        return "Read from unaligned memory (UC_ERR_READ_UNALIGNED)";
184
102
    case UC_ERR_WRITE_UNALIGNED:
185
102
        return "Write to unaligned memory (UC_ERR_WRITE_UNALIGNED)";
186
0
    case UC_ERR_FETCH_UNALIGNED:
187
0
        return "Fetch from unaligned memory (UC_ERR_FETCH_UNALIGNED)";
188
0
    case UC_ERR_RESOURCE:
189
0
        return "Insufficient resource (UC_ERR_RESOURCE)";
190
45.4k
    case UC_ERR_EXCEPTION:
191
45.4k
        return "Unhandled CPU exception (UC_ERR_EXCEPTION)";
192
0
    case UC_ERR_OVERFLOW:
193
0
        return "Provided buffer is too small (UC_ERR_OVERFLOW)";
194
91.7k
    }
195
91.7k
}
196
197
UNICORN_EXPORT
198
bool uc_arch_supported(uc_arch arch)
199
0
{
200
0
    switch (arch) {
201
0
#ifdef UNICORN_HAS_ARM
202
0
    case UC_ARCH_ARM:
203
0
        return true;
204
0
#endif
205
0
#ifdef UNICORN_HAS_ARM64
206
0
    case UC_ARCH_ARM64:
207
0
        return true;
208
0
#endif
209
0
#ifdef UNICORN_HAS_M68K
210
0
    case UC_ARCH_M68K:
211
0
        return true;
212
0
#endif
213
0
#ifdef UNICORN_HAS_MIPS
214
0
    case UC_ARCH_MIPS:
215
0
        return true;
216
0
#endif
217
0
#ifdef UNICORN_HAS_PPC
218
0
    case UC_ARCH_PPC:
219
0
        return true;
220
0
#endif
221
0
#ifdef UNICORN_HAS_SPARC
222
0
    case UC_ARCH_SPARC:
223
0
        return true;
224
0
#endif
225
0
#ifdef UNICORN_HAS_X86
226
0
    case UC_ARCH_X86:
227
0
        return true;
228
0
#endif
229
0
#ifdef UNICORN_HAS_RISCV
230
0
    case UC_ARCH_RISCV:
231
0
        return true;
232
0
#endif
233
0
#ifdef UNICORN_HAS_S390X
234
0
    case UC_ARCH_S390X:
235
0
        return true;
236
0
#endif
237
0
#ifdef UNICORN_HAS_TRICORE
238
0
    case UC_ARCH_TRICORE:
239
0
        return true;
240
0
#endif
241
    /* Invalid or disabled arch */
242
0
    default:
243
0
        return false;
244
0
    }
245
0
}
246
247
#define UC_INIT(uc)                                                            \
248
6.51M
    save_jit_state(uc);                                                        \
249
6.51M
    if (unlikely(!(uc)->init_done)) {                                          \
250
206k
        int __init_ret = uc_init_engine(uc);                                   \
251
206k
        if (unlikely(__init_ret != UC_ERR_OK)) {                               \
252
0
            return __init_ret;                                                 \
253
0
        }                                                                      \
254
206k
    }
255
256
static gint uc_exits_cmp(gconstpointer a, gconstpointer b, gpointer user_data)
257
0
{
258
0
    uint64_t lhs = *((uint64_t *)a);
259
0
    uint64_t rhs = *((uint64_t *)b);
260
261
0
    if (lhs < rhs) {
262
0
        return -1;
263
0
    } else if (lhs == rhs) {
264
0
        return 0;
265
0
    } else {
266
0
        return 1;
267
0
    }
268
0
}
269
270
static uc_err uc_init_engine(uc_engine *uc)
271
206k
{
272
206k
    if (uc->init_done) {
273
0
        return UC_ERR_HANDLE;
274
0
    }
275
276
206k
    uc->hooks_to_del.delete_fn = hook_delete;
277
278
3.92M
    for (int i = 0; i < UC_HOOK_MAX; i++) {
279
3.72M
        uc->hook[i].delete_fn = hook_delete;
280
3.72M
    }
281
282
206k
    uc->ctl_exits = g_tree_new_full(uc_exits_cmp, NULL, g_free, NULL);
283
284
206k
    if (machine_initialize(uc)) {
285
0
        return UC_ERR_RESOURCE;
286
0
    }
287
288
    // init tlb function
289
206k
    if (!uc->cpu->cc->tlb_fill) {
290
206k
        uc->set_tlb(uc, UC_TLB_CPU);
291
206k
    }
292
293
    // init fpu softfloat
294
206k
    uc->softfloat_initialize();
295
296
206k
    if (uc->reg_reset) {
297
206k
        uc->reg_reset(uc);
298
206k
    }
299
300
206k
    uc->context_content = UC_CTL_CONTEXT_CPU;
301
302
206k
    uc->unmapped_regions = g_array_new(false, false, sizeof(MemoryRegion *));
303
304
206k
    uc->init_done = true;
305
306
206k
    return UC_ERR_OK;
307
206k
}
308
309
UNICORN_EXPORT
310
uc_err uc_open(uc_arch arch, uc_mode mode, uc_engine **result)
311
206k
{
312
206k
    struct uc_struct *uc;
313
314
206k
    if (arch < UC_ARCH_MAX) {
315
206k
        uc = calloc(1, sizeof(*uc));
316
206k
        if (!uc) {
317
            // memory insufficient
318
0
            return UC_ERR_NOMEM;
319
0
        }
320
321
        /* qemu/exec.c: phys_map_node_reserve() */
322
206k
        uc->alloc_hint = 16;
323
206k
        uc->errnum = UC_ERR_OK;
324
206k
        uc->arch = arch;
325
206k
        uc->mode = mode;
326
206k
        uc->reg_read = default_reg_read;
327
206k
        uc->reg_write = default_reg_write;
328
329
        // uc->ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };
330
206k
        QLIST_INIT(&uc->ram_list.blocks);
331
332
206k
        QTAILQ_INIT(&uc->memory_listeners);
333
334
206k
        QTAILQ_INIT(&uc->address_spaces);
335
336
206k
        switch (arch) {
337
0
        default:
338
0
            break;
339
0
#ifdef UNICORN_HAS_M68K
340
4.07k
        case UC_ARCH_M68K:
341
4.07k
            if ((mode & ~UC_MODE_M68K_MASK) || !(mode & UC_MODE_BIG_ENDIAN)) {
342
0
                free(uc);
343
0
                return UC_ERR_MODE;
344
0
            }
345
4.07k
            uc->init_arch = uc_init_m68k;
346
4.07k
            break;
347
0
#endif
348
0
#ifdef UNICORN_HAS_X86
349
32.3k
        case UC_ARCH_X86:
350
32.3k
            if ((mode & ~UC_MODE_X86_MASK) || (mode & UC_MODE_BIG_ENDIAN) ||
351
32.3k
                !(mode & (UC_MODE_16 | UC_MODE_32 | UC_MODE_64))) {
352
0
                free(uc);
353
0
                return UC_ERR_MODE;
354
0
            }
355
32.3k
            uc->init_arch = uc_init_x86_64;
356
32.3k
            break;
357
0
#endif
358
0
#ifdef UNICORN_HAS_ARM
359
54.3k
        case UC_ARCH_ARM:
360
54.3k
            if ((mode & ~UC_MODE_ARM_MASK)) {
361
0
                free(uc);
362
0
                return UC_ERR_MODE;
363
0
            }
364
54.3k
            uc->init_arch = uc_init_arm;
365
366
54.3k
            if (mode & UC_MODE_THUMB) {
367
21.5k
                uc->thumb = 1;
368
21.5k
            }
369
54.3k
            break;
370
0
#endif
371
0
#ifdef UNICORN_HAS_ARM64
372
87.6k
        case UC_ARCH_ARM64:
373
87.6k
            if (mode & ~UC_MODE_ARM_MASK) {
374
0
                free(uc);
375
0
                return UC_ERR_MODE;
376
0
            }
377
87.6k
            uc->init_arch = uc_init_aarch64;
378
87.6k
            break;
379
0
#endif
380
381
0
#if defined(UNICORN_HAS_MIPS) || defined(UNICORN_HAS_MIPSEL) ||                \
382
0
    defined(UNICORN_HAS_MIPS64) || defined(UNICORN_HAS_MIPS64EL)
383
13.9k
        case UC_ARCH_MIPS:
384
13.9k
            if ((mode & ~UC_MODE_MIPS_MASK) ||
385
13.9k
                !(mode & (UC_MODE_MIPS32 | UC_MODE_MIPS64))) {
386
0
                free(uc);
387
0
                return UC_ERR_MODE;
388
0
            }
389
13.9k
            if (mode & UC_MODE_BIG_ENDIAN) {
390
6.91k
#ifdef UNICORN_HAS_MIPS
391
6.91k
                if (mode & UC_MODE_MIPS32) {
392
6.91k
                    uc->init_arch = uc_init_mips;
393
6.91k
                }
394
6.91k
#endif
395
6.91k
#ifdef UNICORN_HAS_MIPS64
396
6.91k
                if (mode & UC_MODE_MIPS64) {
397
0
                    uc->init_arch = uc_init_mips64;
398
0
                }
399
6.91k
#endif
400
7.05k
            } else { // little endian
401
7.05k
#ifdef UNICORN_HAS_MIPSEL
402
7.05k
                if (mode & UC_MODE_MIPS32) {
403
7.05k
                    uc->init_arch = uc_init_mipsel;
404
7.05k
                }
405
7.05k
#endif
406
7.05k
#ifdef UNICORN_HAS_MIPS64EL
407
7.05k
                if (mode & UC_MODE_MIPS64) {
408
0
                    uc->init_arch = uc_init_mips64el;
409
0
                }
410
7.05k
#endif
411
7.05k
            }
412
13.9k
            break;
413
0
#endif
414
415
0
#ifdef UNICORN_HAS_SPARC
416
39
        case UC_ARCH_SPARC:
417
39
            if ((mode & ~UC_MODE_SPARC_MASK) || !(mode & UC_MODE_BIG_ENDIAN) ||
418
39
                !(mode & (UC_MODE_SPARC32 | UC_MODE_SPARC64))) {
419
0
                free(uc);
420
0
                return UC_ERR_MODE;
421
0
            }
422
39
            if (mode & UC_MODE_SPARC64) {
423
0
                uc->init_arch = uc_init_sparc64;
424
39
            } else {
425
39
                uc->init_arch = uc_init_sparc;
426
39
            }
427
39
            break;
428
0
#endif
429
0
#ifdef UNICORN_HAS_PPC
430
0
        case UC_ARCH_PPC:
431
0
            if ((mode & ~UC_MODE_PPC_MASK) || !(mode & UC_MODE_BIG_ENDIAN) ||
432
0
                !(mode & (UC_MODE_PPC32 | UC_MODE_PPC64))) {
433
0
                free(uc);
434
0
                return UC_ERR_MODE;
435
0
            }
436
0
            if (mode & UC_MODE_PPC64) {
437
0
                uc->init_arch = uc_init_ppc64;
438
0
            } else {
439
0
                uc->init_arch = uc_init_ppc;
440
0
            }
441
0
            break;
442
0
#endif
443
0
#ifdef UNICORN_HAS_RISCV
444
0
        case UC_ARCH_RISCV:
445
0
            if ((mode & ~UC_MODE_RISCV_MASK) ||
446
0
                !(mode & (UC_MODE_RISCV32 | UC_MODE_RISCV64))) {
447
0
                free(uc);
448
0
                return UC_ERR_MODE;
449
0
            }
450
0
            if (mode & UC_MODE_RISCV32) {
451
0
                uc->init_arch = uc_init_riscv32;
452
0
            } else if (mode & UC_MODE_RISCV64) {
453
0
                uc->init_arch = uc_init_riscv64;
454
0
            } else {
455
0
                free(uc);
456
0
                return UC_ERR_MODE;
457
0
            }
458
0
            break;
459
0
#endif
460
0
#ifdef UNICORN_HAS_S390X
461
14.3k
        case UC_ARCH_S390X:
462
14.3k
            if ((mode & ~UC_MODE_S390X_MASK) || !(mode & UC_MODE_BIG_ENDIAN)) {
463
0
                free(uc);
464
0
                return UC_ERR_MODE;
465
0
            }
466
14.3k
            uc->init_arch = uc_init_s390x;
467
14.3k
            break;
468
0
#endif
469
0
#ifdef UNICORN_HAS_TRICORE
470
0
        case UC_ARCH_TRICORE:
471
0
            if ((mode & ~UC_MODE_TRICORE_MASK)) {
472
0
                free(uc);
473
0
                return UC_ERR_MODE;
474
0
            }
475
0
            uc->init_arch = uc_init_tricore;
476
0
            break;
477
206k
#endif
478
206k
        }
479
480
206k
        if (uc->init_arch == NULL) {
481
0
            free(uc);
482
0
            return UC_ERR_ARCH;
483
0
        }
484
485
206k
        uc->init_done = false;
486
206k
        uc->cpu_model = INT_MAX; // INT_MAX means the default cpu model.
487
488
206k
        *result = uc;
489
490
206k
        return UC_ERR_OK;
491
206k
    } else {
492
0
        return UC_ERR_ARCH;
493
0
    }
494
206k
}
495
496
UNICORN_EXPORT
497
uc_err uc_close(uc_engine *uc)
498
206k
{
499
206k
    int i;
500
206k
    MemoryRegion *mr;
501
502
206k
    if (!uc->init_done) {
503
0
        free(uc);
504
0
        return UC_ERR_OK;
505
0
    }
506
507
    // Flush all translation buffers or we leak memory allocated by MMU
508
206k
    uc->tb_flush(uc);
509
510
    // Cleanup internally.
511
206k
    if (uc->release) {
512
206k
        uc->release(uc->tcg_ctx);
513
206k
    }
514
206k
    g_free(uc->tcg_ctx);
515
516
    // Cleanup CPU.
517
206k
    g_free(uc->cpu->cpu_ases);
518
206k
    g_free(uc->cpu->thread);
519
520
    /* cpu */
521
206k
    qemu_vfree(uc->cpu);
522
523
    /* flatviews */
524
206k
    g_hash_table_destroy(uc->flat_views);
525
526
    // During flatviews destruction, we may still access memory regions.
527
    // So we free them afterwards.
528
    /* memory */
529
206k
    mr = &uc->io_mem_unassigned;
530
206k
    mr->destructor(mr);
531
206k
    mr = uc->system_io;
532
206k
    mr->destructor(mr);
533
206k
    mr = uc->system_memory;
534
206k
    mr->destructor(mr);
535
206k
    g_free(uc->system_memory);
536
206k
    g_free(uc->system_io);
537
206k
    for (size_t i = 0; i < uc->unmapped_regions->len; i++) {
538
0
        mr = g_array_index(uc->unmapped_regions, MemoryRegion *, i);
539
0
        mr->destructor(mr);
540
0
        g_free(mr);
541
0
    }
542
206k
    g_array_free(uc->unmapped_regions, true);
543
544
    // Thread relateds.
545
206k
    if (uc->qemu_thread_data) {
546
0
        g_free(uc->qemu_thread_data);
547
0
    }
548
549
    /* free */
550
206k
    g_free(uc->init_target_page);
551
552
    // Other auxilaries.
553
206k
    g_free(uc->l1_map);
554
555
206k
    if (uc->bounce.buffer) {
556
0
        qemu_vfree(uc->bounce.buffer);
557
0
    }
558
559
    // free hooks and hook lists
560
206k
    clear_deleted_hooks(uc);
561
562
3.92M
    for (i = 0; i < UC_HOOK_MAX; i++) {
563
3.72M
        list_clear(&uc->hook[i]);
564
3.72M
    }
565
566
206k
    free(uc->mapped_blocks);
567
568
206k
    g_tree_destroy(uc->ctl_exits);
569
570
    // finally, free uc itself.
571
206k
    memset(uc, 0, sizeof(*uc));
572
206k
    free(uc);
573
574
206k
    return UC_ERR_OK;
575
206k
}
576
577
UNICORN_EXPORT
578
uc_err uc_reg_read_batch(uc_engine *uc, int const *regs, void **vals, int count)
579
0
{
580
0
    UC_INIT(uc);
581
0
    reg_read_t reg_read = uc->reg_read;
582
0
    void *env = uc->cpu->env_ptr;
583
0
    int mode = uc->mode;
584
0
    int i;
585
586
0
    for (i = 0; i < count; i++) {
587
0
        unsigned int regid = regs[i];
588
0
        void *value = vals[i];
589
0
        size_t size = (size_t)-1;
590
0
        uc_err err = reg_read(env, mode, regid, value, &size);
591
0
        if (err) {
592
0
            restore_jit_state(uc);
593
0
            return err;
594
0
        }
595
0
    }
596
597
0
    restore_jit_state(uc);
598
0
    return UC_ERR_OK;
599
0
}
600
601
UNICORN_EXPORT
602
uc_err uc_reg_write_batch(uc_engine *uc, int const *regs, void *const *vals,
603
                          int count)
604
0
{
605
0
    UC_INIT(uc);
606
0
    reg_write_t reg_write = uc->reg_write;
607
0
    void *env = uc->cpu->env_ptr;
608
0
    int mode = uc->mode;
609
0
    int setpc = 0;
610
0
    int i;
611
612
0
    for (i = 0; i < count; i++) {
613
0
        unsigned int regid = regs[i];
614
0
        const void *value = vals[i];
615
0
        size_t size = (size_t)-1;
616
0
        uc_err err = reg_write(env, mode, regid, value, &size, &setpc);
617
0
        if (err) {
618
0
            restore_jit_state(uc);
619
0
            return err;
620
0
        }
621
0
    }
622
0
    if (setpc) {
623
        // force to quit execution and flush TB
624
0
        uc->quit_request = true;
625
0
        break_translation_loop(uc);
626
0
    }
627
628
0
    restore_jit_state(uc);
629
0
    return UC_ERR_OK;
630
0
}
631
632
UNICORN_EXPORT
633
uc_err uc_reg_read_batch2(uc_engine *uc, int const *regs, void *const *vals,
634
                          size_t *sizes, int count)
635
0
{
636
0
    UC_INIT(uc);
637
0
    reg_read_t reg_read = uc->reg_read;
638
0
    void *env = uc->cpu->env_ptr;
639
0
    int mode = uc->mode;
640
0
    int i;
641
642
0
    for (i = 0; i < count; i++) {
643
0
        unsigned int regid = regs[i];
644
0
        void *value = vals[i];
645
0
        uc_err err = reg_read(env, mode, regid, value, sizes + i);
646
0
        if (err) {
647
0
            restore_jit_state(uc);
648
0
            return err;
649
0
        }
650
0
    }
651
652
0
    restore_jit_state(uc);
653
0
    return UC_ERR_OK;
654
0
}
655
656
UNICORN_EXPORT
657
uc_err uc_reg_write_batch2(uc_engine *uc, int const *regs,
658
                           const void *const *vals, size_t *sizes, int count)
659
0
{
660
0
    UC_INIT(uc);
661
0
    reg_write_t reg_write = uc->reg_write;
662
0
    void *env = uc->cpu->env_ptr;
663
0
    int mode = uc->mode;
664
0
    int setpc = 0;
665
0
    int i;
666
667
0
    for (i = 0; i < count; i++) {
668
0
        unsigned int regid = regs[i];
669
0
        const void *value = vals[i];
670
0
        uc_err err = reg_write(env, mode, regid, value, sizes + i, &setpc);
671
0
        if (err) {
672
0
            restore_jit_state(uc);
673
0
            return err;
674
0
        }
675
0
    }
676
0
    if (setpc) {
677
        // force to quit execution and flush TB
678
0
        uc->quit_request = true;
679
0
        break_translation_loop(uc);
680
0
    }
681
682
0
    restore_jit_state(uc);
683
0
    return UC_ERR_OK;
684
0
}
685
686
UNICORN_EXPORT
687
uc_err uc_reg_read(uc_engine *uc, int regid, void *value)
688
74
{
689
74
    UC_INIT(uc);
690
74
    size_t size = (size_t)-1;
691
74
    uc_err err = uc->reg_read(uc->cpu->env_ptr, uc->mode, regid, value, &size);
692
74
    restore_jit_state(uc);
693
74
    return err;
694
74
}
695
696
UNICORN_EXPORT
697
uc_err uc_reg_write(uc_engine *uc, int regid, const void *value)
698
206k
{
699
206k
    UC_INIT(uc);
700
206k
    int setpc = 0;
701
206k
    size_t size = (size_t)-1;
702
206k
    uc_err err =
703
206k
        uc->reg_write(uc->cpu->env_ptr, uc->mode, regid, value, &size, &setpc);
704
206k
    if (err) {
705
0
        restore_jit_state(uc);
706
0
        return err;
707
0
    }
708
206k
    if (setpc) {
709
        // force to quit execution and flush TB
710
206k
        uc->quit_request = true;
711
206k
        uc->skip_sync_pc_on_exit = true;
712
206k
        break_translation_loop(uc);
713
206k
    }
714
715
206k
    restore_jit_state(uc);
716
206k
    return UC_ERR_OK;
717
206k
}
718
719
UNICORN_EXPORT
720
uc_err uc_reg_read2(uc_engine *uc, int regid, void *value, size_t *size)
721
0
{
722
0
    UC_INIT(uc);
723
0
    uc_err err = uc->reg_read(uc->cpu->env_ptr, uc->mode, regid, value, size);
724
0
    restore_jit_state(uc);
725
0
    return err;
726
0
}
727
728
UNICORN_EXPORT
729
uc_err uc_reg_write2(uc_engine *uc, int regid, const void *value, size_t *size)
730
0
{
731
0
    UC_INIT(uc);
732
0
    int setpc = 0;
733
0
    uc_err err =
734
0
        uc->reg_write(uc->cpu->env_ptr, uc->mode, regid, value, size, &setpc);
735
0
    if (err) {
736
0
        restore_jit_state(uc);
737
0
        return err;
738
0
    }
739
0
    if (setpc) {
740
        // force to quit execution and flush TB
741
0
        uc->quit_request = true;
742
0
        break_translation_loop(uc);
743
0
    }
744
745
0
    restore_jit_state(uc);
746
0
    return UC_ERR_OK;
747
0
}
748
749
static uint64_t memory_region_len(uc_engine *uc, MemoryRegion *mr,
750
                                  uint64_t address, uint64_t count)
751
413k
{
752
413k
    hwaddr end = mr->end;
753
413k
    while (mr->container != uc->system_memory) {
754
0
        mr = mr->container;
755
0
        end += mr->addr;
756
0
    }
757
413k
    return (uint64_t)MIN(count, end - address);
758
413k
}
759
760
// check if a memory area is mapped
761
// this is complicated because an area can overlap adjacent blocks
762
static bool check_mem_area(uc_engine *uc, uint64_t address, size_t size)
763
206k
{
764
206k
    size_t count = 0, len;
765
766
413k
    while (count < size) {
767
206k
        MemoryRegion *mr = uc->memory_mapping(uc, address);
768
206k
        if (mr) {
769
206k
            len = memory_region_len(uc, mr, address, size - count);
770
206k
            count += len;
771
206k
            address += len;
772
206k
        } else { // this address is not mapped in yet
773
0
            break;
774
0
        }
775
206k
    }
776
777
206k
    return (count == size);
778
206k
}
779
780
uc_err uc_vmem_translate(uc_engine *uc, uint64_t address, uc_prot prot,
781
                              uint64_t *paddress)
782
0
{
783
0
    UC_INIT(uc);
784
785
0
    if (!(UC_PROT_READ == prot || UC_PROT_WRITE == prot ||
786
0
          UC_PROT_EXEC == prot)) {
787
0
        restore_jit_state(uc);
788
0
        return UC_ERR_ARG;
789
0
    }
790
791
    // The sparc mmu doesn't support probe mode
792
0
    if (uc->arch == UC_ARCH_SPARC && uc->cpu->cc->tlb_fill == uc->cpu->cc->tlb_fill_cpu) {
793
0
        restore_jit_state(uc);
794
0
        return UC_ERR_ARG;
795
0
    }
796
797
0
    if (!uc->virtual_to_physical(uc, address, prot, paddress)) {
798
0
        restore_jit_state(uc);
799
0
        switch (prot) {
800
0
        case UC_PROT_READ:
801
0
            return UC_ERR_READ_PROT;
802
0
        case UC_PROT_WRITE:
803
0
            return UC_ERR_WRITE_PROT;
804
0
        case UC_PROT_EXEC:
805
0
            return UC_ERR_FETCH_PROT;
806
0
        default:
807
0
            return UC_ERR_ARG;
808
0
        }
809
0
    }
810
811
0
    restore_jit_state(uc);
812
0
    return UC_ERR_OK;
813
0
}
814
815
UNICORN_EXPORT
816
uc_err uc_vmem_read(uc_engine *uc, uint64_t address, uc_prot prot,
817
                           void *_bytes, size_t size)
818
0
{
819
0
    size_t count = 0, len;
820
0
    uint8_t *bytes = _bytes;
821
0
    uint64_t align;
822
0
    uint64_t pagesize;
823
824
0
    UC_INIT(uc);
825
826
    // qemu cpu_physical_memory_rw() size is an int
827
0
    if (size > INT_MAX) {
828
0
        restore_jit_state(uc);
829
0
        return UC_ERR_ARG;
830
0
    }
831
832
    // The sparc mmu doesn't support probe mode
833
0
    if (uc->arch == UC_ARCH_SPARC && uc->cpu->cc->tlb_fill == uc->cpu->cc->tlb_fill_cpu) {
834
0
        restore_jit_state(uc);
835
0
        return UC_ERR_ARG;
836
0
    }
837
838
0
    if (!(UC_PROT_READ == prot || UC_PROT_WRITE == prot ||
839
0
          UC_PROT_EXEC == prot)) {
840
0
        restore_jit_state(uc);
841
0
        return UC_ERR_ARG;
842
0
    }
843
844
0
    while (count < size) {
845
0
        align = uc->target_page_align;
846
0
        pagesize = uc->target_page_size;
847
0
        len = MIN(size - count, (address & ~align) + pagesize - address);
848
0
        if (!uc->read_mem_virtual(uc, address, prot, bytes, len)) {
849
0
            restore_jit_state(uc);
850
0
            return UC_ERR_READ_PROT;
851
0
        }
852
0
        bytes += len;
853
0
        address += len;
854
0
        count += len;
855
0
    }
856
0
    assert(count == size);
857
0
    restore_jit_state(uc);
858
0
    return UC_ERR_OK;
859
0
}
860
861
UNICORN_EXPORT
862
uc_err uc_vmem_write(uc_engine *uc, uint64_t address, uc_prot prot,
863
                           void *_bytes, size_t size)
864
0
{
865
0
    size_t count = 0, len;
866
0
    uint8_t *bytes = _bytes;
867
0
    uint64_t align;
868
0
    uint64_t pagesize;
869
0
    uint64_t paddr = 0;
870
871
0
    UC_INIT(uc);
872
873
    // qemu cpu_physical_memory_rw() size is an int
874
0
    if (size > INT_MAX) {
875
0
        restore_jit_state(uc);
876
0
        return UC_ERR_ARG;
877
0
    }
878
879
    // The sparc mmu doesn't support probe mode
880
0
    if (uc->arch == UC_ARCH_SPARC && uc->cpu->cc->tlb_fill == uc->cpu->cc->tlb_fill_cpu) {
881
0
        restore_jit_state(uc);
882
0
        return UC_ERR_ARG;
883
0
    }
884
885
0
    if (!(UC_PROT_READ == prot || UC_PROT_WRITE == prot ||
886
0
          UC_PROT_EXEC == prot)) {
887
0
        restore_jit_state(uc);
888
0
        return UC_ERR_ARG;
889
0
    }
890
891
0
    while (count < size) {
892
0
        align = uc->target_page_align;
893
0
        pagesize = uc->target_page_size;
894
0
        len = MIN(size - count, (address & ~align) + pagesize - address);
895
0
  if (uc_vmem_translate(uc, address, prot, &paddr) != UC_ERR_OK) {
896
0
            restore_jit_state(uc);
897
0
            return UC_ERR_WRITE_PROT;
898
0
  }
899
0
        if (uc_mem_write(uc, paddr, bytes, len) != UC_ERR_OK) {
900
0
            restore_jit_state(uc);
901
0
            return UC_ERR_WRITE_PROT;
902
0
        }
903
0
        bytes += len;
904
0
        address += len;
905
0
        count += len;
906
0
    }
907
0
    assert(count == size);
908
0
    restore_jit_state(uc);
909
0
    return UC_ERR_OK;
910
0
}
911
912
UNICORN_EXPORT
913
uc_err uc_mem_read(uc_engine *uc, uint64_t address, void *_bytes, uint64_t size)
914
0
{
915
0
    uint64_t count = 0, len;
916
0
    uint8_t *bytes = _bytes;
917
918
0
    UC_INIT(uc);
919
920
0
    if (!check_mem_area(uc, address, size)) {
921
0
        restore_jit_state(uc);
922
0
        return UC_ERR_READ_UNMAPPED;
923
0
    }
924
925
    // memory area can overlap adjacent memory blocks
926
0
    while (count < size) {
927
0
        MemoryRegion *mr = uc->memory_mapping(uc, address);
928
0
        if (mr) {
929
0
            len = memory_region_len(uc, mr, address, size - count);
930
0
            if (uc->read_mem(&uc->address_space_memory, address, bytes, len) ==
931
0
                false) {
932
0
                break;
933
0
            }
934
0
            count += len;
935
0
            address += len;
936
0
            bytes += len;
937
0
        } else { // this address is not mapped in yet
938
0
            break;
939
0
        }
940
0
    }
941
942
0
    if (count == size) {
943
0
        restore_jit_state(uc);
944
0
        return UC_ERR_OK;
945
0
    } else {
946
0
        restore_jit_state(uc);
947
0
        return UC_ERR_READ_UNMAPPED;
948
0
    }
949
0
}
950
951
UNICORN_EXPORT
952
uc_err uc_mem_write(uc_engine *uc, uint64_t address, const void *_bytes,
953
                    uint64_t size)
954
206k
{
955
206k
    uint64_t count = 0, len;
956
206k
    const uint8_t *bytes = _bytes;
957
958
206k
    UC_INIT(uc);
959
960
206k
    if (!check_mem_area(uc, address, size)) {
961
0
        restore_jit_state(uc);
962
0
        return UC_ERR_WRITE_UNMAPPED;
963
0
    }
964
965
    // memory area can overlap adjacent memory blocks
966
413k
    while (count < size) {
967
206k
        MemoryRegion *mr = uc->memory_mapping(uc, address);
968
206k
        if (mr) {
969
206k
            uint32_t operms = mr->perms;
970
206k
            uint64_t align = uc->target_page_align;
971
206k
            if (!(operms & UC_PROT_WRITE)) { // write protected
972
                // but this is not the program accessing memory, so temporarily
973
                // mark writable
974
0
                uc->readonly_mem(mr, false);
975
0
            }
976
977
206k
            len = memory_region_len(uc, mr, address, size - count);
978
206k
            if (uc->snapshot_level && uc->snapshot_level > mr->priority) {
979
0
                mr = uc->memory_cow(uc, mr, address & ~align,
980
0
                                    (len + (address & align) + align) & ~align);
981
0
                if (!mr) {
982
0
                    return UC_ERR_NOMEM;
983
0
                }
984
0
            }
985
206k
            if (uc->write_mem(&uc->address_space_memory, address, bytes, len) ==
986
206k
                false) {
987
0
                break;
988
0
            }
989
990
206k
            if (!(operms & UC_PROT_WRITE)) { // write protected
991
                // now write protect it again
992
0
                uc->readonly_mem(mr, true);
993
0
            }
994
995
206k
            count += len;
996
206k
            address += len;
997
206k
            bytes += len;
998
206k
        } else { // this address is not mapped in yet
999
0
            break;
1000
0
        }
1001
206k
    }
1002
1003
206k
    if (count == size) {
1004
206k
        restore_jit_state(uc);
1005
206k
        return UC_ERR_OK;
1006
206k
    } else {
1007
0
        restore_jit_state(uc);
1008
0
        return UC_ERR_WRITE_UNMAPPED;
1009
0
    }
1010
206k
}
1011
1012
0
#define TIMEOUT_STEP 2 // microseconds
1013
static void *_timeout_fn(void *arg)
1014
0
{
1015
0
    struct uc_struct *uc = arg;
1016
0
    int64_t current_time = get_clock();
1017
1018
0
    do {
1019
0
        usleep(TIMEOUT_STEP);
1020
        // perhaps emulation is even done before timeout?
1021
0
        if (uc->emulation_done) {
1022
0
            break;
1023
0
        }
1024
0
    } while ((uint64_t)(get_clock() - current_time) < uc->timeout);
1025
1026
    // timeout before emulation is done?
1027
0
    if (!uc->emulation_done) {
1028
0
        uc->timed_out = true;
1029
        // force emulation to stop
1030
0
        uc_emu_stop(uc);
1031
0
    }
1032
1033
0
    return NULL;
1034
0
}
1035
1036
static void enable_emu_timer(uc_engine *uc, uint64_t timeout)
1037
0
{
1038
0
    uc->timeout = timeout;
1039
0
    qemu_thread_create(uc, &uc->timer, "timeout", _timeout_fn, uc,
1040
0
                       QEMU_THREAD_JOINABLE);
1041
0
}
1042
1043
static void hook_count_cb(struct uc_struct *uc, uint64_t address, uint32_t size,
1044
                          void *user_data)
1045
370M
{
1046
    // count this instruction. ah ah ah.
1047
370M
    uc->emu_counter++;
1048
    // printf(":: emu counter = %u, at %lx\n", uc->emu_counter, address);
1049
1050
370M
    if (uc->emu_counter > uc->emu_count) {
1051
        // printf(":: emu counter = %u, stop emulation\n", uc->emu_counter);
1052
5.48M
        uc_emu_stop(uc);
1053
5.48M
    }
1054
370M
}
1055
1056
static void clear_deleted_hooks(uc_engine *uc)
1057
413k
{
1058
413k
    struct list_item *cur;
1059
413k
    struct hook *hook;
1060
413k
    int i;
1061
1062
413k
    for (cur = uc->hooks_to_del.head;
1063
413k
         cur != NULL && (hook = (struct hook *)cur->data); cur = cur->next) {
1064
0
        assert(hook->to_delete);
1065
0
        for (i = 0; i < UC_HOOK_MAX; i++) {
1066
0
            if (list_remove(&uc->hook[i], (void *)hook)) {
1067
0
                break;
1068
0
            }
1069
0
        }
1070
0
    }
1071
1072
413k
    list_clear(&uc->hooks_to_del);
1073
413k
}
1074
1075
UNICORN_EXPORT
1076
uc_err uc_emu_start(uc_engine *uc, uint64_t begin, uint64_t until,
1077
                    uint64_t timeout, size_t count)
1078
206k
{
1079
206k
    uc_err err;
1080
1081
    // reset the counter
1082
206k
    uc->emu_counter = 0;
1083
206k
    uc->invalid_error = UC_ERR_OK;
1084
206k
    uc->emulation_done = false;
1085
206k
    uc->size_recur_mem = 0;
1086
206k
    uc->timed_out = false;
1087
206k
    uc->first_tb = true;
1088
1089
    // Avoid nested uc_emu_start saves wrong jit states.
1090
206k
    if (uc->nested_level == 0) {
1091
206k
        UC_INIT(uc);
1092
206k
    }
1093
1094
    // Advance the nested levels. We must decrease the level count by one when
1095
    // we return from uc_emu_start.
1096
206k
    if (uc->nested_level >= UC_MAX_NESTED_LEVEL) {
1097
        // We can't support so many nested levels.
1098
0
        return UC_ERR_RESOURCE;
1099
0
    }
1100
206k
    uc->nested_level++;
1101
1102
206k
    uint32_t begin_pc32 = READ_DWORD(begin);
1103
206k
    switch (uc->arch) {
1104
0
    default:
1105
0
        break;
1106
0
#ifdef UNICORN_HAS_M68K
1107
4.07k
    case UC_ARCH_M68K:
1108
4.07k
        uc_reg_write(uc, UC_M68K_REG_PC, &begin_pc32);
1109
4.07k
        break;
1110
0
#endif
1111
0
#ifdef UNICORN_HAS_X86
1112
32.3k
    case UC_ARCH_X86:
1113
32.3k
        switch (uc->mode) {
1114
0
        default:
1115
0
            break;
1116
74
        case UC_MODE_16: {
1117
74
            uint16_t ip;
1118
74
            uint16_t cs;
1119
1120
74
            uc_reg_read(uc, UC_X86_REG_CS, &cs);
1121
            // compensate for later adding up IP & CS
1122
74
            ip = begin - cs * 16;
1123
74
            uc_reg_write(uc, UC_X86_REG_IP, &ip);
1124
74
            break;
1125
0
        }
1126
11.5k
        case UC_MODE_32:
1127
11.5k
            uc_reg_write(uc, UC_X86_REG_EIP, &begin_pc32);
1128
11.5k
            break;
1129
20.6k
        case UC_MODE_64:
1130
20.6k
            uc_reg_write(uc, UC_X86_REG_RIP, &begin);
1131
20.6k
            break;
1132
32.3k
        }
1133
32.3k
        break;
1134
32.3k
#endif
1135
32.3k
#ifdef UNICORN_HAS_ARM
1136
54.3k
    case UC_ARCH_ARM:
1137
54.3k
        uc_reg_write(uc, UC_ARM_REG_R15, &begin_pc32);
1138
54.3k
        break;
1139
0
#endif
1140
0
#ifdef UNICORN_HAS_ARM64
1141
87.6k
    case UC_ARCH_ARM64:
1142
87.6k
        uc_reg_write(uc, UC_ARM64_REG_PC, &begin);
1143
87.6k
        break;
1144
0
#endif
1145
0
#ifdef UNICORN_HAS_MIPS
1146
13.9k
    case UC_ARCH_MIPS:
1147
13.9k
        if (uc->mode & UC_MODE_MIPS64) {
1148
0
            uc_reg_write(uc, UC_MIPS_REG_PC, &begin);
1149
13.9k
        } else {
1150
13.9k
            uc_reg_write(uc, UC_MIPS_REG_PC, &begin_pc32);
1151
13.9k
        }
1152
13.9k
        break;
1153
0
#endif
1154
0
#ifdef UNICORN_HAS_SPARC
1155
39
    case UC_ARCH_SPARC:
1156
        // TODO: Sparc/Sparc64
1157
39
        uc_reg_write(uc, UC_SPARC_REG_PC, &begin);
1158
39
        break;
1159
0
#endif
1160
0
#ifdef UNICORN_HAS_PPC
1161
0
    case UC_ARCH_PPC:
1162
0
        if (uc->mode & UC_MODE_PPC64) {
1163
0
            uc_reg_write(uc, UC_PPC_REG_PC, &begin);
1164
0
        } else {
1165
0
            uc_reg_write(uc, UC_PPC_REG_PC, &begin_pc32);
1166
0
        }
1167
0
        break;
1168
0
#endif
1169
0
#ifdef UNICORN_HAS_RISCV
1170
0
    case UC_ARCH_RISCV:
1171
0
        if (uc->mode & UC_MODE_RISCV64) {
1172
0
            uc_reg_write(uc, UC_RISCV_REG_PC, &begin);
1173
0
        } else {
1174
0
            uc_reg_write(uc, UC_RISCV_REG_PC, &begin_pc32);
1175
0
        }
1176
0
        break;
1177
0
#endif
1178
0
#ifdef UNICORN_HAS_S390X
1179
14.3k
    case UC_ARCH_S390X:
1180
14.3k
        uc_reg_write(uc, UC_S390X_REG_PC, &begin);
1181
14.3k
        break;
1182
0
#endif
1183
0
#ifdef UNICORN_HAS_TRICORE
1184
0
    case UC_ARCH_TRICORE:
1185
0
        uc_reg_write(uc, UC_TRICORE_REG_PC, &begin_pc32);
1186
0
        break;
1187
206k
#endif
1188
206k
    }
1189
206k
    uc->skip_sync_pc_on_exit = false;
1190
206k
    uc->stop_request = false;
1191
1192
206k
    uc->emu_count = count;
1193
    // remove count hook if counting isn't necessary
1194
206k
    if (count <= 0 && uc->count_hook != 0) {
1195
0
        uc_hook_del(uc, uc->count_hook);
1196
0
        uc->count_hook = 0;
1197
1198
        // In this case, we have to drop all translated blocks.
1199
0
        uc->tb_flush(uc);
1200
0
    }
1201
    // set up count hook to count instructions.
1202
206k
    if (count > 0 && uc->count_hook == 0) {
1203
206k
        uc_err err;
1204
        // callback to count instructions must be run before everything else,
1205
        // so instead of appending, we must insert the hook at the begin
1206
        // of the hook list
1207
206k
        uc->hook_insert = 1;
1208
206k
        err = uc_hook_add(uc, &uc->count_hook, UC_HOOK_CODE, hook_count_cb,
1209
206k
                          NULL, 1, 0);
1210
        // restore to append mode for uc_hook_add()
1211
206k
        uc->hook_insert = 0;
1212
206k
        if (err != UC_ERR_OK) {
1213
0
            uc->nested_level--;
1214
0
            return err;
1215
0
        }
1216
206k
    }
1217
1218
    // If UC_CTL_UC_USE_EXITS is set, then the @until param won't have any
1219
    // effect. This is designed for the backward compatibility.
1220
206k
    if (!uc->use_exits) {
1221
206k
        uc->exits[uc->nested_level - 1] = until;
1222
206k
    }
1223
1224
206k
    if (timeout) {
1225
0
        enable_emu_timer(uc, timeout * 1000); // microseconds -> nanoseconds
1226
0
    }
1227
1228
206k
    uc->vm_start(uc);
1229
1230
206k
    uc->nested_level--;
1231
1232
    // emulation is done if and only if we exit the outer uc_emu_start
1233
    // or we may lost uc_emu_stop
1234
206k
    if (uc->nested_level == 0) {
1235
206k
        uc->emulation_done = true;
1236
1237
        // remove hooks to delete
1238
        // make sure we delete all hooks at the first level.
1239
206k
        clear_deleted_hooks(uc);
1240
1241
206k
        restore_jit_state(uc);
1242
206k
    }
1243
1244
206k
    if (timeout) {
1245
        // wait for the timer to finish
1246
0
        qemu_thread_join(&uc->timer);
1247
0
    }
1248
1249
    // We may be in a nested uc_emu_start and thus clear invalid_error
1250
    // once we are done.
1251
206k
    err = uc->invalid_error;
1252
206k
    uc->invalid_error = 0;
1253
206k
    return err;
1254
206k
}
1255
1256
UNICORN_EXPORT
1257
uc_err uc_emu_stop(uc_engine *uc)
1258
5.48M
{
1259
5.48M
    UC_INIT(uc);
1260
5.48M
    uc->stop_request = true;
1261
5.48M
    uc_err err = break_translation_loop(uc);
1262
5.48M
    restore_jit_state(uc);
1263
5.48M
    return err;
1264
5.48M
}
1265
1266
// return target index where a memory region at the address exists, or could be
1267
// inserted
1268
//
1269
// address either is inside the mapping at the returned index, or is in free
1270
// space before the next mapping.
1271
//
1272
// if there is overlap, between regions, ending address will be higher than the
1273
// starting address of the mapping at returned index
1274
static int bsearch_mapped_blocks(const uc_engine *uc, uint64_t address)
1275
413k
{
1276
413k
    int left, right, mid;
1277
413k
    MemoryRegion *mapping;
1278
1279
413k
    left = 0;
1280
413k
    right = uc->mapped_block_count;
1281
1282
413k
    while (left < right) {
1283
0
        mid = left + (right - left) / 2;
1284
1285
0
        mapping = uc->mapped_blocks[mid];
1286
1287
0
        if (mapping->end - 1 < address) {
1288
0
            left = mid + 1;
1289
0
        } else if (mapping->addr > address) {
1290
0
            right = mid;
1291
0
        } else {
1292
0
            return mid;
1293
0
        }
1294
0
    }
1295
1296
413k
    return left;
1297
413k
}
1298
1299
// find if a memory range overlaps with existing mapped regions
1300
static bool memory_overlap(struct uc_struct *uc, uint64_t begin, size_t size)
1301
206k
{
1302
206k
    unsigned int i;
1303
206k
    uint64_t end = begin + size - 1;
1304
1305
206k
    i = bsearch_mapped_blocks(uc, begin);
1306
1307
    // is this the highest region with no possible overlap?
1308
206k
    if (i >= uc->mapped_block_count)
1309
206k
        return false;
1310
1311
    // end address overlaps this region?
1312
0
    if (end >= uc->mapped_blocks[i]->addr)
1313
0
        return true;
1314
1315
    // not found
1316
1317
0
    return false;
1318
0
}
1319
1320
// common setup/error checking shared between uc_mem_map and uc_mem_map_ptr
1321
static uc_err mem_map(uc_engine *uc, MemoryRegion *block)
1322
206k
{
1323
1324
206k
    MemoryRegion **regions;
1325
206k
    int pos;
1326
1327
206k
    if (block == NULL) {
1328
0
        return UC_ERR_NOMEM;
1329
0
    }
1330
1331
206k
    if ((uc->mapped_block_count & (MEM_BLOCK_INCR - 1)) == 0) { // time to grow
1332
206k
        regions = (MemoryRegion **)g_realloc(
1333
206k
            uc->mapped_blocks,
1334
206k
            sizeof(MemoryRegion *) * (uc->mapped_block_count + MEM_BLOCK_INCR));
1335
206k
        if (regions == NULL) {
1336
0
            return UC_ERR_NOMEM;
1337
0
        }
1338
206k
        uc->mapped_blocks = regions;
1339
206k
    }
1340
1341
206k
    pos = bsearch_mapped_blocks(uc, block->addr);
1342
1343
    // shift the array right to give space for the new pointer
1344
206k
    memmove(&uc->mapped_blocks[pos + 1], &uc->mapped_blocks[pos],
1345
206k
            sizeof(MemoryRegion *) * (uc->mapped_block_count - pos));
1346
1347
206k
    uc->mapped_blocks[pos] = block;
1348
206k
    uc->mapped_block_count++;
1349
1350
206k
    return UC_ERR_OK;
1351
206k
}
1352
1353
static uc_err mem_map_check(uc_engine *uc, uint64_t address, uint64_t size,
1354
                            uint32_t perms)
1355
206k
{
1356
206k
    if (size == 0) {
1357
        // invalid memory mapping
1358
0
        return UC_ERR_ARG;
1359
0
    }
1360
1361
    // address cannot wrap around
1362
206k
    if (address + size - 1 < address) {
1363
0
        return UC_ERR_ARG;
1364
0
    }
1365
1366
    // address must be aligned to uc->target_page_size
1367
206k
    if ((address & uc->target_page_align) != 0) {
1368
0
        return UC_ERR_ARG;
1369
0
    }
1370
1371
    // size must be multiple of uc->target_page_size
1372
206k
    if ((size & uc->target_page_align) != 0) {
1373
0
        return UC_ERR_ARG;
1374
0
    }
1375
1376
    // check for only valid permissions
1377
206k
    if ((perms & ~UC_PROT_ALL) != 0) {
1378
0
        return UC_ERR_ARG;
1379
0
    }
1380
1381
    // this area overlaps existing mapped regions?
1382
206k
    if (memory_overlap(uc, address, size)) {
1383
0
        return UC_ERR_MAP;
1384
0
    }
1385
1386
206k
    return UC_ERR_OK;
1387
206k
}
1388
1389
UNICORN_EXPORT
1390
uc_err uc_mem_map(uc_engine *uc, uint64_t address, uint64_t size,
1391
                  uint32_t perms)
1392
206k
{
1393
206k
    uc_err res;
1394
1395
206k
    UC_INIT(uc);
1396
1397
206k
    res = mem_map_check(uc, address, size, perms);
1398
206k
    if (res) {
1399
0
        restore_jit_state(uc);
1400
0
        return res;
1401
0
    }
1402
1403
206k
    res = mem_map(uc, uc->memory_map(uc, address, size, perms));
1404
206k
    restore_jit_state(uc);
1405
206k
    return res;
1406
206k
}
1407
1408
UNICORN_EXPORT
1409
uc_err uc_mem_map_ptr(uc_engine *uc, uint64_t address, uint64_t size,
1410
                      uint32_t perms, void *ptr)
1411
0
{
1412
0
    uc_err res;
1413
1414
0
    UC_INIT(uc);
1415
1416
0
    if (ptr == NULL) {
1417
0
        restore_jit_state(uc);
1418
0
        return UC_ERR_ARG;
1419
0
    }
1420
1421
0
    res = mem_map_check(uc, address, size, perms);
1422
0
    if (res) {
1423
0
        restore_jit_state(uc);
1424
0
        return res;
1425
0
    }
1426
1427
0
    res = mem_map(uc, uc->memory_map_ptr(uc, address, size, perms, ptr));
1428
0
    restore_jit_state(uc);
1429
0
    return res;
1430
0
}
1431
1432
UNICORN_EXPORT
1433
uc_err uc_mmio_map(uc_engine *uc, uint64_t address, uint64_t size,
1434
                   uc_cb_mmio_read_t read_cb, void *user_data_read,
1435
                   uc_cb_mmio_write_t write_cb, void *user_data_write)
1436
0
{
1437
0
    uc_err res;
1438
1439
0
    UC_INIT(uc);
1440
1441
0
    res = mem_map_check(uc, address, size, UC_PROT_ALL);
1442
0
    if (res) {
1443
0
        restore_jit_state(uc);
1444
0
        return res;
1445
0
    }
1446
1447
    // The callbacks do not need to be checked for NULL here, as their presence
1448
    // (or lack thereof) will determine the permissions used.
1449
0
    res = mem_map(uc, uc->memory_map_io(uc, address, size, read_cb, write_cb,
1450
0
                                        user_data_read, user_data_write));
1451
0
    restore_jit_state(uc);
1452
0
    return res;
1453
0
}
1454
1455
// Create a backup copy of the indicated MemoryRegion.
1456
// Generally used in prepartion for splitting a MemoryRegion.
1457
static uint8_t *copy_region(struct uc_struct *uc, MemoryRegion *mr)
1458
0
{
1459
0
    uint8_t *block = (uint8_t *)g_malloc0((uint64_t)int128_get64(mr->size));
1460
0
    if (block != NULL) {
1461
0
        uc_err err =
1462
0
            uc_mem_read(uc, mr->addr, block, (uint64_t)int128_get64(mr->size));
1463
0
        if (err != UC_ERR_OK) {
1464
0
            free(block);
1465
0
            block = NULL;
1466
0
        }
1467
0
    }
1468
1469
0
    return block;
1470
0
}
1471
1472
/*
1473
    This function is similar to split_region, but for MMIO memory.
1474
1475
    Note this function may be called recursively.
1476
*/
1477
static bool split_mmio_region(struct uc_struct *uc, MemoryRegion *mr,
1478
                              uint64_t address, uint64_t size, bool do_delete)
1479
0
{
1480
0
    uint64_t begin, end, chunk_end;
1481
0
    uint64_t l_size, r_size, m_size;
1482
0
    mmio_cbs backup;
1483
1484
0
    chunk_end = address + size;
1485
1486
    // This branch also break recursion.
1487
0
    if (address <= mr->addr && chunk_end >= mr->end) {
1488
0
        return true;
1489
0
    }
1490
1491
0
    if (size == 0) {
1492
0
        return false;
1493
0
    }
1494
1495
0
    begin = mr->addr;
1496
0
    end = mr->end;
1497
1498
0
    memcpy(&backup, mr->opaque, sizeof(mmio_cbs));
1499
1500
    /* overlapping cases
1501
     *               |------mr------|
1502
     * case 1    |---size--|            // Is it possible???
1503
     * case 2           |--size--|
1504
     * case 3                  |---size--|
1505
     */
1506
1507
    // unmap this region first, then do split it later
1508
0
    if (uc_mem_unmap(uc, mr->addr, (uint64_t)int128_get64(mr->size)) !=
1509
0
        UC_ERR_OK) {
1510
0
        return false;
1511
0
    }
1512
1513
    // adjust some things
1514
0
    if (address < begin) {
1515
0
        address = begin;
1516
0
    }
1517
0
    if (chunk_end > end) {
1518
0
        chunk_end = end;
1519
0
    }
1520
1521
    // compute sub region sizes
1522
0
    l_size = (uint64_t)(address - begin);
1523
0
    r_size = (uint64_t)(end - chunk_end);
1524
0
    m_size = (uint64_t)(chunk_end - address);
1525
1526
0
    if (l_size > 0) {
1527
0
        if (uc_mmio_map(uc, begin, l_size, backup.read, backup.user_data_read,
1528
0
                        backup.write, backup.user_data_write) != UC_ERR_OK) {
1529
0
            return false;
1530
0
        }
1531
0
    }
1532
1533
0
    if (m_size > 0 && !do_delete) {
1534
0
        if (uc_mmio_map(uc, address, m_size, backup.read, backup.user_data_read,
1535
0
                        backup.write, backup.user_data_write) != UC_ERR_OK) {
1536
0
            return false;
1537
0
        }
1538
0
    }
1539
1540
0
    if (r_size > 0) {
1541
0
        if (uc_mmio_map(uc, chunk_end, r_size, backup.read,
1542
0
                        backup.user_data_read, backup.write,
1543
0
                        backup.user_data_write) != UC_ERR_OK) {
1544
0
            return false;
1545
0
        }
1546
0
    }
1547
1548
0
    return true;
1549
0
}
1550
1551
/*
1552
   Split the given MemoryRegion at the indicated address for the indicated size
1553
   this may result in the create of up to 3 spanning sections. If the delete
1554
   parameter is true, the no new section will be created to replace the indicate
1555
   range. This functions exists to support uc_mem_protect and uc_mem_unmap.
1556
1557
   This is a static function and callers have already done some preliminary
1558
   parameter validation.
1559
1560
   The do_delete argument indicates that we are being called to support
1561
   uc_mem_unmap. In this case we save some time by choosing NOT to remap
1562
   the areas that are intended to get unmapped
1563
 */
1564
// TODO: investigate whether qemu region manipulation functions already offered
1565
// this capability
1566
static bool split_region(struct uc_struct *uc, MemoryRegion *mr,
1567
                         uint64_t address, uint64_t size, bool do_delete)
1568
0
{
1569
0
    uint8_t *backup;
1570
0
    uint32_t perms;
1571
0
    uint64_t begin, end, chunk_end;
1572
0
    uint64_t l_size, m_size, r_size;
1573
0
    RAMBlock *block = NULL;
1574
0
    bool prealloc = false;
1575
1576
0
    chunk_end = address + size;
1577
1578
    // if this region belongs to area [address, address+size],
1579
    // then there is no work to do.
1580
0
    if (address <= mr->addr && chunk_end >= mr->end) {
1581
0
        return true;
1582
0
    }
1583
1584
0
    if (size == 0) {
1585
        // trivial case
1586
0
        return true;
1587
0
    }
1588
1589
0
    if (address >= mr->end || chunk_end <= mr->addr) {
1590
        // impossible case
1591
0
        return false;
1592
0
    }
1593
1594
    // Find the correct and large enough (which contains our target mr)
1595
    // to create the content backup.
1596
0
    block = mr->ram_block;
1597
1598
0
    if (block == NULL) {
1599
0
        return false;
1600
0
    }
1601
1602
    // RAM_PREALLOC is not defined outside exec.c and I didn't feel like
1603
    // moving it
1604
0
    prealloc = !!(block->flags & 1);
1605
1606
0
    if (block->flags & 1) {
1607
0
        backup = block->host;
1608
0
    } else {
1609
0
        backup = copy_region(uc, mr);
1610
0
        if (backup == NULL) {
1611
0
            return false;
1612
0
        }
1613
0
    }
1614
1615
    // save the essential information required for the split before mr gets
1616
    // deleted
1617
0
    perms = mr->perms;
1618
0
    begin = mr->addr;
1619
0
    end = mr->end;
1620
1621
    // unmap this region first, then do split it later
1622
0
    if (uc_mem_unmap(uc, mr->addr, (uint64_t)int128_get64(mr->size)) !=
1623
0
        UC_ERR_OK) {
1624
0
        goto error;
1625
0
    }
1626
1627
    /* overlapping cases
1628
     *               |------mr------|
1629
     * case 1    |---size--|
1630
     * case 2           |--size--|
1631
     * case 3                  |---size--|
1632
     */
1633
1634
    // adjust some things
1635
0
    if (address < begin) {
1636
0
        address = begin;
1637
0
    }
1638
0
    if (chunk_end > end) {
1639
0
        chunk_end = end;
1640
0
    }
1641
1642
    // compute sub region sizes
1643
0
    l_size = (uint64_t)(address - begin);
1644
0
    r_size = (uint64_t)(end - chunk_end);
1645
0
    m_size = (uint64_t)(chunk_end - address);
1646
1647
    // If there are error in any of the below operations, things are too far
1648
    // gone at that point to recover. Could try to remap orignal region, but
1649
    // these smaller allocation just failed so no guarantee that we can recover
1650
    // the original allocation at this point
1651
0
    if (l_size > 0) {
1652
0
        if (!prealloc) {
1653
0
            if (uc_mem_map(uc, begin, l_size, perms) != UC_ERR_OK) {
1654
0
                goto error;
1655
0
            }
1656
0
            if (uc_mem_write(uc, begin, backup, l_size) != UC_ERR_OK) {
1657
0
                goto error;
1658
0
            }
1659
0
        } else {
1660
0
            if (uc_mem_map_ptr(uc, begin, l_size, perms, backup) != UC_ERR_OK) {
1661
0
                goto error;
1662
0
            }
1663
0
        }
1664
0
    }
1665
1666
0
    if (m_size > 0 && !do_delete) {
1667
0
        if (!prealloc) {
1668
0
            if (uc_mem_map(uc, address, m_size, perms) != UC_ERR_OK) {
1669
0
                goto error;
1670
0
            }
1671
0
            if (uc_mem_write(uc, address, backup + l_size, m_size) !=
1672
0
                UC_ERR_OK) {
1673
0
                goto error;
1674
0
            }
1675
0
        } else {
1676
0
            if (uc_mem_map_ptr(uc, address, m_size, perms, backup + l_size) !=
1677
0
                UC_ERR_OK) {
1678
0
                goto error;
1679
0
            }
1680
0
        }
1681
0
    }
1682
1683
0
    if (r_size > 0) {
1684
0
        if (!prealloc) {
1685
0
            if (uc_mem_map(uc, chunk_end, r_size, perms) != UC_ERR_OK) {
1686
0
                goto error;
1687
0
            }
1688
0
            if (uc_mem_write(uc, chunk_end, backup + l_size + m_size, r_size) !=
1689
0
                UC_ERR_OK) {
1690
0
                goto error;
1691
0
            }
1692
0
        } else {
1693
0
            if (uc_mem_map_ptr(uc, chunk_end, r_size, perms,
1694
0
                               backup + l_size + m_size) != UC_ERR_OK) {
1695
0
                goto error;
1696
0
            }
1697
0
        }
1698
0
    }
1699
1700
0
    if (!prealloc) {
1701
0
        free(backup);
1702
0
    }
1703
0
    return true;
1704
1705
0
error:
1706
0
    if (!prealloc) {
1707
0
        free(backup);
1708
0
    }
1709
0
    return false;
1710
0
}
1711
1712
UNICORN_EXPORT
1713
uc_err uc_mem_protect(struct uc_struct *uc, uint64_t address, uint64_t size,
1714
                      uint32_t perms)
1715
0
{
1716
0
    MemoryRegion *mr;
1717
0
    uint64_t addr = address;
1718
0
    uint64_t pc;
1719
0
    uint64_t count, len;
1720
0
    bool remove_exec = false;
1721
1722
0
    UC_INIT(uc);
1723
1724
    // snapshot and protection can't be mixed
1725
0
    if (uc->snapshot_level > 0) {
1726
0
        restore_jit_state(uc);
1727
0
        return UC_ERR_ARG;
1728
0
    }
1729
1730
0
    if (size == 0) {
1731
        // trivial case, no change
1732
0
        restore_jit_state(uc);
1733
0
        return UC_ERR_OK;
1734
0
    }
1735
1736
    // address must be aligned to uc->target_page_size
1737
0
    if ((address & uc->target_page_align) != 0) {
1738
0
        restore_jit_state(uc);
1739
0
        return UC_ERR_ARG;
1740
0
    }
1741
1742
    // size must be multiple of uc->target_page_size
1743
0
    if ((size & uc->target_page_align) != 0) {
1744
0
        restore_jit_state(uc);
1745
0
        return UC_ERR_ARG;
1746
0
    }
1747
1748
    // check for only valid permissions
1749
0
    if ((perms & ~UC_PROT_ALL) != 0) {
1750
0
        restore_jit_state(uc);
1751
0
        return UC_ERR_ARG;
1752
0
    }
1753
1754
    // check that user's entire requested block is mapped
1755
    // TODO check if protected is possible
1756
    // deny after cow
1757
0
    if (!check_mem_area(uc, address, size)) {
1758
0
        restore_jit_state(uc);
1759
0
        return UC_ERR_NOMEM;
1760
0
    }
1761
1762
    // Now we know entire region is mapped, so change permissions
1763
    // We may need to split regions if this area spans adjacent regions
1764
0
    addr = address;
1765
0
    count = 0;
1766
0
    while (count < size) {
1767
0
        mr = uc->memory_mapping(uc, addr);
1768
0
        len = memory_region_len(uc, mr, addr, size - count);
1769
0
        if (mr->ram) {
1770
0
            if (!split_region(uc, mr, addr, len, false)) {
1771
0
                restore_jit_state(uc);
1772
0
                return UC_ERR_NOMEM;
1773
0
            }
1774
1775
0
            mr = uc->memory_mapping(uc, addr);
1776
            // will this remove EXEC permission?
1777
0
            if (((mr->perms & UC_PROT_EXEC) != 0) &&
1778
0
                ((perms & UC_PROT_EXEC) == 0)) {
1779
0
                remove_exec = true;
1780
0
            }
1781
0
            mr->perms = perms;
1782
0
            uc->readonly_mem(mr, (perms & UC_PROT_WRITE) == 0);
1783
1784
0
        } else {
1785
0
            if (!split_mmio_region(uc, mr, addr, len, false)) {
1786
0
                restore_jit_state(uc);
1787
0
                return UC_ERR_NOMEM;
1788
0
            }
1789
1790
0
            mr = uc->memory_mapping(uc, addr);
1791
0
            mr->perms = perms;
1792
0
        }
1793
1794
0
        count += len;
1795
0
        addr += len;
1796
0
    }
1797
1798
    // if EXEC permission is removed, then quit TB and continue at the same
1799
    // place
1800
0
    if (remove_exec) {
1801
0
        pc = uc->get_pc(uc);
1802
0
        if (pc < address + size && pc >= address) {
1803
0
            uc->quit_request = true;
1804
0
            uc_emu_stop(uc);
1805
0
        }
1806
0
    }
1807
1808
0
    restore_jit_state(uc);
1809
0
    return UC_ERR_OK;
1810
0
}
1811
1812
static uc_err uc_mem_unmap_snapshot(struct uc_struct *uc, uint64_t address,
1813
                                    uint64_t size, MemoryRegion **ret)
1814
0
{
1815
0
    MemoryRegion *mr;
1816
1817
0
    mr = uc->memory_mapping(uc, address);
1818
0
    while (mr->container != uc->system_memory) {
1819
0
        mr = mr->container;
1820
0
    }
1821
1822
0
    if (mr->addr != address || int128_get64(mr->size) != size) {
1823
0
        return UC_ERR_ARG;
1824
0
    }
1825
1826
0
    if (ret) {
1827
0
        *ret = mr;
1828
0
    }
1829
1830
0
    uc->memory_moveout(uc, mr);
1831
1832
0
    return UC_ERR_OK;
1833
0
}
1834
1835
UNICORN_EXPORT
1836
uc_err uc_mem_unmap(struct uc_struct *uc, uint64_t address, uint64_t size)
1837
0
{
1838
0
    MemoryRegion *mr;
1839
0
    uint64_t addr;
1840
0
    uint64_t count, len;
1841
1842
0
    UC_INIT(uc);
1843
1844
0
    if (size == 0) {
1845
        // nothing to unmap
1846
0
        restore_jit_state(uc);
1847
0
        return UC_ERR_OK;
1848
0
    }
1849
1850
    // address must be aligned to uc->target_page_size
1851
0
    if ((address & uc->target_page_align) != 0) {
1852
0
        restore_jit_state(uc);
1853
0
        return UC_ERR_ARG;
1854
0
    }
1855
1856
    // size must be multiple of uc->target_page_size
1857
0
    if ((size & uc->target_page_align) != 0) {
1858
0
        restore_jit_state(uc);
1859
0
        return UC_ERR_ARG;
1860
0
    }
1861
1862
    // check that user's entire requested block is mapped
1863
0
    if (!check_mem_area(uc, address, size)) {
1864
0
        restore_jit_state(uc);
1865
0
        return UC_ERR_NOMEM;
1866
0
    }
1867
1868
0
    if (uc->snapshot_level > 0) {
1869
0
        uc_err res = uc_mem_unmap_snapshot(uc, address, size, NULL);
1870
0
        restore_jit_state(uc);
1871
0
        return res;
1872
0
    }
1873
1874
    // Now we know entire region is mapped, so do the unmap
1875
    // We may need to split regions if this area spans adjacent regions
1876
0
    addr = address;
1877
0
    count = 0;
1878
0
    while (count < size) {
1879
0
        mr = uc->memory_mapping(uc, addr);
1880
0
        len = memory_region_len(uc, mr, addr, size - count);
1881
0
        if (!mr->ram) {
1882
0
            if (!split_mmio_region(uc, mr, addr, len, true)) {
1883
0
                restore_jit_state(uc);
1884
0
                return UC_ERR_NOMEM;
1885
0
            }
1886
0
        } else {
1887
0
            if (!split_region(uc, mr, addr, len, true)) {
1888
0
                restore_jit_state(uc);
1889
0
                return UC_ERR_NOMEM;
1890
0
            }
1891
0
        }
1892
1893
        // if we can retrieve the mapping, then no splitting took place
1894
        // so unmap here
1895
0
        mr = uc->memory_mapping(uc, addr);
1896
0
        if (mr != NULL) {
1897
0
            uc->memory_unmap(uc, mr);
1898
0
        }
1899
0
        count += len;
1900
0
        addr += len;
1901
0
    }
1902
1903
0
    restore_jit_state(uc);
1904
0
    return UC_ERR_OK;
1905
0
}
1906
1907
UNICORN_EXPORT
1908
uc_err uc_hook_add(uc_engine *uc, uc_hook *hh, int type, void *callback,
1909
                   void *user_data, uint64_t begin, uint64_t end, ...)
1910
206k
{
1911
206k
    int ret = UC_ERR_OK;
1912
206k
    int i = 0;
1913
1914
206k
    UC_INIT(uc);
1915
1916
206k
    struct hook *hook = calloc(1, sizeof(struct hook));
1917
206k
    if (hook == NULL) {
1918
0
        restore_jit_state(uc);
1919
0
        return UC_ERR_NOMEM;
1920
0
    }
1921
1922
206k
    hook->begin = begin;
1923
206k
    hook->end = end;
1924
206k
    hook->type = type;
1925
206k
    hook->callback = callback;
1926
206k
    hook->user_data = user_data;
1927
206k
    hook->refs = 0;
1928
206k
    hook->to_delete = false;
1929
206k
    hook->hooked_regions = g_hash_table_new_full(
1930
206k
        hooked_regions_hash, hooked_regions_equal, g_free, NULL);
1931
206k
    *hh = (uc_hook)hook;
1932
1933
    // UC_HOOK_INSN has an extra argument for instruction ID
1934
206k
    if (type & UC_HOOK_INSN) {
1935
0
        va_list valist;
1936
1937
0
        va_start(valist, end);
1938
0
        hook->insn = va_arg(valist, int);
1939
0
        va_end(valist);
1940
1941
0
        if (uc->insn_hook_validate) {
1942
0
            if (!uc->insn_hook_validate(hook->insn)) {
1943
0
                free(hook);
1944
0
                restore_jit_state(uc);
1945
0
                return UC_ERR_HOOK;
1946
0
            }
1947
0
        }
1948
1949
0
        if (uc->hook_insert) {
1950
0
            if (hook_insert(&uc->hook[UC_HOOK_INSN_IDX], hook) == NULL) {
1951
0
                free(hook);
1952
0
                restore_jit_state(uc);
1953
0
                return UC_ERR_NOMEM;
1954
0
            }
1955
0
        } else {
1956
0
            if (hook_append(&uc->hook[UC_HOOK_INSN_IDX], hook) == NULL) {
1957
0
                free(hook);
1958
0
                restore_jit_state(uc);
1959
0
                return UC_ERR_NOMEM;
1960
0
            }
1961
0
        }
1962
1963
0
        uc->hooks_count[UC_HOOK_INSN_IDX]++;
1964
0
        restore_jit_state(uc);
1965
0
        return UC_ERR_OK;
1966
0
    }
1967
1968
206k
    if (type & UC_HOOK_TCG_OPCODE) {
1969
0
        va_list valist;
1970
1971
0
        va_start(valist, end);
1972
0
        hook->op = va_arg(valist, int);
1973
0
        hook->op_flags = va_arg(valist, int);
1974
0
        va_end(valist);
1975
1976
0
        if (uc->opcode_hook_invalidate) {
1977
0
            if (!uc->opcode_hook_invalidate(hook->op, hook->op_flags)) {
1978
0
                free(hook);
1979
0
                restore_jit_state(uc);
1980
0
                return UC_ERR_HOOK;
1981
0
            }
1982
0
        }
1983
1984
0
        if (uc->hook_insert) {
1985
0
            if (hook_insert(&uc->hook[UC_HOOK_TCG_OPCODE_IDX], hook) == NULL) {
1986
0
                free(hook);
1987
0
                restore_jit_state(uc);
1988
0
                return UC_ERR_NOMEM;
1989
0
            }
1990
0
        } else {
1991
0
            if (hook_append(&uc->hook[UC_HOOK_TCG_OPCODE_IDX], hook) == NULL) {
1992
0
                free(hook);
1993
0
                restore_jit_state(uc);
1994
0
                return UC_ERR_NOMEM;
1995
0
            }
1996
0
        }
1997
1998
0
        uc->hooks_count[UC_HOOK_TCG_OPCODE_IDX]++;
1999
0
        return UC_ERR_OK;
2000
0
    }
2001
2002
826k
    while ((type >> i) > 0) {
2003
620k
        if ((type >> i) & 1) {
2004
            // TODO: invalid hook error?
2005
206k
            if (i < UC_HOOK_MAX) {
2006
206k
                if (uc->hook_insert) {
2007
206k
                    if (hook_insert(&uc->hook[i], hook) == NULL) {
2008
0
                        free(hook);
2009
0
                        restore_jit_state(uc);
2010
0
                        return UC_ERR_NOMEM;
2011
0
                    }
2012
206k
                } else {
2013
0
                    if (hook_append(&uc->hook[i], hook) == NULL) {
2014
0
                        free(hook);
2015
0
                        restore_jit_state(uc);
2016
0
                        return UC_ERR_NOMEM;
2017
0
                    }
2018
0
                }
2019
206k
                uc->hooks_count[i]++;
2020
206k
            }
2021
206k
        }
2022
620k
        i++;
2023
620k
    }
2024
2025
    // we didn't use the hook
2026
    // TODO: return an error?
2027
206k
    if (hook->refs == 0) {
2028
0
        free(hook);
2029
0
    }
2030
2031
206k
    restore_jit_state(uc);
2032
206k
    return ret;
2033
206k
}
2034
2035
UNICORN_EXPORT
2036
uc_err uc_hook_del(uc_engine *uc, uc_hook hh)
2037
0
{
2038
0
    int i;
2039
0
    struct hook *hook = (struct hook *)hh;
2040
2041
0
    UC_INIT(uc);
2042
2043
    // we can't dereference hook->type if hook is invalid
2044
    // so for now we need to iterate over all possible types to remove the hook
2045
    // which is less efficient
2046
    // an optimization would be to align the hook pointer
2047
    // and store the type mask in the hook pointer.
2048
0
    for (i = 0; i < UC_HOOK_MAX; i++) {
2049
0
        if (list_exists(&uc->hook[i], (void *)hook)) {
2050
0
            g_hash_table_foreach(hook->hooked_regions, hook_invalidate_region,
2051
0
                                 uc);
2052
0
            g_hash_table_remove_all(hook->hooked_regions);
2053
0
            hook->to_delete = true;
2054
0
            uc->hooks_count[i]--;
2055
0
            hook_append(&uc->hooks_to_del, hook);
2056
0
        }
2057
0
    }
2058
2059
0
    restore_jit_state(uc);
2060
0
    return UC_ERR_OK;
2061
0
}
2062
2063
// TCG helper
2064
// 2 arguments are enough for most opcodes. Load/Store needs 3 arguments but we
2065
// have memory hooks already. We may exceed the maximum arguments of a tcg
2066
// helper but that's easy to extend.
2067
void helper_uc_traceopcode(struct hook *hook, uint64_t arg1, uint64_t arg2,
2068
                           uint32_t size, void *handle, uint64_t address);
2069
void helper_uc_traceopcode(struct hook *hook, uint64_t arg1, uint64_t arg2,
2070
                           uint32_t size, void *handle, uint64_t address)
2071
0
{
2072
0
    struct uc_struct *uc = handle;
2073
2074
0
    if (unlikely(uc->stop_request)) {
2075
0
        return;
2076
0
    }
2077
2078
0
    if (unlikely(hook->to_delete)) {
2079
0
        return;
2080
0
    }
2081
2082
    // We did all checks in translation time.
2083
    //
2084
    // This could optimize the case that we have multiple hooks with different
2085
    // opcodes and have one callback per opcode. Note that the assumption don't
2086
    // hold in most cases for uc_tracecode.
2087
    //
2088
    // TODO: Shall we have a flag to allow users to control whether updating PC?
2089
0
    JIT_CALLBACK_GUARD(((uc_hook_tcg_op_2)hook->callback)(
2090
0
        uc, address, arg1, arg2, size, hook->user_data));
2091
2092
0
    if (unlikely(uc->stop_request)) {
2093
0
        return;
2094
0
    }
2095
0
}
2096
2097
void helper_uc_tracecode(int32_t size, uc_hook_idx index, void *handle,
2098
                         int64_t address);
2099
void helper_uc_tracecode(int32_t size, uc_hook_idx index, void *handle,
2100
                         int64_t address)
2101
12.8M
{
2102
12.8M
    struct uc_struct *uc = handle;
2103
12.8M
    struct list_item *cur;
2104
12.8M
    struct hook *hook;
2105
12.8M
    int hook_flags =
2106
12.8M
        index &
2107
12.8M
        UC_HOOK_FLAG_MASK; // The index here may contain additional flags. See
2108
                           // the comments of uc_hook_idx for details.
2109
    // bool not_allow_stop = (size & UC_HOOK_FLAG_NO_STOP) || (hook_flags &
2110
    // UC_HOOK_FLAG_NO_STOP);
2111
12.8M
    bool not_allow_stop = hook_flags & UC_HOOK_FLAG_NO_STOP;
2112
2113
12.8M
    index = index & UC_HOOK_IDX_MASK;
2114
    // // Like hook index, only low 6 bits of size is used for representing
2115
    // sizes. size = size & UC_HOOK_IDX_MASK;
2116
2117
    // This has been done in tcg code.
2118
    // sync PC in CPUArchState with address
2119
    // if (uc->set_pc) {
2120
    //     uc->set_pc(uc, address);
2121
    // }
2122
2123
    // the last callback may already asked to stop emulation
2124
12.8M
    if (uc->stop_request && !not_allow_stop) {
2125
0
        return;
2126
12.8M
    } else if (not_allow_stop && uc->stop_request) {
2127
0
        revert_uc_emu_stop(uc);
2128
0
    }
2129
2130
12.8M
    for (cur = uc->hook[index].head;
2131
25.6M
         cur != NULL && (hook = (struct hook *)cur->data); cur = cur->next) {
2132
12.8M
        if (hook->to_delete) {
2133
0
            continue;
2134
0
        }
2135
2136
        // on invalid block/instruction, call instruction counter (if enable),
2137
        // then quit
2138
12.8M
        if (size == 0) {
2139
0
            if (index == UC_HOOK_CODE_IDX && uc->count_hook) {
2140
                // this is the instruction counter (first hook in the list)
2141
0
                JIT_CALLBACK_GUARD(((uc_cb_hookcode_t)hook->callback)(
2142
0
                    uc, address, size, hook->user_data));
2143
0
            }
2144
2145
0
            return;
2146
0
        }
2147
2148
12.8M
        if (HOOK_BOUND_CHECK(hook, (uint64_t)address)) {
2149
12.8M
            JIT_CALLBACK_GUARD(((uc_cb_hookcode_t)hook->callback)(
2150
12.8M
                uc, address, size, hook->user_data));
2151
12.8M
        }
2152
2153
        // the last callback may already asked to stop emulation
2154
        // Unicorn:
2155
        //   In an ARM IT block, we behave like the emulation continues
2156
        //   normally. No check_exit_request is generated and the hooks are
2157
        //   triggered normally. In other words, the whole IT block is treated
2158
        //   as a single instruction.
2159
12.8M
        if (not_allow_stop && uc->stop_request) {
2160
5.39M
            revert_uc_emu_stop(uc);
2161
7.43M
        } else if (!not_allow_stop && uc->stop_request) {
2162
0
            break;
2163
0
        }
2164
12.8M
    }
2165
12.8M
}
2166
2167
UNICORN_EXPORT
2168
uc_err uc_mem_regions(uc_engine *uc, uc_mem_region **regions, uint32_t *count)
2169
0
{
2170
0
    uint32_t i;
2171
0
    uc_mem_region *r = NULL;
2172
2173
0
    UC_INIT(uc);
2174
2175
0
    *count = uc->mapped_block_count;
2176
2177
0
    if (*count) {
2178
0
        r = g_malloc0(*count * sizeof(uc_mem_region));
2179
0
        if (r == NULL) {
2180
            // out of memory
2181
0
            restore_jit_state(uc);
2182
0
            return UC_ERR_NOMEM;
2183
0
        }
2184
0
    }
2185
2186
0
    for (i = 0; i < *count; i++) {
2187
0
        r[i].begin = uc->mapped_blocks[i]->addr;
2188
0
        r[i].end = uc->mapped_blocks[i]->end - 1;
2189
0
        r[i].perms = uc->mapped_blocks[i]->perms;
2190
0
    }
2191
2192
0
    *regions = r;
2193
2194
0
    restore_jit_state(uc);
2195
0
    return UC_ERR_OK;
2196
0
}
2197
2198
UNICORN_EXPORT
2199
uc_err uc_query(uc_engine *uc, uc_query_type type, size_t *result)
2200
0
{
2201
0
    UC_INIT(uc);
2202
2203
0
    switch (type) {
2204
0
    default:
2205
0
        return UC_ERR_ARG;
2206
2207
0
    case UC_QUERY_PAGE_SIZE:
2208
0
        *result = uc->target_page_size;
2209
0
        break;
2210
2211
0
    case UC_QUERY_ARCH:
2212
0
        *result = uc->arch;
2213
0
        break;
2214
2215
0
    case UC_QUERY_MODE:
2216
0
#ifdef UNICORN_HAS_ARM
2217
0
        if (uc->arch == UC_ARCH_ARM) {
2218
0
            return uc->query(uc, type, result);
2219
0
        }
2220
0
#endif
2221
0
        *result = uc->mode;
2222
0
        break;
2223
2224
0
    case UC_QUERY_TIMEOUT:
2225
0
        *result = uc->timed_out;
2226
0
        break;
2227
0
    }
2228
2229
0
    restore_jit_state(uc);
2230
0
    return UC_ERR_OK;
2231
0
}
2232
2233
UNICORN_EXPORT
2234
uc_err uc_context_alloc(uc_engine *uc, uc_context **context)
2235
0
{
2236
0
    struct uc_context **_context = context;
2237
0
    size_t size = uc_context_size(uc);
2238
2239
0
    UC_INIT(uc);
2240
2241
0
    *_context = g_malloc(size);
2242
0
    if (*_context) {
2243
0
        (*_context)->context_size = size - sizeof(uc_context);
2244
0
        (*_context)->arch = uc->arch;
2245
0
        (*_context)->mode = uc->mode;
2246
0
        (*_context)->fv = NULL;
2247
0
        restore_jit_state(uc);
2248
0
        return UC_ERR_OK;
2249
0
    } else {
2250
0
        restore_jit_state(uc);
2251
0
        return UC_ERR_NOMEM;
2252
0
    }
2253
0
}
2254
2255
UNICORN_EXPORT
2256
uc_err uc_free(void *mem)
2257
0
{
2258
0
    g_free(mem);
2259
0
    return UC_ERR_OK;
2260
0
}
2261
2262
UNICORN_EXPORT
2263
size_t uc_context_size(uc_engine *uc)
2264
0
{
2265
0
    UC_INIT(uc);
2266
2267
0
    restore_jit_state(uc);
2268
0
    if (!uc->context_size) {
2269
        // return the total size of struct uc_context
2270
0
        return sizeof(uc_context) + uc->cpu_context_size;
2271
0
    } else {
2272
0
        return sizeof(uc_context) + uc->context_size(uc);
2273
0
    }
2274
0
}
2275
2276
UNICORN_EXPORT
2277
uc_err uc_context_save(uc_engine *uc, uc_context *context)
2278
0
{
2279
0
    UC_INIT(uc);
2280
0
    uc_err ret = UC_ERR_OK;
2281
2282
0
    if (uc->context_content & UC_CTL_CONTEXT_MEMORY) {
2283
0
        if (!context->fv) {
2284
0
            context->fv = g_malloc0(sizeof(*context->fv));
2285
0
        }
2286
0
        if (!context->fv) {
2287
0
            return UC_ERR_NOMEM;
2288
0
        }
2289
0
        if (!uc->flatview_copy(uc, context->fv,
2290
0
                               uc->address_space_memory.current_map, false)) {
2291
0
            restore_jit_state(uc);
2292
0
            return UC_ERR_NOMEM;
2293
0
        }
2294
0
        ret = uc_snapshot(uc);
2295
0
        if (ret != UC_ERR_OK) {
2296
0
            restore_jit_state(uc);
2297
0
            return ret;
2298
0
        }
2299
0
        context->ramblock_freed = uc->ram_list.freed;
2300
0
        context->last_block = uc->ram_list.last_block;
2301
0
        uc->tcg_flush_tlb(uc);
2302
0
    }
2303
2304
0
    context->snapshot_level = uc->snapshot_level;
2305
2306
0
    if (uc->context_content & UC_CTL_CONTEXT_CPU) {
2307
0
        if (!uc->context_save) {
2308
0
            memcpy(context->data, uc->cpu->env_ptr, context->context_size);
2309
0
            restore_jit_state(uc);
2310
0
            return UC_ERR_OK;
2311
0
        } else {
2312
0
            ret = uc->context_save(uc, context);
2313
0
            restore_jit_state(uc);
2314
0
            return ret;
2315
0
        }
2316
0
    }
2317
0
    restore_jit_state(uc);
2318
0
    return ret;
2319
0
}
2320
2321
// Keep in mind that we don't a uc_engine when r/w the registers of a context.
2322
static context_reg_rw_t find_context_reg_rw(uc_arch arch, uc_mode mode)
2323
0
{
2324
    // We believe that the arch/mode pair is correct.
2325
0
    context_reg_rw_t rw = {default_reg_read, default_reg_write};
2326
0
    switch (arch) {
2327
0
    default:
2328
0
        break;
2329
0
#ifdef UNICORN_HAS_M68K
2330
0
    case UC_ARCH_M68K:
2331
0
        rw.read = reg_read_m68k;
2332
0
        rw.write = reg_write_m68k;
2333
0
        break;
2334
0
#endif
2335
0
#ifdef UNICORN_HAS_X86
2336
0
    case UC_ARCH_X86:
2337
0
        rw.read = reg_read_x86_64;
2338
0
        rw.write = reg_write_x86_64;
2339
0
        break;
2340
0
#endif
2341
0
#ifdef UNICORN_HAS_ARM
2342
0
    case UC_ARCH_ARM:
2343
0
        rw.read = reg_read_arm;
2344
0
        rw.write = reg_write_arm;
2345
0
        break;
2346
0
#endif
2347
0
#ifdef UNICORN_HAS_ARM64
2348
0
    case UC_ARCH_ARM64:
2349
0
        rw.read = reg_read_aarch64;
2350
0
        rw.write = reg_write_aarch64;
2351
0
        break;
2352
0
#endif
2353
2354
0
#if defined(UNICORN_HAS_MIPS) || defined(UNICORN_HAS_MIPSEL) ||                \
2355
0
    defined(UNICORN_HAS_MIPS64) || defined(UNICORN_HAS_MIPS64EL)
2356
0
    case UC_ARCH_MIPS:
2357
0
        if (mode & UC_MODE_BIG_ENDIAN) {
2358
0
#ifdef UNICORN_HAS_MIPS
2359
0
            if (mode & UC_MODE_MIPS32) {
2360
0
                rw.read = reg_read_mips;
2361
0
                rw.write = reg_write_mips;
2362
0
            }
2363
0
#endif
2364
0
#ifdef UNICORN_HAS_MIPS64
2365
0
            if (mode & UC_MODE_MIPS64) {
2366
0
                rw.read = reg_read_mips64;
2367
0
                rw.write = reg_write_mips64;
2368
0
            }
2369
0
#endif
2370
0
        } else { // little endian
2371
0
#ifdef UNICORN_HAS_MIPSEL
2372
0
            if (mode & UC_MODE_MIPS32) {
2373
0
                rw.read = reg_read_mipsel;
2374
0
                rw.write = reg_write_mipsel;
2375
0
            }
2376
0
#endif
2377
0
#ifdef UNICORN_HAS_MIPS64EL
2378
0
            if (mode & UC_MODE_MIPS64) {
2379
0
                rw.read = reg_read_mips64el;
2380
0
                rw.write = reg_write_mips64el;
2381
0
            }
2382
0
#endif
2383
0
        }
2384
0
        break;
2385
0
#endif
2386
2387
0
#ifdef UNICORN_HAS_SPARC
2388
0
    case UC_ARCH_SPARC:
2389
0
        if (mode & UC_MODE_SPARC64) {
2390
0
            rw.read = reg_read_sparc64;
2391
0
            rw.write = reg_write_sparc64;
2392
0
        } else {
2393
0
            rw.read = reg_read_sparc;
2394
0
            rw.write = reg_write_sparc;
2395
0
        }
2396
0
        break;
2397
0
#endif
2398
0
#ifdef UNICORN_HAS_PPC
2399
0
    case UC_ARCH_PPC:
2400
0
        if (mode & UC_MODE_PPC64) {
2401
0
            rw.read = reg_read_ppc64;
2402
0
            rw.write = reg_write_ppc64;
2403
0
        } else {
2404
0
            rw.read = reg_read_ppc;
2405
0
            rw.write = reg_write_ppc;
2406
0
        }
2407
0
        break;
2408
0
#endif
2409
0
#ifdef UNICORN_HAS_RISCV
2410
0
    case UC_ARCH_RISCV:
2411
0
        if (mode & UC_MODE_RISCV32) {
2412
0
            rw.read = reg_read_riscv32;
2413
0
            rw.write = reg_write_riscv32;
2414
0
        } else if (mode & UC_MODE_RISCV64) {
2415
0
            rw.read = reg_read_riscv64;
2416
0
            rw.write = reg_write_riscv64;
2417
0
        }
2418
0
        break;
2419
0
#endif
2420
0
#ifdef UNICORN_HAS_S390X
2421
0
    case UC_ARCH_S390X:
2422
0
        rw.read = reg_read_s390x;
2423
0
        rw.write = reg_write_s390x;
2424
0
        break;
2425
0
#endif
2426
0
#ifdef UNICORN_HAS_TRICORE
2427
0
    case UC_ARCH_TRICORE:
2428
0
        rw.read = reg_read_tricore;
2429
0
        rw.write = reg_write_tricore;
2430
0
        break;
2431
0
#endif
2432
0
    }
2433
2434
0
    return rw;
2435
0
}
2436
2437
UNICORN_EXPORT
2438
uc_err uc_context_reg_write(uc_context *ctx, int regid, const void *value)
2439
0
{
2440
0
    int setpc = 0;
2441
0
    size_t size = (size_t)-1;
2442
0
    return find_context_reg_rw(ctx->arch, ctx->mode)
2443
0
        .write(ctx->data, ctx->mode, regid, value, &size, &setpc);
2444
0
}
2445
2446
UNICORN_EXPORT
2447
uc_err uc_context_reg_read(uc_context *ctx, int regid, void *value)
2448
0
{
2449
0
    size_t size = (size_t)-1;
2450
0
    return find_context_reg_rw(ctx->arch, ctx->mode)
2451
0
        .read(ctx->data, ctx->mode, regid, value, &size);
2452
0
}
2453
2454
UNICORN_EXPORT
2455
uc_err uc_context_reg_write2(uc_context *ctx, int regid, const void *value,
2456
                             size_t *size)
2457
0
{
2458
0
    int setpc = 0;
2459
0
    return find_context_reg_rw(ctx->arch, ctx->mode)
2460
0
        .write(ctx->data, ctx->mode, regid, value, size, &setpc);
2461
0
}
2462
2463
UNICORN_EXPORT
2464
uc_err uc_context_reg_read2(uc_context *ctx, int regid, void *value,
2465
                            size_t *size)
2466
0
{
2467
0
    return find_context_reg_rw(ctx->arch, ctx->mode)
2468
0
        .read(ctx->data, ctx->mode, regid, value, size);
2469
0
}
2470
2471
UNICORN_EXPORT
2472
uc_err uc_context_reg_write_batch(uc_context *ctx, int const *regs,
2473
                                  void *const *vals, int count)
2474
0
{
2475
0
    reg_write_t reg_write = find_context_reg_rw(ctx->arch, ctx->mode).write;
2476
0
    void *env = ctx->data;
2477
0
    int mode = ctx->mode;
2478
0
    int setpc = 0;
2479
0
    int i;
2480
2481
0
    for (i = 0; i < count; i++) {
2482
0
        unsigned int regid = regs[i];
2483
0
        const void *value = vals[i];
2484
0
        size_t size = (size_t)-1;
2485
0
        uc_err err = reg_write(env, mode, regid, value, &size, &setpc);
2486
0
        if (err) {
2487
0
            return err;
2488
0
        }
2489
0
    }
2490
2491
0
    return UC_ERR_OK;
2492
0
}
2493
2494
UNICORN_EXPORT
2495
uc_err uc_context_reg_read_batch(uc_context *ctx, int const *regs, void **vals,
2496
                                 int count)
2497
0
{
2498
0
    reg_read_t reg_read = find_context_reg_rw(ctx->arch, ctx->mode).read;
2499
0
    void *env = ctx->data;
2500
0
    int mode = ctx->mode;
2501
0
    int i;
2502
2503
0
    for (i = 0; i < count; i++) {
2504
0
        unsigned int regid = regs[i];
2505
0
        void *value = vals[i];
2506
0
        size_t size = (size_t)-1;
2507
0
        uc_err err = reg_read(env, mode, regid, value, &size);
2508
0
        if (err) {
2509
0
            return err;
2510
0
        }
2511
0
    }
2512
2513
0
    return UC_ERR_OK;
2514
0
}
2515
2516
UNICORN_EXPORT
2517
uc_err uc_context_reg_write_batch2(uc_context *ctx, int const *regs,
2518
                                   const void *const *vals, size_t *sizes,
2519
                                   int count)
2520
0
{
2521
0
    reg_write_t reg_write = find_context_reg_rw(ctx->arch, ctx->mode).write;
2522
0
    void *env = ctx->data;
2523
0
    int mode = ctx->mode;
2524
0
    int setpc = 0;
2525
0
    int i;
2526
2527
0
    for (i = 0; i < count; i++) {
2528
0
        unsigned int regid = regs[i];
2529
0
        const void *value = vals[i];
2530
0
        uc_err err = reg_write(env, mode, regid, value, sizes + i, &setpc);
2531
0
        if (err) {
2532
0
            return err;
2533
0
        }
2534
0
    }
2535
2536
0
    return UC_ERR_OK;
2537
0
}
2538
2539
UNICORN_EXPORT
2540
uc_err uc_context_reg_read_batch2(uc_context *ctx, int const *regs,
2541
                                  void *const *vals, size_t *sizes, int count)
2542
0
{
2543
0
    reg_read_t reg_read = find_context_reg_rw(ctx->arch, ctx->mode).read;
2544
0
    void *env = ctx->data;
2545
0
    int mode = ctx->mode;
2546
0
    int i;
2547
2548
0
    for (i = 0; i < count; i++) {
2549
0
        unsigned int regid = regs[i];
2550
0
        void *value = vals[i];
2551
0
        uc_err err = reg_read(env, mode, regid, value, sizes + i);
2552
0
        if (err) {
2553
0
            return err;
2554
0
        }
2555
0
    }
2556
2557
0
    return UC_ERR_OK;
2558
0
}
2559
2560
UNICORN_EXPORT
2561
uc_err uc_context_restore(uc_engine *uc, uc_context *context)
2562
0
{
2563
0
    UC_INIT(uc);
2564
0
    uc_err ret;
2565
2566
0
    if (uc->context_content & UC_CTL_CONTEXT_MEMORY) {
2567
0
        uc->snapshot_level = context->snapshot_level;
2568
0
        if (!uc->flatview_copy(uc, uc->address_space_memory.current_map,
2569
0
                               context->fv, true)) {
2570
0
            return UC_ERR_NOMEM;
2571
0
        }
2572
0
        ret = uc_restore_latest_snapshot(uc);
2573
0
        if (ret != UC_ERR_OK) {
2574
0
            restore_jit_state(uc);
2575
0
            return ret;
2576
0
        }
2577
0
        uc_snapshot(uc);
2578
0
        uc->ram_list.freed = context->ramblock_freed;
2579
0
        uc->ram_list.last_block = context->last_block;
2580
0
        uc->tcg_flush_tlb(uc);
2581
0
    }
2582
2583
0
    if (uc->context_content & UC_CTL_CONTEXT_CPU) {
2584
0
        if (!uc->context_restore) {
2585
0
            memcpy(uc->cpu->env_ptr, context->data, context->context_size);
2586
0
            restore_jit_state(uc);
2587
0
            return UC_ERR_OK;
2588
0
        } else {
2589
0
            ret = uc->context_restore(uc, context);
2590
0
            restore_jit_state(uc);
2591
0
            return ret;
2592
0
        }
2593
0
    }
2594
0
    return UC_ERR_OK;
2595
0
}
2596
2597
UNICORN_EXPORT
2598
uc_err uc_context_free(uc_context *context)
2599
0
{
2600
0
    if (context->fv) {
2601
0
        free(context->fv->ranges);
2602
0
        g_free(context->fv);
2603
0
    }
2604
0
    return uc_free(context);
2605
0
}
2606
2607
typedef struct _uc_ctl_exit_request {
2608
    uint64_t *array;
2609
    size_t len;
2610
} uc_ctl_exit_request;
2611
2612
static inline gboolean uc_read_exit_iter(gpointer key, gpointer val,
2613
                                         gpointer data)
2614
0
{
2615
0
    uc_ctl_exit_request *req = (uc_ctl_exit_request *)data;
2616
2617
0
    req->array[req->len++] = *(uint64_t *)key;
2618
2619
0
    return false;
2620
0
}
2621
2622
UNICORN_EXPORT
2623
uc_err uc_ctl(uc_engine *uc, uc_control_type control, ...)
2624
0
{
2625
0
    int rw, type;
2626
0
    uc_err err = UC_ERR_OK;
2627
0
    va_list args;
2628
2629
    // MSVC Would do signed shift on signed integers.
2630
0
    rw = (uint32_t)control >> 30;
2631
0
    type = (control & ((1 << 16) - 1));
2632
0
    va_start(args, control);
2633
2634
0
    switch (type) {
2635
0
    case UC_CTL_UC_MODE: {
2636
0
        if (rw == UC_CTL_IO_READ) {
2637
0
            int *pmode = va_arg(args, int *);
2638
0
            *pmode = uc->mode;
2639
0
        } else {
2640
0
            err = UC_ERR_ARG;
2641
0
        }
2642
0
        break;
2643
0
    }
2644
2645
0
    case UC_CTL_UC_ARCH: {
2646
0
        if (rw == UC_CTL_IO_READ) {
2647
0
            int *arch = va_arg(args, int *);
2648
0
            *arch = uc->arch;
2649
0
        } else {
2650
0
            err = UC_ERR_ARG;
2651
0
        }
2652
0
        break;
2653
0
    }
2654
2655
0
    case UC_CTL_UC_TIMEOUT: {
2656
0
        if (rw == UC_CTL_IO_READ) {
2657
0
            uint64_t *arch = va_arg(args, uint64_t *);
2658
0
            *arch = uc->timeout;
2659
0
        } else {
2660
0
            err = UC_ERR_ARG;
2661
0
        }
2662
0
        break;
2663
0
    }
2664
2665
0
    case UC_CTL_UC_PAGE_SIZE: {
2666
0
        if (rw == UC_CTL_IO_READ) {
2667
2668
0
            UC_INIT(uc);
2669
2670
0
            uint32_t *page_size = va_arg(args, uint32_t *);
2671
0
            *page_size = uc->target_page_size;
2672
2673
0
            restore_jit_state(uc);
2674
0
        } else {
2675
0
            uint32_t page_size = va_arg(args, uint32_t);
2676
0
            int bits = 0;
2677
2678
0
            if (uc->init_done) {
2679
0
                err = UC_ERR_ARG;
2680
0
                break;
2681
0
            }
2682
2683
0
            if (uc->arch != UC_ARCH_ARM && uc->arch != UC_ARCH_ARM64) {
2684
0
                err = UC_ERR_ARG;
2685
0
                break;
2686
0
            }
2687
2688
0
            if ((page_size & (page_size - 1))) {
2689
0
                err = UC_ERR_ARG;
2690
0
                break;
2691
0
            }
2692
2693
            // Bits is used to calculate the mask
2694
0
            while (page_size > 1) {
2695
0
                bits++;
2696
0
                page_size >>= 1;
2697
0
            }
2698
2699
0
            uc->target_bits = bits;
2700
2701
0
            err = UC_ERR_OK;
2702
0
        }
2703
2704
0
        break;
2705
0
    }
2706
2707
0
    case UC_CTL_UC_USE_EXITS: {
2708
0
        if (rw == UC_CTL_IO_WRITE) {
2709
0
            int use_exits = va_arg(args, int);
2710
0
            uc->use_exits = use_exits;
2711
0
        } else {
2712
0
            err = UC_ERR_ARG;
2713
0
        }
2714
0
        break;
2715
0
    }
2716
2717
0
    case UC_CTL_UC_EXITS_CNT: {
2718
2719
0
        UC_INIT(uc);
2720
2721
0
        if (!uc->use_exits) {
2722
0
            err = UC_ERR_ARG;
2723
0
        } else if (rw == UC_CTL_IO_READ) {
2724
0
            size_t *exits_cnt = va_arg(args, size_t *);
2725
0
            *exits_cnt = g_tree_nnodes(uc->ctl_exits);
2726
0
        } else {
2727
0
            err = UC_ERR_ARG;
2728
0
        }
2729
2730
0
        restore_jit_state(uc);
2731
0
        break;
2732
0
    }
2733
2734
0
    case UC_CTL_UC_EXITS: {
2735
2736
0
        UC_INIT(uc);
2737
2738
0
        if (!uc->use_exits) {
2739
0
            err = UC_ERR_ARG;
2740
0
        } else if (rw == UC_CTL_IO_READ) {
2741
0
            uint64_t *exits = va_arg(args, uint64_t *);
2742
0
            size_t cnt = va_arg(args, size_t);
2743
0
            if (cnt < g_tree_nnodes(uc->ctl_exits)) {
2744
0
                err = UC_ERR_ARG;
2745
0
            } else {
2746
0
                uc_ctl_exit_request req;
2747
0
                req.array = exits;
2748
0
                req.len = 0;
2749
2750
0
                g_tree_foreach(uc->ctl_exits, uc_read_exit_iter, (void *)&req);
2751
0
            }
2752
0
        } else if (rw == UC_CTL_IO_WRITE) {
2753
0
            uint64_t *exits = va_arg(args, uint64_t *);
2754
0
            size_t cnt = va_arg(args, size_t);
2755
2756
0
            g_tree_remove_all(uc->ctl_exits);
2757
2758
0
            for (size_t i = 0; i < cnt; i++) {
2759
0
                uc_add_exit(uc, exits[i]);
2760
0
            }
2761
0
        } else {
2762
0
            err = UC_ERR_ARG;
2763
0
        }
2764
2765
0
        restore_jit_state(uc);
2766
0
        break;
2767
0
    }
2768
2769
0
    case UC_CTL_CPU_MODEL: {
2770
0
        if (rw == UC_CTL_IO_READ) {
2771
2772
0
            UC_INIT(uc);
2773
2774
0
            int *model = va_arg(args, int *);
2775
0
            *model = uc->cpu_model;
2776
2777
0
            save_jit_state(uc);
2778
0
        } else {
2779
0
            int model = va_arg(args, int);
2780
2781
0
            if (model < 0 || uc->init_done) {
2782
0
                err = UC_ERR_ARG;
2783
0
                break;
2784
0
            }
2785
2786
0
            if (uc->arch == UC_ARCH_X86) {
2787
0
                if (model >= UC_CPU_X86_ENDING) {
2788
0
                    err = UC_ERR_ARG;
2789
0
                    break;
2790
0
                }
2791
0
            } else if (uc->arch == UC_ARCH_ARM) {
2792
0
                if (model >= UC_CPU_ARM_ENDING) {
2793
0
                    err = UC_ERR_ARG;
2794
0
                    break;
2795
0
                }
2796
2797
0
                if (uc->mode & UC_MODE_BIG_ENDIAN) {
2798
                    // These cpu models don't support big endian code access.
2799
0
                    if (model <= UC_CPU_ARM_CORTEX_A15 &&
2800
0
                        model >= UC_CPU_ARM_CORTEX_A7) {
2801
0
                        err = UC_ERR_ARG;
2802
0
                        break;
2803
0
                    }
2804
0
                }
2805
0
            } else if (uc->arch == UC_ARCH_ARM64) {
2806
0
                if (model >= UC_CPU_ARM64_ENDING) {
2807
0
                    err = UC_ERR_ARG;
2808
0
                    break;
2809
0
                }
2810
0
            } else if (uc->arch == UC_ARCH_MIPS) {
2811
0
                if (uc->mode & UC_MODE_32 && model >= UC_CPU_MIPS32_ENDING) {
2812
0
                    err = UC_ERR_ARG;
2813
0
                    break;
2814
0
                }
2815
2816
0
                if (uc->mode & UC_MODE_64 && model >= UC_CPU_MIPS64_ENDING) {
2817
0
                    err = UC_ERR_ARG;
2818
0
                    break;
2819
0
                }
2820
0
            } else if (uc->arch == UC_ARCH_PPC) {
2821
                // UC_MODE_PPC32 == UC_MODE_32
2822
0
                if (uc->mode & UC_MODE_32 && model >= UC_CPU_PPC32_ENDING) {
2823
0
                    err = UC_ERR_ARG;
2824
0
                    break;
2825
0
                }
2826
2827
0
                if (uc->mode & UC_MODE_64 && model >= UC_CPU_PPC64_ENDING) {
2828
0
                    err = UC_ERR_ARG;
2829
0
                    break;
2830
0
                }
2831
0
            } else if (uc->arch == UC_ARCH_RISCV) {
2832
0
                if (uc->mode & UC_MODE_32 && model >= UC_CPU_RISCV32_ENDING) {
2833
0
                    err = UC_ERR_ARG;
2834
0
                    break;
2835
0
                }
2836
2837
0
                if (uc->mode & UC_MODE_64 && model >= UC_CPU_RISCV64_ENDING) {
2838
0
                    err = UC_ERR_ARG;
2839
0
                    break;
2840
0
                }
2841
0
            } else if (uc->arch == UC_ARCH_S390X) {
2842
0
                if (model >= UC_CPU_S390X_ENDING) {
2843
0
                    err = UC_ERR_ARG;
2844
0
                    break;
2845
0
                }
2846
0
            } else if (uc->arch == UC_ARCH_SPARC) {
2847
0
                if (uc->mode & UC_MODE_32 && model >= UC_CPU_SPARC32_ENDING) {
2848
0
                    err = UC_ERR_ARG;
2849
0
                    break;
2850
0
                }
2851
0
                if (uc->mode & UC_MODE_64 && model >= UC_CPU_SPARC64_ENDING) {
2852
0
                    err = UC_ERR_ARG;
2853
0
                    break;
2854
0
                }
2855
0
            } else if (uc->arch == UC_ARCH_M68K) {
2856
0
                if (model >= UC_CPU_M68K_ENDING) {
2857
0
                    err = UC_ERR_ARG;
2858
0
                    break;
2859
0
                }
2860
0
            } else {
2861
0
                err = UC_ERR_ARG;
2862
0
                break;
2863
0
            }
2864
2865
0
            uc->cpu_model = model;
2866
2867
0
            err = UC_ERR_OK;
2868
0
        }
2869
0
        break;
2870
0
    }
2871
2872
0
    case UC_CTL_TB_REQUEST_CACHE: {
2873
2874
0
        UC_INIT(uc);
2875
2876
0
        if (rw == UC_CTL_IO_READ_WRITE) {
2877
0
            uint64_t addr = va_arg(args, uint64_t);
2878
0
            uc_tb *tb = va_arg(args, uc_tb *);
2879
0
            err = uc->uc_gen_tb(uc, addr, tb);
2880
0
        } else {
2881
0
            err = UC_ERR_ARG;
2882
0
        }
2883
2884
0
        restore_jit_state(uc);
2885
0
        break;
2886
0
    }
2887
2888
0
    case UC_CTL_TB_REMOVE_CACHE: {
2889
2890
0
        UC_INIT(uc);
2891
2892
0
        if (rw == UC_CTL_IO_WRITE) {
2893
0
            uint64_t addr = va_arg(args, uint64_t);
2894
0
            uint64_t end = va_arg(args, uint64_t);
2895
0
            if (end <= addr) {
2896
0
                err = UC_ERR_ARG;
2897
0
            } else {
2898
0
                uc->uc_invalidate_tb(uc, addr, end - addr);
2899
0
            }
2900
0
        } else {
2901
0
            err = UC_ERR_ARG;
2902
0
        }
2903
2904
0
        restore_jit_state(uc);
2905
0
        break;
2906
0
    }
2907
2908
0
    case UC_CTL_TB_FLUSH:
2909
2910
0
        UC_INIT(uc);
2911
2912
0
        if (rw == UC_CTL_IO_WRITE) {
2913
0
            uc->tb_flush(uc);
2914
0
        } else {
2915
0
            err = UC_ERR_ARG;
2916
0
        }
2917
2918
0
        restore_jit_state(uc);
2919
0
        break;
2920
2921
0
    case UC_CTL_TLB_FLUSH:
2922
2923
0
        UC_INIT(uc);
2924
2925
0
        if (rw == UC_CTL_IO_WRITE) {
2926
0
            uc->tcg_flush_tlb(uc);
2927
0
        } else {
2928
0
            err = UC_ERR_ARG;
2929
0
        }
2930
2931
0
        restore_jit_state(uc);
2932
0
        break;
2933
2934
0
    case UC_CTL_TLB_TYPE: {
2935
2936
0
        UC_INIT(uc);
2937
2938
0
        if (rw == UC_CTL_IO_WRITE) {
2939
0
            int mode = va_arg(args, int);
2940
0
            err = uc->set_tlb(uc, mode);
2941
0
        } else {
2942
0
            err = UC_ERR_ARG;
2943
0
        }
2944
2945
0
        restore_jit_state(uc);
2946
0
        break;
2947
0
    }
2948
2949
0
    case UC_CTL_TCG_BUFFER_SIZE: {
2950
0
        if (rw == UC_CTL_IO_WRITE) {
2951
0
            uint32_t size = va_arg(args, uint32_t);
2952
0
            uc->tcg_buffer_size = size;
2953
0
        } else {
2954
2955
0
            UC_INIT(uc);
2956
2957
0
            uint32_t *size = va_arg(args, uint32_t *);
2958
0
            *size = uc->tcg_buffer_size;
2959
2960
0
            restore_jit_state(uc);
2961
0
        }
2962
0
        break;
2963
0
    }
2964
2965
0
    case UC_CTL_CONTEXT_MODE:
2966
2967
0
        UC_INIT(uc);
2968
2969
0
        if (rw == UC_CTL_IO_WRITE) {
2970
0
            int mode = va_arg(args, int);
2971
0
            uc->context_content = mode;
2972
0
            err = UC_ERR_OK;
2973
0
        } else {
2974
0
            err = UC_ERR_ARG;
2975
0
        }
2976
2977
0
        restore_jit_state(uc);
2978
0
        break;
2979
2980
0
    default:
2981
0
        err = UC_ERR_ARG;
2982
0
        break;
2983
0
    }
2984
2985
0
    va_end(args);
2986
2987
0
    return err;
2988
0
}
2989
2990
static uc_err uc_snapshot(struct uc_struct *uc)
2991
0
{
2992
0
    if (uc->snapshot_level == INT32_MAX) {
2993
0
        return UC_ERR_RESOURCE;
2994
0
    }
2995
0
    uc->snapshot_level++;
2996
0
    return UC_ERR_OK;
2997
0
}
2998
2999
static uc_err uc_restore_latest_snapshot(struct uc_struct *uc)
3000
0
{
3001
0
    MemoryRegion *subregion, *subregion_next, *mr, *initial_mr;
3002
0
    int level;
3003
3004
0
    QTAILQ_FOREACH_SAFE(subregion, &uc->system_memory->subregions,
3005
0
                        subregions_link, subregion_next)
3006
0
    {
3007
0
        uc->memory_filter_subregions(subregion, uc->snapshot_level);
3008
0
        if (subregion->priority >= uc->snapshot_level ||
3009
0
            (!subregion->terminates && QTAILQ_EMPTY(&subregion->subregions))) {
3010
0
            uc->memory_unmap(uc, subregion);
3011
0
        }
3012
0
    }
3013
3014
0
    for (size_t i = uc->unmapped_regions->len; i-- > 0;) {
3015
0
        mr = g_array_index(uc->unmapped_regions, MemoryRegion *, i);
3016
        // same dirty hack as in memory_moveout see qemu/softmmu/memory.c
3017
0
        initial_mr = QTAILQ_FIRST(&mr->subregions);
3018
0
        if (!initial_mr) {
3019
0
            initial_mr = mr;
3020
0
        }
3021
        /* same dirty hack as in memory_moveout see qemu/softmmu/memory.c */
3022
0
        level = (intptr_t)mr->container;
3023
0
        mr->container = NULL;
3024
3025
0
        if (level < uc->snapshot_level) {
3026
0
            break;
3027
0
        }
3028
0
        if (memory_overlap(uc, mr->addr, int128_get64(mr->size))) {
3029
0
            return UC_ERR_MAP;
3030
0
        }
3031
0
        uc->memory_movein(uc, mr);
3032
0
        uc->memory_filter_subregions(mr, uc->snapshot_level);
3033
0
        if (initial_mr != mr && QTAILQ_EMPTY(&mr->subregions)) {
3034
0
            uc->memory_unmap(uc, subregion);
3035
0
        }
3036
0
        mem_map(uc, initial_mr);
3037
0
        g_array_remove_range(uc->unmapped_regions, i, 1);
3038
0
    }
3039
0
    uc->snapshot_level--;
3040
3041
0
    return UC_ERR_OK;
3042
0
}
3043
3044
#ifdef UNICORN_TRACER
3045
uc_tracer *get_tracer()
3046
{
3047
    static uc_tracer tracer;
3048
    return &tracer;
3049
}
3050
3051
void trace_start(uc_tracer *tracer, trace_loc loc)
3052
{
3053
    tracer->starts[loc] = get_clock();
3054
}
3055
3056
void trace_end(uc_tracer *tracer, trace_loc loc, const char *fmt, ...)
3057
{
3058
    va_list args;
3059
    int64_t end = get_clock();
3060
3061
    va_start(args, fmt);
3062
3063
    vfprintf(stderr, fmt, args);
3064
3065
    va_end(args);
3066
3067
    fprintf(stderr, "%.6fus\n",
3068
            (double)(end - tracer->starts[loc]) / (double)(1000));
3069
}
3070
#endif