Coverage Report

Created: 2025-11-16 06:58

next uncovered line (L), next uncovered region (R), next uncovered branch (B)
/src/unicorn/qemu/exec.c
Line
Count
Source
1
/*
2
 *  Virtual page mapping
3
 *
4
 *  Copyright (c) 2003 Fabrice Bellard
5
 *
6
 * This library is free software; you can redistribute it and/or
7
 * modify it under the terms of the GNU Lesser General Public
8
 * License as published by the Free Software Foundation; either
9
 * version 2 of the License, or (at your option) any later version.
10
 *
11
 * This library is distributed in the hope that it will be useful,
12
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
 * Lesser General Public License for more details.
15
 *
16
 * You should have received a copy of the GNU Lesser General Public
17
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18
 */
19
20
#include "qemu/osdep.h"
21
#include "qemu-common.h"
22
#include "exec/cpu-defs.h"
23
#include "cpu.h"
24
25
#include "qemu/cutils.h"
26
#include "exec/exec-all.h"
27
#include "exec/target_page.h"
28
#include "tcg/tcg.h"
29
#include "sysemu/sysemu.h"
30
#include "sysemu/tcg.h"
31
#include "qemu/timer.h"
32
#include "exec/memory.h"
33
#include "exec/ioport.h"
34
35
#ifdef CONFIG_FALLOCATE_PUNCH_HOLE
36
#include <linux/falloc.h>
37
#endif
38
39
#include "accel/tcg/translate-all.h"
40
41
#include "exec/memory-internal.h"
42
#include "exec/ram_addr.h"
43
44
#include "qemu/range.h"
45
#include "qemu/rcu_queue.h"
46
#include "uc_priv.h"
47
48
typedef struct PhysPageEntry PhysPageEntry;
49
50
struct PhysPageEntry {
51
    /* How many bits skip to next level (in units of L2_SIZE). 0 for a leaf. */
52
    uint32_t skip : 6;
53
     /* index into phys_sections (!skip) or phys_map_nodes (skip) */
54
    uint32_t ptr : 26;
55
};
56
57
2.09G
#define PHYS_MAP_NODE_NIL (((uint32_t)~0) >> 6)
58
59
/* Size of the L2 (and L3, etc) page tables.  */
60
5.05M
#define ADDR_SPACE_BITS 64
61
62
4.21G
#define P_L2_BITS 9
63
4.19G
#define P_L2_SIZE (1 << P_L2_BITS)
64
65
5.05M
#define P_L2_LEVELS (((ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / P_L2_BITS) + 1)
66
67
typedef PhysPageEntry Node[P_L2_SIZE];
68
69
typedef struct PhysPageMap {
70
    unsigned sections_nb;
71
    unsigned sections_nb_alloc;
72
    unsigned nodes_nb;
73
    unsigned nodes_nb_alloc;
74
    Node *nodes;
75
    MemoryRegionSection *sections;
76
} PhysPageMap;
77
78
struct AddressSpaceDispatch {
79
    MemoryRegionSection *mru_section;
80
    /* This is a multi-level map on the physical address space.
81
     * The bottom level has pointers to MemoryRegionSections.
82
     */
83
    PhysPageEntry phys_map;
84
    PhysPageMap map;
85
    struct uc_struct *uc;
86
};
87
88
0
#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
89
typedef struct subpage_t {
90
    MemoryRegion iomem;
91
    FlatView *fv;
92
    hwaddr base;
93
    uint16_t sub_section[];
94
} subpage_t;
95
96
540M
#define PHYS_SECTION_UNASSIGNED 0
97
98
static void tcg_commit(MemoryListener *listener);
99
100
/**
101
 * CPUAddressSpace: all the information a CPU needs about an AddressSpace
102
 * @cpu: the CPU whose AddressSpace this is
103
 * @as: the AddressSpace itself
104
 * @memory_dispatch: its dispatch pointer (cached, RCU protected)
105
 * @tcg_as_listener: listener for tracking changes to the AddressSpace
106
 */
107
struct CPUAddressSpace {
108
    CPUState *cpu;
109
    AddressSpace *as;
110
    struct AddressSpaceDispatch *memory_dispatch;
111
    MemoryListener tcg_as_listener;
112
};
113
114
115
static void phys_map_node_reserve(AddressSpaceDispatch *d, PhysPageMap *map, unsigned nodes)
116
705k
{
117
705k
    if (map->nodes_nb + nodes > map->nodes_nb_alloc) {
118
705k
        map->nodes_nb_alloc = MAX(d->uc->alloc_hint, map->nodes_nb + nodes);
119
705k
        map->nodes = g_renew(Node, map->nodes, map->nodes_nb_alloc);
120
705k
        d->uc->alloc_hint = map->nodes_nb_alloc;
121
705k
    }
122
705k
}
123
124
static uint32_t phys_map_node_alloc(PhysPageMap *map, bool leaf)
125
4.05M
{
126
4.05M
    unsigned i;
127
4.05M
    uint32_t ret;
128
4.05M
    PhysPageEntry e;
129
4.05M
    PhysPageEntry *p;
130
131
4.05M
    ret = map->nodes_nb++;
132
4.05M
    p = map->nodes[ret];
133
4.05M
    assert(ret != PHYS_MAP_NODE_NIL);
134
4.05M
    assert(ret != map->nodes_nb_alloc);
135
136
4.05M
    e.skip = leaf ? 0 : 1;
137
4.05M
    e.ptr = leaf ? PHYS_SECTION_UNASSIGNED : PHYS_MAP_NODE_NIL;
138
2.08G
    for (i = 0; i < P_L2_SIZE; ++i) {
139
2.07G
        memcpy(&p[i], &e, sizeof(e));
140
2.07G
    }
141
4.05M
    return ret;
142
4.05M
}
143
144
static void phys_page_set_level(PhysPageMap *map, PhysPageEntry *lp,
145
                                hwaddr *index, uint64_t *nb, uint16_t leaf,
146
                                int level)
147
4.05M
{
148
4.05M
    PhysPageEntry *p;
149
4.05M
    hwaddr step = (hwaddr)1 << (level * P_L2_BITS);
150
151
4.05M
    if (lp->skip && lp->ptr == PHYS_MAP_NODE_NIL) {
152
4.05M
        lp->ptr = phys_map_node_alloc(map, level == 0);
153
4.05M
    }
154
4.05M
    p = map->nodes[lp->ptr];
155
4.05M
    lp = &p[(*index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1)];
156
157
31.8M
    while (*nb && lp < &p[P_L2_SIZE]) {
158
27.8M
        if ((*index & (step - 1)) == 0 && *nb >= step) {
159
24.4M
            lp->skip = 0;
160
24.4M
            lp->ptr = leaf;
161
24.4M
            *index += step;
162
24.4M
            *nb -= step;
163
24.4M
        } else {
164
3.34M
            phys_page_set_level(map, lp, index, nb, leaf, level - 1);
165
3.34M
        }
166
27.8M
        ++lp;
167
27.8M
    }
168
4.05M
}
169
170
static void phys_page_set(AddressSpaceDispatch *d,
171
                          hwaddr index, uint64_t nb,
172
                          uint16_t leaf)
173
705k
{
174
#ifdef TARGET_ARM
175
    struct uc_struct *uc = d->uc;
176
#endif
177
    /* Wildly overreserve - it doesn't matter much. */
178
705k
    phys_map_node_reserve(d, &d->map, 3 * P_L2_LEVELS);
179
180
705k
    phys_page_set_level(&d->map, &d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
181
705k
}
182
183
/* Compact a non leaf page entry. Simply detect that the entry has a single child,
184
 * and update our entry so we can skip it and go directly to the destination.
185
 */
186
static void phys_page_compact(struct uc_struct *uc, PhysPageEntry *lp, Node *nodes)
187
4.40M
{
188
4.40M
    unsigned valid_ptr = P_L2_SIZE;
189
4.40M
    int valid = 0;
190
4.40M
    PhysPageEntry *p;
191
4.40M
    int i;
192
193
4.40M
    if (lp->ptr == PHYS_MAP_NODE_NIL) {
194
352k
        return;
195
352k
    }
196
197
4.05M
    p = nodes[lp->ptr];
198
2.08G
    for (i = 0; i < P_L2_SIZE; i++) {
199
2.07G
        if (p[i].ptr == PHYS_MAP_NODE_NIL) {
200
1.80G
            continue;
201
1.80G
        }
202
203
275M
        valid_ptr = i;
204
275M
        valid++;
205
275M
        if (p[i].skip) {
206
3.34M
            phys_page_compact(uc, &p[i], nodes);
207
3.34M
        }
208
275M
    }
209
210
    /* We can only compress if there's only one child. */
211
4.05M
    if (valid != 1) {
212
705k
        return;
213
705k
    }
214
215
4.05M
    assert(valid_ptr < P_L2_SIZE);
216
217
    /* Don't compress if it won't fit in the # of bits we have. */
218
3.34M
    if (P_L2_LEVELS >= (1 << 6) &&
219
0
        lp->skip + p[valid_ptr].skip >= (1 << 6)) {
220
0
        return;
221
0
    }
222
223
3.34M
    lp->ptr = p[valid_ptr].ptr;
224
3.34M
    if (!p[valid_ptr].skip) {
225
        /* If our only child is a leaf, make this a leaf. */
226
        /* By design, we should have made this node a leaf to begin with so we
227
         * should never reach here.
228
         * But since it's so simple to handle this, let's do it just in case we
229
         * change this rule.
230
         */
231
0
        lp->skip = 0;
232
3.34M
    } else {
233
3.34M
        lp->skip += p[valid_ptr].skip;
234
3.34M
    }
235
3.34M
}
236
237
void address_space_dispatch_compact(AddressSpaceDispatch *d)
238
1.05M
{
239
1.05M
    if (d->phys_map.skip) {
240
1.05M
        phys_page_compact(d->uc, &d->phys_map, d->map.nodes);
241
1.05M
    }
242
1.05M
}
address_space_dispatch_compact_x86_64
Line
Count
Source
238
201k
{
239
201k
    if (d->phys_map.skip) {
240
201k
        phys_page_compact(d->uc, &d->phys_map, d->map.nodes);
241
201k
    }
242
201k
}
address_space_dispatch_compact_arm
Line
Count
Source
238
263k
{
239
263k
    if (d->phys_map.skip) {
240
263k
        phys_page_compact(d->uc, &d->phys_map, d->map.nodes);
241
263k
    }
242
263k
}
address_space_dispatch_compact_aarch64
Line
Count
Source
238
361k
{
239
361k
    if (d->phys_map.skip) {
240
361k
        phys_page_compact(d->uc, &d->phys_map, d->map.nodes);
241
361k
    }
242
361k
}
address_space_dispatch_compact_m68k
Line
Count
Source
238
174
{
239
174
    if (d->phys_map.skip) {
240
174
        phys_page_compact(d->uc, &d->phys_map, d->map.nodes);
241
174
    }
242
174
}
address_space_dispatch_compact_mips
Line
Count
Source
238
44.5k
{
239
44.5k
    if (d->phys_map.skip) {
240
44.5k
        phys_page_compact(d->uc, &d->phys_map, d->map.nodes);
241
44.5k
    }
242
44.5k
}
address_space_dispatch_compact_mipsel
Line
Count
Source
238
53.6k
{
239
53.6k
    if (d->phys_map.skip) {
240
53.6k
        phys_page_compact(d->uc, &d->phys_map, d->map.nodes);
241
53.6k
    }
242
53.6k
}
Unexecuted instantiation: address_space_dispatch_compact_mips64
Unexecuted instantiation: address_space_dispatch_compact_mips64el
address_space_dispatch_compact_sparc
Line
Count
Source
238
276
{
239
276
    if (d->phys_map.skip) {
240
276
        phys_page_compact(d->uc, &d->phys_map, d->map.nodes);
241
276
    }
242
276
}
Unexecuted instantiation: address_space_dispatch_compact_sparc64
Unexecuted instantiation: address_space_dispatch_compact_ppc
Unexecuted instantiation: address_space_dispatch_compact_ppc64
Unexecuted instantiation: address_space_dispatch_compact_riscv32
Unexecuted instantiation: address_space_dispatch_compact_riscv64
address_space_dispatch_compact_s390x
Line
Count
Source
238
131k
{
239
131k
    if (d->phys_map.skip) {
240
131k
        phys_page_compact(d->uc, &d->phys_map, d->map.nodes);
241
131k
    }
242
131k
}
Unexecuted instantiation: address_space_dispatch_compact_tricore
243
244
static inline bool section_covers_addr(const MemoryRegionSection *section,
245
                                       hwaddr addr)
246
540M
{
247
    /* Memory topology clips a memory region to [0, 2^64); size.hi > 0 means
248
     * the section must cover the entire address space.
249
     */
250
540M
    return int128_gethi(section->size) ||
251
540M
           range_covers_byte(section->offset_within_address_space,
252
540M
                             int128_getlo(section->size), addr);
253
540M
}
254
255
static MemoryRegionSection *phys_page_find(AddressSpaceDispatch *d, hwaddr addr)
256
294k
{
257
#ifdef TARGET_ARM
258
    struct uc_struct *uc = d->uc;
259
#endif
260
294k
    PhysPageEntry lp = d->phys_map, *p;
261
294k
    Node *nodes = d->map.nodes;
262
294k
    MemoryRegionSection *sections = d->map.sections;
263
294k
    hwaddr index = addr >> TARGET_PAGE_BITS;
264
294k
    int i;
265
266
588k
    for (i = P_L2_LEVELS; lp.skip && (i -= lp.skip) >= 0;) {
267
408k
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
268
114k
            return &sections[PHYS_SECTION_UNASSIGNED];
269
114k
        }
270
294k
        p = nodes[lp.ptr];
271
294k
        lp = p[(index >> (i * P_L2_BITS)) & (P_L2_SIZE - 1)];
272
294k
    }
273
274
179k
    if (section_covers_addr(&sections[lp.ptr], addr)) {
275
177k
        return &sections[lp.ptr];
276
177k
    } else {
277
1.89k
        return &sections[PHYS_SECTION_UNASSIGNED];
278
1.89k
    }
279
179k
}
280
281
/* Called from RCU critical section */
282
static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
283
                                                        hwaddr addr,
284
                                                        bool resolve_subpage)
285
540M
{
286
#ifdef TARGET_ARM
287
    struct uc_struct *uc = d->uc;
288
#endif
289
540M
    MemoryRegionSection *section = d->mru_section;
290
540M
    subpage_t *subpage;
291
292
540M
    if (!section || section == &d->map.sections[PHYS_SECTION_UNASSIGNED] ||
293
539M
        !section_covers_addr(section, addr)) {
294
294k
        section = phys_page_find(d, addr);
295
294k
        d->mru_section = section;
296
294k
    }
297
540M
    if (resolve_subpage && section->mr->subpage) {
298
0
        subpage = container_of(section->mr, subpage_t, iomem);
299
0
        section = &d->map.sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
300
0
    }
301
540M
    return section;
302
540M
}
303
304
/* Called from RCU critical section */
305
static MemoryRegionSection *
306
address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat,
307
                                 hwaddr *plen, bool resolve_subpage)
308
540M
{
309
540M
    MemoryRegionSection *section;
310
540M
    MemoryRegion *mr;
311
540M
    Int128 diff;
312
313
540M
    section = address_space_lookup_region(d, addr, resolve_subpage);
314
    /* Compute offset within MemoryRegionSection */
315
540M
    addr -= section->offset_within_address_space;
316
317
    /* Compute offset within MemoryRegion */
318
540M
    *xlat = addr + section->offset_within_region;
319
320
540M
    mr = section->mr;
321
322
    /* MMIO registers can be expected to perform full-width accesses based only
323
     * on their address, without considering adjacent registers that could
324
     * decode to completely different MemoryRegions.  When such registers
325
     * exist (e.g. I/O ports 0xcf8 and 0xcf9 on most PC chipsets), MMIO
326
     * regions overlap wildly.  For this reason we cannot clamp the accesses
327
     * here.
328
     *
329
     * If the length is small (as is the case for address_space_ldl/stl),
330
     * everything works fine.  If the incoming length is large, however,
331
     * the caller really has to do the clamping through memory_access_size.
332
     */
333
540M
    if (memory_region_is_ram(mr)) {
334
540M
        diff = int128_sub(section->size, int128_make64(addr));
335
540M
        *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
336
540M
    }
337
540M
    return section;
338
540M
}
339
340
/**
341
 * address_space_translate_iommu - translate an address through an IOMMU
342
 * memory region and then through the target address space.
343
 *
344
 * @iommu_mr: the IOMMU memory region that we start the translation from
345
 * @addr: the address to be translated through the MMU
346
 * @xlat: the translated address offset within the destination memory region.
347
 *        It cannot be %NULL.
348
 * @plen_out: valid read/write length of the translated address. It
349
 *            cannot be %NULL.
350
 * @page_mask_out: page mask for the translated address. This
351
 *            should only be meaningful for IOMMU translated
352
 *            addresses, since there may be huge pages that this bit
353
 *            would tell. It can be %NULL if we don't care about it.
354
 * @is_write: whether the translation operation is for write
355
 * @is_mmio: whether this can be MMIO, set true if it can
356
 * @target_as: the address space targeted by the IOMMU
357
 * @attrs: transaction attributes
358
 *
359
 * This function is called from RCU critical section.  It is the common
360
 * part of flatview_do_translate and address_space_translate_cached.
361
 */
362
static MemoryRegionSection address_space_translate_iommu(IOMMUMemoryRegion *iommu_mr,
363
                                                         hwaddr *xlat,
364
                                                         hwaddr *plen_out,
365
                                                         hwaddr *page_mask_out,
366
                                                         bool is_write,
367
                                                         bool is_mmio,
368
                                                         AddressSpace **target_as,
369
                                                         MemTxAttrs attrs)
370
0
{
371
0
    MemoryRegionSection *section;
372
0
    hwaddr page_mask = (hwaddr)-1;
373
0
    MemoryRegion *mr = MEMORY_REGION(iommu_mr);
374
375
0
    do {
376
0
        hwaddr addr = *xlat;
377
0
        IOMMUMemoryRegionClass *imrc = memory_region_get_iommu_class_nocheck(iommu_mr);
378
0
        int iommu_idx = 0;
379
0
        IOMMUTLBEntry iotlb;
380
381
0
        if (imrc->attrs_to_index) {
382
0
            iommu_idx = imrc->attrs_to_index(iommu_mr, attrs);
383
0
        }
384
385
0
        iotlb = imrc->translate(iommu_mr, addr, is_write ?
386
0
                                IOMMU_WO : IOMMU_RO, iommu_idx);
387
388
0
        if (!(iotlb.perm & (1 << is_write))) {
389
0
            goto unassigned;
390
0
        }
391
392
0
        addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
393
0
                | (addr & iotlb.addr_mask));
394
0
        page_mask &= iotlb.addr_mask;
395
0
        *plen_out = MIN(*plen_out, (addr | iotlb.addr_mask) - addr + 1);
396
0
        *target_as = iotlb.target_as;
397
398
0
        section = address_space_translate_internal(
399
0
                address_space_to_dispatch(iotlb.target_as), addr, xlat,
400
0
                plen_out, is_mmio);
401
402
0
        iommu_mr = memory_region_get_iommu(section->mr);
403
0
    } while (unlikely(iommu_mr));
404
405
0
    if (page_mask_out) {
406
0
        *page_mask_out = page_mask;
407
0
    }
408
0
    return *section;
409
410
0
unassigned:
411
0
    return (MemoryRegionSection) { .mr = &(mr->uc->io_mem_unassigned) };
412
0
}
413
414
/**
415
 * flatview_do_translate - translate an address in FlatView
416
 *
417
 * @fv: the flat view that we want to translate on
418
 * @addr: the address to be translated in above address space
419
 * @xlat: the translated address offset within memory region. It
420
 *        cannot be @NULL.
421
 * @plen_out: valid read/write length of the translated address. It
422
 *            can be @NULL when we don't care about it.
423
 * @page_mask_out: page mask for the translated address. This
424
 *            should only be meaningful for IOMMU translated
425
 *            addresses, since there may be huge pages that this bit
426
 *            would tell. It can be @NULL if we don't care about it.
427
 * @is_write: whether the translation operation is for write
428
 * @is_mmio: whether this can be MMIO, set true if it can
429
 * @target_as: the address space targeted by the IOMMU
430
 * @attrs: memory transaction attributes
431
 *
432
 * This function is called from RCU critical section
433
 */
434
static MemoryRegionSection flatview_do_translate(struct uc_struct *uc, FlatView *fv,
435
                                                 hwaddr addr,
436
                                                 hwaddr *xlat,
437
                                                 hwaddr *plen_out,
438
                                                 hwaddr *page_mask_out,
439
                                                 bool is_write,
440
                                                 bool is_mmio,
441
                                                 AddressSpace **target_as,
442
                                                 MemTxAttrs attrs)
443
537M
{
444
537M
    MemoryRegionSection *section;
445
537M
    IOMMUMemoryRegion *iommu_mr;
446
537M
    hwaddr plen = (hwaddr)(-1);
447
448
537M
    if (!plen_out) {
449
0
        plen_out = &plen;
450
0
    }
451
452
537M
    section = address_space_translate_internal(
453
537M
            flatview_to_dispatch(fv), addr, xlat,
454
537M
            plen_out, is_mmio);
455
456
537M
    iommu_mr = memory_region_get_iommu(section->mr);
457
537M
    if (unlikely(iommu_mr)) {
458
0
        return address_space_translate_iommu(iommu_mr, xlat,
459
0
                                             plen_out, page_mask_out,
460
0
                                             is_write, is_mmio,
461
0
                                             target_as, attrs);
462
0
    }
463
537M
    if (page_mask_out) {
464
        /* Not behind an IOMMU, use default page size. */
465
0
        *page_mask_out = ~TARGET_PAGE_MASK;
466
0
    }
467
468
537M
    return *section;
469
537M
}
470
471
/* Called from RCU critical section */
472
MemoryRegion *flatview_translate(struct uc_struct *uc, FlatView *fv, hwaddr addr, hwaddr *xlat,
473
                                 hwaddr *plen, bool is_write,
474
                                 MemTxAttrs attrs)
475
537M
{
476
537M
    MemoryRegion *mr;
477
537M
    MemoryRegionSection section;
478
537M
    AddressSpace *as = NULL;
479
480
    /* This can be MMIO, so setup MMIO bit. */
481
537M
    section = flatview_do_translate(uc, fv, addr, xlat, plen, NULL,
482
537M
                                    is_write, true, &as, attrs);
483
537M
    mr = section.mr;
484
485
537M
    return mr;
486
537M
}
flatview_translate_x86_64
Line
Count
Source
475
192M
{
476
192M
    MemoryRegion *mr;
477
192M
    MemoryRegionSection section;
478
192M
    AddressSpace *as = NULL;
479
480
    /* This can be MMIO, so setup MMIO bit. */
481
192M
    section = flatview_do_translate(uc, fv, addr, xlat, plen, NULL,
482
192M
                                    is_write, true, &as, attrs);
483
192M
    mr = section.mr;
484
485
192M
    return mr;
486
192M
}
flatview_translate_arm
Line
Count
Source
475
108M
{
476
108M
    MemoryRegion *mr;
477
108M
    MemoryRegionSection section;
478
108M
    AddressSpace *as = NULL;
479
480
    /* This can be MMIO, so setup MMIO bit. */
481
108M
    section = flatview_do_translate(uc, fv, addr, xlat, plen, NULL,
482
108M
                                    is_write, true, &as, attrs);
483
108M
    mr = section.mr;
484
485
108M
    return mr;
486
108M
}
flatview_translate_aarch64
Line
Count
Source
475
80.4M
{
476
80.4M
    MemoryRegion *mr;
477
80.4M
    MemoryRegionSection section;
478
80.4M
    AddressSpace *as = NULL;
479
480
    /* This can be MMIO, so setup MMIO bit. */
481
80.4M
    section = flatview_do_translate(uc, fv, addr, xlat, plen, NULL,
482
80.4M
                                    is_write, true, &as, attrs);
483
80.4M
    mr = section.mr;
484
485
80.4M
    return mr;
486
80.4M
}
flatview_translate_m68k
Line
Count
Source
475
53.1k
{
476
53.1k
    MemoryRegion *mr;
477
53.1k
    MemoryRegionSection section;
478
53.1k
    AddressSpace *as = NULL;
479
480
    /* This can be MMIO, so setup MMIO bit. */
481
53.1k
    section = flatview_do_translate(uc, fv, addr, xlat, plen, NULL,
482
53.1k
                                    is_write, true, &as, attrs);
483
53.1k
    mr = section.mr;
484
485
53.1k
    return mr;
486
53.1k
}
flatview_translate_mips
Line
Count
Source
475
9.09M
{
476
9.09M
    MemoryRegion *mr;
477
9.09M
    MemoryRegionSection section;
478
9.09M
    AddressSpace *as = NULL;
479
480
    /* This can be MMIO, so setup MMIO bit. */
481
9.09M
    section = flatview_do_translate(uc, fv, addr, xlat, plen, NULL,
482
9.09M
                                    is_write, true, &as, attrs);
483
9.09M
    mr = section.mr;
484
485
9.09M
    return mr;
486
9.09M
}
flatview_translate_mipsel
Line
Count
Source
475
13.5M
{
476
13.5M
    MemoryRegion *mr;
477
13.5M
    MemoryRegionSection section;
478
13.5M
    AddressSpace *as = NULL;
479
480
    /* This can be MMIO, so setup MMIO bit. */
481
13.5M
    section = flatview_do_translate(uc, fv, addr, xlat, plen, NULL,
482
13.5M
                                    is_write, true, &as, attrs);
483
13.5M
    mr = section.mr;
484
485
13.5M
    return mr;
486
13.5M
}
Unexecuted instantiation: flatview_translate_mips64
Unexecuted instantiation: flatview_translate_mips64el
flatview_translate_sparc
Line
Count
Source
475
62.6k
{
476
62.6k
    MemoryRegion *mr;
477
62.6k
    MemoryRegionSection section;
478
62.6k
    AddressSpace *as = NULL;
479
480
    /* This can be MMIO, so setup MMIO bit. */
481
62.6k
    section = flatview_do_translate(uc, fv, addr, xlat, plen, NULL,
482
62.6k
                                    is_write, true, &as, attrs);
483
62.6k
    mr = section.mr;
484
485
62.6k
    return mr;
486
62.6k
}
Unexecuted instantiation: flatview_translate_sparc64
Unexecuted instantiation: flatview_translate_ppc
Unexecuted instantiation: flatview_translate_ppc64
Unexecuted instantiation: flatview_translate_riscv32
Unexecuted instantiation: flatview_translate_riscv64
flatview_translate_s390x
Line
Count
Source
475
133M
{
476
133M
    MemoryRegion *mr;
477
133M
    MemoryRegionSection section;
478
133M
    AddressSpace *as = NULL;
479
480
    /* This can be MMIO, so setup MMIO bit. */
481
133M
    section = flatview_do_translate(uc, fv, addr, xlat, plen, NULL,
482
133M
                                    is_write, true, &as, attrs);
483
133M
    mr = section.mr;
484
485
133M
    return mr;
486
133M
}
Unexecuted instantiation: flatview_translate_tricore
487
488
/* Called from RCU critical section */
489
MemoryRegionSection *
490
address_space_translate_for_iotlb(CPUState *cpu, int asidx, hwaddr addr,
491
                                  hwaddr *xlat, hwaddr *plen,
492
                                  MemTxAttrs attrs, int *prot)
493
2.72M
{
494
2.72M
    MemoryRegionSection *section;
495
2.72M
    IOMMUMemoryRegion *iommu_mr;
496
2.72M
    IOMMUMemoryRegionClass *imrc;
497
2.72M
    IOMMUTLBEntry iotlb;
498
2.72M
    int iommu_idx;
499
2.72M
    AddressSpaceDispatch *d = cpu->cpu_ases[asidx].memory_dispatch;
500
501
2.72M
    for (;;) {
502
2.72M
        section = address_space_translate_internal(d, addr, &addr, plen, false);
503
504
2.72M
        iommu_mr = memory_region_get_iommu(section->mr);
505
2.72M
        if (!iommu_mr) {
506
2.72M
            break;
507
2.72M
        }
508
509
0
        imrc = memory_region_get_iommu_class_nocheck(iommu_mr);
510
511
0
        iommu_idx = imrc->attrs_to_index(iommu_mr, attrs);
512
513
        // tcg_register_iommu_notifier(cpu, iommu_mr, iommu_idx);
514
515
        /* We need all the permissions, so pass IOMMU_NONE so the IOMMU
516
         * doesn't short-cut its translation table walk.
517
         */
518
0
        iotlb = imrc->translate(iommu_mr, addr, IOMMU_NONE, iommu_idx);
519
0
        addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
520
0
                | (addr & iotlb.addr_mask));
521
        /* Update the caller's prot bits to remove permissions the IOMMU
522
         * is giving us a failure response for. If we get down to no
523
         * permissions left at all we can give up now.
524
         */
525
0
        if (!(iotlb.perm & IOMMU_RO)) {
526
0
            *prot &= ~(PAGE_READ | PAGE_EXEC);
527
0
        }
528
0
        if (!(iotlb.perm & IOMMU_WO)) {
529
0
            *prot &= ~PAGE_WRITE;
530
0
        }
531
532
0
        if (!*prot) {
533
0
            goto translate_fail;
534
0
        }
535
536
0
        d = flatview_to_dispatch(address_space_to_flatview(iotlb.target_as));
537
0
    }
538
539
2.72M
    assert(!(memory_region_get_iommu(section->mr) != NULL));
540
2.72M
    *xlat = addr;
541
    // Unicorn:
542
    //   If there is no memory mapped but still we start emulation, we will get
543
    //   a default memory region section and it would be marked as an IO memory
544
    //   in cputlb which prevents further fecthing and execution.
545
    //
546
    //   The reason we set prot to 0 here is not to setting protection but to notify
547
    //   the outer function to add a new **blank** tlb which will never be hitted.
548
2.72M
    if (!memory_region_is_ram(section->mr) && section == &d->map.sections[PHYS_SECTION_UNASSIGNED]) {
549
58.6k
        *prot = 0;
550
58.6k
    }
551
2.72M
    return section;
552
553
0
translate_fail:
554
0
    return &d->map.sections[PHYS_SECTION_UNASSIGNED];
555
2.72M
}
address_space_translate_for_iotlb_x86_64
Line
Count
Source
493
242k
{
494
242k
    MemoryRegionSection *section;
495
242k
    IOMMUMemoryRegion *iommu_mr;
496
242k
    IOMMUMemoryRegionClass *imrc;
497
242k
    IOMMUTLBEntry iotlb;
498
242k
    int iommu_idx;
499
242k
    AddressSpaceDispatch *d = cpu->cpu_ases[asidx].memory_dispatch;
500
501
242k
    for (;;) {
502
242k
        section = address_space_translate_internal(d, addr, &addr, plen, false);
503
504
242k
        iommu_mr = memory_region_get_iommu(section->mr);
505
242k
        if (!iommu_mr) {
506
242k
            break;
507
242k
        }
508
509
0
        imrc = memory_region_get_iommu_class_nocheck(iommu_mr);
510
511
0
        iommu_idx = imrc->attrs_to_index(iommu_mr, attrs);
512
513
        // tcg_register_iommu_notifier(cpu, iommu_mr, iommu_idx);
514
515
        /* We need all the permissions, so pass IOMMU_NONE so the IOMMU
516
         * doesn't short-cut its translation table walk.
517
         */
518
0
        iotlb = imrc->translate(iommu_mr, addr, IOMMU_NONE, iommu_idx);
519
0
        addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
520
0
                | (addr & iotlb.addr_mask));
521
        /* Update the caller's prot bits to remove permissions the IOMMU
522
         * is giving us a failure response for. If we get down to no
523
         * permissions left at all we can give up now.
524
         */
525
0
        if (!(iotlb.perm & IOMMU_RO)) {
526
0
            *prot &= ~(PAGE_READ | PAGE_EXEC);
527
0
        }
528
0
        if (!(iotlb.perm & IOMMU_WO)) {
529
0
            *prot &= ~PAGE_WRITE;
530
0
        }
531
532
0
        if (!*prot) {
533
0
            goto translate_fail;
534
0
        }
535
536
0
        d = flatview_to_dispatch(address_space_to_flatview(iotlb.target_as));
537
0
    }
538
539
242k
    assert(!(memory_region_get_iommu(section->mr) != NULL));
540
242k
    *xlat = addr;
541
    // Unicorn:
542
    //   If there is no memory mapped but still we start emulation, we will get
543
    //   a default memory region section and it would be marked as an IO memory
544
    //   in cputlb which prevents further fecthing and execution.
545
    //
546
    //   The reason we set prot to 0 here is not to setting protection but to notify
547
    //   the outer function to add a new **blank** tlb which will never be hitted.
548
242k
    if (!memory_region_is_ram(section->mr) && section == &d->map.sections[PHYS_SECTION_UNASSIGNED]) {
549
23.7k
        *prot = 0;
550
23.7k
    }
551
242k
    return section;
552
553
0
translate_fail:
554
0
    return &d->map.sections[PHYS_SECTION_UNASSIGNED];
555
242k
}
address_space_translate_for_iotlb_arm
Line
Count
Source
493
743k
{
494
743k
    MemoryRegionSection *section;
495
743k
    IOMMUMemoryRegion *iommu_mr;
496
743k
    IOMMUMemoryRegionClass *imrc;
497
743k
    IOMMUTLBEntry iotlb;
498
743k
    int iommu_idx;
499
743k
    AddressSpaceDispatch *d = cpu->cpu_ases[asidx].memory_dispatch;
500
501
743k
    for (;;) {
502
743k
        section = address_space_translate_internal(d, addr, &addr, plen, false);
503
504
743k
        iommu_mr = memory_region_get_iommu(section->mr);
505
743k
        if (!iommu_mr) {
506
743k
            break;
507
743k
        }
508
509
0
        imrc = memory_region_get_iommu_class_nocheck(iommu_mr);
510
511
0
        iommu_idx = imrc->attrs_to_index(iommu_mr, attrs);
512
513
        // tcg_register_iommu_notifier(cpu, iommu_mr, iommu_idx);
514
515
        /* We need all the permissions, so pass IOMMU_NONE so the IOMMU
516
         * doesn't short-cut its translation table walk.
517
         */
518
0
        iotlb = imrc->translate(iommu_mr, addr, IOMMU_NONE, iommu_idx);
519
0
        addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
520
0
                | (addr & iotlb.addr_mask));
521
        /* Update the caller's prot bits to remove permissions the IOMMU
522
         * is giving us a failure response for. If we get down to no
523
         * permissions left at all we can give up now.
524
         */
525
0
        if (!(iotlb.perm & IOMMU_RO)) {
526
0
            *prot &= ~(PAGE_READ | PAGE_EXEC);
527
0
        }
528
0
        if (!(iotlb.perm & IOMMU_WO)) {
529
0
            *prot &= ~PAGE_WRITE;
530
0
        }
531
532
0
        if (!*prot) {
533
0
            goto translate_fail;
534
0
        }
535
536
0
        d = flatview_to_dispatch(address_space_to_flatview(iotlb.target_as));
537
0
    }
538
539
743k
    assert(!(memory_region_get_iommu(section->mr) != NULL));
540
743k
    *xlat = addr;
541
    // Unicorn:
542
    //   If there is no memory mapped but still we start emulation, we will get
543
    //   a default memory region section and it would be marked as an IO memory
544
    //   in cputlb which prevents further fecthing and execution.
545
    //
546
    //   The reason we set prot to 0 here is not to setting protection but to notify
547
    //   the outer function to add a new **blank** tlb which will never be hitted.
548
743k
    if (!memory_region_is_ram(section->mr) && section == &d->map.sections[PHYS_SECTION_UNASSIGNED]) {
549
11.3k
        *prot = 0;
550
11.3k
    }
551
743k
    return section;
552
553
0
translate_fail:
554
0
    return &d->map.sections[PHYS_SECTION_UNASSIGNED];
555
743k
}
address_space_translate_for_iotlb_aarch64
Line
Count
Source
493
286k
{
494
286k
    MemoryRegionSection *section;
495
286k
    IOMMUMemoryRegion *iommu_mr;
496
286k
    IOMMUMemoryRegionClass *imrc;
497
286k
    IOMMUTLBEntry iotlb;
498
286k
    int iommu_idx;
499
286k
    AddressSpaceDispatch *d = cpu->cpu_ases[asidx].memory_dispatch;
500
501
286k
    for (;;) {
502
286k
        section = address_space_translate_internal(d, addr, &addr, plen, false);
503
504
286k
        iommu_mr = memory_region_get_iommu(section->mr);
505
286k
        if (!iommu_mr) {
506
286k
            break;
507
286k
        }
508
509
0
        imrc = memory_region_get_iommu_class_nocheck(iommu_mr);
510
511
0
        iommu_idx = imrc->attrs_to_index(iommu_mr, attrs);
512
513
        // tcg_register_iommu_notifier(cpu, iommu_mr, iommu_idx);
514
515
        /* We need all the permissions, so pass IOMMU_NONE so the IOMMU
516
         * doesn't short-cut its translation table walk.
517
         */
518
0
        iotlb = imrc->translate(iommu_mr, addr, IOMMU_NONE, iommu_idx);
519
0
        addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
520
0
                | (addr & iotlb.addr_mask));
521
        /* Update the caller's prot bits to remove permissions the IOMMU
522
         * is giving us a failure response for. If we get down to no
523
         * permissions left at all we can give up now.
524
         */
525
0
        if (!(iotlb.perm & IOMMU_RO)) {
526
0
            *prot &= ~(PAGE_READ | PAGE_EXEC);
527
0
        }
528
0
        if (!(iotlb.perm & IOMMU_WO)) {
529
0
            *prot &= ~PAGE_WRITE;
530
0
        }
531
532
0
        if (!*prot) {
533
0
            goto translate_fail;
534
0
        }
535
536
0
        d = flatview_to_dispatch(address_space_to_flatview(iotlb.target_as));
537
0
    }
538
539
286k
    assert(!(memory_region_get_iommu(section->mr) != NULL));
540
286k
    *xlat = addr;
541
    // Unicorn:
542
    //   If there is no memory mapped but still we start emulation, we will get
543
    //   a default memory region section and it would be marked as an IO memory
544
    //   in cputlb which prevents further fecthing and execution.
545
    //
546
    //   The reason we set prot to 0 here is not to setting protection but to notify
547
    //   the outer function to add a new **blank** tlb which will never be hitted.
548
286k
    if (!memory_region_is_ram(section->mr) && section == &d->map.sections[PHYS_SECTION_UNASSIGNED]) {
549
20.7k
        *prot = 0;
550
20.7k
    }
551
286k
    return section;
552
553
0
translate_fail:
554
0
    return &d->map.sections[PHYS_SECTION_UNASSIGNED];
555
286k
}
address_space_translate_for_iotlb_m68k
Line
Count
Source
493
56
{
494
56
    MemoryRegionSection *section;
495
56
    IOMMUMemoryRegion *iommu_mr;
496
56
    IOMMUMemoryRegionClass *imrc;
497
56
    IOMMUTLBEntry iotlb;
498
56
    int iommu_idx;
499
56
    AddressSpaceDispatch *d = cpu->cpu_ases[asidx].memory_dispatch;
500
501
56
    for (;;) {
502
56
        section = address_space_translate_internal(d, addr, &addr, plen, false);
503
504
56
        iommu_mr = memory_region_get_iommu(section->mr);
505
56
        if (!iommu_mr) {
506
56
            break;
507
56
        }
508
509
0
        imrc = memory_region_get_iommu_class_nocheck(iommu_mr);
510
511
0
        iommu_idx = imrc->attrs_to_index(iommu_mr, attrs);
512
513
        // tcg_register_iommu_notifier(cpu, iommu_mr, iommu_idx);
514
515
        /* We need all the permissions, so pass IOMMU_NONE so the IOMMU
516
         * doesn't short-cut its translation table walk.
517
         */
518
0
        iotlb = imrc->translate(iommu_mr, addr, IOMMU_NONE, iommu_idx);
519
0
        addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
520
0
                | (addr & iotlb.addr_mask));
521
        /* Update the caller's prot bits to remove permissions the IOMMU
522
         * is giving us a failure response for. If we get down to no
523
         * permissions left at all we can give up now.
524
         */
525
0
        if (!(iotlb.perm & IOMMU_RO)) {
526
0
            *prot &= ~(PAGE_READ | PAGE_EXEC);
527
0
        }
528
0
        if (!(iotlb.perm & IOMMU_WO)) {
529
0
            *prot &= ~PAGE_WRITE;
530
0
        }
531
532
0
        if (!*prot) {
533
0
            goto translate_fail;
534
0
        }
535
536
0
        d = flatview_to_dispatch(address_space_to_flatview(iotlb.target_as));
537
0
    }
538
539
56
    assert(!(memory_region_get_iommu(section->mr) != NULL));
540
56
    *xlat = addr;
541
    // Unicorn:
542
    //   If there is no memory mapped but still we start emulation, we will get
543
    //   a default memory region section and it would be marked as an IO memory
544
    //   in cputlb which prevents further fecthing and execution.
545
    //
546
    //   The reason we set prot to 0 here is not to setting protection but to notify
547
    //   the outer function to add a new **blank** tlb which will never be hitted.
548
56
    if (!memory_region_is_ram(section->mr) && section == &d->map.sections[PHYS_SECTION_UNASSIGNED]) {
549
9
        *prot = 0;
550
9
    }
551
56
    return section;
552
553
0
translate_fail:
554
0
    return &d->map.sections[PHYS_SECTION_UNASSIGNED];
555
56
}
address_space_translate_for_iotlb_mips
Line
Count
Source
493
279k
{
494
279k
    MemoryRegionSection *section;
495
279k
    IOMMUMemoryRegion *iommu_mr;
496
279k
    IOMMUMemoryRegionClass *imrc;
497
279k
    IOMMUTLBEntry iotlb;
498
279k
    int iommu_idx;
499
279k
    AddressSpaceDispatch *d = cpu->cpu_ases[asidx].memory_dispatch;
500
501
279k
    for (;;) {
502
279k
        section = address_space_translate_internal(d, addr, &addr, plen, false);
503
504
279k
        iommu_mr = memory_region_get_iommu(section->mr);
505
279k
        if (!iommu_mr) {
506
279k
            break;
507
279k
        }
508
509
0
        imrc = memory_region_get_iommu_class_nocheck(iommu_mr);
510
511
0
        iommu_idx = imrc->attrs_to_index(iommu_mr, attrs);
512
513
        // tcg_register_iommu_notifier(cpu, iommu_mr, iommu_idx);
514
515
        /* We need all the permissions, so pass IOMMU_NONE so the IOMMU
516
         * doesn't short-cut its translation table walk.
517
         */
518
0
        iotlb = imrc->translate(iommu_mr, addr, IOMMU_NONE, iommu_idx);
519
0
        addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
520
0
                | (addr & iotlb.addr_mask));
521
        /* Update the caller's prot bits to remove permissions the IOMMU
522
         * is giving us a failure response for. If we get down to no
523
         * permissions left at all we can give up now.
524
         */
525
0
        if (!(iotlb.perm & IOMMU_RO)) {
526
0
            *prot &= ~(PAGE_READ | PAGE_EXEC);
527
0
        }
528
0
        if (!(iotlb.perm & IOMMU_WO)) {
529
0
            *prot &= ~PAGE_WRITE;
530
0
        }
531
532
0
        if (!*prot) {
533
0
            goto translate_fail;
534
0
        }
535
536
0
        d = flatview_to_dispatch(address_space_to_flatview(iotlb.target_as));
537
0
    }
538
539
279k
    assert(!(memory_region_get_iommu(section->mr) != NULL));
540
279k
    *xlat = addr;
541
    // Unicorn:
542
    //   If there is no memory mapped but still we start emulation, we will get
543
    //   a default memory region section and it would be marked as an IO memory
544
    //   in cputlb which prevents further fecthing and execution.
545
    //
546
    //   The reason we set prot to 0 here is not to setting protection but to notify
547
    //   the outer function to add a new **blank** tlb which will never be hitted.
548
279k
    if (!memory_region_is_ram(section->mr) && section == &d->map.sections[PHYS_SECTION_UNASSIGNED]) {
549
1.03k
        *prot = 0;
550
1.03k
    }
551
279k
    return section;
552
553
0
translate_fail:
554
0
    return &d->map.sections[PHYS_SECTION_UNASSIGNED];
555
279k
}
address_space_translate_for_iotlb_mipsel
Line
Count
Source
493
26.0k
{
494
26.0k
    MemoryRegionSection *section;
495
26.0k
    IOMMUMemoryRegion *iommu_mr;
496
26.0k
    IOMMUMemoryRegionClass *imrc;
497
26.0k
    IOMMUTLBEntry iotlb;
498
26.0k
    int iommu_idx;
499
26.0k
    AddressSpaceDispatch *d = cpu->cpu_ases[asidx].memory_dispatch;
500
501
26.0k
    for (;;) {
502
26.0k
        section = address_space_translate_internal(d, addr, &addr, plen, false);
503
504
26.0k
        iommu_mr = memory_region_get_iommu(section->mr);
505
26.0k
        if (!iommu_mr) {
506
26.0k
            break;
507
26.0k
        }
508
509
0
        imrc = memory_region_get_iommu_class_nocheck(iommu_mr);
510
511
0
        iommu_idx = imrc->attrs_to_index(iommu_mr, attrs);
512
513
        // tcg_register_iommu_notifier(cpu, iommu_mr, iommu_idx);
514
515
        /* We need all the permissions, so pass IOMMU_NONE so the IOMMU
516
         * doesn't short-cut its translation table walk.
517
         */
518
0
        iotlb = imrc->translate(iommu_mr, addr, IOMMU_NONE, iommu_idx);
519
0
        addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
520
0
                | (addr & iotlb.addr_mask));
521
        /* Update the caller's prot bits to remove permissions the IOMMU
522
         * is giving us a failure response for. If we get down to no
523
         * permissions left at all we can give up now.
524
         */
525
0
        if (!(iotlb.perm & IOMMU_RO)) {
526
0
            *prot &= ~(PAGE_READ | PAGE_EXEC);
527
0
        }
528
0
        if (!(iotlb.perm & IOMMU_WO)) {
529
0
            *prot &= ~PAGE_WRITE;
530
0
        }
531
532
0
        if (!*prot) {
533
0
            goto translate_fail;
534
0
        }
535
536
0
        d = flatview_to_dispatch(address_space_to_flatview(iotlb.target_as));
537
0
    }
538
539
26.0k
    assert(!(memory_region_get_iommu(section->mr) != NULL));
540
26.0k
    *xlat = addr;
541
    // Unicorn:
542
    //   If there is no memory mapped but still we start emulation, we will get
543
    //   a default memory region section and it would be marked as an IO memory
544
    //   in cputlb which prevents further fecthing and execution.
545
    //
546
    //   The reason we set prot to 0 here is not to setting protection but to notify
547
    //   the outer function to add a new **blank** tlb which will never be hitted.
548
26.0k
    if (!memory_region_is_ram(section->mr) && section == &d->map.sections[PHYS_SECTION_UNASSIGNED]) {
549
1.76k
        *prot = 0;
550
1.76k
    }
551
26.0k
    return section;
552
553
0
translate_fail:
554
0
    return &d->map.sections[PHYS_SECTION_UNASSIGNED];
555
26.0k
}
Unexecuted instantiation: address_space_translate_for_iotlb_mips64
Unexecuted instantiation: address_space_translate_for_iotlb_mips64el
address_space_translate_for_iotlb_sparc
Line
Count
Source
493
67
{
494
67
    MemoryRegionSection *section;
495
67
    IOMMUMemoryRegion *iommu_mr;
496
67
    IOMMUMemoryRegionClass *imrc;
497
67
    IOMMUTLBEntry iotlb;
498
67
    int iommu_idx;
499
67
    AddressSpaceDispatch *d = cpu->cpu_ases[asidx].memory_dispatch;
500
501
67
    for (;;) {
502
67
        section = address_space_translate_internal(d, addr, &addr, plen, false);
503
504
67
        iommu_mr = memory_region_get_iommu(section->mr);
505
67
        if (!iommu_mr) {
506
67
            break;
507
67
        }
508
509
0
        imrc = memory_region_get_iommu_class_nocheck(iommu_mr);
510
511
0
        iommu_idx = imrc->attrs_to_index(iommu_mr, attrs);
512
513
        // tcg_register_iommu_notifier(cpu, iommu_mr, iommu_idx);
514
515
        /* We need all the permissions, so pass IOMMU_NONE so the IOMMU
516
         * doesn't short-cut its translation table walk.
517
         */
518
0
        iotlb = imrc->translate(iommu_mr, addr, IOMMU_NONE, iommu_idx);
519
0
        addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
520
0
                | (addr & iotlb.addr_mask));
521
        /* Update the caller's prot bits to remove permissions the IOMMU
522
         * is giving us a failure response for. If we get down to no
523
         * permissions left at all we can give up now.
524
         */
525
0
        if (!(iotlb.perm & IOMMU_RO)) {
526
0
            *prot &= ~(PAGE_READ | PAGE_EXEC);
527
0
        }
528
0
        if (!(iotlb.perm & IOMMU_WO)) {
529
0
            *prot &= ~PAGE_WRITE;
530
0
        }
531
532
0
        if (!*prot) {
533
0
            goto translate_fail;
534
0
        }
535
536
0
        d = flatview_to_dispatch(address_space_to_flatview(iotlb.target_as));
537
0
    }
538
539
67
    assert(!(memory_region_get_iommu(section->mr) != NULL));
540
67
    *xlat = addr;
541
    // Unicorn:
542
    //   If there is no memory mapped but still we start emulation, we will get
543
    //   a default memory region section and it would be marked as an IO memory
544
    //   in cputlb which prevents further fecthing and execution.
545
    //
546
    //   The reason we set prot to 0 here is not to setting protection but to notify
547
    //   the outer function to add a new **blank** tlb which will never be hitted.
548
67
    if (!memory_region_is_ram(section->mr) && section == &d->map.sections[PHYS_SECTION_UNASSIGNED]) {
549
7
        *prot = 0;
550
7
    }
551
67
    return section;
552
553
0
translate_fail:
554
0
    return &d->map.sections[PHYS_SECTION_UNASSIGNED];
555
67
}
Unexecuted instantiation: address_space_translate_for_iotlb_sparc64
Unexecuted instantiation: address_space_translate_for_iotlb_ppc
Unexecuted instantiation: address_space_translate_for_iotlb_ppc64
Unexecuted instantiation: address_space_translate_for_iotlb_riscv32
Unexecuted instantiation: address_space_translate_for_iotlb_riscv64
address_space_translate_for_iotlb_s390x
Line
Count
Source
493
1.14M
{
494
1.14M
    MemoryRegionSection *section;
495
1.14M
    IOMMUMemoryRegion *iommu_mr;
496
1.14M
    IOMMUMemoryRegionClass *imrc;
497
1.14M
    IOMMUTLBEntry iotlb;
498
1.14M
    int iommu_idx;
499
1.14M
    AddressSpaceDispatch *d = cpu->cpu_ases[asidx].memory_dispatch;
500
501
1.14M
    for (;;) {
502
1.14M
        section = address_space_translate_internal(d, addr, &addr, plen, false);
503
504
1.14M
        iommu_mr = memory_region_get_iommu(section->mr);
505
1.14M
        if (!iommu_mr) {
506
1.14M
            break;
507
1.14M
        }
508
509
0
        imrc = memory_region_get_iommu_class_nocheck(iommu_mr);
510
511
0
        iommu_idx = imrc->attrs_to_index(iommu_mr, attrs);
512
513
        // tcg_register_iommu_notifier(cpu, iommu_mr, iommu_idx);
514
515
        /* We need all the permissions, so pass IOMMU_NONE so the IOMMU
516
         * doesn't short-cut its translation table walk.
517
         */
518
0
        iotlb = imrc->translate(iommu_mr, addr, IOMMU_NONE, iommu_idx);
519
0
        addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
520
0
                | (addr & iotlb.addr_mask));
521
        /* Update the caller's prot bits to remove permissions the IOMMU
522
         * is giving us a failure response for. If we get down to no
523
         * permissions left at all we can give up now.
524
         */
525
0
        if (!(iotlb.perm & IOMMU_RO)) {
526
0
            *prot &= ~(PAGE_READ | PAGE_EXEC);
527
0
        }
528
0
        if (!(iotlb.perm & IOMMU_WO)) {
529
0
            *prot &= ~PAGE_WRITE;
530
0
        }
531
532
0
        if (!*prot) {
533
0
            goto translate_fail;
534
0
        }
535
536
0
        d = flatview_to_dispatch(address_space_to_flatview(iotlb.target_as));
537
0
    }
538
539
1.14M
    assert(!(memory_region_get_iommu(section->mr) != NULL));
540
1.14M
    *xlat = addr;
541
    // Unicorn:
542
    //   If there is no memory mapped but still we start emulation, we will get
543
    //   a default memory region section and it would be marked as an IO memory
544
    //   in cputlb which prevents further fecthing and execution.
545
    //
546
    //   The reason we set prot to 0 here is not to setting protection but to notify
547
    //   the outer function to add a new **blank** tlb which will never be hitted.
548
1.14M
    if (!memory_region_is_ram(section->mr) && section == &d->map.sections[PHYS_SECTION_UNASSIGNED]) {
549
0
        *prot = 0;
550
0
    }
551
1.14M
    return section;
552
553
0
translate_fail:
554
0
    return &d->map.sections[PHYS_SECTION_UNASSIGNED];
555
1.14M
}
Unexecuted instantiation: address_space_translate_for_iotlb_tricore
556
557
CPUState *qemu_get_cpu(struct uc_struct *uc, int index)
558
297k
{
559
297k
    CPUState *cpu = uc->cpu;
560
297k
    if (cpu->cpu_index == index) {
561
297k
        return cpu;
562
297k
    }
563
564
0
    return NULL;
565
297k
}
Unexecuted instantiation: qemu_get_cpu_x86_64
Unexecuted instantiation: qemu_get_cpu_arm
Unexecuted instantiation: qemu_get_cpu_aarch64
Unexecuted instantiation: qemu_get_cpu_m68k
Unexecuted instantiation: qemu_get_cpu_mips
Unexecuted instantiation: qemu_get_cpu_mipsel
Unexecuted instantiation: qemu_get_cpu_mips64
Unexecuted instantiation: qemu_get_cpu_mips64el
Unexecuted instantiation: qemu_get_cpu_sparc
Unexecuted instantiation: qemu_get_cpu_sparc64
Unexecuted instantiation: qemu_get_cpu_ppc
Unexecuted instantiation: qemu_get_cpu_ppc64
Unexecuted instantiation: qemu_get_cpu_riscv32
Unexecuted instantiation: qemu_get_cpu_riscv64
qemu_get_cpu_s390x
Line
Count
Source
558
297k
{
559
297k
    CPUState *cpu = uc->cpu;
560
297k
    if (cpu->cpu_index == index) {
561
297k
        return cpu;
562
297k
    }
563
564
0
    return NULL;
565
297k
}
Unexecuted instantiation: qemu_get_cpu_tricore
566
567
void cpu_address_space_init(CPUState *cpu, int asidx, MemoryRegion *mr)
568
384k
{
569
    /* Target code should have set num_ases before calling us */
570
384k
    assert(asidx < cpu->num_ases);
571
572
384k
    if (!cpu->cpu_ases) {
573
176k
        cpu->cpu_ases = g_new0(CPUAddressSpace, cpu->num_ases);
574
176k
        cpu->cpu_ases[0].cpu = cpu;
575
176k
        cpu->cpu_ases[0].as = &(cpu->uc->address_space_memory);
576
176k
        cpu->cpu_ases[0].tcg_as_listener.commit = tcg_commit;
577
176k
        memory_listener_register(&(cpu->cpu_ases[0].tcg_as_listener), cpu->cpu_ases[0].as);
578
176k
    }
579
    /* arm security memory */
580
384k
    if (asidx > 0) {
581
104k
        cpu->cpu_ases[asidx].cpu = cpu;
582
104k
        cpu->cpu_ases[asidx].as = &(cpu->uc->address_space_memory);
583
104k
        cpu->cpu_ases[asidx].tcg_as_listener.commit = tcg_commit;
584
104k
        memory_listener_register(&(cpu->cpu_ases[asidx].tcg_as_listener), cpu->cpu_ases[asidx].as);
585
104k
    }
586
384k
}
cpu_address_space_init_x86_64
Line
Count
Source
568
33.6k
{
569
    /* Target code should have set num_ases before calling us */
570
33.6k
    assert(asidx < cpu->num_ases);
571
572
33.6k
    if (!cpu->cpu_ases) {
573
33.6k
        cpu->cpu_ases = g_new0(CPUAddressSpace, cpu->num_ases);
574
33.6k
        cpu->cpu_ases[0].cpu = cpu;
575
33.6k
        cpu->cpu_ases[0].as = &(cpu->uc->address_space_memory);
576
33.6k
        cpu->cpu_ases[0].tcg_as_listener.commit = tcg_commit;
577
33.6k
        memory_listener_register(&(cpu->cpu_ases[0].tcg_as_listener), cpu->cpu_ases[0].as);
578
33.6k
    }
579
    /* arm security memory */
580
33.6k
    if (asidx > 0) {
581
0
        cpu->cpu_ases[asidx].cpu = cpu;
582
0
        cpu->cpu_ases[asidx].as = &(cpu->uc->address_space_memory);
583
0
        cpu->cpu_ases[asidx].tcg_as_listener.commit = tcg_commit;
584
0
        memory_listener_register(&(cpu->cpu_ases[asidx].tcg_as_listener), cpu->cpu_ases[asidx].as);
585
0
    }
586
33.6k
}
cpu_address_space_init_arm
Line
Count
Source
568
131k
{
569
    /* Target code should have set num_ases before calling us */
570
131k
    assert(asidx < cpu->num_ases);
571
572
131k
    if (!cpu->cpu_ases) {
573
43.9k
        cpu->cpu_ases = g_new0(CPUAddressSpace, cpu->num_ases);
574
43.9k
        cpu->cpu_ases[0].cpu = cpu;
575
43.9k
        cpu->cpu_ases[0].as = &(cpu->uc->address_space_memory);
576
43.9k
        cpu->cpu_ases[0].tcg_as_listener.commit = tcg_commit;
577
43.9k
        memory_listener_register(&(cpu->cpu_ases[0].tcg_as_listener), cpu->cpu_ases[0].as);
578
43.9k
    }
579
    /* arm security memory */
580
131k
    if (asidx > 0) {
581
43.9k
        cpu->cpu_ases[asidx].cpu = cpu;
582
43.9k
        cpu->cpu_ases[asidx].as = &(cpu->uc->address_space_memory);
583
43.9k
        cpu->cpu_ases[asidx].tcg_as_listener.commit = tcg_commit;
584
43.9k
        memory_listener_register(&(cpu->cpu_ases[asidx].tcg_as_listener), cpu->cpu_ases[asidx].as);
585
43.9k
    }
586
131k
}
cpu_address_space_init_aarch64
Line
Count
Source
568
180k
{
569
    /* Target code should have set num_ases before calling us */
570
180k
    assert(asidx < cpu->num_ases);
571
572
180k
    if (!cpu->cpu_ases) {
573
60.2k
        cpu->cpu_ases = g_new0(CPUAddressSpace, cpu->num_ases);
574
60.2k
        cpu->cpu_ases[0].cpu = cpu;
575
60.2k
        cpu->cpu_ases[0].as = &(cpu->uc->address_space_memory);
576
60.2k
        cpu->cpu_ases[0].tcg_as_listener.commit = tcg_commit;
577
60.2k
        memory_listener_register(&(cpu->cpu_ases[0].tcg_as_listener), cpu->cpu_ases[0].as);
578
60.2k
    }
579
    /* arm security memory */
580
180k
    if (asidx > 0) {
581
60.2k
        cpu->cpu_ases[asidx].cpu = cpu;
582
60.2k
        cpu->cpu_ases[asidx].as = &(cpu->uc->address_space_memory);
583
60.2k
        cpu->cpu_ases[asidx].tcg_as_listener.commit = tcg_commit;
584
60.2k
        memory_listener_register(&(cpu->cpu_ases[asidx].tcg_as_listener), cpu->cpu_ases[asidx].as);
585
60.2k
    }
586
180k
}
cpu_address_space_init_m68k
Line
Count
Source
568
29
{
569
    /* Target code should have set num_ases before calling us */
570
29
    assert(asidx < cpu->num_ases);
571
572
29
    if (!cpu->cpu_ases) {
573
29
        cpu->cpu_ases = g_new0(CPUAddressSpace, cpu->num_ases);
574
29
        cpu->cpu_ases[0].cpu = cpu;
575
29
        cpu->cpu_ases[0].as = &(cpu->uc->address_space_memory);
576
29
        cpu->cpu_ases[0].tcg_as_listener.commit = tcg_commit;
577
29
        memory_listener_register(&(cpu->cpu_ases[0].tcg_as_listener), cpu->cpu_ases[0].as);
578
29
    }
579
    /* arm security memory */
580
29
    if (asidx > 0) {
581
0
        cpu->cpu_ases[asidx].cpu = cpu;
582
0
        cpu->cpu_ases[asidx].as = &(cpu->uc->address_space_memory);
583
0
        cpu->cpu_ases[asidx].tcg_as_listener.commit = tcg_commit;
584
0
        memory_listener_register(&(cpu->cpu_ases[asidx].tcg_as_listener), cpu->cpu_ases[asidx].as);
585
0
    }
586
29
}
cpu_address_space_init_mips
Line
Count
Source
568
7.41k
{
569
    /* Target code should have set num_ases before calling us */
570
7.41k
    assert(asidx < cpu->num_ases);
571
572
7.41k
    if (!cpu->cpu_ases) {
573
7.41k
        cpu->cpu_ases = g_new0(CPUAddressSpace, cpu->num_ases);
574
7.41k
        cpu->cpu_ases[0].cpu = cpu;
575
7.41k
        cpu->cpu_ases[0].as = &(cpu->uc->address_space_memory);
576
7.41k
        cpu->cpu_ases[0].tcg_as_listener.commit = tcg_commit;
577
7.41k
        memory_listener_register(&(cpu->cpu_ases[0].tcg_as_listener), cpu->cpu_ases[0].as);
578
7.41k
    }
579
    /* arm security memory */
580
7.41k
    if (asidx > 0) {
581
0
        cpu->cpu_ases[asidx].cpu = cpu;
582
0
        cpu->cpu_ases[asidx].as = &(cpu->uc->address_space_memory);
583
0
        cpu->cpu_ases[asidx].tcg_as_listener.commit = tcg_commit;
584
0
        memory_listener_register(&(cpu->cpu_ases[asidx].tcg_as_listener), cpu->cpu_ases[asidx].as);
585
0
    }
586
7.41k
}
cpu_address_space_init_mipsel
Line
Count
Source
568
8.94k
{
569
    /* Target code should have set num_ases before calling us */
570
8.94k
    assert(asidx < cpu->num_ases);
571
572
8.94k
    if (!cpu->cpu_ases) {
573
8.94k
        cpu->cpu_ases = g_new0(CPUAddressSpace, cpu->num_ases);
574
8.94k
        cpu->cpu_ases[0].cpu = cpu;
575
8.94k
        cpu->cpu_ases[0].as = &(cpu->uc->address_space_memory);
576
8.94k
        cpu->cpu_ases[0].tcg_as_listener.commit = tcg_commit;
577
8.94k
        memory_listener_register(&(cpu->cpu_ases[0].tcg_as_listener), cpu->cpu_ases[0].as);
578
8.94k
    }
579
    /* arm security memory */
580
8.94k
    if (asidx > 0) {
581
0
        cpu->cpu_ases[asidx].cpu = cpu;
582
0
        cpu->cpu_ases[asidx].as = &(cpu->uc->address_space_memory);
583
0
        cpu->cpu_ases[asidx].tcg_as_listener.commit = tcg_commit;
584
0
        memory_listener_register(&(cpu->cpu_ases[asidx].tcg_as_listener), cpu->cpu_ases[asidx].as);
585
0
    }
586
8.94k
}
Unexecuted instantiation: cpu_address_space_init_mips64
Unexecuted instantiation: cpu_address_space_init_mips64el
cpu_address_space_init_sparc
Line
Count
Source
568
46
{
569
    /* Target code should have set num_ases before calling us */
570
46
    assert(asidx < cpu->num_ases);
571
572
46
    if (!cpu->cpu_ases) {
573
46
        cpu->cpu_ases = g_new0(CPUAddressSpace, cpu->num_ases);
574
46
        cpu->cpu_ases[0].cpu = cpu;
575
46
        cpu->cpu_ases[0].as = &(cpu->uc->address_space_memory);
576
46
        cpu->cpu_ases[0].tcg_as_listener.commit = tcg_commit;
577
46
        memory_listener_register(&(cpu->cpu_ases[0].tcg_as_listener), cpu->cpu_ases[0].as);
578
46
    }
579
    /* arm security memory */
580
46
    if (asidx > 0) {
581
0
        cpu->cpu_ases[asidx].cpu = cpu;
582
0
        cpu->cpu_ases[asidx].as = &(cpu->uc->address_space_memory);
583
0
        cpu->cpu_ases[asidx].tcg_as_listener.commit = tcg_commit;
584
0
        memory_listener_register(&(cpu->cpu_ases[asidx].tcg_as_listener), cpu->cpu_ases[asidx].as);
585
0
    }
586
46
}
Unexecuted instantiation: cpu_address_space_init_sparc64
Unexecuted instantiation: cpu_address_space_init_ppc
Unexecuted instantiation: cpu_address_space_init_ppc64
Unexecuted instantiation: cpu_address_space_init_riscv32
Unexecuted instantiation: cpu_address_space_init_riscv64
cpu_address_space_init_s390x
Line
Count
Source
568
21.9k
{
569
    /* Target code should have set num_ases before calling us */
570
21.9k
    assert(asidx < cpu->num_ases);
571
572
21.9k
    if (!cpu->cpu_ases) {
573
21.9k
        cpu->cpu_ases = g_new0(CPUAddressSpace, cpu->num_ases);
574
21.9k
        cpu->cpu_ases[0].cpu = cpu;
575
21.9k
        cpu->cpu_ases[0].as = &(cpu->uc->address_space_memory);
576
21.9k
        cpu->cpu_ases[0].tcg_as_listener.commit = tcg_commit;
577
21.9k
        memory_listener_register(&(cpu->cpu_ases[0].tcg_as_listener), cpu->cpu_ases[0].as);
578
21.9k
    }
579
    /* arm security memory */
580
21.9k
    if (asidx > 0) {
581
0
        cpu->cpu_ases[asidx].cpu = cpu;
582
0
        cpu->cpu_ases[asidx].as = &(cpu->uc->address_space_memory);
583
0
        cpu->cpu_ases[asidx].tcg_as_listener.commit = tcg_commit;
584
0
        memory_listener_register(&(cpu->cpu_ases[asidx].tcg_as_listener), cpu->cpu_ases[asidx].as);
585
0
    }
586
21.9k
}
Unexecuted instantiation: cpu_address_space_init_tricore
587
588
AddressSpace *cpu_get_address_space(CPUState *cpu, int asidx)
589
98.5k
{
590
    /* only one AddressSpace. */
591
98.5k
    return cpu->cpu_ases[0].as;
592
98.5k
}
cpu_get_address_space_x86_64
Line
Count
Source
589
603
{
590
    /* only one AddressSpace. */
591
603
    return cpu->cpu_ases[0].as;
592
603
}
cpu_get_address_space_arm
Line
Count
Source
589
97.6k
{
590
    /* only one AddressSpace. */
591
97.6k
    return cpu->cpu_ases[0].as;
592
97.6k
}
cpu_get_address_space_aarch64
Line
Count
Source
589
370
{
590
    /* only one AddressSpace. */
591
370
    return cpu->cpu_ases[0].as;
592
370
}
Unexecuted instantiation: cpu_get_address_space_m68k
Unexecuted instantiation: cpu_get_address_space_mips
Unexecuted instantiation: cpu_get_address_space_mipsel
Unexecuted instantiation: cpu_get_address_space_mips64
Unexecuted instantiation: cpu_get_address_space_mips64el
Unexecuted instantiation: cpu_get_address_space_sparc
Unexecuted instantiation: cpu_get_address_space_sparc64
Unexecuted instantiation: cpu_get_address_space_ppc
Unexecuted instantiation: cpu_get_address_space_ppc64
Unexecuted instantiation: cpu_get_address_space_riscv32
Unexecuted instantiation: cpu_get_address_space_riscv64
Unexecuted instantiation: cpu_get_address_space_s390x
Unexecuted instantiation: cpu_get_address_space_tricore
593
594
void cpu_exec_unrealizefn(CPUState *cpu)
595
0
{
596
0
}
Unexecuted instantiation: cpu_exec_unrealizefn_x86_64
Unexecuted instantiation: cpu_exec_unrealizefn_arm
Unexecuted instantiation: cpu_exec_unrealizefn_aarch64
Unexecuted instantiation: cpu_exec_unrealizefn_m68k
Unexecuted instantiation: cpu_exec_unrealizefn_mips
Unexecuted instantiation: cpu_exec_unrealizefn_mipsel
Unexecuted instantiation: cpu_exec_unrealizefn_mips64
Unexecuted instantiation: cpu_exec_unrealizefn_mips64el
Unexecuted instantiation: cpu_exec_unrealizefn_sparc
Unexecuted instantiation: cpu_exec_unrealizefn_sparc64
Unexecuted instantiation: cpu_exec_unrealizefn_ppc
Unexecuted instantiation: cpu_exec_unrealizefn_ppc64
Unexecuted instantiation: cpu_exec_unrealizefn_riscv32
Unexecuted instantiation: cpu_exec_unrealizefn_riscv64
Unexecuted instantiation: cpu_exec_unrealizefn_s390x
Unexecuted instantiation: cpu_exec_unrealizefn_tricore
597
598
void cpu_exec_initfn(CPUState *cpu)
599
0
{
600
0
    cpu->num_ases = 1;
601
0
    cpu->as = &(cpu->uc->address_space_memory);
602
0
    cpu->memory = cpu->uc->system_memory;
603
0
}
Unexecuted instantiation: cpu_exec_initfn_x86_64
Unexecuted instantiation: cpu_exec_initfn_arm
Unexecuted instantiation: cpu_exec_initfn_aarch64
Unexecuted instantiation: cpu_exec_initfn_m68k
Unexecuted instantiation: cpu_exec_initfn_mips
Unexecuted instantiation: cpu_exec_initfn_mipsel
Unexecuted instantiation: cpu_exec_initfn_mips64
Unexecuted instantiation: cpu_exec_initfn_mips64el
Unexecuted instantiation: cpu_exec_initfn_sparc
Unexecuted instantiation: cpu_exec_initfn_sparc64
Unexecuted instantiation: cpu_exec_initfn_ppc
Unexecuted instantiation: cpu_exec_initfn_ppc64
Unexecuted instantiation: cpu_exec_initfn_riscv32
Unexecuted instantiation: cpu_exec_initfn_riscv64
Unexecuted instantiation: cpu_exec_initfn_s390x
Unexecuted instantiation: cpu_exec_initfn_tricore
604
605
void cpu_exec_realizefn(CPUState *cpu)
606
176k
{
607
176k
    CPUClass *cc = CPU_GET_CLASS(cpu);
608
609
176k
    cc->tcg_initialize(cpu->uc);
610
176k
    tlb_init(cpu);
611
176k
}
cpu_exec_realizefn_x86_64
Line
Count
Source
606
33.6k
{
607
33.6k
    CPUClass *cc = CPU_GET_CLASS(cpu);
608
609
33.6k
    cc->tcg_initialize(cpu->uc);
610
33.6k
    tlb_init(cpu);
611
33.6k
}
cpu_exec_realizefn_arm
Line
Count
Source
606
43.9k
{
607
43.9k
    CPUClass *cc = CPU_GET_CLASS(cpu);
608
609
43.9k
    cc->tcg_initialize(cpu->uc);
610
43.9k
    tlb_init(cpu);
611
43.9k
}
cpu_exec_realizefn_aarch64
Line
Count
Source
606
60.2k
{
607
60.2k
    CPUClass *cc = CPU_GET_CLASS(cpu);
608
609
60.2k
    cc->tcg_initialize(cpu->uc);
610
60.2k
    tlb_init(cpu);
611
60.2k
}
cpu_exec_realizefn_m68k
Line
Count
Source
606
29
{
607
29
    CPUClass *cc = CPU_GET_CLASS(cpu);
608
609
29
    cc->tcg_initialize(cpu->uc);
610
29
    tlb_init(cpu);
611
29
}
cpu_exec_realizefn_mips
Line
Count
Source
606
7.41k
{
607
7.41k
    CPUClass *cc = CPU_GET_CLASS(cpu);
608
609
7.41k
    cc->tcg_initialize(cpu->uc);
610
7.41k
    tlb_init(cpu);
611
7.41k
}
cpu_exec_realizefn_mipsel
Line
Count
Source
606
8.94k
{
607
8.94k
    CPUClass *cc = CPU_GET_CLASS(cpu);
608
609
8.94k
    cc->tcg_initialize(cpu->uc);
610
8.94k
    tlb_init(cpu);
611
8.94k
}
Unexecuted instantiation: cpu_exec_realizefn_mips64
Unexecuted instantiation: cpu_exec_realizefn_mips64el
cpu_exec_realizefn_sparc
Line
Count
Source
606
46
{
607
46
    CPUClass *cc = CPU_GET_CLASS(cpu);
608
609
46
    cc->tcg_initialize(cpu->uc);
610
46
    tlb_init(cpu);
611
46
}
Unexecuted instantiation: cpu_exec_realizefn_sparc64
Unexecuted instantiation: cpu_exec_realizefn_ppc
Unexecuted instantiation: cpu_exec_realizefn_ppc64
Unexecuted instantiation: cpu_exec_realizefn_riscv32
Unexecuted instantiation: cpu_exec_realizefn_riscv64
cpu_exec_realizefn_s390x
Line
Count
Source
606
21.9k
{
607
21.9k
    CPUClass *cc = CPU_GET_CLASS(cpu);
608
609
21.9k
    cc->tcg_initialize(cpu->uc);
610
21.9k
    tlb_init(cpu);
611
21.9k
}
Unexecuted instantiation: cpu_exec_realizefn_tricore
612
613
void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr, MemTxAttrs attrs)
614
0
{
615
0
    ram_addr_t ram_addr;
616
0
    MemoryRegion *mr;
617
0
    hwaddr l = 1;
618
619
0
    mr = address_space_translate(as, addr, &addr, &l, false, attrs);
620
0
    if (!memory_region_is_ram(mr)) {
621
0
        return;
622
0
    }
623
624
0
    ram_addr = memory_region_get_ram_addr(mr) + addr;
625
0
    tb_invalidate_phys_page_range(as->uc, ram_addr, ram_addr + 1);
626
0
}
Unexecuted instantiation: tb_invalidate_phys_addr_x86_64
Unexecuted instantiation: tb_invalidate_phys_addr_arm
Unexecuted instantiation: tb_invalidate_phys_addr_aarch64
Unexecuted instantiation: tb_invalidate_phys_addr_m68k
Unexecuted instantiation: tb_invalidate_phys_addr_mips
Unexecuted instantiation: tb_invalidate_phys_addr_mipsel
Unexecuted instantiation: tb_invalidate_phys_addr_mips64
Unexecuted instantiation: tb_invalidate_phys_addr_mips64el
Unexecuted instantiation: tb_invalidate_phys_addr_sparc
Unexecuted instantiation: tb_invalidate_phys_addr_sparc64
Unexecuted instantiation: tb_invalidate_phys_addr_ppc
Unexecuted instantiation: tb_invalidate_phys_addr_ppc64
Unexecuted instantiation: tb_invalidate_phys_addr_riscv32
Unexecuted instantiation: tb_invalidate_phys_addr_riscv64
Unexecuted instantiation: tb_invalidate_phys_addr_s390x
Unexecuted instantiation: tb_invalidate_phys_addr_tricore
627
628
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
629
281k
{
630
    /*
631
     * There may not be a virtual to physical translation for the pc
632
     * right now, but there may exist cached TB for this pc.
633
     * Flush the whole TB cache to force re-translation of such TBs.
634
     * This is heavyweight, but we're debugging anyway.
635
     */
636
281k
    tb_flush(cpu);
637
281k
}
638
639
/* Add a watchpoint.  */
640
int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
641
                          int flags, CPUWatchpoint **watchpoint)
642
93.0k
{
643
#if 0
644
    CPUWatchpoint *wp;
645
646
    /* forbid ranges which are empty or run off the end of the address space */
647
    if (len == 0 || (addr + len - 1) < addr) {
648
        error_report("tried to set invalid watchpoint at %"
649
                     VADDR_PRIx ", len=%" VADDR_PRIu, addr, len);
650
        return -EINVAL;
651
    }
652
    wp = g_malloc(sizeof(*wp));
653
654
    wp->vaddr = addr;
655
    wp->len = len;
656
    wp->flags = flags;
657
658
    /* keep all GDB-injected watchpoints in front */
659
    if (flags & BP_GDB) {
660
        QTAILQ_INSERT_HEAD(&cpu->watchpoints, wp, entry);
661
    } else {
662
        QTAILQ_INSERT_TAIL(&cpu->watchpoints, wp, entry);
663
    }
664
665
    tlb_flush_page(cpu, addr);
666
667
    if (watchpoint)
668
        *watchpoint = wp;
669
#endif
670
671
93.0k
    return 0;
672
93.0k
}
cpu_watchpoint_insert_x86_64
Line
Count
Source
642
37.5k
{
643
#if 0
644
    CPUWatchpoint *wp;
645
646
    /* forbid ranges which are empty or run off the end of the address space */
647
    if (len == 0 || (addr + len - 1) < addr) {
648
        error_report("tried to set invalid watchpoint at %"
649
                     VADDR_PRIx ", len=%" VADDR_PRIu, addr, len);
650
        return -EINVAL;
651
    }
652
    wp = g_malloc(sizeof(*wp));
653
654
    wp->vaddr = addr;
655
    wp->len = len;
656
    wp->flags = flags;
657
658
    /* keep all GDB-injected watchpoints in front */
659
    if (flags & BP_GDB) {
660
        QTAILQ_INSERT_HEAD(&cpu->watchpoints, wp, entry);
661
    } else {
662
        QTAILQ_INSERT_TAIL(&cpu->watchpoints, wp, entry);
663
    }
664
665
    tlb_flush_page(cpu, addr);
666
667
    if (watchpoint)
668
        *watchpoint = wp;
669
#endif
670
671
37.5k
    return 0;
672
37.5k
}
cpu_watchpoint_insert_arm
Line
Count
Source
642
7.43k
{
643
#if 0
644
    CPUWatchpoint *wp;
645
646
    /* forbid ranges which are empty or run off the end of the address space */
647
    if (len == 0 || (addr + len - 1) < addr) {
648
        error_report("tried to set invalid watchpoint at %"
649
                     VADDR_PRIx ", len=%" VADDR_PRIu, addr, len);
650
        return -EINVAL;
651
    }
652
    wp = g_malloc(sizeof(*wp));
653
654
    wp->vaddr = addr;
655
    wp->len = len;
656
    wp->flags = flags;
657
658
    /* keep all GDB-injected watchpoints in front */
659
    if (flags & BP_GDB) {
660
        QTAILQ_INSERT_HEAD(&cpu->watchpoints, wp, entry);
661
    } else {
662
        QTAILQ_INSERT_TAIL(&cpu->watchpoints, wp, entry);
663
    }
664
665
    tlb_flush_page(cpu, addr);
666
667
    if (watchpoint)
668
        *watchpoint = wp;
669
#endif
670
671
7.43k
    return 0;
672
7.43k
}
cpu_watchpoint_insert_aarch64
Line
Count
Source
642
8.22k
{
643
#if 0
644
    CPUWatchpoint *wp;
645
646
    /* forbid ranges which are empty or run off the end of the address space */
647
    if (len == 0 || (addr + len - 1) < addr) {
648
        error_report("tried to set invalid watchpoint at %"
649
                     VADDR_PRIx ", len=%" VADDR_PRIu, addr, len);
650
        return -EINVAL;
651
    }
652
    wp = g_malloc(sizeof(*wp));
653
654
    wp->vaddr = addr;
655
    wp->len = len;
656
    wp->flags = flags;
657
658
    /* keep all GDB-injected watchpoints in front */
659
    if (flags & BP_GDB) {
660
        QTAILQ_INSERT_HEAD(&cpu->watchpoints, wp, entry);
661
    } else {
662
        QTAILQ_INSERT_TAIL(&cpu->watchpoints, wp, entry);
663
    }
664
665
    tlb_flush_page(cpu, addr);
666
667
    if (watchpoint)
668
        *watchpoint = wp;
669
#endif
670
671
8.22k
    return 0;
672
8.22k
}
Unexecuted instantiation: cpu_watchpoint_insert_m68k
Unexecuted instantiation: cpu_watchpoint_insert_mips
Unexecuted instantiation: cpu_watchpoint_insert_mipsel
Unexecuted instantiation: cpu_watchpoint_insert_mips64
Unexecuted instantiation: cpu_watchpoint_insert_mips64el
Unexecuted instantiation: cpu_watchpoint_insert_sparc
Unexecuted instantiation: cpu_watchpoint_insert_sparc64
Unexecuted instantiation: cpu_watchpoint_insert_ppc
Unexecuted instantiation: cpu_watchpoint_insert_ppc64
Unexecuted instantiation: cpu_watchpoint_insert_riscv32
Unexecuted instantiation: cpu_watchpoint_insert_riscv64
cpu_watchpoint_insert_s390x
Line
Count
Source
642
39.7k
{
643
#if 0
644
    CPUWatchpoint *wp;
645
646
    /* forbid ranges which are empty or run off the end of the address space */
647
    if (len == 0 || (addr + len - 1) < addr) {
648
        error_report("tried to set invalid watchpoint at %"
649
                     VADDR_PRIx ", len=%" VADDR_PRIu, addr, len);
650
        return -EINVAL;
651
    }
652
    wp = g_malloc(sizeof(*wp));
653
654
    wp->vaddr = addr;
655
    wp->len = len;
656
    wp->flags = flags;
657
658
    /* keep all GDB-injected watchpoints in front */
659
    if (flags & BP_GDB) {
660
        QTAILQ_INSERT_HEAD(&cpu->watchpoints, wp, entry);
661
    } else {
662
        QTAILQ_INSERT_TAIL(&cpu->watchpoints, wp, entry);
663
    }
664
665
    tlb_flush_page(cpu, addr);
666
667
    if (watchpoint)
668
        *watchpoint = wp;
669
#endif
670
671
39.7k
    return 0;
672
39.7k
}
Unexecuted instantiation: cpu_watchpoint_insert_tricore
673
674
/* Remove a specific watchpoint by reference.  */
675
void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
676
0
{
677
#if 0
678
    QTAILQ_REMOVE(&cpu->watchpoints, watchpoint, entry);
679
680
    tlb_flush_page(cpu, watchpoint->vaddr);
681
682
    g_free(watchpoint);
683
#endif
684
0
}
Unexecuted instantiation: cpu_watchpoint_remove_by_ref_x86_64
Unexecuted instantiation: cpu_watchpoint_remove_by_ref_arm
Unexecuted instantiation: cpu_watchpoint_remove_by_ref_aarch64
Unexecuted instantiation: cpu_watchpoint_remove_by_ref_m68k
Unexecuted instantiation: cpu_watchpoint_remove_by_ref_mips
Unexecuted instantiation: cpu_watchpoint_remove_by_ref_mipsel
Unexecuted instantiation: cpu_watchpoint_remove_by_ref_mips64
Unexecuted instantiation: cpu_watchpoint_remove_by_ref_mips64el
Unexecuted instantiation: cpu_watchpoint_remove_by_ref_sparc
Unexecuted instantiation: cpu_watchpoint_remove_by_ref_sparc64
Unexecuted instantiation: cpu_watchpoint_remove_by_ref_ppc
Unexecuted instantiation: cpu_watchpoint_remove_by_ref_ppc64
Unexecuted instantiation: cpu_watchpoint_remove_by_ref_riscv32
Unexecuted instantiation: cpu_watchpoint_remove_by_ref_riscv64
Unexecuted instantiation: cpu_watchpoint_remove_by_ref_s390x
Unexecuted instantiation: cpu_watchpoint_remove_by_ref_tricore
685
686
/* Remove all matching watchpoints.  */
687
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
688
373k
{
689
#if 0
690
    CPUWatchpoint *wp, *next;
691
692
    QTAILQ_FOREACH_SAFE(wp, &cpu->watchpoints, entry, next) {
693
        if (wp->flags & mask) {
694
            cpu_watchpoint_remove_by_ref(cpu, wp);
695
        }
696
    }
697
#endif
698
373k
}
cpu_watchpoint_remove_all_x86_64
Line
Count
Source
688
67.2k
{
689
#if 0
690
    CPUWatchpoint *wp, *next;
691
692
    QTAILQ_FOREACH_SAFE(wp, &cpu->watchpoints, entry, next) {
693
        if (wp->flags & mask) {
694
            cpu_watchpoint_remove_by_ref(cpu, wp);
695
        }
696
    }
697
#endif
698
67.2k
}
cpu_watchpoint_remove_all_arm
Line
Count
Source
688
87.9k
{
689
#if 0
690
    CPUWatchpoint *wp, *next;
691
692
    QTAILQ_FOREACH_SAFE(wp, &cpu->watchpoints, entry, next) {
693
        if (wp->flags & mask) {
694
            cpu_watchpoint_remove_by_ref(cpu, wp);
695
        }
696
    }
697
#endif
698
87.9k
}
cpu_watchpoint_remove_all_aarch64
Line
Count
Source
688
120k
{
689
#if 0
690
    CPUWatchpoint *wp, *next;
691
692
    QTAILQ_FOREACH_SAFE(wp, &cpu->watchpoints, entry, next) {
693
        if (wp->flags & mask) {
694
            cpu_watchpoint_remove_by_ref(cpu, wp);
695
        }
696
    }
697
#endif
698
120k
}
cpu_watchpoint_remove_all_m68k
Line
Count
Source
688
29
{
689
#if 0
690
    CPUWatchpoint *wp, *next;
691
692
    QTAILQ_FOREACH_SAFE(wp, &cpu->watchpoints, entry, next) {
693
        if (wp->flags & mask) {
694
            cpu_watchpoint_remove_by_ref(cpu, wp);
695
        }
696
    }
697
#endif
698
29
}
cpu_watchpoint_remove_all_mips
Line
Count
Source
688
7.41k
{
689
#if 0
690
    CPUWatchpoint *wp, *next;
691
692
    QTAILQ_FOREACH_SAFE(wp, &cpu->watchpoints, entry, next) {
693
        if (wp->flags & mask) {
694
            cpu_watchpoint_remove_by_ref(cpu, wp);
695
        }
696
    }
697
#endif
698
7.41k
}
cpu_watchpoint_remove_all_mipsel
Line
Count
Source
688
8.94k
{
689
#if 0
690
    CPUWatchpoint *wp, *next;
691
692
    QTAILQ_FOREACH_SAFE(wp, &cpu->watchpoints, entry, next) {
693
        if (wp->flags & mask) {
694
            cpu_watchpoint_remove_by_ref(cpu, wp);
695
        }
696
    }
697
#endif
698
8.94k
}
Unexecuted instantiation: cpu_watchpoint_remove_all_mips64
Unexecuted instantiation: cpu_watchpoint_remove_all_mips64el
cpu_watchpoint_remove_all_sparc
Line
Count
Source
688
46
{
689
#if 0
690
    CPUWatchpoint *wp, *next;
691
692
    QTAILQ_FOREACH_SAFE(wp, &cpu->watchpoints, entry, next) {
693
        if (wp->flags & mask) {
694
            cpu_watchpoint_remove_by_ref(cpu, wp);
695
        }
696
    }
697
#endif
698
46
}
Unexecuted instantiation: cpu_watchpoint_remove_all_sparc64
Unexecuted instantiation: cpu_watchpoint_remove_all_ppc
Unexecuted instantiation: cpu_watchpoint_remove_all_ppc64
Unexecuted instantiation: cpu_watchpoint_remove_all_riscv32
Unexecuted instantiation: cpu_watchpoint_remove_all_riscv64
cpu_watchpoint_remove_all_s390x
Line
Count
Source
688
81.3k
{
689
#if 0
690
    CPUWatchpoint *wp, *next;
691
692
    QTAILQ_FOREACH_SAFE(wp, &cpu->watchpoints, entry, next) {
693
        if (wp->flags & mask) {
694
            cpu_watchpoint_remove_by_ref(cpu, wp);
695
        }
696
    }
697
#endif
698
81.3k
}
Unexecuted instantiation: cpu_watchpoint_remove_all_tricore
699
700
/* Return flags for watchpoints that match addr + prot.  */
701
int cpu_watchpoint_address_matches(CPUState *cpu, vaddr addr, vaddr len)
702
2.72M
{
703
#if 0
704
    CPUWatchpoint *wp;
705
    int ret = 0;
706
707
    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
708
        if (watchpoint_address_matches(wp, addr, TARGET_PAGE_SIZE)) {
709
            ret |= wp->flags;
710
        }
711
    }
712
    return ret;
713
#endif
714
2.72M
    return 0;
715
2.72M
}
cpu_watchpoint_address_matches_x86_64
Line
Count
Source
702
242k
{
703
#if 0
704
    CPUWatchpoint *wp;
705
    int ret = 0;
706
707
    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
708
        if (watchpoint_address_matches(wp, addr, TARGET_PAGE_SIZE)) {
709
            ret |= wp->flags;
710
        }
711
    }
712
    return ret;
713
#endif
714
242k
    return 0;
715
242k
}
cpu_watchpoint_address_matches_arm
Line
Count
Source
702
743k
{
703
#if 0
704
    CPUWatchpoint *wp;
705
    int ret = 0;
706
707
    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
708
        if (watchpoint_address_matches(wp, addr, TARGET_PAGE_SIZE)) {
709
            ret |= wp->flags;
710
        }
711
    }
712
    return ret;
713
#endif
714
743k
    return 0;
715
743k
}
cpu_watchpoint_address_matches_aarch64
Line
Count
Source
702
286k
{
703
#if 0
704
    CPUWatchpoint *wp;
705
    int ret = 0;
706
707
    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
708
        if (watchpoint_address_matches(wp, addr, TARGET_PAGE_SIZE)) {
709
            ret |= wp->flags;
710
        }
711
    }
712
    return ret;
713
#endif
714
286k
    return 0;
715
286k
}
cpu_watchpoint_address_matches_m68k
Line
Count
Source
702
56
{
703
#if 0
704
    CPUWatchpoint *wp;
705
    int ret = 0;
706
707
    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
708
        if (watchpoint_address_matches(wp, addr, TARGET_PAGE_SIZE)) {
709
            ret |= wp->flags;
710
        }
711
    }
712
    return ret;
713
#endif
714
56
    return 0;
715
56
}
cpu_watchpoint_address_matches_mips
Line
Count
Source
702
279k
{
703
#if 0
704
    CPUWatchpoint *wp;
705
    int ret = 0;
706
707
    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
708
        if (watchpoint_address_matches(wp, addr, TARGET_PAGE_SIZE)) {
709
            ret |= wp->flags;
710
        }
711
    }
712
    return ret;
713
#endif
714
279k
    return 0;
715
279k
}
cpu_watchpoint_address_matches_mipsel
Line
Count
Source
702
26.0k
{
703
#if 0
704
    CPUWatchpoint *wp;
705
    int ret = 0;
706
707
    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
708
        if (watchpoint_address_matches(wp, addr, TARGET_PAGE_SIZE)) {
709
            ret |= wp->flags;
710
        }
711
    }
712
    return ret;
713
#endif
714
26.0k
    return 0;
715
26.0k
}
Unexecuted instantiation: cpu_watchpoint_address_matches_mips64
Unexecuted instantiation: cpu_watchpoint_address_matches_mips64el
cpu_watchpoint_address_matches_sparc
Line
Count
Source
702
67
{
703
#if 0
704
    CPUWatchpoint *wp;
705
    int ret = 0;
706
707
    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
708
        if (watchpoint_address_matches(wp, addr, TARGET_PAGE_SIZE)) {
709
            ret |= wp->flags;
710
        }
711
    }
712
    return ret;
713
#endif
714
67
    return 0;
715
67
}
Unexecuted instantiation: cpu_watchpoint_address_matches_sparc64
Unexecuted instantiation: cpu_watchpoint_address_matches_ppc
Unexecuted instantiation: cpu_watchpoint_address_matches_ppc64
Unexecuted instantiation: cpu_watchpoint_address_matches_riscv32
Unexecuted instantiation: cpu_watchpoint_address_matches_riscv64
cpu_watchpoint_address_matches_s390x
Line
Count
Source
702
1.14M
{
703
#if 0
704
    CPUWatchpoint *wp;
705
    int ret = 0;
706
707
    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
708
        if (watchpoint_address_matches(wp, addr, TARGET_PAGE_SIZE)) {
709
            ret |= wp->flags;
710
        }
711
    }
712
    return ret;
713
#endif
714
1.14M
    return 0;
715
1.14M
}
Unexecuted instantiation: cpu_watchpoint_address_matches_tricore
716
717
/* Add a breakpoint.  */
718
int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
719
                          CPUBreakpoint **breakpoint)
720
140k
{
721
140k
    CPUBreakpoint *bp;
722
723
140k
    bp = g_malloc(sizeof(*bp));
724
725
140k
    bp->pc = pc;
726
140k
    bp->flags = flags;
727
728
    /* keep all GDB-injected breakpoints in front */
729
140k
    if (flags & BP_GDB) {
730
0
        QTAILQ_INSERT_HEAD(&cpu->breakpoints, bp, entry);
731
140k
    } else {
732
140k
        QTAILQ_INSERT_TAIL(&cpu->breakpoints, bp, entry);
733
140k
    }
734
735
140k
    breakpoint_invalidate(cpu, pc);
736
737
140k
    if (breakpoint) {
738
140k
        *breakpoint = bp;
739
140k
    }
740
140k
    return 0;
741
140k
}
cpu_breakpoint_insert_x86_64
Line
Count
Source
720
19.8k
{
721
19.8k
    CPUBreakpoint *bp;
722
723
19.8k
    bp = g_malloc(sizeof(*bp));
724
725
19.8k
    bp->pc = pc;
726
19.8k
    bp->flags = flags;
727
728
    /* keep all GDB-injected breakpoints in front */
729
19.8k
    if (flags & BP_GDB) {
730
0
        QTAILQ_INSERT_HEAD(&cpu->breakpoints, bp, entry);
731
19.8k
    } else {
732
19.8k
        QTAILQ_INSERT_TAIL(&cpu->breakpoints, bp, entry);
733
19.8k
    }
734
735
19.8k
    breakpoint_invalidate(cpu, pc);
736
737
19.8k
    if (breakpoint) {
738
19.8k
        *breakpoint = bp;
739
19.8k
    }
740
19.8k
    return 0;
741
19.8k
}
cpu_breakpoint_insert_arm
Line
Count
Source
720
32.0k
{
721
32.0k
    CPUBreakpoint *bp;
722
723
32.0k
    bp = g_malloc(sizeof(*bp));
724
725
32.0k
    bp->pc = pc;
726
32.0k
    bp->flags = flags;
727
728
    /* keep all GDB-injected breakpoints in front */
729
32.0k
    if (flags & BP_GDB) {
730
0
        QTAILQ_INSERT_HEAD(&cpu->breakpoints, bp, entry);
731
32.0k
    } else {
732
32.0k
        QTAILQ_INSERT_TAIL(&cpu->breakpoints, bp, entry);
733
32.0k
    }
734
735
32.0k
    breakpoint_invalidate(cpu, pc);
736
737
32.0k
    if (breakpoint) {
738
32.0k
        *breakpoint = bp;
739
32.0k
    }
740
32.0k
    return 0;
741
32.0k
}
cpu_breakpoint_insert_aarch64
Line
Count
Source
720
88.9k
{
721
88.9k
    CPUBreakpoint *bp;
722
723
88.9k
    bp = g_malloc(sizeof(*bp));
724
725
88.9k
    bp->pc = pc;
726
88.9k
    bp->flags = flags;
727
728
    /* keep all GDB-injected breakpoints in front */
729
88.9k
    if (flags & BP_GDB) {
730
0
        QTAILQ_INSERT_HEAD(&cpu->breakpoints, bp, entry);
731
88.9k
    } else {
732
88.9k
        QTAILQ_INSERT_TAIL(&cpu->breakpoints, bp, entry);
733
88.9k
    }
734
735
88.9k
    breakpoint_invalidate(cpu, pc);
736
737
88.9k
    if (breakpoint) {
738
88.9k
        *breakpoint = bp;
739
88.9k
    }
740
88.9k
    return 0;
741
88.9k
}
Unexecuted instantiation: cpu_breakpoint_insert_m68k
Unexecuted instantiation: cpu_breakpoint_insert_mips
Unexecuted instantiation: cpu_breakpoint_insert_mipsel
Unexecuted instantiation: cpu_breakpoint_insert_mips64
Unexecuted instantiation: cpu_breakpoint_insert_mips64el
Unexecuted instantiation: cpu_breakpoint_insert_sparc
Unexecuted instantiation: cpu_breakpoint_insert_sparc64
Unexecuted instantiation: cpu_breakpoint_insert_ppc
Unexecuted instantiation: cpu_breakpoint_insert_ppc64
Unexecuted instantiation: cpu_breakpoint_insert_riscv32
Unexecuted instantiation: cpu_breakpoint_insert_riscv64
Unexecuted instantiation: cpu_breakpoint_insert_s390x
Unexecuted instantiation: cpu_breakpoint_insert_tricore
742
743
/* Remove a specific breakpoint.  */
744
int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags)
745
0
{
746
0
    CPUBreakpoint *bp;
747
748
0
    QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
749
0
        if (bp->pc == pc && bp->flags == flags) {
750
0
            cpu_breakpoint_remove_by_ref(cpu, bp);
751
0
            return 0;
752
0
        }
753
0
    }
754
0
    return -ENOENT;
755
0
}
Unexecuted instantiation: cpu_breakpoint_remove_x86_64
Unexecuted instantiation: cpu_breakpoint_remove_arm
Unexecuted instantiation: cpu_breakpoint_remove_aarch64
Unexecuted instantiation: cpu_breakpoint_remove_m68k
Unexecuted instantiation: cpu_breakpoint_remove_mips
Unexecuted instantiation: cpu_breakpoint_remove_mipsel
Unexecuted instantiation: cpu_breakpoint_remove_mips64
Unexecuted instantiation: cpu_breakpoint_remove_mips64el
Unexecuted instantiation: cpu_breakpoint_remove_sparc
Unexecuted instantiation: cpu_breakpoint_remove_sparc64
Unexecuted instantiation: cpu_breakpoint_remove_ppc
Unexecuted instantiation: cpu_breakpoint_remove_ppc64
Unexecuted instantiation: cpu_breakpoint_remove_riscv32
Unexecuted instantiation: cpu_breakpoint_remove_riscv64
Unexecuted instantiation: cpu_breakpoint_remove_s390x
Unexecuted instantiation: cpu_breakpoint_remove_tricore
756
757
/* Remove a specific breakpoint by reference.  */
758
void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *breakpoint)
759
140k
{
760
140k
    QTAILQ_REMOVE(&cpu->breakpoints, breakpoint, entry);
761
762
140k
    breakpoint_invalidate(cpu, breakpoint->pc);
763
764
140k
    g_free(breakpoint);
765
140k
}
cpu_breakpoint_remove_by_ref_x86_64
Line
Count
Source
759
19.8k
{
760
19.8k
    QTAILQ_REMOVE(&cpu->breakpoints, breakpoint, entry);
761
762
19.8k
    breakpoint_invalidate(cpu, breakpoint->pc);
763
764
19.8k
    g_free(breakpoint);
765
19.8k
}
cpu_breakpoint_remove_by_ref_arm
Line
Count
Source
759
32.0k
{
760
32.0k
    QTAILQ_REMOVE(&cpu->breakpoints, breakpoint, entry);
761
762
32.0k
    breakpoint_invalidate(cpu, breakpoint->pc);
763
764
32.0k
    g_free(breakpoint);
765
32.0k
}
cpu_breakpoint_remove_by_ref_aarch64
Line
Count
Source
759
88.9k
{
760
88.9k
    QTAILQ_REMOVE(&cpu->breakpoints, breakpoint, entry);
761
762
88.9k
    breakpoint_invalidate(cpu, breakpoint->pc);
763
764
88.9k
    g_free(breakpoint);
765
88.9k
}
Unexecuted instantiation: cpu_breakpoint_remove_by_ref_m68k
Unexecuted instantiation: cpu_breakpoint_remove_by_ref_mips
Unexecuted instantiation: cpu_breakpoint_remove_by_ref_mipsel
Unexecuted instantiation: cpu_breakpoint_remove_by_ref_mips64
Unexecuted instantiation: cpu_breakpoint_remove_by_ref_mips64el
Unexecuted instantiation: cpu_breakpoint_remove_by_ref_sparc
Unexecuted instantiation: cpu_breakpoint_remove_by_ref_sparc64
Unexecuted instantiation: cpu_breakpoint_remove_by_ref_ppc
Unexecuted instantiation: cpu_breakpoint_remove_by_ref_ppc64
Unexecuted instantiation: cpu_breakpoint_remove_by_ref_riscv32
Unexecuted instantiation: cpu_breakpoint_remove_by_ref_riscv64
Unexecuted instantiation: cpu_breakpoint_remove_by_ref_s390x
Unexecuted instantiation: cpu_breakpoint_remove_by_ref_tricore
766
767
/* Remove all matching breakpoints. */
768
void cpu_breakpoint_remove_all(CPUState *cpu, int mask)
769
314k
{
770
314k
    CPUBreakpoint *bp, *next;
771
772
314k
    QTAILQ_FOREACH_SAFE(bp, &cpu->breakpoints, entry, next) {
773
772
        if (bp->flags & mask) {
774
772
            cpu_breakpoint_remove_by_ref(cpu, bp);
775
772
        }
776
772
    }
777
314k
}
cpu_breakpoint_remove_all_x86_64
Line
Count
Source
769
67.2k
{
770
67.2k
    CPUBreakpoint *bp, *next;
771
772
67.2k
    QTAILQ_FOREACH_SAFE(bp, &cpu->breakpoints, entry, next) {
773
314
        if (bp->flags & mask) {
774
314
            cpu_breakpoint_remove_by_ref(cpu, bp);
775
314
        }
776
314
    }
777
67.2k
}
cpu_breakpoint_remove_all_arm
Line
Count
Source
769
87.9k
{
770
87.9k
    CPUBreakpoint *bp, *next;
771
772
87.9k
    QTAILQ_FOREACH_SAFE(bp, &cpu->breakpoints, entry, next) {
773
208
        if (bp->flags & mask) {
774
208
            cpu_breakpoint_remove_by_ref(cpu, bp);
775
208
        }
776
208
    }
777
87.9k
}
cpu_breakpoint_remove_all_aarch64
Line
Count
Source
769
120k
{
770
120k
    CPUBreakpoint *bp, *next;
771
772
120k
    QTAILQ_FOREACH_SAFE(bp, &cpu->breakpoints, entry, next) {
773
250
        if (bp->flags & mask) {
774
250
            cpu_breakpoint_remove_by_ref(cpu, bp);
775
250
        }
776
250
    }
777
120k
}
cpu_breakpoint_remove_all_m68k
Line
Count
Source
769
29
{
770
29
    CPUBreakpoint *bp, *next;
771
772
29
    QTAILQ_FOREACH_SAFE(bp, &cpu->breakpoints, entry, next) {
773
0
        if (bp->flags & mask) {
774
0
            cpu_breakpoint_remove_by_ref(cpu, bp);
775
0
        }
776
0
    }
777
29
}
cpu_breakpoint_remove_all_mips
Line
Count
Source
769
7.41k
{
770
7.41k
    CPUBreakpoint *bp, *next;
771
772
7.41k
    QTAILQ_FOREACH_SAFE(bp, &cpu->breakpoints, entry, next) {
773
0
        if (bp->flags & mask) {
774
0
            cpu_breakpoint_remove_by_ref(cpu, bp);
775
0
        }
776
0
    }
777
7.41k
}
cpu_breakpoint_remove_all_mipsel
Line
Count
Source
769
8.94k
{
770
8.94k
    CPUBreakpoint *bp, *next;
771
772
8.94k
    QTAILQ_FOREACH_SAFE(bp, &cpu->breakpoints, entry, next) {
773
0
        if (bp->flags & mask) {
774
0
            cpu_breakpoint_remove_by_ref(cpu, bp);
775
0
        }
776
0
    }
777
8.94k
}
Unexecuted instantiation: cpu_breakpoint_remove_all_mips64
Unexecuted instantiation: cpu_breakpoint_remove_all_mips64el
cpu_breakpoint_remove_all_sparc
Line
Count
Source
769
46
{
770
46
    CPUBreakpoint *bp, *next;
771
772
46
    QTAILQ_FOREACH_SAFE(bp, &cpu->breakpoints, entry, next) {
773
0
        if (bp->flags & mask) {
774
0
            cpu_breakpoint_remove_by_ref(cpu, bp);
775
0
        }
776
0
    }
777
46
}
Unexecuted instantiation: cpu_breakpoint_remove_all_sparc64
Unexecuted instantiation: cpu_breakpoint_remove_all_ppc
Unexecuted instantiation: cpu_breakpoint_remove_all_ppc64
Unexecuted instantiation: cpu_breakpoint_remove_all_riscv32
Unexecuted instantiation: cpu_breakpoint_remove_all_riscv64
cpu_breakpoint_remove_all_s390x
Line
Count
Source
769
21.9k
{
770
21.9k
    CPUBreakpoint *bp, *next;
771
772
21.9k
    QTAILQ_FOREACH_SAFE(bp, &cpu->breakpoints, entry, next) {
773
0
        if (bp->flags & mask) {
774
0
            cpu_breakpoint_remove_by_ref(cpu, bp);
775
0
        }
776
0
    }
777
21.9k
}
Unexecuted instantiation: cpu_breakpoint_remove_all_tricore
778
779
void cpu_abort(CPUState *cpu, const char *fmt, ...)
780
0
{
781
0
    abort();
782
0
}
Unexecuted instantiation: cpu_abort_x86_64
Unexecuted instantiation: cpu_abort_arm
Unexecuted instantiation: cpu_abort_aarch64
Unexecuted instantiation: cpu_abort_m68k
Unexecuted instantiation: cpu_abort_mips
Unexecuted instantiation: cpu_abort_mipsel
Unexecuted instantiation: cpu_abort_mips64
Unexecuted instantiation: cpu_abort_mips64el
Unexecuted instantiation: cpu_abort_sparc
Unexecuted instantiation: cpu_abort_sparc64
Unexecuted instantiation: cpu_abort_ppc
Unexecuted instantiation: cpu_abort_ppc64
Unexecuted instantiation: cpu_abort_riscv32
Unexecuted instantiation: cpu_abort_riscv64
Unexecuted instantiation: cpu_abort_s390x
Unexecuted instantiation: cpu_abort_tricore
783
784
/* Called from RCU critical section */
785
static RAMBlock *qemu_get_ram_block(struct uc_struct *uc, ram_addr_t addr)
786
0
{
787
0
    RAMBlock *block;
788
789
0
    block = uc->ram_list.mru_block;
790
0
    if (block && addr - block->offset < block->max_length) {
791
0
        return block;
792
0
    }
793
0
    RAMBLOCK_FOREACH(block) {
794
0
        if (addr - block->offset < block->max_length) {
795
0
            goto found;
796
0
        }
797
0
    }
798
799
0
    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
800
0
    abort();
801
802
0
found:
803
0
    uc->ram_list.mru_block = block;
804
0
    return block;
805
0
}
806
807
/* Note: start and end must be within the same ram block.  */
808
bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start,
809
                                              ram_addr_t length,
810
                                              unsigned client)
811
2.05M
{
812
2.05M
    return false;
813
2.05M
}
cpu_physical_memory_test_and_clear_dirty_x86_64
Line
Count
Source
811
93.3k
{
812
    return false;
813
93.3k
}
cpu_physical_memory_test_and_clear_dirty_arm
Line
Count
Source
811
958k
{
812
    return false;
813
958k
}
cpu_physical_memory_test_and_clear_dirty_aarch64
Line
Count
Source
811
886k
{
812
    return false;
813
886k
}
cpu_physical_memory_test_and_clear_dirty_m68k
Line
Count
Source
811
311
{
812
    return false;
813
311
}
cpu_physical_memory_test_and_clear_dirty_mips
Line
Count
Source
811
53.4k
{
812
    return false;
813
53.4k
}
cpu_physical_memory_test_and_clear_dirty_mipsel
Line
Count
Source
811
17.7k
{
812
    return false;
813
17.7k
}
Unexecuted instantiation: cpu_physical_memory_test_and_clear_dirty_mips64
Unexecuted instantiation: cpu_physical_memory_test_and_clear_dirty_mips64el
cpu_physical_memory_test_and_clear_dirty_sparc
Line
Count
Source
811
87
{
812
    return false;
813
87
}
Unexecuted instantiation: cpu_physical_memory_test_and_clear_dirty_sparc64
Unexecuted instantiation: cpu_physical_memory_test_and_clear_dirty_ppc
Unexecuted instantiation: cpu_physical_memory_test_and_clear_dirty_ppc64
Unexecuted instantiation: cpu_physical_memory_test_and_clear_dirty_riscv32
Unexecuted instantiation: cpu_physical_memory_test_and_clear_dirty_riscv64
cpu_physical_memory_test_and_clear_dirty_s390x
Line
Count
Source
811
42.7k
{
812
    return false;
813
42.7k
}
Unexecuted instantiation: cpu_physical_memory_test_and_clear_dirty_tricore
814
815
/* Called from RCU critical section */
816
hwaddr memory_region_section_get_iotlb(CPUState *cpu,
817
                                       MemoryRegionSection *section)
818
58.6k
{
819
58.6k
    AddressSpaceDispatch *d = flatview_to_dispatch(section->fv);
820
58.6k
    return section - d->map.sections;
821
58.6k
}
memory_region_section_get_iotlb_x86_64
Line
Count
Source
818
23.7k
{
819
23.7k
    AddressSpaceDispatch *d = flatview_to_dispatch(section->fv);
820
23.7k
    return section - d->map.sections;
821
23.7k
}
memory_region_section_get_iotlb_arm
Line
Count
Source
818
11.3k
{
819
11.3k
    AddressSpaceDispatch *d = flatview_to_dispatch(section->fv);
820
11.3k
    return section - d->map.sections;
821
11.3k
}
memory_region_section_get_iotlb_aarch64
Line
Count
Source
818
20.7k
{
819
20.7k
    AddressSpaceDispatch *d = flatview_to_dispatch(section->fv);
820
20.7k
    return section - d->map.sections;
821
20.7k
}
memory_region_section_get_iotlb_m68k
Line
Count
Source
818
9
{
819
9
    AddressSpaceDispatch *d = flatview_to_dispatch(section->fv);
820
9
    return section - d->map.sections;
821
9
}
memory_region_section_get_iotlb_mips
Line
Count
Source
818
1.03k
{
819
1.03k
    AddressSpaceDispatch *d = flatview_to_dispatch(section->fv);
820
1.03k
    return section - d->map.sections;
821
1.03k
}
memory_region_section_get_iotlb_mipsel
Line
Count
Source
818
1.76k
{
819
1.76k
    AddressSpaceDispatch *d = flatview_to_dispatch(section->fv);
820
1.76k
    return section - d->map.sections;
821
1.76k
}
Unexecuted instantiation: memory_region_section_get_iotlb_mips64
Unexecuted instantiation: memory_region_section_get_iotlb_mips64el
memory_region_section_get_iotlb_sparc
Line
Count
Source
818
7
{
819
7
    AddressSpaceDispatch *d = flatview_to_dispatch(section->fv);
820
7
    return section - d->map.sections;
821
7
}
Unexecuted instantiation: memory_region_section_get_iotlb_sparc64
Unexecuted instantiation: memory_region_section_get_iotlb_ppc
Unexecuted instantiation: memory_region_section_get_iotlb_ppc64
Unexecuted instantiation: memory_region_section_get_iotlb_riscv32
Unexecuted instantiation: memory_region_section_get_iotlb_riscv64
Unexecuted instantiation: memory_region_section_get_iotlb_s390x
Unexecuted instantiation: memory_region_section_get_iotlb_tricore
822
823
static int subpage_register(struct uc_struct *uc, subpage_t *mmio, uint32_t start, uint32_t end,
824
                            uint16_t section);
825
static subpage_t *subpage_init(struct uc_struct *, FlatView *fv, hwaddr base);
826
827
static void *(*phys_mem_alloc)(struct uc_struct *uc, size_t size, uint64_t *align) =
828
                               qemu_anon_ram_alloc;
829
830
static uint16_t phys_section_add(struct uc_struct *uc, PhysPageMap *map,
831
                                 MemoryRegionSection *section)
832
1.76M
{
833
    /* The physical section number is ORed with a page-aligned
834
     * pointer to produce the iotlb entries.  Thus it should
835
     * never overflow into the page-aligned value.
836
     */
837
1.76M
    assert(map->sections_nb < TARGET_PAGE_SIZE);
838
839
1.76M
    if (map->sections_nb == map->sections_nb_alloc) {
840
1.05M
        map->sections_nb_alloc = MAX(map->sections_nb_alloc * 2, 16);
841
1.05M
        map->sections = g_renew(MemoryRegionSection, map->sections,
842
1.05M
                                map->sections_nb_alloc);
843
1.05M
    }
844
1.76M
    map->sections[map->sections_nb] = *section;
845
1.76M
    return map->sections_nb++;
846
1.76M
}
847
848
static void phys_section_destroy(MemoryRegion *mr)
849
1.76M
{
850
1.76M
    bool have_sub_page = mr->subpage;
851
852
1.76M
    if (have_sub_page) {
853
0
        subpage_t *subpage = container_of(mr, subpage_t, iomem);
854
        // object_unref(OBJECT(&subpage->iomem));
855
0
        g_free(subpage);
856
0
    }
857
1.76M
}
858
859
static void phys_sections_free(PhysPageMap *map)
860
1.05M
{
861
2.82M
    while (map->sections_nb > 0) {
862
1.76M
        MemoryRegionSection *section = &map->sections[--map->sections_nb];
863
1.76M
        phys_section_destroy(section->mr);
864
1.76M
    }
865
1.05M
    g_free(map->sections);
866
1.05M
    g_free(map->nodes);
867
1.05M
}
868
869
static void register_subpage(struct uc_struct *uc, FlatView *fv, MemoryRegionSection *section)
870
0
{
871
0
    AddressSpaceDispatch *d = flatview_to_dispatch(fv);
872
0
    subpage_t *subpage;
873
0
    hwaddr base = section->offset_within_address_space
874
0
        & TARGET_PAGE_MASK;
875
0
    MemoryRegionSection *existing = phys_page_find(d, base);
876
0
    MemoryRegionSection subsection = {
877
0
        .offset_within_address_space = base,
878
0
        .size = int128_make64(TARGET_PAGE_SIZE),
879
0
    };
880
0
    hwaddr start, end;
881
882
0
    assert(existing->mr->subpage || existing->mr == &(section->mr->uc->io_mem_unassigned));
883
884
0
    if (!(existing->mr->subpage)) {
885
0
        subpage = subpage_init(uc, fv, base);
886
0
        subsection.fv = fv;
887
0
        subsection.mr = &subpage->iomem;
888
0
        phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
889
0
                      phys_section_add(uc, &d->map, &subsection));
890
0
    } else {
891
0
        subpage = container_of(existing->mr, subpage_t, iomem);
892
0
    }
893
0
    start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
894
0
    end = start + int128_get64(section->size) - 1;
895
0
    subpage_register(uc, subpage, start, end,
896
0
                     phys_section_add(uc, &d->map, section));
897
0
}
898
899
900
static void register_multipage(struct uc_struct *uc, FlatView *fv,
901
                               MemoryRegionSection *section)
902
705k
{
903
705k
    AddressSpaceDispatch *d = flatview_to_dispatch(fv);
904
705k
    hwaddr start_addr = section->offset_within_address_space;
905
705k
    uint16_t section_index = phys_section_add(uc, &d->map, section);
906
705k
    uint64_t num_pages = int128_get64(int128_rshift(section->size,
907
705k
                                                    TARGET_PAGE_BITS));
908
909
705k
    assert(num_pages);
910
705k
    phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);
911
705k
}
912
913
/*
914
 * The range in *section* may look like this:
915
 *
916
 *      |s|PPPPPPP|s|
917
 *
918
 * where s stands for subpage and P for page.
919
 */
920
void flatview_add_to_dispatch(struct uc_struct *uc, FlatView *fv, MemoryRegionSection *section)
921
705k
{
922
705k
    MemoryRegionSection remain = *section;
923
705k
    Int128 page_size = int128_make64(TARGET_PAGE_SIZE);
924
925
    /* register first subpage */
926
705k
    if (remain.offset_within_address_space & ~TARGET_PAGE_MASK) {
927
0
        uint64_t left = TARGET_PAGE_ALIGN(remain.offset_within_address_space)
928
0
                        - remain.offset_within_address_space;
929
930
0
        MemoryRegionSection now = remain;
931
0
        now.size = int128_min(int128_make64(left), now.size);
932
0
        register_subpage(uc, fv, &now);
933
0
        if (int128_eq(remain.size, now.size)) {
934
0
            return;
935
0
        }
936
0
        remain.size = int128_sub(remain.size, now.size);
937
0
        remain.offset_within_address_space += int128_get64(now.size);
938
0
        remain.offset_within_region += int128_get64(now.size);
939
0
    }
940
941
    /* register whole pages */
942
705k
    if (int128_ge(remain.size, page_size)) {
943
705k
        MemoryRegionSection now = remain;
944
705k
        now.size = int128_and(now.size, int128_neg(page_size));
945
705k
        register_multipage(uc, fv, &now);
946
705k
        if (int128_eq(remain.size, now.size)) {
947
705k
            return;
948
705k
        }
949
0
        remain.size = int128_sub(remain.size, now.size);
950
0
        remain.offset_within_address_space += int128_get64(now.size);
951
0
        remain.offset_within_region += int128_get64(now.size);
952
0
    }
953
954
    /* register last subpage */
955
0
    register_subpage(uc, fv, &remain);
956
0
}
flatview_add_to_dispatch_x86_64
Line
Count
Source
921
134k
{
922
134k
    MemoryRegionSection remain = *section;
923
134k
    Int128 page_size = int128_make64(TARGET_PAGE_SIZE);
924
925
    /* register first subpage */
926
134k
    if (remain.offset_within_address_space & ~TARGET_PAGE_MASK) {
927
0
        uint64_t left = TARGET_PAGE_ALIGN(remain.offset_within_address_space)
928
0
                        - remain.offset_within_address_space;
929
930
0
        MemoryRegionSection now = remain;
931
0
        now.size = int128_min(int128_make64(left), now.size);
932
0
        register_subpage(uc, fv, &now);
933
0
        if (int128_eq(remain.size, now.size)) {
934
0
            return;
935
0
        }
936
0
        remain.size = int128_sub(remain.size, now.size);
937
0
        remain.offset_within_address_space += int128_get64(now.size);
938
0
        remain.offset_within_region += int128_get64(now.size);
939
0
    }
940
941
    /* register whole pages */
942
134k
    if (int128_ge(remain.size, page_size)) {
943
134k
        MemoryRegionSection now = remain;
944
134k
        now.size = int128_and(now.size, int128_neg(page_size));
945
134k
        register_multipage(uc, fv, &now);
946
134k
        if (int128_eq(remain.size, now.size)) {
947
134k
            return;
948
134k
        }
949
0
        remain.size = int128_sub(remain.size, now.size);
950
0
        remain.offset_within_address_space += int128_get64(now.size);
951
0
        remain.offset_within_region += int128_get64(now.size);
952
0
    }
953
954
    /* register last subpage */
955
0
    register_subpage(uc, fv, &remain);
956
0
}
flatview_add_to_dispatch_arm
Line
Count
Source
921
175k
{
922
175k
    MemoryRegionSection remain = *section;
923
175k
    Int128 page_size = int128_make64(TARGET_PAGE_SIZE);
924
925
    /* register first subpage */
926
175k
    if (remain.offset_within_address_space & ~TARGET_PAGE_MASK) {
927
0
        uint64_t left = TARGET_PAGE_ALIGN(remain.offset_within_address_space)
928
0
                        - remain.offset_within_address_space;
929
930
0
        MemoryRegionSection now = remain;
931
0
        now.size = int128_min(int128_make64(left), now.size);
932
0
        register_subpage(uc, fv, &now);
933
0
        if (int128_eq(remain.size, now.size)) {
934
0
            return;
935
0
        }
936
0
        remain.size = int128_sub(remain.size, now.size);
937
0
        remain.offset_within_address_space += int128_get64(now.size);
938
0
        remain.offset_within_region += int128_get64(now.size);
939
0
    }
940
941
    /* register whole pages */
942
175k
    if (int128_ge(remain.size, page_size)) {
943
175k
        MemoryRegionSection now = remain;
944
175k
        now.size = int128_and(now.size, int128_neg(page_size));
945
175k
        register_multipage(uc, fv, &now);
946
175k
        if (int128_eq(remain.size, now.size)) {
947
175k
            return;
948
175k
        }
949
0
        remain.size = int128_sub(remain.size, now.size);
950
0
        remain.offset_within_address_space += int128_get64(now.size);
951
0
        remain.offset_within_region += int128_get64(now.size);
952
0
    }
953
954
    /* register last subpage */
955
0
    register_subpage(uc, fv, &remain);
956
0
}
flatview_add_to_dispatch_aarch64
Line
Count
Source
921
241k
{
922
241k
    MemoryRegionSection remain = *section;
923
241k
    Int128 page_size = int128_make64(TARGET_PAGE_SIZE);
924
925
    /* register first subpage */
926
241k
    if (remain.offset_within_address_space & ~TARGET_PAGE_MASK) {
927
0
        uint64_t left = TARGET_PAGE_ALIGN(remain.offset_within_address_space)
928
0
                        - remain.offset_within_address_space;
929
930
0
        MemoryRegionSection now = remain;
931
0
        now.size = int128_min(int128_make64(left), now.size);
932
0
        register_subpage(uc, fv, &now);
933
0
        if (int128_eq(remain.size, now.size)) {
934
0
            return;
935
0
        }
936
0
        remain.size = int128_sub(remain.size, now.size);
937
0
        remain.offset_within_address_space += int128_get64(now.size);
938
0
        remain.offset_within_region += int128_get64(now.size);
939
0
    }
940
941
    /* register whole pages */
942
241k
    if (int128_ge(remain.size, page_size)) {
943
241k
        MemoryRegionSection now = remain;
944
241k
        now.size = int128_and(now.size, int128_neg(page_size));
945
241k
        register_multipage(uc, fv, &now);
946
241k
        if (int128_eq(remain.size, now.size)) {
947
241k
            return;
948
241k
        }
949
0
        remain.size = int128_sub(remain.size, now.size);
950
0
        remain.offset_within_address_space += int128_get64(now.size);
951
0
        remain.offset_within_region += int128_get64(now.size);
952
0
    }
953
954
    /* register last subpage */
955
0
    register_subpage(uc, fv, &remain);
956
0
}
flatview_add_to_dispatch_m68k
Line
Count
Source
921
116
{
922
116
    MemoryRegionSection remain = *section;
923
116
    Int128 page_size = int128_make64(TARGET_PAGE_SIZE);
924
925
    /* register first subpage */
926
116
    if (remain.offset_within_address_space & ~TARGET_PAGE_MASK) {
927
0
        uint64_t left = TARGET_PAGE_ALIGN(remain.offset_within_address_space)
928
0
                        - remain.offset_within_address_space;
929
930
0
        MemoryRegionSection now = remain;
931
0
        now.size = int128_min(int128_make64(left), now.size);
932
0
        register_subpage(uc, fv, &now);
933
0
        if (int128_eq(remain.size, now.size)) {
934
0
            return;
935
0
        }
936
0
        remain.size = int128_sub(remain.size, now.size);
937
0
        remain.offset_within_address_space += int128_get64(now.size);
938
0
        remain.offset_within_region += int128_get64(now.size);
939
0
    }
940
941
    /* register whole pages */
942
116
    if (int128_ge(remain.size, page_size)) {
943
116
        MemoryRegionSection now = remain;
944
116
        now.size = int128_and(now.size, int128_neg(page_size));
945
116
        register_multipage(uc, fv, &now);
946
116
        if (int128_eq(remain.size, now.size)) {
947
116
            return;
948
116
        }
949
0
        remain.size = int128_sub(remain.size, now.size);
950
0
        remain.offset_within_address_space += int128_get64(now.size);
951
0
        remain.offset_within_region += int128_get64(now.size);
952
0
    }
953
954
    /* register last subpage */
955
0
    register_subpage(uc, fv, &remain);
956
0
}
flatview_add_to_dispatch_mips
Line
Count
Source
921
29.6k
{
922
29.6k
    MemoryRegionSection remain = *section;
923
29.6k
    Int128 page_size = int128_make64(TARGET_PAGE_SIZE);
924
925
    /* register first subpage */
926
29.6k
    if (remain.offset_within_address_space & ~TARGET_PAGE_MASK) {
927
0
        uint64_t left = TARGET_PAGE_ALIGN(remain.offset_within_address_space)
928
0
                        - remain.offset_within_address_space;
929
930
0
        MemoryRegionSection now = remain;
931
0
        now.size = int128_min(int128_make64(left), now.size);
932
0
        register_subpage(uc, fv, &now);
933
0
        if (int128_eq(remain.size, now.size)) {
934
0
            return;
935
0
        }
936
0
        remain.size = int128_sub(remain.size, now.size);
937
0
        remain.offset_within_address_space += int128_get64(now.size);
938
0
        remain.offset_within_region += int128_get64(now.size);
939
0
    }
940
941
    /* register whole pages */
942
29.6k
    if (int128_ge(remain.size, page_size)) {
943
29.6k
        MemoryRegionSection now = remain;
944
29.6k
        now.size = int128_and(now.size, int128_neg(page_size));
945
29.6k
        register_multipage(uc, fv, &now);
946
29.6k
        if (int128_eq(remain.size, now.size)) {
947
29.6k
            return;
948
29.6k
        }
949
0
        remain.size = int128_sub(remain.size, now.size);
950
0
        remain.offset_within_address_space += int128_get64(now.size);
951
0
        remain.offset_within_region += int128_get64(now.size);
952
0
    }
953
954
    /* register last subpage */
955
0
    register_subpage(uc, fv, &remain);
956
0
}
flatview_add_to_dispatch_mipsel
Line
Count
Source
921
35.7k
{
922
35.7k
    MemoryRegionSection remain = *section;
923
35.7k
    Int128 page_size = int128_make64(TARGET_PAGE_SIZE);
924
925
    /* register first subpage */
926
35.7k
    if (remain.offset_within_address_space & ~TARGET_PAGE_MASK) {
927
0
        uint64_t left = TARGET_PAGE_ALIGN(remain.offset_within_address_space)
928
0
                        - remain.offset_within_address_space;
929
930
0
        MemoryRegionSection now = remain;
931
0
        now.size = int128_min(int128_make64(left), now.size);
932
0
        register_subpage(uc, fv, &now);
933
0
        if (int128_eq(remain.size, now.size)) {
934
0
            return;
935
0
        }
936
0
        remain.size = int128_sub(remain.size, now.size);
937
0
        remain.offset_within_address_space += int128_get64(now.size);
938
0
        remain.offset_within_region += int128_get64(now.size);
939
0
    }
940
941
    /* register whole pages */
942
35.7k
    if (int128_ge(remain.size, page_size)) {
943
35.7k
        MemoryRegionSection now = remain;
944
35.7k
        now.size = int128_and(now.size, int128_neg(page_size));
945
35.7k
        register_multipage(uc, fv, &now);
946
35.7k
        if (int128_eq(remain.size, now.size)) {
947
35.7k
            return;
948
35.7k
        }
949
0
        remain.size = int128_sub(remain.size, now.size);
950
0
        remain.offset_within_address_space += int128_get64(now.size);
951
0
        remain.offset_within_region += int128_get64(now.size);
952
0
    }
953
954
    /* register last subpage */
955
0
    register_subpage(uc, fv, &remain);
956
0
}
Unexecuted instantiation: flatview_add_to_dispatch_mips64
Unexecuted instantiation: flatview_add_to_dispatch_mips64el
flatview_add_to_dispatch_sparc
Line
Count
Source
921
184
{
922
184
    MemoryRegionSection remain = *section;
923
184
    Int128 page_size = int128_make64(TARGET_PAGE_SIZE);
924
925
    /* register first subpage */
926
184
    if (remain.offset_within_address_space & ~TARGET_PAGE_MASK) {
927
0
        uint64_t left = TARGET_PAGE_ALIGN(remain.offset_within_address_space)
928
0
                        - remain.offset_within_address_space;
929
930
0
        MemoryRegionSection now = remain;
931
0
        now.size = int128_min(int128_make64(left), now.size);
932
0
        register_subpage(uc, fv, &now);
933
0
        if (int128_eq(remain.size, now.size)) {
934
0
            return;
935
0
        }
936
0
        remain.size = int128_sub(remain.size, now.size);
937
0
        remain.offset_within_address_space += int128_get64(now.size);
938
0
        remain.offset_within_region += int128_get64(now.size);
939
0
    }
940
941
    /* register whole pages */
942
184
    if (int128_ge(remain.size, page_size)) {
943
184
        MemoryRegionSection now = remain;
944
184
        now.size = int128_and(now.size, int128_neg(page_size));
945
184
        register_multipage(uc, fv, &now);
946
184
        if (int128_eq(remain.size, now.size)) {
947
184
            return;
948
184
        }
949
0
        remain.size = int128_sub(remain.size, now.size);
950
0
        remain.offset_within_address_space += int128_get64(now.size);
951
0
        remain.offset_within_region += int128_get64(now.size);
952
0
    }
953
954
    /* register last subpage */
955
0
    register_subpage(uc, fv, &remain);
956
0
}
Unexecuted instantiation: flatview_add_to_dispatch_sparc64
Unexecuted instantiation: flatview_add_to_dispatch_ppc
Unexecuted instantiation: flatview_add_to_dispatch_ppc64
Unexecuted instantiation: flatview_add_to_dispatch_riscv32
Unexecuted instantiation: flatview_add_to_dispatch_riscv64
flatview_add_to_dispatch_s390x
Line
Count
Source
921
87.9k
{
922
87.9k
    MemoryRegionSection remain = *section;
923
87.9k
    Int128 page_size = int128_make64(TARGET_PAGE_SIZE);
924
925
    /* register first subpage */
926
87.9k
    if (remain.offset_within_address_space & ~TARGET_PAGE_MASK) {
927
0
        uint64_t left = TARGET_PAGE_ALIGN(remain.offset_within_address_space)
928
0
                        - remain.offset_within_address_space;
929
930
0
        MemoryRegionSection now = remain;
931
0
        now.size = int128_min(int128_make64(left), now.size);
932
0
        register_subpage(uc, fv, &now);
933
0
        if (int128_eq(remain.size, now.size)) {
934
0
            return;
935
0
        }
936
0
        remain.size = int128_sub(remain.size, now.size);
937
0
        remain.offset_within_address_space += int128_get64(now.size);
938
0
        remain.offset_within_region += int128_get64(now.size);
939
0
    }
940
941
    /* register whole pages */
942
87.9k
    if (int128_ge(remain.size, page_size)) {
943
87.9k
        MemoryRegionSection now = remain;
944
87.9k
        now.size = int128_and(now.size, int128_neg(page_size));
945
87.9k
        register_multipage(uc, fv, &now);
946
87.9k
        if (int128_eq(remain.size, now.size)) {
947
87.9k
            return;
948
87.9k
        }
949
0
        remain.size = int128_sub(remain.size, now.size);
950
0
        remain.offset_within_address_space += int128_get64(now.size);
951
0
        remain.offset_within_region += int128_get64(now.size);
952
0
    }
953
954
    /* register last subpage */
955
0
    register_subpage(uc, fv, &remain);
956
0
}
Unexecuted instantiation: flatview_add_to_dispatch_tricore
957
958
static ram_addr_t find_ram_offset_last(struct uc_struct *uc, ram_addr_t size)
959
0
{
960
0
    ram_addr_t result = 0;
961
0
    RAMBlock *block = uc->ram_list.last_block;
962
963
0
    result = block->offset + block->max_length;
964
965
0
    if (result + size > RAM_ADDR_MAX) {
966
0
        abort();
967
0
    }
968
0
    return result;
969
0
}
970
971
/* Allocate space within the ram_addr_t space that governs the
972
 * dirty bitmaps.
973
 * Called with the ramlist lock held.
974
 */
975
static ram_addr_t find_ram_offset(struct uc_struct *uc, ram_addr_t size)
976
176k
{
977
176k
    RAMBlock *block, *next_block;
978
176k
    ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;
979
980
176k
    assert(size != 0); /* it would hand out same offset multiple times */
981
982
176k
    if (QLIST_EMPTY_RCU(&uc->ram_list.blocks)) {
983
176k
        return 0;
984
176k
    }
985
986
0
    if (!uc->ram_list.freed) {
987
0
        return find_ram_offset_last(uc, size);
988
0
    }
989
990
0
    RAMBLOCK_FOREACH(block) {
991
0
        ram_addr_t candidate, next = RAM_ADDR_MAX;
992
993
        /* Align blocks to start on a 'long' in the bitmap
994
         * which makes the bitmap sync'ing take the fast path.
995
         */
996
0
        candidate = block->offset + block->max_length;
997
0
        candidate = ROUND_UP(candidate, BITS_PER_LONG << TARGET_PAGE_BITS);
998
999
        /* Search for the closest following block
1000
         * and find the gap.
1001
         */
1002
0
        RAMBLOCK_FOREACH(next_block) {
1003
0
            if (next_block->offset >= candidate) {
1004
0
                next = MIN(next, next_block->offset);
1005
0
            }
1006
0
        }
1007
1008
        /* If it fits remember our place and remember the size
1009
         * of gap, but keep going so that we might find a smaller
1010
         * gap to fill so avoiding fragmentation.
1011
         */
1012
0
        if (next - candidate >= size && next - candidate < mingap) {
1013
0
            offset = candidate;
1014
0
            mingap = next - candidate;
1015
0
        }
1016
0
    }
1017
1018
0
    if (offset == RAM_ADDR_MAX) {
1019
0
        fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
1020
0
                (uint64_t)size);
1021
0
        abort();
1022
0
    }
1023
1024
0
    return offset;
1025
0
}
1026
1027
void *qemu_ram_get_host_addr(RAMBlock *rb)
1028
0
{
1029
0
    return rb->host;
1030
0
}
Unexecuted instantiation: qemu_ram_get_host_addr_x86_64
Unexecuted instantiation: qemu_ram_get_host_addr_arm
Unexecuted instantiation: qemu_ram_get_host_addr_aarch64
Unexecuted instantiation: qemu_ram_get_host_addr_m68k
Unexecuted instantiation: qemu_ram_get_host_addr_mips
Unexecuted instantiation: qemu_ram_get_host_addr_mipsel
Unexecuted instantiation: qemu_ram_get_host_addr_mips64
Unexecuted instantiation: qemu_ram_get_host_addr_mips64el
Unexecuted instantiation: qemu_ram_get_host_addr_sparc
Unexecuted instantiation: qemu_ram_get_host_addr_sparc64
Unexecuted instantiation: qemu_ram_get_host_addr_ppc
Unexecuted instantiation: qemu_ram_get_host_addr_ppc64
Unexecuted instantiation: qemu_ram_get_host_addr_riscv32
Unexecuted instantiation: qemu_ram_get_host_addr_riscv64
Unexecuted instantiation: qemu_ram_get_host_addr_s390x
Unexecuted instantiation: qemu_ram_get_host_addr_tricore
1031
1032
ram_addr_t qemu_ram_get_offset(RAMBlock *rb)
1033
0
{
1034
0
    return rb->offset;
1035
0
}
Unexecuted instantiation: qemu_ram_get_offset_x86_64
Unexecuted instantiation: qemu_ram_get_offset_arm
Unexecuted instantiation: qemu_ram_get_offset_aarch64
Unexecuted instantiation: qemu_ram_get_offset_m68k
Unexecuted instantiation: qemu_ram_get_offset_mips
Unexecuted instantiation: qemu_ram_get_offset_mipsel
Unexecuted instantiation: qemu_ram_get_offset_mips64
Unexecuted instantiation: qemu_ram_get_offset_mips64el
Unexecuted instantiation: qemu_ram_get_offset_sparc
Unexecuted instantiation: qemu_ram_get_offset_sparc64
Unexecuted instantiation: qemu_ram_get_offset_ppc
Unexecuted instantiation: qemu_ram_get_offset_ppc64
Unexecuted instantiation: qemu_ram_get_offset_riscv32
Unexecuted instantiation: qemu_ram_get_offset_riscv64
Unexecuted instantiation: qemu_ram_get_offset_s390x
Unexecuted instantiation: qemu_ram_get_offset_tricore
1036
1037
ram_addr_t qemu_ram_get_used_length(RAMBlock *rb)
1038
0
{
1039
0
    return rb->used_length;
1040
0
}
Unexecuted instantiation: qemu_ram_get_used_length_x86_64
Unexecuted instantiation: qemu_ram_get_used_length_arm
Unexecuted instantiation: qemu_ram_get_used_length_aarch64
Unexecuted instantiation: qemu_ram_get_used_length_m68k
Unexecuted instantiation: qemu_ram_get_used_length_mips
Unexecuted instantiation: qemu_ram_get_used_length_mipsel
Unexecuted instantiation: qemu_ram_get_used_length_mips64
Unexecuted instantiation: qemu_ram_get_used_length_mips64el
Unexecuted instantiation: qemu_ram_get_used_length_sparc
Unexecuted instantiation: qemu_ram_get_used_length_sparc64
Unexecuted instantiation: qemu_ram_get_used_length_ppc
Unexecuted instantiation: qemu_ram_get_used_length_ppc64
Unexecuted instantiation: qemu_ram_get_used_length_riscv32
Unexecuted instantiation: qemu_ram_get_used_length_riscv64
Unexecuted instantiation: qemu_ram_get_used_length_s390x
Unexecuted instantiation: qemu_ram_get_used_length_tricore
1041
1042
bool qemu_ram_is_shared(RAMBlock *rb)
1043
0
{
1044
0
    return rb->flags & RAM_SHARED;
1045
0
}
Unexecuted instantiation: qemu_ram_is_shared_x86_64
Unexecuted instantiation: qemu_ram_is_shared_arm
Unexecuted instantiation: qemu_ram_is_shared_aarch64
Unexecuted instantiation: qemu_ram_is_shared_m68k
Unexecuted instantiation: qemu_ram_is_shared_mips
Unexecuted instantiation: qemu_ram_is_shared_mipsel
Unexecuted instantiation: qemu_ram_is_shared_mips64
Unexecuted instantiation: qemu_ram_is_shared_mips64el
Unexecuted instantiation: qemu_ram_is_shared_sparc
Unexecuted instantiation: qemu_ram_is_shared_sparc64
Unexecuted instantiation: qemu_ram_is_shared_ppc
Unexecuted instantiation: qemu_ram_is_shared_ppc64
Unexecuted instantiation: qemu_ram_is_shared_riscv32
Unexecuted instantiation: qemu_ram_is_shared_riscv64
Unexecuted instantiation: qemu_ram_is_shared_s390x
Unexecuted instantiation: qemu_ram_is_shared_tricore
1046
1047
size_t qemu_ram_pagesize(RAMBlock *rb)
1048
0
{
1049
0
    return rb->page_size;
1050
0
}
Unexecuted instantiation: qemu_ram_pagesize_x86_64
Unexecuted instantiation: qemu_ram_pagesize_arm
Unexecuted instantiation: qemu_ram_pagesize_aarch64
Unexecuted instantiation: qemu_ram_pagesize_m68k
Unexecuted instantiation: qemu_ram_pagesize_mips
Unexecuted instantiation: qemu_ram_pagesize_mipsel
Unexecuted instantiation: qemu_ram_pagesize_mips64
Unexecuted instantiation: qemu_ram_pagesize_mips64el
Unexecuted instantiation: qemu_ram_pagesize_sparc
Unexecuted instantiation: qemu_ram_pagesize_sparc64
Unexecuted instantiation: qemu_ram_pagesize_ppc
Unexecuted instantiation: qemu_ram_pagesize_ppc64
Unexecuted instantiation: qemu_ram_pagesize_riscv32
Unexecuted instantiation: qemu_ram_pagesize_riscv64
Unexecuted instantiation: qemu_ram_pagesize_s390x
Unexecuted instantiation: qemu_ram_pagesize_tricore
1051
1052
static void ram_block_add(struct uc_struct *uc, RAMBlock *new_block)
1053
176k
{
1054
176k
    RAMBlock *block;
1055
176k
    RAMBlock *last_block = NULL;
1056
1057
176k
    new_block->offset = find_ram_offset(uc, new_block->max_length);
1058
1059
176k
    if (!new_block->host) {
1060
176k
        new_block->host = phys_mem_alloc(uc, new_block->max_length,
1061
176k
                &new_block->mr->align);
1062
176k
        if (!new_block->host) {
1063
            // mmap fails.
1064
0
            uc->invalid_error = UC_ERR_NOMEM;
1065
            // error_setg_errno(errp, errno,
1066
            //         "cannot set up guest memory '%s'",
1067
            //         memory_region_name(new_block->mr));
1068
0
            return;
1069
0
        }
1070
        // memory_try_enable_merging(new_block->host, new_block->max_length);
1071
176k
    }
1072
1073
    /* Keep the list sorted from biggest to smallest block.  Unlike QTAILQ,
1074
     * QLIST (which has an RCU-friendly variant) does not have insertion at
1075
     * tail, so save the last element in last_block.
1076
     */
1077
176k
    if (uc->ram_list.freed || new_block->max_length > uc->target_page_size) {
1078
176k
        RAMBLOCK_FOREACH(block) {
1079
0
            last_block = block;
1080
0
            if (block->max_length < new_block->max_length) {
1081
0
                break;
1082
0
            }
1083
0
        }
1084
176k
    } else {
1085
0
        last_block = uc->ram_list.last_block;
1086
0
        block = NULL;
1087
0
    }
1088
1089
176k
    if (block) {
1090
0
        QLIST_INSERT_BEFORE_RCU(block, new_block, next);
1091
176k
    } else if (last_block) {
1092
0
        QLIST_INSERT_AFTER_RCU(last_block, new_block, next);
1093
0
        uc->ram_list.last_block = new_block;
1094
176k
    } else { /* list is empty */
1095
176k
        QLIST_INSERT_HEAD_RCU(&uc->ram_list.blocks, new_block, next);
1096
176k
        uc->ram_list.last_block = new_block;
1097
176k
    }
1098
176k
    uc->ram_list.mru_block = NULL;
1099
1100
    /* Write list before version */
1101
    //smp_wmb();
1102
1103
176k
    cpu_physical_memory_set_dirty_range(new_block->offset,
1104
176k
                                        new_block->used_length,
1105
176k
                                        DIRTY_CLIENTS_ALL);
1106
1107
176k
}
1108
1109
RAMBlock *qemu_ram_alloc_from_ptr(struct uc_struct *uc, ram_addr_t size, void *host,
1110
                                   MemoryRegion *mr)
1111
176k
{
1112
176k
    RAMBlock *new_block;
1113
176k
    ram_addr_t max_size = size;
1114
1115
    // Don't resize pre-alloced memory as they are given by users.
1116
176k
    if (!host) {
1117
176k
        size = HOST_PAGE_ALIGN(uc, size);
1118
176k
        max_size = HOST_PAGE_ALIGN(uc, max_size);
1119
176k
    }
1120
1121
176k
    new_block = g_malloc0(sizeof(*new_block));
1122
176k
    if (new_block == NULL)
1123
0
        return NULL;
1124
176k
    new_block->mr = mr;
1125
176k
    new_block->used_length = size;
1126
176k
    new_block->max_length = max_size;
1127
176k
    assert(max_size >= size);
1128
176k
    new_block->page_size = uc->qemu_real_host_page_size;
1129
176k
    new_block->host = host;
1130
176k
    if (host) {
1131
0
        new_block->flags |= RAM_PREALLOC;
1132
0
    }
1133
1134
176k
    uc->invalid_addr = 0;
1135
176k
    uc->invalid_error = UC_ERR_OK;
1136
176k
    ram_block_add(mr->uc, new_block);
1137
1138
176k
    if (uc->invalid_error != UC_ERR_OK) {
1139
0
        g_free(new_block);
1140
0
        return NULL;
1141
0
    }
1142
1143
176k
    return new_block;
1144
176k
}
qemu_ram_alloc_from_ptr_x86_64
Line
Count
Source
1111
33.6k
{
1112
33.6k
    RAMBlock *new_block;
1113
33.6k
    ram_addr_t max_size = size;
1114
1115
    // Don't resize pre-alloced memory as they are given by users.
1116
33.6k
    if (!host) {
1117
33.6k
        size = HOST_PAGE_ALIGN(uc, size);
1118
33.6k
        max_size = HOST_PAGE_ALIGN(uc, max_size);
1119
33.6k
    }
1120
1121
33.6k
    new_block = g_malloc0(sizeof(*new_block));
1122
33.6k
    if (new_block == NULL)
1123
0
        return NULL;
1124
33.6k
    new_block->mr = mr;
1125
33.6k
    new_block->used_length = size;
1126
33.6k
    new_block->max_length = max_size;
1127
33.6k
    assert(max_size >= size);
1128
33.6k
    new_block->page_size = uc->qemu_real_host_page_size;
1129
33.6k
    new_block->host = host;
1130
33.6k
    if (host) {
1131
0
        new_block->flags |= RAM_PREALLOC;
1132
0
    }
1133
1134
33.6k
    uc->invalid_addr = 0;
1135
33.6k
    uc->invalid_error = UC_ERR_OK;
1136
33.6k
    ram_block_add(mr->uc, new_block);
1137
1138
33.6k
    if (uc->invalid_error != UC_ERR_OK) {
1139
0
        g_free(new_block);
1140
0
        return NULL;
1141
0
    }
1142
1143
33.6k
    return new_block;
1144
33.6k
}
qemu_ram_alloc_from_ptr_arm
Line
Count
Source
1111
43.9k
{
1112
43.9k
    RAMBlock *new_block;
1113
43.9k
    ram_addr_t max_size = size;
1114
1115
    // Don't resize pre-alloced memory as they are given by users.
1116
43.9k
    if (!host) {
1117
43.9k
        size = HOST_PAGE_ALIGN(uc, size);
1118
43.9k
        max_size = HOST_PAGE_ALIGN(uc, max_size);
1119
43.9k
    }
1120
1121
43.9k
    new_block = g_malloc0(sizeof(*new_block));
1122
43.9k
    if (new_block == NULL)
1123
0
        return NULL;
1124
43.9k
    new_block->mr = mr;
1125
43.9k
    new_block->used_length = size;
1126
43.9k
    new_block->max_length = max_size;
1127
43.9k
    assert(max_size >= size);
1128
43.9k
    new_block->page_size = uc->qemu_real_host_page_size;
1129
43.9k
    new_block->host = host;
1130
43.9k
    if (host) {
1131
0
        new_block->flags |= RAM_PREALLOC;
1132
0
    }
1133
1134
43.9k
    uc->invalid_addr = 0;
1135
43.9k
    uc->invalid_error = UC_ERR_OK;
1136
43.9k
    ram_block_add(mr->uc, new_block);
1137
1138
43.9k
    if (uc->invalid_error != UC_ERR_OK) {
1139
0
        g_free(new_block);
1140
0
        return NULL;
1141
0
    }
1142
1143
43.9k
    return new_block;
1144
43.9k
}
qemu_ram_alloc_from_ptr_aarch64
Line
Count
Source
1111
60.2k
{
1112
60.2k
    RAMBlock *new_block;
1113
60.2k
    ram_addr_t max_size = size;
1114
1115
    // Don't resize pre-alloced memory as they are given by users.
1116
60.2k
    if (!host) {
1117
60.2k
        size = HOST_PAGE_ALIGN(uc, size);
1118
60.2k
        max_size = HOST_PAGE_ALIGN(uc, max_size);
1119
60.2k
    }
1120
1121
60.2k
    new_block = g_malloc0(sizeof(*new_block));
1122
60.2k
    if (new_block == NULL)
1123
0
        return NULL;
1124
60.2k
    new_block->mr = mr;
1125
60.2k
    new_block->used_length = size;
1126
60.2k
    new_block->max_length = max_size;
1127
60.2k
    assert(max_size >= size);
1128
60.2k
    new_block->page_size = uc->qemu_real_host_page_size;
1129
60.2k
    new_block->host = host;
1130
60.2k
    if (host) {
1131
0
        new_block->flags |= RAM_PREALLOC;
1132
0
    }
1133
1134
60.2k
    uc->invalid_addr = 0;
1135
60.2k
    uc->invalid_error = UC_ERR_OK;
1136
60.2k
    ram_block_add(mr->uc, new_block);
1137
1138
60.2k
    if (uc->invalid_error != UC_ERR_OK) {
1139
0
        g_free(new_block);
1140
0
        return NULL;
1141
0
    }
1142
1143
60.2k
    return new_block;
1144
60.2k
}
qemu_ram_alloc_from_ptr_m68k
Line
Count
Source
1111
29
{
1112
29
    RAMBlock *new_block;
1113
29
    ram_addr_t max_size = size;
1114
1115
    // Don't resize pre-alloced memory as they are given by users.
1116
29
    if (!host) {
1117
29
        size = HOST_PAGE_ALIGN(uc, size);
1118
29
        max_size = HOST_PAGE_ALIGN(uc, max_size);
1119
29
    }
1120
1121
29
    new_block = g_malloc0(sizeof(*new_block));
1122
29
    if (new_block == NULL)
1123
0
        return NULL;
1124
29
    new_block->mr = mr;
1125
29
    new_block->used_length = size;
1126
29
    new_block->max_length = max_size;
1127
29
    assert(max_size >= size);
1128
29
    new_block->page_size = uc->qemu_real_host_page_size;
1129
29
    new_block->host = host;
1130
29
    if (host) {
1131
0
        new_block->flags |= RAM_PREALLOC;
1132
0
    }
1133
1134
29
    uc->invalid_addr = 0;
1135
29
    uc->invalid_error = UC_ERR_OK;
1136
29
    ram_block_add(mr->uc, new_block);
1137
1138
29
    if (uc->invalid_error != UC_ERR_OK) {
1139
0
        g_free(new_block);
1140
0
        return NULL;
1141
0
    }
1142
1143
29
    return new_block;
1144
29
}
qemu_ram_alloc_from_ptr_mips
Line
Count
Source
1111
7.41k
{
1112
7.41k
    RAMBlock *new_block;
1113
7.41k
    ram_addr_t max_size = size;
1114
1115
    // Don't resize pre-alloced memory as they are given by users.
1116
7.41k
    if (!host) {
1117
7.41k
        size = HOST_PAGE_ALIGN(uc, size);
1118
7.41k
        max_size = HOST_PAGE_ALIGN(uc, max_size);
1119
7.41k
    }
1120
1121
7.41k
    new_block = g_malloc0(sizeof(*new_block));
1122
7.41k
    if (new_block == NULL)
1123
0
        return NULL;
1124
7.41k
    new_block->mr = mr;
1125
7.41k
    new_block->used_length = size;
1126
7.41k
    new_block->max_length = max_size;
1127
7.41k
    assert(max_size >= size);
1128
7.41k
    new_block->page_size = uc->qemu_real_host_page_size;
1129
7.41k
    new_block->host = host;
1130
7.41k
    if (host) {
1131
0
        new_block->flags |= RAM_PREALLOC;
1132
0
    }
1133
1134
7.41k
    uc->invalid_addr = 0;
1135
7.41k
    uc->invalid_error = UC_ERR_OK;
1136
7.41k
    ram_block_add(mr->uc, new_block);
1137
1138
7.41k
    if (uc->invalid_error != UC_ERR_OK) {
1139
0
        g_free(new_block);
1140
0
        return NULL;
1141
0
    }
1142
1143
7.41k
    return new_block;
1144
7.41k
}
qemu_ram_alloc_from_ptr_mipsel
Line
Count
Source
1111
8.94k
{
1112
8.94k
    RAMBlock *new_block;
1113
8.94k
    ram_addr_t max_size = size;
1114
1115
    // Don't resize pre-alloced memory as they are given by users.
1116
8.94k
    if (!host) {
1117
8.94k
        size = HOST_PAGE_ALIGN(uc, size);
1118
8.94k
        max_size = HOST_PAGE_ALIGN(uc, max_size);
1119
8.94k
    }
1120
1121
8.94k
    new_block = g_malloc0(sizeof(*new_block));
1122
8.94k
    if (new_block == NULL)
1123
0
        return NULL;
1124
8.94k
    new_block->mr = mr;
1125
8.94k
    new_block->used_length = size;
1126
8.94k
    new_block->max_length = max_size;
1127
8.94k
    assert(max_size >= size);
1128
8.94k
    new_block->page_size = uc->qemu_real_host_page_size;
1129
8.94k
    new_block->host = host;
1130
8.94k
    if (host) {
1131
0
        new_block->flags |= RAM_PREALLOC;
1132
0
    }
1133
1134
8.94k
    uc->invalid_addr = 0;
1135
8.94k
    uc->invalid_error = UC_ERR_OK;
1136
8.94k
    ram_block_add(mr->uc, new_block);
1137
1138
8.94k
    if (uc->invalid_error != UC_ERR_OK) {
1139
0
        g_free(new_block);
1140
0
        return NULL;
1141
0
    }
1142
1143
8.94k
    return new_block;
1144
8.94k
}
Unexecuted instantiation: qemu_ram_alloc_from_ptr_mips64
Unexecuted instantiation: qemu_ram_alloc_from_ptr_mips64el
qemu_ram_alloc_from_ptr_sparc
Line
Count
Source
1111
46
{
1112
46
    RAMBlock *new_block;
1113
46
    ram_addr_t max_size = size;
1114
1115
    // Don't resize pre-alloced memory as they are given by users.
1116
46
    if (!host) {
1117
46
        size = HOST_PAGE_ALIGN(uc, size);
1118
46
        max_size = HOST_PAGE_ALIGN(uc, max_size);
1119
46
    }
1120
1121
46
    new_block = g_malloc0(sizeof(*new_block));
1122
46
    if (new_block == NULL)
1123
0
        return NULL;
1124
46
    new_block->mr = mr;
1125
46
    new_block->used_length = size;
1126
46
    new_block->max_length = max_size;
1127
46
    assert(max_size >= size);
1128
46
    new_block->page_size = uc->qemu_real_host_page_size;
1129
46
    new_block->host = host;
1130
46
    if (host) {
1131
0
        new_block->flags |= RAM_PREALLOC;
1132
0
    }
1133
1134
46
    uc->invalid_addr = 0;
1135
46
    uc->invalid_error = UC_ERR_OK;
1136
46
    ram_block_add(mr->uc, new_block);
1137
1138
46
    if (uc->invalid_error != UC_ERR_OK) {
1139
0
        g_free(new_block);
1140
0
        return NULL;
1141
0
    }
1142
1143
46
    return new_block;
1144
46
}
Unexecuted instantiation: qemu_ram_alloc_from_ptr_sparc64
Unexecuted instantiation: qemu_ram_alloc_from_ptr_ppc
Unexecuted instantiation: qemu_ram_alloc_from_ptr_ppc64
Unexecuted instantiation: qemu_ram_alloc_from_ptr_riscv32
Unexecuted instantiation: qemu_ram_alloc_from_ptr_riscv64
qemu_ram_alloc_from_ptr_s390x
Line
Count
Source
1111
21.9k
{
1112
21.9k
    RAMBlock *new_block;
1113
21.9k
    ram_addr_t max_size = size;
1114
1115
    // Don't resize pre-alloced memory as they are given by users.
1116
21.9k
    if (!host) {
1117
21.9k
        size = HOST_PAGE_ALIGN(uc, size);
1118
21.9k
        max_size = HOST_PAGE_ALIGN(uc, max_size);
1119
21.9k
    }
1120
1121
21.9k
    new_block = g_malloc0(sizeof(*new_block));
1122
21.9k
    if (new_block == NULL)
1123
0
        return NULL;
1124
21.9k
    new_block->mr = mr;
1125
21.9k
    new_block->used_length = size;
1126
21.9k
    new_block->max_length = max_size;
1127
21.9k
    assert(max_size >= size);
1128
21.9k
    new_block->page_size = uc->qemu_real_host_page_size;
1129
21.9k
    new_block->host = host;
1130
21.9k
    if (host) {
1131
0
        new_block->flags |= RAM_PREALLOC;
1132
0
    }
1133
1134
21.9k
    uc->invalid_addr = 0;
1135
21.9k
    uc->invalid_error = UC_ERR_OK;
1136
21.9k
    ram_block_add(mr->uc, new_block);
1137
1138
21.9k
    if (uc->invalid_error != UC_ERR_OK) {
1139
0
        g_free(new_block);
1140
0
        return NULL;
1141
0
    }
1142
1143
21.9k
    return new_block;
1144
21.9k
}
Unexecuted instantiation: qemu_ram_alloc_from_ptr_tricore
1145
1146
RAMBlock *qemu_ram_alloc(struct uc_struct *uc, ram_addr_t size, MemoryRegion *mr)
1147
176k
{
1148
176k
    return qemu_ram_alloc_from_ptr(uc, size, NULL, mr);
1149
176k
}
qemu_ram_alloc_x86_64
Line
Count
Source
1147
33.6k
{
1148
33.6k
    return qemu_ram_alloc_from_ptr(uc, size, NULL, mr);
1149
33.6k
}
qemu_ram_alloc_arm
Line
Count
Source
1147
43.9k
{
1148
43.9k
    return qemu_ram_alloc_from_ptr(uc, size, NULL, mr);
1149
43.9k
}
qemu_ram_alloc_aarch64
Line
Count
Source
1147
60.2k
{
1148
60.2k
    return qemu_ram_alloc_from_ptr(uc, size, NULL, mr);
1149
60.2k
}
qemu_ram_alloc_m68k
Line
Count
Source
1147
29
{
1148
29
    return qemu_ram_alloc_from_ptr(uc, size, NULL, mr);
1149
29
}
qemu_ram_alloc_mips
Line
Count
Source
1147
7.41k
{
1148
7.41k
    return qemu_ram_alloc_from_ptr(uc, size, NULL, mr);
1149
7.41k
}
qemu_ram_alloc_mipsel
Line
Count
Source
1147
8.94k
{
1148
8.94k
    return qemu_ram_alloc_from_ptr(uc, size, NULL, mr);
1149
8.94k
}
Unexecuted instantiation: qemu_ram_alloc_mips64
Unexecuted instantiation: qemu_ram_alloc_mips64el
qemu_ram_alloc_sparc
Line
Count
Source
1147
46
{
1148
46
    return qemu_ram_alloc_from_ptr(uc, size, NULL, mr);
1149
46
}
Unexecuted instantiation: qemu_ram_alloc_sparc64
Unexecuted instantiation: qemu_ram_alloc_ppc
Unexecuted instantiation: qemu_ram_alloc_ppc64
Unexecuted instantiation: qemu_ram_alloc_riscv32
Unexecuted instantiation: qemu_ram_alloc_riscv64
qemu_ram_alloc_s390x
Line
Count
Source
1147
21.9k
{
1148
21.9k
    return qemu_ram_alloc_from_ptr(uc, size, NULL, mr);
1149
21.9k
}
Unexecuted instantiation: qemu_ram_alloc_tricore
1150
1151
static void reclaim_ramblock(struct uc_struct *uc, RAMBlock *block)
1152
176k
{
1153
176k
    if (block->flags & RAM_PREALLOC) {
1154
0
        ;
1155
176k
    } else if (false) {
1156
176k
    } else {
1157
176k
        qemu_anon_ram_free(uc, block->host, block->max_length);
1158
176k
    }
1159
176k
    g_free(block);
1160
176k
}
1161
1162
void qemu_ram_free(struct uc_struct *uc, RAMBlock *block)
1163
176k
{
1164
176k
    if (!block) {
1165
0
        return;
1166
0
    }
1167
1168
    //if (block->host) {
1169
    //    ram_block_notify_remove(block->host, block->max_length);
1170
    //}
1171
1172
176k
    QLIST_REMOVE_RCU(block, next);
1173
176k
    uc->ram_list.mru_block = NULL;
1174
176k
    uc->ram_list.freed = true;
1175
176k
    uc->ram_list.last_block = NULL;
1176
    /* Write list before version */
1177
    //smp_wmb();
1178
    // call_rcu(block, reclaim_ramblock, rcu);
1179
176k
    reclaim_ramblock(uc, block);
1180
176k
}
qemu_ram_free_x86_64
Line
Count
Source
1163
33.6k
{
1164
33.6k
    if (!block) {
1165
0
        return;
1166
0
    }
1167
1168
    //if (block->host) {
1169
    //    ram_block_notify_remove(block->host, block->max_length);
1170
    //}
1171
1172
33.6k
    QLIST_REMOVE_RCU(block, next);
1173
33.6k
    uc->ram_list.mru_block = NULL;
1174
33.6k
    uc->ram_list.freed = true;
1175
33.6k
    uc->ram_list.last_block = NULL;
1176
    /* Write list before version */
1177
    //smp_wmb();
1178
    // call_rcu(block, reclaim_ramblock, rcu);
1179
33.6k
    reclaim_ramblock(uc, block);
1180
33.6k
}
qemu_ram_free_arm
Line
Count
Source
1163
43.9k
{
1164
43.9k
    if (!block) {
1165
0
        return;
1166
0
    }
1167
1168
    //if (block->host) {
1169
    //    ram_block_notify_remove(block->host, block->max_length);
1170
    //}
1171
1172
43.9k
    QLIST_REMOVE_RCU(block, next);
1173
43.9k
    uc->ram_list.mru_block = NULL;
1174
43.9k
    uc->ram_list.freed = true;
1175
43.9k
    uc->ram_list.last_block = NULL;
1176
    /* Write list before version */
1177
    //smp_wmb();
1178
    // call_rcu(block, reclaim_ramblock, rcu);
1179
43.9k
    reclaim_ramblock(uc, block);
1180
43.9k
}
qemu_ram_free_aarch64
Line
Count
Source
1163
60.2k
{
1164
60.2k
    if (!block) {
1165
0
        return;
1166
0
    }
1167
1168
    //if (block->host) {
1169
    //    ram_block_notify_remove(block->host, block->max_length);
1170
    //}
1171
1172
60.2k
    QLIST_REMOVE_RCU(block, next);
1173
60.2k
    uc->ram_list.mru_block = NULL;
1174
60.2k
    uc->ram_list.freed = true;
1175
60.2k
    uc->ram_list.last_block = NULL;
1176
    /* Write list before version */
1177
    //smp_wmb();
1178
    // call_rcu(block, reclaim_ramblock, rcu);
1179
60.2k
    reclaim_ramblock(uc, block);
1180
60.2k
}
qemu_ram_free_m68k
Line
Count
Source
1163
29
{
1164
29
    if (!block) {
1165
0
        return;
1166
0
    }
1167
1168
    //if (block->host) {
1169
    //    ram_block_notify_remove(block->host, block->max_length);
1170
    //}
1171
1172
29
    QLIST_REMOVE_RCU(block, next);
1173
29
    uc->ram_list.mru_block = NULL;
1174
29
    uc->ram_list.freed = true;
1175
29
    uc->ram_list.last_block = NULL;
1176
    /* Write list before version */
1177
    //smp_wmb();
1178
    // call_rcu(block, reclaim_ramblock, rcu);
1179
29
    reclaim_ramblock(uc, block);
1180
29
}
qemu_ram_free_mips
Line
Count
Source
1163
7.41k
{
1164
7.41k
    if (!block) {
1165
0
        return;
1166
0
    }
1167
1168
    //if (block->host) {
1169
    //    ram_block_notify_remove(block->host, block->max_length);
1170
    //}
1171
1172
7.41k
    QLIST_REMOVE_RCU(block, next);
1173
7.41k
    uc->ram_list.mru_block = NULL;
1174
7.41k
    uc->ram_list.freed = true;
1175
7.41k
    uc->ram_list.last_block = NULL;
1176
    /* Write list before version */
1177
    //smp_wmb();
1178
    // call_rcu(block, reclaim_ramblock, rcu);
1179
7.41k
    reclaim_ramblock(uc, block);
1180
7.41k
}
qemu_ram_free_mipsel
Line
Count
Source
1163
8.94k
{
1164
8.94k
    if (!block) {
1165
0
        return;
1166
0
    }
1167
1168
    //if (block->host) {
1169
    //    ram_block_notify_remove(block->host, block->max_length);
1170
    //}
1171
1172
8.94k
    QLIST_REMOVE_RCU(block, next);
1173
8.94k
    uc->ram_list.mru_block = NULL;
1174
8.94k
    uc->ram_list.freed = true;
1175
8.94k
    uc->ram_list.last_block = NULL;
1176
    /* Write list before version */
1177
    //smp_wmb();
1178
    // call_rcu(block, reclaim_ramblock, rcu);
1179
8.94k
    reclaim_ramblock(uc, block);
1180
8.94k
}
Unexecuted instantiation: qemu_ram_free_mips64
Unexecuted instantiation: qemu_ram_free_mips64el
qemu_ram_free_sparc
Line
Count
Source
1163
46
{
1164
46
    if (!block) {
1165
0
        return;
1166
0
    }
1167
1168
    //if (block->host) {
1169
    //    ram_block_notify_remove(block->host, block->max_length);
1170
    //}
1171
1172
46
    QLIST_REMOVE_RCU(block, next);
1173
46
    uc->ram_list.mru_block = NULL;
1174
46
    uc->ram_list.freed = true;
1175
46
    uc->ram_list.last_block = NULL;
1176
    /* Write list before version */
1177
    //smp_wmb();
1178
    // call_rcu(block, reclaim_ramblock, rcu);
1179
46
    reclaim_ramblock(uc, block);
1180
46
}
Unexecuted instantiation: qemu_ram_free_sparc64
Unexecuted instantiation: qemu_ram_free_ppc
Unexecuted instantiation: qemu_ram_free_ppc64
Unexecuted instantiation: qemu_ram_free_riscv32
Unexecuted instantiation: qemu_ram_free_riscv64
qemu_ram_free_s390x
Line
Count
Source
1163
21.9k
{
1164
21.9k
    if (!block) {
1165
0
        return;
1166
0
    }
1167
1168
    //if (block->host) {
1169
    //    ram_block_notify_remove(block->host, block->max_length);
1170
    //}
1171
1172
21.9k
    QLIST_REMOVE_RCU(block, next);
1173
21.9k
    uc->ram_list.mru_block = NULL;
1174
21.9k
    uc->ram_list.freed = true;
1175
21.9k
    uc->ram_list.last_block = NULL;
1176
    /* Write list before version */
1177
    //smp_wmb();
1178
    // call_rcu(block, reclaim_ramblock, rcu);
1179
21.9k
    reclaim_ramblock(uc, block);
1180
21.9k
}
Unexecuted instantiation: qemu_ram_free_tricore
1181
1182
/* Return a host pointer to ram allocated with qemu_ram_alloc.
1183
 * This should not be used for general purpose DMA.  Use address_space_map
1184
 * or address_space_rw instead. For local memory (e.g. video ram) that the
1185
 * device owns, use memory_region_get_ram_ptr.
1186
 *
1187
 * Called within RCU critical section.
1188
 */
1189
void *qemu_map_ram_ptr(struct uc_struct *uc, RAMBlock *ram_block, ram_addr_t addr)
1190
2.76M
{
1191
2.76M
    RAMBlock *block = ram_block;
1192
1193
2.76M
    if (block == NULL) {
1194
0
        block = qemu_get_ram_block(uc, addr);
1195
0
        addr -= block->offset;
1196
0
    }
1197
1198
2.76M
    return ramblock_ptr(block, addr);
1199
2.76M
}
qemu_map_ram_ptr_x86_64
Line
Count
Source
1190
219k
{
1191
219k
    RAMBlock *block = ram_block;
1192
1193
219k
    if (block == NULL) {
1194
0
        block = qemu_get_ram_block(uc, addr);
1195
0
        addr -= block->offset;
1196
0
    }
1197
1198
219k
    return ramblock_ptr(block, addr);
1199
219k
}
qemu_map_ram_ptr_arm
Line
Count
Source
1190
829k
{
1191
829k
    RAMBlock *block = ram_block;
1192
1193
829k
    if (block == NULL) {
1194
0
        block = qemu_get_ram_block(uc, addr);
1195
0
        addr -= block->offset;
1196
0
    }
1197
1198
829k
    return ramblock_ptr(block, addr);
1199
829k
}
qemu_map_ram_ptr_aarch64
Line
Count
Source
1190
266k
{
1191
266k
    RAMBlock *block = ram_block;
1192
1193
266k
    if (block == NULL) {
1194
0
        block = qemu_get_ram_block(uc, addr);
1195
0
        addr -= block->offset;
1196
0
    }
1197
1198
266k
    return ramblock_ptr(block, addr);
1199
266k
}
qemu_map_ram_ptr_m68k
Line
Count
Source
1190
47
{
1191
47
    RAMBlock *block = ram_block;
1192
1193
47
    if (block == NULL) {
1194
0
        block = qemu_get_ram_block(uc, addr);
1195
0
        addr -= block->offset;
1196
0
    }
1197
1198
47
    return ramblock_ptr(block, addr);
1199
47
}
qemu_map_ram_ptr_mips
Line
Count
Source
1190
278k
{
1191
278k
    RAMBlock *block = ram_block;
1192
1193
278k
    if (block == NULL) {
1194
0
        block = qemu_get_ram_block(uc, addr);
1195
0
        addr -= block->offset;
1196
0
    }
1197
1198
278k
    return ramblock_ptr(block, addr);
1199
278k
}
qemu_map_ram_ptr_mipsel
Line
Count
Source
1190
24.2k
{
1191
24.2k
    RAMBlock *block = ram_block;
1192
1193
24.2k
    if (block == NULL) {
1194
0
        block = qemu_get_ram_block(uc, addr);
1195
0
        addr -= block->offset;
1196
0
    }
1197
1198
24.2k
    return ramblock_ptr(block, addr);
1199
24.2k
}
Unexecuted instantiation: qemu_map_ram_ptr_mips64
Unexecuted instantiation: qemu_map_ram_ptr_mips64el
qemu_map_ram_ptr_sparc
Line
Count
Source
1190
60
{
1191
60
    RAMBlock *block = ram_block;
1192
1193
60
    if (block == NULL) {
1194
0
        block = qemu_get_ram_block(uc, addr);
1195
0
        addr -= block->offset;
1196
0
    }
1197
1198
60
    return ramblock_ptr(block, addr);
1199
60
}
Unexecuted instantiation: qemu_map_ram_ptr_sparc64
Unexecuted instantiation: qemu_map_ram_ptr_ppc
Unexecuted instantiation: qemu_map_ram_ptr_ppc64
Unexecuted instantiation: qemu_map_ram_ptr_riscv32
Unexecuted instantiation: qemu_map_ram_ptr_riscv64
qemu_map_ram_ptr_s390x
Line
Count
Source
1190
1.14M
{
1191
1.14M
    RAMBlock *block = ram_block;
1192
1193
1.14M
    if (block == NULL) {
1194
0
        block = qemu_get_ram_block(uc, addr);
1195
0
        addr -= block->offset;
1196
0
    }
1197
1198
1.14M
    return ramblock_ptr(block, addr);
1199
1.14M
}
Unexecuted instantiation: qemu_map_ram_ptr_tricore
1200
1201
/* Return a host pointer to guest's ram. Similar to qemu_map_ram_ptr
1202
 * but takes a size argument.
1203
 *
1204
 * Called within RCU critical section.
1205
 */
1206
static void *qemu_ram_ptr_length(struct uc_struct *uc, RAMBlock *ram_block, ram_addr_t addr,
1207
                                 hwaddr *size, bool lock)
1208
176k
{
1209
176k
    RAMBlock *block = ram_block;
1210
176k
    if (*size == 0) {
1211
0
        return NULL;
1212
0
    }
1213
1214
176k
    if (block == NULL) {
1215
0
        block = qemu_get_ram_block(uc, addr);
1216
0
        addr -= block->offset;
1217
0
    }
1218
176k
    *size = MIN(*size, block->max_length - addr);
1219
1220
176k
    return ramblock_ptr(block, addr);
1221
176k
}
1222
1223
/* Return the offset of a hostpointer within a ramblock */
1224
ram_addr_t qemu_ram_block_host_offset(RAMBlock *rb, void *host)
1225
0
{
1226
0
    ram_addr_t res = (uint8_t *)host - (uint8_t *)rb->host;
1227
0
    assert((uintptr_t)host >= (uintptr_t)rb->host);
1228
0
    assert(res < rb->max_length);
1229
1230
0
    return res;
1231
0
}
Unexecuted instantiation: qemu_ram_block_host_offset_x86_64
Unexecuted instantiation: qemu_ram_block_host_offset_arm
Unexecuted instantiation: qemu_ram_block_host_offset_aarch64
Unexecuted instantiation: qemu_ram_block_host_offset_m68k
Unexecuted instantiation: qemu_ram_block_host_offset_mips
Unexecuted instantiation: qemu_ram_block_host_offset_mipsel
Unexecuted instantiation: qemu_ram_block_host_offset_mips64
Unexecuted instantiation: qemu_ram_block_host_offset_mips64el
Unexecuted instantiation: qemu_ram_block_host_offset_sparc
Unexecuted instantiation: qemu_ram_block_host_offset_sparc64
Unexecuted instantiation: qemu_ram_block_host_offset_ppc
Unexecuted instantiation: qemu_ram_block_host_offset_ppc64
Unexecuted instantiation: qemu_ram_block_host_offset_riscv32
Unexecuted instantiation: qemu_ram_block_host_offset_riscv64
Unexecuted instantiation: qemu_ram_block_host_offset_s390x
Unexecuted instantiation: qemu_ram_block_host_offset_tricore
1232
1233
/*
1234
 * Translates a host ptr back to a RAMBlock, a ram_addr and an offset
1235
 * in that RAMBlock.
1236
 *
1237
 * ptr: Host pointer to look up
1238
 * round_offset: If true round the result offset down to a page boundary
1239
 * *ram_addr: set to result ram_addr
1240
 * *offset: set to result offset within the RAMBlock
1241
 *
1242
 * Returns: RAMBlock (or NULL if not found)
1243
 *
1244
 * By the time this function returns, the returned pointer is not protected
1245
 * by RCU anymore.  If the caller is not within an RCU critical section and
1246
 * does not hold the iothread lock, it must have other means of protecting the
1247
 * pointer, such as a reference to the region that includes the incoming
1248
 * ram_addr_t.
1249
 */
1250
RAMBlock *qemu_ram_block_from_host(struct uc_struct *uc, void *ptr,
1251
                                   bool round_offset, ram_addr_t *offset)
1252
74.4M
{
1253
74.4M
    RAMBlock *block;
1254
74.4M
    uint8_t *host = ptr;
1255
1256
74.4M
    block = uc->ram_list.mru_block;
1257
74.4M
    if (block && block->host && host - block->host < block->max_length) {
1258
0
        goto found;
1259
0
    }
1260
1261
74.4M
    RAMBLOCK_FOREACH(block) {
1262
        /* This case append when the block is not mapped. */
1263
74.4M
        if (block->host == NULL) {
1264
0
            continue;
1265
0
        }
1266
74.4M
        if (host - block->host < block->max_length) {
1267
74.4M
            goto found;
1268
74.4M
        }
1269
74.4M
    }
1270
1271
0
    return NULL;
1272
1273
74.4M
found:
1274
74.4M
    *offset = (host - block->host);
1275
74.4M
    if (round_offset) {
1276
0
        *offset &= TARGET_PAGE_MASK;
1277
0
    }
1278
74.4M
    return block;
1279
74.4M
}
qemu_ram_block_from_host_x86_64
Line
Count
Source
1252
11.1M
{
1253
11.1M
    RAMBlock *block;
1254
11.1M
    uint8_t *host = ptr;
1255
1256
11.1M
    block = uc->ram_list.mru_block;
1257
11.1M
    if (block && block->host && host - block->host < block->max_length) {
1258
0
        goto found;
1259
0
    }
1260
1261
11.1M
    RAMBLOCK_FOREACH(block) {
1262
        /* This case append when the block is not mapped. */
1263
11.1M
        if (block->host == NULL) {
1264
0
            continue;
1265
0
        }
1266
11.1M
        if (host - block->host < block->max_length) {
1267
11.1M
            goto found;
1268
11.1M
        }
1269
11.1M
    }
1270
1271
0
    return NULL;
1272
1273
11.1M
found:
1274
11.1M
    *offset = (host - block->host);
1275
11.1M
    if (round_offset) {
1276
0
        *offset &= TARGET_PAGE_MASK;
1277
0
    }
1278
11.1M
    return block;
1279
11.1M
}
qemu_ram_block_from_host_arm
Line
Count
Source
1252
36.7M
{
1253
36.7M
    RAMBlock *block;
1254
36.7M
    uint8_t *host = ptr;
1255
1256
36.7M
    block = uc->ram_list.mru_block;
1257
36.7M
    if (block && block->host && host - block->host < block->max_length) {
1258
0
        goto found;
1259
0
    }
1260
1261
36.7M
    RAMBLOCK_FOREACH(block) {
1262
        /* This case append when the block is not mapped. */
1263
36.7M
        if (block->host == NULL) {
1264
0
            continue;
1265
0
        }
1266
36.7M
        if (host - block->host < block->max_length) {
1267
36.7M
            goto found;
1268
36.7M
        }
1269
36.7M
    }
1270
1271
0
    return NULL;
1272
1273
36.7M
found:
1274
36.7M
    *offset = (host - block->host);
1275
36.7M
    if (round_offset) {
1276
0
        *offset &= TARGET_PAGE_MASK;
1277
0
    }
1278
36.7M
    return block;
1279
36.7M
}
qemu_ram_block_from_host_aarch64
Line
Count
Source
1252
18.3M
{
1253
18.3M
    RAMBlock *block;
1254
18.3M
    uint8_t *host = ptr;
1255
1256
18.3M
    block = uc->ram_list.mru_block;
1257
18.3M
    if (block && block->host && host - block->host < block->max_length) {
1258
0
        goto found;
1259
0
    }
1260
1261
18.3M
    RAMBLOCK_FOREACH(block) {
1262
        /* This case append when the block is not mapped. */
1263
18.3M
        if (block->host == NULL) {
1264
0
            continue;
1265
0
        }
1266
18.3M
        if (host - block->host < block->max_length) {
1267
18.3M
            goto found;
1268
18.3M
        }
1269
18.3M
    }
1270
1271
0
    return NULL;
1272
1273
18.3M
found:
1274
18.3M
    *offset = (host - block->host);
1275
18.3M
    if (round_offset) {
1276
0
        *offset &= TARGET_PAGE_MASK;
1277
0
    }
1278
18.3M
    return block;
1279
18.3M
}
qemu_ram_block_from_host_m68k
Line
Count
Source
1252
41.6k
{
1253
41.6k
    RAMBlock *block;
1254
41.6k
    uint8_t *host = ptr;
1255
1256
41.6k
    block = uc->ram_list.mru_block;
1257
41.6k
    if (block && block->host && host - block->host < block->max_length) {
1258
0
        goto found;
1259
0
    }
1260
1261
41.6k
    RAMBLOCK_FOREACH(block) {
1262
        /* This case append when the block is not mapped. */
1263
41.6k
        if (block->host == NULL) {
1264
0
            continue;
1265
0
        }
1266
41.6k
        if (host - block->host < block->max_length) {
1267
41.6k
            goto found;
1268
41.6k
        }
1269
41.6k
    }
1270
1271
0
    return NULL;
1272
1273
41.6k
found:
1274
41.6k
    *offset = (host - block->host);
1275
41.6k
    if (round_offset) {
1276
0
        *offset &= TARGET_PAGE_MASK;
1277
0
    }
1278
41.6k
    return block;
1279
41.6k
}
qemu_ram_block_from_host_mips
Line
Count
Source
1252
1.55M
{
1253
1.55M
    RAMBlock *block;
1254
1.55M
    uint8_t *host = ptr;
1255
1256
1.55M
    block = uc->ram_list.mru_block;
1257
1.55M
    if (block && block->host && host - block->host < block->max_length) {
1258
0
        goto found;
1259
0
    }
1260
1261
1.55M
    RAMBLOCK_FOREACH(block) {
1262
        /* This case append when the block is not mapped. */
1263
1.55M
        if (block->host == NULL) {
1264
0
            continue;
1265
0
        }
1266
1.55M
        if (host - block->host < block->max_length) {
1267
1.55M
            goto found;
1268
1.55M
        }
1269
1.55M
    }
1270
1271
0
    return NULL;
1272
1273
1.55M
found:
1274
1.55M
    *offset = (host - block->host);
1275
1.55M
    if (round_offset) {
1276
0
        *offset &= TARGET_PAGE_MASK;
1277
0
    }
1278
1.55M
    return block;
1279
1.55M
}
qemu_ram_block_from_host_mipsel
Line
Count
Source
1252
3.20M
{
1253
3.20M
    RAMBlock *block;
1254
3.20M
    uint8_t *host = ptr;
1255
1256
3.20M
    block = uc->ram_list.mru_block;
1257
3.20M
    if (block && block->host && host - block->host < block->max_length) {
1258
0
        goto found;
1259
0
    }
1260
1261
3.20M
    RAMBLOCK_FOREACH(block) {
1262
        /* This case append when the block is not mapped. */
1263
3.20M
        if (block->host == NULL) {
1264
0
            continue;
1265
0
        }
1266
3.20M
        if (host - block->host < block->max_length) {
1267
3.20M
            goto found;
1268
3.20M
        }
1269
3.20M
    }
1270
1271
0
    return NULL;
1272
1273
3.20M
found:
1274
3.20M
    *offset = (host - block->host);
1275
3.20M
    if (round_offset) {
1276
0
        *offset &= TARGET_PAGE_MASK;
1277
0
    }
1278
3.20M
    return block;
1279
3.20M
}
Unexecuted instantiation: qemu_ram_block_from_host_mips64
Unexecuted instantiation: qemu_ram_block_from_host_mips64el
qemu_ram_block_from_host_sparc
Line
Count
Source
1252
6.94k
{
1253
6.94k
    RAMBlock *block;
1254
6.94k
    uint8_t *host = ptr;
1255
1256
6.94k
    block = uc->ram_list.mru_block;
1257
6.94k
    if (block && block->host && host - block->host < block->max_length) {
1258
0
        goto found;
1259
0
    }
1260
1261
6.94k
    RAMBLOCK_FOREACH(block) {
1262
        /* This case append when the block is not mapped. */
1263
6.94k
        if (block->host == NULL) {
1264
0
            continue;
1265
0
        }
1266
6.94k
        if (host - block->host < block->max_length) {
1267
6.94k
            goto found;
1268
6.94k
        }
1269
6.94k
    }
1270
1271
0
    return NULL;
1272
1273
6.94k
found:
1274
6.94k
    *offset = (host - block->host);
1275
6.94k
    if (round_offset) {
1276
0
        *offset &= TARGET_PAGE_MASK;
1277
0
    }
1278
6.94k
    return block;
1279
6.94k
}
Unexecuted instantiation: qemu_ram_block_from_host_sparc64
Unexecuted instantiation: qemu_ram_block_from_host_ppc
Unexecuted instantiation: qemu_ram_block_from_host_ppc64
Unexecuted instantiation: qemu_ram_block_from_host_riscv32
Unexecuted instantiation: qemu_ram_block_from_host_riscv64
qemu_ram_block_from_host_s390x
Line
Count
Source
1252
3.40M
{
1253
3.40M
    RAMBlock *block;
1254
3.40M
    uint8_t *host = ptr;
1255
1256
3.40M
    block = uc->ram_list.mru_block;
1257
3.40M
    if (block && block->host && host - block->host < block->max_length) {
1258
0
        goto found;
1259
0
    }
1260
1261
3.40M
    RAMBLOCK_FOREACH(block) {
1262
        /* This case append when the block is not mapped. */
1263
3.40M
        if (block->host == NULL) {
1264
0
            continue;
1265
0
        }
1266
3.40M
        if (host - block->host < block->max_length) {
1267
3.40M
            goto found;
1268
3.40M
        }
1269
3.40M
    }
1270
1271
0
    return NULL;
1272
1273
3.40M
found:
1274
3.40M
    *offset = (host - block->host);
1275
3.40M
    if (round_offset) {
1276
0
        *offset &= TARGET_PAGE_MASK;
1277
0
    }
1278
3.40M
    return block;
1279
3.40M
}
Unexecuted instantiation: qemu_ram_block_from_host_tricore
1280
1281
/* Some of the softmmu routines need to translate from a host pointer
1282
   (typically a TLB entry) back to a ram offset.  */
1283
ram_addr_t qemu_ram_addr_from_host(struct uc_struct *uc, void *ptr)
1284
74.4M
{
1285
74.4M
    RAMBlock *block;
1286
74.4M
    ram_addr_t offset;
1287
1288
74.4M
    block = qemu_ram_block_from_host(uc, ptr, false, &offset);
1289
74.4M
    if (!block) {
1290
0
        return RAM_ADDR_INVALID;
1291
0
    }
1292
1293
74.4M
    return block->offset + offset;
1294
74.4M
}
qemu_ram_addr_from_host_x86_64
Line
Count
Source
1284
11.1M
{
1285
11.1M
    RAMBlock *block;
1286
11.1M
    ram_addr_t offset;
1287
1288
11.1M
    block = qemu_ram_block_from_host(uc, ptr, false, &offset);
1289
11.1M
    if (!block) {
1290
0
        return RAM_ADDR_INVALID;
1291
0
    }
1292
1293
11.1M
    return block->offset + offset;
1294
11.1M
}
qemu_ram_addr_from_host_arm
Line
Count
Source
1284
36.7M
{
1285
36.7M
    RAMBlock *block;
1286
36.7M
    ram_addr_t offset;
1287
1288
36.7M
    block = qemu_ram_block_from_host(uc, ptr, false, &offset);
1289
36.7M
    if (!block) {
1290
0
        return RAM_ADDR_INVALID;
1291
0
    }
1292
1293
36.7M
    return block->offset + offset;
1294
36.7M
}
qemu_ram_addr_from_host_aarch64
Line
Count
Source
1284
18.3M
{
1285
18.3M
    RAMBlock *block;
1286
18.3M
    ram_addr_t offset;
1287
1288
18.3M
    block = qemu_ram_block_from_host(uc, ptr, false, &offset);
1289
18.3M
    if (!block) {
1290
0
        return RAM_ADDR_INVALID;
1291
0
    }
1292
1293
18.3M
    return block->offset + offset;
1294
18.3M
}
qemu_ram_addr_from_host_m68k
Line
Count
Source
1284
41.6k
{
1285
41.6k
    RAMBlock *block;
1286
41.6k
    ram_addr_t offset;
1287
1288
41.6k
    block = qemu_ram_block_from_host(uc, ptr, false, &offset);
1289
41.6k
    if (!block) {
1290
0
        return RAM_ADDR_INVALID;
1291
0
    }
1292
1293
41.6k
    return block->offset + offset;
1294
41.6k
}
qemu_ram_addr_from_host_mips
Line
Count
Source
1284
1.55M
{
1285
1.55M
    RAMBlock *block;
1286
1.55M
    ram_addr_t offset;
1287
1288
1.55M
    block = qemu_ram_block_from_host(uc, ptr, false, &offset);
1289
1.55M
    if (!block) {
1290
0
        return RAM_ADDR_INVALID;
1291
0
    }
1292
1293
1.55M
    return block->offset + offset;
1294
1.55M
}
qemu_ram_addr_from_host_mipsel
Line
Count
Source
1284
3.20M
{
1285
3.20M
    RAMBlock *block;
1286
3.20M
    ram_addr_t offset;
1287
1288
3.20M
    block = qemu_ram_block_from_host(uc, ptr, false, &offset);
1289
3.20M
    if (!block) {
1290
0
        return RAM_ADDR_INVALID;
1291
0
    }
1292
1293
3.20M
    return block->offset + offset;
1294
3.20M
}
Unexecuted instantiation: qemu_ram_addr_from_host_mips64
Unexecuted instantiation: qemu_ram_addr_from_host_mips64el
qemu_ram_addr_from_host_sparc
Line
Count
Source
1284
6.94k
{
1285
6.94k
    RAMBlock *block;
1286
6.94k
    ram_addr_t offset;
1287
1288
6.94k
    block = qemu_ram_block_from_host(uc, ptr, false, &offset);
1289
6.94k
    if (!block) {
1290
0
        return RAM_ADDR_INVALID;
1291
0
    }
1292
1293
6.94k
    return block->offset + offset;
1294
6.94k
}
Unexecuted instantiation: qemu_ram_addr_from_host_sparc64
Unexecuted instantiation: qemu_ram_addr_from_host_ppc
Unexecuted instantiation: qemu_ram_addr_from_host_ppc64
Unexecuted instantiation: qemu_ram_addr_from_host_riscv32
Unexecuted instantiation: qemu_ram_addr_from_host_riscv64
qemu_ram_addr_from_host_s390x
Line
Count
Source
1284
3.40M
{
1285
3.40M
    RAMBlock *block;
1286
3.40M
    ram_addr_t offset;
1287
1288
3.40M
    block = qemu_ram_block_from_host(uc, ptr, false, &offset);
1289
3.40M
    if (!block) {
1290
0
        return RAM_ADDR_INVALID;
1291
0
    }
1292
1293
3.40M
    return block->offset + offset;
1294
3.40M
}
Unexecuted instantiation: qemu_ram_addr_from_host_tricore
1295
1296
/* Generate a debug exception if a watchpoint has been hit.  */
1297
void cpu_check_watchpoint(CPUState *cpu, vaddr addr, vaddr len,
                          MemTxAttrs attrs, int flags, uintptr_t ra)
{
    /* Intentional no-op in this port: watchpoint hits are not delivered.
     * The full signature is kept so call sites shared with upstream QEMU
     * compile unchanged.  NOTE(review): presumably Unicorn handles memory
     * hooks elsewhere — confirm before wiring watchpoints through here. */
}
cpu_check_watchpoint_x86_64
Line
Count
Source
1299
1
{
1300
1
}
cpu_check_watchpoint_arm
Line
Count
Source
1299
5
{
1300
5
}
cpu_check_watchpoint_aarch64
Line
Count
Source
1299
2
{
1300
2
}
Unexecuted instantiation: cpu_check_watchpoint_m68k
Unexecuted instantiation: cpu_check_watchpoint_mips
Unexecuted instantiation: cpu_check_watchpoint_mipsel
Unexecuted instantiation: cpu_check_watchpoint_mips64
Unexecuted instantiation: cpu_check_watchpoint_mips64el
Unexecuted instantiation: cpu_check_watchpoint_sparc
Unexecuted instantiation: cpu_check_watchpoint_sparc64
Unexecuted instantiation: cpu_check_watchpoint_ppc
Unexecuted instantiation: cpu_check_watchpoint_ppc64
Unexecuted instantiation: cpu_check_watchpoint_riscv32
Unexecuted instantiation: cpu_check_watchpoint_riscv64
Unexecuted instantiation: cpu_check_watchpoint_s390x
Unexecuted instantiation: cpu_check_watchpoint_tricore
1301
1302
static MemTxResult flatview_read(struct uc_struct *uc, FlatView *fv, hwaddr addr,
1303
                                 MemTxAttrs attrs, void *buf, hwaddr len);
1304
static MemTxResult flatview_write(struct uc_struct *, FlatView *fv, hwaddr addr, MemTxAttrs attrs,
1305
                                  const void *buf, hwaddr len);
1306
static bool flatview_access_valid(struct uc_struct *uc, FlatView *fv, hwaddr addr, hwaddr len,
1307
                                  bool is_write, MemTxAttrs attrs);
1308
1309
/* Read callback for a sub-page memory region: forward the access to the
 * owning FlatView at the region's absolute address, then repack the bytes
 * into *data on success. */
static MemTxResult subpage_read(struct uc_struct *uc, void *opaque, hwaddr addr, uint64_t *data,
                                unsigned len, MemTxAttrs attrs)
{
    subpage_t *subpage = opaque;
    uint8_t buf[8];
    MemTxResult res;

#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %u addr " TARGET_FMT_plx "\n", __func__,
           subpage, len, addr);
#endif
    res = flatview_read(uc, subpage->fv, addr + subpage->base, attrs, buf, len);
    if (res == MEMTX_OK) {
        /* Decode the raw bytes into the caller's value. */
        *data = ldn_p(buf, len);
    }
    return res;
}
1327
1328
/* Write callback for a sub-page memory region: serialize the value into a
 * byte buffer and forward it to the owning FlatView at the region's
 * absolute address. */
static MemTxResult subpage_write(struct uc_struct *uc, void *opaque, hwaddr addr,
                                 uint64_t value, unsigned len, MemTxAttrs attrs)
{
    subpage_t *subpage = opaque;
    uint8_t buf[8];

#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %u addr " TARGET_FMT_plx
           " value %"PRIx64"\n",
           __func__, subpage, len, addr, value);
#endif
    /* Encode the value into raw bytes, then dispatch the write. */
    stn_p(buf, len, value);
    return flatview_write(uc, subpage->fv, subpage->base + addr, attrs, buf, len);
}
1342
1343
static bool subpage_accepts(struct uc_struct *uc, void *opaque, hwaddr addr,
1344
                            unsigned len, bool is_write,
1345
                            MemTxAttrs attrs)
1346
0
{
1347
0
    subpage_t *subpage = opaque;
1348
#if defined(DEBUG_SUBPAGE)
1349
    printf("%s: subpage %p %c len %u addr " TARGET_FMT_plx "\n",
1350
           __func__, subpage, is_write ? 'w' : 'r', len, addr);
1351
#endif
1352
1353
0
    return flatview_access_valid(uc, subpage->fv, addr + subpage->base,
1354
0
                                 len, is_write, attrs);
1355
0
}
1356
1357
/* MemoryRegionOps vtable used by every sub-page region created in
 * subpage_init().  Accesses of 1-8 bytes are accepted and handled by the
 * *_with_attrs callbacks above, which forward to the owning FlatView;
 * subpage_accepts() vetoes accesses the underlying regions would reject. */
static const MemoryRegionOps subpage_ops = {
    .read_with_attrs = subpage_read,
    .write_with_attrs = subpage_write,
    .impl.min_access_size = 1,
    .impl.max_access_size = 8,
    .valid.min_access_size = 1,
    .valid.max_access_size = 8,
    .valid.accepts = subpage_accepts,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
1367
1368
/* Point the sub-page slots covering [start, end] at the given phys section.
 * Returns 0 on success, -1 if either bound falls outside the page. */
static int subpage_register(struct uc_struct *uc, subpage_t *mmio, uint32_t start, uint32_t end,
                            uint16_t section)
{
    int idx, eidx;

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE) {
        return -1;
    }
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
           __func__, mmio, start, end, idx, eidx, section);
#endif
    /* Fill every slot in the inclusive index range. */
    while (idx <= eidx) {
        mmio->sub_section[idx++] = section;
    }

    return 0;
}
1387
1388
/* Allocate and initialize a sub-page container region for the page starting
 * at @base within @fv.  The caller owns the returned object.
 *
 * The backing MemoryRegion is marked as a subpage and given the current
 * snapshot level as its priority so that address_space_dispatch_clear()
 * can tell snapshot-local regions apart. */
static subpage_t *subpage_init(struct uc_struct *uc, FlatView *fv, hwaddr base)
{
    subpage_t *mmio;

    /* mmio->sub_section is set to PHYS_SECTION_UNASSIGNED with g_malloc0 */
    mmio = g_malloc0(sizeof(subpage_t) + TARGET_PAGE_SIZE * sizeof(uint16_t));
    mmio->fv = fv;
    mmio->base = base;
    memory_region_init_io(fv->root->uc, &mmio->iomem, &subpage_ops, mmio,
                          TARGET_PAGE_SIZE);
    mmio->iomem.subpage = true;
    mmio->iomem.priority = uc->snapshot_level;
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
           mmio, base, TARGET_PAGE_SIZE);
#endif

    return mmio;
}
1407
1408
/* Register @mr as a catch-all section spanning the whole 2^64 address space
 * in @map and return its section index. */
static uint16_t dummy_section(struct uc_struct *uc, PhysPageMap *map, FlatView *fv, MemoryRegion *mr)
{
    MemoryRegionSection section;

    assert(fv);
    /* Cover the entire address space with offset zero on both sides. */
    section = (MemoryRegionSection) {
        .fv = fv,
        .mr = mr,
        .offset_within_address_space = 0,
        .offset_within_region = 0,
        .size = int128_2_64(),
    };

    return phys_section_add(uc, map, &section);
}
1421
1422
/* Resolve an IOTLB entry back to its MemoryRegionSection.
 *
 * @index encodes the section number in its low (sub-page) bits; the CPU's
 * address-space index is derived from @attrs so per-AS dispatch tables are
 * honored. */
MemoryRegionSection *iotlb_to_section(CPUState *cpu,
                                      hwaddr index, MemTxAttrs attrs)
{
#ifdef TARGET_ARM
    /* Only referenced by ARM-specific macro expansions below; declaring it
     * unconditionally would be an unused variable on other targets. */
    struct uc_struct *uc = cpu->uc;
#endif
    int asidx = cpu_asidx_from_attrs(cpu, attrs);
    CPUAddressSpace *cpuas = &cpu->cpu_ases[asidx];
    AddressSpaceDispatch *d = cpuas->memory_dispatch;
    MemoryRegionSection *sections = d->map.sections;

    /* The low bits of the IOTLB value are the section index. */
    return &sections[index & ~TARGET_PAGE_MASK];
}
Unexecuted instantiation: iotlb_to_section_x86_64
Unexecuted instantiation: iotlb_to_section_arm
Unexecuted instantiation: iotlb_to_section_aarch64
Unexecuted instantiation: iotlb_to_section_m68k
Unexecuted instantiation: iotlb_to_section_mips
Unexecuted instantiation: iotlb_to_section_mipsel
Unexecuted instantiation: iotlb_to_section_mips64
Unexecuted instantiation: iotlb_to_section_mips64el
Unexecuted instantiation: iotlb_to_section_sparc
Unexecuted instantiation: iotlb_to_section_sparc64
Unexecuted instantiation: iotlb_to_section_ppc
Unexecuted instantiation: iotlb_to_section_ppc64
Unexecuted instantiation: iotlb_to_section_riscv32
Unexecuted instantiation: iotlb_to_section_riscv64
Unexecuted instantiation: iotlb_to_section_s390x
Unexecuted instantiation: iotlb_to_section_tricore
1435
1436
/* Set up the per-instance "unassigned memory" I/O region that backs
 * accesses which hit no mapped RAM or MMIO.  Sized to cover the whole
 * address space. */
static void io_mem_init(struct uc_struct *uc)
{
    memory_region_init_io(uc, &uc->io_mem_unassigned, &unassigned_mem_ops, NULL,
                          UINT64_MAX);
}
1441
1442
/* Create an empty dispatch table for @fv.
 *
 * Section 0 must always be the "unassigned" catch-all
 * (PHYS_SECTION_UNASSIGNED); the NDEBUG split exists only so the assert's
 * operand is not computed in release builds. The phys map starts as a
 * single NIL node one level deep, i.e. every address initially resolves to
 * the unassigned section. Caller owns the result
 * (address_space_dispatch_free). */
AddressSpaceDispatch *address_space_dispatch_new(struct uc_struct *uc, FlatView *fv)
{
    AddressSpaceDispatch *d = g_new0(AddressSpaceDispatch, 1);
#ifndef NDEBUG
    uint16_t n;

    n = dummy_section(uc, &d->map, fv, &(uc->io_mem_unassigned));
    assert(n == PHYS_SECTION_UNASSIGNED);
#else
    dummy_section(uc, &d->map, fv, &(uc->io_mem_unassigned));
#endif

    d->phys_map  = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .skip = 1 };
    d->uc = uc;

    return d;
}
address_space_dispatch_new_x86_64
Line
Count
Source
1443
201k
{
1444
201k
    AddressSpaceDispatch *d = g_new0(AddressSpaceDispatch, 1);
1445
201k
#ifndef NDEBUG
1446
201k
    uint16_t n;
1447
1448
201k
    n = dummy_section(uc, &d->map, fv, &(uc->io_mem_unassigned));
1449
201k
    assert(n == PHYS_SECTION_UNASSIGNED);
1450
#else
1451
    dummy_section(uc, &d->map, fv, &(uc->io_mem_unassigned));
1452
#endif
1453
1454
201k
    d->phys_map  = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .skip = 1 };
1455
201k
    d->uc = uc;
1456
1457
201k
    return d;
1458
201k
}
address_space_dispatch_new_arm
Line
Count
Source
1443
263k
{
1444
263k
    AddressSpaceDispatch *d = g_new0(AddressSpaceDispatch, 1);
1445
263k
#ifndef NDEBUG
1446
263k
    uint16_t n;
1447
1448
263k
    n = dummy_section(uc, &d->map, fv, &(uc->io_mem_unassigned));
1449
263k
    assert(n == PHYS_SECTION_UNASSIGNED);
1450
#else
1451
    dummy_section(uc, &d->map, fv, &(uc->io_mem_unassigned));
1452
#endif
1453
1454
263k
    d->phys_map  = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .skip = 1 };
1455
263k
    d->uc = uc;
1456
1457
263k
    return d;
1458
263k
}
address_space_dispatch_new_aarch64
Line
Count
Source
1443
361k
{
1444
361k
    AddressSpaceDispatch *d = g_new0(AddressSpaceDispatch, 1);
1445
361k
#ifndef NDEBUG
1446
361k
    uint16_t n;
1447
1448
361k
    n = dummy_section(uc, &d->map, fv, &(uc->io_mem_unassigned));
1449
361k
    assert(n == PHYS_SECTION_UNASSIGNED);
1450
#else
1451
    dummy_section(uc, &d->map, fv, &(uc->io_mem_unassigned));
1452
#endif
1453
1454
361k
    d->phys_map  = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .skip = 1 };
1455
361k
    d->uc = uc;
1456
1457
361k
    return d;
1458
361k
}
address_space_dispatch_new_m68k
Line
Count
Source
1443
174
{
1444
174
    AddressSpaceDispatch *d = g_new0(AddressSpaceDispatch, 1);
1445
174
#ifndef NDEBUG
1446
174
    uint16_t n;
1447
1448
174
    n = dummy_section(uc, &d->map, fv, &(uc->io_mem_unassigned));
1449
174
    assert(n == PHYS_SECTION_UNASSIGNED);
1450
#else
1451
    dummy_section(uc, &d->map, fv, &(uc->io_mem_unassigned));
1452
#endif
1453
1454
174
    d->phys_map  = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .skip = 1 };
1455
174
    d->uc = uc;
1456
1457
174
    return d;
1458
174
}
address_space_dispatch_new_mips
Line
Count
Source
1443
44.5k
{
1444
44.5k
    AddressSpaceDispatch *d = g_new0(AddressSpaceDispatch, 1);
1445
44.5k
#ifndef NDEBUG
1446
44.5k
    uint16_t n;
1447
1448
44.5k
    n = dummy_section(uc, &d->map, fv, &(uc->io_mem_unassigned));
1449
44.5k
    assert(n == PHYS_SECTION_UNASSIGNED);
1450
#else
1451
    dummy_section(uc, &d->map, fv, &(uc->io_mem_unassigned));
1452
#endif
1453
1454
44.5k
    d->phys_map  = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .skip = 1 };
1455
44.5k
    d->uc = uc;
1456
1457
44.5k
    return d;
1458
44.5k
}
address_space_dispatch_new_mipsel
Line
Count
Source
1443
53.6k
{
1444
53.6k
    AddressSpaceDispatch *d = g_new0(AddressSpaceDispatch, 1);
1445
53.6k
#ifndef NDEBUG
1446
53.6k
    uint16_t n;
1447
1448
53.6k
    n = dummy_section(uc, &d->map, fv, &(uc->io_mem_unassigned));
1449
53.6k
    assert(n == PHYS_SECTION_UNASSIGNED);
1450
#else
1451
    dummy_section(uc, &d->map, fv, &(uc->io_mem_unassigned));
1452
#endif
1453
1454
53.6k
    d->phys_map  = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .skip = 1 };
1455
53.6k
    d->uc = uc;
1456
1457
53.6k
    return d;
1458
53.6k
}
Unexecuted instantiation: address_space_dispatch_new_mips64
Unexecuted instantiation: address_space_dispatch_new_mips64el
address_space_dispatch_new_sparc
Line
Count
Source
1443
276
{
1444
276
    AddressSpaceDispatch *d = g_new0(AddressSpaceDispatch, 1);
1445
276
#ifndef NDEBUG
1446
276
    uint16_t n;
1447
1448
276
    n = dummy_section(uc, &d->map, fv, &(uc->io_mem_unassigned));
1449
276
    assert(n == PHYS_SECTION_UNASSIGNED);
1450
#else
1451
    dummy_section(uc, &d->map, fv, &(uc->io_mem_unassigned));
1452
#endif
1453
1454
276
    d->phys_map  = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .skip = 1 };
1455
276
    d->uc = uc;
1456
1457
276
    return d;
1458
276
}
Unexecuted instantiation: address_space_dispatch_new_sparc64
Unexecuted instantiation: address_space_dispatch_new_ppc
Unexecuted instantiation: address_space_dispatch_new_ppc64
Unexecuted instantiation: address_space_dispatch_new_riscv32
Unexecuted instantiation: address_space_dispatch_new_riscv64
address_space_dispatch_new_s390x
Line
Count
Source
1443
131k
{
1444
131k
    AddressSpaceDispatch *d = g_new0(AddressSpaceDispatch, 1);
1445
131k
#ifndef NDEBUG
1446
131k
    uint16_t n;
1447
1448
131k
    n = dummy_section(uc, &d->map, fv, &(uc->io_mem_unassigned));
1449
131k
    assert(n == PHYS_SECTION_UNASSIGNED);
1450
#else
1451
    dummy_section(uc, &d->map, fv, &(uc->io_mem_unassigned));
1452
#endif
1453
1454
131k
    d->phys_map  = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .skip = 1 };
1455
131k
    d->uc = uc;
1456
1457
131k
    return d;
1458
131k
}
Unexecuted instantiation: address_space_dispatch_new_tricore
1459
1460
/* Tear down a dispatch table, destroying only the sections whose regions
 * belong to a deeper snapshot level than the current one; regions at or
 * below uc->snapshot_level are left alive for their other owners.
 * Frees the section array, the phys-map node array, and @d itself. */
void address_space_dispatch_clear(AddressSpaceDispatch *d)
{
    MemoryRegionSection *section;
    struct uc_struct *uc = d->uc;
    /* Pop sections from the end so sections_nb stays consistent. */
    while (d->map.sections_nb > 0) {
        d->map.sections_nb--;
        section = &d->map.sections[d->map.sections_nb];
        if (section->mr->priority > uc->snapshot_level) {
            phys_section_destroy(section->mr);
        }
    }
    g_free(d->map.sections);
    g_free(d->map.nodes);
    g_free(d);
}
Unexecuted instantiation: address_space_dispatch_clear_x86_64
Unexecuted instantiation: address_space_dispatch_clear_arm
Unexecuted instantiation: address_space_dispatch_clear_aarch64
Unexecuted instantiation: address_space_dispatch_clear_m68k
Unexecuted instantiation: address_space_dispatch_clear_mips
Unexecuted instantiation: address_space_dispatch_clear_mipsel
Unexecuted instantiation: address_space_dispatch_clear_mips64
Unexecuted instantiation: address_space_dispatch_clear_mips64el
Unexecuted instantiation: address_space_dispatch_clear_sparc
Unexecuted instantiation: address_space_dispatch_clear_sparc64
Unexecuted instantiation: address_space_dispatch_clear_ppc
Unexecuted instantiation: address_space_dispatch_clear_ppc64
Unexecuted instantiation: address_space_dispatch_clear_riscv32
Unexecuted instantiation: address_space_dispatch_clear_riscv64
Unexecuted instantiation: address_space_dispatch_clear_s390x
Unexecuted instantiation: address_space_dispatch_clear_tricore
1475
1476
/* Release a dispatch table: drop every phys section it owns, then the
 * table itself.  Counterpart of address_space_dispatch_new(). */
void address_space_dispatch_free(AddressSpaceDispatch *d)
{
    PhysPageMap *map = &d->map;

    phys_sections_free(map);
    g_free(d);
}
address_space_dispatch_free_x86_64
Line
Count
Source
1477
201k
{
1478
201k
    phys_sections_free(&d->map);
1479
201k
    g_free(d);
1480
201k
}
address_space_dispatch_free_arm
Line
Count
Source
1477
263k
{
1478
263k
    phys_sections_free(&d->map);
1479
263k
    g_free(d);
1480
263k
}
address_space_dispatch_free_aarch64
Line
Count
Source
1477
361k
{
1478
361k
    phys_sections_free(&d->map);
1479
361k
    g_free(d);
1480
361k
}
address_space_dispatch_free_m68k
Line
Count
Source
1477
174
{
1478
174
    phys_sections_free(&d->map);
1479
174
    g_free(d);
1480
174
}
address_space_dispatch_free_mips
Line
Count
Source
1477
44.5k
{
1478
44.5k
    phys_sections_free(&d->map);
1479
44.5k
    g_free(d);
1480
44.5k
}
address_space_dispatch_free_mipsel
Line
Count
Source
1477
53.6k
{
1478
53.6k
    phys_sections_free(&d->map);
1479
53.6k
    g_free(d);
1480
53.6k
}
Unexecuted instantiation: address_space_dispatch_free_mips64
Unexecuted instantiation: address_space_dispatch_free_mips64el
address_space_dispatch_free_sparc
Line
Count
Source
1477
276
{
1478
276
    phys_sections_free(&d->map);
1479
276
    g_free(d);
1480
276
}
Unexecuted instantiation: address_space_dispatch_free_sparc64
Unexecuted instantiation: address_space_dispatch_free_ppc
Unexecuted instantiation: address_space_dispatch_free_ppc64
Unexecuted instantiation: address_space_dispatch_free_riscv32
Unexecuted instantiation: address_space_dispatch_free_riscv64
address_space_dispatch_free_s390x
Line
Count
Source
1477
131k
{
1478
131k
    phys_sections_free(&d->map);
1479
131k
    g_free(d);
1480
131k
}
Unexecuted instantiation: address_space_dispatch_free_tricore
1481
1482
/* MemoryListener commit hook for TCG: after a memory-map change, refresh
 * the CPU's cached dispatch pointer and flush its TLB so stale ram-address
 * translations cannot be used. */
static void tcg_commit(MemoryListener *listener)
{
    CPUAddressSpace *cpuas;
    AddressSpaceDispatch *d;

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    cpuas = container_of(listener, CPUAddressSpace, tcg_as_listener);
    cpu_reloading_memory_map();
    /* The CPU and TLB are protected by the iothread lock.
     * We reload the dispatch pointer now because cpu_reloading_memory_map()
     * may have split the RCU critical section.
     */
    d = address_space_to_dispatch(cpuas->as);
    cpuas->memory_dispatch = d;
    tlb_flush(cpuas->cpu);
}
1499
1500
/* Read handler for the catch-all I/O port region: reads from unassigned
 * I/O space return all-ones, mimicking open-bus behaviour.  @opaque, @addr
 * and @size are ignored. */
static uint64_t unassigned_io_read(struct uc_struct *uc, void* opaque, hwaddr addr, unsigned size)
{
    /* UINT64_MAX is portable; the old #ifdef _MSC_VER fork existed only to
     * avoid spelling the same value as -1ULL, and both branches returned
     * the identical bit pattern. */
    return UINT64_MAX;
}
1508
1509
/* Write handler for the catch-all I/O port region: writes to unassigned
 * I/O space are silently discarded. */
static void unassigned_io_write(struct uc_struct *uc, void* opaque, hwaddr addr, uint64_t data, unsigned size)
{
}
1512
1513
/* Ops for the default I/O-port space: reads return all-ones, writes are
 * ignored (see the handlers above). */
static const MemoryRegionOps unassigned_io_ops = {
    .read = unassigned_io_read,
    .write = unassigned_io_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
1518
1519
/* Build the two root address spaces for a uc instance:
 *  - system memory, a 2^64 container that RAM/MMIO regions are mapped into;
 *  - system I/O, a 64 KiB port space backed by the unassigned-I/O ops.
 * The MemoryRegions are heap-allocated and owned by @uc. */
static void memory_map_init(struct uc_struct *uc)
{
    uc->system_memory = g_malloc(sizeof(*(uc->system_memory)));
    memory_region_init(uc, uc->system_memory, UINT64_MAX);
    address_space_init(uc, &uc->address_space_memory, uc->system_memory);

    uc->system_io = g_malloc(sizeof(*(uc->system_io)));
    memory_region_init_io(uc, uc->system_io, &unassigned_io_ops, NULL, 65536);
    address_space_init(uc, &uc->address_space_io, uc->system_io);
}
1529
1530
/* physical memory access (slow version, mainly for debug) */
1531
/* Intentional no-op in this port: upstream QEMU uses this to invalidate
 * translated blocks and mark pages dirty after a write; here the stub keeps
 * the call sites identical to upstream.  NOTE(review): presumably TB
 * invalidation is handled elsewhere in Unicorn — confirm. */
static void invalidate_and_set_dirty(MemoryRegion *mr, hwaddr addr,
                                     hwaddr length)
{
}
1535
1536
/* Clamp a requested access length @l for region @mr at offset @addr to a
 * size the region can dispatch: bounded by the region's declared maximum
 * (default 4), by the natural alignment of @addr when the region does not
 * support unaligned access, and rounded down to a power of two.
 * Returns the usable access size in bytes (>= 1 for l >= 1). */
static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr)
{
    unsigned access_size_max = mr->ops->valid.max_access_size;

    /* Regions are assumed to support 1-4 byte accesses unless
       otherwise specified.  */
    if (access_size_max == 0) {
        access_size_max = 4;
    }

    /* Bound the maximum access by the alignment of the address.  */
    if (!mr->ops->impl.unaligned) {
        /* (0 - addr) isolates the lowest set bit together with the mask,
         * without applying unary minus to an unsigned value (MSVC C4146);
         * this replaces the old #ifdef _MSC_VER fork, which computed the
         * same value. */
        unsigned align_size_max = addr & (0 - addr);
        if (align_size_max != 0 && align_size_max < access_size_max) {
            access_size_max = align_size_max;
        }
    }

    /* Don't attempt accesses larger than the maximum.  */
    if (l > access_size_max) {
        l = access_size_max;
    }
    l = pow2floor(l);

    return l;
}
1566
1567
/* Upstream QEMU takes the iothread lock here before an MMIO dispatch and
 * returns whether it must be released afterwards.  This port always
 * reports "lock taken" without doing any locking, keeping the callers'
 * release_lock bookkeeping shape intact. */
static bool prepare_mmio_access(MemoryRegion *mr)
{
    return true;
}
1571
1572
/* Called within RCU critical section.  */
/* Continue a write into @fv that was already translated once: @mr/@addr1/@l
 * describe the first target fragment.  Copies directly into RAM-backed
 * regions, dispatches size-limited MMIO writes otherwise, and re-translates
 * after each fragment until @len bytes are consumed.  Returns the OR of all
 * fragment results (MEMTX_OK iff every fragment succeeded). */
static MemTxResult flatview_write_continue(struct uc_struct *uc, FlatView *fv, hwaddr addr,
                                           MemTxAttrs attrs,
                                           const void *ptr,
                                           hwaddr len, hwaddr addr1,
                                           hwaddr l, MemoryRegion *mr)
{
    uint8_t *ram_ptr;
    uint64_t val;
    MemTxResult result = MEMTX_OK;
    bool release_lock = false;
    const uint8_t *buf = ptr;

    for (;;) {
        if (!memory_access_is_direct(mr, true)) {
            /* MMIO: clamp the fragment to a dispatchable size and write
             * it as a single value. */
            release_lock |= prepare_mmio_access(mr);
            l = memory_access_size(mr, l, addr1);
            /* XXX: could force current_cpu to NULL to avoid
               potential bugs */
            val = ldn_he_p(buf, l);
            result |= memory_region_dispatch_write(uc, mr, addr1, val,
                                                   size_memop(l), attrs);
        } else {
            /* RAM case */
            ram_ptr = qemu_ram_ptr_length(fv->root->uc, mr->ram_block, addr1, &l, false);
            memcpy(ram_ptr, buf, l);
        }

        /* Lock handling is vestigial here; see prepare_mmio_access(). */
        if (release_lock) {
            release_lock = false;
        }

        len -= l;
        buf += l;
        addr += l;

        if (!len) {
            break;
        }

        /* Translate the next fragment of the remaining range. */
        l = len;
        mr = flatview_translate(uc, fv, addr, &addr1, &l, true, attrs);
    }

    return result;
}
1618
1619
/* Called from RCU critical section.  */
1620
static MemTxResult flatview_write(struct uc_struct *uc, FlatView *fv, hwaddr addr, MemTxAttrs attrs,
1621
                                  const void *buf, hwaddr len)
1622
176k
{
1623
176k
    hwaddr l;
1624
176k
    hwaddr addr1;
1625
176k
    MemoryRegion *mr;
1626
176k
    MemTxResult result = MEMTX_OK;
1627
1628
176k
    l = len;
1629
176k
    mr = flatview_translate(uc, fv, addr, &addr1, &l, true, attrs);
1630
176k
    result = flatview_write_continue(uc, fv, addr, attrs, buf, len,
1631
176k
                                     addr1, l, mr);
1632
1633
176k
    return result;
1634
176k
}
1635
1636
/* Called within RCU critical section.  */
/* Continue a read from @fv that was already translated once: @mr/@addr1/@l
 * describe the first source fragment.  Copies directly from RAM-backed
 * regions, dispatches size-limited MMIO reads otherwise, and re-translates
 * after each fragment until @len bytes are produced.  Returns the OR of
 * all fragment results (MEMTX_OK iff every fragment succeeded). */
MemTxResult flatview_read_continue(struct uc_struct *uc, FlatView *fv, hwaddr addr,
                                   MemTxAttrs attrs, void *ptr,
                                   hwaddr len, hwaddr addr1, hwaddr l,
                                   MemoryRegion *mr)
{
    uint8_t *ram_ptr;
    uint64_t val;
    MemTxResult result = MEMTX_OK;
    bool release_lock = false;
    uint8_t *buf = ptr;

    for (;;) {
        if (!memory_access_is_direct(mr, false)) {
            /* I/O case */
            release_lock |= prepare_mmio_access(mr);
            l = memory_access_size(mr, l, addr1);
            result |= memory_region_dispatch_read(uc, mr, addr1, &val,
                                                  size_memop(l), attrs);
            /* Store the fetched value into the output buffer as bytes. */
            stn_he_p(buf, l, val);
        } else {
            /* RAM case */
            ram_ptr = qemu_ram_ptr_length(fv->root->uc, mr->ram_block, addr1, &l, false);
            memcpy(buf, ram_ptr, l);
        }

        /* Lock handling is vestigial here; see prepare_mmio_access(). */
        if (release_lock) {
            release_lock = false;
        }

        len -= l;
        buf += l;
        addr += l;

        if (!len) {
            break;
        }

        /* Translate the next fragment of the remaining range. */
        l = len;
        mr = flatview_translate(uc, fv, addr, &addr1, &l, false, attrs);
    }

    return result;
}
Unexecuted instantiation: flatview_read_continue_x86_64
Unexecuted instantiation: flatview_read_continue_arm
Unexecuted instantiation: flatview_read_continue_aarch64
Unexecuted instantiation: flatview_read_continue_m68k
Unexecuted instantiation: flatview_read_continue_mips
Unexecuted instantiation: flatview_read_continue_mipsel
Unexecuted instantiation: flatview_read_continue_mips64
Unexecuted instantiation: flatview_read_continue_mips64el
Unexecuted instantiation: flatview_read_continue_sparc
Unexecuted instantiation: flatview_read_continue_sparc64
Unexecuted instantiation: flatview_read_continue_ppc
Unexecuted instantiation: flatview_read_continue_ppc64
Unexecuted instantiation: flatview_read_continue_riscv32
Unexecuted instantiation: flatview_read_continue_riscv64
flatview_read_continue_s390x
Line
Count
Source
1641
1.34k
{
1642
1.34k
    uint8_t *ram_ptr;
1643
1.34k
    uint64_t val;
1644
1.34k
    MemTxResult result = MEMTX_OK;
1645
1.34k
    bool release_lock = false;
1646
1.34k
    uint8_t *buf = ptr;
1647
1648
2.68k
    for (;;) {
1649
2.68k
        if (!memory_access_is_direct(mr, false)) {
1650
            /* I/O case */
1651
2.68k
            release_lock |= prepare_mmio_access(mr);
1652
2.68k
            l = memory_access_size(mr, l, addr1);
1653
2.68k
            result |= memory_region_dispatch_read(uc, mr, addr1, &val,
1654
2.68k
                                                  size_memop(l), attrs);
1655
2.68k
            stn_he_p(buf, l, val);
1656
2.68k
        } else {
1657
            /* RAM case */
1658
0
            ram_ptr = qemu_ram_ptr_length(fv->root->uc, mr->ram_block, addr1, &l, false);
1659
0
            memcpy(buf, ram_ptr, l);
1660
0
        }
1661
1662
2.68k
        if (release_lock) {
1663
2.68k
            release_lock = false;
1664
2.68k
        }
1665
1666
2.68k
        len -= l;
1667
2.68k
        buf += l;
1668
2.68k
        addr += l;
1669
1670
2.68k
        if (!len) {
1671
1.34k
            break;
1672
1.34k
        }
1673
1674
1.34k
        l = len;
1675
1.34k
        mr = flatview_translate(uc, fv, addr, &addr1, &l, false, attrs);
1676
1.34k
    }
1677
1678
1.34k
    return result;
1679
1.34k
}
Unexecuted instantiation: flatview_read_continue_tricore
1680
1681
/* Called from RCU critical section.  */
1682
static MemTxResult flatview_read(struct uc_struct *uc, FlatView *fv, hwaddr addr,
1683
                                 MemTxAttrs attrs, void *buf, hwaddr len)
1684
0
{
1685
0
    hwaddr l;
1686
0
    hwaddr addr1;
1687
0
    MemoryRegion *mr;
1688
1689
0
    l = len;
1690
0
    mr = flatview_translate(uc, fv, addr, &addr1, &l, false, attrs);
1691
0
    return flatview_read_continue(uc, fv, addr, attrs, buf, len,
1692
0
                                  addr1, l, mr);
1693
0
}
1694
1695
MemTxResult address_space_read_full(AddressSpace *as, hwaddr addr,
1696
                                    MemTxAttrs attrs, void *buf, hwaddr len)
1697
0
{
1698
0
    MemTxResult result = MEMTX_OK;
1699
0
    FlatView *fv;
1700
1701
0
    if (len > 0) {
1702
0
        fv = address_space_to_flatview(as);
1703
0
        result = flatview_read(as->uc, fv, addr, attrs, buf, len);
1704
0
    }
1705
1706
0
    return result;
1707
0
}
Unexecuted instantiation: address_space_read_full_x86_64
Unexecuted instantiation: address_space_read_full_arm
Unexecuted instantiation: address_space_read_full_aarch64
Unexecuted instantiation: address_space_read_full_m68k
Unexecuted instantiation: address_space_read_full_mips
Unexecuted instantiation: address_space_read_full_mipsel
Unexecuted instantiation: address_space_read_full_mips64
Unexecuted instantiation: address_space_read_full_mips64el
Unexecuted instantiation: address_space_read_full_sparc
Unexecuted instantiation: address_space_read_full_sparc64
Unexecuted instantiation: address_space_read_full_ppc
Unexecuted instantiation: address_space_read_full_ppc64
Unexecuted instantiation: address_space_read_full_riscv32
Unexecuted instantiation: address_space_read_full_riscv64
Unexecuted instantiation: address_space_read_full_s390x
Unexecuted instantiation: address_space_read_full_tricore
1708
1709
MemTxResult address_space_write(AddressSpace *as, hwaddr addr,
1710
                                MemTxAttrs attrs,
1711
                                const void *buf, hwaddr len)
1712
176k
{
1713
176k
    MemTxResult result = MEMTX_OK;
1714
176k
    FlatView *fv;
1715
1716
176k
    if (len > 0) {
1717
176k
        fv = address_space_to_flatview(as);
1718
176k
        result = flatview_write(as->uc, fv, addr, attrs, buf, len);
1719
176k
    }
1720
1721
176k
    return result;
1722
176k
}
address_space_write_x86_64
Line
Count
Source
1712
33.6k
{
1713
33.6k
    MemTxResult result = MEMTX_OK;
1714
33.6k
    FlatView *fv;
1715
1716
33.6k
    if (len > 0) {
1717
33.6k
        fv = address_space_to_flatview(as);
1718
33.6k
        result = flatview_write(as->uc, fv, addr, attrs, buf, len);
1719
33.6k
    }
1720
1721
33.6k
    return result;
1722
33.6k
}
address_space_write_arm
Line
Count
Source
1712
43.9k
{
1713
43.9k
    MemTxResult result = MEMTX_OK;
1714
43.9k
    FlatView *fv;
1715
1716
43.9k
    if (len > 0) {
1717
43.9k
        fv = address_space_to_flatview(as);
1718
43.9k
        result = flatview_write(as->uc, fv, addr, attrs, buf, len);
1719
43.9k
    }
1720
1721
43.9k
    return result;
1722
43.9k
}
address_space_write_aarch64
Line
Count
Source
1712
60.2k
{
1713
60.2k
    MemTxResult result = MEMTX_OK;
1714
60.2k
    FlatView *fv;
1715
1716
60.2k
    if (len > 0) {
1717
60.2k
        fv = address_space_to_flatview(as);
1718
60.2k
        result = flatview_write(as->uc, fv, addr, attrs, buf, len);
1719
60.2k
    }
1720
1721
60.2k
    return result;
1722
60.2k
}
address_space_write_m68k
Line
Count
Source
1712
29
{
1713
29
    MemTxResult result = MEMTX_OK;
1714
29
    FlatView *fv;
1715
1716
29
    if (len > 0) {
1717
29
        fv = address_space_to_flatview(as);
1718
29
        result = flatview_write(as->uc, fv, addr, attrs, buf, len);
1719
29
    }
1720
1721
29
    return result;
1722
29
}
address_space_write_mips
Line
Count
Source
1712
7.41k
{
1713
7.41k
    MemTxResult result = MEMTX_OK;
1714
7.41k
    FlatView *fv;
1715
1716
7.41k
    if (len > 0) {
1717
7.41k
        fv = address_space_to_flatview(as);
1718
7.41k
        result = flatview_write(as->uc, fv, addr, attrs, buf, len);
1719
7.41k
    }
1720
1721
7.41k
    return result;
1722
7.41k
}
address_space_write_mipsel
Line
Count
Source
1712
8.94k
{
1713
8.94k
    MemTxResult result = MEMTX_OK;
1714
8.94k
    FlatView *fv;
1715
1716
8.94k
    if (len > 0) {
1717
8.94k
        fv = address_space_to_flatview(as);
1718
8.94k
        result = flatview_write(as->uc, fv, addr, attrs, buf, len);
1719
8.94k
    }
1720
1721
8.94k
    return result;
1722
8.94k
}
Unexecuted instantiation: address_space_write_mips64
Unexecuted instantiation: address_space_write_mips64el
address_space_write_sparc
Line
Count
Source
1712
46
{
1713
46
    MemTxResult result = MEMTX_OK;
1714
46
    FlatView *fv;
1715
1716
46
    if (len > 0) {
1717
46
        fv = address_space_to_flatview(as);
1718
46
        result = flatview_write(as->uc, fv, addr, attrs, buf, len);
1719
46
    }
1720
1721
46
    return result;
1722
46
}
Unexecuted instantiation: address_space_write_sparc64
Unexecuted instantiation: address_space_write_ppc
Unexecuted instantiation: address_space_write_ppc64
Unexecuted instantiation: address_space_write_riscv32
Unexecuted instantiation: address_space_write_riscv64
address_space_write_s390x
Line
Count
Source
1712
21.9k
{
1713
21.9k
    MemTxResult result = MEMTX_OK;
1714
21.9k
    FlatView *fv;
1715
1716
21.9k
    if (len > 0) {
1717
21.9k
        fv = address_space_to_flatview(as);
1718
21.9k
        result = flatview_write(as->uc, fv, addr, attrs, buf, len);
1719
21.9k
    }
1720
1721
21.9k
    return result;
1722
21.9k
}
Unexecuted instantiation: address_space_write_tricore
1723
1724
/* Direction-dispatching wrapper: route to the write or read helper based
 * on @is_write. */
MemTxResult address_space_rw(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
                             void *buf, hwaddr len, bool is_write)
{
    return is_write
        ? address_space_write(as, addr, attrs, buf, len)
        : address_space_read_full(as, addr, attrs, buf, len);
}
address_space_rw_x86_64
Line
Count
Source
1726
33.6k
{
1727
33.6k
    if (is_write) {
1728
33.6k
        return address_space_write(as, addr, attrs, buf, len);
1729
33.6k
    } else {
1730
0
        return address_space_read_full(as, addr, attrs, buf, len);
1731
0
    }
1732
33.6k
}
address_space_rw_arm
Line
Count
Source
1726
43.9k
{
1727
43.9k
    if (is_write) {
1728
43.9k
        return address_space_write(as, addr, attrs, buf, len);
1729
43.9k
    } else {
1730
0
        return address_space_read_full(as, addr, attrs, buf, len);
1731
0
    }
1732
43.9k
}
address_space_rw_aarch64
Line
Count
Source
1726
60.2k
{
1727
60.2k
    if (is_write) {
1728
60.2k
        return address_space_write(as, addr, attrs, buf, len);
1729
60.2k
    } else {
1730
0
        return address_space_read_full(as, addr, attrs, buf, len);
1731
0
    }
1732
60.2k
}
address_space_rw_m68k
Line
Count
Source
1726
29
{
1727
29
    if (is_write) {
1728
29
        return address_space_write(as, addr, attrs, buf, len);
1729
29
    } else {
1730
0
        return address_space_read_full(as, addr, attrs, buf, len);
1731
0
    }
1732
29
}
address_space_rw_mips
Line
Count
Source
1726
7.41k
{
1727
7.41k
    if (is_write) {
1728
7.41k
        return address_space_write(as, addr, attrs, buf, len);
1729
7.41k
    } else {
1730
0
        return address_space_read_full(as, addr, attrs, buf, len);
1731
0
    }
1732
7.41k
}
address_space_rw_mipsel
Line
Count
Source
1726
8.94k
{
1727
8.94k
    if (is_write) {
1728
8.94k
        return address_space_write(as, addr, attrs, buf, len);
1729
8.94k
    } else {
1730
0
        return address_space_read_full(as, addr, attrs, buf, len);
1731
0
    }
1732
8.94k
}
Unexecuted instantiation: address_space_rw_mips64
Unexecuted instantiation: address_space_rw_mips64el
address_space_rw_sparc
Line
Count
Source
1726
46
{
1727
46
    if (is_write) {
1728
46
        return address_space_write(as, addr, attrs, buf, len);
1729
46
    } else {
1730
0
        return address_space_read_full(as, addr, attrs, buf, len);
1731
0
    }
1732
46
}
Unexecuted instantiation: address_space_rw_sparc64
Unexecuted instantiation: address_space_rw_ppc
Unexecuted instantiation: address_space_rw_ppc64
Unexecuted instantiation: address_space_rw_riscv32
Unexecuted instantiation: address_space_rw_riscv64
address_space_rw_s390x
Line
Count
Source
1726
21.9k
{
1727
21.9k
    if (is_write) {
1728
21.9k
        return address_space_write(as, addr, attrs, buf, len);
1729
21.9k
    } else {
1730
0
        return address_space_read_full(as, addr, attrs, buf, len);
1731
0
    }
1732
21.9k
}
Unexecuted instantiation: address_space_rw_tricore
1733
1734
bool cpu_physical_memory_rw(AddressSpace *as, hwaddr addr, void *buf,
1735
                            hwaddr len, bool is_write)
1736
176k
{
1737
176k
    MemTxResult result = address_space_rw(as, addr, MEMTXATTRS_UNSPECIFIED,
1738
176k
                     buf, len, is_write);
1739
176k
    if (result == MEMTX_OK) {
1740
176k
        return true;
1741
176k
    } else {
1742
0
        return false;
1743
0
    }
1744
176k
}
cpu_physical_memory_rw_x86_64
Line
Count
Source
1736
33.6k
{
1737
33.6k
    MemTxResult result = address_space_rw(as, addr, MEMTXATTRS_UNSPECIFIED,
1738
33.6k
                     buf, len, is_write);
1739
33.6k
    if (result == MEMTX_OK) {
1740
33.6k
        return true;
1741
33.6k
    } else {
1742
        return false;
1743
0
    }
1744
33.6k
}
cpu_physical_memory_rw_arm
Line
Count
Source
1736
43.9k
{
1737
43.9k
    MemTxResult result = address_space_rw(as, addr, MEMTXATTRS_UNSPECIFIED,
1738
43.9k
                     buf, len, is_write);
1739
43.9k
    if (result == MEMTX_OK) {
1740
43.9k
        return true;
1741
43.9k
    } else {
1742
        return false;
1743
0
    }
1744
43.9k
}
cpu_physical_memory_rw_aarch64
Line
Count
Source
1736
60.2k
{
1737
60.2k
    MemTxResult result = address_space_rw(as, addr, MEMTXATTRS_UNSPECIFIED,
1738
60.2k
                     buf, len, is_write);
1739
60.2k
    if (result == MEMTX_OK) {
1740
60.2k
        return true;
1741
60.2k
    } else {
1742
        return false;
1743
0
    }
1744
60.2k
}
cpu_physical_memory_rw_m68k
Line
Count
Source
1736
29
{
1737
29
    MemTxResult result = address_space_rw(as, addr, MEMTXATTRS_UNSPECIFIED,
1738
29
                     buf, len, is_write);
1739
29
    if (result == MEMTX_OK) {
1740
29
        return true;
1741
29
    } else {
1742
        return false;
1743
0
    }
1744
29
}
cpu_physical_memory_rw_mips
Line
Count
Source
1736
7.41k
{
1737
7.41k
    MemTxResult result = address_space_rw(as, addr, MEMTXATTRS_UNSPECIFIED,
1738
7.41k
                     buf, len, is_write);
1739
7.41k
    if (result == MEMTX_OK) {
1740
7.41k
        return true;
1741
7.41k
    } else {
1742
        return false;
1743
0
    }
1744
7.41k
}
cpu_physical_memory_rw_mipsel
Line
Count
Source
1736
8.94k
{
1737
8.94k
    MemTxResult result = address_space_rw(as, addr, MEMTXATTRS_UNSPECIFIED,
1738
8.94k
                     buf, len, is_write);
1739
8.94k
    if (result == MEMTX_OK) {
1740
8.94k
        return true;
1741
8.94k
    } else {
1742
        return false;
1743
0
    }
1744
8.94k
}
Unexecuted instantiation: cpu_physical_memory_rw_mips64
Unexecuted instantiation: cpu_physical_memory_rw_mips64el
cpu_physical_memory_rw_sparc
Line
Count
Source
1736
46
{
1737
46
    MemTxResult result = address_space_rw(as, addr, MEMTXATTRS_UNSPECIFIED,
1738
46
                     buf, len, is_write);
1739
46
    if (result == MEMTX_OK) {
1740
46
        return true;
1741
46
    } else {
1742
        return false;
1743
0
    }
1744
46
}
Unexecuted instantiation: cpu_physical_memory_rw_sparc64
Unexecuted instantiation: cpu_physical_memory_rw_ppc
Unexecuted instantiation: cpu_physical_memory_rw_ppc64
Unexecuted instantiation: cpu_physical_memory_rw_riscv32
Unexecuted instantiation: cpu_physical_memory_rw_riscv64
cpu_physical_memory_rw_s390x
Line
Count
Source
1736
21.9k
{
1737
21.9k
    MemTxResult result = address_space_rw(as, addr, MEMTXATTRS_UNSPECIFIED,
1738
21.9k
                     buf, len, is_write);
1739
21.9k
    if (result == MEMTX_OK) {
1740
21.9k
        return true;
1741
21.9k
    } else {
1742
        return false;
1743
0
    }
1744
21.9k
}
Unexecuted instantiation: cpu_physical_memory_rw_tricore
1745
1746
enum write_rom_type {
1747
    WRITE_DATA,
1748
    FLUSH_CACHE,
1749
};
1750
1751
static inline MemTxResult address_space_write_rom_internal(AddressSpace *as,
1752
                                                           hwaddr addr,
1753
                                                           MemTxAttrs attrs,
1754
                                                           const void *ptr,
1755
                                                           hwaddr len,
1756
                                                           enum write_rom_type type)
1757
0
{
1758
0
    hwaddr l;
1759
0
    uint8_t *ram_ptr;
1760
0
    hwaddr addr1;
1761
0
    MemoryRegion *mr;
1762
0
    const uint8_t *buf = ptr;
1763
1764
0
    while (len > 0) {
1765
0
        l = len;
1766
0
        mr = address_space_translate(as, addr, &addr1, &l, true, attrs);
1767
1768
0
        if (!memory_region_is_ram(mr)) {
1769
0
            l = memory_access_size(mr, l, addr1);
1770
0
        } else {
1771
            /* ROM/RAM case */
1772
0
            ram_ptr = qemu_map_ram_ptr(as->uc, mr->ram_block, addr1);
1773
0
            switch (type) {
1774
0
            case WRITE_DATA:
1775
0
                memcpy(ram_ptr, buf, l);
1776
0
                break;
1777
0
            case FLUSH_CACHE:
1778
0
                flush_icache_range((uintptr_t)ram_ptr, (uintptr_t)ram_ptr + l);
1779
0
                break;
1780
0
            }
1781
0
        }
1782
0
        len -= l;
1783
0
        buf += l;
1784
0
        addr += l;
1785
0
    }
1786
0
    return MEMTX_OK;
1787
0
}
1788
1789
/* used for ROM loading : can write in RAM and ROM */
1790
MemTxResult address_space_write_rom(AddressSpace *as, hwaddr addr,
1791
                                    MemTxAttrs attrs,
1792
                                    const void *buf, hwaddr len)
1793
0
{
1794
0
    return address_space_write_rom_internal(as, addr, attrs,
1795
0
                                            buf, len, WRITE_DATA);
1796
0
}
Unexecuted instantiation: address_space_write_rom_x86_64
Unexecuted instantiation: address_space_write_rom_arm
Unexecuted instantiation: address_space_write_rom_aarch64
Unexecuted instantiation: address_space_write_rom_m68k
Unexecuted instantiation: address_space_write_rom_mips
Unexecuted instantiation: address_space_write_rom_mipsel
Unexecuted instantiation: address_space_write_rom_mips64
Unexecuted instantiation: address_space_write_rom_mips64el
Unexecuted instantiation: address_space_write_rom_sparc
Unexecuted instantiation: address_space_write_rom_sparc64
Unexecuted instantiation: address_space_write_rom_ppc
Unexecuted instantiation: address_space_write_rom_ppc64
Unexecuted instantiation: address_space_write_rom_riscv32
Unexecuted instantiation: address_space_write_rom_riscv64
Unexecuted instantiation: address_space_write_rom_s390x
Unexecuted instantiation: address_space_write_rom_tricore
1797
1798
void cpu_flush_icache_range(AddressSpace *as, hwaddr start, hwaddr len)
1799
0
{
1800
0
}
Unexecuted instantiation: cpu_flush_icache_range_x86_64
Unexecuted instantiation: cpu_flush_icache_range_arm
Unexecuted instantiation: cpu_flush_icache_range_aarch64
Unexecuted instantiation: cpu_flush_icache_range_m68k
Unexecuted instantiation: cpu_flush_icache_range_mips
Unexecuted instantiation: cpu_flush_icache_range_mipsel
Unexecuted instantiation: cpu_flush_icache_range_mips64
Unexecuted instantiation: cpu_flush_icache_range_mips64el
Unexecuted instantiation: cpu_flush_icache_range_sparc
Unexecuted instantiation: cpu_flush_icache_range_sparc64
Unexecuted instantiation: cpu_flush_icache_range_ppc
Unexecuted instantiation: cpu_flush_icache_range_ppc64
Unexecuted instantiation: cpu_flush_icache_range_riscv32
Unexecuted instantiation: cpu_flush_icache_range_riscv64
Unexecuted instantiation: cpu_flush_icache_range_s390x
Unexecuted instantiation: cpu_flush_icache_range_tricore
1801
1802
void cpu_exec_init_all(struct uc_struct *uc)
1803
176k
{
1804
    /* The data structures we set up here depend on knowing the page size,
1805
     * so no more changes can be made after this point.
1806
     * In an ideal world, nothing we did before we had finished the
1807
     * machine setup would care about the target page size, and we could
1808
     * do this much later, rather than requiring board models to state
1809
     * up front what their requirements are.
1810
     */
1811
176k
    finalize_target_page_bits(uc);
1812
176k
    memory_map_init(uc);
1813
176k
    io_mem_init(uc);
1814
176k
}
cpu_exec_init_all_x86_64
Line
Count
Source
1803
33.6k
{
1804
    /* The data structures we set up here depend on knowing the page size,
1805
     * so no more changes can be made after this point.
1806
     * In an ideal world, nothing we did before we had finished the
1807
     * machine setup would care about the target page size, and we could
1808
     * do this much later, rather than requiring board models to state
1809
     * up front what their requirements are.
1810
     */
1811
33.6k
    finalize_target_page_bits(uc);
1812
33.6k
    memory_map_init(uc);
1813
33.6k
    io_mem_init(uc);
1814
33.6k
}
cpu_exec_init_all_arm
Line
Count
Source
1803
43.9k
{
1804
    /* The data structures we set up here depend on knowing the page size,
1805
     * so no more changes can be made after this point.
1806
     * In an ideal world, nothing we did before we had finished the
1807
     * machine setup would care about the target page size, and we could
1808
     * do this much later, rather than requiring board models to state
1809
     * up front what their requirements are.
1810
     */
1811
43.9k
    finalize_target_page_bits(uc);
1812
43.9k
    memory_map_init(uc);
1813
43.9k
    io_mem_init(uc);
1814
43.9k
}
cpu_exec_init_all_aarch64
Line
Count
Source
1803
60.2k
{
1804
    /* The data structures we set up here depend on knowing the page size,
1805
     * so no more changes can be made after this point.
1806
     * In an ideal world, nothing we did before we had finished the
1807
     * machine setup would care about the target page size, and we could
1808
     * do this much later, rather than requiring board models to state
1809
     * up front what their requirements are.
1810
     */
1811
60.2k
    finalize_target_page_bits(uc);
1812
60.2k
    memory_map_init(uc);
1813
60.2k
    io_mem_init(uc);
1814
60.2k
}
cpu_exec_init_all_m68k
Line
Count
Source
1803
29
{
1804
    /* The data structures we set up here depend on knowing the page size,
1805
     * so no more changes can be made after this point.
1806
     * In an ideal world, nothing we did before we had finished the
1807
     * machine setup would care about the target page size, and we could
1808
     * do this much later, rather than requiring board models to state
1809
     * up front what their requirements are.
1810
     */
1811
29
    finalize_target_page_bits(uc);
1812
29
    memory_map_init(uc);
1813
29
    io_mem_init(uc);
1814
29
}
cpu_exec_init_all_mips
Line
Count
Source
1803
7.41k
{
1804
    /* The data structures we set up here depend on knowing the page size,
1805
     * so no more changes can be made after this point.
1806
     * In an ideal world, nothing we did before we had finished the
1807
     * machine setup would care about the target page size, and we could
1808
     * do this much later, rather than requiring board models to state
1809
     * up front what their requirements are.
1810
     */
1811
7.41k
    finalize_target_page_bits(uc);
1812
7.41k
    memory_map_init(uc);
1813
7.41k
    io_mem_init(uc);
1814
7.41k
}
cpu_exec_init_all_mipsel
Line
Count
Source
1803
8.94k
{
1804
    /* The data structures we set up here depend on knowing the page size,
1805
     * so no more changes can be made after this point.
1806
     * In an ideal world, nothing we did before we had finished the
1807
     * machine setup would care about the target page size, and we could
1808
     * do this much later, rather than requiring board models to state
1809
     * up front what their requirements are.
1810
     */
1811
8.94k
    finalize_target_page_bits(uc);
1812
8.94k
    memory_map_init(uc);
1813
8.94k
    io_mem_init(uc);
1814
8.94k
}
Unexecuted instantiation: cpu_exec_init_all_mips64
Unexecuted instantiation: cpu_exec_init_all_mips64el
cpu_exec_init_all_sparc
Line
Count
Source
1803
46
{
1804
    /* The data structures we set up here depend on knowing the page size,
1805
     * so no more changes can be made after this point.
1806
     * In an ideal world, nothing we did before we had finished the
1807
     * machine setup would care about the target page size, and we could
1808
     * do this much later, rather than requiring board models to state
1809
     * up front what their requirements are.
1810
     */
1811
46
    finalize_target_page_bits(uc);
1812
46
    memory_map_init(uc);
1813
46
    io_mem_init(uc);
1814
46
}
Unexecuted instantiation: cpu_exec_init_all_sparc64
Unexecuted instantiation: cpu_exec_init_all_ppc
Unexecuted instantiation: cpu_exec_init_all_ppc64
Unexecuted instantiation: cpu_exec_init_all_riscv32
Unexecuted instantiation: cpu_exec_init_all_riscv64
cpu_exec_init_all_s390x
Line
Count
Source
1803
21.9k
{
1804
    /* The data structures we set up here depend on knowing the page size,
1805
     * so no more changes can be made after this point.
1806
     * In an ideal world, nothing we did before we had finished the
1807
     * machine setup would care about the target page size, and we could
1808
     * do this much later, rather than requiring board models to state
1809
     * up front what their requirements are.
1810
     */
1811
21.9k
    finalize_target_page_bits(uc);
1812
21.9k
    memory_map_init(uc);
1813
21.9k
    io_mem_init(uc);
1814
21.9k
}
Unexecuted instantiation: cpu_exec_init_all_tricore
1815
1816
static bool flatview_access_valid(struct uc_struct *uc, FlatView *fv, hwaddr addr, hwaddr len,
1817
                                  bool is_write, MemTxAttrs attrs)
1818
1.16M
{
1819
1.16M
    MemoryRegion *mr;
1820
1.16M
    hwaddr l, xlat;
1821
1822
2.32M
    while (len > 0) {
1823
1.16M
        l = len;
1824
1.16M
        mr = flatview_translate(uc, fv, addr, &xlat, &l, is_write, attrs);
1825
1.16M
        if (!memory_access_is_direct(mr, is_write)) {
1826
8.78k
            l = memory_access_size(mr, l, addr);
1827
8.78k
            if (!memory_region_access_valid(uc, mr, xlat, l, is_write, attrs)) {
1828
8.78k
                return false;
1829
8.78k
            }
1830
8.78k
        }
1831
1832
1.16M
        len -= l;
1833
1.16M
        addr += l;
1834
1.16M
    }
1835
1.16M
    return true;
1836
1.16M
}
1837
1838
bool address_space_access_valid(AddressSpace *as, hwaddr addr,
1839
                                hwaddr len, bool is_write,
1840
                                MemTxAttrs attrs)
1841
1.16M
{
1842
1.16M
    FlatView *fv;
1843
1.16M
    bool result;
1844
1845
1.16M
    fv = address_space_to_flatview(as);
1846
1.16M
    result = flatview_access_valid(as->uc, fv, addr, len, is_write, attrs);
1847
1.16M
    return result;
1848
1.16M
}
Unexecuted instantiation: address_space_access_valid_x86_64
Unexecuted instantiation: address_space_access_valid_arm
Unexecuted instantiation: address_space_access_valid_aarch64
Unexecuted instantiation: address_space_access_valid_m68k
Unexecuted instantiation: address_space_access_valid_mips
Unexecuted instantiation: address_space_access_valid_mipsel
Unexecuted instantiation: address_space_access_valid_mips64
Unexecuted instantiation: address_space_access_valid_mips64el
Unexecuted instantiation: address_space_access_valid_sparc
Unexecuted instantiation: address_space_access_valid_sparc64
Unexecuted instantiation: address_space_access_valid_ppc
Unexecuted instantiation: address_space_access_valid_ppc64
Unexecuted instantiation: address_space_access_valid_riscv32
Unexecuted instantiation: address_space_access_valid_riscv64
address_space_access_valid_s390x
Line
Count
Source
1841
1.16M
{
1842
1.16M
    FlatView *fv;
1843
1.16M
    bool result;
1844
1845
1.16M
    fv = address_space_to_flatview(as);
1846
1.16M
    result = flatview_access_valid(as->uc, fv, addr, len, is_write, attrs);
1847
1.16M
    return result;
1848
1.16M
}
Unexecuted instantiation: address_space_access_valid_tricore
1849
1850
static hwaddr
1851
flatview_extend_translation(struct uc_struct *uc, FlatView *fv, hwaddr addr,
1852
                            hwaddr target_len,
1853
                            MemoryRegion *mr, hwaddr base, hwaddr len,
1854
                            bool is_write, MemTxAttrs attrs)
1855
12
{
1856
12
    hwaddr done = 0;
1857
12
    hwaddr xlat;
1858
12
    MemoryRegion *this_mr;
1859
1860
12
    for (;;) {
1861
12
        target_len -= len;
1862
12
        addr += len;
1863
12
        done += len;
1864
12
        if (target_len == 0) {
1865
12
            return done;
1866
12
        }
1867
1868
0
        len = target_len;
1869
0
        this_mr = flatview_translate(uc, fv, addr, &xlat,
1870
0
                                     &len, is_write, attrs);
1871
0
        if (this_mr != mr || xlat != base + done) {
1872
0
            return done;
1873
0
        }
1874
0
    }
1875
12
}
1876
1877
/* Map a physical memory region into a host virtual address.
1878
 * May map a subset of the requested range, given by and returned in *plen.
1879
 * May return NULL if resources needed to perform the mapping are exhausted.
1880
 * Use only for reads OR writes - not for read-modify-write operations.
1881
 * Use cpu_register_map_client() to know when retrying the map operation is
1882
 * likely to succeed.
1883
 */
1884
void *address_space_map(AddressSpace *as,
1885
                        hwaddr addr,
1886
                        hwaddr *plen,
1887
                        bool is_write,
1888
                        MemTxAttrs attrs)
1889
12
{
1890
12
    hwaddr len = *plen;
1891
12
    hwaddr l, xlat;
1892
12
    MemoryRegion *mr;
1893
12
    void *ptr;
1894
12
    FlatView *fv;
1895
12
    struct uc_struct *uc = as->uc;
1896
1897
12
    if (len == 0) {
1898
0
        return NULL;
1899
0
    }
1900
1901
12
    l = len;
1902
12
    fv = address_space_to_flatview(as);
1903
12
    mr = flatview_translate(uc, fv, addr, &xlat, &l, is_write, attrs);
1904
1905
12
    if (!memory_access_is_direct(mr, is_write)) {
1906
        /* Avoid unbounded allocations */
1907
0
        l = MIN(l, TARGET_PAGE_SIZE);
1908
0
        mr->uc->bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, l);
1909
0
        mr->uc->bounce.addr = addr;
1910
0
        mr->uc->bounce.len = l;
1911
1912
0
        mr->uc->bounce.mr = mr;
1913
0
        if (!is_write) {
1914
0
            flatview_read(as->uc, fv, addr, MEMTXATTRS_UNSPECIFIED,
1915
0
                               mr->uc->bounce.buffer, l);
1916
0
        }
1917
1918
0
        *plen = l;
1919
0
        return mr->uc->bounce.buffer;
1920
0
    }
1921
1922
1923
12
    *plen = flatview_extend_translation(as->uc, fv, addr, len, mr, xlat,
1924
12
                                        l, is_write, attrs);
1925
12
    ptr = qemu_ram_ptr_length(as->uc, mr->ram_block, xlat, plen, true);
1926
1927
12
    return ptr;
1928
12
}
Unexecuted instantiation: address_space_map_x86_64
Unexecuted instantiation: address_space_map_arm
Unexecuted instantiation: address_space_map_aarch64
Unexecuted instantiation: address_space_map_m68k
Unexecuted instantiation: address_space_map_mips
Unexecuted instantiation: address_space_map_mipsel
Unexecuted instantiation: address_space_map_mips64
Unexecuted instantiation: address_space_map_mips64el
Unexecuted instantiation: address_space_map_sparc
Unexecuted instantiation: address_space_map_sparc64
Unexecuted instantiation: address_space_map_ppc
Unexecuted instantiation: address_space_map_ppc64
Unexecuted instantiation: address_space_map_riscv32
Unexecuted instantiation: address_space_map_riscv64
address_space_map_s390x
Line
Count
Source
1889
12
{
1890
12
    hwaddr len = *plen;
1891
12
    hwaddr l, xlat;
1892
12
    MemoryRegion *mr;
1893
12
    void *ptr;
1894
12
    FlatView *fv;
1895
12
    struct uc_struct *uc = as->uc;
1896
1897
12
    if (len == 0) {
1898
0
        return NULL;
1899
0
    }
1900
1901
12
    l = len;
1902
12
    fv = address_space_to_flatview(as);
1903
12
    mr = flatview_translate(uc, fv, addr, &xlat, &l, is_write, attrs);
1904
1905
12
    if (!memory_access_is_direct(mr, is_write)) {
1906
        /* Avoid unbounded allocations */
1907
0
        l = MIN(l, TARGET_PAGE_SIZE);
1908
0
        mr->uc->bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, l);
1909
0
        mr->uc->bounce.addr = addr;
1910
0
        mr->uc->bounce.len = l;
1911
1912
0
        mr->uc->bounce.mr = mr;
1913
0
        if (!is_write) {
1914
0
            flatview_read(as->uc, fv, addr, MEMTXATTRS_UNSPECIFIED,
1915
0
                               mr->uc->bounce.buffer, l);
1916
0
        }
1917
1918
0
        *plen = l;
1919
0
        return mr->uc->bounce.buffer;
1920
0
    }
1921
1922
1923
12
    *plen = flatview_extend_translation(as->uc, fv, addr, len, mr, xlat,
1924
12
                                        l, is_write, attrs);
1925
12
    ptr = qemu_ram_ptr_length(as->uc, mr->ram_block, xlat, plen, true);
1926
1927
12
    return ptr;
1928
12
}
Unexecuted instantiation: address_space_map_tricore
1929
1930
/* Unmaps a memory region previously mapped by address_space_map().
1931
 * Will also mark the memory as dirty if is_write is true.  access_len gives
1932
 * the amount of memory that was actually read or written by the caller.
1933
 */
1934
void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
1935
                         bool is_write, hwaddr access_len)
1936
12
{
1937
12
    if (buffer != as->uc->bounce.buffer) {
1938
12
        MemoryRegion *mr;
1939
12
        ram_addr_t addr1;
1940
1941
12
        mr = memory_region_from_host(as->uc, buffer, &addr1);
1942
12
        assert(mr != NULL);
1943
12
        if (is_write) {
1944
12
            invalidate_and_set_dirty(mr, addr1, access_len);
1945
12
        }
1946
12
        return;
1947
12
    }
1948
0
    if (is_write) {
1949
0
        address_space_write(as, as->uc->bounce.addr, MEMTXATTRS_UNSPECIFIED,
1950
0
                            as->uc->bounce.buffer, access_len);
1951
0
    }
1952
0
    qemu_vfree(as->uc->bounce.buffer);
1953
0
    as->uc->bounce.buffer = NULL;
1954
0
}
Unexecuted instantiation: address_space_unmap_x86_64
Unexecuted instantiation: address_space_unmap_arm
Unexecuted instantiation: address_space_unmap_aarch64
Unexecuted instantiation: address_space_unmap_m68k
Unexecuted instantiation: address_space_unmap_mips
Unexecuted instantiation: address_space_unmap_mipsel
Unexecuted instantiation: address_space_unmap_mips64
Unexecuted instantiation: address_space_unmap_mips64el
Unexecuted instantiation: address_space_unmap_sparc
Unexecuted instantiation: address_space_unmap_sparc64
Unexecuted instantiation: address_space_unmap_ppc
Unexecuted instantiation: address_space_unmap_ppc64
Unexecuted instantiation: address_space_unmap_riscv32
Unexecuted instantiation: address_space_unmap_riscv64
address_space_unmap_s390x
Line
Count
Source
1936
12
{
1937
12
    if (buffer != as->uc->bounce.buffer) {
1938
12
        MemoryRegion *mr;
1939
12
        ram_addr_t addr1;
1940
1941
12
        mr = memory_region_from_host(as->uc, buffer, &addr1);
1942
12
        assert(mr != NULL);
1943
12
        if (is_write) {
1944
12
            invalidate_and_set_dirty(mr, addr1, access_len);
1945
12
        }
1946
12
        return;
1947
12
    }
1948
0
    if (is_write) {
1949
0
        address_space_write(as, as->uc->bounce.addr, MEMTXATTRS_UNSPECIFIED,
1950
0
                            as->uc->bounce.buffer, access_len);
1951
0
    }
1952
0
    qemu_vfree(as->uc->bounce.buffer);
1953
    as->uc->bounce.buffer = NULL;
1954
0
}
Unexecuted instantiation: address_space_unmap_tricore
1955
1956
void *cpu_physical_memory_map(AddressSpace *as, hwaddr addr,
1957
                              hwaddr *plen,
1958
                              bool is_write)
1959
12
{
1960
12
    return address_space_map(as, addr, plen, is_write,
1961
12
                             MEMTXATTRS_UNSPECIFIED);
1962
12
}
Unexecuted instantiation: cpu_physical_memory_map_x86_64
Unexecuted instantiation: cpu_physical_memory_map_arm
Unexecuted instantiation: cpu_physical_memory_map_aarch64
Unexecuted instantiation: cpu_physical_memory_map_m68k
Unexecuted instantiation: cpu_physical_memory_map_mips
Unexecuted instantiation: cpu_physical_memory_map_mipsel
Unexecuted instantiation: cpu_physical_memory_map_mips64
Unexecuted instantiation: cpu_physical_memory_map_mips64el
Unexecuted instantiation: cpu_physical_memory_map_sparc
Unexecuted instantiation: cpu_physical_memory_map_sparc64
Unexecuted instantiation: cpu_physical_memory_map_ppc
Unexecuted instantiation: cpu_physical_memory_map_ppc64
Unexecuted instantiation: cpu_physical_memory_map_riscv32
Unexecuted instantiation: cpu_physical_memory_map_riscv64
cpu_physical_memory_map_s390x
Line
Count
Source
1959
12
{
1960
12
    return address_space_map(as, addr, plen, is_write,
1961
12
                             MEMTXATTRS_UNSPECIFIED);
1962
12
}
Unexecuted instantiation: cpu_physical_memory_map_tricore
1963
1964
void cpu_physical_memory_unmap(AddressSpace *as, void *buffer, hwaddr len,
1965
                               bool is_write, hwaddr access_len)
1966
12
{
1967
12
    address_space_unmap(as, buffer, len, is_write, access_len);
1968
12
}
Unexecuted instantiation: cpu_physical_memory_unmap_x86_64
Unexecuted instantiation: cpu_physical_memory_unmap_arm
Unexecuted instantiation: cpu_physical_memory_unmap_aarch64
Unexecuted instantiation: cpu_physical_memory_unmap_m68k
Unexecuted instantiation: cpu_physical_memory_unmap_mips
Unexecuted instantiation: cpu_physical_memory_unmap_mipsel
Unexecuted instantiation: cpu_physical_memory_unmap_mips64
Unexecuted instantiation: cpu_physical_memory_unmap_mips64el
Unexecuted instantiation: cpu_physical_memory_unmap_sparc
Unexecuted instantiation: cpu_physical_memory_unmap_sparc64
Unexecuted instantiation: cpu_physical_memory_unmap_ppc
Unexecuted instantiation: cpu_physical_memory_unmap_ppc64
Unexecuted instantiation: cpu_physical_memory_unmap_riscv32
Unexecuted instantiation: cpu_physical_memory_unmap_riscv64
cpu_physical_memory_unmap_s390x
Line
Count
Source
1966
12
{
1967
12
    address_space_unmap(as, buffer, len, is_write, access_len);
1968
12
}
Unexecuted instantiation: cpu_physical_memory_unmap_tricore
1969
1970
#define ARG1_DECL                AddressSpace *as
1971
99.0k
#define ARG1                     as
1972
#ifdef UNICORN_ARCH_POSTFIX
1973
#define SUFFIX UNICORN_ARCH_POSTFIX
1974
#else
1975
#define SUFFIX
1976
#endif
1977
99.0k
#define TRANSLATE(...)           address_space_translate(as, __VA_ARGS__)
1978
#include "memory_ldst.inc.c"
1979
1980
/* Called from RCU critical section.  This function has the same
1981
 * semantics as address_space_translate, but it only works on a
1982
 * predefined range of a MemoryRegion that was mapped with
1983
 * address_space_cache_init.
1984
 */
1985
static inline MemoryRegion *address_space_translate_cached(
1986
    MemoryRegionCache *cache, hwaddr addr, hwaddr *xlat,
1987
    hwaddr *plen, bool is_write, MemTxAttrs attrs)
1988
0
{
1989
0
    MemoryRegionSection section;
1990
0
    MemoryRegion *mr;
1991
0
    IOMMUMemoryRegion *iommu_mr;
1992
0
    AddressSpace *target_as;
1993
1994
0
    assert(!cache->ptr);
1995
0
    *xlat = addr + cache->xlat;
1996
1997
0
    mr = cache->mrs.mr;
1998
0
    iommu_mr = memory_region_get_iommu(mr);
1999
0
    if (!iommu_mr) {
2000
        /* MMIO region.  */
2001
0
        return mr;
2002
0
    }
2003
2004
0
    section = address_space_translate_iommu(iommu_mr, xlat, plen,
2005
0
                                            NULL, is_write, true,
2006
0
                                            &target_as, attrs);
2007
0
    return section.mr;
2008
0
}
2009
2010
#define ARG1_DECL                MemoryRegionCache *cache
2011
0
#define ARG1                     cache
2012
#ifdef UNICORN_ARCH_POSTFIX
2013
#define SUFFIX                   glue(_cached_slow, UNICORN_ARCH_POSTFIX)
2014
#else
2015
#define SUFFIX                   _cached_slow
2016
#endif
2017
0
#define TRANSLATE(...)           address_space_translate_cached(cache, __VA_ARGS__)
2018
#include "memory_ldst.inc.c"
2019
2020
/* virtual memory access for debug (includes writing to ROM) */
2021
int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
2022
                        void *ptr, target_ulong len, bool is_write)
2023
0
{
2024
#ifdef TARGET_ARM
2025
    struct uc_struct *uc = cpu->uc;
2026
#endif
2027
0
    hwaddr phys_addr;
2028
0
    target_ulong l, page;
2029
0
    uint8_t *buf = ptr;
2030
2031
0
    while (len > 0) {
2032
0
        int asidx;
2033
0
        MemTxAttrs attrs;
2034
2035
0
        page = addr & TARGET_PAGE_MASK;
2036
0
        phys_addr = cpu_get_phys_page_attrs_debug(cpu, page, &attrs);
2037
0
        asidx = cpu_asidx_from_attrs(cpu, attrs);
2038
        /* if no physical page mapped, return an error */
2039
0
        if (phys_addr == -1)
2040
0
            return -1;
2041
0
        l = (page + TARGET_PAGE_SIZE) - addr;
2042
0
        if (l > len)
2043
0
            l = len;
2044
0
        phys_addr += (addr & ~TARGET_PAGE_MASK);
2045
0
        if (is_write) {
2046
0
            address_space_write_rom(cpu->cpu_ases[asidx].as, phys_addr,
2047
0
                                    attrs, buf, l);
2048
0
        } else {
2049
0
            address_space_read(cpu->cpu_ases[asidx].as, phys_addr, attrs, buf,
2050
0
                               l);
2051
0
        }
2052
0
        len -= l;
2053
0
        buf += l;
2054
0
        addr += l;
2055
0
    }
2056
0
    return 0;
2057
0
}
Unexecuted instantiation: cpu_memory_rw_debug_x86_64
Unexecuted instantiation: cpu_memory_rw_debug_arm
Unexecuted instantiation: cpu_memory_rw_debug_aarch64
Unexecuted instantiation: cpu_memory_rw_debug_m68k
Unexecuted instantiation: cpu_memory_rw_debug_mips
Unexecuted instantiation: cpu_memory_rw_debug_mipsel
Unexecuted instantiation: cpu_memory_rw_debug_mips64
Unexecuted instantiation: cpu_memory_rw_debug_mips64el
Unexecuted instantiation: cpu_memory_rw_debug_sparc
Unexecuted instantiation: cpu_memory_rw_debug_sparc64
Unexecuted instantiation: cpu_memory_rw_debug_ppc
Unexecuted instantiation: cpu_memory_rw_debug_ppc64
Unexecuted instantiation: cpu_memory_rw_debug_riscv32
Unexecuted instantiation: cpu_memory_rw_debug_riscv64
Unexecuted instantiation: cpu_memory_rw_debug_s390x
Unexecuted instantiation: cpu_memory_rw_debug_tricore
2058
2059
/*
2060
 * Allows code that needs to deal with migration bitmaps etc to still be built
2061
 * target independent.
2062
 */
2063
size_t qemu_target_page_size(struct uc_struct *uc)
2064
0
{
2065
0
    return TARGET_PAGE_SIZE;
2066
0
}
Unexecuted instantiation: qemu_target_page_size_x86_64
Unexecuted instantiation: qemu_target_page_size_arm
Unexecuted instantiation: qemu_target_page_size_aarch64
Unexecuted instantiation: qemu_target_page_size_m68k
Unexecuted instantiation: qemu_target_page_size_mips
Unexecuted instantiation: qemu_target_page_size_mipsel
Unexecuted instantiation: qemu_target_page_size_mips64
Unexecuted instantiation: qemu_target_page_size_mips64el
Unexecuted instantiation: qemu_target_page_size_sparc
Unexecuted instantiation: qemu_target_page_size_sparc64
Unexecuted instantiation: qemu_target_page_size_ppc
Unexecuted instantiation: qemu_target_page_size_ppc64
Unexecuted instantiation: qemu_target_page_size_riscv32
Unexecuted instantiation: qemu_target_page_size_riscv64
Unexecuted instantiation: qemu_target_page_size_s390x
Unexecuted instantiation: qemu_target_page_size_tricore
2067
2068
int qemu_target_page_bits(struct uc_struct *uc)
2069
0
{
2070
0
    return TARGET_PAGE_BITS;
2071
0
}
Unexecuted instantiation: qemu_target_page_bits_x86_64
Unexecuted instantiation: qemu_target_page_bits_arm
Unexecuted instantiation: qemu_target_page_bits_aarch64
Unexecuted instantiation: qemu_target_page_bits_m68k
Unexecuted instantiation: qemu_target_page_bits_mips
Unexecuted instantiation: qemu_target_page_bits_mipsel
Unexecuted instantiation: qemu_target_page_bits_mips64
Unexecuted instantiation: qemu_target_page_bits_mips64el
Unexecuted instantiation: qemu_target_page_bits_sparc
Unexecuted instantiation: qemu_target_page_bits_sparc64
Unexecuted instantiation: qemu_target_page_bits_ppc
Unexecuted instantiation: qemu_target_page_bits_ppc64
Unexecuted instantiation: qemu_target_page_bits_riscv32
Unexecuted instantiation: qemu_target_page_bits_riscv64
Unexecuted instantiation: qemu_target_page_bits_s390x
Unexecuted instantiation: qemu_target_page_bits_tricore
2072
2073
int qemu_target_page_bits_min(void)
2074
0
{
2075
0
    return TARGET_PAGE_BITS_MIN;
2076
0
}
Unexecuted instantiation: qemu_target_page_bits_min_x86_64
Unexecuted instantiation: qemu_target_page_bits_min_arm
Unexecuted instantiation: qemu_target_page_bits_min_aarch64
Unexecuted instantiation: qemu_target_page_bits_min_m68k
Unexecuted instantiation: qemu_target_page_bits_min_mips
Unexecuted instantiation: qemu_target_page_bits_min_mipsel
Unexecuted instantiation: qemu_target_page_bits_min_mips64
Unexecuted instantiation: qemu_target_page_bits_min_mips64el
Unexecuted instantiation: qemu_target_page_bits_min_sparc
Unexecuted instantiation: qemu_target_page_bits_min_sparc64
Unexecuted instantiation: qemu_target_page_bits_min_ppc
Unexecuted instantiation: qemu_target_page_bits_min_ppc64
Unexecuted instantiation: qemu_target_page_bits_min_riscv32
Unexecuted instantiation: qemu_target_page_bits_min_riscv64
Unexecuted instantiation: qemu_target_page_bits_min_s390x
Unexecuted instantiation: qemu_target_page_bits_min_tricore
2077
2078
/* True when the guest target is big-endian.  This is a compile-time
 * property of the target, selected by the TARGET_WORDS_BIGENDIAN macro. */
bool target_words_bigendian(void)
{
#ifdef TARGET_WORDS_BIGENDIAN
    return true;
#else
    return false;
#endif
}
Unexecuted instantiation: target_words_bigendian_x86_64
Unexecuted instantiation: target_words_bigendian_arm
Unexecuted instantiation: target_words_bigendian_aarch64
Unexecuted instantiation: target_words_bigendian_m68k
Unexecuted instantiation: target_words_bigendian_mips
Unexecuted instantiation: target_words_bigendian_mipsel
Unexecuted instantiation: target_words_bigendian_mips64
Unexecuted instantiation: target_words_bigendian_mips64el
Unexecuted instantiation: target_words_bigendian_sparc
Unexecuted instantiation: target_words_bigendian_sparc64
Unexecuted instantiation: target_words_bigendian_ppc
Unexecuted instantiation: target_words_bigendian_ppc64
Unexecuted instantiation: target_words_bigendian_riscv32
Unexecuted instantiation: target_words_bigendian_riscv64
Unexecuted instantiation: target_words_bigendian_s390x
Unexecuted instantiation: target_words_bigendian_tricore
2086
2087
/* Return true when the physical address resolves to an I/O (non-RAM)
 * memory region in the given address space.
 *
 * The translation is done for a read access of length 1 with
 * unspecified transaction attributes. */
bool cpu_physical_memory_is_io(AddressSpace *as, hwaddr phys_addr)
{
    hwaddr len = 1;
    MemoryRegion *region;

    region = address_space_translate(as, phys_addr, &phys_addr, &len,
                                     false, MEMTXATTRS_UNSPECIFIED);

    return !memory_region_is_ram(region);
}
Unexecuted instantiation: cpu_physical_memory_is_io_x86_64
Unexecuted instantiation: cpu_physical_memory_is_io_arm
Unexecuted instantiation: cpu_physical_memory_is_io_aarch64
Unexecuted instantiation: cpu_physical_memory_is_io_m68k
Unexecuted instantiation: cpu_physical_memory_is_io_mips
Unexecuted instantiation: cpu_physical_memory_is_io_mipsel
Unexecuted instantiation: cpu_physical_memory_is_io_mips64
Unexecuted instantiation: cpu_physical_memory_is_io_mips64el
Unexecuted instantiation: cpu_physical_memory_is_io_sparc
Unexecuted instantiation: cpu_physical_memory_is_io_sparc64
Unexecuted instantiation: cpu_physical_memory_is_io_ppc
Unexecuted instantiation: cpu_physical_memory_is_io_ppc64
Unexecuted instantiation: cpu_physical_memory_is_io_riscv32
Unexecuted instantiation: cpu_physical_memory_is_io_riscv64
Unexecuted instantiation: cpu_physical_memory_is_io_s390x
Unexecuted instantiation: cpu_physical_memory_is_io_tricore
2100
2101
/*
 * Unmap pages of memory from start to start+length such that
 * they a) read as 0, b) Trigger whatever fault mechanism
 * the OS provides for postcopy.
 * The pages must be unmapped by the end of the function.
 * Returns: 0 on success, non-0 on failure.
 *
 * NOTE(review): Unicorn strips QEMU's fallocate-based discard path, so
 * when madvise is not applicable (e.g. rb->page_size differs from the
 * host page size, as with huge pages) this returns -1 with errno set to
 * ENOTSUP rather than discarding anything — confirm callers tolerate that.
 */
int ram_block_discard_range(struct uc_struct *uc, RAMBlock *rb, uint64_t start, size_t length)
{
    int ret = -1;

    /* start is an offset into the block, so the host address is
     * simply rb->host + start. */
    uint8_t *host_startaddr = rb->host + start;

    /* Both the start address and the length must be aligned to the
     * block's own page size (which may be a huge-page size). */
    if (!QEMU_PTR_IS_ALIGNED(host_startaddr, rb->page_size)) {
        //error_report("ram_block_discard_range: Unaligned start address: %p",
        //             host_startaddr);
        goto err;
    }

    if ((start + length) <= rb->used_length) {
        bool need_madvise;
        if (!QEMU_IS_ALIGNED(length, rb->page_size)) {
            //error_report("ram_block_discard_range: Unaligned length: %zx",
            //             length);
            goto err;
        }

        errno = ENOTSUP; /* If we are missing MADVISE etc */

        /* The logic here is messy;
         *    madvise DONTNEED fails for hugepages
         *    fallocate works on hugepages and shmem
         */
        need_madvise = (rb->page_size == uc->qemu_host_page_size);
        if (need_madvise) {
            /* For normal RAM this causes it to be unmapped,
             * for shared memory it causes the local mapping to disappear
             * and to fall back on the file contents (which we just
             * fallocate'd away).
             */
#if defined(CONFIG_MADVISE)
            /* madvise returns 0 on success, so a successful discard
             * leaves ret == 0 for the common path below. */
            ret =  madvise(host_startaddr, length, MADV_DONTNEED);
            if (ret) {
                /* Convert to a negative errno-style error code. */
                ret = -errno;
                //error_report("ram_block_discard_range: Failed to discard range "
                //             "%s:%" PRIx64 " +%zx (%d)",
                //             rb->idstr, start, length, ret);
                goto err;
            }
#else
            /* No madvise on this host: discarding is unsupported. */
            ret = -ENOSYS;
            //error_report("ram_block_discard_range: MADVISE not available"
            //             "%s:%" PRIx64 " +%zx (%d)",
            //             rb->idstr, start, length, ret);
            goto err;
#endif
        }
    } else {
        /* Range runs past the block's used length: fall through and
         * return the initial -1 without touching anything. */
        //error_report("ram_block_discard_range: Overrun block '%s' (%" PRIu64
        //             "/%zx/" RAM_ADDR_FMT")",
        //             rb->idstr, start, length, rb->used_length);
    }

err:
    return ret;
}
Unexecuted instantiation: ram_block_discard_range_x86_64
Unexecuted instantiation: ram_block_discard_range_arm
Unexecuted instantiation: ram_block_discard_range_aarch64
Unexecuted instantiation: ram_block_discard_range_m68k
Unexecuted instantiation: ram_block_discard_range_mips
Unexecuted instantiation: ram_block_discard_range_mipsel
Unexecuted instantiation: ram_block_discard_range_mips64
Unexecuted instantiation: ram_block_discard_range_mips64el
Unexecuted instantiation: ram_block_discard_range_sparc
Unexecuted instantiation: ram_block_discard_range_sparc64
Unexecuted instantiation: ram_block_discard_range_ppc
Unexecuted instantiation: ram_block_discard_range_ppc64
Unexecuted instantiation: ram_block_discard_range_riscv32
Unexecuted instantiation: ram_block_discard_range_riscv64
Unexecuted instantiation: ram_block_discard_range_s390x
Unexecuted instantiation: ram_block_discard_range_tricore
2168
2169
/* Query whether this RAM block was registered as persistent memory
 * (the RAM_PMEM flag was set at creation time). */
bool ramblock_is_pmem(RAMBlock *rb)
{
    return (rb->flags & RAM_PMEM) != 0;
}
Unexecuted instantiation: ramblock_is_pmem_x86_64
Unexecuted instantiation: ramblock_is_pmem_arm
Unexecuted instantiation: ramblock_is_pmem_aarch64
Unexecuted instantiation: ramblock_is_pmem_m68k
Unexecuted instantiation: ramblock_is_pmem_mips
Unexecuted instantiation: ramblock_is_pmem_mipsel
Unexecuted instantiation: ramblock_is_pmem_mips64
Unexecuted instantiation: ramblock_is_pmem_mips64el
Unexecuted instantiation: ramblock_is_pmem_sparc
Unexecuted instantiation: ramblock_is_pmem_sparc64
Unexecuted instantiation: ramblock_is_pmem_ppc
Unexecuted instantiation: ramblock_is_pmem_ppc64
Unexecuted instantiation: ramblock_is_pmem_riscv32
Unexecuted instantiation: ramblock_is_pmem_riscv64
Unexecuted instantiation: ramblock_is_pmem_s390x
Unexecuted instantiation: ramblock_is_pmem_tricore
2173
2174
void page_size_init(struct uc_struct *uc)
2175
176k
{
2176
    /* NOTE: we can always suppose that qemu_host_page_size >=
2177
       TARGET_PAGE_SIZE */
2178
176k
    if (uc->qemu_host_page_size == 0) {
2179
176k
        uc->qemu_host_page_size = uc->qemu_real_host_page_size;
2180
176k
    }
2181
176k
    if (uc->qemu_host_page_size < TARGET_PAGE_SIZE) {
2182
0
        uc->qemu_host_page_size = TARGET_PAGE_SIZE;
2183
0
    }
2184
176k
}
page_size_init_x86_64
Line
Count
Source
2175
33.6k
{
2176
    /* NOTE: we can always suppose that qemu_host_page_size >=
2177
       TARGET_PAGE_SIZE */
2178
33.6k
    if (uc->qemu_host_page_size == 0) {
2179
33.6k
        uc->qemu_host_page_size = uc->qemu_real_host_page_size;
2180
33.6k
    }
2181
33.6k
    if (uc->qemu_host_page_size < TARGET_PAGE_SIZE) {
2182
0
        uc->qemu_host_page_size = TARGET_PAGE_SIZE;
2183
0
    }
2184
33.6k
}
page_size_init_arm
Line
Count
Source
2175
43.9k
{
2176
    /* NOTE: we can always suppose that qemu_host_page_size >=
2177
       TARGET_PAGE_SIZE */
2178
43.9k
    if (uc->qemu_host_page_size == 0) {
2179
43.9k
        uc->qemu_host_page_size = uc->qemu_real_host_page_size;
2180
43.9k
    }
2181
43.9k
    if (uc->qemu_host_page_size < TARGET_PAGE_SIZE) {
2182
0
        uc->qemu_host_page_size = TARGET_PAGE_SIZE;
2183
0
    }
2184
43.9k
}
page_size_init_aarch64
Line
Count
Source
2175
60.2k
{
2176
    /* NOTE: we can always suppose that qemu_host_page_size >=
2177
       TARGET_PAGE_SIZE */
2178
60.2k
    if (uc->qemu_host_page_size == 0) {
2179
60.2k
        uc->qemu_host_page_size = uc->qemu_real_host_page_size;
2180
60.2k
    }
2181
60.2k
    if (uc->qemu_host_page_size < TARGET_PAGE_SIZE) {
2182
0
        uc->qemu_host_page_size = TARGET_PAGE_SIZE;
2183
0
    }
2184
60.2k
}
page_size_init_m68k
Line
Count
Source
2175
29
{
2176
    /* NOTE: we can always suppose that qemu_host_page_size >=
2177
       TARGET_PAGE_SIZE */
2178
29
    if (uc->qemu_host_page_size == 0) {
2179
29
        uc->qemu_host_page_size = uc->qemu_real_host_page_size;
2180
29
    }
2181
29
    if (uc->qemu_host_page_size < TARGET_PAGE_SIZE) {
2182
0
        uc->qemu_host_page_size = TARGET_PAGE_SIZE;
2183
0
    }
2184
29
}
page_size_init_mips
Line
Count
Source
2175
7.41k
{
2176
    /* NOTE: we can always suppose that qemu_host_page_size >=
2177
       TARGET_PAGE_SIZE */
2178
7.41k
    if (uc->qemu_host_page_size == 0) {
2179
7.41k
        uc->qemu_host_page_size = uc->qemu_real_host_page_size;
2180
7.41k
    }
2181
7.41k
    if (uc->qemu_host_page_size < TARGET_PAGE_SIZE) {
2182
0
        uc->qemu_host_page_size = TARGET_PAGE_SIZE;
2183
0
    }
2184
7.41k
}
page_size_init_mipsel
Line
Count
Source
2175
8.94k
{
2176
    /* NOTE: we can always suppose that qemu_host_page_size >=
2177
       TARGET_PAGE_SIZE */
2178
8.94k
    if (uc->qemu_host_page_size == 0) {
2179
8.94k
        uc->qemu_host_page_size = uc->qemu_real_host_page_size;
2180
8.94k
    }
2181
8.94k
    if (uc->qemu_host_page_size < TARGET_PAGE_SIZE) {
2182
0
        uc->qemu_host_page_size = TARGET_PAGE_SIZE;
2183
0
    }
2184
8.94k
}
Unexecuted instantiation: page_size_init_mips64
Unexecuted instantiation: page_size_init_mips64el
page_size_init_sparc
Line
Count
Source
2175
46
{
2176
    /* NOTE: we can always suppose that qemu_host_page_size >=
2177
       TARGET_PAGE_SIZE */
2178
46
    if (uc->qemu_host_page_size == 0) {
2179
46
        uc->qemu_host_page_size = uc->qemu_real_host_page_size;
2180
46
    }
2181
46
    if (uc->qemu_host_page_size < TARGET_PAGE_SIZE) {
2182
0
        uc->qemu_host_page_size = TARGET_PAGE_SIZE;
2183
0
    }
2184
46
}
Unexecuted instantiation: page_size_init_sparc64
Unexecuted instantiation: page_size_init_ppc
Unexecuted instantiation: page_size_init_ppc64
Unexecuted instantiation: page_size_init_riscv32
Unexecuted instantiation: page_size_init_riscv64
page_size_init_s390x
Line
Count
Source
2175
21.9k
{
2176
    /* NOTE: we can always suppose that qemu_host_page_size >=
2177
       TARGET_PAGE_SIZE */
2178
21.9k
    if (uc->qemu_host_page_size == 0) {
2179
21.9k
        uc->qemu_host_page_size = uc->qemu_real_host_page_size;
2180
21.9k
    }
2181
21.9k
    if (uc->qemu_host_page_size < TARGET_PAGE_SIZE) {
2182
0
        uc->qemu_host_page_size = TARGET_PAGE_SIZE;
2183
0
    }
2184
21.9k
}
Unexecuted instantiation: page_size_init_tricore