/src/xen/tools/tests/x86_emulator/x86-emulate.h
Line | Count | Source |
1 | | #ifndef X86_EMULATE_H |
2 | | #define X86_EMULATE_H |
3 | | |
4 | | #include <assert.h> |
5 | | #include <stdbool.h> |
6 | | #include <stddef.h> |
7 | | #include <stdint.h> |
8 | | #include <stdlib.h> |
9 | | /* |
10 | | * Use of sse registers must be disabled prior to the definition of |
11 | | * always_inline functions that would use them (memcpy, memset, etc), |
12 | | * so do this as early as possible, aiming to be before any always_inline |
13 | | * functions that are used are declared. |
14 | | * Unfortunately, this cannot be done prior to inclusion of <stdlib.h> |
15 | | * due to functions such as 'atof' that have SSE register return declared, |
16 | | * so do so here, immediately after that. |
17 | | */ |
18 | | #if __GNUC__ >= 6 |
19 | | # pragma GCC target("no-sse") |
20 | | #endif |
21 | | /* |
22 | | * Attempt detection of unwanted prior inclusion of some headers known to use |
23 | | * always_inline with SSE registers in some library / compiler / optimization |
24 | | * combinations. |
25 | | */ |
26 | | #ifdef _STRING_H |
27 | | # error "Must not include <string.h> before x86-emulate.h" |
28 | | #endif |
29 | | #include <string.h> |
30 | | |
31 | | /* EOF is a standard macro defined in <stdio.h> so use it for detection */ |
32 | | #ifdef EOF |
33 | | # error "Must not include <stdio.h> before x86-emulate.h" |
34 | | #endif |
35 | | #include <stdio.h> |
36 | | |
37 | | #include <xen/xen.h> |
38 | | |
39 | | #include <xen/asm/msr-index.h> |
40 | | #include <xen/asm/x86-defns.h> |
41 | | #include <xen/asm/x86-vendors.h> |
42 | | |
43 | | #include <xen-tools/common-macros.h> |
44 | | |
45 | 3.38M | #define ASSERT assert |
46 | 0 | #define ASSERT_UNREACHABLE() assert(!__LINE__) |
47 | | |
48 | | #define DEFINE_PER_CPU(type, var) type per_cpu_##var |
49 | 0 | #define this_cpu(var) per_cpu_##var |
50 | | |
51 | | #define __init |
52 | | #define __maybe_unused __attribute__((__unused__)) |
53 | | |
54 | 83.7k | #define likely(x) __builtin_expect(!!(x), true) |
55 | 3.83M | #define unlikely(x) __builtin_expect(!!(x), false) |
56 | | |
57 | | #define cf_check /* No Control Flow Integrity checking */ |
58 | | |
59 | | /* |
60 | | * Pseudo keyword 'fallthrough' to make explicit the fallthrough intention at |
61 | | * the end of a case statement block. |
62 | | */ |
63 | | #if !defined(__clang__) && (__GNUC__ >= 7) |
64 | | # define fallthrough __attribute__((__fallthrough__)) |
65 | | #else |
66 | 3.80k | # define fallthrough do {} while (0) /* fallthrough */ |
67 | | #endif |
68 | | |
69 | | #ifdef __GCC_ASM_FLAG_OUTPUTS__ |
70 | | # define ASM_FLAG_OUT(yes, no) yes |
71 | | #else |
72 | | # define ASM_FLAG_OUT(yes, no) no |
73 | | #endif |
74 | | |
75 | | #define hweight32 __builtin_popcount |
76 | 0 | #define hweight64 __builtin_popcountll |
77 | | |
78 | 45 | #define is_canonical_address(x) (((int64_t)(x) >> 47) == ((int64_t)(x) >> 63)) |
79 | | |
80 | | static inline void *place_ret(void *ptr) |
81 | 163k | { |
82 | 163k | *(uint8_t *)ptr = 0xc3; |
83 | 163k | return ptr + 1; |
84 | 163k | } Unexecuted instantiation: fuzz-emul.c:place_ret Line | Count | Source | 81 | 105k | { | 82 | 105k | *(uint8_t *)ptr = 0xc3; | 83 | 105k | return ptr + 1; | 84 | 105k | } |
Unexecuted instantiation: 0f01.c:place_ret Unexecuted instantiation: 0fae.c:place_ret Unexecuted instantiation: 0fc7.c:place_ret Unexecuted instantiation: decode.c:place_ret Line | Count | Source | 81 | 57.7k | { | 82 | 57.7k | *(uint8_t *)ptr = 0xc3; | 83 | 57.7k | return ptr + 1; | 84 | 57.7k | } |
Unexecuted instantiation: wrappers.c:place_ret |
85 | | |
86 | | extern uint32_t mxcsr_mask; |
87 | | extern struct cpu_policy cpu_policy; |
88 | | |
89 | 4 | #define MMAP_SZ 16384 |
90 | | bool emul_test_init(void); |
91 | | |
92 | | /* Must save and restore FPU state between any call into libc. */ |
93 | | void emul_save_fpu_state(void); |
94 | | void emul_restore_fpu_state(void); |
95 | | |
96 | | struct x86_fxsr *get_fpu_save_area(void); |
97 | | |
98 | | /* |
99 | | * In order to reasonably use the above, wrap library calls we use and which we |
100 | | * think might access any of the FPU state into wrappers saving/restoring state |
101 | | * around the actual function. |
102 | | */ |
103 | | #ifndef WRAP |
104 | | # define WRAP(x) typeof(x) __wrap_ ## x |
105 | | #endif |
106 | | |
107 | | WRAP(fwrite); |
108 | | WRAP(memcmp); |
109 | | WRAP(memcpy); |
110 | | WRAP(memset); |
111 | | WRAP(printf); |
112 | | WRAP(putchar); |
113 | | WRAP(puts); |
114 | | WRAP(snprintf); |
115 | | WRAP(strstr); |
116 | | WRAP(vprintf); |
117 | | WRAP(vsnprintf); |
118 | | |
119 | | #undef WRAP |
120 | | |
121 | | #include "x86_emulate/x86_emulate.h" |
122 | | |
123 | | void evex_disp8_test(void *instr, struct x86_emulate_ctxt *ctxt, |
124 | | const struct x86_emulate_ops *ops); |
125 | | void predicates_test(void *instr, struct x86_emulate_ctxt *ctxt, |
126 | | int (*fetch)(unsigned long offset, |
127 | | void *p_data, |
128 | | unsigned int bytes, |
129 | | struct x86_emulate_ctxt *ctxt)); |
130 | | |
131 | | static inline uint64_t xgetbv(uint32_t xcr) /* Read extended control register 'xcr' via the XGETBV instruction. */ |
132 | 41.1k | { |
133 | 41.1k | uint32_t lo, hi; |
134 | | |
135 | 41.1k | asm ( ".byte 0x0f, 0x01, 0xd0" : "=a" (lo), "=d" (hi) : "c" (xcr) ); /* 0f 01 d0 = XGETBV encoding, for assemblers without the mnemonic; ECX selects the XCR, result in EDX:EAX */ |
136 | | |
137 | 41.1k | return ((uint64_t)hi << 32) | lo; /* recombine EDX:EAX into one 64-bit value */ |
138 | 41.1k | } Unexecuted instantiation: fuzz-emul.c:xgetbv Line | Count | Source | 132 | 41.1k | { | 133 | 41.1k | uint32_t lo, hi; | 134 | | | 135 | 41.1k | asm ( ".byte 0x0f, 0x01, 0xd0" : "=a" (lo), "=d" (hi) : "c" (xcr) ); | 136 | | | 137 | 41.1k | return ((uint64_t)hi << 32) | lo; | 138 | 41.1k | } |
Unexecuted instantiation: 0f01.c:xgetbv Unexecuted instantiation: 0fae.c:xgetbv Unexecuted instantiation: 0fc7.c:xgetbv Unexecuted instantiation: decode.c:xgetbv Unexecuted instantiation: fpu.c:xgetbv Unexecuted instantiation: wrappers.c:xgetbv |
139 | | |
140 | | /* Intentionally checking OSXSAVE here. */ |
141 | 82.2k | #define cpu_has_xsave (cpu_policy.basic.raw[1].c & (1u << 27)) |
142 | | |
143 | | static inline bool xcr0_mask(uint64_t mask) /* True iff OSXSAVE is advertised (cpu_has_xsave) and every bit of 'mask' is set in XCR0. */ |
144 | 41.1k | { |
145 | 41.1k | return cpu_has_xsave && ((xgetbv(0) & mask) == mask); /* short-circuits before XGETBV when XSAVE is unavailable */ |
146 | 41.1k | } Unexecuted instantiation: fuzz-emul.c:xcr0_mask Line | Count | Source | 144 | 41.1k | { | 145 | 41.1k | return cpu_has_xsave && ((xgetbv(0) & mask) == mask); | 146 | 41.1k | } |
Unexecuted instantiation: 0f01.c:xcr0_mask Unexecuted instantiation: 0fae.c:xcr0_mask Unexecuted instantiation: 0fc7.c:xcr0_mask Unexecuted instantiation: decode.c:xcr0_mask Unexecuted instantiation: fpu.c:xcr0_mask Unexecuted instantiation: wrappers.c:xcr0_mask |
147 | | |
148 | | unsigned int rdpkru(void); |
149 | | void wrpkru(unsigned int val); |
150 | | |
151 | | #define cache_line_size() (cpu_policy.basic.clflush_size * 8) |
152 | | #define cpu_has_fpu cpu_policy.basic.fpu |
153 | 17.9k | #define cpu_has_mmx cpu_policy.basic.mmx |
154 | | #define cpu_has_fxsr cpu_policy.basic.fxsr |
155 | 43.9k | #define cpu_has_sse cpu_policy.basic.sse |
156 | | #define cpu_has_sse2 cpu_policy.basic.sse2 |
157 | | #define cpu_has_sse3 cpu_policy.basic.sse3 |
158 | | #define cpu_has_pclmulqdq cpu_policy.basic.pclmulqdq |
159 | | #define cpu_has_ssse3 cpu_policy.basic.ssse3 |
160 | | #define cpu_has_fma (cpu_policy.basic.fma && xcr0_mask(6)) |
161 | | #define cpu_has_sse4_1 cpu_policy.basic.sse4_1 |
162 | | #define cpu_has_sse4_2 cpu_policy.basic.sse4_2 |
163 | | #define cpu_has_popcnt cpu_policy.basic.popcnt |
164 | | #define cpu_has_aesni cpu_policy.basic.aesni |
165 | 40.6k | #define cpu_has_avx (cpu_policy.basic.avx && xcr0_mask(6)) |
166 | | #define cpu_has_f16c (cpu_policy.basic.f16c && xcr0_mask(6)) |
167 | | |
168 | | #define cpu_has_avx2 (cpu_policy.feat.avx2 && xcr0_mask(6)) |
169 | | #define cpu_has_bmi1 cpu_policy.feat.bmi1 |
170 | | #define cpu_has_bmi2 cpu_policy.feat.bmi2 |
171 | 0 | #define cpu_has_avx512f (cpu_policy.feat.avx512f && \ |
172 | 0 | xcr0_mask(0xe6)) |
173 | | #define cpu_has_avx512dq (cpu_policy.feat.avx512dq && \ |
174 | | xcr0_mask(0xe6)) |
175 | | #define cpu_has_avx512_ifma (cpu_policy.feat.avx512_ifma && \ |
176 | | xcr0_mask(0xe6)) |
177 | | #define cpu_has_avx512cd (cpu_policy.feat.avx512cd && \ |
178 | | xcr0_mask(0xe6)) |
179 | | #define cpu_has_sha cpu_policy.feat.sha |
180 | 0 | #define cpu_has_avx512bw (cpu_policy.feat.avx512bw && \ |
181 | 0 | xcr0_mask(0xe6)) |
182 | | #define cpu_has_avx512vl (cpu_policy.feat.avx512vl && \ |
183 | | xcr0_mask(0xe6)) |
184 | | #define cpu_has_avx512_vbmi (cpu_policy.feat.avx512_vbmi && \ |
185 | | xcr0_mask(0xe6)) |
186 | | #define cpu_has_avx512_vbmi2 (cpu_policy.feat.avx512_vbmi2 && \ |
187 | | xcr0_mask(0xe6)) |
188 | | #define cpu_has_gfni cpu_policy.feat.gfni |
189 | | #define cpu_has_vaes (cpu_policy.feat.vaes && xcr0_mask(6)) |
190 | | #define cpu_has_vpclmulqdq (cpu_policy.feat.vpclmulqdq && xcr0_mask(6)) |
191 | | #define cpu_has_avx512_vnni (cpu_policy.feat.avx512_vnni && \ |
192 | | xcr0_mask(0xe6)) |
193 | | #define cpu_has_avx512_bitalg (cpu_policy.feat.avx512_bitalg && \ |
194 | | xcr0_mask(0xe6)) |
195 | | #define cpu_has_avx512_vpopcntdq (cpu_policy.feat.avx512_vpopcntdq && \ |
196 | | xcr0_mask(0xe6)) |
197 | | #define cpu_has_movdiri cpu_policy.feat.movdiri |
198 | | #define cpu_has_movdir64b cpu_policy.feat.movdir64b |
199 | | #define cpu_has_avx512_vp2intersect (cpu_policy.feat.avx512_vp2intersect && \ |
200 | | xcr0_mask(0xe6)) |
201 | | #define cpu_has_serialize cpu_policy.feat.serialize |
202 | | #define cpu_has_avx512_fp16 (cpu_policy.feat.avx512_fp16 && \ |
203 | | xcr0_mask(0xe6)) |
204 | | #define cpu_has_sha512 (cpu_policy.feat.sha512 && xcr0_mask(6)) |
205 | | #define cpu_has_sm3 (cpu_policy.feat.sm3 && xcr0_mask(6)) |
206 | | #define cpu_has_sm4 (cpu_policy.feat.sm4 && xcr0_mask(6)) |
207 | | #define cpu_has_avx_vnni (cpu_policy.feat.avx_vnni && xcr0_mask(6)) |
208 | | #define cpu_has_avx512_bf16 (cpu_policy.feat.avx512_bf16 && \ |
209 | | xcr0_mask(0xe6)) |
210 | | #define cpu_has_cmpccxadd cpu_policy.feat.cmpccxadd |
211 | | #define cpu_has_avx_ifma (cpu_policy.feat.avx_ifma && xcr0_mask(6)) |
212 | | #define cpu_has_avx_vnni_int8 (cpu_policy.feat.avx_vnni_int8 && \ |
213 | | xcr0_mask(6)) |
214 | | #define cpu_has_avx_ne_convert (cpu_policy.feat.avx_ne_convert && \ |
215 | | xcr0_mask(6)) |
216 | | #define cpu_has_avx_vnni_int16 (cpu_policy.feat.avx_vnni_int16 && \ |
217 | | xcr0_mask(6)) |
218 | | |
219 | 1 | #define cpu_has_xgetbv1 (cpu_has_xsave && cpu_policy.xstate.xgetbv1) |
220 | | |
221 | | #define cpu_has_3dnow_ext cpu_policy.extd._3dnowext |
222 | | #define cpu_has_sse4a cpu_policy.extd.sse4a |
223 | | #define cpu_has_xop (cpu_policy.extd.xop && xcr0_mask(6)) |
224 | | #define cpu_has_fma4 (cpu_policy.extd.fma4 && xcr0_mask(6)) |
225 | | #define cpu_has_tbm cpu_policy.extd.tbm |
226 | | #define cpu_has_avx512_bmm (cpu_policy.extd.avx512_bmm && \ |
227 | | xcr0_mask(0xe6)) |
228 | | |
229 | | int emul_test_cpuid( |
230 | | uint32_t leaf, |
231 | | uint32_t subleaf, |
232 | | struct cpuid_leaf *res, |
233 | | struct x86_emulate_ctxt *ctxt); |
234 | | |
235 | | int emul_test_read_cr( |
236 | | unsigned int reg, |
237 | | unsigned long *val, |
238 | | struct x86_emulate_ctxt *ctxt); |
239 | | |
240 | | int emul_test_read_xcr( |
241 | | unsigned int reg, |
242 | | uint64_t *val, |
243 | | struct x86_emulate_ctxt *ctxt); |
244 | | |
245 | | int emul_test_get_fpu( |
246 | | enum x86_emulate_fpu_type type, |
247 | | struct x86_emulate_ctxt *ctxt); |
248 | | |
249 | | void emul_test_put_fpu( |
250 | | struct x86_emulate_ctxt *ctxt, |
251 | | enum x86_emulate_fpu_type backout, |
252 | | const struct x86_emul_fpu_aux *aux); |
253 | | |
254 | | #endif /* X86_EMULATE_H */ |