/src/ffmpeg/libavutil/x86/cpu.c
Line | Count | Source |
1 | | /* |
2 | | * CPU detection code, extracted from mmx.h |
3 | | * (c)1997-99 by H. Dietz and R. Fisher |
4 | | * Converted to C and improved by Fabrice Bellard. |
5 | | * |
6 | | * This file is part of FFmpeg. |
7 | | * |
8 | | * FFmpeg is free software; you can redistribute it and/or |
9 | | * modify it under the terms of the GNU Lesser General Public |
10 | | * License as published by the Free Software Foundation; either |
11 | | * version 2.1 of the License, or (at your option) any later version. |
12 | | * |
13 | | * FFmpeg is distributed in the hope that it will be useful, |
14 | | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
15 | | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
16 | | * Lesser General Public License for more details. |
17 | | * |
18 | | * You should have received a copy of the GNU Lesser General Public |
19 | | * License along with FFmpeg; if not, write to the Free Software |
20 | | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA |
21 | | */ |
22 | | |
23 | | #include <stdlib.h> |
24 | | #include <string.h> |
25 | | |
26 | | #include "libavutil/x86/asm.h" |
27 | | #include "libavutil/x86/cpu.h" |
28 | | #include "libavutil/cpu.h" |
29 | | #include "libavutil/cpu_internal.h" |
30 | | |
#if HAVE_X86ASM

/* x86asm build: CPUID is executed by a helper implemented in external
 * assembly; this macro just forwards the output registers by address. */
#define cpuid(index, eax, ebx, ecx, edx) \
ff_cpu_cpuid(index, &eax, &ebx, &ecx, &edx)

/* Read an extended control register (XCR); index 0 holds the XSAVE
 * state-enable bits used for the AVX/AVX-512 OS-support checks below. */
#define xgetbv(index, eax, edx) \
ff_cpu_xgetbv(index, &eax, &edx)

#elif HAVE_INLINE_ASM

/* ebx saving is necessary for PIC. gcc seems unable to see it alone */
#define cpuid(index, eax, ebx, ecx, edx) \
__asm__ volatile ( \
"mov %%"FF_REG_b", %%"FF_REG_S" \n\t" \
"cpuid \n\t" \
"xchg %%"FF_REG_b", %%"FF_REG_S \
: "=a" (eax), "=S" (ebx), "=c" (ecx), "=d" (edx) \
: "0" (index), "2"(0))

/* XGETBV encoded as raw opcode bytes so assemblers predating AVX accept it. */
#define xgetbv(index, eax, edx) \
__asm__ (".byte 0x0f, 0x01, 0xd0" : "=a"(eax), "=d"(edx) : "c" (index))

/* Read the 32-bit EFLAGS register (ia32 only; used by cpuid_test()). */
#define get_eflags(x) \
__asm__ volatile ("pushfl \n" \
"pop %0 \n" \
: "=r"(x))

/* Write the 32-bit EFLAGS register (ia32 only; used by cpuid_test()). */
#define set_eflags(x) \
__asm__ volatile ("push %0 \n" \
"popfl \n" \
:: "r"(x))

#endif /* HAVE_INLINE_ASM */
64 | | |
#if ARCH_X86_64

/* Every x86-64 CPU implements CPUID, so no runtime probe is needed. */
#define cpuid_test() 1

#elif HAVE_X86ASM

/* The EFLAGS-based probe is implemented in external assembly. */
#define cpuid_test ff_cpu_cpuid_test

#elif HAVE_INLINE_ASM

/**
 * Check whether this ia32 CPU implements the CPUID instruction.
 * @return non-zero if CPUID is available
 */
static int cpuid_test(void)
{
    x86_reg a, c;

    /* Check if CPUID is supported by attempting to toggle the ID bit in
     * the EFLAGS register (bit 21, mask 0x200000). */
    get_eflags(a);
    set_eflags(a ^ 0x200000);
    get_eflags(c);

    /* If the bit changed, the CPU honours the ID flag and supports CPUID. */
    return a != c;
}
#endif
88 | | |
/* Function to test if multimedia instructions are supported...
 * Returns a bitmask of AV_CPU_FLAG_* values: ISA capability bits read via
 * CPUID, gated by OS state-save support (XGETBV) for AVX and wider, plus
 * model-specific "slow" hint flags for known CPU families. */
int ff_get_cpu_flags_x86(void)
{
    int rval = 0;

#ifdef cpuid

    int eax, ebx, ecx, edx;
    int max_std_level, max_ext_level, std_caps = 0, ext_caps = 0;
    int family = 0, model = 0;
    union { int i[3]; char c[12]; } vendor; /* 12-byte CPUID vendor string */
    int xcr0_lo = 0, xcr0_hi = 0;

    if (!cpuid_test())
        return 0; /* CPUID not supported */

    /* Leaf 0: highest standard leaf in EAX; the vendor string comes back in
     * EBX:EDX:ECX order, hence the permuted vendor.i[] destinations. */
    cpuid(0, max_std_level, vendor.i[0], vendor.i[2], vendor.i[1]);

    if (max_std_level >= 1) {
        /* Leaf 1: family/model in EAX, feature bits in ECX and EDX
         * (EDX lands in std_caps). */
        cpuid(1, eax, ebx, ecx, std_caps);
        family = ((eax >> 8) & 0xf) + ((eax >> 20) & 0xff); /* base + extended family */
        model  = ((eax >> 4) & 0xf) + ((eax >> 12) & 0xf0); /* base + extended model */
        if (std_caps & (1 << 15))
            rval |= AV_CPU_FLAG_CMOV;
        if (std_caps & (1 << 23))
            rval |= AV_CPU_FLAG_MMX;
        if (std_caps & (1 << 25)) /* EDX bit 25 (SSE) also implies MMXEXT */
            rval |= AV_CPU_FLAG_MMXEXT;
#if HAVE_SSE
        if (std_caps & (1 << 25))
            rval |= AV_CPU_FLAG_SSE;
        if (std_caps & (1 << 26))
            rval |= AV_CPU_FLAG_SSE2;
        if (ecx & 1)
            rval |= AV_CPU_FLAG_SSE3;
        if (ecx & 0x2)
            rval |= AV_CPU_FLAG_CLMUL;
        if (ecx & 0x00000200 )
            rval |= AV_CPU_FLAG_SSSE3;
        if (ecx & 0x00080000 )
            rval |= AV_CPU_FLAG_SSE4;
        if (ecx & 0x00100000 )
            rval |= AV_CPU_FLAG_SSE42;
        if (ecx & 0x02000000 )
            rval |= AV_CPU_FLAG_AESNI;
#if HAVE_AVX
        /* Check OSXSAVE (bit 27) and AVX (bit 28) bits together */
        if ((ecx & 0x18000000) == 0x18000000) {
            /* Check for OS support: XCR0 bits 1 (SSE) and 2 (AVX) mean the
             * OS saves/restores YMM state on context switch. */
            xgetbv(0, xcr0_lo, xcr0_hi);
            if ((xcr0_lo & 0x6) == 0x6) {
                rval |= AV_CPU_FLAG_AVX;
                if (ecx & 0x00001000)
                    rval |= AV_CPU_FLAG_FMA3;
            }
        }
#endif /* HAVE_AVX */
#endif /* HAVE_SSE */
    }
    if (max_std_level >= 7) {
        /* Leaf 7: structured extended feature flags. */
        cpuid(7, eax, ebx, ecx, edx);
#if HAVE_AVX2
        if ((rval & AV_CPU_FLAG_AVX) && (ebx & 0x00000020))
            rval |= AV_CPU_FLAG_AVX2;
#if HAVE_AVX512 /* F, CD, BW, DQ, VL */
        /* xcr0_lo was filled by the leaf-1 AVX path above; it stays 0 if
         * that path did not run, so AVX512 is then never reported. */
        if ((xcr0_lo & 0xe0) == 0xe0) { /* OPMASK/ZMM state */
            if ((rval & AV_CPU_FLAG_AVX2) && (ebx & 0xd0030000) == 0xd0030000) {
                rval |= AV_CPU_FLAG_AVX512;
#if HAVE_AVX512ICL
                /* Ice Lake feature superset on top of baseline AVX512. */
                if ((ebx & 0xd0200000) == 0xd0200000 && (ecx & 0x5f42) == 0x5f42)
                    rval |= AV_CPU_FLAG_AVX512ICL;
#endif /* HAVE_AVX512ICL */
            }
        }
#endif /* HAVE_AVX512 */
#endif /* HAVE_AVX2 */
        /* BMI1/2 don't need OS support */
        if (ebx & 0x00000008) {
            rval |= AV_CPU_FLAG_BMI1;
            if (ebx & 0x00000100)
                rval |= AV_CPU_FLAG_BMI2;
        }
    }

    /* Extended leaf 0x80000000: highest extended leaf supported in EAX. */
    cpuid(0x80000000, max_ext_level, ebx, ecx, edx);

    if (max_ext_level >= 0x80000001) {
        /* Extended leaf 1: AMD-originated feature bits (EDX -> ext_caps). */
        cpuid(0x80000001, eax, ebx, ecx, ext_caps);
        if (ext_caps & (1U << 31)) /* 1U: bit 31 of a signed int would be UB */
            rval |= AV_CPU_FLAG_3DNOW;
        if (ext_caps & (1 << 30))
            rval |= AV_CPU_FLAG_3DNOWEXT;
        if (ext_caps & (1 << 23))
            rval |= AV_CPU_FLAG_MMX;
        if (ext_caps & (1 << 22))
            rval |= AV_CPU_FLAG_MMXEXT;

        if (!strncmp(vendor.c, "AuthenticAMD", 12)) {
            /* Allow for selectively disabling SSE2 functions on AMD processors
               with SSE2 support but not SSE4a. This includes Athlon64, some
               Opteron, and some Sempron processors. MMX, SSE, or 3DNow! are faster
               than SSE2 often enough to utilize this special-case flag.
               AV_CPU_FLAG_SSE2 and AV_CPU_FLAG_SSE2SLOW are both set in this case
               so that SSE2 is used unless explicitly disabled by checking
               AV_CPU_FLAG_SSE2SLOW. */
            if (rval & AV_CPU_FLAG_SSE2 && !(ecx & 0x00000040))
                rval |= AV_CPU_FLAG_SSE2SLOW;

            /* Similar to the above but for AVX functions on AMD processors.
               This is necessary only for functions using YMM registers on Bulldozer
               and Jaguar based CPUs as they lack 256-bit execution units. SSE/AVX
               functions using XMM registers are always faster on them.
               AV_CPU_FLAG_AVX and AV_CPU_FLAG_AVXSLOW are both set so that AVX is
               used unless explicitly disabled by checking AV_CPU_FLAG_AVXSLOW. */
            if ((family == 0x15 || family == 0x16) && (rval & AV_CPU_FLAG_AVX))
                rval |= AV_CPU_FLAG_AVXSLOW;

            /* Zen 3 and earlier have slow gather */
            if ((family <= 0x19) && (rval & AV_CPU_FLAG_AVX2))
                rval |= AV_CPU_FLAG_SLOW_GATHER;
        }

        /* XOP and FMA4 use the AVX instruction coding scheme, so they can't be
         * used unless the OS has AVX support. */
        if (rval & AV_CPU_FLAG_AVX) {
            if (ecx & 0x00000800)
                rval |= AV_CPU_FLAG_XOP;
            if (ecx & 0x00010000)
                rval |= AV_CPU_FLAG_FMA4;
        }
    }

    if (!strncmp(vendor.c, "GenuineIntel", 12)) {
        if (family == 6 && (model == 9 || model == 13 || model == 14)) {
            /* 6/9 (pentium-m "banias"), 6/13 (pentium-m "dothan"), and
             * 6/14 (core1 "yonah") theoretically support sse2, but it's
             * usually slower than mmx, so let's just pretend they don't.
             * AV_CPU_FLAG_SSE2 is disabled and AV_CPU_FLAG_SSE2SLOW is
             * enabled so that SSE2 is not used unless explicitly enabled
             * by checking AV_CPU_FLAG_SSE2SLOW. The same situation
             * applies for AV_CPU_FLAG_SSE3 and AV_CPU_FLAG_SSE3SLOW.
             * (The XOR clears the FAST bit and sets the SLOW bit.) */
            if (rval & AV_CPU_FLAG_SSE2)
                rval ^= AV_CPU_FLAG_SSE2SLOW | AV_CPU_FLAG_SSE2;
            if (rval & AV_CPU_FLAG_SSE3)
                rval ^= AV_CPU_FLAG_SSE3SLOW | AV_CPU_FLAG_SSE3;
        }
        /* The Atom processor has SSSE3 support, which is useful in many cases,
         * but sometimes the SSSE3 version is slower than the SSE2 equivalent
         * on the Atom, but is generally faster on other processors supporting
         * SSSE3. This flag allows for selectively disabling certain SSSE3
         * functions on the Atom. */
        if (family == 6 && model == 28)
            rval |= AV_CPU_FLAG_ATOM;

        /* Conroe has a slow shuffle unit. Check the model number to ensure not
         * to include crippled low-end Penryns and Nehalems that lack SSE4. */
        if ((rval & AV_CPU_FLAG_SSSE3) && !(rval & AV_CPU_FLAG_SSE4) &&
            family == 6 && model < 23)
            rval |= AV_CPU_FLAG_SSSE3SLOW;

        /* Ice Lake and below have slow gather due to Gather Data Sampling
         * mitigation. */
        if ((rval & AV_CPU_FLAG_AVX2) && family == 6 && model < 143)
            rval |= AV_CPU_FLAG_SLOW_GATHER;
    }

#endif /* cpuid */

    return rval;
}
259 | | |
260 | | size_t ff_get_cpu_max_align_x86(void) |
261 | 0 | { |
262 | 0 | int flags = av_get_cpu_flags(); |
263 | |
|
264 | 0 | if (flags & AV_CPU_FLAG_AVX512) |
265 | 0 | return 64; |
266 | 0 | if (flags & (AV_CPU_FLAG_AVX2 | |
267 | 0 | AV_CPU_FLAG_AVX | |
268 | 0 | AV_CPU_FLAG_XOP | |
269 | 0 | AV_CPU_FLAG_FMA4 | |
270 | 0 | AV_CPU_FLAG_FMA3 | |
271 | 0 | AV_CPU_FLAG_AVXSLOW)) |
272 | 0 | return 32; |
273 | 0 | if (flags & (AV_CPU_FLAG_AESNI | |
274 | 0 | AV_CPU_FLAG_SSE42 | |
275 | 0 | AV_CPU_FLAG_SSE4 | |
276 | 0 | AV_CPU_FLAG_SSSE3 | |
277 | 0 | AV_CPU_FLAG_SSE3 | |
278 | 0 | AV_CPU_FLAG_SSE2 | |
279 | 0 | AV_CPU_FLAG_SSE | |
280 | 0 | AV_CPU_FLAG_ATOM | |
281 | 0 | AV_CPU_FLAG_SSSE3SLOW | |
282 | 0 | AV_CPU_FLAG_SSE3SLOW | |
283 | 0 | AV_CPU_FLAG_SSE2SLOW)) |
284 | 0 | return 16; |
285 | | |
286 | 0 | return 8; |
287 | 0 | } |