/src/libavif/ext/libyuv/source/cpu_id.cc
/*
 * Copyright 2011 The LibYuv Project Authors. All rights reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#include "libyuv/cpu_id.h"

#if defined(_MSC_VER)
#include <intrin.h>  // For __cpuidex()
#endif
#if !defined(__pnacl__) && !defined(__CLR_VER) && \
    !defined(__native_client__) && (defined(_M_IX86) || defined(_M_X64)) && \
    defined(_MSC_FULL_VER) && (_MSC_FULL_VER >= 160040219)
#include <immintrin.h>  // For _xgetbv()
#endif

// For ArmCpuCaps(), but unit-tested on all platforms.
#include <stdio.h>  // For fopen()
#include <string.h>

#if defined(__linux__) && defined(__aarch64__)
#include <sys/auxv.h>  // For getauxval()
#endif

#if defined(_WIN32) && defined(__aarch64__)
#undef WIN32_LEAN_AND_MEAN
#define WIN32_LEAN_AND_MEAN
#undef WIN32_EXTRA_LEAN
#define WIN32_EXTRA_LEAN
#include <windows.h>  // For IsProcessorFeaturePresent()
#endif

#if defined(__APPLE__) && defined(__aarch64__)
#include <sys/sysctl.h>  // For sysctlbyname()
#endif

#ifdef __cplusplus
namespace libyuv {
extern "C" {
#endif

// For functions that use the stack and have runtime checks for overflow,
// use SAFEBUFFERS to avoid the additional check.
#if defined(_MSC_FULL_VER) && (_MSC_FULL_VER >= 160040219) && \
    !defined(__clang__)
#define SAFEBUFFERS __declspec(safebuffers)
#else
#define SAFEBUFFERS
#endif

// cpu_info_ holds the set of SIMD instruction sets detected.
LIBYUV_API int cpu_info_ = 0;

// Low-level cpuid for x86.
#if (defined(_M_IX86) || defined(_M_X64) || defined(__i386__) || \
     defined(__x86_64__)) && \
    !defined(__pnacl__) && !defined(__CLR_VER)
LIBYUV_API
void CpuId(int info_eax, int info_ecx, int* cpu_info) {
#if defined(_MSC_VER)
// Visual C version uses intrinsic or inline x86 assembly.
#if defined(_MSC_FULL_VER) && (_MSC_FULL_VER >= 160040219)
  __cpuidex(cpu_info, info_eax, info_ecx);
#elif defined(_M_IX86)
  __asm {
    mov eax, info_eax
    mov ecx, info_ecx
    mov edi, cpu_info
    cpuid
    mov [edi], eax
    mov [edi + 4], ebx
    mov [edi + 8], ecx
    mov [edi + 12], edx
  }
#else  // Visual C but not x86
  if (info_ecx == 0) {
    __cpuid(cpu_info, info_eax);
  } else {
    cpu_info[3] = cpu_info[2] = cpu_info[1] = cpu_info[0] = 0u;
  }
#endif
// GCC version uses inline x86 assembly.
#else  // defined(_MSC_VER)
  int info_ebx, info_edx;
  asm volatile(
#if defined(__i386__) && defined(__PIC__)
      // Preserve ebx for PIC 32-bit builds.
      "mov %%ebx, %%edi \n"
      "cpuid \n"
      "xchg %%edi, %%ebx \n"
      : "=D"(info_ebx),
#else
      "cpuid \n"
      : "=b"(info_ebx),
#endif  // defined(__i386__) && defined(__PIC__)
        "+a"(info_eax), "+c"(info_ecx), "=d"(info_edx));
  cpu_info[0] = info_eax;
  cpu_info[1] = info_ebx;
  cpu_info[2] = info_ecx;
  cpu_info[3] = info_edx;
#endif  // defined(_MSC_VER)
}
#else  // (defined(_M_IX86) || defined(_M_X64) ...
LIBYUV_API
void CpuId(int eax, int ecx, int* cpu_info) {
  (void)eax;
  (void)ecx;
  cpu_info[0] = cpu_info[1] = cpu_info[2] = cpu_info[3] = 0;
}
#endif

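// Illustrative sketch (not part of upstream libyuv; the Example* name is
// hypothetical): reading the 12-byte CPU vendor string with CpuId(). Leaf 0
// returns the vendor in EBX, EDX, ECX, which land in cpu_info[1],
// cpu_info[3] and cpu_info[2] respectively.
//   static void ExampleCpuVendor(char vendor[13]) {
//     int regs[4];  // EAX, EBX, ECX, EDX.
//     CpuId(0, 0, regs);
//     memcpy(vendor + 0, &regs[1], 4);  // EBX, e.g. "Genu"
//     memcpy(vendor + 4, &regs[3], 4);  // EDX, e.g. "ineI"
//     memcpy(vendor + 8, &regs[2], 4);  // ECX, e.g. "ntel"
//     vendor[12] = '\0';
//   }
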
// For VS2010 and earlier, _emit can be used:
//   _asm _emit 0x0f _asm _emit 0x01 _asm _emit 0xd0  // xgetbv
//   __asm {
//     xor ecx, ecx  // xcr 0
//     xgetbv
//     mov xcr0, eax
//   }
// For 32-bit builds with VS2013 and earlier, the optimizer produces bad code
// for _xgetbv(0).
// https://code.google.com/p/libyuv/issues/detail?id=529
#if defined(_M_IX86) && defined(_MSC_VER) && (_MSC_VER < 1900)
#pragma optimize("g", off)
#endif
#if (defined(_M_IX86) || defined(_M_X64) || defined(__i386__) || \
     defined(__x86_64__)) && \
    !defined(__pnacl__) && !defined(__CLR_VER) && !defined(__native_client__)
// x86 CPUs provide xgetbv to detect whether the OS saves the high parts of
// the YMM registers.
static int GetXCR0() {
  int xcr0 = 0;
#if defined(_MSC_FULL_VER) && (_MSC_FULL_VER >= 160040219)
  xcr0 = (int)_xgetbv(0);  // VS2010 SP1 required.  NOLINT
#elif defined(__i386__) || defined(__x86_64__)
  asm(".byte 0x0f, 0x01, 0xd0" : "=a"(xcr0) : "c"(0) : "%edx");
#endif  // defined(__i386__) || defined(__x86_64__)
  return xcr0;
}
#else
// xgetbv is unavailable to query for OS save support. Return 0.
#define GetXCR0() 0
#endif  // defined(_M_IX86) || defined(_M_X64) ..
// Return optimization to previous setting.
#if defined(_M_IX86) && defined(_MSC_VER) && (_MSC_VER < 1900)
#pragma optimize("g", on)
#endif

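// Illustrative sketch (not part of upstream libyuv; the Example* names are
// hypothetical): the XCR0 tests used by GetCpuFlags() below. Bits 1 and 2
// (XMM and YMM state) must both be set before AVX results can be trusted;
// bits 5-7 (opmask and ZMM state) additionally gate AVX-512.
//   static int ExampleOsSavesYmm(void) {
//     return (GetXCR0() & 6) == 6;  // XMM | YMM state saved by the OS.
//   }
//   static int ExampleOsSavesZmm(void) {
//     return (GetXCR0() & 0xe0) == 0xe0;  // opmask, ZMM_Hi256, Hi16_ZMM.
//   }

// Returns nonzero if needle occurs in cpuinfo_line as a whole word, i.e. is
// followed by a space or newline; e.g. " neon" matches
// "Features : fp asimd neon\n" but not a "neonfp" token.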
static int cpuinfo_search(const char* cpuinfo_line,
                          const char* needle,
                          int needle_len) {
  const char* p = strstr(cpuinfo_line, needle);
  return p && (p[needle_len] == ' ' || p[needle_len] == '\n');
}

// Based on libvpx arm_cpudetect.c.
// For Arm, but public to allow testing on any CPU.
LIBYUV_API SAFEBUFFERS int ArmCpuCaps(const char* cpuinfo_name) {
  char cpuinfo_line[512];
  FILE* f = fopen(cpuinfo_name, "re");
  if (!f) {
    // Assume Neon if /proc/cpuinfo is unavailable.
    // This occurs in the Chrome sandbox for the Pepper or renderer process.
    return kCpuHasNEON;
  }
  memset(cpuinfo_line, 0, sizeof(cpuinfo_line));
  int features = 0;
  while (fgets(cpuinfo_line, sizeof(cpuinfo_line), f)) {
    if (memcmp(cpuinfo_line, "Features", 8) == 0) {
      if (cpuinfo_search(cpuinfo_line, " neon", 5)) {
        features |= kCpuHasNEON;
      }
    }
  }
  fclose(f);
  return features;
}

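// Illustrative sketch (not part of upstream libyuv; the Example* name is
// hypothetical): because ArmCpuCaps() takes the cpuinfo path as a parameter,
// the parser can be unit-tested on any CPU against a fixture file.
//   static int ExampleHasNeon(const char* fixture_path) {
//     return (ArmCpuCaps(fixture_path) & kCpuHasNEON) != 0;
//   }
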
#ifdef __aarch64__
#ifdef __linux__
// Define hwcap values ourselves: building with an old auxv header where these
// hwcap values are not defined should not prevent features from being enabled.
#define YUV_AARCH64_HWCAP_ASIMDDP (1UL << 20)
#define YUV_AARCH64_HWCAP_SVE (1UL << 22)
#define YUV_AARCH64_HWCAP2_SVE2 (1UL << 1)
#define YUV_AARCH64_HWCAP2_I8MM (1UL << 13)
#define YUV_AARCH64_HWCAP2_SME (1UL << 23)
#define YUV_AARCH64_HWCAP2_SME2 (1UL << 37)

// For AArch64, but public to allow testing on any CPU.
LIBYUV_API SAFEBUFFERS int AArch64CpuCaps(unsigned long hwcap,
                                          unsigned long hwcap2) {
  // Neon is mandatory on AArch64, so enable regardless of hwcaps.
  int features = kCpuHasNEON;

  // Don't try to enable later extensions unless earlier extensions are also
  // reported available. Some of these constraints aren't strictly required by
  // the architecture, but are satisfied by all micro-architectures of
  // interest. This also avoids an issue on some emulators where true
  // architectural constraints are not satisfied, e.g. SVE2 may be reported as
  // available while SVE is not.
  if (hwcap & YUV_AARCH64_HWCAP_ASIMDDP) {
    features |= kCpuHasNeonDotProd;
    if (hwcap2 & YUV_AARCH64_HWCAP2_I8MM) {
      features |= kCpuHasNeonI8MM;
      if (hwcap & YUV_AARCH64_HWCAP_SVE) {
        features |= kCpuHasSVE;
        if (hwcap2 & YUV_AARCH64_HWCAP2_SVE2) {
          features |= kCpuHasSVE2;
        }
      }
      // SME may be present without SVE.
      if (hwcap2 & YUV_AARCH64_HWCAP2_SME) {
        features |= kCpuHasSME;
        if (hwcap2 & YUV_AARCH64_HWCAP2_SME2) {
          features |= kCpuHasSME2;
        }
      }
    }
  }
  return features;
}
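
// Illustrative sketch (not part of upstream libyuv; the Example* name is
// hypothetical): since the hwcap words are plain parameters, the decoding
// above can be exercised on any machine with synthetic values. A CPU
// reporting DotProd and I8MM but neither SVE nor SME:
//   static int ExampleSyntheticCaps(void) {
//     // Returns kCpuHasNEON | kCpuHasNeonDotProd | kCpuHasNeonI8MM.
//     return AArch64CpuCaps(YUV_AARCH64_HWCAP_ASIMDDP,
//                           YUV_AARCH64_HWCAP2_I8MM);
//   }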

#elif defined(_WIN32)
// For AArch64, but public to allow testing on any CPU.
LIBYUV_API SAFEBUFFERS int AArch64CpuCaps() {
  // Neon is mandatory on AArch64, so enable unconditionally.
  int features = kCpuHasNEON;

  // For more information on IsProcessorFeaturePresent(), see:
  // https://learn.microsoft.com/en-us/windows/win32/api/processthreadsapi/nf-processthreadsapi-isprocessorfeaturepresent#parameters
#ifdef PF_ARM_V82_DP_INSTRUCTIONS_AVAILABLE
  if (IsProcessorFeaturePresent(PF_ARM_V82_DP_INSTRUCTIONS_AVAILABLE)) {
    features |= kCpuHasNeonDotProd;
  }
#endif
  // No Neon I8MM or SVE feature detection available here at time of writing.
  return features;
}

#elif defined(__APPLE__)
static bool have_feature(const char* feature) {
  // For more information on sysctlbyname(), see:
  // https://developer.apple.com/documentation/kernel/1387446-sysctlbyname/determining_instruction_set_characteristics
  int64_t feature_present = 0;
  size_t size = sizeof(feature_present);
  if (sysctlbyname(feature, &feature_present, &size, NULL, 0) != 0) {
    return false;
  }
  return feature_present;
}

// For AArch64, but public to allow testing on any CPU.
LIBYUV_API SAFEBUFFERS int AArch64CpuCaps() {
  // Neon is mandatory on AArch64, so enable unconditionally.
  int features = kCpuHasNEON;

  if (have_feature("hw.optional.arm.FEAT_DotProd")) {
    features |= kCpuHasNeonDotProd;
    if (have_feature("hw.optional.arm.FEAT_I8MM")) {
      features |= kCpuHasNeonI8MM;
      if (have_feature("hw.optional.arm.FEAT_SME")) {
        features |= kCpuHasSME;
        if (have_feature("hw.optional.arm.FEAT_SME2")) {
          features |= kCpuHasSME2;
        }
      }
    }
  }
  // No SVE feature detection available here at time of writing.
  return features;
}

#else  // !defined(__linux__) && !defined(_WIN32) && !defined(__APPLE__)
// For AArch64, but public to allow testing on any CPU.
LIBYUV_API SAFEBUFFERS int AArch64CpuCaps() {
  // Neon is mandatory on AArch64, so enable unconditionally.
  int features = kCpuHasNEON;

  // TODO(libyuv:980) support feature detection on other platforms.

  return features;
}
#endif
#endif  // defined(__aarch64__)

LIBYUV_API SAFEBUFFERS int RiscvCpuCaps(const char* cpuinfo_name) {
  char cpuinfo_line[512];
  int flag = 0;
  FILE* f = fopen(cpuinfo_name, "re");
  if (!f) {
#if defined(__riscv_vector)
    // Assume RVV if /proc/cpuinfo is unavailable.
    // This occurs in the Chrome sandbox for the Pepper or renderer process.
    return kCpuHasRVV;
#else
    return 0;
#endif
  }
  memset(cpuinfo_line, 0, sizeof(cpuinfo_line));
  while (fgets(cpuinfo_line, sizeof(cpuinfo_line), f)) {
    if (memcmp(cpuinfo_line, "isa", 3) == 0) {
      // The ISA string must begin with rv64{i,e,g} for a 64-bit processor.
      char* isa = strstr(cpuinfo_line, "rv64");
      if (isa) {
        size_t isa_len = strlen(isa);
        char* extensions;
        size_t extensions_len = 0;
        size_t std_isa_len;
        // Remove the new-line character at the end of the string.
        if (isa[isa_len - 1] == '\n') {
          isa[--isa_len] = '\0';
        }
        // The ISA string needs at least 5 characters: "rv64" plus the base.
        if (isa_len < 5) {
          fclose(f);
          return 0;
        }
        // Skip {i,e,g} canonical checking.
        // Skip the leading "rv64" and the base letter.
        isa += 5;
        // Find the first occurrence of 's', 'x' or 'z' to detect multi-letter
        // standard, non-standard, and supervisor-level extensions.
        extensions = strpbrk(isa, "zxs");
        if (extensions) {
          // Multi-letter extensions are separated by a single underscore
          // as described in RISC-V User-Level ISA V2.2.
          char* ext = strtok(extensions, "_");
          extensions_len = strlen(extensions);
          while (ext) {
            // Search for the ZVFH (vector FP16) extension.
            if (!strcmp(ext, "zvfh")) {
              flag |= kCpuHasRVVZVFH;
            }
            ext = strtok(NULL, "_");
          }
        }
        std_isa_len = isa_len - extensions_len - 5;
        // Detect the 'v' among the standard single-letter extensions.
        if (memchr(isa, 'v', std_isa_len)) {
          // RVV implies the F extension.
          flag |= kCpuHasRVV;
        }
      }
    }
#if defined(__riscv_vector)
    // Assume RVV if /proc/cpuinfo is from an x86 host running QEMU.
    else if ((memcmp(cpuinfo_line, "vendor_id\t: GenuineIntel", 24) == 0) ||
             (memcmp(cpuinfo_line, "vendor_id\t: AuthenticAMD", 24) == 0)) {
      fclose(f);
      return kCpuHasRVV;
    }
#endif
  }
  fclose(f);
  return flag;
}

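// Illustrative walk-through (not part of upstream libyuv): for an
// "isa : rv64imafdcv_zicsr_zvfh\n" line, the parser above finds "rv64",
// skips the leading "rv64" plus the base letter, tokenizes the multi-letter
// extensions "zicsr_zvfh" on '_' (matching "zvfh", so kCpuHasRVVZVFH is set),
// and finds 'v' among the single-letter extensions "mafdcv", so kCpuHasRVV
// is set as well.
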
LIBYUV_API SAFEBUFFERS int MipsCpuCaps(const char* cpuinfo_name) {
  char cpuinfo_line[512];
  int flag = 0;
  FILE* f = fopen(cpuinfo_name, "re");
  if (!f) {
    // Assume nothing if /proc/cpuinfo is unavailable.
    // This occurs in the Chrome sandbox for the Pepper or renderer process.
    return 0;
  }
  memset(cpuinfo_line, 0, sizeof(cpuinfo_line));
  while (fgets(cpuinfo_line, sizeof(cpuinfo_line), f)) {
    if (memcmp(cpuinfo_line, "cpu model", 9) == 0) {
      // Work around early kernels that omit MSA from the ASEs line.
      if (strstr(cpuinfo_line, "Loongson-2K")) {
        flag |= kCpuHasMSA;
      }
    }
    if (memcmp(cpuinfo_line, "ASEs implemented", 16) == 0) {
      if (strstr(cpuinfo_line, "msa")) {
        flag |= kCpuHasMSA;
      }
      // ASEs is the last line, so we can break here.
      break;
    }
  }
  fclose(f);
  return flag;
}

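// Illustrative note (not part of upstream libyuv): on an MSA-capable kernel,
// /proc/cpuinfo typically carries a line such as
//   "ASEs implemented : mips16 dsp msa"
// which the loop above matches, setting kCpuHasMSA.
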
#define LOONGARCH_CFG2 0x2
#define LOONGARCH_CFG2_LSX (1 << 6)
#define LOONGARCH_CFG2_LASX (1 << 7)

#if defined(__loongarch__)
LIBYUV_API SAFEBUFFERS int LoongarchCpuCaps(void) {
  int flag = 0;
  uint32_t cfg2 = 0;

  __asm__ volatile("cpucfg %0, %1 \n\t" : "+&r"(cfg2) : "r"(LOONGARCH_CFG2));

  if (cfg2 & LOONGARCH_CFG2_LSX)
    flag |= kCpuHasLSX;

  if (cfg2 & LOONGARCH_CFG2_LASX)
    flag |= kCpuHasLASX;
  return flag;
}
#endif

static SAFEBUFFERS int GetCpuFlags(void) {
  int cpu_info = 0;
#if !defined(__pnacl__) && !defined(__CLR_VER) && \
    (defined(__x86_64__) || defined(_M_X64) || defined(__i386__) || \
     defined(_M_IX86))
  int cpu_info0[4] = {0, 0, 0, 0};
  int cpu_info1[4] = {0, 0, 0, 0};
  int cpu_info7[4] = {0, 0, 0, 0};
  int cpu_einfo7[4] = {0, 0, 0, 0};
  int cpu_info24[4] = {0, 0, 0, 0};
  int cpu_amdinfo21[4] = {0, 0, 0, 0};
  CpuId(0, 0, cpu_info0);
  CpuId(1, 0, cpu_info1);
  if (cpu_info0[0] >= 7) {
    CpuId(7, 0, cpu_info7);
    CpuId(7, 1, cpu_einfo7);
    CpuId(0x80000021, 0, cpu_amdinfo21);
  }
  if (cpu_info0[0] >= 0x24) {
    CpuId(0x24, 0, cpu_info24);
  }
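  // Decoding note (added for exposition): the masks below follow the CPUID
  // feature bits, e.g. leaf 1 EDX bit 26 (0x04000000) is SSE2, leaf 1 ECX
  // bits 9/19/20 are SSSE3/SSE4.1/SSE4.2, leaf 7 EBX bit 9 is ERMS, and
  // leaf 7 EDX bit 4 is FSRM (libyuv spells the flag kCpuHasFSMR).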
  cpu_info = kCpuHasX86 | ((cpu_info1[3] & 0x04000000) ? kCpuHasSSE2 : 0) |
             ((cpu_info1[2] & 0x00000200) ? kCpuHasSSSE3 : 0) |
             ((cpu_info1[2] & 0x00080000) ? kCpuHasSSE41 : 0) |
             ((cpu_info1[2] & 0x00100000) ? kCpuHasSSE42 : 0) |
             ((cpu_info7[1] & 0x00000200) ? kCpuHasERMS : 0) |
             ((cpu_info7[3] & 0x00000010) ? kCpuHasFSMR : 0);

  // AVX requires that the OS save the YMM registers.
  if (((cpu_info1[2] & 0x1c000000) == 0x1c000000) &&  // AVX and OSXSave
      ((GetXCR0() & 6) == 6)) {  // Test OS saves YMM registers
    cpu_info |= kCpuHasAVX | ((cpu_info7[1] & 0x00000020) ? kCpuHasAVX2 : 0) |
                ((cpu_info1[2] & 0x00001000) ? kCpuHasFMA3 : 0) |
                ((cpu_info1[2] & 0x20000000) ? kCpuHasF16C : 0) |
                ((cpu_einfo7[0] & 0x00000010) ? kCpuHasAVXVNNI : 0) |
                ((cpu_einfo7[3] & 0x00000010) ? kCpuHasAVXVNNIINT8 : 0);

    cpu_info |= ((cpu_amdinfo21[0] & 0x00008000) ? kCpuHasERMS : 0);

    // Detect AVX512BW.
    if ((GetXCR0() & 0xe0) == 0xe0) {
      cpu_info |= ((cpu_info7[1] & 0x40000000) ? kCpuHasAVX512BW : 0) |
                  ((cpu_info7[1] & 0x80000000) ? kCpuHasAVX512VL : 0) |
                  ((cpu_info7[2] & 0x00000002) ? kCpuHasAVX512VBMI : 0) |
                  ((cpu_info7[2] & 0x00000040) ? kCpuHasAVX512VBMI2 : 0) |
                  ((cpu_info7[2] & 0x00000800) ? kCpuHasAVX512VNNI : 0) |
                  ((cpu_info7[2] & 0x00001000) ? kCpuHasAVX512VBITALG : 0) |
                  ((cpu_einfo7[3] & 0x00080000) ? kCpuHasAVX10 : 0) |
                  ((cpu_info7[3] & 0x02000000) ? kCpuHasAMXINT8 : 0);
      if (cpu_info0[0] >= 0x24 && (cpu_einfo7[3] & 0x00080000)) {
        cpu_info |= ((cpu_info24[1] & 0xFF) >= 2) ? kCpuHasAVX10_2 : 0;
      }
    }
  }
#endif
#if defined(__mips__) && defined(__linux__)
  cpu_info = MipsCpuCaps("/proc/cpuinfo");
  cpu_info |= kCpuHasMIPS;
#endif
#if defined(__loongarch__) && defined(__linux__)
  cpu_info = LoongarchCpuCaps();
  cpu_info |= kCpuHasLOONGARCH;
#endif
#if defined(__aarch64__)
#if defined(__linux__)
  // getauxval() has been supported since Android SDK version 18, and the
  // minimum SDK at the time of writing is 21, so it is safe to always use it.
  // If getauxval() is somehow disabled it returns 0, which still leaves Neon
  // enabled since Neon is mandatory on AArch64.
  unsigned long hwcap = getauxval(AT_HWCAP);
  unsigned long hwcap2 = getauxval(AT_HWCAP2);
  cpu_info = AArch64CpuCaps(hwcap, hwcap2);
#else
  cpu_info = AArch64CpuCaps();
#endif
  cpu_info |= kCpuHasARM;
#endif  // __aarch64__
#if defined(__arm__)
// gcc -mfpu=neon defines __ARM_NEON__, which generates code that requires
// Neon. NaCl also requires Neon. On Linux, /proc/cpuinfo is parsed to detect
// Neon; elsewhere, Neon is assumed only if __ARM_NEON__ is defined.
#if defined(__linux__)
  cpu_info = ArmCpuCaps("/proc/cpuinfo");
#elif defined(__ARM_NEON__)
  cpu_info = kCpuHasNEON;
#else
  cpu_info = 0;
#endif
  cpu_info |= kCpuHasARM;
#endif  // __arm__
#if defined(__riscv) && defined(__linux__)
  cpu_info = RiscvCpuCaps("/proc/cpuinfo");
  cpu_info |= kCpuHasRISCV;
#endif  // __riscv
  cpu_info |= kCpuInitialized;
  return cpu_info;
}

// Note that use of this function is not thread-safe.
LIBYUV_API
int MaskCpuFlags(int enable_flags) {
  int cpu_info = GetCpuFlags() & enable_flags;
  SetCpuFlags(cpu_info);
  return cpu_info;
}

LIBYUV_API
int InitCpuFlags(void) {
  return MaskCpuFlags(-1);
}

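// Illustrative usage (not part of this file): callers typically query
// features via TestCpuFlag() from libyuv/cpu_id.h, and tests can mask
// features off at runtime:
//   MaskCpuFlags(~kCpuHasAVX2);  // Run with AVX2 disabled.
//   if (TestCpuFlag(kCpuHasSSSE3)) { /* take the SSSE3 path */ }
//   MaskCpuFlags(-1);  // Restore all detected features.
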
#ifdef __cplusplus
}  // extern "C"
}  // namespace libyuv
#endif