/src/gnutls/lib/accelerated/x86/x86-common.c
Line | Count | Source |
1 | | /* |
2 | | * Copyright (C) 2011-2018 Free Software Foundation, Inc. |
3 | | * Copyright (C) 2018 Red Hat, Inc. |
4 | | * |
5 | | * Author: Nikos Mavrogiannopoulos |
6 | | * |
7 | | * This file is part of GnuTLS. |
8 | | * |
9 | | * The GnuTLS is free software; you can redistribute it and/or |
10 | | * modify it under the terms of the GNU Lesser General Public License |
11 | | * as published by the Free Software Foundation; either version 2.1 of |
12 | | * the License, or (at your option) any later version. |
13 | | * |
14 | | * This library is distributed in the hope that it will be useful, but |
15 | | * WITHOUT ANY WARRANTY; without even the implied warranty of |
16 | | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
17 | | * Lesser General Public License for more details. |
18 | | * |
19 | | * You should have received a copy of the GNU Lesser General Public License |
20 | | * along with this program. If not, see <https://www.gnu.org/licenses/> |
21 | | * |
22 | | */ |
23 | | |
24 | | /* |
25 | | * The following code performs run-time detection of x86 CPU features |
26 | | * and registers the corresponding accelerated ciphers, digests and MACs. |
27 | | */ |
28 | | |
29 | | #include "errors.h" |
30 | | #include "gnutls_int.h" |
31 | | #include <gnutls/crypto.h> |
32 | | #include "errors.h" |
33 | | #include "aes-x86.h" |
34 | | #include "sha-x86.h" |
35 | | #include "x86-common.h" |
36 | | #ifdef HAVE_LIBNETTLE |
37 | | #include <nettle/aes.h> /* for key generation in 192 and 256 bits */ |
38 | | #include "sha-padlock.h" |
39 | | #endif |
40 | | #include "aes-padlock.h" |
41 | | #ifdef HAVE_CPUID_H |
42 | | #include <cpuid.h> |
43 | | #else |
44 | | #define __get_cpuid(...) 0 |
45 | | #define __get_cpuid_count(...) 0 |
46 | | #endif |
47 | | |
48 | | /* Layout: CPUID leaf-1 EDX, leaf-1 ECX, leaf-7 EBX (see read_cpuid_vals). |
49 | | * This is a format compatible with OpenSSL's CPUID detection. |
50 | | */ |
51 | | #if defined(__GNUC__) |
52 | | __attribute__((visibility("hidden"))) |
53 | | #elif defined(__SUNPRO_C) |
54 | | __hidden |
55 | | #endif |
56 | | unsigned int GNUTLS_x86_cpuid_s[4]; |
57 | | |
58 | | #ifndef bit_SHA |
59 | | #define bit_SHA (1 << 29) |
60 | | #endif |
61 | | |
62 | | /* ecx */ |
63 | | #ifndef bit_AVX512BITALG |
64 | | #define bit_AVX512BITALG 0x1000 |
65 | | #endif |
66 | | |
67 | | #ifndef bit_PCLMUL |
68 | | #define bit_PCLMUL 0x2 |
69 | | #endif |
70 | | |
71 | | #ifndef bit_SSSE3 |
72 | | /* ecx */ |
73 | | #define bit_SSSE3 0x00000200 |
74 | | #endif |
75 | | |
76 | | #ifndef bit_AES |
77 | | #define bit_AES 0x2000000 |
78 | | #endif |
79 | | |
80 | | #ifndef bit_AVX |
81 | | #define bit_AVX 0x10000000 |
82 | | #endif |
83 | | |
84 | | #ifndef bit_AVX2 |
85 | | #define bit_AVX2 0x00000020 |
86 | | #endif |
87 | | |
88 | | #ifndef bit_AVX512F |
89 | | #define bit_AVX512F 0x00010000 |
90 | | #endif |
91 | | |
92 | | #ifndef bit_AVX512IFMA |
93 | | #define bit_AVX512IFMA 0x00200000 |
94 | | #endif |
95 | | |
96 | | #ifndef bit_AVX512BW |
97 | | #define bit_AVX512BW 0x40000000 |
98 | | #endif |
99 | | |
100 | | #ifndef bit_AVX512VL |
101 | | #define bit_AVX512VL 0x80000000 |
102 | | #endif |
103 | | |
104 | | #ifndef bit_OSXSAVE |
105 | | #define bit_OSXSAVE 0x8000000 |
106 | | #endif |
107 | | |
108 | | #ifndef bit_MOVBE |
109 | | #define bit_MOVBE 0x00400000 |
110 | | #endif |
111 | | |
112 | 0 | #define bit_PADLOCK (0x3 << 6) |
113 | 0 | #define bit_PADLOCK_PHE (0x3 << 10) |
114 | 0 | #define bit_PADLOCK_PHE_SHA512 (0x3 << 25) |
115 | | |
116 | | /* Our internal bit string of CPU capabilities. These are the values |
117 | | * accepted in the GNUTLS_CPUID_OVERRIDE environment variable. */ |
118 | 0 | #define EMPTY_SET 1 |
119 | 0 | #define INTEL_AES_NI (1 << 1) |
120 | 0 | #define INTEL_SSSE3 (1 << 2) |
121 | 0 | #define INTEL_PCLMUL (1 << 3) |
122 | 0 | #define INTEL_AVX (1 << 4) |
123 | 0 | #define INTEL_SHA (1 << 5) |
124 | 0 | #define PADLOCK (1 << 20) |
125 | 0 | #define PADLOCK_PHE (1 << 21) |
126 | 0 | #define PADLOCK_PHE_SHA512 (1 << 22) |
127 | | |
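These are the values that the GNUTLS_CPUID_OVERRIDE environment variable is parsed into (see register_x86_crypto() at the end of this file, which reads it with strtol(..., 0)). As a quick illustration, a minimal sketch of forcing a specific feature set; note that registration may already have happened from a library constructor, so exporting the variable from the shell before the process starts (e.g. `GNUTLS_CPUID_OVERRIDE=0x1 ./app`) is the reliable route, and secure_getenv() ignores it in setuid programs:

```c
#include <stdlib.h>
#include <gnutls/gnutls.h>

int main(void)
{
	/* Values mirror the defines above: 0x1 = EMPTY_SET (disable all
	 * acceleration), 0x2 = AES-NI, 0x4 = SSSE3, 0x8 = PCLMUL,
	 * 0x10 = AVX, 0x20 = SHA; parsed with strtol(..., 0), so hex works. */
	setenv("GNUTLS_CPUID_OVERRIDE", "0x2", 1);

	if (gnutls_global_init() < 0) /* triggers register_x86_crypto() */
		return 1;
	/* ... AES-NI is now requested; other accelerations stay disabled ... */
	gnutls_global_deinit();
	return 0;
}
```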
128 | | #ifndef HAVE_GET_CPUID_COUNT |
129 | | static inline void get_cpuid_level7(unsigned int *eax, unsigned int *ebx, |
130 | | unsigned int *ecx, unsigned int *edx) |
131 | 4 | { |
132 | | /* we avoid using __get_cpuid_count, because it is not available with gcc 4.8 */ |
133 | 4 | if (__get_cpuid_max(7, 0) < 7) |
134 | 4 | return; |
135 | | |
136 | 0 | __cpuid_count(7, 0, *eax, *ebx, *ecx, *edx); |
137 | 0 | return; |
138 | 4 | } |
139 | | #else |
140 | | #define get_cpuid_level7(a, b, c, d) __get_cpuid_count(7, 0, a, b, c, d) |
141 | | #endif |
142 | | |
143 | | static unsigned read_cpuid_vals(unsigned int vals[4]) |
144 | 4 | { |
145 | 4 | unsigned t1, t2, t3; |
146 | 4 | vals[0] = vals[1] = vals[2] = vals[3] = 0; |
147 | | |
148 | 4 | if (!__get_cpuid(1, &t1, &t2, &vals[1], &vals[0])) |
149 | 0 | return 0; |
150 | | /* suppress AVX512; it is enabled only conditionally on certain CPUs in the original (OpenSSL) code */ |
151 | 4 | vals[1] &= 0xfffff7ff; |
152 | | |
153 | 4 | get_cpuid_level7(&t1, &vals[2], &t2, &t3); |
154 | | |
155 | 4 | return 1; |
156 | 4 | } |
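For reference, read_cpuid_vals() fills the vector as: vals[0] = leaf-1 EDX, vals[1] = leaf-1 ECX (with bit 11 masked out by 0xfffff7ff), vals[2] = leaf-7 EBX. A standalone probe performing the same two CPUID queries, a sketch assuming GCC/clang on x86 (the hex masks repeat the bit_* defines above):

```c
#include <cpuid.h>
#include <stdio.h>

int main(void)
{
	unsigned a, b, c, d;

	/* Leaf 1: the classic feature words live in EDX and ECX. */
	if (!__get_cpuid(1, &a, &b, &c, &d))
		return 1;
	printf("AES-NI: %s\n", (c & 0x2000000) ? "yes" : "no"); /* bit_AES */
	printf("PCLMUL: %s\n", (c & 0x2) ? "yes" : "no");       /* bit_PCLMUL */
	printf("SSSE3:  %s\n", (c & 0x200) ? "yes" : "no");     /* bit_SSSE3 */

	/* Leaf 7, sub-leaf 0: SHA-NI and the AVX2/AVX512 bits are in EBX. */
	if (__get_cpuid_max(0, NULL) >= 7) {
		__cpuid_count(7, 0, a, b, c, d);
		printf("SHA-NI: %s\n", (b & (1u << 29)) ? "yes" : "no"); /* bit_SHA */
	}
	return 0;
}
```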
157 | | |
158 | | /* Based on the example in "How to detect New Instruction support in |
159 | | * the 4th generation Intel Core processor family". |
160 | | * https://software.intel.com/en-us/articles/how-to-detect-new-instruction-support-in-the-4th-generation-intel-core-processor-family |
161 | | */ |
162 | | static unsigned check_4th_gen_intel_features(unsigned ecx) |
163 | 4 | { |
164 | 4 | uint32_t xcr0; |
165 | | |
166 | 4 | if ((ecx & bit_OSXSAVE) != bit_OSXSAVE) |
167 | 0 | return 0; |
168 | | |
169 | | #if defined(_MSC_VER) && !defined(__clang__) |
170 | | xcr0 = _xgetbv(0); |
171 | | #else |
172 | 4 | __asm__("xgetbv" : "=a"(xcr0) : "c"(0) : "%edx"); |
173 | 4 | #endif |
174 | | /* Check if xmm and ymm state are enabled in XCR0. */ |
175 | 4 | return (xcr0 & 6) == 6; |
176 | 4 | } |
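The xgetbv step asks the OS rather than the CPU: XCR0 bit 1 covers XMM (SSE) state and bit 2 covers YMM (AVX) state, so (xcr0 & 6) == 6 means the kernel actually saves and restores the AVX registers across context switches. The MSVC branch above already uses the _xgetbv intrinsic; GCC and clang provide the same intrinsic when building with -mxsave (assumed here), so a hedged portable variant of the check could look like:

```c
#include <immintrin.h>

/* Returns non-zero when both XMM (bit 1) and YMM (bit 2) state are
 * enabled in XCR0.  The caller must have verified CPUID.1:ECX.OSXSAVE
 * first, otherwise executing xgetbv faults. */
static int os_enabled_avx_state(void)
{
	unsigned long long xcr0 = _xgetbv(0); /* read XCR0 */
	return (xcr0 & 6) == 6;
}
```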
177 | | |
178 | | static void capabilities_to_intel_cpuid(unsigned capabilities) |
179 | 0 | { |
180 | 0 | unsigned a[4]; |
181 | |
182 | 0 | if (capabilities & EMPTY_SET) { |
183 | 0 | return; |
184 | 0 | } |
185 | | |
186 | 0 | if (!read_cpuid_vals(a)) |
187 | 0 | return; |
188 | | |
189 | 0 | if (capabilities & INTEL_AES_NI) { |
190 | 0 | if (a[1] & bit_AES) { |
191 | 0 | GNUTLS_x86_cpuid_s[1] |= bit_AES; |
192 | 0 | } else { |
193 | 0 | _gnutls_debug_log( |
194 | 0 | "AESNI acceleration requested but not available\n"); |
195 | 0 | } |
196 | 0 | } |
197 | |
198 | 0 | if (capabilities & INTEL_SSSE3) { |
199 | 0 | if (a[1] & bit_SSSE3) { |
200 | 0 | GNUTLS_x86_cpuid_s[1] |= bit_SSSE3; |
201 | 0 | } else { |
202 | 0 | _gnutls_debug_log( |
203 | 0 | "SSSE3 acceleration requested but not available\n"); |
204 | 0 | } |
205 | 0 | } |
206 | |
207 | 0 | if (capabilities & INTEL_AVX) { |
208 | 0 | if ((a[1] & bit_AVX) && (a[1] & bit_MOVBE) && |
209 | 0 | check_4th_gen_intel_features(a[1])) { |
210 | 0 | GNUTLS_x86_cpuid_s[1] |= bit_AVX | bit_MOVBE; |
211 | 0 | } else { |
212 | 0 | _gnutls_debug_log( |
213 | 0 | "AVX acceleration requested but not available\n"); |
214 | 0 | } |
215 | 0 | } |
216 | |
217 | 0 | if (capabilities & INTEL_PCLMUL) { |
218 | 0 | if (a[1] & bit_PCLMUL) { |
219 | 0 | GNUTLS_x86_cpuid_s[1] |= bit_PCLMUL; |
220 | 0 | } else { |
221 | 0 | _gnutls_debug_log( |
222 | 0 | "PCLMUL acceleration requested but not available\n"); |
223 | 0 | } |
224 | 0 | } |
225 | |
226 | 0 | if (capabilities & INTEL_SHA) { |
227 | 0 | if (a[2] & bit_SHA) { |
228 | 0 | GNUTLS_x86_cpuid_s[2] |= bit_SHA; |
229 | 0 | } else { |
230 | 0 | _gnutls_debug_log( |
231 | 0 | "SHA acceleration requested but not available\n"); |
232 | 0 | } |
233 | 0 | } |
234 | 0 | } |
235 | | |
236 | | static unsigned check_optimized_aes(void) |
237 | 4 | { |
238 | 4 | return (GNUTLS_x86_cpuid_s[1] & bit_AES); |
239 | 4 | } |
240 | | |
241 | | static unsigned check_ssse3(void) |
242 | 8 | { |
243 | 8 | return (GNUTLS_x86_cpuid_s[1] & bit_SSSE3); |
244 | 8 | } |
245 | | |
246 | | static unsigned check_sha(void) |
247 | 8 | { |
248 | 8 | return (GNUTLS_x86_cpuid_s[2] & bit_SHA); |
249 | 8 | } |
250 | | |
251 | | #ifdef ASM_X86_64 |
252 | | static unsigned check_avx_movbe(void) |
253 | 4 | { |
254 | 4 | return (GNUTLS_x86_cpuid_s[1] & (bit_AVX | bit_MOVBE)) == |
255 | 4 | (bit_AVX | bit_MOVBE); |
256 | 4 | } |
257 | | |
258 | | static unsigned check_pclmul(void) |
259 | 4 | { |
260 | 4 | return (GNUTLS_x86_cpuid_s[1] & bit_PCLMUL); |
261 | 4 | } |
262 | | #endif |
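Note the idiom in check_avx_movbe(): masking with (bit_AVX | bit_MOVBE) and comparing against the full mask requires both bits at once, while the single-bit helpers above only need a non-zero result. A tiny illustration with hypothetical flag values standing in for the real bits:

```c
#include <stdio.h>

#define FLAG_A (1u << 4)  /* stands in for bit_AVX */
#define FLAG_B (1u << 22) /* stands in for bit_MOVBE */

int main(void)
{
	unsigned only_a = FLAG_A;
	unsigned both = FLAG_A | FLAG_B;

	/* "all bits set" test, as in check_avx_movbe() */
	printf("%d\n", (only_a & (FLAG_A | FLAG_B)) == (FLAG_A | FLAG_B)); /* 0 */
	printf("%d\n", (both & (FLAG_A | FLAG_B)) == (FLAG_A | FLAG_B));   /* 1 */
	return 0;
}
```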
263 | | |
264 | | #ifdef ENABLE_PADLOCK |
265 | | static unsigned capabilities_to_zhaoxin_edx(unsigned capabilities) |
266 | 0 | { |
267 | 0 | unsigned a, b, c, t; |
268 | |
269 | 0 | if (capabilities & EMPTY_SET) { |
270 | 0 | return 0; |
271 | 0 | } |
272 | | |
273 | 0 | if (!__get_cpuid(1, &t, &a, &b, &c)) |
274 | 0 | return 0; |
275 | 0 | if (capabilities & PADLOCK) { |
276 | 0 | if (c & bit_PADLOCK) { |
277 | 0 | GNUTLS_x86_cpuid_s[2] |= bit_PADLOCK; |
278 | 0 | } else { |
279 | 0 | _gnutls_debug_log( |
280 | 0 | "Padlock acceleration requested but not available\n"); |
281 | 0 | } |
282 | 0 | } |
283 | |
284 | 0 | if (capabilities & PADLOCK_PHE) { |
285 | 0 | if (c & bit_PADLOCK_PHE) { |
286 | 0 | GNUTLS_x86_cpuid_s[2] |= bit_PADLOCK_PHE; |
287 | 0 | } else { |
288 | 0 | _gnutls_debug_log( |
289 | 0 | "Padlock-PHE acceleration requested but not available\n"); |
290 | 0 | } |
291 | 0 | } |
292 | |
293 | 0 | if (capabilities & PADLOCK_PHE_SHA512) { |
294 | 0 | if (c & bit_PADLOCK_PHE_SHA512) { |
295 | 0 | GNUTLS_x86_cpuid_s[2] |= bit_PADLOCK_PHE_SHA512; |
296 | 0 | } else { |
297 | 0 | _gnutls_debug_log( |
298 | 0 | "Padlock-PHE-SHA512 acceleration requested but not available\n"); |
299 | 0 | } |
300 | 0 | } |
301 | |
302 | 0 | return GNUTLS_x86_cpuid_s[2]; |
303 | 0 | } |
304 | | |
305 | | static int check_padlock(unsigned edx) |
306 | 0 | { |
307 | 0 | return ((edx & bit_PADLOCK) == bit_PADLOCK); |
308 | 0 | } |
309 | | |
310 | | static int check_phe(unsigned edx) |
311 | 0 | { |
312 | 0 | return ((edx & bit_PADLOCK_PHE) == bit_PADLOCK_PHE); |
313 | 0 | } |
314 | | |
315 | | /* We are actually checking for SHA512 */ |
316 | | static int check_phe_sha512(unsigned edx) |
317 | 0 | { |
318 | 0 | return ((edx & bit_PADLOCK_PHE_SHA512) == bit_PADLOCK_PHE_SHA512); |
319 | 0 | } |
320 | | |
321 | | /* On some Zhaoxin CPUs the PCLMUL (AVX) code path is the faster option */ |
322 | | static int check_fast_pclmul(void) |
323 | 0 | { |
324 | 0 | unsigned int a, b, c, d; |
325 | 0 | unsigned int family, model; |
326 | |
327 | 0 | if (!__get_cpuid(1, &a, &b, &c, &d)) |
328 | 0 | return 0; |
329 | | |
330 | 0 | family = ((a >> 8) & 0x0F); |
331 | 0 | model = ((a >> 4) & 0x0F) + ((a >> 12) & 0xF0); |
332 | |
333 | 0 | if (((family == 0x6) && (model == 0xf || model == 0x19)) || |
334 | 0 | ((family == 0x7) && (model == 0x1B || model == 0x3B))) |
335 | 0 | return 1; |
336 | 0 | else |
337 | 0 | return 0; |
338 | 0 | } |
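The decoding above follows the standard CPUID leaf-1 EAX layout: base family in bits 8-11, base model in bits 4-7, and the extended model (bits 16-19) folded in as the high nibble via (a >> 12) & 0xF0. A worked example with a hypothetical EAX value:

```c
#include <stdio.h>

int main(void)
{
	unsigned a = 0x00010691; /* illustrative leaf-1 EAX value */
	unsigned family = (a >> 8) & 0x0F;                       /* = 0x6  */
	unsigned model = ((a >> 4) & 0x0F) + ((a >> 12) & 0xF0); /* = 0x19 */

	/* family 0x6 / model 0x19 is one of the pairs that
	 * check_fast_pclmul() accepts as a fast-PCLMUL Zhaoxin part. */
	printf("family=0x%x model=0x%x\n", family, model);
	return 0;
}
```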
339 | | |
340 | | static int check_phe_partial(void) |
341 | 0 | { |
342 | 0 | const char text[64] = "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" |
343 | 0 | "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"; |
344 | 0 | uint32_t iv[5] = { 0x67452301UL, 0xEFCDAB89UL, 0x98BADCFEUL, |
345 | 0 | 0x10325476UL, 0xC3D2E1F0UL }; |
346 | | |
347 | | /* If EAX is set to -1 (this is the case with padlock_sha1_blocks), the |
348 | | * xsha1 instruction takes a complete SHA-1 block (64 bytes), while it |
349 | | * takes arbitrary length data otherwise. */ |
350 | 0 | padlock_sha1_blocks(iv, text, 1); |
351 | |
352 | 0 | if (iv[0] == 0xDA4968EBUL && iv[1] == 0x2E377C1FUL && |
353 | 0 | iv[2] == 0x884E8F52UL && iv[3] == 0x83524BEBUL && |
354 | 0 | iv[4] == 0xE74EBDBDUL) |
355 | 0 | return 1; |
356 | 0 | else |
357 | 0 | return 0; |
358 | 0 | } |
359 | | |
360 | | static unsigned check_zhaoxin(void) |
361 | 4 | { |
362 | 4 | unsigned int a, b, c, d; |
363 | | |
364 | 4 | if (!__get_cpuid(0, &a, &b, &c, &d)) |
365 | 0 | return 0; |
366 | | |
367 | | /* A Zhaoxin or VIA CPU was detected */ |
368 | 4 | if ((memcmp(&b, "Cent", 4) == 0 && memcmp(&d, "aurH", 4) == 0 && |
369 | 4 | memcmp(&c, "auls", 4) == 0) || |
370 | 4 | (memcmp(&b, "  Sh", 4) == 0 && memcmp(&d, "angh", 4) == 0 && |
371 | 4 | memcmp(&c, "ai  ", 4) == 0)) { |
372 | 0 | return 1; |
373 | 0 | } |
374 | | |
375 | 4 | return 0; |
376 | 4 | } |
377 | | |
378 | | static void register_x86_padlock_crypto(unsigned capabilities) |
379 | 4 | { |
380 | 4 | int ret, phe; |
381 | 4 | unsigned edx; |
382 | | |
383 | 4 | if (check_zhaoxin() == 0) |
384 | 4 | return; |
385 | | |
386 | 0 | memset(GNUTLS_x86_cpuid_s, 0, sizeof(GNUTLS_x86_cpuid_s)); |
387 | |
388 | 0 | if (capabilities == 0) { |
389 | 0 | if (!read_cpuid_vals(GNUTLS_x86_cpuid_s)) |
390 | 0 | return; |
391 | 0 | edx = padlock_capability(); |
392 | 0 | } else { |
393 | 0 | capabilities_to_intel_cpuid(capabilities); |
394 | 0 | edx = capabilities_to_zhaoxin_edx(capabilities); |
395 | 0 | } |
396 | | |
397 | 0 | if (check_ssse3()) { |
398 | 0 | _gnutls_debug_log("Zhaoxin SSSE3 was detected\n"); |
399 | |
400 | 0 | ret = gnutls_crypto_single_cipher_register( |
401 | 0 | GNUTLS_CIPHER_AES_128_GCM, 90, |
402 | 0 | &_gnutls_aes_gcm_x86_ssse3, 0); |
403 | 0 | if (ret < 0) { |
404 | 0 | gnutls_assert(); |
405 | 0 | } |
406 | |
407 | 0 | ret = gnutls_crypto_single_cipher_register( |
408 | 0 | GNUTLS_CIPHER_AES_192_GCM, 90, |
409 | 0 | &_gnutls_aes_gcm_x86_ssse3, 0); |
410 | 0 | if (ret < 0) { |
411 | 0 | gnutls_assert(); |
412 | 0 | } |
413 | |
414 | 0 | ret = gnutls_crypto_single_cipher_register( |
415 | 0 | GNUTLS_CIPHER_AES_256_GCM, 90, |
416 | 0 | &_gnutls_aes_gcm_x86_ssse3, 0); |
417 | 0 | if (ret < 0) { |
418 | 0 | gnutls_assert(); |
419 | 0 | } |
420 | |
421 | 0 | ret = gnutls_crypto_single_cipher_register( |
422 | 0 | GNUTLS_CIPHER_AES_128_CBC, 90, &_gnutls_aes_ssse3, 0); |
423 | 0 | if (ret < 0) { |
424 | 0 | gnutls_assert(); |
425 | 0 | } |
426 | |
427 | 0 | ret = gnutls_crypto_single_cipher_register( |
428 | 0 | GNUTLS_CIPHER_AES_192_CBC, 90, &_gnutls_aes_ssse3, 0); |
429 | 0 | if (ret < 0) { |
430 | 0 | gnutls_assert(); |
431 | 0 | } |
432 | |
433 | 0 | ret = gnutls_crypto_single_cipher_register( |
434 | 0 | GNUTLS_CIPHER_AES_256_CBC, 90, &_gnutls_aes_ssse3, 0); |
435 | 0 | if (ret < 0) { |
436 | 0 | gnutls_assert(); |
437 | 0 | } |
438 | 0 | } |
439 | |
440 | 0 | if (check_sha() || check_ssse3()) { |
441 | 0 | if (check_sha()) |
442 | 0 | _gnutls_debug_log("Zhaoxin SHA was detected\n"); |
443 | |
444 | 0 | ret = gnutls_crypto_single_digest_register( |
445 | 0 | GNUTLS_DIG_SHA1, 80, &_gnutls_sha_x86_ssse3, 0); |
446 | 0 | if (ret < 0) { |
447 | 0 | gnutls_assert(); |
448 | 0 | } |
449 | |
450 | 0 | ret = gnutls_crypto_single_digest_register( |
451 | 0 | GNUTLS_DIG_SHA224, 80, &_gnutls_sha_x86_ssse3, 0); |
452 | 0 | if (ret < 0) { |
453 | 0 | gnutls_assert(); |
454 | 0 | } |
455 | |
456 | 0 | ret = gnutls_crypto_single_digest_register( |
457 | 0 | GNUTLS_DIG_SHA256, 80, &_gnutls_sha_x86_ssse3, 0); |
458 | 0 | if (ret < 0) { |
459 | 0 | gnutls_assert(); |
460 | 0 | } |
461 | |
462 | 0 | ret = gnutls_crypto_single_mac_register( |
463 | 0 | GNUTLS_MAC_SHA1, 80, &_gnutls_hmac_sha_x86_ssse3, 0); |
464 | 0 | if (ret < 0) |
465 | 0 | gnutls_assert(); |
466 | |
467 | 0 | ret = gnutls_crypto_single_mac_register( |
468 | 0 | GNUTLS_MAC_SHA224, 80, &_gnutls_hmac_sha_x86_ssse3, 0); |
469 | 0 | if (ret < 0) |
470 | 0 | gnutls_assert(); |
471 | |
472 | 0 | ret = gnutls_crypto_single_mac_register( |
473 | 0 | GNUTLS_MAC_SHA256, 80, &_gnutls_hmac_sha_x86_ssse3, 0); |
474 | 0 | if (ret < 0) |
475 | 0 | gnutls_assert(); |
476 | |
477 | 0 | ret = gnutls_crypto_single_digest_register( |
478 | 0 | GNUTLS_DIG_SHA384, 80, &_gnutls_sha_x86_ssse3, 0); |
479 | 0 | if (ret < 0) |
480 | 0 | gnutls_assert(); |
481 | |
482 | 0 | ret = gnutls_crypto_single_digest_register( |
483 | 0 | GNUTLS_DIG_SHA512, 80, &_gnutls_sha_x86_ssse3, 0); |
484 | 0 | if (ret < 0) |
485 | 0 | gnutls_assert(); |
486 | 0 | ret = gnutls_crypto_single_mac_register( |
487 | 0 | GNUTLS_MAC_SHA384, 80, &_gnutls_hmac_sha_x86_ssse3, 0); |
488 | 0 | if (ret < 0) |
489 | 0 | gnutls_assert(); |
490 | |
491 | 0 | ret = gnutls_crypto_single_mac_register( |
492 | 0 | GNUTLS_MAC_SHA512, 80, &_gnutls_hmac_sha_x86_ssse3, 0); |
493 | 0 | if (ret < 0) |
494 | 0 | gnutls_assert(); |
495 | 0 | } |
496 | |
497 | 0 | if (check_optimized_aes()) { |
498 | 0 | _gnutls_debug_log("Zhaoxin AES accelerator was detected\n"); |
499 | 0 | ret = gnutls_crypto_single_cipher_register( |
500 | 0 | GNUTLS_CIPHER_AES_128_CBC, 80, &_gnutls_aesni_x86, 0); |
501 | 0 | if (ret < 0) { |
502 | 0 | gnutls_assert(); |
503 | 0 | } |
504 | |
505 | 0 | ret = gnutls_crypto_single_cipher_register( |
506 | 0 | GNUTLS_CIPHER_AES_192_CBC, 80, &_gnutls_aesni_x86, 0); |
507 | 0 | if (ret < 0) { |
508 | 0 | gnutls_assert(); |
509 | 0 | } |
510 | |
511 | 0 | ret = gnutls_crypto_single_cipher_register( |
512 | 0 | GNUTLS_CIPHER_AES_256_CBC, 80, &_gnutls_aesni_x86, 0); |
513 | 0 | if (ret < 0) { |
514 | 0 | gnutls_assert(); |
515 | 0 | } |
516 | |
517 | 0 | ret = gnutls_crypto_single_cipher_register( |
518 | 0 | GNUTLS_CIPHER_AES_128_CCM, 80, |
519 | 0 | &_gnutls_aes_ccm_x86_aesni, 0); |
520 | 0 | if (ret < 0) { |
521 | 0 | gnutls_assert(); |
522 | 0 | } |
523 | |
524 | 0 | ret = gnutls_crypto_single_cipher_register( |
525 | 0 | GNUTLS_CIPHER_AES_256_CCM, 80, |
526 | 0 | &_gnutls_aes_ccm_x86_aesni, 0); |
527 | 0 | if (ret < 0) { |
528 | 0 | gnutls_assert(); |
529 | 0 | } |
530 | |
531 | 0 | ret = gnutls_crypto_single_cipher_register( |
532 | 0 | GNUTLS_CIPHER_AES_128_CCM_8, 80, |
533 | 0 | &_gnutls_aes_ccm_x86_aesni, 0); |
534 | 0 | if (ret < 0) { |
535 | 0 | gnutls_assert(); |
536 | 0 | } |
537 | |
538 | 0 | ret = gnutls_crypto_single_cipher_register( |
539 | 0 | GNUTLS_CIPHER_AES_256_CCM_8, 80, |
540 | 0 | &_gnutls_aes_ccm_x86_aesni, 0); |
541 | 0 | if (ret < 0) { |
542 | 0 | gnutls_assert(); |
543 | 0 | } |
544 | |
545 | 0 | ret = gnutls_crypto_single_cipher_register( |
546 | 0 | GNUTLS_CIPHER_AES_128_XTS, 80, |
547 | 0 | &_gnutls_aes_xts_x86_aesni, 0); |
548 | 0 | if (ret < 0) { |
549 | 0 | gnutls_assert(); |
550 | 0 | } |
551 | |
552 | 0 | ret = gnutls_crypto_single_cipher_register( |
553 | 0 | GNUTLS_CIPHER_AES_256_XTS, 80, |
554 | 0 | &_gnutls_aes_xts_x86_aesni, 0); |
555 | 0 | if (ret < 0) { |
556 | 0 | gnutls_assert(); |
557 | 0 | } |
558 | |
559 | 0 | #ifdef ASM_X86_64 |
560 | 0 | if (check_pclmul()) { |
561 | | /* register GCM ciphers */ |
562 | 0 | _gnutls_debug_log( |
563 | 0 | "Zhaoxin GCM accelerator was detected\n"); |
564 | 0 | if (check_avx_movbe() && check_fast_pclmul()) { |
565 | 0 | _gnutls_debug_log( |
566 | 0 | "Zhaoxin GCM accelerator (AVX) was detected\n"); |
567 | 0 | ret = gnutls_crypto_single_cipher_register( |
568 | 0 | GNUTLS_CIPHER_AES_128_GCM, 80, |
569 | 0 | &_gnutls_aes_gcm_pclmul_avx, 0); |
570 | 0 | if (ret < 0) { |
571 | 0 | gnutls_assert(); |
572 | 0 | } |
573 | |
574 | 0 | ret = gnutls_crypto_single_cipher_register( |
575 | 0 | GNUTLS_CIPHER_AES_192_GCM, 80, |
576 | 0 | &_gnutls_aes_gcm_pclmul_avx, 0); |
577 | 0 | if (ret < 0) { |
578 | 0 | gnutls_assert(); |
579 | 0 | } |
580 | |
581 | 0 | ret = gnutls_crypto_single_cipher_register( |
582 | 0 | GNUTLS_CIPHER_AES_256_GCM, 80, |
583 | 0 | &_gnutls_aes_gcm_pclmul_avx, 0); |
584 | 0 | if (ret < 0) { |
585 | 0 | gnutls_assert(); |
586 | 0 | } |
587 | 0 | } else { |
588 | 0 | ret = gnutls_crypto_single_cipher_register( |
589 | 0 | GNUTLS_CIPHER_AES_128_GCM, 80, |
590 | 0 | &_gnutls_aes_gcm_pclmul, 0); |
591 | 0 | if (ret < 0) { |
592 | 0 | gnutls_assert(); |
593 | 0 | } |
594 | |
595 | 0 | ret = gnutls_crypto_single_cipher_register( |
596 | 0 | GNUTLS_CIPHER_AES_192_GCM, 80, |
597 | 0 | &_gnutls_aes_gcm_pclmul, 0); |
598 | 0 | if (ret < 0) { |
599 | 0 | gnutls_assert(); |
600 | 0 | } |
601 | |
602 | 0 | ret = gnutls_crypto_single_cipher_register( |
603 | 0 | GNUTLS_CIPHER_AES_256_GCM, 80, |
604 | 0 | &_gnutls_aes_gcm_pclmul, 0); |
605 | 0 | if (ret < 0) { |
606 | 0 | gnutls_assert(); |
607 | 0 | } |
608 | 0 | } |
609 | 0 | } else |
610 | 0 | #endif |
611 | 0 | { |
612 | 0 | ret = gnutls_crypto_single_cipher_register( |
613 | 0 | GNUTLS_CIPHER_AES_128_GCM, 80, |
614 | 0 | &_gnutls_aes_gcm_x86_aesni, 0); |
615 | 0 | if (ret < 0) { |
616 | 0 | gnutls_assert(); |
617 | 0 | } |
618 | |
619 | 0 | ret = gnutls_crypto_single_cipher_register( |
620 | 0 | GNUTLS_CIPHER_AES_192_GCM, 80, |
621 | 0 | &_gnutls_aes_gcm_x86_aesni, 0); |
622 | 0 | if (ret < 0) { |
623 | 0 | gnutls_assert(); |
624 | 0 | } |
625 | |
626 | 0 | ret = gnutls_crypto_single_cipher_register( |
627 | 0 | GNUTLS_CIPHER_AES_256_GCM, 80, |
628 | 0 | &_gnutls_aes_gcm_x86_aesni, 0); |
629 | 0 | if (ret < 0) { |
630 | 0 | gnutls_assert(); |
631 | 0 | } |
632 | 0 | } |
633 | 0 | } |
634 | |
635 | 0 | if (check_padlock(edx)) { |
636 | 0 | _gnutls_debug_log("Padlock AES accelerator was detected\n"); |
637 | 0 | ret = gnutls_crypto_single_cipher_register( |
638 | 0 | GNUTLS_CIPHER_AES_128_CBC, 80, &_gnutls_aes_padlock, 0); |
639 | 0 | if (ret < 0) { |
640 | 0 | gnutls_assert(); |
641 | 0 | } |
642 | |
643 | 0 | ret = gnutls_crypto_single_cipher_register( |
644 | 0 | GNUTLS_CIPHER_AES_192_CBC, 80, &_gnutls_aes_padlock, 0); |
645 | 0 | if (ret < 0) { |
646 | 0 | gnutls_assert(); |
647 | 0 | } |
648 | | |
649 | | /* register GCM ciphers */ |
650 | 0 | ret = gnutls_crypto_single_cipher_register( |
651 | 0 | GNUTLS_CIPHER_AES_128_GCM, 90, &_gnutls_aes_gcm_padlock, |
652 | 0 | 0); |
653 | 0 | if (ret < 0) { |
654 | 0 | gnutls_assert(); |
655 | 0 | } |
656 | |
657 | 0 | ret = gnutls_crypto_single_cipher_register( |
658 | 0 | GNUTLS_CIPHER_AES_256_CBC, 80, &_gnutls_aes_padlock, 0); |
659 | 0 | if (ret < 0) { |
660 | 0 | gnutls_assert(); |
661 | 0 | } |
662 | |
663 | 0 | ret = gnutls_crypto_single_cipher_register( |
664 | 0 | GNUTLS_CIPHER_AES_256_GCM, 90, &_gnutls_aes_gcm_padlock, |
665 | 0 | 0); |
666 | 0 | if (ret < 0) { |
667 | 0 | gnutls_assert(); |
668 | 0 | } |
669 | 0 | } |
670 | |
671 | 0 | if (!check_optimized_aes() && !check_padlock(edx)) |
672 | 0 | _gnutls_priority_update_non_aesni(); |
673 | |
674 | 0 | #ifdef HAVE_LIBNETTLE |
675 | 0 | phe = check_phe(edx); |
676 | |
677 | 0 | if (phe && check_phe_partial()) { |
678 | 0 | _gnutls_debug_log( |
679 | 0 | "Padlock SHA1 and SHA256 (partial) accelerator was detected\n"); |
680 | 0 | if (check_phe_sha512(edx)) { |
681 | 0 | _gnutls_debug_log( |
682 | 0 | "Padlock SHA512 (partial) accelerator was detected\n"); |
683 | 0 | ret = gnutls_crypto_single_digest_register( |
684 | 0 | GNUTLS_DIG_SHA384, 80, &_gnutls_sha_padlock, 0); |
685 | 0 | if (ret < 0) { |
686 | 0 | gnutls_assert(); |
687 | 0 | } |
688 | |
689 | 0 | ret = gnutls_crypto_single_digest_register( |
690 | 0 | GNUTLS_DIG_SHA512, 80, &_gnutls_sha_padlock, 0); |
691 | 0 | if (ret < 0) { |
692 | 0 | gnutls_assert(); |
693 | 0 | } |
694 | |
695 | 0 | ret = gnutls_crypto_single_mac_register( |
696 | 0 | GNUTLS_MAC_SHA384, 80, |
697 | 0 | &_gnutls_hmac_sha_padlock, 0); |
698 | 0 | if (ret < 0) { |
699 | 0 | gnutls_assert(); |
700 | 0 | } |
701 | |
702 | 0 | ret = gnutls_crypto_single_mac_register( |
703 | 0 | GNUTLS_MAC_SHA512, 80, |
704 | 0 | &_gnutls_hmac_sha_padlock, 0); |
705 | 0 | if (ret < 0) { |
706 | 0 | gnutls_assert(); |
707 | 0 | } |
708 | 0 | } |
709 | |
710 | 0 | ret = gnutls_crypto_single_digest_register( |
711 | 0 | GNUTLS_DIG_SHA1, 90, &_gnutls_sha_padlock, 0); |
712 | 0 | if (ret < 0) { |
713 | 0 | gnutls_assert(); |
714 | 0 | } |
715 | |
716 | 0 | ret = gnutls_crypto_single_digest_register( |
717 | 0 | GNUTLS_DIG_SHA224, 90, &_gnutls_sha_padlock, 0); |
718 | 0 | if (ret < 0) { |
719 | 0 | gnutls_assert(); |
720 | 0 | } |
721 | |
722 | 0 | ret = gnutls_crypto_single_digest_register( |
723 | 0 | GNUTLS_DIG_SHA256, 90, &_gnutls_sha_padlock, 0); |
724 | 0 | if (ret < 0) { |
725 | 0 | gnutls_assert(); |
726 | 0 | } |
727 | |
728 | 0 | ret = gnutls_crypto_single_mac_register( |
729 | 0 | GNUTLS_MAC_SHA1, 90, &_gnutls_hmac_sha_padlock, 0); |
730 | 0 | if (ret < 0) { |
731 | 0 | gnutls_assert(); |
732 | 0 | } |
733 | | |
734 | | /* we don't register MAC_SHA224 because it is not used by TLS */ |
735 | |
736 | 0 | ret = gnutls_crypto_single_mac_register( |
737 | 0 | GNUTLS_MAC_SHA256, 90, &_gnutls_hmac_sha_padlock, 0); |
738 | 0 | if (ret < 0) { |
739 | 0 | gnutls_assert(); |
740 | 0 | } |
741 | 0 | } else if (phe) { |
742 | | /* Original padlock PHE. Does not support incremental operations. |
743 | | */ |
744 | 0 | _gnutls_debug_log( |
745 | 0 | "Padlock SHA1 and SHA256 accelerator was detected\n"); |
746 | 0 | ret = gnutls_crypto_single_digest_register( |
747 | 0 | GNUTLS_DIG_SHA1, 90, &_gnutls_sha_padlock_oneshot, 0); |
748 | 0 | if (ret < 0) { |
749 | 0 | gnutls_assert(); |
750 | 0 | } |
751 | |
752 | 0 | ret = gnutls_crypto_single_digest_register( |
753 | 0 | GNUTLS_DIG_SHA256, 90, &_gnutls_sha_padlock_oneshot, 0); |
754 | 0 | if (ret < 0) { |
755 | 0 | gnutls_assert(); |
756 | 0 | } |
757 | |
758 | 0 | ret = gnutls_crypto_single_mac_register( |
759 | 0 | GNUTLS_MAC_SHA1, 90, &_gnutls_hmac_sha_padlock_oneshot, |
760 | 0 | 0); |
761 | 0 | if (ret < 0) { |
762 | 0 | gnutls_assert(); |
763 | 0 | } |
764 | |
765 | 0 | ret = gnutls_crypto_single_mac_register( |
766 | 0 | GNUTLS_MAC_SHA256, 90, |
767 | 0 | &_gnutls_hmac_sha_padlock_oneshot, 0); |
768 | 0 | if (ret < 0) { |
769 | 0 | gnutls_assert(); |
770 | 0 | } |
771 | 0 | } |
772 | 0 | #endif |
773 | |
774 | 0 | return; |
775 | 0 | } |
776 | | #endif |
777 | | |
778 | | enum x86_cpu_vendor { |
779 | | X86_CPU_VENDOR_OTHER, |
780 | | X86_CPU_VENDOR_INTEL, |
781 | | X86_CPU_VENDOR_AMD, |
782 | | }; |
783 | | |
784 | | static enum x86_cpu_vendor check_x86_cpu_vendor(void) |
785 | 4 | { |
786 | 4 | unsigned int a, b, c, d; |
787 | | |
788 | 4 | if (!__get_cpuid(0, &a, &b, &c, &d)) { |
789 | 0 | return X86_CPU_VENDOR_OTHER; |
790 | 0 | } |
791 | | |
792 | 4 | if (memcmp(&b, "Genu", 4) == 0 && memcmp(&d, "ineI", 4) == 0 && |
793 | 4 | memcmp(&c, "ntel", 4) == 0) { |
794 | 0 | return X86_CPU_VENDOR_INTEL; |
795 | 0 | } |
796 | | |
797 | 4 | if (memcmp(&b, "Auth", 4) == 0 && memcmp(&d, "enti", 4) == 0 && |
798 | 4 | memcmp(&c, "cAMD", 4) == 0) { |
799 | 4 | return X86_CPU_VENDOR_AMD; |
800 | 4 | } |
801 | | |
802 | 0 | return X86_CPU_VENDOR_OTHER; |
803 | 4 | } |
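Both this function and check_zhaoxin() rely on CPUID leaf 0 returning the 12-byte vendor string split across EBX, EDX and ECX, in that order: "GenuineIntel" arrives as EBX="Genu", EDX="ineI", ECX="ntel". A standalone sketch that reassembles and prints it:

```c
#include <cpuid.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	unsigned a, b, c, d;
	char vendor[13];

	if (!__get_cpuid(0, &a, &b, &c, &d))
		return 1;

	/* Order is EBX, EDX, ECX: e.g. "GenuineIntel", "AuthenticAMD",
	 * "CentaurHauls" (VIA) or "  Shanghai  " (Zhaoxin). */
	memcpy(vendor, &b, 4);
	memcpy(vendor + 4, &d, 4);
	memcpy(vendor + 8, &c, 4);
	vendor[12] = '\0';

	printf("vendor: \"%s\"\n", vendor);
	return 0;
}
```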
804 | | |
805 | | static void register_x86_intel_crypto(unsigned capabilities) |
806 | 4 | { |
807 | 4 | int ret; |
808 | 4 | enum x86_cpu_vendor vendor; |
809 | | |
810 | 4 | memset(GNUTLS_x86_cpuid_s, 0, sizeof(GNUTLS_x86_cpuid_s)); |
811 | | |
812 | 4 | vendor = check_x86_cpu_vendor(); |
813 | 4 | if (vendor == X86_CPU_VENDOR_OTHER) { |
814 | 0 | return; |
815 | 0 | } |
816 | | |
817 | 4 | if (capabilities == 0) { |
818 | 4 | if (!read_cpuid_vals(GNUTLS_x86_cpuid_s)) |
819 | 0 | return; |
820 | 4 | if (!check_4th_gen_intel_features(GNUTLS_x86_cpuid_s[1])) { |
821 | 0 | GNUTLS_x86_cpuid_s[1] &= ~bit_AVX; |
822 | | |
823 | | /* Clear AVX2 bits as well, according to what |
824 | | * OpenSSL does. Should we clear |
825 | | * bit_AVX512DQ, bit_AVX512PF, bit_AVX512ER, |
826 | | * and bit_AVX512CD? */ |
827 | 0 | GNUTLS_x86_cpuid_s[2] &= |
828 | 0 | ~(bit_AVX2 | bit_AVX512F | bit_AVX512IFMA | |
829 | 0 | bit_AVX512BW | bit_AVX512VL); |
830 | 0 | } |
831 | 4 | } else { |
832 | 0 | capabilities_to_intel_cpuid(capabilities); |
833 | 0 | } |
834 | | |
835 | | /* CRYPTOGAMS uses the (1 << 30) bit as an indicator of Intel CPUs */ |
836 | 4 | if (vendor == X86_CPU_VENDOR_INTEL) { |
837 | 0 | GNUTLS_x86_cpuid_s[0] |= 1 << 30; |
838 | 4 | } else { |
839 | 4 | GNUTLS_x86_cpuid_s[0] &= ~(1 << 30); |
840 | 4 | } |
841 | | |
842 | 4 | if (check_ssse3()) { |
843 | 4 | _gnutls_debug_log("Intel SSSE3 was detected\n"); |
844 | | |
845 | 4 | ret = gnutls_crypto_single_cipher_register( |
846 | 4 | GNUTLS_CIPHER_AES_128_GCM, 90, |
847 | 4 | &_gnutls_aes_gcm_x86_ssse3, 0); |
848 | 4 | if (ret < 0) { |
849 | 0 | gnutls_assert(); |
850 | 0 | } |
851 | | |
852 | 4 | ret = gnutls_crypto_single_cipher_register( |
853 | 4 | GNUTLS_CIPHER_AES_192_GCM, 90, |
854 | 4 | &_gnutls_aes_gcm_x86_ssse3, 0); |
855 | 4 | if (ret < 0) { |
856 | 0 | gnutls_assert(); |
857 | 0 | } |
858 | | |
859 | 4 | ret = gnutls_crypto_single_cipher_register( |
860 | 4 | GNUTLS_CIPHER_AES_256_GCM, 90, |
861 | 4 | &_gnutls_aes_gcm_x86_ssse3, 0); |
862 | 4 | if (ret < 0) { |
863 | 0 | gnutls_assert(); |
864 | 0 | } |
865 | | |
866 | 4 | ret = gnutls_crypto_single_cipher_register( |
867 | 4 | GNUTLS_CIPHER_AES_128_CBC, 90, &_gnutls_aes_ssse3, 0); |
868 | 4 | if (ret < 0) { |
869 | 0 | gnutls_assert(); |
870 | 0 | } |
871 | | |
872 | 4 | ret = gnutls_crypto_single_cipher_register( |
873 | 4 | GNUTLS_CIPHER_AES_192_CBC, 90, &_gnutls_aes_ssse3, 0); |
874 | 4 | if (ret < 0) { |
875 | 0 | gnutls_assert(); |
876 | 0 | } |
877 | | |
878 | 4 | ret = gnutls_crypto_single_cipher_register( |
879 | 4 | GNUTLS_CIPHER_AES_256_CBC, 90, &_gnutls_aes_ssse3, 0); |
880 | 4 | if (ret < 0) { |
881 | 0 | gnutls_assert(); |
882 | 0 | } |
883 | 4 | } |
884 | | |
885 | 4 | if (check_sha() || check_ssse3()) { |
886 | 4 | if (check_sha()) |
887 | 4 | _gnutls_debug_log("Intel SHA was detected\n"); |
888 | | |
889 | 4 | ret = gnutls_crypto_single_digest_register( |
890 | 4 | GNUTLS_DIG_SHA1, 80, &_gnutls_sha_x86_ssse3, 0); |
891 | 4 | if (ret < 0) { |
892 | 0 | gnutls_assert(); |
893 | 0 | } |
894 | | |
895 | 4 | ret = gnutls_crypto_single_digest_register( |
896 | 4 | GNUTLS_DIG_SHA224, 80, &_gnutls_sha_x86_ssse3, 0); |
897 | 4 | if (ret < 0) { |
898 | 0 | gnutls_assert(); |
899 | 0 | } |
900 | | |
901 | 4 | ret = gnutls_crypto_single_digest_register( |
902 | 4 | GNUTLS_DIG_SHA256, 80, &_gnutls_sha_x86_ssse3, 0); |
903 | 4 | if (ret < 0) { |
904 | 0 | gnutls_assert(); |
905 | 0 | } |
906 | | |
907 | 4 | ret = gnutls_crypto_single_mac_register( |
908 | 4 | GNUTLS_MAC_SHA1, 80, &_gnutls_hmac_sha_x86_ssse3, 0); |
909 | 4 | if (ret < 0) |
910 | 4 | gnutls_assert(); |
911 | | |
912 | 4 | ret = gnutls_crypto_single_mac_register( |
913 | 4 | GNUTLS_MAC_SHA224, 80, &_gnutls_hmac_sha_x86_ssse3, 0); |
914 | 4 | if (ret < 0) |
915 | 4 | gnutls_assert(); |
916 | | |
917 | 4 | ret = gnutls_crypto_single_mac_register( |
918 | 4 | GNUTLS_MAC_SHA256, 80, &_gnutls_hmac_sha_x86_ssse3, 0); |
919 | 4 | if (ret < 0) |
920 | 4 | gnutls_assert(); |
921 | | |
922 | 4 | ret = gnutls_crypto_single_digest_register( |
923 | 4 | GNUTLS_DIG_SHA384, 80, &_gnutls_sha_x86_ssse3, 0); |
924 | 4 | if (ret < 0) |
925 | 4 | gnutls_assert(); |
926 | | |
927 | 4 | ret = gnutls_crypto_single_digest_register( |
928 | 4 | GNUTLS_DIG_SHA512, 80, &_gnutls_sha_x86_ssse3, 0); |
929 | 4 | if (ret < 0) |
930 | 4 | gnutls_assert(); |
931 | 4 | ret = gnutls_crypto_single_mac_register( |
932 | 4 | GNUTLS_MAC_SHA384, 80, &_gnutls_hmac_sha_x86_ssse3, 0); |
933 | 4 | if (ret < 0) |
934 | 4 | gnutls_assert(); |
935 | | |
936 | 4 | ret = gnutls_crypto_single_mac_register( |
937 | 4 | GNUTLS_MAC_SHA512, 80, &_gnutls_hmac_sha_x86_ssse3, 0); |
938 | 4 | if (ret < 0) |
939 | 4 | gnutls_assert(); |
940 | 4 | } |
941 | | |
942 | 4 | if (check_optimized_aes()) { |
943 | 4 | _gnutls_debug_log("Intel AES accelerator was detected\n"); |
944 | 4 | ret = gnutls_crypto_single_cipher_register( |
945 | 4 | GNUTLS_CIPHER_AES_128_CBC, 80, &_gnutls_aesni_x86, 0); |
946 | 4 | if (ret < 0) { |
947 | 0 | gnutls_assert(); |
948 | 0 | } |
949 | | |
950 | 4 | ret = gnutls_crypto_single_cipher_register( |
951 | 4 | GNUTLS_CIPHER_AES_192_CBC, 80, &_gnutls_aesni_x86, 0); |
952 | 4 | if (ret < 0) { |
953 | 0 | gnutls_assert(); |
954 | 0 | } |
955 | | |
956 | 4 | ret = gnutls_crypto_single_cipher_register( |
957 | 4 | GNUTLS_CIPHER_AES_256_CBC, 80, &_gnutls_aesni_x86, 0); |
958 | 4 | if (ret < 0) { |
959 | 0 | gnutls_assert(); |
960 | 0 | } |
961 | | |
962 | 4 | ret = gnutls_crypto_single_cipher_register( |
963 | 4 | GNUTLS_CIPHER_AES_128_CCM, 80, |
964 | 4 | &_gnutls_aes_ccm_x86_aesni, 0); |
965 | 4 | if (ret < 0) { |
966 | 0 | gnutls_assert(); |
967 | 0 | } |
968 | | |
969 | 4 | ret = gnutls_crypto_single_cipher_register( |
970 | 4 | GNUTLS_CIPHER_AES_256_CCM, 80, |
971 | 4 | &_gnutls_aes_ccm_x86_aesni, 0); |
972 | 4 | if (ret < 0) { |
973 | 0 | gnutls_assert(); |
974 | 0 | } |
975 | | |
976 | 4 | ret = gnutls_crypto_single_cipher_register( |
977 | 4 | GNUTLS_CIPHER_AES_128_CCM_8, 80, |
978 | 4 | &_gnutls_aes_ccm_x86_aesni, 0); |
979 | 4 | if (ret < 0) { |
980 | 0 | gnutls_assert(); |
981 | 0 | } |
982 | | |
983 | 4 | ret = gnutls_crypto_single_cipher_register( |
984 | 4 | GNUTLS_CIPHER_AES_256_CCM_8, 80, |
985 | 4 | &_gnutls_aes_ccm_x86_aesni, 0); |
986 | 4 | if (ret < 0) { |
987 | 0 | gnutls_assert(); |
988 | 0 | } |
989 | | |
990 | 4 | ret = gnutls_crypto_single_cipher_register( |
991 | 4 | GNUTLS_CIPHER_AES_128_XTS, 80, |
992 | 4 | &_gnutls_aes_xts_x86_aesni, 0); |
993 | 4 | if (ret < 0) { |
994 | 0 | gnutls_assert(); |
995 | 0 | } |
996 | | |
997 | 4 | ret = gnutls_crypto_single_cipher_register( |
998 | 4 | GNUTLS_CIPHER_AES_256_XTS, 80, |
999 | 4 | &_gnutls_aes_xts_x86_aesni, 0); |
1000 | 4 | if (ret < 0) { |
1001 | 0 | gnutls_assert(); |
1002 | 0 | } |
1003 | | |
1004 | 4 | #ifdef ASM_X86_64 |
1005 | 4 | if (check_pclmul()) { |
1006 | | /* register GCM ciphers */ |
1007 | 4 | if (check_avx_movbe()) { |
1008 | 4 | _gnutls_debug_log( |
1009 | 4 | "Intel GCM accelerator (AVX) was detected\n"); |
1010 | 4 | ret = gnutls_crypto_single_cipher_register( |
1011 | 4 | GNUTLS_CIPHER_AES_128_GCM, 80, |
1012 | 4 | &_gnutls_aes_gcm_pclmul_avx, 0); |
1013 | 4 | if (ret < 0) { |
1014 | 0 | gnutls_assert(); |
1015 | 0 | } |
1016 | | |
1017 | 4 | ret = gnutls_crypto_single_cipher_register( |
1018 | 4 | GNUTLS_CIPHER_AES_192_GCM, 80, |
1019 | 4 | &_gnutls_aes_gcm_pclmul_avx, 0); |
1020 | 4 | if (ret < 0) { |
1021 | 0 | gnutls_assert(); |
1022 | 0 | } |
1023 | | |
1024 | 4 | ret = gnutls_crypto_single_cipher_register( |
1025 | 4 | GNUTLS_CIPHER_AES_256_GCM, 80, |
1026 | 4 | &_gnutls_aes_gcm_pclmul_avx, 0); |
1027 | 4 | if (ret < 0) { |
1028 | 0 | gnutls_assert(); |
1029 | 0 | } |
1030 | 4 | } else { |
1031 | 0 | _gnutls_debug_log( |
1032 | 0 | "Intel GCM accelerator was detected\n"); |
1033 | 0 | ret = gnutls_crypto_single_cipher_register( |
1034 | 0 | GNUTLS_CIPHER_AES_128_GCM, 80, |
1035 | 0 | &_gnutls_aes_gcm_pclmul, 0); |
1036 | 0 | if (ret < 0) { |
1037 | 0 | gnutls_assert(); |
1038 | 0 | } |
1039 | |
1040 | 0 | ret = gnutls_crypto_single_cipher_register( |
1041 | 0 | GNUTLS_CIPHER_AES_192_GCM, 80, |
1042 | 0 | &_gnutls_aes_gcm_pclmul, 0); |
1043 | 0 | if (ret < 0) { |
1044 | 0 | gnutls_assert(); |
1045 | 0 | } |
1046 | |
1047 | 0 | ret = gnutls_crypto_single_cipher_register( |
1048 | 0 | GNUTLS_CIPHER_AES_256_GCM, 80, |
1049 | 0 | &_gnutls_aes_gcm_pclmul, 0); |
1050 | 0 | if (ret < 0) { |
1051 | 0 | gnutls_assert(); |
1052 | 0 | } |
1053 | 0 | } |
1054 | 4 | } else |
1055 | 0 | #endif |
1056 | 0 | { |
1057 | 0 | ret = gnutls_crypto_single_cipher_register( |
1058 | 0 | GNUTLS_CIPHER_AES_128_GCM, 80, |
1059 | 0 | &_gnutls_aes_gcm_x86_aesni, 0); |
1060 | 0 | if (ret < 0) { |
1061 | 0 | gnutls_assert(); |
1062 | 0 | } |
1063 | |
1064 | 0 | ret = gnutls_crypto_single_cipher_register( |
1065 | 0 | GNUTLS_CIPHER_AES_192_GCM, 80, |
1066 | 0 | &_gnutls_aes_gcm_x86_aesni, 0); |
1067 | 0 | if (ret < 0) { |
1068 | 0 | gnutls_assert(); |
1069 | 0 | } |
1070 | |
1071 | 0 | ret = gnutls_crypto_single_cipher_register( |
1072 | 0 | GNUTLS_CIPHER_AES_256_GCM, 80, |
1073 | 0 | &_gnutls_aes_gcm_x86_aesni, 0); |
1074 | 0 | if (ret < 0) { |
1075 | 0 | gnutls_assert(); |
1076 | 0 | } |
1077 | 0 | } |
1078 | 4 | } else { |
1079 | 0 | _gnutls_priority_update_non_aesni(); |
1080 | 0 | } |
1081 | | |
1082 | 4 | return; |
1083 | 4 | } |
1084 | | |
1085 | | void register_x86_crypto(void) |
1086 | 4 | { |
1087 | 4 | unsigned capabilities = 0; |
1088 | 4 | char *p; |
1089 | 4 | p = secure_getenv("GNUTLS_CPUID_OVERRIDE"); |
1090 | 4 | if (p) { |
1091 | 0 | capabilities = strtol(p, NULL, 0); |
1092 | 0 | } |
1093 | | |
1094 | 4 | register_x86_intel_crypto(capabilities); |
1095 | 4 | #ifdef ENABLE_PADLOCK |
1096 | 4 | register_x86_padlock_crypto(capabilities); |
1097 | 4 | #endif |
1098 | 4 | } |
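register_x86_crypto() is the entry point the library initialization invokes to install these backends; once it has run, ordinary GnuTLS calls transparently hit whichever implementation won registration. Note the priority arguments used throughout: lower values take precedence, which is why the AES-NI versions registered at 80 override the SSSE3 fallbacks registered at 90. A minimal sketch of code that ends up on the accelerated path (key, nonce and plaintext are illustrative only):

```c
#include <gnutls/crypto.h>
#include <gnutls/gnutls.h>

int main(void)
{
	unsigned char key_buf[16] = "0123456789abcdef"; /* illustrative */
	unsigned char iv_buf[12] = "0123456789a";       /* illustrative */
	unsigned char data[16] = "hello, world!!!";
	gnutls_datum_t key = { key_buf, sizeof(key_buf) };
	gnutls_datum_t iv = { iv_buf, sizeof(iv_buf) };
	gnutls_cipher_hd_t h;

	if (gnutls_global_init() < 0) /* runs register_x86_crypto() */
		return 1;

	/* Backed by _gnutls_aes_gcm_pclmul_avx, _gnutls_aes_gcm_x86_aesni,
	 * the SSSE3 variant or plain software, per the registrations above. */
	if (gnutls_cipher_init(&h, GNUTLS_CIPHER_AES_128_GCM, &key, &iv) < 0)
		return 1;
	if (gnutls_cipher_encrypt(h, data, sizeof(data)) < 0)
		return 1;

	gnutls_cipher_deinit(h);
	gnutls_global_deinit();
	return 0;
}
```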