/src/gnutls/lib/accelerated/x86/x86-common.c
Line | Count | Source |
1 | | /* |
2 | | * Copyright (C) 2011-2018 Free Software Foundation, Inc. |
3 | | * Copyright (C) 2018 Red Hat, Inc. |
4 | | * |
5 | | * Author: Nikos Mavrogiannopoulos |
6 | | * |
7 | | * This file is part of GnuTLS. |
8 | | * |
9 | | * The GnuTLS is free software; you can redistribute it and/or |
10 | | * modify it under the terms of the GNU Lesser General Public License |
11 | | * as published by the Free Software Foundation; either version 2.1 of |
12 | | * the License, or (at your option) any later version. |
13 | | * |
14 | | * This library is distributed in the hope that it will be useful, but |
15 | | * WITHOUT ANY WARRANTY; without even the implied warranty of |
16 | | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
17 | | * Lesser General Public License for more details. |
18 | | * |
19 | | * You should have received a copy of the GNU Lesser General Public License |
20 | | * along with this program. If not, see <https://www.gnu.org/licenses/> |
21 | | * |
22 | | */ |
23 | | |
24 | | /* |
25 | | * The following code is an implementation of the AES-128-CBC cipher |
26 | | * using intel's AES instruction set. |
27 | | */ |
28 | | |
29 | | #include "errors.h" |
30 | | #include "gnutls_int.h" |
31 | | #include <gnutls/crypto.h> |
32 | | #include "errors.h" |
33 | | #include "aes-x86.h" |
34 | | #include "sha-x86.h" |
35 | | #include "x86-common.h" |
36 | | #ifdef HAVE_LIBNETTLE |
37 | | #include <nettle/aes.h> /* for key generation in 192 and 256 bits */ |
38 | | #include <nettle/hmac.h> /* to check if custom hmac is supported */ |
39 | | #include "sha-padlock.h" |
40 | | #endif |
41 | | #include "aes-padlock.h" |
42 | | #ifdef HAVE_CPUID_H |
43 | | #include <cpuid.h> |
44 | | #else |
45 | | #define __get_cpuid(...) 0 |
46 | | #define __get_cpuid_count(...) 0 |
47 | | #endif |
48 | | |
49 | | /* ebx, ecx, edx |
50 | | * This is a format compatible with openssl's CPUID detection. |
51 | | */ |
52 | | #if defined(__GNUC__) |
53 | | __attribute__((visibility("hidden"))) |
54 | | #elif defined(__SUNPRO_C) |
55 | | __hidden |
56 | | #endif |
57 | | unsigned int GNUTLS_x86_cpuid_s[4]; |
58 | | |
59 | | #ifndef bit_SHA |
60 | | #define bit_SHA (1 << 29) |
61 | | #endif |
62 | | |
63 | | /* ecx */ |
64 | | #ifndef bit_AVX512BITALG |
65 | | #define bit_AVX512BITALG 0x4000 |
66 | | #endif |
67 | | |
68 | | #ifndef bit_PCLMUL |
69 | | #define bit_PCLMUL 0x2 |
70 | | #endif |
71 | | |
72 | | #ifndef bit_SSSE3 |
73 | | /* ecx */ |
74 | | #define bit_SSSE3 0x0000200 |
75 | | #endif |
76 | | |
77 | | #ifndef bit_AES |
78 | | #define bit_AES 0x2000000 |
79 | | #endif |
80 | | |
81 | | #ifndef bit_AVX |
82 | | #define bit_AVX 0x10000000 |
83 | | #endif |
84 | | |
85 | | #ifndef bit_AVX2 |
86 | | #define bit_AVX2 0x00000020 |
87 | | #endif |
88 | | |
89 | | #ifndef bit_AVX512F |
90 | | #define bit_AVX512F 0x00010000 |
91 | | #endif |
92 | | |
93 | | #ifndef bit_AVX512IFMA |
94 | | #define bit_AVX512IFMA 0x00200000 |
95 | | #endif |
96 | | |
97 | | #ifndef bit_AVX512BW |
98 | | #define bit_AVX512BW 0x40000000 |
99 | | #endif |
100 | | |
101 | | #ifndef bit_AVX512VL |
102 | | #define bit_AVX512VL 0x80000000 |
103 | | #endif |
104 | | |
105 | | #ifndef bit_OSXSAVE |
106 | | #define bit_OSXSAVE 0x8000000 |
107 | | #endif |
108 | | |
109 | | #ifndef bit_MOVBE |
110 | | #define bit_MOVBE 0x00400000 |
111 | | #endif |
112 | | |
113 | 0 | #define bit_PADLOCK (0x3 << 6) |
114 | 0 | #define bit_PADLOCK_PHE (0x3 << 10) |
115 | 0 | #define bit_PADLOCK_PHE_SHA512 (0x3 << 25) |
116 | | |
117 | | /* Our internal bit-string for cpu capabilities. Should be set |
118 | | * in GNUTLS_CPUID_OVERRIDE */ |
119 | 0 | #define EMPTY_SET 1 |
120 | 0 | #define INTEL_AES_NI (1 << 1) |
121 | 0 | #define INTEL_SSSE3 (1 << 2) |
122 | 0 | #define INTEL_PCLMUL (1 << 3) |
123 | 0 | #define INTEL_AVX (1 << 4) |
124 | 0 | #define INTEL_SHA (1 << 5) |
125 | 0 | #define PADLOCK (1 << 20) |
126 | 0 | #define PADLOCK_PHE (1 << 21) |
127 | 0 | #define PADLOCK_PHE_SHA512 (1 << 22) |
128 | | |
129 | | #ifndef HAVE_GET_CPUID_COUNT |
static inline void get_cpuid_level7(unsigned int *eax, unsigned int *ebx,
				    unsigned int *ecx, unsigned int *edx)
{
	/* Query CPUID leaf 7, sub-leaf 0.  We deliberately avoid
	 * __get_cpuid_count() because it is not available with gcc 4.8's
	 * <cpuid.h>.  When the CPU does not implement leaf 7 the output
	 * registers are left untouched. */
	if (__get_cpuid_max(7, 0) >= 7)
		__cpuid_count(7, 0, *eax, *ebx, *ecx, *edx);
}
140 | | #else |
141 | | #define get_cpuid_level7(a, b, c, d) __get_cpuid_count(7, 0, a, b, c, d) |
142 | | #endif |
143 | | |
/* Fill vals[] with the CPUID feature words this file consumes:
 * vals[0] <- leaf 1 EDX, vals[1] <- leaf 1 ECX, vals[2] <- leaf 7 EBX.
 * vals[3] stays zero.  Returns 1 on success, 0 when CPUID leaf 1 is
 * unavailable. */
static unsigned read_cpuid_vals(unsigned int vals[4])
{
	unsigned scratch_a, scratch_b, scratch_c;
	unsigned i;

	for (i = 0; i < 4; i++)
		vals[i] = 0;

	if (!__get_cpuid(1, &scratch_a, &scratch_b, &vals[1], &vals[0]))
		return 0;

	/* Clear bit 11 of the leaf-1 ECX word: suppress AVX512; it works
	 * conditionally on certain CPUs on the original code. */
	vals[1] &= 0xfffff7ff;

	get_cpuid_level7(&scratch_a, &vals[2], &scratch_b, &scratch_c);

	return 1;
}
158 | | |
159 | | /* Based on the example in "How to detect New Instruction support in |
160 | | * the 4th generation Intel Core processor family. |
161 | | * https://software.intel.com/en-us/articles/how-to-detect-new-instruction-support-in-the-4th-generation-intel-core-processor-family |
162 | | */ |
/* Returns non-zero when the OS has enabled XMM and YMM state saving via
 * XSETBV/XCR0, i.e. AVX results will actually be preserved across context
 * switches.  `ecx` is the CPUID leaf-1 ECX word. */
static unsigned check_4th_gen_intel_features(unsigned ecx)
{
	uint32_t xcr0;

	/* Without OSXSAVE the xgetbv instruction below would be invalid. */
	if ((ecx & bit_OSXSAVE) != bit_OSXSAVE)
		return 0;

#if defined(_MSC_VER) && !defined(__clang__)
	xcr0 = _xgetbv(0);
#else
	__asm__("xgetbv" : "=a"(xcr0) : "c"(0) : "%edx");
#endif
	/* Check if xmm and ymm state are enabled in XCR0. */
	return (xcr0 & 6) == 6;
}
178 | | |
179 | | static void capabilities_to_intel_cpuid(unsigned capabilities) |
180 | 0 | { |
181 | 0 | unsigned a[4]; |
182 | |
|
183 | 0 | if (capabilities & EMPTY_SET) { |
184 | 0 | return; |
185 | 0 | } |
186 | | |
187 | 0 | if (!read_cpuid_vals(a)) |
188 | 0 | return; |
189 | | |
190 | 0 | if (capabilities & INTEL_AES_NI) { |
191 | 0 | if (a[1] & bit_AES) { |
192 | 0 | GNUTLS_x86_cpuid_s[1] |= bit_AES; |
193 | 0 | } else { |
194 | 0 | _gnutls_debug_log( |
195 | 0 | "AESNI acceleration requested but not available\n"); |
196 | 0 | } |
197 | 0 | } |
198 | |
|
199 | 0 | if (capabilities & INTEL_SSSE3) { |
200 | 0 | if (a[1] & bit_SSSE3) { |
201 | 0 | GNUTLS_x86_cpuid_s[1] |= bit_SSSE3; |
202 | 0 | } else { |
203 | 0 | _gnutls_debug_log( |
204 | 0 | "SSSE3 acceleration requested but not available\n"); |
205 | 0 | } |
206 | 0 | } |
207 | |
|
208 | 0 | if (capabilities & INTEL_AVX) { |
209 | 0 | if ((a[1] & bit_AVX) && (a[1] & bit_MOVBE) && |
210 | 0 | check_4th_gen_intel_features(a[1])) { |
211 | 0 | GNUTLS_x86_cpuid_s[1] |= bit_AVX | bit_MOVBE; |
212 | 0 | } else { |
213 | 0 | _gnutls_debug_log( |
214 | 0 | "AVX acceleration requested but not available\n"); |
215 | 0 | } |
216 | 0 | } |
217 | |
|
218 | 0 | if (capabilities & INTEL_PCLMUL) { |
219 | 0 | if (a[1] & bit_PCLMUL) { |
220 | 0 | GNUTLS_x86_cpuid_s[1] |= bit_PCLMUL; |
221 | 0 | } else { |
222 | 0 | _gnutls_debug_log( |
223 | 0 | "PCLMUL acceleration requested but not available\n"); |
224 | 0 | } |
225 | 0 | } |
226 | |
|
227 | 0 | if (capabilities & INTEL_SHA) { |
228 | 0 | if (a[2] & bit_SHA) { |
229 | 0 | GNUTLS_x86_cpuid_s[2] |= bit_SHA; |
230 | 0 | } else { |
231 | 0 | _gnutls_debug_log( |
232 | 0 | "SHA acceleration requested but not available\n"); |
233 | 0 | } |
234 | 0 | } |
235 | 0 | } |
236 | | |
/* Non-zero when the AES-NI bit is set in the cached leaf-1 ECX word
 * (GNUTLS_x86_cpuid_s[1], filled by read_cpuid_vals or the override). */
static unsigned check_optimized_aes(void)
{
	return (GNUTLS_x86_cpuid_s[1] & bit_AES);
}
241 | | |
/* Non-zero when SSSE3 is set in the cached leaf-1 ECX word. */
static unsigned check_ssse3(void)
{
	return (GNUTLS_x86_cpuid_s[1] & bit_SSSE3);
}
246 | | |
/* Non-zero when the SHA extensions bit is set in the cached leaf-7 EBX
 * word (GNUTLS_x86_cpuid_s[2]). */
static unsigned check_sha(void)
{
	return (GNUTLS_x86_cpuid_s[2] & bit_SHA);
}
251 | | |
252 | | #ifdef ASM_X86_64 |
/* Non-zero only when BOTH AVX and MOVBE are present; the AVX GCM
 * assembly requires the pair together. */
static unsigned check_avx_movbe(void)
{
	return (GNUTLS_x86_cpuid_s[1] & (bit_AVX | bit_MOVBE)) ==
	       (bit_AVX | bit_MOVBE);
}
258 | | |
/* Non-zero when carry-less multiplication (PCLMULQDQ) is available. */
static unsigned check_pclmul(void)
{
	return (GNUTLS_x86_cpuid_s[1] & bit_PCLMUL);
}
263 | | #endif |
264 | | |
265 | | #ifdef ENABLE_PADLOCK |
/* Translate the GNUTLS_CPUID_OVERRIDE capability bit-string into PadLock
 * feature bits stored in GNUTLS_x86_cpuid_s[2], and return that word so
 * it can be used where padlock_capability() would otherwise be.
 *
 * NOTE(review): the flags are read from the EDX word of CPUID leaf 1
 * ("c" below, per __get_cpuid's eax/ebx/ecx/edx output order).  The
 * classic VIA location for the ACE/PHE flags is the Centaur extended
 * leaf 0xC0000001 — confirm leaf 1 is intentional for Zhaoxin parts. */
static unsigned capabilities_to_zhaoxin_edx(unsigned capabilities)
{
	unsigned a, b, c, t;

	if (capabilities & EMPTY_SET) {
		return 0;
	}

	if (!__get_cpuid(1, &t, &a, &b, &c))
		return 0;
	/* Each bit_PADLOCK* constant is a two-bit "present + enabled" mask. */
	if (capabilities & PADLOCK) {
		if (c & bit_PADLOCK) {
			GNUTLS_x86_cpuid_s[2] |= bit_PADLOCK;
		} else {
			_gnutls_debug_log(
				"Padlock acceleration requested but not available\n");
		}
	}

	if (capabilities & PADLOCK_PHE) {
		if (c & bit_PADLOCK_PHE) {
			GNUTLS_x86_cpuid_s[2] |= bit_PADLOCK_PHE;
		} else {
			_gnutls_debug_log(
				"Padlock-PHE acceleration requested but not available\n");
		}
	}

	if (capabilities & PADLOCK_PHE_SHA512) {
		if (c & bit_PADLOCK_PHE_SHA512) {
			GNUTLS_x86_cpuid_s[2] |= bit_PADLOCK_PHE_SHA512;
		} else {
			_gnutls_debug_log(
				"Padlock-PHE-SHA512 acceleration requested but not available\n");
		}
	}

	return GNUTLS_x86_cpuid_s[2];
}
305 | | |
/* True when the PadLock AES engine's two-bit present+enabled mask is
 * fully set in `edx` (from padlock_capability() or the override path). */
static int check_padlock(unsigned edx)
{
	return ((edx & bit_PADLOCK) == bit_PADLOCK);
}
310 | | |
/* True when the PadLock Hash Engine (PHE) present+enabled bit pair is
 * fully set in `edx`. */
static int check_phe(unsigned edx)
{
	return ((edx & bit_PADLOCK_PHE) == bit_PADLOCK_PHE);
}
315 | | |
316 | | /* We are actually checking for SHA512 */ |
static int check_phe_sha512(unsigned edx)
{
	/* Two-bit present+enabled mask at bits 25-26 (bit_PADLOCK_PHE_SHA512). */
	return ((edx & bit_PADLOCK_PHE_SHA512) == bit_PADLOCK_PHE_SHA512);
}
321 | | |
322 | | /* On some of the Zhaoxin CPUs, pclmul has a faster acceleration effect */ |
static int check_fast_pclmul(void)
{
	unsigned int eax, ebx, ecx, edx;
	unsigned int family, model;

	if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx))
		return 0;

	/* Family from EAX[11:8]; model combines EAX[7:4] with the extended
	 * model field EAX[19:16] shifted into the high nibble. */
	family = (eax >> 8) & 0x0F;
	model = ((eax >> 4) & 0x0F) + ((eax >> 12) & 0xF0);

	/* Known fast-PCLMUL Zhaoxin family/model combinations. */
	return (family == 0x6 && (model == 0xf || model == 0x19)) ||
	       (family == 0x7 && (model == 0x1B || model == 0x3B));
}
340 | | |
/* Probe whether the PHE supports "partial" (block-at-a-time) operation by
 * hashing one 64-byte block of 'a' characters and comparing against the
 * expected intermediate state.  Returns 1 when the partial mode works. */
static int check_phe_partial(void)
{
	const char text[SHA1_BLOCK_SIZE + 1 /*NUL*/] =
		"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa";
	/* SHA-1 initial state H0..H4 per FIPS 180. */
	uint32_t iv[5] = { 0x67452301UL, 0xEFCDAB89UL, 0x98BADCFEUL,
			   0x10325476UL, 0xC3D2E1F0UL };

	/* If EAX is set to -1 (this is the case with padlock_sha1_blocks), the
	 * xsha1 instruction takes a complete SHA-1 block (64 bytes), while it
	 * takes arbitrary length data otherwise. */
	padlock_sha1_blocks(iv, text, 1);

	/* Expected state after compressing exactly one block in partial
	 * mode; any other result means partial mode is unsupported. */
	if (iv[0] == 0xDA4968EBUL && iv[1] == 0x2E377C1FUL &&
	    iv[2] == 0x884E8F52UL && iv[3] == 0x83524BEBUL &&
	    iv[4] == 0xE74EBDBDUL)
		return 1;
	else
		return 0;
}
360 | | |
/* Returns 1 when running on a VIA ("CentaurHauls") or Zhaoxin
 * ("  Shanghai  ") CPU, 0 otherwise.  The CPUID leaf-0 vendor string is
 * laid out across EBX, EDX, ECX in that order. */
static unsigned check_zhaoxin(void)
{
	unsigned int a, b, c, d;

	if (!__get_cpuid(0, &a, &b, &c, &d))
		return 0;

	/* VIA vendor id: "CentaurHauls" */
	if (memcmp(&b, "Cent", 4) == 0 && memcmp(&d, "aurH", 4) == 0 &&
	    memcmp(&c, "auls", 4) == 0)
		return 1;

	/* Zhaoxin vendor id: "  Shanghai  " */
	if (memcmp(&b, "  Sh", 4) == 0 && memcmp(&d, "angh", 4) == 0 &&
	    memcmp(&c, "ai  ", 4) == 0)
		return 1;

	return 0;
}
378 | | |
/* Register accelerated cipher/digest/MAC back-ends for VIA/Zhaoxin CPUs.
 *
 * `capabilities` is the GNUTLS_CPUID_OVERRIDE bit-string; when zero the
 * real CPUID/padlock_capability() values are used instead.  Higher
 * registration priority numbers win, so SSSE3 implementations (90) are
 * preferred over AES-NI/PCLMUL ones (80) here, and PadLock GCM (90)
 * over PadLock CBC (80).  Registration failures are only logged via
 * gnutls_assert(); they are not fatal.  No-op on non-Zhaoxin/VIA CPUs. */
static void register_x86_padlock_crypto(unsigned capabilities)
{
	int ret, phe; /* phe is only consumed under HAVE_LIBNETTLE below */
	unsigned edx;

	if (check_zhaoxin() == 0)
		return;

	memset(GNUTLS_x86_cpuid_s, 0, sizeof(GNUTLS_x86_cpuid_s));

	if (capabilities == 0) {
		/* Auto-detect: cache CPUID words and query the PadLock
		 * feature flags directly. */
		if (!read_cpuid_vals(GNUTLS_x86_cpuid_s))
			return;
		edx = padlock_capability();
	} else {
		/* Honour the user override for both the Intel-style and the
		 * PadLock-style capability bits. */
		capabilities_to_intel_cpuid(capabilities);
		edx = capabilities_to_zhaoxin_edx(capabilities);
	}

	/* --- SSSE3 AES (vector permute) implementations, priority 90 --- */
	if (check_ssse3()) {
		_gnutls_debug_log("Zhaoxin SSSE3 was detected\n");

		ret = gnutls_crypto_single_cipher_register(
			GNUTLS_CIPHER_AES_128_GCM, 90,
			&_gnutls_aes_gcm_x86_ssse3, 0);
		if (ret < 0) {
			gnutls_assert();
		}

		ret = gnutls_crypto_single_cipher_register(
			GNUTLS_CIPHER_AES_192_GCM, 90,
			&_gnutls_aes_gcm_x86_ssse3, 0);
		if (ret < 0) {
			gnutls_assert();
		}

		ret = gnutls_crypto_single_cipher_register(
			GNUTLS_CIPHER_AES_256_GCM, 90,
			&_gnutls_aes_gcm_x86_ssse3, 0);
		if (ret < 0) {
			gnutls_assert();
		}

		ret = gnutls_crypto_single_cipher_register(
			GNUTLS_CIPHER_AES_128_CBC, 90, &_gnutls_aes_ssse3, 0);
		if (ret < 0) {
			gnutls_assert();
		}

		ret = gnutls_crypto_single_cipher_register(
			GNUTLS_CIPHER_AES_192_CBC, 90, &_gnutls_aes_ssse3, 0);
		if (ret < 0) {
			gnutls_assert();
		}

		ret = gnutls_crypto_single_cipher_register(
			GNUTLS_CIPHER_AES_256_CBC, 90, &_gnutls_aes_ssse3, 0);
		if (ret < 0) {
			gnutls_assert();
		}
	}

	/* --- SHA digests/MACs: the SSSE3 assembly also dispatches on the
	 * SHA-extensions bit internally, so register when either is set --- */
	if (check_sha() || check_ssse3()) {
		if (check_sha())
			_gnutls_debug_log("Zhaoxin SHA was detected\n");

		ret = gnutls_crypto_single_digest_register(
			GNUTLS_DIG_SHA1, 80, &_gnutls_sha_x86_ssse3, 0);
		if (ret < 0) {
			gnutls_assert();
		}

		ret = gnutls_crypto_single_digest_register(
			GNUTLS_DIG_SHA224, 80, &_gnutls_sha_x86_ssse3, 0);
		if (ret < 0) {
			gnutls_assert();
		}

		ret = gnutls_crypto_single_digest_register(
			GNUTLS_DIG_SHA256, 80, &_gnutls_sha_x86_ssse3, 0);
		if (ret < 0) {
			gnutls_assert();
		}

#if defined(HAVE_LIBNETTLE) && defined(HMAC_SET_KEY)
		ret = gnutls_crypto_single_mac_register(
			GNUTLS_MAC_SHA1, 80, &_gnutls_hmac_sha_x86_ssse3, 0);
		if (ret < 0)
			gnutls_assert();

		ret = gnutls_crypto_single_mac_register(
			GNUTLS_MAC_SHA224, 80, &_gnutls_hmac_sha_x86_ssse3, 0);
		if (ret < 0)
			gnutls_assert();

		ret = gnutls_crypto_single_mac_register(
			GNUTLS_MAC_SHA256, 80, &_gnutls_hmac_sha_x86_ssse3, 0);
		if (ret < 0)
			gnutls_assert();
#endif

		ret = gnutls_crypto_single_digest_register(
			GNUTLS_DIG_SHA384, 80, &_gnutls_sha_x86_ssse3, 0);
		if (ret < 0)
			gnutls_assert();

		ret = gnutls_crypto_single_digest_register(
			GNUTLS_DIG_SHA512, 80, &_gnutls_sha_x86_ssse3, 0);
		if (ret < 0)
			gnutls_assert();
#if defined(HAVE_LIBNETTLE) && defined(HMAC_SET_KEY)
		ret = gnutls_crypto_single_mac_register(
			GNUTLS_MAC_SHA384, 80, &_gnutls_hmac_sha_x86_ssse3, 0);
		if (ret < 0)
			gnutls_assert();

		ret = gnutls_crypto_single_mac_register(
			GNUTLS_MAC_SHA512, 80, &_gnutls_hmac_sha_x86_ssse3, 0);
		if (ret < 0)
			gnutls_assert();
#endif
	}

	/* --- AES-NI-compatible instruction set, priority 80 --- */
	if (check_optimized_aes()) {
		_gnutls_debug_log("Zhaoxin AES accelerator was detected\n");
		ret = gnutls_crypto_single_cipher_register(
			GNUTLS_CIPHER_AES_128_CBC, 80, &_gnutls_aesni_x86, 0);
		if (ret < 0) {
			gnutls_assert();
		}

		ret = gnutls_crypto_single_cipher_register(
			GNUTLS_CIPHER_AES_192_CBC, 80, &_gnutls_aesni_x86, 0);
		if (ret < 0) {
			gnutls_assert();
		}

		ret = gnutls_crypto_single_cipher_register(
			GNUTLS_CIPHER_AES_256_CBC, 80, &_gnutls_aesni_x86, 0);
		if (ret < 0) {
			gnutls_assert();
		}

		ret = gnutls_crypto_single_cipher_register(
			GNUTLS_CIPHER_AES_128_CCM, 80,
			&_gnutls_aes_ccm_x86_aesni, 0);
		if (ret < 0) {
			gnutls_assert();
		}

		ret = gnutls_crypto_single_cipher_register(
			GNUTLS_CIPHER_AES_256_CCM, 80,
			&_gnutls_aes_ccm_x86_aesni, 0);
		if (ret < 0) {
			gnutls_assert();
		}

		ret = gnutls_crypto_single_cipher_register(
			GNUTLS_CIPHER_AES_128_CCM_8, 80,
			&_gnutls_aes_ccm_x86_aesni, 0);
		if (ret < 0) {
			gnutls_assert();
		}

		ret = gnutls_crypto_single_cipher_register(
			GNUTLS_CIPHER_AES_256_CCM_8, 80,
			&_gnutls_aes_ccm_x86_aesni, 0);
		if (ret < 0) {
			gnutls_assert();
		}

		ret = gnutls_crypto_single_cipher_register(
			GNUTLS_CIPHER_AES_128_XTS, 80,
			&_gnutls_aes_xts_x86_aesni, 0);
		if (ret < 0) {
			gnutls_assert();
		}

		ret = gnutls_crypto_single_cipher_register(
			GNUTLS_CIPHER_AES_256_XTS, 80,
			&_gnutls_aes_xts_x86_aesni, 0);
		if (ret < 0) {
			gnutls_assert();
		}

#ifdef ASM_X86_64
		if (check_pclmul()) {
			/* register GCM ciphers: PCLMUL-based GHASH, with the
			 * AVX variant only on known-fast models. */
			_gnutls_debug_log(
				"Zhaoxin GCM accelerator was detected\n");
			if (check_avx_movbe() && check_fast_pclmul()) {
				_gnutls_debug_log(
					"Zhaoxin GCM accelerator (AVX) was detected\n");
				ret = gnutls_crypto_single_cipher_register(
					GNUTLS_CIPHER_AES_128_GCM, 80,
					&_gnutls_aes_gcm_pclmul_avx, 0);
				if (ret < 0) {
					gnutls_assert();
				}

				ret = gnutls_crypto_single_cipher_register(
					GNUTLS_CIPHER_AES_192_GCM, 80,
					&_gnutls_aes_gcm_pclmul_avx, 0);
				if (ret < 0) {
					gnutls_assert();
				}

				ret = gnutls_crypto_single_cipher_register(
					GNUTLS_CIPHER_AES_256_GCM, 80,
					&_gnutls_aes_gcm_pclmul_avx, 0);
				if (ret < 0) {
					gnutls_assert();
				}
			} else {
				ret = gnutls_crypto_single_cipher_register(
					GNUTLS_CIPHER_AES_128_GCM, 80,
					&_gnutls_aes_gcm_pclmul, 0);
				if (ret < 0) {
					gnutls_assert();
				}

				ret = gnutls_crypto_single_cipher_register(
					GNUTLS_CIPHER_AES_192_GCM, 80,
					&_gnutls_aes_gcm_pclmul, 0);
				if (ret < 0) {
					gnutls_assert();
				}

				ret = gnutls_crypto_single_cipher_register(
					GNUTLS_CIPHER_AES_256_GCM, 80,
					&_gnutls_aes_gcm_pclmul, 0);
				if (ret < 0) {
					gnutls_assert();
				}
			}
		} else
#endif
		{
			/* No PCLMUL (or 32-bit build): plain AES-NI GCM. */
			ret = gnutls_crypto_single_cipher_register(
				GNUTLS_CIPHER_AES_128_GCM, 80,
				&_gnutls_aes_gcm_x86_aesni, 0);
			if (ret < 0) {
				gnutls_assert();
			}

			ret = gnutls_crypto_single_cipher_register(
				GNUTLS_CIPHER_AES_192_GCM, 80,
				&_gnutls_aes_gcm_x86_aesni, 0);
			if (ret < 0) {
				gnutls_assert();
			}

			ret = gnutls_crypto_single_cipher_register(
				GNUTLS_CIPHER_AES_256_GCM, 80,
				&_gnutls_aes_gcm_x86_aesni, 0);
			if (ret < 0) {
				gnutls_assert();
			}
		}
	}

	/* --- PadLock ACE engine.  NOTE(review): no AES-192-GCM variant is
	 * registered here, only 128/256 — confirm this is intentional. --- */
	if (check_padlock(edx)) {
		_gnutls_debug_log("Padlock AES accelerator was detected\n");
		ret = gnutls_crypto_single_cipher_register(
			GNUTLS_CIPHER_AES_128_CBC, 80, &_gnutls_aes_padlock, 0);
		if (ret < 0) {
			gnutls_assert();
		}

		ret = gnutls_crypto_single_cipher_register(
			GNUTLS_CIPHER_AES_192_CBC, 80, &_gnutls_aes_padlock, 0);
		if (ret < 0) {
			gnutls_assert();
		}

		/* register GCM ciphers */
		ret = gnutls_crypto_single_cipher_register(
			GNUTLS_CIPHER_AES_128_GCM, 90, &_gnutls_aes_gcm_padlock,
			0);
		if (ret < 0) {
			gnutls_assert();
		}

		ret = gnutls_crypto_single_cipher_register(
			GNUTLS_CIPHER_AES_256_CBC, 80, &_gnutls_aes_padlock, 0);
		if (ret < 0) {
			gnutls_assert();
		}

		ret = gnutls_crypto_single_cipher_register(
			GNUTLS_CIPHER_AES_256_GCM, 90, &_gnutls_aes_gcm_padlock,
			0);
		if (ret < 0) {
			gnutls_assert();
		}
	}

	/* No hardware AES at all: adjust priorities accordingly. */
	if (!check_optimized_aes() && !check_padlock(edx))
		_gnutls_priority_update_non_aesni();

#ifdef HAVE_LIBNETTLE
	phe = check_phe(edx);

	if (phe && check_phe_partial()) {
		/* PHE supporting block-at-a-time (incremental) hashing. */
		_gnutls_debug_log(
			"Padlock SHA1 and SHA256 (partial) accelerator was detected\n");
		if (check_phe_sha512(edx)) {
			_gnutls_debug_log(
				"Padlock SHA512 (partial) accelerator was detected\n");
			ret = gnutls_crypto_single_digest_register(
				GNUTLS_DIG_SHA384, 80, &_gnutls_sha_padlock, 0);
			if (ret < 0) {
				gnutls_assert();
			}

			ret = gnutls_crypto_single_digest_register(
				GNUTLS_DIG_SHA512, 80, &_gnutls_sha_padlock, 0);
			if (ret < 0) {
				gnutls_assert();
			}

#if defined(HAVE_LIBNETTLE) && defined(HMAC_SET_KEY)
			ret = gnutls_crypto_single_mac_register(
				GNUTLS_MAC_SHA384, 80,
				&_gnutls_hmac_sha_padlock, 0);
			if (ret < 0) {
				gnutls_assert();
			}

			ret = gnutls_crypto_single_mac_register(
				GNUTLS_MAC_SHA512, 80,
				&_gnutls_hmac_sha_padlock, 0);
			if (ret < 0) {
				gnutls_assert();
			}
#endif
		}

		ret = gnutls_crypto_single_digest_register(
			GNUTLS_DIG_SHA1, 90, &_gnutls_sha_padlock, 0);
		if (ret < 0) {
			gnutls_assert();
		}

		ret = gnutls_crypto_single_digest_register(
			GNUTLS_DIG_SHA224, 90, &_gnutls_sha_padlock, 0);
		if (ret < 0) {
			gnutls_assert();
		}

		ret = gnutls_crypto_single_digest_register(
			GNUTLS_DIG_SHA256, 90, &_gnutls_sha_padlock, 0);
		if (ret < 0) {
			gnutls_assert();
		}

#if defined(HAVE_LIBNETTLE) && defined(HMAC_SET_KEY)
		ret = gnutls_crypto_single_mac_register(
			GNUTLS_MAC_SHA1, 90, &_gnutls_hmac_sha_padlock, 0);
		if (ret < 0) {
			gnutls_assert();
		}

		/* we don't register MAC_SHA224 because it is not used by TLS */

		ret = gnutls_crypto_single_mac_register(
			GNUTLS_MAC_SHA256, 90, &_gnutls_hmac_sha_padlock, 0);
		if (ret < 0) {
			gnutls_assert();
		}
#endif
	} else if (phe) {
		/* Original padlock PHE. Does not support incremental operations.
		 */
		_gnutls_debug_log(
			"Padlock SHA1 and SHA256 accelerator was detected\n");
		ret = gnutls_crypto_single_digest_register(
			GNUTLS_DIG_SHA1, 90, &_gnutls_sha_padlock_oneshot, 0);
		if (ret < 0) {
			gnutls_assert();
		}

		ret = gnutls_crypto_single_digest_register(
			GNUTLS_DIG_SHA256, 90, &_gnutls_sha_padlock_oneshot, 0);
		if (ret < 0) {
			gnutls_assert();
		}

#if defined(HAVE_LIBNETTLE) && defined(HMAC_SET_KEY)
		ret = gnutls_crypto_single_mac_register(
			GNUTLS_MAC_SHA1, 90, &_gnutls_hmac_sha_padlock_oneshot,
			0);
		if (ret < 0) {
			gnutls_assert();
		}

		ret = gnutls_crypto_single_mac_register(
			GNUTLS_MAC_SHA256, 90,
			&_gnutls_hmac_sha_padlock_oneshot, 0);
		if (ret < 0) {
			gnutls_assert();
		}
#endif
	}
#endif

	return;
}
787 | | #endif |
788 | | |
enum x86_cpu_vendor {
	X86_CPU_VENDOR_OTHER,
	X86_CPU_VENDOR_INTEL,
	X86_CPU_VENDOR_AMD,
	X86_CPU_VENDOR_HYGON,
};

/* Identify the CPU vendor from the CPUID leaf-0 identification string,
 * which is laid out across EBX, EDX, ECX in that order. */
static enum x86_cpu_vendor check_x86_cpu_vendor(void)
{
	static const struct {
		const char *ebx, *edx, *ecx;
		enum x86_cpu_vendor vendor;
	} vendors[] = {
		/* "GenuineIntel" */
		{ "Genu", "ineI", "ntel", X86_CPU_VENDOR_INTEL },
		/* "AuthenticAMD" */
		{ "Auth", "enti", "cAMD", X86_CPU_VENDOR_AMD },
		/* "HygonGenuine" */
		{ "Hygo", "nGen", "uine", X86_CPU_VENDOR_HYGON },
	};
	unsigned int a, b, c, d;
	size_t i;

	if (!__get_cpuid(0, &a, &b, &c, &d)) {
		return X86_CPU_VENDOR_OTHER;
	}

	for (i = 0; i < sizeof(vendors) / sizeof(vendors[0]); i++) {
		if (memcmp(&b, vendors[i].ebx, 4) == 0 &&
		    memcmp(&d, vendors[i].edx, 4) == 0 &&
		    memcmp(&c, vendors[i].ecx, 4) == 0) {
			return vendors[i].vendor;
		}
	}

	return X86_CPU_VENDOR_OTHER;
}
821 | | |
822 | | static void register_x86_intel_crypto(unsigned capabilities) |
823 | 4 | { |
824 | 4 | int ret; |
825 | 4 | enum x86_cpu_vendor vendor; |
826 | | |
827 | 4 | memset(GNUTLS_x86_cpuid_s, 0, sizeof(GNUTLS_x86_cpuid_s)); |
828 | | |
829 | 4 | vendor = check_x86_cpu_vendor(); |
830 | 4 | if (vendor == X86_CPU_VENDOR_OTHER) { |
831 | 0 | return; |
832 | 0 | } |
833 | | |
834 | 4 | if (capabilities == 0) { |
835 | 4 | if (!read_cpuid_vals(GNUTLS_x86_cpuid_s)) |
836 | 0 | return; |
837 | 4 | if (!check_4th_gen_intel_features(GNUTLS_x86_cpuid_s[1])) { |
838 | 0 | GNUTLS_x86_cpuid_s[1] &= ~bit_AVX; |
839 | | |
840 | | /* Clear AVX2 bits as well, according to what |
841 | | * OpenSSL does. Should we clear |
842 | | * bit_AVX512DQ, bit_AVX512PF, bit_AVX512ER, |
843 | | * and bit_AVX512CD? */ |
844 | 0 | GNUTLS_x86_cpuid_s[2] &= |
845 | 0 | ~(bit_AVX2 | bit_AVX512F | bit_AVX512IFMA | |
846 | 0 | bit_AVX512BW | bit_AVX512BW); |
847 | 0 | } |
848 | 4 | } else { |
849 | 0 | capabilities_to_intel_cpuid(capabilities); |
850 | 0 | } |
851 | | |
852 | | /* CRYPTOGAMS uses the (1 << 30) bit as an indicator of Intel CPUs */ |
853 | 4 | if (vendor == X86_CPU_VENDOR_INTEL) { |
854 | 4 | GNUTLS_x86_cpuid_s[0] |= 1 << 30; |
855 | 4 | } else { |
856 | 0 | GNUTLS_x86_cpuid_s[0] &= ~(1 << 30); |
857 | 0 | } |
858 | | |
859 | 4 | if (check_ssse3()) { |
860 | 4 | _gnutls_debug_log("Intel SSSE3 was detected\n"); |
861 | | |
862 | 4 | ret = gnutls_crypto_single_cipher_register( |
863 | 4 | GNUTLS_CIPHER_AES_128_GCM, 90, |
864 | 4 | &_gnutls_aes_gcm_x86_ssse3, 0); |
865 | 4 | if (ret < 0) { |
866 | 0 | gnutls_assert(); |
867 | 0 | } |
868 | | |
869 | 4 | ret = gnutls_crypto_single_cipher_register( |
870 | 4 | GNUTLS_CIPHER_AES_192_GCM, 90, |
871 | 4 | &_gnutls_aes_gcm_x86_ssse3, 0); |
872 | 4 | if (ret < 0) { |
873 | 0 | gnutls_assert(); |
874 | 0 | } |
875 | | |
876 | 4 | ret = gnutls_crypto_single_cipher_register( |
877 | 4 | GNUTLS_CIPHER_AES_256_GCM, 90, |
878 | 4 | &_gnutls_aes_gcm_x86_ssse3, 0); |
879 | 4 | if (ret < 0) { |
880 | 0 | gnutls_assert(); |
881 | 0 | } |
882 | | |
883 | 4 | ret = gnutls_crypto_single_cipher_register( |
884 | 4 | GNUTLS_CIPHER_AES_128_CBC, 90, &_gnutls_aes_ssse3, 0); |
885 | 4 | if (ret < 0) { |
886 | 0 | gnutls_assert(); |
887 | 0 | } |
888 | | |
889 | 4 | ret = gnutls_crypto_single_cipher_register( |
890 | 4 | GNUTLS_CIPHER_AES_192_CBC, 90, &_gnutls_aes_ssse3, 0); |
891 | 4 | if (ret < 0) { |
892 | 0 | gnutls_assert(); |
893 | 0 | } |
894 | | |
895 | 4 | ret = gnutls_crypto_single_cipher_register( |
896 | 4 | GNUTLS_CIPHER_AES_256_CBC, 90, &_gnutls_aes_ssse3, 0); |
897 | 4 | if (ret < 0) { |
898 | 0 | gnutls_assert(); |
899 | 0 | } |
900 | 4 | } |
901 | | |
902 | 4 | if (check_sha() || check_ssse3()) { |
903 | 4 | if (check_sha()) |
904 | 4 | _gnutls_debug_log("Intel SHA was detected\n"); |
905 | | |
906 | 4 | ret = gnutls_crypto_single_digest_register( |
907 | 4 | GNUTLS_DIG_SHA1, 80, &_gnutls_sha_x86_ssse3, 0); |
908 | 4 | if (ret < 0) { |
909 | 0 | gnutls_assert(); |
910 | 0 | } |
911 | | |
912 | 4 | ret = gnutls_crypto_single_digest_register( |
913 | 4 | GNUTLS_DIG_SHA224, 80, &_gnutls_sha_x86_ssse3, 0); |
914 | 4 | if (ret < 0) { |
915 | 0 | gnutls_assert(); |
916 | 0 | } |
917 | | |
918 | 4 | ret = gnutls_crypto_single_digest_register( |
919 | 4 | GNUTLS_DIG_SHA256, 80, &_gnutls_sha_x86_ssse3, 0); |
920 | 4 | if (ret < 0) { |
921 | 0 | gnutls_assert(); |
922 | 0 | } |
923 | | |
924 | 4 | #if defined(HAVE_LIBNETTLE) && defined(HMAC_SET_KEY) |
925 | 4 | ret = gnutls_crypto_single_mac_register( |
926 | 4 | GNUTLS_MAC_SHA1, 80, &_gnutls_hmac_sha_x86_ssse3, 0); |
927 | 4 | if (ret < 0) |
928 | 4 | gnutls_assert(); |
929 | | |
930 | 4 | ret = gnutls_crypto_single_mac_register( |
931 | 4 | GNUTLS_MAC_SHA224, 80, &_gnutls_hmac_sha_x86_ssse3, 0); |
932 | 4 | if (ret < 0) |
933 | 4 | gnutls_assert(); |
934 | | |
935 | 4 | ret = gnutls_crypto_single_mac_register( |
936 | 4 | GNUTLS_MAC_SHA256, 80, &_gnutls_hmac_sha_x86_ssse3, 0); |
937 | 4 | if (ret < 0) |
938 | 4 | gnutls_assert(); |
939 | 4 | #endif |
940 | | |
941 | 4 | ret = gnutls_crypto_single_digest_register( |
942 | 4 | GNUTLS_DIG_SHA384, 80, &_gnutls_sha_x86_ssse3, 0); |
943 | 4 | if (ret < 0) |
944 | 4 | gnutls_assert(); |
945 | | |
946 | 4 | ret = gnutls_crypto_single_digest_register( |
947 | 4 | GNUTLS_DIG_SHA512, 80, &_gnutls_sha_x86_ssse3, 0); |
948 | 4 | if (ret < 0) |
949 | 4 | gnutls_assert(); |
950 | 4 | #if defined(HAVE_LIBNETTLE) && defined(HMAC_SET_KEY) |
951 | 4 | ret = gnutls_crypto_single_mac_register( |
952 | 4 | GNUTLS_MAC_SHA384, 80, &_gnutls_hmac_sha_x86_ssse3, 0); |
953 | 4 | if (ret < 0) |
954 | 4 | gnutls_assert(); |
955 | | |
956 | 4 | ret = gnutls_crypto_single_mac_register( |
957 | 4 | GNUTLS_MAC_SHA512, 80, &_gnutls_hmac_sha_x86_ssse3, 0); |
958 | 4 | if (ret < 0) |
959 | 4 | gnutls_assert(); |
960 | 4 | #endif |
961 | 4 | } |
962 | | |
963 | 4 | if (check_optimized_aes()) { |
964 | 4 | _gnutls_debug_log("Intel AES accelerator was detected\n"); |
965 | 4 | ret = gnutls_crypto_single_cipher_register( |
966 | 4 | GNUTLS_CIPHER_AES_128_CBC, 80, &_gnutls_aesni_x86, 0); |
967 | 4 | if (ret < 0) { |
968 | 0 | gnutls_assert(); |
969 | 0 | } |
970 | | |
971 | 4 | ret = gnutls_crypto_single_cipher_register( |
972 | 4 | GNUTLS_CIPHER_AES_192_CBC, 80, &_gnutls_aesni_x86, 0); |
973 | 4 | if (ret < 0) { |
974 | 0 | gnutls_assert(); |
975 | 0 | } |
976 | | |
977 | 4 | ret = gnutls_crypto_single_cipher_register( |
978 | 4 | GNUTLS_CIPHER_AES_256_CBC, 80, &_gnutls_aesni_x86, 0); |
979 | 4 | if (ret < 0) { |
980 | 0 | gnutls_assert(); |
981 | 0 | } |
982 | | |
983 | 4 | ret = gnutls_crypto_single_cipher_register( |
984 | 4 | GNUTLS_CIPHER_AES_128_CCM, 80, |
985 | 4 | &_gnutls_aes_ccm_x86_aesni, 0); |
986 | 4 | if (ret < 0) { |
987 | 0 | gnutls_assert(); |
988 | 0 | } |
989 | | |
990 | 4 | ret = gnutls_crypto_single_cipher_register( |
991 | 4 | GNUTLS_CIPHER_AES_256_CCM, 80, |
992 | 4 | &_gnutls_aes_ccm_x86_aesni, 0); |
993 | 4 | if (ret < 0) { |
994 | 0 | gnutls_assert(); |
995 | 0 | } |
996 | | |
997 | 4 | ret = gnutls_crypto_single_cipher_register( |
998 | 4 | GNUTLS_CIPHER_AES_128_CCM_8, 80, |
999 | 4 | &_gnutls_aes_ccm_x86_aesni, 0); |
1000 | 4 | if (ret < 0) { |
1001 | 0 | gnutls_assert(); |
1002 | 0 | } |
1003 | | |
1004 | 4 | ret = gnutls_crypto_single_cipher_register( |
1005 | 4 | GNUTLS_CIPHER_AES_256_CCM_8, 80, |
1006 | 4 | &_gnutls_aes_ccm_x86_aesni, 0); |
1007 | 4 | if (ret < 0) { |
1008 | 0 | gnutls_assert(); |
1009 | 0 | } |
1010 | | |
1011 | 4 | ret = gnutls_crypto_single_cipher_register( |
1012 | 4 | GNUTLS_CIPHER_AES_128_XTS, 80, |
1013 | 4 | &_gnutls_aes_xts_x86_aesni, 0); |
1014 | 4 | if (ret < 0) { |
1015 | 0 | gnutls_assert(); |
1016 | 0 | } |
1017 | | |
1018 | 4 | ret = gnutls_crypto_single_cipher_register( |
1019 | 4 | GNUTLS_CIPHER_AES_256_XTS, 80, |
1020 | 4 | &_gnutls_aes_xts_x86_aesni, 0); |
1021 | 4 | if (ret < 0) { |
1022 | 0 | gnutls_assert(); |
1023 | 0 | } |
1024 | | |
1025 | 4 | #ifdef ASM_X86_64 |
1026 | 4 | if (check_pclmul()) { |
1027 | | /* register GCM ciphers */ |
1028 | 4 | if (check_avx_movbe()) { |
1029 | 4 | _gnutls_debug_log( |
1030 | 4 | "Intel GCM accelerator (AVX) was detected\n"); |
1031 | 4 | ret = gnutls_crypto_single_cipher_register( |
1032 | 4 | GNUTLS_CIPHER_AES_128_GCM, 80, |
1033 | 4 | &_gnutls_aes_gcm_pclmul_avx, 0); |
1034 | 4 | if (ret < 0) { |
1035 | 0 | gnutls_assert(); |
1036 | 0 | } |
1037 | | |
1038 | 4 | ret = gnutls_crypto_single_cipher_register( |
1039 | 4 | GNUTLS_CIPHER_AES_192_GCM, 80, |
1040 | 4 | &_gnutls_aes_gcm_pclmul_avx, 0); |
1041 | 4 | if (ret < 0) { |
1042 | 0 | gnutls_assert(); |
1043 | 0 | } |
1044 | | |
1045 | 4 | ret = gnutls_crypto_single_cipher_register( |
1046 | 4 | GNUTLS_CIPHER_AES_256_GCM, 80, |
1047 | 4 | &_gnutls_aes_gcm_pclmul_avx, 0); |
1048 | 4 | if (ret < 0) { |
1049 | 0 | gnutls_assert(); |
1050 | 0 | } |
1051 | 4 | } else { |
1052 | 0 | _gnutls_debug_log( |
1053 | 0 | "Intel GCM accelerator was detected\n"); |
1054 | 0 | ret = gnutls_crypto_single_cipher_register( |
1055 | 0 | GNUTLS_CIPHER_AES_128_GCM, 80, |
1056 | 0 | &_gnutls_aes_gcm_pclmul, 0); |
1057 | 0 | if (ret < 0) { |
1058 | 0 | gnutls_assert(); |
1059 | 0 | } |
1060 | |
|
1061 | 0 | ret = gnutls_crypto_single_cipher_register( |
1062 | 0 | GNUTLS_CIPHER_AES_192_GCM, 80, |
1063 | 0 | &_gnutls_aes_gcm_pclmul, 0); |
1064 | 0 | if (ret < 0) { |
1065 | 0 | gnutls_assert(); |
1066 | 0 | } |
1067 | |
|
1068 | 0 | ret = gnutls_crypto_single_cipher_register( |
1069 | 0 | GNUTLS_CIPHER_AES_256_GCM, 80, |
1070 | 0 | &_gnutls_aes_gcm_pclmul, 0); |
1071 | 0 | if (ret < 0) { |
1072 | 0 | gnutls_assert(); |
1073 | 0 | } |
1074 | 0 | } |
1075 | 4 | } else |
1076 | 0 | #endif |
1077 | 0 | { |
1078 | 0 | ret = gnutls_crypto_single_cipher_register( |
1079 | 0 | GNUTLS_CIPHER_AES_128_GCM, 80, |
1080 | 0 | &_gnutls_aes_gcm_x86_aesni, 0); |
1081 | 0 | if (ret < 0) { |
1082 | 0 | gnutls_assert(); |
1083 | 0 | } |
1084 | |
|
1085 | 0 | ret = gnutls_crypto_single_cipher_register( |
1086 | 0 | GNUTLS_CIPHER_AES_192_GCM, 80, |
1087 | 0 | &_gnutls_aes_gcm_x86_aesni, 0); |
1088 | 0 | if (ret < 0) { |
1089 | 0 | gnutls_assert(); |
1090 | 0 | } |
1091 | |
|
1092 | 0 | ret = gnutls_crypto_single_cipher_register( |
1093 | 0 | GNUTLS_CIPHER_AES_256_GCM, 80, |
1094 | 0 | &_gnutls_aes_gcm_x86_aesni, 0); |
1095 | 0 | if (ret < 0) { |
1096 | 0 | gnutls_assert(); |
1097 | 0 | } |
1098 | 0 | } |
1099 | 4 | } else { |
1100 | 0 | _gnutls_priority_update_non_aesni(); |
1101 | 0 | } |
1102 | | |
1103 | 4 | return; |
1104 | 4 | } |
1105 | | |
/* Entry point for x86 accelerated-crypto registration.
 *
 * The detected CPUID capabilities may be overridden through the
 * GNUTLS_CPUID_OVERRIDE environment variable; the value is parsed
 * with strtol() using base 0, so decimal, octal (0...) and hex
 * (0x...) forms are all accepted.  When the variable is unset the
 * capabilities default to zero and autodetection is used.
 */
void register_x86_crypto(void)
{
	const char *override = secure_getenv("GNUTLS_CPUID_OVERRIDE");
	unsigned capabilities = override ? strtol(override, NULL, 0) : 0;

	register_x86_intel_crypto(capabilities);
#ifdef ENABLE_PADLOCK
	register_x86_padlock_crypto(capabilities);
#endif
}