/src/openssl/engines/e_padlock.c
Line | Count | Source (jump to first uncovered line) |
1 | | /* |
2 | | * Copyright 2004-2016 The OpenSSL Project Authors. All Rights Reserved. |
3 | | * |
4 | | * Licensed under the OpenSSL license (the "License"). You may not use |
5 | | * this file except in compliance with the License. You can obtain a copy |
6 | | * in the file LICENSE in the source distribution or at |
7 | | * https://www.openssl.org/source/license.html |
8 | | */ |
9 | | |
10 | | #include <stdio.h> |
11 | | #include <string.h> |
12 | | |
13 | | #include <openssl/opensslconf.h> |
14 | | #include <openssl/crypto.h> |
15 | | #include <openssl/engine.h> |
16 | | #include <openssl/evp.h> |
17 | | #include <openssl/aes.h> |
18 | | #include <openssl/rand.h> |
19 | | #include <openssl/err.h> |
20 | | #include <openssl/modes.h> |
21 | | |
22 | | #ifndef OPENSSL_NO_HW |
23 | | # ifndef OPENSSL_NO_HW_PADLOCK |
24 | | |
25 | | /* Attempt to have a single source for both 0.9.7 and 0.9.8 :-) */ |
26 | | # if (OPENSSL_VERSION_NUMBER >= 0x00908000L) |
27 | | # ifndef OPENSSL_NO_DYNAMIC_ENGINE |
28 | | # define DYNAMIC_ENGINE |
29 | | # endif |
30 | | # elif (OPENSSL_VERSION_NUMBER >= 0x00907000L) |
31 | | # ifdef ENGINE_DYNAMIC_SUPPORT |
32 | | # define DYNAMIC_ENGINE |
33 | | # endif |
34 | | # else |
35 | | # error "Only OpenSSL >= 0.9.7 is supported" |
36 | | # endif |
37 | | |
38 | | /* |
39 | | * VIA PadLock AES is available *ONLY* on some x86 CPUs. Not only that it |
40 | | * doesn't exist elsewhere, but it even can't be compiled on other platforms! |
41 | | */ |
42 | | |
43 | | # undef COMPILE_HW_PADLOCK |
44 | | # if !defined(I386_ONLY) && defined(PADLOCK_ASM) |
45 | | # define COMPILE_HW_PADLOCK |
46 | | # ifdef OPENSSL_NO_DYNAMIC_ENGINE |
47 | | static ENGINE *ENGINE_padlock(void); |
48 | | # endif |
49 | | # endif |
50 | | |
51 | | # ifdef OPENSSL_NO_DYNAMIC_ENGINE |
52 | | void engine_load_padlock_int(void); |
void engine_load_padlock_int(void)
{
    /*
     * Register the PadLock engine with the global ENGINE list, if this
     * build actually compiled the x86-only implementation.
     * On non-x86 CPUs it just returns.
     */
# ifdef COMPILE_HW_PADLOCK
    ENGINE *toadd = ENGINE_padlock();
    if (!toadd)
        return;
    /* ENGINE_add() takes its own reference, so drop ours afterwards. */
    ENGINE_add(toadd);
    ENGINE_free(toadd);
    /* ENGINE_add() may leave a harmless "already added" error behind. */
    ERR_clear_error();
# endif
}
65 | | |
66 | | # endif |
67 | | |
68 | | # ifdef COMPILE_HW_PADLOCK |
69 | | |
70 | | /* Function for ENGINE detection and control */ |
71 | | static int padlock_available(void); |
72 | | static int padlock_init(ENGINE *e); |
73 | | |
74 | | /* RNG Stuff */ |
75 | | static RAND_METHOD padlock_rand; |
76 | | |
77 | | /* Cipher Stuff */ |
78 | | static int padlock_ciphers(ENGINE *e, const EVP_CIPHER **cipher, |
79 | | const int **nids, int nid); |
80 | | |
81 | | /* Engine names */ |
82 | | static const char *padlock_id = "padlock"; |
83 | | static char padlock_name[100]; |
84 | | |
85 | | /* Available features */ |
86 | | static int padlock_use_ace = 0; /* Advanced Cryptography Engine */ |
87 | | static int padlock_use_rng = 0; /* Random Number Generator */ |
88 | | |
89 | | /* ===== Engine "management" functions ===== */ |
90 | | |
91 | | /* Prepare the ENGINE structure for registration */ |
92 | | static int padlock_bind_helper(ENGINE *e) |
93 | 0 | { |
94 | 0 | /* Check available features */ |
95 | 0 | padlock_available(); |
96 | 0 |
|
97 | 0 | /* |
98 | 0 | * RNG is currently disabled for reasons discussed in commentary just |
99 | 0 | * before padlock_rand_bytes function. |
100 | 0 | */ |
101 | 0 | padlock_use_rng = 0; |
102 | 0 |
|
103 | 0 | /* Generate a nice engine name with available features */ |
104 | 0 | BIO_snprintf(padlock_name, sizeof(padlock_name), |
105 | 0 | "VIA PadLock (%s, %s)", |
106 | 0 | padlock_use_rng ? "RNG" : "no-RNG", |
107 | 0 | padlock_use_ace ? "ACE" : "no-ACE"); |
108 | 0 |
|
109 | 0 | /* Register everything or return with an error */ |
110 | 0 | if (!ENGINE_set_id(e, padlock_id) || |
111 | 0 | !ENGINE_set_name(e, padlock_name) || |
112 | 0 | !ENGINE_set_init_function(e, padlock_init) || |
113 | 0 | (padlock_use_ace && !ENGINE_set_ciphers(e, padlock_ciphers)) || |
114 | 0 | (padlock_use_rng && !ENGINE_set_RAND(e, &padlock_rand))) { |
115 | 0 | return 0; |
116 | 0 | } |
117 | 0 | |
118 | 0 | /* Everything looks good */ |
119 | 0 | return 1; |
120 | 0 | } |
121 | | |
122 | | # ifdef OPENSSL_NO_DYNAMIC_ENGINE |
123 | | /* Constructor */ |
124 | | static ENGINE *ENGINE_padlock(void) |
125 | 0 | { |
126 | 0 | ENGINE *eng = ENGINE_new(); |
127 | 0 |
|
128 | 0 | if (eng == NULL) { |
129 | 0 | return NULL; |
130 | 0 | } |
131 | 0 | |
132 | 0 | if (!padlock_bind_helper(eng)) { |
133 | 0 | ENGINE_free(eng); |
134 | 0 | return NULL; |
135 | 0 | } |
136 | 0 | |
137 | 0 | return eng; |
138 | 0 | } |
139 | | # endif |
140 | | |
141 | | /* Check availability of the engine */ |
142 | | static int padlock_init(ENGINE *e) |
143 | 0 | { |
144 | 0 | return (padlock_use_rng || padlock_use_ace); |
145 | 0 | } |
146 | | |
147 | | /* |
148 | | * This stuff is needed if this ENGINE is being compiled into a |
149 | | * self-contained shared-library. |
150 | | */ |
151 | | # ifdef DYNAMIC_ENGINE |
152 | | static int padlock_bind_fn(ENGINE *e, const char *id) |
153 | | { |
154 | | if (id && (strcmp(id, padlock_id) != 0)) { |
155 | | return 0; |
156 | | } |
157 | | |
158 | | if (!padlock_bind_helper(e)) { |
159 | | return 0; |
160 | | } |
161 | | |
162 | | return 1; |
163 | | } |
164 | | |
165 | | IMPLEMENT_DYNAMIC_CHECK_FN() |
166 | | IMPLEMENT_DYNAMIC_BIND_FN(padlock_bind_fn) |
167 | | # endif /* DYNAMIC_ENGINE */ |
168 | | /* ===== Here comes the "real" engine ===== */ |
169 | | |
170 | | /* Some AES-related constants */ |
171 | 0 | # define AES_BLOCK_SIZE 16 |
172 | 0 | # define AES_KEY_SIZE_128 16 |
173 | 0 | # define AES_KEY_SIZE_192 24 |
174 | 0 | # define AES_KEY_SIZE_256 32 |
175 | | /* |
176 | | * Here we store the status information relevant to the current context. |
177 | | */ |
178 | | /* |
179 | | * BIG FAT WARNING: Inline assembler in PADLOCK_XCRYPT_ASM() depends on |
180 | | * the order of items in this structure. Don't blindly modify, reorder, |
181 | | * etc! |
182 | | */ |
struct padlock_cipher_data {
    unsigned char iv[AES_BLOCK_SIZE]; /* Initialization vector */
    union {
        unsigned int pad[4];
        struct {
            int rounds:4;       /* 10/12/14 for 128/192/256-bit keys */
            int dgst:1;         /* n/a in C3 */
            int align:1;        /* n/a in C3 */
            int ciphr:1;        /* n/a in C3 */
            unsigned int keygen:1; /* 0 = hardware key schedule (AES-128 only) */
            int interm:1;
            unsigned int encdec:1; /* 1 = decrypt, 0 = encrypt */
            int ksize:2;        /* (key bits - 128) / 64 */
        } b;
    } cword;                    /* Control word */
    AES_KEY ks;                 /* Encryption key */
};
200 | | |
201 | | /* Interface to assembler module */ |
202 | | unsigned int padlock_capability(void); |
203 | | void padlock_key_bswap(AES_KEY *key); |
204 | | void padlock_verify_context(struct padlock_cipher_data *ctx); |
205 | | void padlock_reload_key(void); |
206 | | void padlock_aes_block(void *out, const void *inp, |
207 | | struct padlock_cipher_data *ctx); |
208 | | int padlock_ecb_encrypt(void *out, const void *inp, |
209 | | struct padlock_cipher_data *ctx, size_t len); |
210 | | int padlock_cbc_encrypt(void *out, const void *inp, |
211 | | struct padlock_cipher_data *ctx, size_t len); |
212 | | int padlock_cfb_encrypt(void *out, const void *inp, |
213 | | struct padlock_cipher_data *ctx, size_t len); |
214 | | int padlock_ofb_encrypt(void *out, const void *inp, |
215 | | struct padlock_cipher_data *ctx, size_t len); |
216 | | int padlock_ctr32_encrypt(void *out, const void *inp, |
217 | | struct padlock_cipher_data *ctx, size_t len); |
218 | | int padlock_xstore(void *out, int edx); |
219 | | void padlock_sha1_oneshot(void *ctx, const void *inp, size_t len); |
220 | | void padlock_sha1(void *ctx, const void *inp, size_t len); |
221 | | void padlock_sha256_oneshot(void *ctx, const void *inp, size_t len); |
222 | | void padlock_sha256(void *ctx, const void *inp, size_t len); |
223 | | |
224 | | /* |
225 | | * Load supported features of the CPU to see if the PadLock is available. |
226 | | */ |
227 | | static int padlock_available(void) |
228 | 0 | { |
229 | 0 | unsigned int edx = padlock_capability(); |
230 | 0 |
|
231 | 0 | /* Fill up some flags */ |
232 | 0 | padlock_use_ace = ((edx & (0x3 << 6)) == (0x3 << 6)); |
233 | 0 | padlock_use_rng = ((edx & (0x3 << 2)) == (0x3 << 2)); |
234 | 0 |
|
235 | 0 | return padlock_use_ace + padlock_use_rng; |
236 | 0 | } |
237 | | |
238 | | /* ===== AES encryption/decryption ===== */ |
239 | | |
240 | | # if defined(NID_aes_128_cfb128) && ! defined (NID_aes_128_cfb) |
241 | 0 | # define NID_aes_128_cfb NID_aes_128_cfb128 |
242 | | # endif |
243 | | |
244 | | # if defined(NID_aes_128_ofb128) && ! defined (NID_aes_128_ofb) |
245 | 0 | # define NID_aes_128_ofb NID_aes_128_ofb128 |
246 | | # endif |
247 | | |
248 | | # if defined(NID_aes_192_cfb128) && ! defined (NID_aes_192_cfb) |
249 | 0 | # define NID_aes_192_cfb NID_aes_192_cfb128 |
250 | | # endif |
251 | | |
252 | | # if defined(NID_aes_192_ofb128) && ! defined (NID_aes_192_ofb) |
253 | 0 | # define NID_aes_192_ofb NID_aes_192_ofb128 |
254 | | # endif |
255 | | |
256 | | # if defined(NID_aes_256_cfb128) && ! defined (NID_aes_256_cfb) |
257 | 0 | # define NID_aes_256_cfb NID_aes_256_cfb128 |
258 | | # endif |
259 | | |
260 | | # if defined(NID_aes_256_ofb128) && ! defined (NID_aes_256_ofb) |
261 | 0 | # define NID_aes_256_ofb NID_aes_256_ofb128 |
262 | | # endif |
263 | | |
264 | | /* List of supported ciphers. */ |
265 | | static const int padlock_cipher_nids[] = { |
266 | | NID_aes_128_ecb, |
267 | | NID_aes_128_cbc, |
268 | | NID_aes_128_cfb, |
269 | | NID_aes_128_ofb, |
270 | | NID_aes_128_ctr, |
271 | | |
272 | | NID_aes_192_ecb, |
273 | | NID_aes_192_cbc, |
274 | | NID_aes_192_cfb, |
275 | | NID_aes_192_ofb, |
276 | | NID_aes_192_ctr, |
277 | | |
278 | | NID_aes_256_ecb, |
279 | | NID_aes_256_cbc, |
280 | | NID_aes_256_cfb, |
281 | | NID_aes_256_ofb, |
282 | | NID_aes_256_ctr |
283 | | }; |
284 | | |
285 | | static int padlock_cipher_nids_num = (sizeof(padlock_cipher_nids) / |
286 | | sizeof(padlock_cipher_nids[0])); |
287 | | |
288 | | /* Function prototypes ... */ |
289 | | static int padlock_aes_init_key(EVP_CIPHER_CTX *ctx, const unsigned char *key, |
290 | | const unsigned char *iv, int enc); |
291 | | |
292 | 0 | # define NEAREST_ALIGNED(ptr) ( (unsigned char *)(ptr) + \ |
293 | 0 | ( (0x10 - ((size_t)(ptr) & 0x0F)) & 0x0F ) ) |
294 | 0 | # define ALIGNED_CIPHER_DATA(ctx) ((struct padlock_cipher_data *)\ |
295 | 0 | NEAREST_ALIGNED(EVP_CIPHER_CTX_get_cipher_data(ctx))) |
296 | | |
297 | | static int |
298 | | padlock_ecb_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out_arg, |
299 | | const unsigned char *in_arg, size_t nbytes) |
300 | 0 | { |
301 | 0 | return padlock_ecb_encrypt(out_arg, in_arg, |
302 | 0 | ALIGNED_CIPHER_DATA(ctx), nbytes); |
303 | 0 | } |
304 | | |
305 | | static int |
306 | | padlock_cbc_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out_arg, |
307 | | const unsigned char *in_arg, size_t nbytes) |
308 | 0 | { |
309 | 0 | struct padlock_cipher_data *cdata = ALIGNED_CIPHER_DATA(ctx); |
310 | 0 | int ret; |
311 | 0 |
|
312 | 0 | memcpy(cdata->iv, EVP_CIPHER_CTX_iv(ctx), AES_BLOCK_SIZE); |
313 | 0 | if ((ret = padlock_cbc_encrypt(out_arg, in_arg, cdata, nbytes))) |
314 | 0 | memcpy(EVP_CIPHER_CTX_iv_noconst(ctx), cdata->iv, AES_BLOCK_SIZE); |
315 | 0 | return ret; |
316 | 0 | } |
317 | | |
/*
 * CFB128 mode. Handles three phases: (1) drain any partial keystream
 * block left over from a previous call (tracked in ctx->num), (2) let
 * the hardware process all whole blocks, (3) generate one more
 * keystream block for a trailing partial block in software.
 */
static int
padlock_cfb_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out_arg,
                   const unsigned char *in_arg, size_t nbytes)
{
    struct padlock_cipher_data *cdata = ALIGNED_CIPHER_DATA(ctx);
    size_t chunk;

    if ((chunk = EVP_CIPHER_CTX_num(ctx))) { /* borrow chunk variable */
        unsigned char *ivp = EVP_CIPHER_CTX_iv_noconst(ctx);

        if (chunk >= AES_BLOCK_SIZE)
            return 0;           /* bogus value */

        /*
         * Byte-wise CFB: on encrypt the ciphertext feeds back into the
         * IV; on decrypt the incoming ciphertext byte is saved first.
         */
        if (EVP_CIPHER_CTX_encrypting(ctx))
            while (chunk < AES_BLOCK_SIZE && nbytes != 0) {
                ivp[chunk] = *(out_arg++) = *(in_arg++) ^ ivp[chunk];
                chunk++, nbytes--;
            } else
            while (chunk < AES_BLOCK_SIZE && nbytes != 0) {
                unsigned char c = *(in_arg++);
                *(out_arg++) = c ^ ivp[chunk];
                ivp[chunk++] = c, nbytes--;
            }

        EVP_CIPHER_CTX_set_num(ctx, chunk % AES_BLOCK_SIZE);
    }

    if (nbytes == 0)
        return 1;

    memcpy(cdata->iv, EVP_CIPHER_CTX_iv(ctx), AES_BLOCK_SIZE);

    /* Bulk phase: whole blocks go through the xcrypt unit. */
    if ((chunk = nbytes & ~(AES_BLOCK_SIZE - 1))) {
        if (!padlock_cfb_encrypt(out_arg, in_arg, cdata, chunk))
            return 0;
        nbytes -= chunk;
    }

    if (nbytes) {
        unsigned char *ivp = cdata->iv;

        out_arg += chunk;
        in_arg += chunk;
        EVP_CIPHER_CTX_set_num(ctx, nbytes);
        if (cdata->cword.b.encdec) {
            /*
             * Decrypting: CFB keystream is always produced by an AES
             * *encrypt* of the IV, so temporarily flip encdec.
             * padlock_reload_key() forces the unit to re-read the
             * modified control word.
             */
            cdata->cword.b.encdec = 0;
            padlock_reload_key();
            padlock_aes_block(ivp, ivp, cdata);
            cdata->cword.b.encdec = 1;
            padlock_reload_key();
            while (nbytes) {
                unsigned char c = *(in_arg++);
                *(out_arg++) = c ^ *ivp;
                *(ivp++) = c, nbytes--;
            }
        } else {
            padlock_reload_key();
            padlock_aes_block(ivp, ivp, cdata);
            padlock_reload_key();
            while (nbytes) {
                *ivp = *(out_arg++) = *(in_arg++) ^ *ivp;
                ivp++, nbytes--;
            }
        }
    }

    /* Persist the updated IV back into the EVP context. */
    memcpy(EVP_CIPHER_CTX_iv_noconst(ctx), cdata->iv, AES_BLOCK_SIZE);

    return 1;
}
388 | | |
/*
 * OFB128 mode. Same three-phase structure as CFB: drain the leftover
 * keystream bytes (ctx->num), bulk-process whole blocks in hardware,
 * then make one extra keystream block for a trailing partial block.
 * In OFB the keystream never depends on the data, so no feedback
 * bookkeeping is needed.
 */
static int
padlock_ofb_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out_arg,
                   const unsigned char *in_arg, size_t nbytes)
{
    struct padlock_cipher_data *cdata = ALIGNED_CIPHER_DATA(ctx);
    size_t chunk;

    /*
     * ctx->num is maintained in byte-oriented modes, such as CFB and OFB...
     */
    if ((chunk = EVP_CIPHER_CTX_num(ctx))) { /* borrow chunk variable */
        unsigned char *ivp = EVP_CIPHER_CTX_iv_noconst(ctx);

        if (chunk >= AES_BLOCK_SIZE)
            return 0;           /* bogus value */

        /* Use up the remainder of the previous keystream block. */
        while (chunk < AES_BLOCK_SIZE && nbytes != 0) {
            *(out_arg++) = *(in_arg++) ^ ivp[chunk];
            chunk++, nbytes--;
        }

        EVP_CIPHER_CTX_set_num(ctx, chunk % AES_BLOCK_SIZE);
    }

    if (nbytes == 0)
        return 1;

    memcpy(cdata->iv, EVP_CIPHER_CTX_iv(ctx), AES_BLOCK_SIZE);

    /* Bulk phase: whole blocks go through the xcrypt unit. */
    if ((chunk = nbytes & ~(AES_BLOCK_SIZE - 1))) {
        if (!padlock_ofb_encrypt(out_arg, in_arg, cdata, chunk))
            return 0;
        nbytes -= chunk;
    }

    if (nbytes) {
        unsigned char *ivp = cdata->iv;

        out_arg += chunk;
        in_arg += chunk;
        EVP_CIPHER_CTX_set_num(ctx, nbytes);
        padlock_reload_key();   /* empirically found */
        padlock_aes_block(ivp, ivp, cdata);
        padlock_reload_key();   /* empirically found */
        while (nbytes) {
            *(out_arg++) = *(in_arg++) ^ *ivp;
            ivp++, nbytes--;
        }
    }

    /* Persist the updated IV back into the EVP context. */
    memcpy(EVP_CIPHER_CTX_iv_noconst(ctx), cdata->iv, AES_BLOCK_SIZE);

    return 1;
}
443 | | |
444 | | static void padlock_ctr32_encrypt_glue(const unsigned char *in, |
445 | | unsigned char *out, size_t blocks, |
446 | | struct padlock_cipher_data *ctx, |
447 | | const unsigned char *ivec) |
448 | 0 | { |
449 | 0 | memcpy(ctx->iv, ivec, AES_BLOCK_SIZE); |
450 | 0 | padlock_ctr32_encrypt(out, in, ctx, AES_BLOCK_SIZE * blocks); |
451 | 0 | } |
452 | | |
/*
 * CTR mode: delegate all counter/partial-block bookkeeping to the
 * generic CRYPTO_ctr128_encrypt_ctr32() framework, with the PadLock
 * xcrypt unit doing the block encryptions via the glue callback.
 */
static int
padlock_ctr_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out_arg,
                   const unsigned char *in_arg, size_t nbytes)
{
    struct padlock_cipher_data *cdata = ALIGNED_CIPHER_DATA(ctx);
    unsigned int num = EVP_CIPHER_CTX_num(ctx);

    CRYPTO_ctr128_encrypt_ctr32(in_arg, out_arg, nbytes,
                                cdata, EVP_CIPHER_CTX_iv_noconst(ctx),
                                EVP_CIPHER_CTX_buf_noconst(ctx), &num,
                                (ctr128_f) padlock_ctr32_encrypt_glue);

    /* Preserve the partial-block offset for the next call. */
    EVP_CIPHER_CTX_set_num(ctx, (size_t)num);
    return 1;
}
468 | | |
469 | 0 | # define EVP_CIPHER_block_size_ECB AES_BLOCK_SIZE |
470 | 0 | # define EVP_CIPHER_block_size_CBC AES_BLOCK_SIZE |
471 | 0 | # define EVP_CIPHER_block_size_OFB 1 |
472 | 0 | # define EVP_CIPHER_block_size_CFB 1 |
473 | 0 | # define EVP_CIPHER_block_size_CTR 1 |
474 | | |
475 | | /* |
476 | | * Declaring so many ciphers by hand would be a pain. Instead introduce a bit |
477 | | * of preprocessor magic :-) |
478 | | */ |
/*
 * Each DECLARE_AES_EVP(ksize, lmode, umode) expansion defines a
 * lazily-constructed, cached EVP_CIPHER accessor
 * padlock_aes_<ksize>_<lmode>(). On first call it builds the cipher
 * method (block size, IV length, mode flags, init/do_cipher hooks and
 * ASN.1 IV handlers); on failure everything is freed and NULL is
 * returned. The +16 on the ctx size leaves room for the 16-byte
 * alignment performed by ALIGNED_CIPHER_DATA().
 */
# define DECLARE_AES_EVP(ksize,lmode,umode)      \
static EVP_CIPHER *_hidden_aes_##ksize##_##lmode = NULL; \
static const EVP_CIPHER *padlock_aes_##ksize##_##lmode(void) \
{                                                                       \
    if (_hidden_aes_##ksize##_##lmode == NULL                           \
        && ((_hidden_aes_##ksize##_##lmode =                            \
             EVP_CIPHER_meth_new(NID_aes_##ksize##_##lmode,             \
                                 EVP_CIPHER_block_size_##umode,         \
                                 AES_KEY_SIZE_##ksize)) == NULL         \
            || !EVP_CIPHER_meth_set_iv_length(_hidden_aes_##ksize##_##lmode, \
                                              AES_BLOCK_SIZE)           \
            || !EVP_CIPHER_meth_set_flags(_hidden_aes_##ksize##_##lmode, \
                                          0 | EVP_CIPH_##umode##_MODE)  \
            || !EVP_CIPHER_meth_set_init(_hidden_aes_##ksize##_##lmode, \
                                         padlock_aes_init_key)          \
            || !EVP_CIPHER_meth_set_do_cipher(_hidden_aes_##ksize##_##lmode, \
                                              padlock_##lmode##_cipher) \
            || !EVP_CIPHER_meth_set_impl_ctx_size(_hidden_aes_##ksize##_##lmode, \
                                                  sizeof(struct padlock_cipher_data) + 16) \
            || !EVP_CIPHER_meth_set_set_asn1_params(_hidden_aes_##ksize##_##lmode, \
                                                    EVP_CIPHER_set_asn1_iv) \
            || !EVP_CIPHER_meth_set_get_asn1_params(_hidden_aes_##ksize##_##lmode, \
                                                    EVP_CIPHER_get_asn1_iv))) { \
        EVP_CIPHER_meth_free(_hidden_aes_##ksize##_##lmode);            \
        _hidden_aes_##ksize##_##lmode = NULL;                           \
    }                                                                   \
    return _hidden_aes_##ksize##_##lmode;                               \
}
507 | | |
508 | | DECLARE_AES_EVP(128, ecb, ECB) |
509 | | DECLARE_AES_EVP(128, cbc, CBC) |
510 | | DECLARE_AES_EVP(128, cfb, CFB) |
511 | | DECLARE_AES_EVP(128, ofb, OFB) |
512 | | DECLARE_AES_EVP(128, ctr, CTR) |
513 | | |
514 | | DECLARE_AES_EVP(192, ecb, ECB) |
515 | | DECLARE_AES_EVP(192, cbc, CBC) |
516 | | DECLARE_AES_EVP(192, cfb, CFB) |
517 | | DECLARE_AES_EVP(192, ofb, OFB) |
518 | | DECLARE_AES_EVP(192, ctr, CTR) |
519 | | |
520 | | DECLARE_AES_EVP(256, ecb, ECB) |
521 | | DECLARE_AES_EVP(256, cbc, CBC) |
522 | | DECLARE_AES_EVP(256, cfb, CFB) |
523 | | DECLARE_AES_EVP(256, ofb, OFB) |
524 | | DECLARE_AES_EVP(256, ctr, CTR) |
525 | | |
526 | | static int |
527 | | padlock_ciphers(ENGINE *e, const EVP_CIPHER **cipher, const int **nids, |
528 | | int nid) |
529 | 0 | { |
530 | 0 | /* No specific cipher => return a list of supported nids ... */ |
531 | 0 | if (!cipher) { |
532 | 0 | *nids = padlock_cipher_nids; |
533 | 0 | return padlock_cipher_nids_num; |
534 | 0 | } |
535 | 0 | |
536 | 0 | /* ... or the requested "cipher" otherwise */ |
537 | 0 | switch (nid) { |
538 | 0 | case NID_aes_128_ecb: |
539 | 0 | *cipher = padlock_aes_128_ecb(); |
540 | 0 | break; |
541 | 0 | case NID_aes_128_cbc: |
542 | 0 | *cipher = padlock_aes_128_cbc(); |
543 | 0 | break; |
544 | 0 | case NID_aes_128_cfb: |
545 | 0 | *cipher = padlock_aes_128_cfb(); |
546 | 0 | break; |
547 | 0 | case NID_aes_128_ofb: |
548 | 0 | *cipher = padlock_aes_128_ofb(); |
549 | 0 | break; |
550 | 0 | case NID_aes_128_ctr: |
551 | 0 | *cipher = padlock_aes_128_ctr(); |
552 | 0 | break; |
553 | 0 |
|
554 | 0 | case NID_aes_192_ecb: |
555 | 0 | *cipher = padlock_aes_192_ecb(); |
556 | 0 | break; |
557 | 0 | case NID_aes_192_cbc: |
558 | 0 | *cipher = padlock_aes_192_cbc(); |
559 | 0 | break; |
560 | 0 | case NID_aes_192_cfb: |
561 | 0 | *cipher = padlock_aes_192_cfb(); |
562 | 0 | break; |
563 | 0 | case NID_aes_192_ofb: |
564 | 0 | *cipher = padlock_aes_192_ofb(); |
565 | 0 | break; |
566 | 0 | case NID_aes_192_ctr: |
567 | 0 | *cipher = padlock_aes_192_ctr(); |
568 | 0 | break; |
569 | 0 |
|
570 | 0 | case NID_aes_256_ecb: |
571 | 0 | *cipher = padlock_aes_256_ecb(); |
572 | 0 | break; |
573 | 0 | case NID_aes_256_cbc: |
574 | 0 | *cipher = padlock_aes_256_cbc(); |
575 | 0 | break; |
576 | 0 | case NID_aes_256_cfb: |
577 | 0 | *cipher = padlock_aes_256_cfb(); |
578 | 0 | break; |
579 | 0 | case NID_aes_256_ofb: |
580 | 0 | *cipher = padlock_aes_256_ofb(); |
581 | 0 | break; |
582 | 0 | case NID_aes_256_ctr: |
583 | 0 | *cipher = padlock_aes_256_ctr(); |
584 | 0 | break; |
585 | 0 |
|
586 | 0 | default: |
587 | 0 | /* Sorry, we don't support this NID */ |
588 | 0 | *cipher = NULL; |
589 | 0 | return 0; |
590 | 0 | } |
591 | 0 | |
592 | 0 | return 1; |
593 | 0 | } |
594 | | |
595 | | /* Prepare the encryption key for PadLock usage */ |
/*
 * EVP init hook: prepare the PadLock control word and key schedule for
 * the key/mode configured in ctx. Returns 1 on success, 0 on error.
 * The iv argument is not used here; IV handling happens in the
 * per-mode cipher routines, which copy it from the EVP context.
 */
static int
padlock_aes_init_key(EVP_CIPHER_CTX *ctx, const unsigned char *key,
                     const unsigned char *iv, int enc)
{
    struct padlock_cipher_data *cdata;
    int key_len = EVP_CIPHER_CTX_key_length(ctx) * 8; /* bytes -> bits */
    unsigned long mode = EVP_CIPHER_CTX_mode(ctx);

    if (key == NULL)
        return 0;               /* ERROR */

    cdata = ALIGNED_CIPHER_DATA(ctx);
    memset(cdata, 0, sizeof(*cdata));

    /* Prepare Control word. */
    if (mode == EVP_CIPH_OFB_MODE || mode == EVP_CIPH_CTR_MODE)
        /* OFB/CTR only ever encrypt the keystream block. */
        cdata->cword.b.encdec = 0;
    else
        cdata->cword.b.encdec = (EVP_CIPHER_CTX_encrypting(ctx) == 0);
    cdata->cword.b.rounds = 10 + (key_len - 128) / 32; /* 10/12/14 rounds */
    cdata->cword.b.ksize = (key_len - 128) / 64; /* 0/1/2 for 128/192/256 */

    switch (key_len) {
    case 128:
        /*
         * PadLock can generate an extended key for AES128 in hardware
         */
        memcpy(cdata->ks.rd_key, key, AES_KEY_SIZE_128);
        cdata->cword.b.keygen = 0;
        break;

    case 192:
    case 256:
        /*
         * Generate an extended AES key in software. Needed for AES192/AES256
         */
        /*
         * Well, the above applies to Stepping 8 CPUs and is listed as
         * hardware errata. They most likely will fix it at some point and
         * then a check for stepping would be due here.
         */
        /* ECB/CBC decryption needs the decryption schedule; all other
         * cases (and all encryption) use the encryption schedule. */
        if ((mode == EVP_CIPH_ECB_MODE || mode == EVP_CIPH_CBC_MODE)
            && !enc)
            AES_set_decrypt_key(key, key_len, &cdata->ks);
        else
            AES_set_encrypt_key(key, key_len, &cdata->ks);
# ifndef AES_ASM
        /*
         * OpenSSL C functions use byte-swapped extended key.
         */
        padlock_key_bswap(&cdata->ks);
# endif
        cdata->cword.b.keygen = 1;
        break;

    default:
        /* ERROR */
        return 0;
    }

    /*
     * This is done to cover for cases when user reuses the
     * context for new key. The catch is that if we don't do
     * this, padlock_eas_cipher might proceed with old key...
     */
    padlock_reload_key();

    return 1;
}
665 | | |
666 | | /* ===== Random Number Generator ===== */ |
667 | | /* |
668 | | * This code is not engaged. The reason is that it does not comply |
669 | | * with recommendations for VIA RNG usage for secure applications |
670 | | * (posted at http://www.via.com.tw/en/viac3/c3.jsp) nor does it |
671 | | * provide meaningful error control... |
672 | | */ |
673 | | /* |
674 | | * Wrapper that provides an interface between the API and the raw PadLock |
675 | | * RNG |
676 | | */ |
/*
 * Pull `count` random bytes from the PadLock xstore instruction into
 * `output`. Works in two phases: 8-byte chunks first, then single
 * bytes for the remainder. Each xstore's EAX status word is checked;
 * any quality-filter or availability failure aborts with 0.
 * Returns 1 on success, 0 on failure.
 */
static int padlock_rand_bytes(unsigned char *output, int count)
{
    unsigned int eax, buf;

    /* Phase 1: fetch 8 bytes at a time directly into output. */
    while (count >= 8) {
        eax = padlock_xstore(output, 0);
        if (!(eax & (1 << 6)))
            return 0;           /* RNG disabled */
        /* this ---vv--- covers DC bias, Raw Bits and String Filter */
        if (eax & (0x1F << 10))
            return 0;
        if ((eax & 0x1F) == 0)
            continue;           /* no data, retry... */
        if ((eax & 0x1F) != 8)
            return 0;           /* fatal failure... */
        output += 8;
        count -= 8;
    }
    /* Phase 2: fetch one byte at a time via a local buffer. */
    while (count > 0) {
        eax = padlock_xstore(&buf, 3);
        if (!(eax & (1 << 6)))
            return 0;           /* RNG disabled */
        /* this ---vv--- covers DC bias, Raw Bits and String Filter */
        if (eax & (0x1F << 10))
            return 0;
        if ((eax & 0x1F) == 0)
            continue;           /* no data, retry... */
        if ((eax & 0x1F) != 1)
            return 0;           /* fatal failure... */
        *output++ = (unsigned char)buf;
        count--;
    }
    /* Scrub the stack copy of random material. */
    OPENSSL_cleanse(&buf, sizeof(buf));

    return 1;
}
713 | | |
714 | | /* Dummy but necessary function */ |
/*
 * RAND_METHOD status callback. Required by the interface; always
 * reports the generator as ready.
 */
static int padlock_rand_status(void)
{
    return 1;
}
719 | | |
720 | | /* Prepare structure for registration */ |
/*
 * RAND_METHOD table for registration via ENGINE_set_RAND(). Only the
 * byte-generation and status callbacks are implemented; seeding,
 * cleanup and entropy-add are not applicable to the hardware RNG.
 */
static RAND_METHOD padlock_rand = {
    NULL,                       /* seed */
    padlock_rand_bytes,         /* bytes */
    NULL,                       /* cleanup */
    NULL,                       /* add */
    padlock_rand_bytes,         /* pseudorand */
    padlock_rand_status,        /* rand status */
};
729 | | |
730 | | # endif /* COMPILE_HW_PADLOCK */ |
731 | | # endif /* !OPENSSL_NO_HW_PADLOCK */ |
732 | | #endif /* !OPENSSL_NO_HW */ |
733 | | |
734 | | #if defined(OPENSSL_NO_HW) || defined(OPENSSL_NO_HW_PADLOCK) \ |
735 | | || !defined(COMPILE_HW_PADLOCK) |
736 | | # ifndef OPENSSL_NO_DYNAMIC_ENGINE |
737 | | OPENSSL_EXPORT |
738 | | int bind_engine(ENGINE *e, const char *id, const dynamic_fns *fns); |
OPENSSL_EXPORT
int bind_engine(ENGINE *e, const char *id, const dynamic_fns *fns)
{
    /*
     * Stub for builds without PadLock support: always refuse to bind
     * so the dynamic-engine loader fails cleanly.
     */
    return 0;
}
744 | | |
745 | | IMPLEMENT_DYNAMIC_CHECK_FN() |
746 | | # endif |
747 | | #endif |