/src/openssl111/crypto/evp/evp_enc.c
Line | Count | Source |
1 | | /* |
2 | | * Copyright 1995-2022 The OpenSSL Project Authors. All Rights Reserved. |
3 | | * |
4 | | * Licensed under the OpenSSL license (the "License"). You may not use |
5 | | * this file except in compliance with the License. You can obtain a copy |
6 | | * in the file LICENSE in the source distribution or at |
7 | | * https://www.openssl.org/source/license.html |
8 | | */ |
9 | | |
10 | | #include <stdio.h> |
11 | | #include <limits.h> |
12 | | #include <assert.h> |
13 | | #include "internal/cryptlib.h" |
14 | | #include <openssl/evp.h> |
15 | | #include <openssl/err.h> |
16 | | #include <openssl/rand.h> |
17 | | #include <openssl/rand_drbg.h> |
18 | | #include <openssl/engine.h> |
19 | | #include "crypto/evp.h" |
20 | | #include "evp_local.h" |
21 | | |
22 | | int EVP_CIPHER_CTX_reset(EVP_CIPHER_CTX *c) |
23 | 0 | { |
24 | 0 | if (c == NULL) |
25 | 0 | return 1; |
26 | 0 | if (c->cipher != NULL) { |
27 | 0 | if (c->cipher->cleanup && !c->cipher->cleanup(c)) |
28 | 0 | return 0; |
29 | | /* Cleanse cipher context data */ |
30 | 0 | if (c->cipher_data && c->cipher->ctx_size) |
31 | 0 | OPENSSL_cleanse(c->cipher_data, c->cipher->ctx_size); |
32 | 0 | } |
33 | 0 | OPENSSL_free(c->cipher_data); |
34 | 0 | #ifndef OPENSSL_NO_ENGINE |
35 | 0 | ENGINE_finish(c->engine); |
36 | 0 | #endif |
37 | 0 | memset(c, 0, sizeof(*c)); |
38 | 0 | return 1; |
39 | 0 | } |
40 | | |
41 | | EVP_CIPHER_CTX *EVP_CIPHER_CTX_new(void) |
42 | 0 | { |
43 | 0 | return OPENSSL_zalloc(sizeof(EVP_CIPHER_CTX)); |
44 | 0 | } |
45 | | |
46 | | void EVP_CIPHER_CTX_free(EVP_CIPHER_CTX *ctx) |
47 | 0 | { |
48 | 0 | EVP_CIPHER_CTX_reset(ctx); |
49 | 0 | OPENSSL_free(ctx); |
50 | 0 | } |
51 | | |
52 | | int EVP_CipherInit(EVP_CIPHER_CTX *ctx, const EVP_CIPHER *cipher, |
53 | | const unsigned char *key, const unsigned char *iv, int enc) |
54 | 0 | { |
55 | 0 | if (cipher != NULL) |
56 | 0 | EVP_CIPHER_CTX_reset(ctx); |
57 | 0 | return EVP_CipherInit_ex(ctx, cipher, NULL, key, iv, enc); |
58 | 0 | } |
59 | | |
60 | | int EVP_CipherInit_ex(EVP_CIPHER_CTX *ctx, const EVP_CIPHER *cipher, |
61 | | ENGINE *impl, const unsigned char *key, |
62 | | const unsigned char *iv, int enc) |
63 | 0 | { |
64 | 0 | if (enc == -1) |
65 | 0 | enc = ctx->encrypt; |
66 | 0 | else { |
67 | 0 | if (enc) |
68 | 0 | enc = 1; |
69 | 0 | ctx->encrypt = enc; |
70 | 0 | } |
71 | 0 | #ifndef OPENSSL_NO_ENGINE |
72 | | /* |
73 | | * Whether it's nice or not, "Inits" can be used on "Final"'d contexts so |
74 | | * this context may already have an ENGINE! Try to avoid releasing the |
75 | | * previous handle, re-querying for an ENGINE, and having a |
76 | | * reinitialisation, when it may all be unnecessary. |
77 | | */ |
78 | 0 | if (ctx->engine && ctx->cipher |
79 | 0 | && (cipher == NULL || cipher->nid == ctx->cipher->nid)) |
80 | 0 | goto skip_to_init; |
81 | 0 | #endif |
82 | 0 | if (cipher) { |
83 | | /* |
84 | | * Ensure a context left lying around from last time is cleared (the |
85 | | * previous check attempted to avoid this if the same ENGINE and |
86 | | * EVP_CIPHER could be used). |
87 | | */ |
88 | 0 | if (ctx->cipher |
89 | 0 | #ifndef OPENSSL_NO_ENGINE |
90 | 0 | || ctx->engine |
91 | 0 | #endif |
92 | 0 | || ctx->cipher_data) { |
93 | 0 | unsigned long flags = ctx->flags; |
94 | 0 | EVP_CIPHER_CTX_reset(ctx); |
95 | | /* Restore encrypt and flags */ |
96 | 0 | ctx->encrypt = enc; |
97 | 0 | ctx->flags = flags; |
98 | 0 | } |
99 | 0 | #ifndef OPENSSL_NO_ENGINE |
100 | 0 | if (impl) { |
101 | 0 | if (!ENGINE_init(impl)) { |
102 | 0 | EVPerr(EVP_F_EVP_CIPHERINIT_EX, EVP_R_INITIALIZATION_ERROR); |
103 | 0 | return 0; |
104 | 0 | } |
105 | 0 | } else |
106 | | /* Ask if an ENGINE is reserved for this job */ |
107 | 0 | impl = ENGINE_get_cipher_engine(cipher->nid); |
108 | 0 | if (impl) { |
109 | | /* There's an ENGINE for this job ... (apparently) */ |
110 | 0 | const EVP_CIPHER *c = ENGINE_get_cipher(impl, cipher->nid); |
111 | 0 | if (!c) { |
112 | 0 | ENGINE_finish(impl); |
113 | 0 | EVPerr(EVP_F_EVP_CIPHERINIT_EX, EVP_R_INITIALIZATION_ERROR); |
114 | 0 | return 0; |
115 | 0 | } |
116 | | /* We'll use the ENGINE's private cipher definition */ |
117 | 0 | cipher = c; |
118 | | /* |
119 | | * Store the ENGINE functional reference so we know 'cipher' came |
120 | | * from an ENGINE and we need to release it when done. |
121 | | */ |
122 | 0 | ctx->engine = impl; |
123 | 0 | } else |
124 | 0 | ctx->engine = NULL; |
125 | 0 | #endif |
126 | | |
127 | 0 | ctx->cipher = cipher; |
128 | 0 | if (ctx->cipher->ctx_size) { |
129 | 0 | ctx->cipher_data = OPENSSL_zalloc(ctx->cipher->ctx_size); |
130 | 0 | if (ctx->cipher_data == NULL) { |
131 | 0 | ctx->cipher = NULL; |
132 | 0 | EVPerr(EVP_F_EVP_CIPHERINIT_EX, ERR_R_MALLOC_FAILURE); |
133 | 0 | return 0; |
134 | 0 | } |
135 | 0 | } else { |
136 | 0 | ctx->cipher_data = NULL; |
137 | 0 | } |
138 | 0 | ctx->key_len = cipher->key_len; |
139 | | /* Preserve wrap enable flag, zero everything else */ |
140 | 0 | ctx->flags &= EVP_CIPHER_CTX_FLAG_WRAP_ALLOW; |
141 | 0 | if (ctx->cipher->flags & EVP_CIPH_CTRL_INIT) { |
142 | 0 | if (!EVP_CIPHER_CTX_ctrl(ctx, EVP_CTRL_INIT, 0, NULL)) { |
143 | 0 | ctx->cipher = NULL; |
144 | 0 | EVPerr(EVP_F_EVP_CIPHERINIT_EX, EVP_R_INITIALIZATION_ERROR); |
145 | 0 | return 0; |
146 | 0 | } |
147 | 0 | } |
148 | 0 | } else if (!ctx->cipher) { |
149 | 0 | EVPerr(EVP_F_EVP_CIPHERINIT_EX, EVP_R_NO_CIPHER_SET); |
150 | 0 | return 0; |
151 | 0 | } |
152 | 0 | #ifndef OPENSSL_NO_ENGINE |
153 | 0 | skip_to_init: |
154 | 0 | #endif |
155 | | /* we assume block size is a power of 2 in *cryptUpdate */ |
156 | 0 | OPENSSL_assert(ctx->cipher->block_size == 1 |
157 | 0 | || ctx->cipher->block_size == 8 |
158 | 0 | || ctx->cipher->block_size == 16); |
159 | |
160 | 0 | if (!(ctx->flags & EVP_CIPHER_CTX_FLAG_WRAP_ALLOW) |
161 | 0 | && EVP_CIPHER_CTX_mode(ctx) == EVP_CIPH_WRAP_MODE) { |
162 | 0 | EVPerr(EVP_F_EVP_CIPHERINIT_EX, EVP_R_WRAP_MODE_NOT_ALLOWED); |
163 | 0 | return 0; |
164 | 0 | } |
165 | | |
166 | 0 | if (!(EVP_CIPHER_flags(EVP_CIPHER_CTX_cipher(ctx)) & EVP_CIPH_CUSTOM_IV)) { |
167 | 0 | switch (EVP_CIPHER_CTX_mode(ctx)) { |
168 | | |
169 | 0 | case EVP_CIPH_STREAM_CIPHER: |
170 | 0 | case EVP_CIPH_ECB_MODE: |
171 | 0 | break; |
172 | | |
173 | 0 | case EVP_CIPH_CFB_MODE: |
174 | 0 | case EVP_CIPH_OFB_MODE: |
175 | |
176 | 0 | ctx->num = 0; |
177 | | /* fall-through */ |
178 | |
179 | 0 | case EVP_CIPH_CBC_MODE: |
180 | |
181 | 0 | OPENSSL_assert(EVP_CIPHER_CTX_iv_length(ctx) <= |
182 | 0 | (int)sizeof(ctx->iv)); |
183 | 0 | if (iv) |
184 | 0 | memcpy(ctx->oiv, iv, EVP_CIPHER_CTX_iv_length(ctx)); |
185 | 0 | memcpy(ctx->iv, ctx->oiv, EVP_CIPHER_CTX_iv_length(ctx)); |
186 | 0 | break; |
187 | | |
188 | 0 | case EVP_CIPH_CTR_MODE: |
189 | 0 | ctx->num = 0; |
190 | | /* Don't reuse IV for CTR mode */ |
191 | 0 | if (iv) |
192 | 0 | memcpy(ctx->iv, iv, EVP_CIPHER_CTX_iv_length(ctx)); |
193 | 0 | break; |
194 | | |
195 | 0 | default: |
196 | 0 | return 0; |
197 | 0 | } |
198 | 0 | } |
199 | | |
200 | 0 | if (key || (ctx->cipher->flags & EVP_CIPH_ALWAYS_CALL_INIT)) { |
201 | 0 | if (!ctx->cipher->init(ctx, key, iv, enc)) |
202 | 0 | return 0; |
203 | 0 | } |
204 | 0 | ctx->buf_len = 0; |
205 | 0 | ctx->final_used = 0; |
206 | 0 | ctx->block_mask = ctx->cipher->block_size - 1; |
207 | 0 | return 1; |
208 | 0 | } |
209 | | |
210 | | int EVP_CipherUpdate(EVP_CIPHER_CTX *ctx, unsigned char *out, int *outl, |
211 | | const unsigned char *in, int inl) |
212 | 0 | { |
213 | 0 | if (ctx->encrypt) |
214 | 0 | return EVP_EncryptUpdate(ctx, out, outl, in, inl); |
215 | 0 | else |
216 | 0 | return EVP_DecryptUpdate(ctx, out, outl, in, inl); |
217 | 0 | } |
218 | | |
219 | | int EVP_CipherFinal_ex(EVP_CIPHER_CTX *ctx, unsigned char *out, int *outl) |
220 | 0 | { |
221 | 0 | if (ctx->encrypt) |
222 | 0 | return EVP_EncryptFinal_ex(ctx, out, outl); |
223 | 0 | else |
224 | 0 | return EVP_DecryptFinal_ex(ctx, out, outl); |
225 | 0 | } |
226 | | |
227 | | int EVP_CipherFinal(EVP_CIPHER_CTX *ctx, unsigned char *out, int *outl) |
228 | 0 | { |
229 | 0 | if (ctx->encrypt) |
230 | 0 | return EVP_EncryptFinal(ctx, out, outl); |
231 | 0 | else |
232 | 0 | return EVP_DecryptFinal(ctx, out, outl); |
233 | 0 | } |
234 | | |
235 | | int EVP_EncryptInit(EVP_CIPHER_CTX *ctx, const EVP_CIPHER *cipher, |
236 | | const unsigned char *key, const unsigned char *iv) |
237 | 0 | { |
238 | 0 | return EVP_CipherInit(ctx, cipher, key, iv, 1); |
239 | 0 | } |
240 | | |
241 | | int EVP_EncryptInit_ex(EVP_CIPHER_CTX *ctx, const EVP_CIPHER *cipher, |
242 | | ENGINE *impl, const unsigned char *key, |
243 | | const unsigned char *iv) |
244 | 0 | { |
245 | 0 | return EVP_CipherInit_ex(ctx, cipher, impl, key, iv, 1); |
246 | 0 | } |
247 | | |
248 | | int EVP_DecryptInit(EVP_CIPHER_CTX *ctx, const EVP_CIPHER *cipher, |
249 | | const unsigned char *key, const unsigned char *iv) |
250 | 0 | { |
251 | 0 | return EVP_CipherInit(ctx, cipher, key, iv, 0); |
252 | 0 | } |
253 | | |
254 | | int EVP_DecryptInit_ex(EVP_CIPHER_CTX *ctx, const EVP_CIPHER *cipher, |
255 | | ENGINE *impl, const unsigned char *key, |
256 | | const unsigned char *iv) |
257 | 0 | { |
258 | 0 | return EVP_CipherInit_ex(ctx, cipher, impl, key, iv, 0); |
259 | 0 | } |
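
/*
 * Illustrative caller-side sketch, not part of evp_enc.c: the
 * init/update/final sequence the entry points above are built for.
 * The cipher choice, key/IV sizes and buffer handling here are
 * assumptions for the example; out must have room for inl plus one
 * extra block of padding.
 */
static int example_encrypt(const unsigned char *key, const unsigned char *iv,
                           const unsigned char *in, int inl,
                           unsigned char *out, int *outl)
{
    EVP_CIPHER_CTX *ctx = EVP_CIPHER_CTX_new();
    int len = 0, total = 0, ok = 0;

    if (ctx == NULL)
        return 0;
    /* Selects AES-256-CBC, allocates cipher_data and copies the IV */
    if (!EVP_EncryptInit_ex(ctx, EVP_aes_256_cbc(), NULL, key, iv))
        goto end;
    /* May hold back a partial block in ctx->buf; len reports what was written */
    if (!EVP_EncryptUpdate(ctx, out, &len, in, inl))
        goto end;
    total = len;
    /* Flushes the buffered bytes plus PKCS#7 padding as one final block */
    if (!EVP_EncryptFinal_ex(ctx, out + total, &len))
        goto end;
    total += len;
    *outl = total;
    ok = 1;
 end:
    EVP_CIPHER_CTX_free(ctx);
    return ok;
}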
260 | | |
261 | | /* |
262 | | * According to the letter of standard difference between pointers |
263 | | * is specified to be valid only within same object. This makes |
264 | | * it formally challenging to determine if input and output buffers |
265 | | * are not partially overlapping with standard pointer arithmetic. |
266 | | */ |
267 | | #ifdef PTRDIFF_T |
268 | | # undef PTRDIFF_T |
269 | | #endif |
270 | | #if defined(OPENSSL_SYS_VMS) && __INITIAL_POINTER_SIZE==64 |
271 | | /* |
272 | | * Then we have VMS that distinguishes itself by adhering to |
273 | | * sizeof(size_t)==4 even in 64-bit builds, which means that |
274 | | * difference between two pointers might be truncated to 32 bits. |
275 | | * In the context one can even wonder how comparison for |
276 | | * equality is implemented. To be on the safe side we adhere to |
277 | | * PTRDIFF_T even for comparison for equality. |
278 | | */ |
279 | | # define PTRDIFF_T uint64_t |
280 | | #else |
281 | 0 | # define PTRDIFF_T size_t |
282 | | #endif |
283 | | |
284 | | int is_partially_overlapping(const void *ptr1, const void *ptr2, size_t len) |
285 | 0 | { |
286 | 0 | PTRDIFF_T diff = (PTRDIFF_T)ptr1-(PTRDIFF_T)ptr2; |
287 | | /* |
288 | | * Check for partially overlapping buffers. [Binary logical |
289 | | * operations are used instead of boolean to minimize number |
290 | | * of conditional branches.] |
291 | | */ |
292 | 0 | int overlapped = (len > 0) & (diff != 0) & ((diff < (PTRDIFF_T)len) | |
293 | 0 | (diff > (0 - (PTRDIFF_T)len))); |
294 | |
295 | 0 | return overlapped; |
296 | 0 | } |
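
/*
 * Minimal sketch of how the check above classifies pointers (illustrative,
 * not part of the original file); the buffer and offsets are hypothetical.
 */
static void overlap_demo(void)
{
    unsigned char buf[64];

    /* diff == 0: in-place operation, not treated as overlapping */
    (void)is_partially_overlapping(buf, buf, 16);        /* returns 0 */
    /* 0 < diff < len: output starts inside the input, rejected by callers */
    (void)is_partially_overlapping(buf + 8, buf, 16);    /* returns 1 */
    /* regions at least len apart in either direction are fine */
    (void)is_partially_overlapping(buf + 16, buf, 16);   /* returns 0 */
    (void)is_partially_overlapping(buf, buf + 16, 16);   /* returns 0 */
}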
297 | | |
298 | | static int evp_EncryptDecryptUpdate(EVP_CIPHER_CTX *ctx, |
299 | | unsigned char *out, int *outl, |
300 | | const unsigned char *in, int inl) |
301 | 0 | { |
302 | 0 | int i, j, bl; |
303 | 0 | size_t cmpl = (size_t)inl; |
304 | |
305 | 0 | if (EVP_CIPHER_CTX_test_flags(ctx, EVP_CIPH_FLAG_LENGTH_BITS)) |
306 | 0 | cmpl = (cmpl + 7) / 8; |
307 | |
308 | 0 | bl = ctx->cipher->block_size; |
309 | | |
310 | | /* |
311 | | * CCM mode needs to know about the case where inl == 0 && in == NULL - it |
312 | | * means the plaintext/ciphertext length is 0 |
313 | | */ |
314 | 0 | if (inl < 0 |
315 | 0 | || (inl == 0 |
316 | 0 | && EVP_CIPHER_mode(ctx->cipher) != EVP_CIPH_CCM_MODE)) { |
317 | 0 | *outl = 0; |
318 | 0 | return inl == 0; |
319 | 0 | } |
320 | | |
321 | 0 | if (ctx->cipher->flags & EVP_CIPH_FLAG_CUSTOM_CIPHER) { |
322 | | /* If block size > 1 then the cipher will have to do this check */ |
323 | 0 | if (bl == 1 && is_partially_overlapping(out, in, cmpl)) { |
324 | 0 | EVPerr(EVP_F_EVP_ENCRYPTDECRYPTUPDATE, EVP_R_PARTIALLY_OVERLAPPING); |
325 | 0 | return 0; |
326 | 0 | } |
327 | | |
328 | 0 | i = ctx->cipher->do_cipher(ctx, out, in, inl); |
329 | 0 | if (i < 0) |
330 | 0 | return 0; |
331 | 0 | else |
332 | 0 | *outl = i; |
333 | 0 | return 1; |
334 | 0 | } |
335 | | |
336 | 0 | if (is_partially_overlapping(out + ctx->buf_len, in, cmpl)) { |
337 | 0 | EVPerr(EVP_F_EVP_ENCRYPTDECRYPTUPDATE, EVP_R_PARTIALLY_OVERLAPPING); |
338 | 0 | return 0; |
339 | 0 | } |
340 | | |
341 | 0 | if (ctx->buf_len == 0 && (inl & (ctx->block_mask)) == 0) { |
342 | 0 | if (ctx->cipher->do_cipher(ctx, out, in, inl)) { |
343 | 0 | *outl = inl; |
344 | 0 | return 1; |
345 | 0 | } else { |
346 | 0 | *outl = 0; |
347 | 0 | return 0; |
348 | 0 | } |
349 | 0 | } |
350 | 0 | i = ctx->buf_len; |
351 | 0 | OPENSSL_assert(bl <= (int)sizeof(ctx->buf)); |
352 | 0 | if (i != 0) { |
353 | 0 | if (bl - i > inl) { |
354 | 0 | memcpy(&(ctx->buf[i]), in, inl); |
355 | 0 | ctx->buf_len += inl; |
356 | 0 | *outl = 0; |
357 | 0 | return 1; |
358 | 0 | } else { |
359 | 0 | j = bl - i; |
360 | | |
361 | | /* |
362 | | * Once we've processed the first j bytes from in, the amount of |
363 | | * data left that is a multiple of the block length is: |
364 | | * (inl - j) & ~(bl - 1) |
365 | | * We must ensure that this amount of data, plus the one block that |
366 | | * we process from ctx->buf does not exceed INT_MAX |
367 | | */ |
368 | 0 | if (((inl - j) & ~(bl - 1)) > INT_MAX - bl) { |
369 | 0 | EVPerr(EVP_F_EVP_ENCRYPTDECRYPTUPDATE, |
370 | 0 | EVP_R_OUTPUT_WOULD_OVERFLOW); |
371 | 0 | return 0; |
372 | 0 | } |
373 | 0 | memcpy(&(ctx->buf[i]), in, j); |
374 | 0 | inl -= j; |
375 | 0 | in += j; |
376 | 0 | if (!ctx->cipher->do_cipher(ctx, out, ctx->buf, bl)) |
377 | 0 | return 0; |
378 | 0 | out += bl; |
379 | 0 | *outl = bl; |
380 | 0 | } |
381 | 0 | } else |
382 | 0 | *outl = 0; |
383 | 0 | i = inl & (bl - 1); |
384 | 0 | inl -= i; |
385 | 0 | if (inl > 0) { |
386 | 0 | if (!ctx->cipher->do_cipher(ctx, out, in, inl)) |
387 | 0 | return 0; |
388 | 0 | *outl += inl; |
389 | 0 | } |
390 | | |
391 | 0 | if (i != 0) |
392 | 0 | memcpy(ctx->buf, &(in[inl]), i); |
393 | 0 | ctx->buf_len = i; |
394 | 0 | return 1; |
395 | 0 | } |
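
/*
 * Worked example of the buffering above (illustrative numbers): with a
 * 16-byte block size, 5 bytes already in ctx->buf and inl == 30, the first
 * 11 input bytes complete the buffered block (16 bytes written), the next
 * 16 input bytes are ciphered directly (32 bytes written in total), and the
 * remaining 3 bytes are stored in ctx->buf with buf_len == 3 for the next
 * call, so *outl == 32.
 */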
396 | | |
397 | | |
398 | | int EVP_EncryptUpdate(EVP_CIPHER_CTX *ctx, unsigned char *out, int *outl, |
399 | | const unsigned char *in, int inl) |
400 | 0 | { |
401 | | /* Prevent accidental use of decryption context when encrypting */ |
402 | 0 | if (!ctx->encrypt) { |
403 | 0 | EVPerr(EVP_F_EVP_ENCRYPTUPDATE, EVP_R_INVALID_OPERATION); |
404 | 0 | return 0; |
405 | 0 | } |
406 | | |
407 | 0 | return evp_EncryptDecryptUpdate(ctx, out, outl, in, inl); |
408 | 0 | } |
409 | | |
410 | | int EVP_EncryptFinal(EVP_CIPHER_CTX *ctx, unsigned char *out, int *outl) |
411 | 0 | { |
412 | 0 | int ret; |
413 | 0 | ret = EVP_EncryptFinal_ex(ctx, out, outl); |
414 | 0 | return ret; |
415 | 0 | } |
416 | | |
417 | | int EVP_EncryptFinal_ex(EVP_CIPHER_CTX *ctx, unsigned char *out, int *outl) |
418 | 0 | { |
419 | 0 | int n, ret; |
420 | 0 | unsigned int i, b, bl; |
421 | | |
422 | | /* Prevent accidental use of decryption context when encrypting */ |
423 | 0 | if (!ctx->encrypt) { |
424 | 0 | EVPerr(EVP_F_EVP_ENCRYPTFINAL_EX, EVP_R_INVALID_OPERATION); |
425 | 0 | return 0; |
426 | 0 | } |
427 | | |
428 | 0 | if (ctx->cipher->flags & EVP_CIPH_FLAG_CUSTOM_CIPHER) { |
429 | 0 | ret = ctx->cipher->do_cipher(ctx, out, NULL, 0); |
430 | 0 | if (ret < 0) |
431 | 0 | return 0; |
432 | 0 | else |
433 | 0 | *outl = ret; |
434 | 0 | return 1; |
435 | 0 | } |
436 | | |
437 | 0 | b = ctx->cipher->block_size; |
438 | 0 | OPENSSL_assert(b <= sizeof(ctx->buf)); |
439 | 0 | if (b == 1) { |
440 | 0 | *outl = 0; |
441 | 0 | return 1; |
442 | 0 | } |
443 | 0 | bl = ctx->buf_len; |
444 | 0 | if (ctx->flags & EVP_CIPH_NO_PADDING) { |
445 | 0 | if (bl) { |
446 | 0 | EVPerr(EVP_F_EVP_ENCRYPTFINAL_EX, |
447 | 0 | EVP_R_DATA_NOT_MULTIPLE_OF_BLOCK_LENGTH); |
448 | 0 | return 0; |
449 | 0 | } |
450 | 0 | *outl = 0; |
451 | 0 | return 1; |
452 | 0 | } |
453 | | |
454 | 0 | n = b - bl; |
455 | 0 | for (i = bl; i < b; i++) |
456 | 0 | ctx->buf[i] = n; |
457 | 0 | ret = ctx->cipher->do_cipher(ctx, out, ctx->buf, b); |
458 | |
459 | 0 | if (ret) |
460 | 0 | *outl = b; |
461 | |
462 | 0 | return ret; |
463 | 0 | } |
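
/*
 * Padding example for the final block above (illustrative): with a 16-byte
 * block cipher and bl == 11 buffered bytes, n == 5 and the block passed to
 * do_cipher is the 11 data bytes followed by 05 05 05 05 05; with bl == 0
 * a whole block of 16 bytes valued 0x10 is emitted instead, so padded
 * ciphertext always grows by 1 to 16 bytes.
 */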
464 | | |
465 | | int EVP_DecryptUpdate(EVP_CIPHER_CTX *ctx, unsigned char *out, int *outl, |
466 | | const unsigned char *in, int inl) |
467 | 0 | { |
468 | 0 | int fix_len; |
469 | 0 | unsigned int b; |
470 | 0 | size_t cmpl = (size_t)inl; |
471 | | |
472 | | /* Prevent accidental use of encryption context when decrypting */ |
473 | 0 | if (ctx->encrypt) { |
474 | 0 | EVPerr(EVP_F_EVP_DECRYPTUPDATE, EVP_R_INVALID_OPERATION); |
475 | 0 | return 0; |
476 | 0 | } |
477 | | |
478 | 0 | b = ctx->cipher->block_size; |
479 | |
480 | 0 | if (EVP_CIPHER_CTX_test_flags(ctx, EVP_CIPH_FLAG_LENGTH_BITS)) |
481 | 0 | cmpl = (cmpl + 7) / 8; |
482 | | |
483 | | /* |
484 | | * CCM mode needs to know about the case where inl == 0 - it means the |
485 | | * plaintext/ciphertext length is 0 |
486 | | */ |
487 | 0 | if (inl < 0 |
488 | 0 | || (inl == 0 |
489 | 0 | && EVP_CIPHER_mode(ctx->cipher) != EVP_CIPH_CCM_MODE)) { |
490 | 0 | *outl = 0; |
491 | 0 | return inl == 0; |
492 | 0 | } |
493 | | |
494 | 0 | if (ctx->cipher->flags & EVP_CIPH_FLAG_CUSTOM_CIPHER) { |
495 | 0 | if (b == 1 && is_partially_overlapping(out, in, cmpl)) { |
496 | 0 | EVPerr(EVP_F_EVP_DECRYPTUPDATE, EVP_R_PARTIALLY_OVERLAPPING); |
497 | 0 | return 0; |
498 | 0 | } |
499 | | |
500 | 0 | fix_len = ctx->cipher->do_cipher(ctx, out, in, inl); |
501 | 0 | if (fix_len < 0) { |
502 | 0 | *outl = 0; |
503 | 0 | return 0; |
504 | 0 | } else |
505 | 0 | *outl = fix_len; |
506 | 0 | return 1; |
507 | 0 | } |
508 | | |
509 | 0 | if (ctx->flags & EVP_CIPH_NO_PADDING) |
510 | 0 | return evp_EncryptDecryptUpdate(ctx, out, outl, in, inl); |
511 | | |
512 | 0 | OPENSSL_assert(b <= sizeof(ctx->final)); |
513 | |
514 | 0 | if (ctx->final_used) { |
515 | | /* see comment about PTRDIFF_T comparison above */ |
516 | 0 | if (((PTRDIFF_T)out == (PTRDIFF_T)in) |
517 | 0 | || is_partially_overlapping(out, in, b)) { |
518 | 0 | EVPerr(EVP_F_EVP_DECRYPTUPDATE, EVP_R_PARTIALLY_OVERLAPPING); |
519 | 0 | return 0; |
520 | 0 | } |
521 | | /* |
522 | | * final_used is only ever set if buf_len is 0. Therefore the maximum |
523 | | * length output we will ever see from evp_EncryptDecryptUpdate is |
524 | | * the maximum multiple of the block length that is <= inl, or just: |
525 | | * inl & ~(b - 1) |
526 | | * Since final_used has been set then the final output length is: |
527 | | * (inl & ~(b - 1)) + b |
528 | | * This must never exceed INT_MAX |
529 | | */ |
530 | 0 | if ((inl & ~(b - 1)) > INT_MAX - b) { |
531 | 0 | EVPerr(EVP_F_EVP_DECRYPTUPDATE, EVP_R_OUTPUT_WOULD_OVERFLOW); |
532 | 0 | return 0; |
533 | 0 | } |
534 | 0 | memcpy(out, ctx->final, b); |
535 | 0 | out += b; |
536 | 0 | fix_len = 1; |
537 | 0 | } else |
538 | 0 | fix_len = 0; |
539 | | |
540 | 0 | if (!evp_EncryptDecryptUpdate(ctx, out, outl, in, inl)) |
541 | 0 | return 0; |
542 | | |
543 | | /* |
544 | | * if we have 'decrypted' a multiple of block size, make sure we have a |
545 | | * copy of this last block |
546 | | */ |
547 | 0 | if (b > 1 && !ctx->buf_len) { |
548 | 0 | *outl -= b; |
549 | 0 | ctx->final_used = 1; |
550 | 0 | memcpy(ctx->final, &out[*outl], b); |
551 | 0 | } else |
552 | 0 | ctx->final_used = 0; |
553 | |
554 | 0 | if (fix_len) |
555 | 0 | *outl += b; |
556 | |
557 | 0 | return 1; |
558 | 0 | } |
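
/*
 * Illustrative trace of the hold-back above: decrypting 32 ciphertext bytes
 * with a 16-byte block cipher and padding enabled reports *outl == 16 from
 * this call; the last plaintext block stays in ctx->final so that
 * EVP_DecryptFinal_ex() can strip its padding, or is prepended to the
 * output of the next update call if more ciphertext arrives.
 */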
559 | | |
560 | | int EVP_DecryptFinal(EVP_CIPHER_CTX *ctx, unsigned char *out, int *outl) |
561 | 0 | { |
562 | 0 | int ret; |
563 | 0 | ret = EVP_DecryptFinal_ex(ctx, out, outl); |
564 | 0 | return ret; |
565 | 0 | } |
566 | | |
567 | | int EVP_DecryptFinal_ex(EVP_CIPHER_CTX *ctx, unsigned char *out, int *outl) |
568 | 0 | { |
569 | 0 | int i, n; |
570 | 0 | unsigned int b; |
571 | | |
572 | | /* Prevent accidental use of encryption context when decrypting */ |
573 | 0 | if (ctx->encrypt) { |
574 | 0 | EVPerr(EVP_F_EVP_DECRYPTFINAL_EX, EVP_R_INVALID_OPERATION); |
575 | 0 | return 0; |
576 | 0 | } |
577 | | |
578 | 0 | *outl = 0; |
579 | |
580 | 0 | if (ctx->cipher->flags & EVP_CIPH_FLAG_CUSTOM_CIPHER) { |
581 | 0 | i = ctx->cipher->do_cipher(ctx, out, NULL, 0); |
582 | 0 | if (i < 0) |
583 | 0 | return 0; |
584 | 0 | else |
585 | 0 | *outl = i; |
586 | 0 | return 1; |
587 | 0 | } |
588 | | |
589 | 0 | b = ctx->cipher->block_size; |
590 | 0 | if (ctx->flags & EVP_CIPH_NO_PADDING) { |
591 | 0 | if (ctx->buf_len) { |
592 | 0 | EVPerr(EVP_F_EVP_DECRYPTFINAL_EX, |
593 | 0 | EVP_R_DATA_NOT_MULTIPLE_OF_BLOCK_LENGTH); |
594 | 0 | return 0; |
595 | 0 | } |
596 | 0 | *outl = 0; |
597 | 0 | return 1; |
598 | 0 | } |
599 | 0 | if (b > 1) { |
600 | 0 | if (ctx->buf_len || !ctx->final_used) { |
601 | 0 | EVPerr(EVP_F_EVP_DECRYPTFINAL_EX, EVP_R_WRONG_FINAL_BLOCK_LENGTH); |
602 | 0 | return 0; |
603 | 0 | } |
604 | 0 | OPENSSL_assert(b <= sizeof(ctx->final)); |
605 | | |
606 | | /* |
607 | | * The following assumes that the ciphertext has been authenticated. |
608 | | * Otherwise it provides a padding oracle. |
609 | | */ |
610 | 0 | n = ctx->final[b - 1]; |
611 | 0 | if (n == 0 || n > (int)b) { |
612 | 0 | EVPerr(EVP_F_EVP_DECRYPTFINAL_EX, EVP_R_BAD_DECRYPT); |
613 | 0 | return 0; |
614 | 0 | } |
615 | 0 | for (i = 0; i < n; i++) { |
616 | 0 | if (ctx->final[--b] != n) { |
617 | 0 | EVPerr(EVP_F_EVP_DECRYPTFINAL_EX, EVP_R_BAD_DECRYPT); |
618 | 0 | return 0; |
619 | 0 | } |
620 | 0 | } |
621 | 0 | n = ctx->cipher->block_size - n; |
622 | 0 | for (i = 0; i < n; i++) |
623 | 0 | out[i] = ctx->final[i]; |
624 | 0 | *outl = n; |
625 | 0 | } else |
626 | 0 | *outl = 0; |
627 | 0 | return 1; |
628 | 0 | } |
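
/*
 * Example of the padding check above (illustrative): if the retained block
 * in ctx->final ends in 04 04 04 04, then n == 4, the loop verifies all
 * four pad bytes, and the first 12 bytes of the block are copied to out
 * with *outl == 12; a pad byte of 0 or larger than the block size fails
 * with EVP_R_BAD_DECRYPT.
 */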
629 | | |
630 | | int EVP_CIPHER_CTX_set_key_length(EVP_CIPHER_CTX *c, int keylen) |
631 | 0 | { |
632 | 0 | if (c->cipher->flags & EVP_CIPH_CUSTOM_KEY_LENGTH) |
633 | 0 | return EVP_CIPHER_CTX_ctrl(c, EVP_CTRL_SET_KEY_LENGTH, keylen, NULL); |
634 | 0 | if (c->key_len == keylen) |
635 | 0 | return 1; |
636 | 0 | if ((keylen > 0) && (c->cipher->flags & EVP_CIPH_VARIABLE_LENGTH)) { |
637 | 0 | c->key_len = keylen; |
638 | 0 | return 1; |
639 | 0 | } |
640 | 0 | EVPerr(EVP_F_EVP_CIPHER_CTX_SET_KEY_LENGTH, EVP_R_INVALID_KEY_LENGTH); |
641 | 0 | return 0; |
642 | 0 | } |
643 | | |
644 | | int EVP_CIPHER_CTX_set_padding(EVP_CIPHER_CTX *ctx, int pad) |
645 | 0 | { |
646 | 0 | if (pad) |
647 | 0 | ctx->flags &= ~EVP_CIPH_NO_PADDING; |
648 | 0 | else |
649 | 0 | ctx->flags |= EVP_CIPH_NO_PADDING; |
650 | 0 | return 1; |
651 | 0 | } |
652 | | |
653 | | int EVP_CIPHER_CTX_ctrl(EVP_CIPHER_CTX *ctx, int type, int arg, void *ptr) |
654 | 0 | { |
655 | 0 | int ret; |
656 | |
657 | 0 | if (!ctx->cipher) { |
658 | 0 | EVPerr(EVP_F_EVP_CIPHER_CTX_CTRL, EVP_R_NO_CIPHER_SET); |
659 | 0 | return 0; |
660 | 0 | } |
661 | | |
662 | 0 | if (!ctx->cipher->ctrl) { |
663 | 0 | EVPerr(EVP_F_EVP_CIPHER_CTX_CTRL, EVP_R_CTRL_NOT_IMPLEMENTED); |
664 | 0 | return 0; |
665 | 0 | } |
666 | | |
667 | 0 | ret = ctx->cipher->ctrl(ctx, type, arg, ptr); |
668 | 0 | if (ret == -1) { |
669 | 0 | EVPerr(EVP_F_EVP_CIPHER_CTX_CTRL, |
670 | 0 | EVP_R_CTRL_OPERATION_NOT_IMPLEMENTED); |
671 | 0 | return 0; |
672 | 0 | } |
673 | 0 | return ret; |
674 | 0 | } |
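
/*
 * Typical use of the ctrl entry point above (illustrative; the values are
 * examples, not requirements): AEAD callers adjust cipher-specific state
 * such as
 *     EVP_CIPHER_CTX_ctrl(ctx, EVP_CTRL_AEAD_SET_IVLEN, 12, NULL);
 *     EVP_CIPHER_CTX_ctrl(ctx, EVP_CTRL_AEAD_GET_TAG, 16, tag);
 * A control the cipher does not recognise makes its ctrl handler return -1,
 * which this wrapper converts to 0 with EVP_R_CTRL_OPERATION_NOT_IMPLEMENTED.
 */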
675 | | |
676 | | int EVP_CIPHER_CTX_rand_key(EVP_CIPHER_CTX *ctx, unsigned char *key) |
677 | 0 | { |
678 | 0 | if (ctx->cipher->flags & EVP_CIPH_RAND_KEY) |
679 | 0 | return EVP_CIPHER_CTX_ctrl(ctx, EVP_CTRL_RAND_KEY, 0, key); |
680 | 0 | if (RAND_priv_bytes(key, ctx->key_len) <= 0) |
681 | 0 | return 0; |
682 | 0 | return 1; |
683 | 0 | } |
684 | | |
685 | | int EVP_CIPHER_CTX_copy(EVP_CIPHER_CTX *out, const EVP_CIPHER_CTX *in) |
686 | 0 | { |
687 | 0 | if ((in == NULL) || (in->cipher == NULL)) { |
688 | 0 | EVPerr(EVP_F_EVP_CIPHER_CTX_COPY, EVP_R_INPUT_NOT_INITIALIZED); |
689 | 0 | return 0; |
690 | 0 | } |
691 | 0 | #ifndef OPENSSL_NO_ENGINE |
692 | | /* Make sure it's safe to copy a cipher context using an ENGINE */ |
693 | 0 | if (in->engine && !ENGINE_init(in->engine)) { |
694 | 0 | EVPerr(EVP_F_EVP_CIPHER_CTX_COPY, ERR_R_ENGINE_LIB); |
695 | 0 | return 0; |
696 | 0 | } |
697 | 0 | #endif |
698 | | |
699 | 0 | EVP_CIPHER_CTX_reset(out); |
700 | 0 | memcpy(out, in, sizeof(*out)); |
701 | |
702 | 0 | if (in->cipher_data && in->cipher->ctx_size) { |
703 | 0 | out->cipher_data = OPENSSL_malloc(in->cipher->ctx_size); |
704 | 0 | if (out->cipher_data == NULL) { |
705 | 0 | out->cipher = NULL; |
706 | 0 | EVPerr(EVP_F_EVP_CIPHER_CTX_COPY, ERR_R_MALLOC_FAILURE); |
707 | 0 | return 0; |
708 | 0 | } |
709 | 0 | memcpy(out->cipher_data, in->cipher_data, in->cipher->ctx_size); |
710 | 0 | } |
711 | | |
712 | 0 | if (in->cipher->flags & EVP_CIPH_CUSTOM_COPY) |
713 | 0 | if (!in->cipher->ctrl((EVP_CIPHER_CTX *)in, EVP_CTRL_COPY, 0, out)) { |
714 | 0 | out->cipher = NULL; |
715 | 0 | EVPerr(EVP_F_EVP_CIPHER_CTX_COPY, EVP_R_INITIALIZATION_ERROR); |
716 | 0 | return 0; |
717 | 0 | } |
718 | 0 | return 1; |
719 | 0 | } |