/src/openssl/ssl/s3_cbc.c
Line | Count | Source |
1 | | /* ssl/s3_cbc.c */ |
2 | | /* ==================================================================== |
3 | | * Copyright (c) 2012 The OpenSSL Project. All rights reserved. |
4 | | * |
5 | | * Redistribution and use in source and binary forms, with or without |
6 | | * modification, are permitted provided that the following conditions |
7 | | * are met: |
8 | | * |
9 | | * 1. Redistributions of source code must retain the above copyright |
10 | | * notice, this list of conditions and the following disclaimer. |
11 | | * |
12 | | * 2. Redistributions in binary form must reproduce the above copyright |
13 | | * notice, this list of conditions and the following disclaimer in |
14 | | * the documentation and/or other materials provided with the |
15 | | * distribution. |
16 | | * |
17 | | * 3. All advertising materials mentioning features or use of this |
18 | | * software must display the following acknowledgment: |
19 | | * "This product includes software developed by the OpenSSL Project |
20 | | * for use in the OpenSSL Toolkit. (http://www.openssl.org/)" |
21 | | * |
22 | | * 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to |
23 | | * endorse or promote products derived from this software without |
24 | | * prior written permission. For written permission, please contact |
25 | | * openssl-core@openssl.org. |
26 | | * |
27 | | * 5. Products derived from this software may not be called "OpenSSL" |
28 | | * nor may "OpenSSL" appear in their names without prior written |
29 | | * permission of the OpenSSL Project. |
30 | | * |
31 | | * 6. Redistributions of any form whatsoever must retain the following |
32 | | * acknowledgment: |
33 | | * "This product includes software developed by the OpenSSL Project |
34 | | * for use in the OpenSSL Toolkit (http://www.openssl.org/)" |
35 | | * |
36 | | * THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY |
37 | | * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE |
38 | | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR |
39 | | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE OpenSSL PROJECT OR |
40 | | * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, |
41 | | * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
42 | | * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; |
43 | | * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) |
44 | | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, |
45 | | * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) |
46 | | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED |
47 | | * OF THE POSSIBILITY OF SUCH DAMAGE. |
48 | | * ==================================================================== |
49 | | * |
50 | | * This product includes cryptographic software written by Eric Young |
51 | | * (eay@cryptsoft.com). This product includes software written by Tim |
52 | | * Hudson (tjh@cryptsoft.com). |
53 | | * |
54 | | */ |
55 | | |
56 | | #include "../crypto/constant_time_locl.h" |
57 | | #include "ssl_locl.h" |
58 | | |
59 | | #include <openssl/md5.h> |
60 | | #include <openssl/sha.h> |
61 | | |
62 | | /* |
63 | | * MAX_HASH_BIT_COUNT_BYTES is the maximum number of bytes in the hash's |
64 | | * length field. (SHA-384/512 have 128-bit length.) |
65 | | */ |
66 | | #define MAX_HASH_BIT_COUNT_BYTES 16 |
67 | | |
68 | | /* |
69 | | * MAX_HASH_BLOCK_SIZE is the maximum hash block size that we'll support. |
70 | | * Currently SHA-384/512 have a 128-byte block size and that's the largest |
71 | | * supported by TLS. |
72 | | */ |
73 | | #define MAX_HASH_BLOCK_SIZE 128 |
74 | | |
75 | | /*- |
76 | | * ssl3_cbc_remove_padding removes padding from the decrypted, SSLv3, CBC |
77 | | * record in |rec| by updating |rec->length| in constant time. |
78 | | * |
79 | | * block_size: the block size of the cipher used to encrypt the record. |
80 | | * returns: |
81 | | * 0: (in non-constant time) if the record is publicly invalid. |
82 | | * 1: if the padding was valid |
83 | | * -1: otherwise. |
84 | | */ |
85 | | int ssl3_cbc_remove_padding(const SSL *s, |
86 | | SSL3_RECORD *rec, |
87 | | unsigned block_size, unsigned mac_size) |
88 | 0 | { |
89 | 0 | unsigned padding_length, good; |
90 | 0 | const unsigned overhead = 1 /* padding length byte */ + mac_size; |
91 | | |
92 | | /* |
93 | | * These lengths are all public so we can test them in non-constant time. |
94 | | */ |
95 | 0 | if (overhead > rec->length) |
96 | 0 | return 0; |
97 | | |
98 | 0 | padding_length = rec->data[rec->length - 1]; |
99 | 0 | good = constant_time_ge(rec->length, padding_length + overhead); |
100 | | /* SSLv3 requires that the padding is minimal. */ |
101 | 0 | good &= constant_time_ge(block_size, padding_length + 1); |
102 | 0 | padding_length = good & (padding_length + 1); |
103 | 0 | rec->length -= padding_length; |
104 | 0 | rec->type |= padding_length << 8; /* kludge: pass padding length */ |
105 | 0 | return constant_time_select_int(good, 1, -1); |
106 | 0 | } |
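The padding routines above lean entirely on the masking helpers declared in crypto/constant_time_locl.h. As a reader's aid, here is a minimal sketch of the helpers used in this file, modelled on that header (the header itself is authoritative); the _sketch suffixes mark these as illustrative names, not OpenSSL API.

/* Minimal sketch of the constant-time helpers used above, modelled on
 * crypto/constant_time_locl.h; the real header is authoritative. */
static unsigned int constant_time_msb_sketch(unsigned int a)
{
    /* Replicate the most significant bit of |a| into every bit position. */
    return 0 - (a >> (sizeof(a) * 8 - 1));
}

static unsigned int constant_time_lt_sketch(unsigned int a, unsigned int b)
{
    /* All-ones if a < b, all-zeros otherwise, with no data-dependent branch. */
    return constant_time_msb_sketch(a ^ ((a ^ b) | ((a - b) ^ b)));
}

static unsigned int constant_time_ge_sketch(unsigned int a, unsigned int b)
{
    return ~constant_time_lt_sketch(a, b);
}

static int constant_time_select_int_sketch(unsigned int mask, int a, int b)
{
    /* |mask| must be all-ones or all-zeros; picks |a| or |b| branch-free. */
    return (int)((mask & (unsigned int)a) | (~mask & (unsigned int)b));
}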
107 | | |
108 | | /*- |
109 | | * tls1_cbc_remove_padding removes the CBC padding from the decrypted, TLS, CBC |
110 | | * record in |rec| in constant time and returns 1 if the padding is valid and |
111 | | * -1 otherwise. It also removes any explicit IV from the start of the record |
112 | | * without leaking any timing about whether there was enough space after the |
113 | | * padding was removed. |
114 | | * |
115 | | * block_size: the block size of the cipher used to encrypt the record. |
116 | | * returns: |
117 | | * 0: (in non-constant time) if the record is publicly invalid. |
118 | | * 1: if the padding was valid |
119 | | * -1: otherwise. |
120 | | */ |
121 | | int tls1_cbc_remove_padding(const SSL *s, |
122 | | SSL3_RECORD *rec, |
123 | | unsigned block_size, unsigned mac_size) |
124 | 0 | { |
125 | 0 | unsigned padding_length, good, to_check, i; |
126 | 0 | const unsigned overhead = 1 /* padding length byte */ + mac_size; |
127 | | /* Check if version requires explicit IV */ |
128 | 0 | if (SSL_USE_EXPLICIT_IV(s)) { |
129 | | /* |
130 | | * These lengths are all public so we can test them in non-constant |
131 | | * time. |
132 | | */ |
133 | 0 | if (overhead + block_size > rec->length) |
134 | 0 | return 0; |
135 | | /* We can now safely skip explicit IV */ |
136 | 0 | rec->data += block_size; |
137 | 0 | rec->input += block_size; |
138 | 0 | rec->length -= block_size; |
139 | 0 | } else if (overhead > rec->length) |
140 | 0 | return 0; |
141 | | |
142 | 0 | padding_length = rec->data[rec->length - 1]; |
143 | | |
144 | | /* |
145 | | * NB: if compression is in operation the first packet may not be of even |
146 | | * length so the padding bug check cannot be performed. This bug |
147 | | * workaround has been around since SSLeay so hopefully it is either |
148 | | * fixed now or no buggy implementation supports compression [steve] |
149 | | */ |
150 | 0 | if ((s->options & SSL_OP_TLS_BLOCK_PADDING_BUG) && !s->expand) { |
151 | | /* First packet is even in size, so check */ |
152 | 0 | if ((CRYPTO_memcmp(s->s3->read_sequence, "\0\0\0\0\0\0\0\0", 8) == 0) && |
153 | 0 | !(padding_length & 1)) { |
154 | 0 | s->s3->flags |= TLS1_FLAGS_TLS_PADDING_BUG; |
155 | 0 | } |
156 | 0 | if ((s->s3->flags & TLS1_FLAGS_TLS_PADDING_BUG) && padding_length > 0) { |
157 | 0 | padding_length--; |
158 | 0 | } |
159 | 0 | } |
160 | |
161 | 0 | if (EVP_CIPHER_flags(s->enc_read_ctx->cipher) & EVP_CIPH_FLAG_AEAD_CIPHER) { |
162 | | /* padding is already verified */ |
163 | 0 | rec->length -= padding_length + 1; |
164 | 0 | return 1; |
165 | 0 | } |
166 | | |
167 | 0 | good = constant_time_ge(rec->length, overhead + padding_length); |
168 | | /* |
169 | | * The padding consists of a length byte at the end of the record and |
170 | | * then that many bytes of padding, all with the same value as the length |
171 | | * byte. Thus, with the length byte included, there are padding_length+1 |
172 | | * bytes of padding. We can't check just |padding_length+1| bytes because that |
173 | | * leaks decrypted information. Therefore we always have to check the |
174 | | * maximum amount of padding possible. (Again, the length of the record |
175 | | * is public information so we can use it.) |
176 | | */ |
177 | 0 | to_check = 255; /* maximum amount of padding. */ |
178 | 0 | if (to_check > rec->length - 1) |
179 | 0 | to_check = rec->length - 1; |
180 | |
181 | 0 | for (i = 0; i < to_check; i++) { |
182 | 0 | unsigned char mask = constant_time_ge_8(padding_length, i); |
183 | 0 | unsigned char b = rec->data[rec->length - 1 - i]; |
184 | | /* |
185 | | * The final |padding_length+1| bytes should all have the value |
186 | | * |padding_length|. Therefore the XOR should be zero. |
187 | | */ |
188 | 0 | good &= ~(mask & (padding_length ^ b)); |
189 | 0 | } |
190 | | |
191 | | /* |
192 | | * If any of the final |padding_length+1| bytes had the wrong value, one |
193 | | * or more of the lower eight bits of |good| will be cleared. |
194 | | */ |
195 | 0 | good = constant_time_eq(0xff, good & 0xff); |
196 | 0 | padding_length = good & (padding_length + 1); |
197 | 0 | rec->length -= padding_length; |
198 | 0 | rec->type |= padding_length << 8; /* kludge: pass padding length */ |
199 | |
200 | 0 | return constant_time_select_int(good, 1, -1); |
201 | 0 | } |
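To make the fixed-cost scan above easier to follow, here is a hedged, standalone sketch of the same check over a plain buffer; the name and parameters (check_tls_padding_sketch, buf, len, pad_len) are illustrative, not part of OpenSSL.

/* Sketch only: returns all-ones if the last pad_len+1 bytes of |buf| all
 * equal pad_len, always scanning up to 255 trailing bytes so the running
 * time does not depend on the secret pad_len.  |len| is public. */
static unsigned int check_tls_padding_sketch(const unsigned char *buf,
                                             unsigned int len,
                                             unsigned int pad_len)
{
    unsigned int good = constant_time_ge(len, pad_len + 1);
    unsigned int to_check = 255;        /* maximum possible padding */
    unsigned int i;

    if (to_check > len - 1)             /* public length, branch is fine */
        to_check = len - 1;
    for (i = 0; i < to_check; i++) {
        unsigned char mask = constant_time_ge_8(pad_len, i);
        unsigned char b = buf[len - 1 - i];
        good &= ~(mask & (pad_len ^ b));
    }
    return constant_time_eq(0xff, good & 0xff);
}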
202 | | |
203 | | /*- |
204 | | * ssl3_cbc_copy_mac copies |md_size| bytes from the end of |rec| to |out| in |
205 | | * constant time (independent of the concrete value of rec->length, which may |
206 | | * vary within a 256-byte window). |
207 | | * |
208 | | * ssl3_cbc_remove_padding or tls1_cbc_remove_padding must be called prior to |
209 | | * this function. |
210 | | * |
211 | | * On entry: |
212 | | * rec->orig_len >= md_size |
213 | | * md_size <= EVP_MAX_MD_SIZE |
214 | | * |
215 | | * If CBC_MAC_ROTATE_IN_PLACE is defined then the rotation is performed with |
216 | | * variable accesses in a 64-byte-aligned buffer. Assuming that this fits into |
217 | | * a single or pair of cache-lines, then the variable memory accesses don't |
218 | | * actually affect the timing. CPUs with smaller cache-lines [if any] are |
219 | | * not multi-core and are not considered vulnerable to cache-timing attacks. |
220 | | */ |
221 | | #define CBC_MAC_ROTATE_IN_PLACE |
222 | | |
223 | | void ssl3_cbc_copy_mac(unsigned char *out, |
224 | | const SSL3_RECORD *rec, |
225 | | unsigned md_size, unsigned orig_len) |
226 | 0 | { |
227 | 0 | #if defined(CBC_MAC_ROTATE_IN_PLACE) |
228 | 0 | unsigned char rotated_mac_buf[64 + EVP_MAX_MD_SIZE]; |
229 | 0 | unsigned char *rotated_mac; |
230 | | #else |
231 | | unsigned char rotated_mac[EVP_MAX_MD_SIZE]; |
232 | | #endif |
233 | | |
234 | | /* |
235 | | * mac_end is the index of |rec->data| just after the end of the MAC. |
236 | | */ |
237 | 0 | unsigned mac_end = rec->length; |
238 | 0 | unsigned mac_start = mac_end - md_size; |
239 | | /* |
240 | | * scan_start contains the number of bytes that we can ignore because the |
241 | | * MAC's position can only vary by 255 bytes. |
242 | | */ |
243 | 0 | unsigned scan_start = 0; |
244 | 0 | unsigned i, j; |
245 | 0 | unsigned div_spoiler; |
246 | 0 | unsigned rotate_offset; |
247 | |
248 | 0 | OPENSSL_assert(orig_len >= md_size); |
249 | 0 | OPENSSL_assert(md_size <= EVP_MAX_MD_SIZE); |
250 | |
251 | 0 | #if defined(CBC_MAC_ROTATE_IN_PLACE) |
252 | 0 | rotated_mac = rotated_mac_buf + ((0 - (size_t)rotated_mac_buf) & 63); |
253 | 0 | #endif |
254 | | |
255 | | /* This information is public so it's safe to branch based on it. */ |
256 | 0 | if (orig_len > md_size + 255 + 1) |
257 | 0 | scan_start = orig_len - (md_size + 255 + 1); |
258 | | /* |
259 | | * div_spoiler contains a multiple of md_size that is used to cause the |
260 | | * modulo operation to be constant time. Without this, the time varies |
261 | | * based on the amount of padding when running on Intel chips at least. |
262 | | * The aim of right-shifting md_size is so that the compiler doesn't |
263 | | * figure out that it can remove div_spoiler as that would require it to |
264 | | * prove that md_size is always even, which I hope is beyond it. |
265 | | */ |
266 | 0 | div_spoiler = md_size >> 1; |
267 | 0 | div_spoiler <<= (sizeof(div_spoiler) - 1) * 8; |
268 | 0 | rotate_offset = (div_spoiler + mac_start - scan_start) % md_size; |
269 | |
270 | 0 | memset(rotated_mac, 0, md_size); |
271 | 0 | for (i = scan_start, j = 0; i < orig_len; i++) { |
272 | 0 | unsigned char mac_started = constant_time_ge_8(i, mac_start); |
273 | 0 | unsigned char mac_ended = constant_time_ge_8(i, mac_end); |
274 | 0 | unsigned char b = rec->data[i]; |
275 | 0 | rotated_mac[j++] |= b & mac_started & ~mac_ended; |
276 | 0 | j &= constant_time_lt(j, md_size); |
277 | 0 | } |
278 | | |
279 | | /* Now rotate the MAC */ |
280 | 0 | #if defined(CBC_MAC_ROTATE_IN_PLACE) |
281 | 0 | j = 0; |
282 | 0 | for (i = 0; i < md_size; i++) { |
283 | | /* in case cache-line is 32 bytes, touch second line */ |
284 | 0 | ((volatile unsigned char *)rotated_mac)[rotate_offset ^ 32]; |
285 | 0 | out[j++] = rotated_mac[rotate_offset++]; |
286 | 0 | rotate_offset &= constant_time_lt(rotate_offset, md_size); |
287 | 0 | } |
288 | | #else |
289 | | memset(out, 0, md_size); |
290 | | rotate_offset = md_size - rotate_offset; |
291 | | rotate_offset &= constant_time_lt(rotate_offset, md_size); |
292 | | for (i = 0; i < md_size; i++) { |
293 | | for (j = 0; j < md_size; j++) |
294 | | out[j] |= rotated_mac[i] & constant_time_eq_8(j, rotate_offset); |
295 | | rotate_offset++; |
296 | | rotate_offset &= constant_time_lt(rotate_offset, md_size); |
297 | | } |
298 | | #endif |
299 | 0 | } |
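For orientation, what the routine above computes is simply the md_size bytes that end at rec->length; a non-constant-time reference (shown only to document the result, never for use on attacker-influenced lengths) would be:

/* Reference only, NOT constant time: documents what ssl3_cbc_copy_mac
 * computes.  The real routine produces the same bytes without any memory
 * access or loop bound that depends on the secret rec->length. */
static void copy_mac_reference_sketch(unsigned char *out,
                                      const SSL3_RECORD *rec,
                                      unsigned md_size)
{
    memcpy(out, rec->data + rec->length - md_size, md_size);
}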
300 | | |
301 | | /* |
302 | | * u32toLE serialises an unsigned, 32-bit number (n) as four bytes at (p) in |
303 | | * little-endian order. The value of p is advanced by four. |
304 | | */ |
305 | | #define u32toLE(n, p) \ |
306 | 0 | (*((p)++)=(unsigned char)(n), \ |
307 | 0 | *((p)++)=(unsigned char)(n>>8), \ |
308 | 0 | *((p)++)=(unsigned char)(n>>16), \ |
309 | 0 | *((p)++)=(unsigned char)(n>>24)) |
310 | | |
311 | | /* |
312 | | * These functions serialize the state of a hash and thus perform the |
313 | | * standard "final" operation without adding the padding and length that such |
314 | | * a function typically does. |
315 | | */ |
316 | | static void tls1_md5_final_raw(void *ctx, unsigned char *md_out) |
317 | 0 | { |
318 | 0 | MD5_CTX *md5 = ctx; |
319 | 0 | u32toLE(md5->A, md_out); |
320 | 0 | u32toLE(md5->B, md_out); |
321 | 0 | u32toLE(md5->C, md_out); |
322 | 0 | u32toLE(md5->D, md_out); |
323 | 0 | } |
324 | | |
325 | | static void tls1_sha1_final_raw(void *ctx, unsigned char *md_out) |
326 | 0 | { |
327 | 0 | SHA_CTX *sha1 = ctx; |
328 | 0 | l2n(sha1->h0, md_out); |
329 | 0 | l2n(sha1->h1, md_out); |
330 | 0 | l2n(sha1->h2, md_out); |
331 | 0 | l2n(sha1->h3, md_out); |
332 | 0 | l2n(sha1->h4, md_out); |
333 | 0 | } |
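These raw finals serialise the chaining state directly, so pairing SHA1_Init, one SHA1_Transform call and tls1_sha1_final_raw digests exactly one caller-pre-padded 64-byte block. A hedged usage sketch (the helper name is illustrative):

/* Sketch: digest one pre-padded 64-byte block.  |block| must already carry
 * the 0x80 terminator and the 64-bit bit length, exactly as
 * ssl3_cbc_digest_record arranges below. */
static void sha1_one_padded_block_sketch(const unsigned char block[64],
                                         unsigned char out[20])
{
    SHA_CTX ctx;

    SHA1_Init(&ctx);
    SHA1_Transform(&ctx, block);     /* one run of the compression function */
    tls1_sha1_final_raw(&ctx, out);  /* serialise h0..h4 big-endian */
}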
334 | | |
335 | | #define LARGEST_DIGEST_CTX SHA_CTX |
336 | | |
337 | | #ifndef OPENSSL_NO_SHA256 |
338 | | static void tls1_sha256_final_raw(void *ctx, unsigned char *md_out) |
339 | 0 | { |
340 | 0 | SHA256_CTX *sha256 = ctx; |
341 | 0 | unsigned i; |
342 | |
343 | 0 | for (i = 0; i < 8; i++) { |
344 | 0 | l2n(sha256->h[i], md_out); |
345 | 0 | } |
346 | 0 | } |
347 | | |
348 | | # undef LARGEST_DIGEST_CTX |
349 | | # define LARGEST_DIGEST_CTX SHA256_CTX |
350 | | #endif |
351 | | |
352 | | #ifndef OPENSSL_NO_SHA512 |
353 | | static void tls1_sha512_final_raw(void *ctx, unsigned char *md_out) |
354 | 0 | { |
355 | 0 | SHA512_CTX *sha512 = ctx; |
356 | 0 | unsigned i; |
357 | |
358 | 0 | for (i = 0; i < 8; i++) { |
359 | 0 | l2n8(sha512->h[i], md_out); |
360 | 0 | } |
361 | 0 | } |
362 | | |
363 | | # undef LARGEST_DIGEST_CTX |
364 | | # define LARGEST_DIGEST_CTX SHA512_CTX |
365 | | #endif |
366 | | |
367 | | /* |
368 | | * ssl3_cbc_record_digest_supported returns 1 iff |ctx| uses a hash function |
369 | | * which ssl3_cbc_digest_record supports. |
370 | | */ |
371 | | char ssl3_cbc_record_digest_supported(const EVP_MD_CTX *ctx) |
372 | 0 | { |
373 | | #ifdef OPENSSL_FIPS |
374 | | if (FIPS_mode()) |
375 | | return 0; |
376 | | #endif |
377 | 0 | switch (EVP_MD_CTX_type(ctx)) { |
378 | 0 | case NID_md5: |
379 | 0 | case NID_sha1: |
380 | 0 | #ifndef OPENSSL_NO_SHA256 |
381 | 0 | case NID_sha224: |
382 | 0 | case NID_sha256: |
383 | 0 | #endif |
384 | 0 | #ifndef OPENSSL_NO_SHA512 |
385 | 0 | case NID_sha384: |
386 | 0 | case NID_sha512: |
387 | 0 | #endif |
388 | 0 | return 1; |
389 | 0 | default: |
390 | 0 | return 0; |
391 | 0 | } |
392 | 0 | } |
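A hedged sketch of the intended call pattern follows; the real callers live in the record-MAC paths (e.g. s3_enc.c/t1_enc.c), and every name here other than the two functions above is illustrative.

/* Sketch of a caller: use the constant-time digest when the hash is
 * supported, otherwise signal the caller to fall back to the plain EVP path. */
static int compute_record_mac_sketch(EVP_MD_CTX *mac_ctx,
                                     const unsigned char header[13],
                                     const unsigned char *data,
                                     size_t data_plus_mac_size,
                                     size_t padded_size,
                                     const unsigned char *mac_secret,
                                     unsigned mac_secret_len,
                                     unsigned char *md_out, size_t *md_len)
{
    if (!ssl3_cbc_record_digest_supported(mac_ctx))
        return 0;   /* caller should use the ordinary HMAC computation */
    return ssl3_cbc_digest_record(mac_ctx, md_out, md_len, header, data,
                                  data_plus_mac_size, padded_size,
                                  mac_secret, mac_secret_len,
                                  0 /* TLS, not SSLv3 */);
}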
393 | | |
394 | | /*- |
395 | | * ssl3_cbc_digest_record computes the MAC of a decrypted, padded SSLv3/TLS |
396 | | * record. |
397 | | * |
398 | | * ctx: the EVP_MD_CTX from which we take the hash function. |
399 | | * ssl3_cbc_record_digest_supported must return true for this EVP_MD_CTX. |
400 | | * md_out: the digest output. At most EVP_MAX_MD_SIZE bytes will be written. |
401 | | * md_out_size: if non-NULL, the number of output bytes is written here. |
402 | | * header: the 13-byte, TLS record header. |
403 | | * data: the record data itself, less any preceding explicit IV. |
404 | | * data_plus_mac_size: the secret, reported length of the data and MAC |
405 | | * once the padding has been removed. |
406 | | * data_plus_mac_plus_padding_size: the public length of the whole |
407 | | * record, including padding. |
408 | | * is_sslv3: non-zero if we are to use SSLv3. Otherwise, TLS. |
409 | | * |
410 | | * On entry: by virtue of having been through one of the remove_padding |
411 | | * functions, above, we know that data_plus_mac_size is large enough to contain |
412 | | * a padding byte and MAC. (If the padding was invalid, it might contain the |
413 | | * padding too.) |
414 | | * Returns 1 on success or 0 on error |
415 | | */ |
416 | | int ssl3_cbc_digest_record(const EVP_MD_CTX *ctx, |
417 | | unsigned char *md_out, |
418 | | size_t *md_out_size, |
419 | | const unsigned char header[13], |
420 | | const unsigned char *data, |
421 | | size_t data_plus_mac_size, |
422 | | size_t data_plus_mac_plus_padding_size, |
423 | | const unsigned char *mac_secret, |
424 | | unsigned mac_secret_length, char is_sslv3) |
425 | 0 | { |
426 | 0 | union { |
427 | 0 | double align; |
428 | 0 | unsigned char c[sizeof(LARGEST_DIGEST_CTX)]; |
429 | 0 | } md_state; |
430 | 0 | void (*md_final_raw) (void *ctx, unsigned char *md_out); |
431 | 0 | void (*md_transform) (void *ctx, const unsigned char *block); |
432 | 0 | unsigned md_size, md_block_size = 64; |
433 | 0 | unsigned sslv3_pad_length = 40, header_length, variance_blocks, |
434 | 0 | len, max_mac_bytes, num_blocks, |
435 | 0 | num_starting_blocks, k, mac_end_offset, c, index_a, index_b; |
436 | 0 | unsigned int bits; /* at most 18 bits */ |
437 | 0 | unsigned char length_bytes[MAX_HASH_BIT_COUNT_BYTES]; |
438 | | /* hmac_pad is the masked HMAC key. */ |
439 | 0 | unsigned char hmac_pad[MAX_HASH_BLOCK_SIZE]; |
440 | 0 | unsigned char first_block[MAX_HASH_BLOCK_SIZE]; |
441 | 0 | unsigned char mac_out[EVP_MAX_MD_SIZE]; |
442 | 0 | unsigned i, j, md_out_size_u; |
443 | 0 | EVP_MD_CTX md_ctx; |
444 | | /* |
445 | | * md_length_size is the number of bytes in the length field that |
446 | | * terminates the hash. |
447 | | */ |
448 | 0 | unsigned md_length_size = 8; |
449 | 0 | char length_is_big_endian = 1; |
450 | | |
451 | | /* |
452 | | * This is a, hopefully redundant, check that allows us to forget about |
453 | | * many possible overflows later in this function. |
454 | | */ |
455 | 0 | OPENSSL_assert(data_plus_mac_plus_padding_size < 1024 * 1024); |
456 | |
457 | 0 | switch (EVP_MD_CTX_type(ctx)) { |
458 | 0 | case NID_md5: |
459 | 0 | if (MD5_Init((MD5_CTX *)md_state.c) <= 0) |
460 | 0 | return 0; |
461 | 0 | md_final_raw = tls1_md5_final_raw; |
462 | 0 | md_transform = |
463 | 0 | (void (*)(void *ctx, const unsigned char *block))MD5_Transform; |
464 | 0 | md_size = 16; |
465 | 0 | sslv3_pad_length = 48; |
466 | 0 | length_is_big_endian = 0; |
467 | 0 | break; |
468 | 0 | case NID_sha1: |
469 | 0 | if (SHA1_Init((SHA_CTX *)md_state.c) <= 0) |
470 | 0 | return 0; |
471 | 0 | md_final_raw = tls1_sha1_final_raw; |
472 | 0 | md_transform = |
473 | 0 | (void (*)(void *ctx, const unsigned char *block))SHA1_Transform; |
474 | 0 | md_size = 20; |
475 | 0 | break; |
476 | 0 | #ifndef OPENSSL_NO_SHA256 |
477 | 0 | case NID_sha224: |
478 | 0 | if (SHA224_Init((SHA256_CTX *)md_state.c) <= 0) |
479 | 0 | return 0; |
480 | 0 | md_final_raw = tls1_sha256_final_raw; |
481 | 0 | md_transform = |
482 | 0 | (void (*)(void *ctx, const unsigned char *block))SHA256_Transform; |
483 | 0 | md_size = 224 / 8; |
484 | 0 | break; |
485 | 0 | case NID_sha256: |
486 | 0 | if (SHA256_Init((SHA256_CTX *)md_state.c) <= 0) |
487 | 0 | return 0; |
488 | 0 | md_final_raw = tls1_sha256_final_raw; |
489 | 0 | md_transform = |
490 | 0 | (void (*)(void *ctx, const unsigned char *block))SHA256_Transform; |
491 | 0 | md_size = 32; |
492 | 0 | break; |
493 | 0 | #endif |
494 | 0 | #ifndef OPENSSL_NO_SHA512 |
495 | 0 | case NID_sha384: |
496 | 0 | if (SHA384_Init((SHA512_CTX *)md_state.c) <= 0) |
497 | 0 | return 0; |
498 | 0 | md_final_raw = tls1_sha512_final_raw; |
499 | 0 | md_transform = |
500 | 0 | (void (*)(void *ctx, const unsigned char *block))SHA512_Transform; |
501 | 0 | md_size = 384 / 8; |
502 | 0 | md_block_size = 128; |
503 | 0 | md_length_size = 16; |
504 | 0 | break; |
505 | 0 | case NID_sha512: |
506 | 0 | if (SHA512_Init((SHA512_CTX *)md_state.c) <= 0) |
507 | 0 | return 0; |
508 | 0 | md_final_raw = tls1_sha512_final_raw; |
509 | 0 | md_transform = |
510 | 0 | (void (*)(void *ctx, const unsigned char *block))SHA512_Transform; |
511 | 0 | md_size = 64; |
512 | 0 | md_block_size = 128; |
513 | 0 | md_length_size = 16; |
514 | 0 | break; |
515 | 0 | #endif |
516 | 0 | default: |
517 | | /* |
518 | | * ssl3_cbc_record_digest_supported should have been called first to |
519 | | * check that the hash function is supported. |
520 | | */ |
521 | 0 | OPENSSL_assert(0); |
522 | 0 | if (md_out_size) |
523 | 0 | *md_out_size = 0; |
524 | 0 | return 0; |
525 | 0 | } |
526 | | |
527 | 0 | OPENSSL_assert(md_length_size <= MAX_HASH_BIT_COUNT_BYTES); |
528 | 0 | OPENSSL_assert(md_block_size <= MAX_HASH_BLOCK_SIZE); |
529 | 0 | OPENSSL_assert(md_size <= EVP_MAX_MD_SIZE); |
530 | |
531 | 0 | header_length = 13; |
532 | 0 | if (is_sslv3) { |
533 | 0 | header_length = mac_secret_length + sslv3_pad_length + 8 /* sequence |
534 | 0 | * number */ + |
535 | 0 | 1 /* record type */ + |
536 | 0 | 2 /* record length */ ; |
537 | 0 | } |
538 | | |
539 | | /* |
540 | | * variance_blocks is the number of blocks of the hash that we have to |
541 | | * calculate in constant time because they could be altered by the |
542 | | * padding value. In SSLv3, the padding must be minimal so the end of |
543 | | * the plaintext varies by, at most, 15+20 = 35 bytes. (We conservatively |
544 | | * assume that the MAC size varies from 0..20 bytes.) In case the 9 bytes |
545 | | * of hash termination (0x80 + 64-bit length) don't fit in the final |
546 | | * block, we say that the final two blocks can vary based on the padding. |
547 | | * TLSv1 has MACs up to 48 bytes long (SHA-384) and the padding is not |
548 | | * required to be minimal. Therefore we say that the final six blocks can |
549 | | * vary based on the padding. Later in the function, if the message is |
550 | | * short and there obviously cannot be this many blocks then |
551 | | * variance_blocks can be reduced. |
552 | | */ |
553 | 0 | variance_blocks = is_sslv3 ? 2 : 6; |
554 | | /* |
555 | | * From now on we're dealing with the MAC, which conceptually has 13 |
556 | | * bytes of `header' before the start of the data (TLS) or 71/75 bytes |
557 | | * (SSLv3) |
558 | | */ |
559 | 0 | len = data_plus_mac_plus_padding_size + header_length; |
560 | | /* |
561 | | * max_mac_bytes contains the maximum number of bytes in the MAC, |
562 | | * including |header|, assuming that there's no padding. |
563 | | */ |
564 | 0 | max_mac_bytes = len - md_size - 1; |
565 | | /* num_blocks is the maximum number of hash blocks. */ |
566 | 0 | num_blocks = |
567 | 0 | (max_mac_bytes + 1 + md_length_size + md_block_size - |
568 | 0 | 1) / md_block_size; |
569 | | /* |
570 | | * In order to calculate the MAC in constant time we have to handle the |
571 | | * final blocks specially because the padding value could cause the end |
572 | | * to appear somewhere in the final |variance_blocks| blocks and we can't |
573 | | * leak where. However, |num_starting_blocks| worth of data can be hashed |
574 | | * right away because no padding value can affect whether they are |
575 | | * plaintext. |
576 | | */ |
577 | 0 | num_starting_blocks = 0; |
578 | | /* |
579 | | * k is the starting byte offset into the conceptual header||data where |
580 | | * we start processing. |
581 | | */ |
582 | 0 | k = 0; |
583 | | /* |
584 | | * mac_end_offset is the index just past the end of the data to be MACed. |
585 | | */ |
586 | 0 | mac_end_offset = data_plus_mac_size + header_length - md_size; |
587 | | /* |
588 | | * c is the index of the 0x80 byte in the final hash block that contains |
589 | | * application data. |
590 | | */ |
591 | 0 | c = mac_end_offset % md_block_size; |
592 | | /* |
593 | | * index_a is the hash block number that contains the 0x80 terminating |
594 | | * value. |
595 | | */ |
596 | 0 | index_a = mac_end_offset / md_block_size; |
597 | | /* |
598 | | * index_b is the hash block number that contains the 64-bit hash length, |
599 | | * in bits. |
600 | | */ |
601 | 0 | index_b = (mac_end_offset + md_length_size) / md_block_size; |
602 | | /* |
603 | | * bits is the hash-length in bits. It includes the additional hash block |
604 | | * for the masked HMAC key, or the whole of |header| in the case of SSLv3. |
605 | | */ |
606 | | |
607 | | /* |
608 | | * For SSLv3, if we're going to have any starting blocks then we need at |
609 | | * least two because the header is larger than a single block. |
610 | | */ |
611 | 0 | if (num_blocks > variance_blocks + (is_sslv3 ? 1 : 0)) { |
612 | 0 | num_starting_blocks = num_blocks - variance_blocks; |
613 | 0 | k = md_block_size * num_starting_blocks; |
614 | 0 | } |
615 | |
616 | 0 | bits = 8 * mac_end_offset; |
617 | 0 | if (!is_sslv3) { |
618 | | /* |
619 | | * Compute the initial HMAC block. For SSLv3, the padding and secret |
620 | | * bytes are included in |header| because they take more than a |
621 | | * single block. |
622 | | */ |
623 | 0 | bits += 8 * md_block_size; |
624 | 0 | memset(hmac_pad, 0, md_block_size); |
625 | 0 | OPENSSL_assert(mac_secret_length <= sizeof(hmac_pad)); |
626 | 0 | memcpy(hmac_pad, mac_secret, mac_secret_length); |
627 | 0 | for (i = 0; i < md_block_size; i++) |
628 | 0 | hmac_pad[i] ^= 0x36; |
629 | |
630 | 0 | md_transform(md_state.c, hmac_pad); |
631 | 0 | } |
632 | |
633 | 0 | if (length_is_big_endian) { |
634 | 0 | memset(length_bytes, 0, md_length_size - 4); |
635 | 0 | length_bytes[md_length_size - 4] = (unsigned char)(bits >> 24); |
636 | 0 | length_bytes[md_length_size - 3] = (unsigned char)(bits >> 16); |
637 | 0 | length_bytes[md_length_size - 2] = (unsigned char)(bits >> 8); |
638 | 0 | length_bytes[md_length_size - 1] = (unsigned char)bits; |
639 | 0 | } else { |
640 | 0 | memset(length_bytes, 0, md_length_size); |
641 | 0 | length_bytes[md_length_size - 5] = (unsigned char)(bits >> 24); |
642 | 0 | length_bytes[md_length_size - 6] = (unsigned char)(bits >> 16); |
643 | 0 | length_bytes[md_length_size - 7] = (unsigned char)(bits >> 8); |
644 | 0 | length_bytes[md_length_size - 8] = (unsigned char)bits; |
645 | 0 | } |
646 | |
|
647 | 0 | if (k > 0) { |
648 | 0 | if (is_sslv3) { |
649 | 0 | unsigned overhang; |
650 | | |
651 | | /* |
652 | | * The SSLv3 header is larger than a single block. overhang is |
653 | | * the number of bytes beyond a single block that the header |
654 | | * consumes: either 7 bytes (SHA1) or 11 bytes (MD5). There are no |
655 | | * ciphersuites in SSLv3 that are not SHA1 or MD5 based and |
656 | | * therefore we can be confident that the header_length will be |
657 | | * greater than |md_block_size|. However we add a sanity check just |
658 | | * in case |
659 | | */ |
660 | 0 | if (header_length <= md_block_size) { |
661 | | /* Should never happen */ |
662 | 0 | return 0; |
663 | 0 | } |
664 | 0 | overhang = header_length - md_block_size; |
665 | 0 | md_transform(md_state.c, header); |
666 | 0 | memcpy(first_block, header + md_block_size, overhang); |
667 | 0 | memcpy(first_block + overhang, data, md_block_size - overhang); |
668 | 0 | md_transform(md_state.c, first_block); |
669 | 0 | for (i = 1; i < k / md_block_size - 1; i++) |
670 | 0 | md_transform(md_state.c, data + md_block_size * i - overhang); |
671 | 0 | } else { |
672 | | /* k is a multiple of md_block_size. */ |
673 | 0 | memcpy(first_block, header, 13); |
674 | 0 | memcpy(first_block + 13, data, md_block_size - 13); |
675 | 0 | md_transform(md_state.c, first_block); |
676 | 0 | for (i = 1; i < k / md_block_size; i++) |
677 | 0 | md_transform(md_state.c, data + md_block_size * i - 13); |
678 | 0 | } |
679 | 0 | } |
680 | | |
681 | 0 | memset(mac_out, 0, sizeof(mac_out)); |
682 | | |
683 | | /* |
684 | | * We now process the final hash blocks. For each block, we construct it |
685 | | * in constant time. If |i == index_a| then we'll include the 0x80 |
686 | | * byte and zero pad etc. For each block we selectively copy it, in |
687 | | * constant time, to |mac_out|. |
688 | | */ |
689 | 0 | for (i = num_starting_blocks; i <= num_starting_blocks + variance_blocks; |
690 | 0 | i++) { |
691 | 0 | unsigned char block[MAX_HASH_BLOCK_SIZE]; |
692 | 0 | unsigned char is_block_a = constant_time_eq_8(i, index_a); |
693 | 0 | unsigned char is_block_b = constant_time_eq_8(i, index_b); |
694 | 0 | for (j = 0; j < md_block_size; j++) { |
695 | 0 | unsigned char b = 0, is_past_c, is_past_cp1; |
696 | 0 | if (k < header_length) |
697 | 0 | b = header[k]; |
698 | 0 | else if (k < data_plus_mac_plus_padding_size + header_length) |
699 | 0 | b = data[k - header_length]; |
700 | 0 | k++; |
701 | |
702 | 0 | is_past_c = is_block_a & constant_time_ge_8(j, c); |
703 | 0 | is_past_cp1 = is_block_a & constant_time_ge_8(j, c + 1); |
704 | | /* |
705 | | * If this is the block containing the end of the application |
706 | | * data, and we are at the offset for the 0x80 value, then |
707 | | * overwrite b with 0x80. |
708 | | */ |
709 | 0 | b = constant_time_select_8(is_past_c, 0x80, b); |
710 | | /* |
711 | | * If this is the block containing the end of the application |
712 | | * data and we're past the 0x80 value then just write zero. |
713 | | */ |
714 | 0 | b = b & ~is_past_cp1; |
715 | | /* |
716 | | * If this is index_b (the final block), but not index_a (the end |
717 | | * of the data), then the 64-bit length didn't fit into index_a |
718 | | * and we're having to add an extra block of zeros. |
719 | | */ |
720 | 0 | b &= ~is_block_b | is_block_a; |
721 | | |
722 | | /* |
723 | | * The final bytes of one of the blocks contain the length. |
724 | | */ |
725 | 0 | if (j >= md_block_size - md_length_size) { |
726 | | /* If this is index_b, write a length byte. */ |
727 | 0 | b = constant_time_select_8(is_block_b, |
728 | 0 | length_bytes[j - |
729 | 0 | (md_block_size - |
730 | 0 | md_length_size)], b); |
731 | 0 | } |
732 | 0 | block[j] = b; |
733 | 0 | } |
734 | |
735 | 0 | md_transform(md_state.c, block); |
736 | 0 | md_final_raw(md_state.c, block); |
737 | | /* If this is index_b, copy the hash value to |mac_out|. */ |
738 | 0 | for (j = 0; j < md_size; j++) |
739 | 0 | mac_out[j] |= block[j] & is_block_b; |
740 | 0 | } |
741 | |
742 | 0 | EVP_MD_CTX_init(&md_ctx); |
743 | 0 | if (EVP_DigestInit_ex(&md_ctx, ctx->digest, NULL /* engine */ ) <= 0) |
744 | 0 | goto err; |
745 | 0 | if (is_sslv3) { |
746 | | /* We repurpose |hmac_pad| to contain the SSLv3 pad2 block. */ |
747 | 0 | memset(hmac_pad, 0x5c, sslv3_pad_length); |
748 | |
749 | 0 | if (EVP_DigestUpdate(&md_ctx, mac_secret, mac_secret_length) <= 0 |
750 | 0 | || EVP_DigestUpdate(&md_ctx, hmac_pad, sslv3_pad_length) <= 0 |
751 | 0 | || EVP_DigestUpdate(&md_ctx, mac_out, md_size) <= 0) |
752 | 0 | goto err; |
753 | 0 | } else { |
754 | | /* Complete the HMAC in the standard manner. */ |
755 | 0 | for (i = 0; i < md_block_size; i++) |
756 | 0 | hmac_pad[i] ^= 0x6a; |
757 | |
758 | 0 | if (EVP_DigestUpdate(&md_ctx, hmac_pad, md_block_size) <= 0 |
759 | 0 | || EVP_DigestUpdate(&md_ctx, mac_out, md_size) <= 0) |
760 | 0 | goto err; |
761 | 0 | } |
762 | 0 | EVP_DigestFinal(&md_ctx, md_out, &md_out_size_u); |
763 | 0 | if (md_out_size) |
764 | 0 | *md_out_size = md_out_size_u; |
765 | 0 | EVP_MD_CTX_cleanup(&md_ctx); |
766 | |
767 | 0 | return 1; |
768 | 0 | err: |
769 | 0 | EVP_MD_CTX_cleanup(&md_ctx); |
770 | 0 | return 0; |
771 | 0 | } |
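A worked example of the block bookkeeping above, under hypothetical sizes (TLS with HMAC-SHA-1, so md_size 20, md_block_size 64, md_length_size 8, header_length 13, variance_blocks 6):

/* Worked example, hypothetical sizes (TLS, HMAC-SHA-1):
 *   data_plus_mac_plus_padding_size = 576 (public)
 *   data_plus_mac_size              = 557 (secret: 537 data + 20 MAC,
 *                                          19 bytes of padding incl. the
 *                                          length byte 18)
 *   len            = 576 + 13                = 589
 *   max_mac_bytes  = 589 - 20 - 1            = 568
 *   num_blocks     = (568 + 1 + 8 + 63) / 64 = 10
 *   num_starting_blocks = 10 - 6             = 4,  k = 4 * 64 = 256
 *   mac_end_offset = 557 + 13 - 20           = 550
 *   c              = 550 % 64                = 38
 *   index_a        = 550 / 64                = 8
 *   index_b        = (550 + 8) / 64          = 8
 *   bits           = 8 * 550 + 8 * 64        = 4912
 * So after the hmac_pad block, the first four 64-byte blocks of header||data
 * are hashed directly, blocks 4..10 are built byte-by-byte in constant time,
 * and only block 8 (which carries the 0x80 at offset 38 and the length 4912
 * in its last 8 bytes) is selected into |mac_out|. */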
772 | | |
773 | | #ifdef OPENSSL_FIPS |
774 | | |
775 | | /* |
776 | | * Due to the need to use EVP in FIPS mode we can't reimplement digests but |
777 | | * we can ensure the number of blocks processed is equal for all cases by |
778 | | * digesting additional data. |
779 | | */ |
780 | | |
781 | | void tls_fips_digest_extra(const EVP_CIPHER_CTX *cipher_ctx, |
782 | | EVP_MD_CTX *mac_ctx, const unsigned char *data, |
783 | | size_t data_len, size_t orig_len) |
784 | | { |
785 | | size_t block_size, digest_pad, blocks_data, blocks_orig; |
786 | | if (EVP_CIPHER_CTX_mode(cipher_ctx) != EVP_CIPH_CBC_MODE) |
787 | | return; |
788 | | block_size = EVP_MD_CTX_block_size(mac_ctx); |
789 | | /*- |
790 | | * We are in FIPS mode if we get this far so we know we have only SHA* |
791 | | * digests and TLS to deal with. |
792 | | * Minimum digest padding length is 17 for SHA384/SHA512 and 9 |
793 | | * otherwise. |
794 | | * Additional header is 13 bytes. To get the number of digest blocks |
795 | | * processed round up the amount of data plus padding to the nearest |
796 | | * block length. Block length is 128 for SHA384/SHA512 and 64 otherwise. |
797 | | * So we have: |
798 | | * blocks = (payload_len + digest_pad + 13 + block_size - 1)/block_size |
799 | | * equivalently: |
800 | | * blocks = (payload_len + digest_pad + 12)/block_size + 1 |
801 | | * HMAC adds a constant overhead. |
802 | | * We're ultimately only interested in differences so this becomes |
803 | | * blocks = (payload_len + 29)/128 |
804 | | * for SHA384/SHA512 and |
805 | | * blocks = (payload_len + 21)/64 |
806 | | * otherwise. |
807 | | */ |
808 | | digest_pad = block_size == 64 ? 21 : 29; |
809 | | blocks_orig = (orig_len + digest_pad) / block_size; |
810 | | blocks_data = (data_len + digest_pad) / block_size; |
811 | | /* |
812 | | * MAC enough blocks to make up the difference between the original and |
813 | | * actual lengths plus one extra block to ensure this is never a no op. |
814 | | * The "data" pointer should always have enough space to perform this |
815 | | * operation as it is large enough for a maximum length TLS buffer. |
816 | | */ |
817 | | EVP_DigestSignUpdate(mac_ctx, data, |
818 | | (blocks_orig - blocks_data + 1) * block_size); |
819 | | } |
820 | | #endif |
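A worked example of the FIPS block-equalising arithmetic above, under hypothetical lengths and a 64-byte-block digest such as SHA-256:

/* Worked example, hypothetical lengths, SHA-256 (block_size = 64, so
 * digest_pad = 21):
 *   orig_len = 1024, data_len = 500
 *   blocks_orig = (1024 + 21) / 64 = 16
 *   blocks_data = ( 500 + 21) / 64 = 8
 *   extra bytes digested = (16 - 8 + 1) * 64 = 576
 * i.e. nine additional compression-function calls, so the total number of
 * blocks processed no longer reveals how much padding was stripped. */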