Coverage Report

Created: 2024-11-21 07:03

/src/boringssl/crypto/cipher_extra/tls_cbc.c
Every instrumented line in this file has an execution count of 0: nothing in tls_cbc.c was exercised by this run. The source of the file follows.

/* ====================================================================
 * Copyright (c) 2012 The OpenSSL Project.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * 3. All advertising materials mentioning features or use of this
 *    software must display the following acknowledgment:
 *    "This product includes software developed by the OpenSSL Project
 *    for use in the OpenSSL Toolkit. (http://www.openssl.org/)"
 *
 * 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to
 *    endorse or promote products derived from this software without
 *    prior written permission. For written permission, please contact
 *    openssl-core@openssl.org.
 *
 * 5. Products derived from this software may not be called "OpenSSL"
 *    nor may "OpenSSL" appear in their names without prior written
 *    permission of the OpenSSL Project.
 *
 * 6. Redistributions of any form whatsoever must retain the following
 *    acknowledgment:
 *    "This product includes software developed by the OpenSSL Project
 *    for use in the OpenSSL Toolkit (http://www.openssl.org/)"
 *
 * THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY
 * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE OpenSSL PROJECT OR
 * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
 * OF THE POSSIBILITY OF SUCH DAMAGE.
 * ====================================================================
 *
 * This product includes cryptographic software written by Eric Young
 * (eay@cryptsoft.com).  This product includes software written by Tim
 * Hudson (tjh@cryptsoft.com). */

#include <assert.h>
#include <string.h>

#include <openssl/digest.h>
#include <openssl/nid.h>
#include <openssl/sha.h>

#include "../internal.h"
#include "internal.h"
#include "../fipsmodule/cipher/internal.h"

int EVP_tls_cbc_remove_padding(crypto_word_t *out_padding_ok, size_t *out_len,
                               const uint8_t *in, size_t in_len,
                               size_t block_size, size_t mac_size) {
  const size_t overhead = 1 /* padding length byte */ + mac_size;

  // These lengths are all public so we can test them in non-constant time.
  if (overhead > in_len) {
    return 0;
  }

  size_t padding_length = in[in_len - 1];

  crypto_word_t good = constant_time_ge_w(in_len, overhead + padding_length);
  // The padding consists of a length byte at the end of the record and
  // then that many bytes of padding, all with the same value as the
  // length byte. Thus, with the length byte included, there are i+1
  // bytes of padding.
  //
  // We can't check just |padding_length+1| bytes because that leaks
  // decrypted information. Therefore we always have to check the maximum
  // amount of padding possible. (Again, the length of the record is
  // public information so we can use it.)
  size_t to_check = 256;  // maximum amount of padding, inc length byte.
  if (to_check > in_len) {
    to_check = in_len;
  }

  for (size_t i = 0; i < to_check; i++) {
    uint8_t mask = constant_time_ge_8(padding_length, i);
    uint8_t b = in[in_len - 1 - i];
    // The final |padding_length+1| bytes should all have the value
    // |padding_length|. Therefore the XOR should be zero.
    good &= ~(mask & (padding_length ^ b));
  }

  // If any of the final |padding_length+1| bytes had the wrong value,
  // one or more of the lower eight bits of |good| will be cleared.
  good = constant_time_eq_w(0xff, good & 0xff);

  // Always treat |padding_length| as zero on error. If, assuming block size of
  // 16, a padding of [<15 arbitrary bytes> 15] treated |padding_length| as 16
  // and returned -1, distinguishing good MAC and bad padding from bad MAC and
  // bad padding would give POODLE's padding oracle.
  padding_length = good & (padding_length + 1);
  *out_len = in_len - padding_length;
  *out_padding_ok = good;
  return 1;
}

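The mask-based check above can be illustrated outside of BoringSSL. The following sketch is not part of tls_cbc.c; it re-implements the same idea with hand-rolled constant-time helpers in place of the constant_time_* functions from crypto/internal.h, and runs it on a toy record ending in five 0x05 padding bytes plus the 0x05 length byte:

// Illustrative sketch only, not part of tls_cbc.c. It mirrors the padding
// check above using hand-rolled mask helpers instead of BoringSSL's
// constant_time_ge_8/constant_time_eq_w.
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

// All-ones when a >= b, zero otherwise (valid for a, b < 2^31).
static uint8_t ct_ge_8(uint32_t a, uint32_t b) {
  uint32_t lt = (a - b) >> 31;  // 1 iff a < b
  return (uint8_t)(lt - 1);
}

// All-ones when a == b, zero otherwise.
static uint8_t ct_eq_8(uint32_t a, uint32_t b) {
  uint32_t x = a ^ b;
  uint32_t nz = (x | (0u - x)) >> 31;  // 1 iff x != 0
  return (uint8_t)(nz - 1);
}

int main(void) {
  // Toy "decrypted" record: 4 plaintext bytes and a 20-byte dummy MAC, all
  // 0xaa, followed by TLS CBC padding: five 0x05 bytes and the 0x05 length
  // byte (six padding bytes in total, counting the length byte).
  uint8_t rec[4 + 20 + 6];
  size_t in_len = sizeof(rec);
  for (size_t i = 0; i < in_len; i++) {
    rec[i] = 0xaa;
  }
  for (size_t i = in_len - 6; i < in_len; i++) {
    rec[i] = 0x05;
  }

  uint8_t padding_length = rec[in_len - 1];
  uint8_t good = 0xff;
  // Scan up to 256 trailing bytes, exactly as the loop above does, so the
  // number of iterations never depends on the secret padding length.
  size_t to_check = in_len < 256 ? in_len : 256;
  for (size_t i = 0; i < to_check; i++) {
    uint8_t mask = ct_ge_8(padding_length, (uint32_t)i);
    uint8_t b = rec[in_len - 1 - i];
    good &= (uint8_t)~(mask & (padding_length ^ b));
  }
  good = ct_eq_8(good, 0xff);
  printf("padding ok: %s\n", good == 0xff ? "yes" : "no");  // prints "yes"
  return 0;
}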

void EVP_tls_cbc_copy_mac(uint8_t *out, size_t md_size, const uint8_t *in,
                          size_t in_len, size_t orig_len) {
  uint8_t rotated_mac1[EVP_MAX_MD_SIZE], rotated_mac2[EVP_MAX_MD_SIZE];
  uint8_t *rotated_mac = rotated_mac1;
  uint8_t *rotated_mac_tmp = rotated_mac2;

  // mac_end is the index of |in| just after the end of the MAC.
  size_t mac_end = in_len;
  size_t mac_start = mac_end - md_size;

  declassify_assert(orig_len >= in_len);
  declassify_assert(in_len >= md_size);
  assert(md_size <= EVP_MAX_MD_SIZE);
  assert(md_size > 0);

  // scan_start contains the number of bytes that we can ignore because
  // the MAC's position can only vary by 255 bytes.
  size_t scan_start = 0;
  // This information is public so it's safe to branch based on it.
  if (orig_len > md_size + 255 + 1) {
    scan_start = orig_len - (md_size + 255 + 1);
  }

  size_t rotate_offset = 0;
  uint8_t mac_started = 0;
  OPENSSL_memset(rotated_mac, 0, md_size);
  for (size_t i = scan_start, j = 0; i < orig_len; i++, j++) {
    if (j >= md_size) {
      j -= md_size;
    }
    crypto_word_t is_mac_start = constant_time_eq_w(i, mac_start);
    mac_started |= is_mac_start;
    uint8_t mac_ended = constant_time_ge_8(i, mac_end);
    rotated_mac[j] |= in[i] & mac_started & ~mac_ended;
    // Save the offset that |mac_start| is mapped to.
    rotate_offset |= j & is_mac_start;
  }

  // Now rotate the MAC. We rotate in log(md_size) steps, one for each bit
  // position.
  for (size_t offset = 1; offset < md_size; offset <<= 1, rotate_offset >>= 1) {
    // Rotate by |offset| iff the corresponding bit is set in
    // |rotate_offset|, placing the result in |rotated_mac_tmp|.
    const uint8_t skip_rotate = (rotate_offset & 1) - 1;
    for (size_t i = 0, j = offset; i < md_size; i++, j++) {
      if (j >= md_size) {
        j -= md_size;
      }
      rotated_mac_tmp[i] =
          constant_time_select_8(skip_rotate, rotated_mac[i], rotated_mac[j]);
    }

    // Swap pointers so |rotated_mac| contains the (possibly) rotated value.
    // Note the number of iterations and thus the identity of these pointers is
    // public information.
    uint8_t *tmp = rotated_mac;
    rotated_mac = rotated_mac_tmp;
    rotated_mac_tmp = tmp;
  }

  OPENSSL_memcpy(out, rotated_mac, md_size);
}

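The rotation loop above is easier to follow on a toy buffer. The sketch below is not BoringSSL code; it rotates an 8-byte array left by a secret offset using the same sequence of power-of-two conditional rotations, selecting each byte with a mask instead of branching on the offset:

// Illustrative sketch only, not BoringSSL code. It rotates |buf| left by
// |rotate_offset| positions with log2(len) conditional rotations, as in
// EVP_tls_cbc_copy_mac above, so the memory access pattern does not depend on
// the secret offset.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static void ct_rotate_left(uint8_t *buf, size_t len, size_t rotate_offset) {
  uint8_t tmp[32];
  assert(len <= sizeof(tmp));
  for (size_t offset = 1; offset < len; offset <<= 1, rotate_offset >>= 1) {
    // All-ones when this bit of the offset is clear, i.e. skip this rotation.
    const uint8_t skip_rotate = (uint8_t)((rotate_offset & 1) - 1);
    for (size_t i = 0, j = offset; i < len; i++, j++) {
      if (j >= len) {
        j -= len;
      }
      // Pick buf[i] when skipping and buf[j] when rotating, without branching.
      tmp[i] = (uint8_t)((skip_rotate & buf[i]) | (~skip_rotate & buf[j]));
    }
    memcpy(buf, tmp, len);
  }
}

int main(void) {
  uint8_t buf[8] = {0, 1, 2, 3, 4, 5, 6, 7};
  ct_rotate_left(buf, 8, 3);  // rotate left by a "secret" 3
  for (size_t i = 0; i < 8; i++) {
    printf("%d ", buf[i]);  // prints: 3 4 5 6 7 0 1 2
  }
  printf("\n");
  return 0;
}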

int EVP_sha1_final_with_secret_suffix(SHA_CTX *ctx,
                                      uint8_t out[SHA_DIGEST_LENGTH],
                                      const uint8_t *in, size_t len,
                                      size_t max_len) {
  // Bound the input length so |total_bits| below fits in four bytes. This is
  // redundant with TLS record size limits. This also ensures |input_idx| below
  // does not overflow.
  size_t max_len_bits = max_len << 3;
  if (ctx->Nh != 0 ||
      (max_len_bits >> 3) != max_len ||  // Overflow
      ctx->Nl + max_len_bits < max_len_bits ||
      ctx->Nl + max_len_bits > UINT32_MAX) {
    return 0;
  }

  // We need to hash the following into |ctx|:
  //
  // - ctx->data[:ctx->num]
  // - in[:len]
  // - A 0x80 byte
  // - However many zero bytes are needed to pad up to a block.
  // - Eight bytes of length.
  size_t num_blocks = (ctx->num + len + 1 + 8 + SHA_CBLOCK - 1) >> 6;
  size_t last_block = num_blocks - 1;
  size_t max_blocks = (ctx->num + max_len + 1 + 8 + SHA_CBLOCK - 1) >> 6;

  // The bounds above imply |total_bits| fits in four bytes.
  size_t total_bits = ctx->Nl + (len << 3);
  uint8_t length_bytes[4];
  length_bytes[0] = (uint8_t)(total_bits >> 24);
  length_bytes[1] = (uint8_t)(total_bits >> 16);
  length_bytes[2] = (uint8_t)(total_bits >> 8);
  length_bytes[3] = (uint8_t)total_bits;

  // We now construct and process each expected block in constant-time.
  uint8_t block[SHA_CBLOCK] = {0};
  uint32_t result[5] = {0};
  // input_idx is the index into |in| corresponding to the current block.
  // However, we allow this index to overflow beyond |max_len|, to simplify the
  // 0x80 byte.
  size_t input_idx = 0;
  for (size_t i = 0; i < max_blocks; i++) {
    // Fill |block| with data from the partial block in |ctx| and |in|. We copy
    // as if we were hashing up to |max_len| and then zero the excess later.
    size_t block_start = 0;
    if (i == 0) {
      OPENSSL_memcpy(block, ctx->data, ctx->num);
      block_start = ctx->num;
    }
    if (input_idx < max_len) {
      size_t to_copy = SHA_CBLOCK - block_start;
      if (to_copy > max_len - input_idx) {
        to_copy = max_len - input_idx;
      }
      OPENSSL_memcpy(block + block_start, in + input_idx, to_copy);
    }

    // Zero any bytes beyond |len| and add the 0x80 byte.
    for (size_t j = block_start; j < SHA_CBLOCK; j++) {
      // input[idx] corresponds to block[j].
      size_t idx = input_idx + j - block_start;
      // The barriers on |len| are not strictly necessary. However, without
      // them, GCC compiles this code by incorporating |len| into the loop
      // counter and subtracting it out later. This is still constant-time, but
      // it frustrates attempts to validate this.
      uint8_t is_in_bounds = constant_time_lt_8(idx, value_barrier_w(len));
      uint8_t is_padding_byte = constant_time_eq_8(idx, value_barrier_w(len));
      block[j] &= is_in_bounds;
      block[j] |= 0x80 & is_padding_byte;
    }

    input_idx += SHA_CBLOCK - block_start;

    // Fill in the length if this is the last block.
    crypto_word_t is_last_block = constant_time_eq_w(i, last_block);
    for (size_t j = 0; j < 4; j++) {
      block[SHA_CBLOCK - 4 + j] |= is_last_block & length_bytes[j];
    }

    // Process the block and save the hash state if it is the final value.
    SHA1_Transform(ctx, block);
    for (size_t j = 0; j < 5; j++) {
      result[j] |= is_last_block & ctx->h[j];
    }
  }

  // Write the output.
  for (size_t i = 0; i < 5; i++) {
    CRYPTO_store_u32_be(out + 4 * i, result[i]);
  }
  return 1;
}

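As a concrete check of the block arithmetic above: with an empty partial block (ctx->num = 0), len = 25 and max_len = 70, the message, the 0x80 byte and the 8 length bytes need 25 + 1 + 8 = 34 bytes, so num_blocks = (34 + 63) >> 6 = 1 while max_blocks = (70 + 1 + 8 + 63) >> 6 = 2; the loop therefore always runs twice, and the length bytes and final hash state are only OR-ed in on the iteration where i equals last_block. The sketch below is an assumed in-tree caller (the prototype lives in BoringSSL's internal crypto/cipher_extra/internal.h); it checks that the constant-time finalisation agrees with a plain SHA-1 over the same |len| bytes:

// Illustrative sketch only; assumes it is built inside the BoringSSL tree so
// that crypto/cipher_extra/internal.h, which declares
// EVP_sha1_final_with_secret_suffix, is available.
#include <assert.h>
#include <stdint.h>
#include <string.h>

#include <openssl/sha.h>

// Requires len <= max_len; only |max_len| may influence timing.
static void check_secret_suffix_sha1(const uint8_t *suffix, size_t len,
                                     size_t max_len) {
  // Constant-time path: |len| is treated as secret.
  SHA_CTX ctx;
  SHA1_Init(&ctx);
  uint8_t got[SHA_DIGEST_LENGTH];
  int ok = EVP_sha1_final_with_secret_suffix(&ctx, got, suffix, len, max_len);
  assert(ok);
  (void)ok;

  // Reference: an ordinary (variable-time) SHA-1 over the first |len| bytes.
  uint8_t want[SHA_DIGEST_LENGTH];
  SHA1(suffix, len, want);
  assert(memcmp(got, want, SHA_DIGEST_LENGTH) == 0);
}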

int EVP_sha256_final_with_secret_suffix(SHA256_CTX *ctx,
                                        uint8_t out[SHA256_DIGEST_LENGTH],
                                        const uint8_t *in, size_t len,
                                        size_t max_len) {
  // Bound the input length so |total_bits| below fits in four bytes. This is
  // redundant with TLS record size limits. This also ensures |input_idx| below
  // does not overflow.
  size_t max_len_bits = max_len << 3;
  if (ctx->Nh != 0 ||
      (max_len_bits >> 3) != max_len ||  // Overflow
      ctx->Nl + max_len_bits < max_len_bits ||
      ctx->Nl + max_len_bits > UINT32_MAX) {
    return 0;
  }

  // We need to hash the following into |ctx|:
  //
  // - ctx->data[:ctx->num]
  // - in[:len]
  // - A 0x80 byte
  // - However many zero bytes are needed to pad up to a block.
  // - Eight bytes of length.
  size_t num_blocks = (ctx->num + len + 1 + 8 + SHA256_CBLOCK - 1) >> 6;
  size_t last_block = num_blocks - 1;
  size_t max_blocks = (ctx->num + max_len + 1 + 8 + SHA256_CBLOCK - 1) >> 6;

  // The bounds above imply |total_bits| fits in four bytes.
  size_t total_bits = ctx->Nl + (len << 3);
  uint8_t length_bytes[4];
  length_bytes[0] = (uint8_t)(total_bits >> 24);
  length_bytes[1] = (uint8_t)(total_bits >> 16);
  length_bytes[2] = (uint8_t)(total_bits >> 8);
  length_bytes[3] = (uint8_t)total_bits;

  // We now construct and process each expected block in constant-time.
  uint8_t block[SHA256_CBLOCK] = {0};
  uint32_t result[8] = {0};
  // input_idx is the index into |in| corresponding to the current block.
  // However, we allow this index to overflow beyond |max_len|, to simplify the
  // 0x80 byte.
  size_t input_idx = 0;
  for (size_t i = 0; i < max_blocks; i++) {
    // Fill |block| with data from the partial block in |ctx| and |in|. We copy
    // as if we were hashing up to |max_len| and then zero the excess later.
    size_t block_start = 0;
    if (i == 0) {
      OPENSSL_memcpy(block, ctx->data, ctx->num);
      block_start = ctx->num;
    }
    if (input_idx < max_len) {
      size_t to_copy = SHA256_CBLOCK - block_start;
      if (to_copy > max_len - input_idx) {
        to_copy = max_len - input_idx;
      }
      OPENSSL_memcpy(block + block_start, in + input_idx, to_copy);
    }

    // Zero any bytes beyond |len| and add the 0x80 byte.
    for (size_t j = block_start; j < SHA256_CBLOCK; j++) {
      // input[idx] corresponds to block[j].
      size_t idx = input_idx + j - block_start;
      // The barriers on |len| are not strictly necessary. However, without
      // them, GCC compiles this code by incorporating |len| into the loop
      // counter and subtracting it out later. This is still constant-time, but
      // it frustrates attempts to validate this.
      uint8_t is_in_bounds = constant_time_lt_8(idx, value_barrier_w(len));
      uint8_t is_padding_byte = constant_time_eq_8(idx, value_barrier_w(len));
      block[j] &= is_in_bounds;
      block[j] |= 0x80 & is_padding_byte;
    }

    input_idx += SHA256_CBLOCK - block_start;

    // Fill in the length if this is the last block.
    crypto_word_t is_last_block = constant_time_eq_w(i, last_block);
    for (size_t j = 0; j < 4; j++) {
      block[SHA256_CBLOCK - 4 + j] |= is_last_block & length_bytes[j];
    }

    // Process the block and save the hash state if it is the final value.
    SHA256_Transform(ctx, block);
    for (size_t j = 0; j < 8; j++) {
      result[j] |= is_last_block & ctx->h[j];
    }
  }

  // Write the output.
  for (size_t i = 0; i < 8; i++) {
    CRYPTO_store_u32_be(out + 4 * i, result[i]);
  }
  return 1;
}


int EVP_tls_cbc_record_digest_supported(const EVP_MD *md) {
  switch (EVP_MD_type(md)) {
    case NID_sha1:
    case NID_sha256:
      return 1;
    default:
      return 0;
  }
}


static int tls_cbc_digest_record_sha1(uint8_t *md_out, size_t *md_out_size,
                                      const uint8_t header[13],
                                      const uint8_t *data, size_t data_size,
                                      size_t data_plus_mac_plus_padding_size,
                                      const uint8_t *mac_secret,
                                      unsigned mac_secret_length) {
  if (mac_secret_length > SHA_CBLOCK) {
    // HMAC pads small keys with zeros and hashes large keys down. This function
    // should never reach the large key case.
    assert(0);
    return 0;
  }

  // Compute the initial HMAC block.
  uint8_t hmac_pad[SHA_CBLOCK];
  OPENSSL_memset(hmac_pad, 0, sizeof(hmac_pad));
  OPENSSL_memcpy(hmac_pad, mac_secret, mac_secret_length);
  for (size_t i = 0; i < SHA_CBLOCK; i++) {
    hmac_pad[i] ^= 0x36;
  }

  SHA_CTX ctx;
  SHA1_Init(&ctx);
  SHA1_Update(&ctx, hmac_pad, SHA_CBLOCK);
  SHA1_Update(&ctx, header, 13);

  // There are at most 256 bytes of padding, so we can compute the public
  // minimum length for |data_size|.
  size_t min_data_size = 0;
  if (data_plus_mac_plus_padding_size > SHA_DIGEST_LENGTH + 256) {
    min_data_size = data_plus_mac_plus_padding_size - SHA_DIGEST_LENGTH - 256;
  }

  // Hash the public minimum length directly. This reduces the number of blocks
  // that must be computed in constant-time.
  SHA1_Update(&ctx, data, min_data_size);

  // Hash the remaining data without leaking |data_size|.
  uint8_t mac_out[SHA_DIGEST_LENGTH];
  if (!EVP_sha1_final_with_secret_suffix(
          &ctx, mac_out, data + min_data_size, data_size - min_data_size,
          data_plus_mac_plus_padding_size - min_data_size)) {
    return 0;
  }

  // Complete the HMAC in the standard manner.
  SHA1_Init(&ctx);
  for (size_t i = 0; i < SHA_CBLOCK; i++) {
    hmac_pad[i] ^= 0x6a;
  }

  SHA1_Update(&ctx, hmac_pad, SHA_CBLOCK);
  SHA1_Update(&ctx, mac_out, SHA_DIGEST_LENGTH);
  SHA1_Final(md_out, &ctx);
  *md_out_size = SHA_DIGEST_LENGTH;
  return 1;
}

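One detail worth spelling out in the second loop above: |hmac_pad| already holds key ^ 0x36 (the HMAC ipad), and XOR-ing it with 0x6a converts it in place to key ^ 0x5c (the opad), because 0x36 ^ 0x6a = 0x5c, so the function never needs a second copy of the key. The fragment below, which is not part of the file, just verifies that identity:

// Illustrative fragment, not part of tls_cbc.c: the second XOR with 0x6a
// turns an ipad-masked key byte into an opad-masked one in place.
#include <assert.h>
#include <stdint.h>

int main(void) {
  uint8_t key_byte = 0xa7;           // an arbitrary key byte
  uint8_t pad = key_byte ^ 0x36;     // after the first loop (ipad)
  pad ^= 0x6a;                       // after the second loop
  assert(pad == (key_byte ^ 0x5c));  // now masked with the opad
  assert((0x36 ^ 0x6a) == 0x5c);
  return 0;
}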

static int tls_cbc_digest_record_sha256(uint8_t *md_out, size_t *md_out_size,
                                        const uint8_t header[13],
                                        const uint8_t *data, size_t data_size,
                                        size_t data_plus_mac_plus_padding_size,
                                        const uint8_t *mac_secret,
                                        unsigned mac_secret_length) {
  if (mac_secret_length > SHA256_CBLOCK) {
    // HMAC pads small keys with zeros and hashes large keys down. This function
    // should never reach the large key case.
    assert(0);
    return 0;
  }

  // Compute the initial HMAC block.
  uint8_t hmac_pad[SHA256_CBLOCK];
  OPENSSL_memset(hmac_pad, 0, sizeof(hmac_pad));
  OPENSSL_memcpy(hmac_pad, mac_secret, mac_secret_length);
  for (size_t i = 0; i < SHA256_CBLOCK; i++) {
    hmac_pad[i] ^= 0x36;
  }

  SHA256_CTX ctx;
  SHA256_Init(&ctx);
  SHA256_Update(&ctx, hmac_pad, SHA256_CBLOCK);
  SHA256_Update(&ctx, header, 13);

  // There are at most 256 bytes of padding, so we can compute the public
  // minimum length for |data_size|.
  size_t min_data_size = 0;
  if (data_plus_mac_plus_padding_size > SHA256_DIGEST_LENGTH + 256) {
    min_data_size =
        data_plus_mac_plus_padding_size - SHA256_DIGEST_LENGTH - 256;
  }

  // Hash the public minimum length directly. This reduces the number of blocks
  // that must be computed in constant-time.
  SHA256_Update(&ctx, data, min_data_size);

  // Hash the remaining data without leaking |data_size|.
  uint8_t mac_out[SHA256_DIGEST_LENGTH];
  if (!EVP_sha256_final_with_secret_suffix(
          &ctx, mac_out, data + min_data_size, data_size - min_data_size,
          data_plus_mac_plus_padding_size - min_data_size)) {
    return 0;
  }

  // Complete the HMAC in the standard manner.
  SHA256_Init(&ctx);
  for (size_t i = 0; i < SHA256_CBLOCK; i++) {
    hmac_pad[i] ^= 0x6a;
  }

  SHA256_Update(&ctx, hmac_pad, SHA256_CBLOCK);
  SHA256_Update(&ctx, mac_out, SHA256_DIGEST_LENGTH);
  SHA256_Final(md_out, &ctx);
  *md_out_size = SHA256_DIGEST_LENGTH;
  return 1;
}


int EVP_tls_cbc_digest_record(const EVP_MD *md, uint8_t *md_out,
                              size_t *md_out_size, const uint8_t header[13],
                              const uint8_t *data, size_t data_size,
                              size_t data_plus_mac_plus_padding_size,
                              const uint8_t *mac_secret,
                              unsigned mac_secret_length) {
  switch (EVP_MD_type(md)) {
    case NID_sha1:
      return tls_cbc_digest_record_sha1(
          md_out, md_out_size, header, data, data_size,
          data_plus_mac_plus_padding_size, mac_secret, mac_secret_length);

    case NID_sha256:
      return tls_cbc_digest_record_sha256(
          md_out, md_out_size, header, data, data_size,
          data_plus_mac_plus_padding_size, mac_secret, mac_secret_length);

    default:
      // EVP_tls_cbc_record_digest_supported should have been called first to
      // check that the hash function is supported.
      assert(0);
      *md_out_size = 0;
      return 0;
  }
}

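Finally, a hedged usage sketch of the dispatcher. It assumes an in-tree caller with access to the declarations in crypto/cipher_extra/internal.h; the wrapper name and the idea of combining the two calls are illustrative, not BoringSSL API:

// Illustrative sketch only; the wrapper below is not part of BoringSSL.
// |header| is the 13-byte TLS MAC header (sequence number, type, version,
// length); |data_size| is secret because it depends on the padding, while
// |data_plus_mac_plus_padding_size| is public. The prototypes for the two
// EVP_tls_cbc_* functions come from crypto/cipher_extra/internal.h.
#include <stddef.h>
#include <stdint.h>

#include <openssl/digest.h>

static int mac_record_sha256(uint8_t mac_out[EVP_MAX_MD_SIZE], size_t *mac_len,
                             const uint8_t header[13], const uint8_t *data,
                             size_t data_size,
                             size_t data_plus_mac_plus_padding_size,
                             const uint8_t *mac_key, unsigned mac_key_len) {
  const EVP_MD *md = EVP_sha256();
  if (!EVP_tls_cbc_record_digest_supported(md)) {
    return 0;  // only SHA-1 and SHA-256 are handled above
  }
  return EVP_tls_cbc_digest_record(md, mac_out, mac_len, header, data,
                                   data_size, data_plus_mac_plus_padding_size,
                                   mac_key, mac_key_len);
}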