Coverage Report

Created: 2025-11-17 06:18

/src/boringssl/crypto/fipsmodule/sha/sha1.cc.inc
  Line|  Count|Source
     1|       |// Copyright 1995-2016 The OpenSSL Project Authors. All Rights Reserved.
     2|       |//
     3|       |// Licensed under the Apache License, Version 2.0 (the "License");
     4|       |// you may not use this file except in compliance with the License.
     5|       |// You may obtain a copy of the License at
     6|       |//
     7|       |//     https://www.apache.org/licenses/LICENSE-2.0
     8|       |//
     9|       |// Unless required by applicable law or agreed to in writing, software
    10|       |// distributed under the License is distributed on an "AS IS" BASIS,
    11|       |// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    12|       |// See the License for the specific language governing permissions and
    13|       |// limitations under the License.
    14|       |
    15|       |#include <string.h>
    16|       |
    17|       |#include <openssl/mem.h>
    18|       |#include <openssl/span.h>
    19|       |
    20|       |#include "../../internal.h"
    21|       |#include "../bcm_interface.h"
    22|       |#include "../digest/md32_common.h"
    23|       |#include "../service_indicator/internal.h"
    24|       |#include "internal.h"
    25|       |
    26|       |
    27|   266k|bcm_infallible BCM_sha1_init(SHA_CTX *sha) {
    28|   266k|  OPENSSL_memset(sha, 0, sizeof(SHA_CTX));
    29|   266k|  sha->h[0] = 0x67452301UL;
    30|   266k|  sha->h[1] = 0xefcdab89UL;
    31|   266k|  sha->h[2] = 0x98badcfeUL;
    32|   266k|  sha->h[3] = 0x10325476UL;
    33|   266k|  sha->h[4] = 0xc3d2e1f0UL;
    34|   266k|  return bcm_infallible::approved;
    35|   266k|}
    36|       |
    37|       |#if !defined(SHA1_ASM)
    38|       |static void sha1_block_data_order(uint32_t state[5], const uint8_t *data,
    39|       |                                  size_t num);
    40|       |#endif
    41|       |
    42|  4.91k|bcm_infallible BCM_sha1_transform(SHA_CTX *c, const uint8_t data[SHA_CBLOCK]) {
    43|  4.91k|  sha1_block_data_order(c->h, data, 1);
    44|  4.91k|  return bcm_infallible::approved;
    45|  4.91k|}
    46|       |
    47|       |namespace {
    48|       |struct SHA1Traits {
    49|       |  using HashContext = SHA_CTX;
    50|       |  static constexpr size_t kBlockSize = SHA_CBLOCK;
    51|       |  static constexpr bool kLengthIsBigEndian = true;
    52|       |  static void HashBlocks(uint32_t *state, const uint8_t *data,
    53|   482k|                         size_t num_blocks) {
    54|   482k|    sha1_block_data_order(state, data, num_blocks);
    55|   482k|  }
    56|       |};
    57|       |}  // namespace
    58|       |
    59|   524k|bcm_infallible BCM_sha1_update(SHA_CTX *c, const void *data, size_t len) {
    60|   524k|  bssl::crypto_md32_update<SHA1Traits>(
    61|   524k|      c, bssl::Span(static_cast<const uint8_t *>(data), len));
    62|   524k|  return bcm_infallible::approved;
    63|   524k|}
    64|       |
    65|       |static void sha1_output_state(uint8_t out[SHA_DIGEST_LENGTH],
    66|   231k|                              const SHA_CTX *ctx) {
    67|   231k|  CRYPTO_store_u32_be(out, ctx->h[0]);
    68|   231k|  CRYPTO_store_u32_be(out + 4, ctx->h[1]);
    69|   231k|  CRYPTO_store_u32_be(out + 8, ctx->h[2]);
    70|   231k|  CRYPTO_store_u32_be(out + 12, ctx->h[3]);
    71|   231k|  CRYPTO_store_u32_be(out + 16, ctx->h[4]);
    72|   231k|}
    73|       |
    74|   231k|bcm_infallible BCM_sha1_final(uint8_t out[SHA_DIGEST_LENGTH], SHA_CTX *c) {
    75|   231k|  bssl::crypto_md32_final<SHA1Traits>(c);
    76|   231k|  sha1_output_state(out, c);
    77|   231k|  FIPS_service_indicator_update_state();
    78|   231k|  return bcm_infallible::approved;
    79|   231k|}
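
The BCM_sha1_* entry points above form the usual init/update/final streaming interface, with BCM_sha1_update and BCM_sha1_final delegating block buffering and padding to the shared crypto_md32_common driver via SHA1Traits. As a rough usage sketch only, assuming nothing beyond the prototypes visible in this file (real callers normally reach this code through the public SHA1() or EVP digest wrappers rather than the BCM layer):

    // Hash "hello world" in two chunks; digest receives the 20-byte SHA-1 result.
    uint8_t digest[SHA_DIGEST_LENGTH];
    SHA_CTX ctx;
    BCM_sha1_init(&ctx);                 // loads the FIPS 180-4 initial state h[0..4]
    BCM_sha1_update(&ctx, "hello ", 6);  // input may arrive in arbitrary-sized chunks
    BCM_sha1_update(&ctx, "world", 5);
    BCM_sha1_final(digest, &ctx);        // pads, stores h[0..4] big-endian, updates the FIPS indicator

Each call returns bcm_infallible, so there is no error path to check; splitting the input only exercises the partial-block buffering in crypto_md32_update.
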
    80|       |
    81|       |bcm_infallible BCM_fips_186_2_prf(uint8_t *out, size_t out_len,
    82|      0|                                  const uint8_t xkey[SHA_DIGEST_LENGTH]) {
    83|       |  // XKEY and XVAL are 160-bit values, but are internally right-padded up to
    84|       |  // block size. See FIPS 186-2, Appendix 3.3. This buffer maintains both the
    85|       |  // current value of XKEY and the padding.
    86|      0|  uint8_t block[SHA_CBLOCK] = {0};
    87|      0|  OPENSSL_memcpy(block, xkey, SHA_DIGEST_LENGTH);
    88|       |
    89|      0|  while (out_len != 0) {
    90|       |    // We always use a zero XSEED, so we can merge the inner and outer loops.
    91|       |    // XVAL is also always equal to XKEY.
    92|      0|    SHA_CTX ctx;
    93|      0|    BCM_sha1_init(&ctx);
    94|      0|    BCM_sha1_transform(&ctx, block);
    95|       |
    96|       |    // XKEY = (1 + XKEY + w_i) mod 2^b
    97|      0|    uint32_t carry = 1;
    98|      0|    for (int i = 4; i >= 0; i--) {
    99|      0|      uint32_t tmp = CRYPTO_load_u32_be(block + i * 4);
   100|      0|      tmp = CRYPTO_addc_u32(tmp, ctx.h[i], carry, &carry);
   101|      0|      CRYPTO_store_u32_be(block + i * 4, tmp);
   102|      0|    }
   103|       |
   104|       |    // Output w_i.
   105|      0|    if (out_len < SHA_DIGEST_LENGTH) {
   106|      0|      uint8_t buf[SHA_DIGEST_LENGTH];
   107|      0|      sha1_output_state(buf, &ctx);
   108|      0|      OPENSSL_memcpy(out, buf, out_len);
   109|      0|      break;
   110|      0|    }
   111|      0|    sha1_output_state(out, &ctx);
   112|      0|    out += SHA_DIGEST_LENGTH;
   113|      0|    out_len -= SHA_DIGEST_LENGTH;
   114|      0|  }
   115|      0|  return bcm_infallible::not_approved;
   116|      0|}
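
The uncovered function above implements the FIPS 186-2 (Appendix 3.1/3.3) key-generation PRF with a zero XSEED: each iteration runs one SHA-1 compression over the current XKEY (right-padded to a 64-byte block), emits the compression output as w_i, then updates XKEY = (1 + XKEY + w_i) mod 2^160 via the big-endian carry loop. Purely as an illustrative sketch of the calling convention implied by the prototype, and nothing more, deriving 40 bytes of keying material from a 20-byte seed would look like:

    const uint8_t xkey[SHA_DIGEST_LENGTH] = {0};  // 20 seed bytes; all-zero placeholder
    uint8_t material[2 * SHA_DIGEST_LENGTH];
    BCM_fips_186_2_prf(material, sizeof(material), xkey);  // loop body runs twice
    // Returns bcm_infallible::not_approved: this legacy construction sits outside
    // the FIPS-approved boundary, unlike the SHA-1 entry points above.
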
   117|       |
   118|       |#define Xupdate(a, ix, ia, ib, ic, id)    \
   119|       |  do {                                    \
   120|       |    (a) = ((ia) ^ (ib) ^ (ic) ^ (id));    \
   121|       |    (ix) = (a) = CRYPTO_rotl_u32((a), 1); \
   122|       |  } while (0)
   123|       |
   124|       |#define K_00_19 0x5a827999UL
   125|       |#define K_20_39 0x6ed9eba1UL
   126|       |#define K_40_59 0x8f1bbcdcUL
   127|       |#define K_60_79 0xca62c1d6UL
   128|       |
   129|       |// As pointed out by Wei Dai <weidai@eskimo.com>, F() below can be simplified
   130|       |// to the code in F_00_19.  Wei attributes these optimisations to Peter
   131|       |// Gutmann's SHS code, and he attributes it to Rich Schroeppel. #define
   132|       |// F(x,y,z) (((x) & (y))  |  ((~(x)) & (z))) I've just become aware of another
   133|       |// tweak to be made, again from Wei Dai, in F_40_59, (x&a)|(y&a) -> (x|y)&a
   134|       |#define F_00_19(b, c, d) ((((c) ^ (d)) & (b)) ^ (d))
   135|       |#define F_20_39(b, c, d) ((b) ^ (c) ^ (d))
   136|       |#define F_40_59(b, c, d) (((b) & (c)) | (((b) | (c)) & (d)))
   137|       |#define F_60_79(b, c, d) F_20_39(b, c, d)
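
The comment above records two boolean rewrites: the round-0..19 "choose" function (b & c) | (~b & d) becomes ((c ^ d) & b) ^ d (F_00_19), and the round-40..59 "majority" function becomes (b & c) | ((b | c) & d) (F_40_59). Both identities are easy to confirm with a tiny self-contained check; this snippet is not part of the BoringSSL tree and assumes only the standard headers:

    #include <assert.h>

    int main() {
      // Bitwise identities hold bit-by-bit, so checking all 0/1 combinations
      // of (b, c, d) is sufficient.
      for (unsigned b = 0; b < 2; b++) {
        for (unsigned c = 0; c < 2; c++) {
          for (unsigned d = 0; d < 2; d++) {
            unsigned ch_ref = (b & c) | (~b & d & 1u);       // textbook Ch(b, c, d)
            unsigned ch_opt = ((c ^ d) & b) ^ d;             // F_00_19 form
            unsigned maj_ref = (b & c) | (b & d) | (c & d);  // textbook Maj(b, c, d)
            unsigned maj_opt = (b & c) | ((b | c) & d);      // F_40_59 form
            assert(ch_ref == ch_opt);
            assert(maj_ref == maj_opt);
          }
        }
      }
      return 0;
    }
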
   138|       |
   139|       |#define BODY_00_15(i, a, b, c, d, e, f, xi)                \
   140|       |  do {                                                     \
   141|       |    (f) = (xi) + (e) + K_00_19 + CRYPTO_rotl_u32((a), 5) + \
   142|       |          F_00_19((b), (c), (d));                          \
   143|       |    (b) = CRYPTO_rotl_u32((b), 30);                        \
   144|       |  } while (0)
   145|       |
   146|       |#define BODY_16_19(i, a, b, c, d, e, f, xi, xa, xb, xc, xd)                  \
   147|       |  do {                                                                       \
   148|       |    Xupdate(f, xi, xa, xb, xc, xd);                                          \
   149|       |    (f) += (e) + K_00_19 + CRYPTO_rotl_u32((a), 5) + F_00_19((b), (c), (d)); \
   150|       |    (b) = CRYPTO_rotl_u32((b), 30);                                          \
   151|       |  } while (0)
   152|       |
   153|       |#define BODY_20_31(i, a, b, c, d, e, f, xi, xa, xb, xc, xd)                  \
   154|       |  do {                                                                       \
   155|       |    Xupdate(f, xi, xa, xb, xc, xd);                                          \
   156|       |    (f) += (e) + K_20_39 + CRYPTO_rotl_u32((a), 5) + F_20_39((b), (c), (d)); \
   157|       |    (b) = CRYPTO_rotl_u32((b), 30);                                          \
   158|       |  } while (0)
   159|       |
   160|       |#define BODY_32_39(i, a, b, c, d, e, f, xa, xb, xc, xd)                      \
   161|       |  do {                                                                       \
   162|       |    Xupdate(f, xa, xa, xb, xc, xd);                                          \
   163|       |    (f) += (e) + K_20_39 + CRYPTO_rotl_u32((a), 5) + F_20_39((b), (c), (d)); \
   164|       |    (b) = CRYPTO_rotl_u32((b), 30);                                          \
   165|       |  } while (0)
   166|       |
   167|       |#define BODY_40_59(i, a, b, c, d, e, f, xa, xb, xc, xd)                      \
   168|       |  do {                                                                       \
   169|       |    Xupdate(f, xa, xa, xb, xc, xd);                                          \
   170|       |    (f) += (e) + K_40_59 + CRYPTO_rotl_u32((a), 5) + F_40_59((b), (c), (d)); \
   171|       |    (b) = CRYPTO_rotl_u32((b), 30);                                          \
   172|       |  } while (0)
   173|       |
   174|       |#define BODY_60_79(i, a, b, c, d, e, f, xa, xb, xc, xd)    \
   175|       |  do {                                                     \
   176|       |    Xupdate(f, xa, xa, xb, xc, xd);                        \
   177|       |    (f) = (xa) + (e) + K_60_79 + CRYPTO_rotl_u32((a), 5) + \
   178|       |          F_60_79((b), (c), (d));                          \
   179|       |    (b) = CRYPTO_rotl_u32((b), 30);                        \
   180|       |  } while (0)
   181|       |
   182|       |#ifdef X
   183|       |#undef X
   184|       |#endif
   185|       |
   186|       |/* Originally X was an array. As it's automatic it's natural
   187|       | * to expect RISC compiler to accommodate at least part of it in
   188|       | * the register bank, isn't it? Unfortunately not all compilers
   189|       | * "find" this expectation reasonable:-( In order to make such
   190|       | * compilers generate better code I replace X[] with a bunch of
   191|       | * X0, X1, etc. See the function body below...
   192|       | *         <appro@fy.chalmers.se> */
   193|       |#define X(i) XX##i
   194|       |
   195|       |#if !defined(SHA1_ASM)
   196|       |
   197|       |#if !defined(SHA1_ASM_NOHW)
   198|       |static void sha1_block_data_order_nohw(uint32_t state[5], const uint8_t *data,
   199|       |                                       size_t num) {
   200|       |  uint32_t A, B, C, D, E, T;
   201|       |  uint32_t XX0, XX1, XX2, XX3, XX4, XX5, XX6, XX7, XX8, XX9, XX10, XX11, XX12,
   202|       |      XX13, XX14, XX15;
   203|       |
   204|       |  A = state[0];
   205|       |  B = state[1];
   206|       |  C = state[2];
   207|       |  D = state[3];
   208|       |  E = state[4];
   209|       |
   210|       |  for (;;) {
   211|       |    X(0) = CRYPTO_load_u32_be(data);
   212|       |    data += 4;
   213|       |    X(1) = CRYPTO_load_u32_be(data);
   214|       |    data += 4;
   215|       |    BODY_00_15(0, A, B, C, D, E, T, X(0));
   216|       |    X(2) = CRYPTO_load_u32_be(data);
   217|       |    data += 4;
   218|       |    BODY_00_15(1, T, A, B, C, D, E, X(1));
   219|       |    X(3) = CRYPTO_load_u32_be(data);
   220|       |    data += 4;
   221|       |    BODY_00_15(2, E, T, A, B, C, D, X(2));
   222|       |    X(4) = CRYPTO_load_u32_be(data);
   223|       |    data += 4;
   224|       |    BODY_00_15(3, D, E, T, A, B, C, X(3));
   225|       |    X(5) = CRYPTO_load_u32_be(data);
   226|       |    data += 4;
   227|       |    BODY_00_15(4, C, D, E, T, A, B, X(4));
   228|       |    X(6) = CRYPTO_load_u32_be(data);
   229|       |    data += 4;
   230|       |    BODY_00_15(5, B, C, D, E, T, A, X(5));
   231|       |    X(7) = CRYPTO_load_u32_be(data);
   232|       |    data += 4;
   233|       |    BODY_00_15(6, A, B, C, D, E, T, X(6));
   234|       |    X(8) = CRYPTO_load_u32_be(data);
   235|       |    data += 4;
   236|       |    BODY_00_15(7, T, A, B, C, D, E, X(7));
   237|       |    X(9) = CRYPTO_load_u32_be(data);
   238|       |    data += 4;
   239|       |    BODY_00_15(8, E, T, A, B, C, D, X(8));
   240|       |    X(10) = CRYPTO_load_u32_be(data);
   241|       |    data += 4;
   242|       |    BODY_00_15(9, D, E, T, A, B, C, X(9));
   243|       |    X(11) = CRYPTO_load_u32_be(data);
   244|       |    data += 4;
   245|       |    BODY_00_15(10, C, D, E, T, A, B, X(10));
   246|       |    X(12) = CRYPTO_load_u32_be(data);
   247|       |    data += 4;
   248|       |    BODY_00_15(11, B, C, D, E, T, A, X(11));
   249|       |    X(13) = CRYPTO_load_u32_be(data);
   250|       |    data += 4;
   251|       |    BODY_00_15(12, A, B, C, D, E, T, X(12));
   252|       |    X(14) = CRYPTO_load_u32_be(data);
   253|       |    data += 4;
   254|       |    BODY_00_15(13, T, A, B, C, D, E, X(13));
   255|       |    X(15) = CRYPTO_load_u32_be(data);
   256|       |    data += 4;
   257|       |    BODY_00_15(14, E, T, A, B, C, D, X(14));
   258|       |    BODY_00_15(15, D, E, T, A, B, C, X(15));
   259|       |
   260|       |    BODY_16_19(16, C, D, E, T, A, B, X(0), X(0), X(2), X(8), X(13));
   261|       |    BODY_16_19(17, B, C, D, E, T, A, X(1), X(1), X(3), X(9), X(14));
   262|       |    BODY_16_19(18, A, B, C, D, E, T, X(2), X(2), X(4), X(10), X(15));
   263|       |    BODY_16_19(19, T, A, B, C, D, E, X(3), X(3), X(5), X(11), X(0));
   264|       |
   265|       |    BODY_20_31(20, E, T, A, B, C, D, X(4), X(4), X(6), X(12), X(1));
   266|       |    BODY_20_31(21, D, E, T, A, B, C, X(5), X(5), X(7), X(13), X(2));
   267|       |    BODY_20_31(22, C, D, E, T, A, B, X(6), X(6), X(8), X(14), X(3));
   268|       |    BODY_20_31(23, B, C, D, E, T, A, X(7), X(7), X(9), X(15), X(4));
   269|       |    BODY_20_31(24, A, B, C, D, E, T, X(8), X(8), X(10), X(0), X(5));
   270|       |    BODY_20_31(25, T, A, B, C, D, E, X(9), X(9), X(11), X(1), X(6));
   271|       |    BODY_20_31(26, E, T, A, B, C, D, X(10), X(10), X(12), X(2), X(7));
   272|       |    BODY_20_31(27, D, E, T, A, B, C, X(11), X(11), X(13), X(3), X(8));
   273|       |    BODY_20_31(28, C, D, E, T, A, B, X(12), X(12), X(14), X(4), X(9));
   274|       |    BODY_20_31(29, B, C, D, E, T, A, X(13), X(13), X(15), X(5), X(10));
   275|       |    BODY_20_31(30, A, B, C, D, E, T, X(14), X(14), X(0), X(6), X(11));
   276|       |    BODY_20_31(31, T, A, B, C, D, E, X(15), X(15), X(1), X(7), X(12));
   277|       |
   278|       |    BODY_32_39(32, E, T, A, B, C, D, X(0), X(2), X(8), X(13));
   279|       |    BODY_32_39(33, D, E, T, A, B, C, X(1), X(3), X(9), X(14));
   280|       |    BODY_32_39(34, C, D, E, T, A, B, X(2), X(4), X(10), X(15));
   281|       |    BODY_32_39(35, B, C, D, E, T, A, X(3), X(5), X(11), X(0));
   282|       |    BODY_32_39(36, A, B, C, D, E, T, X(4), X(6), X(12), X(1));
   283|       |    BODY_32_39(37, T, A, B, C, D, E, X(5), X(7), X(13), X(2));
   284|       |    BODY_32_39(38, E, T, A, B, C, D, X(6), X(8), X(14), X(3));
   285|       |    BODY_32_39(39, D, E, T, A, B, C, X(7), X(9), X(15), X(4));
   286|       |
   287|       |    BODY_40_59(40, C, D, E, T, A, B, X(8), X(10), X(0), X(5));
   288|       |    BODY_40_59(41, B, C, D, E, T, A, X(9), X(11), X(1), X(6));
   289|       |    BODY_40_59(42, A, B, C, D, E, T, X(10), X(12), X(2), X(7));
   290|       |    BODY_40_59(43, T, A, B, C, D, E, X(11), X(13), X(3), X(8));
   291|       |    BODY_40_59(44, E, T, A, B, C, D, X(12), X(14), X(4), X(9));
   292|       |    BODY_40_59(45, D, E, T, A, B, C, X(13), X(15), X(5), X(10));
   293|       |    BODY_40_59(46, C, D, E, T, A, B, X(14), X(0), X(6), X(11));
   294|       |    BODY_40_59(47, B, C, D, E, T, A, X(15), X(1), X(7), X(12));
   295|       |    BODY_40_59(48, A, B, C, D, E, T, X(0), X(2), X(8), X(13));
   296|       |    BODY_40_59(49, T, A, B, C, D, E, X(1), X(3), X(9), X(14));
   297|       |    BODY_40_59(50, E, T, A, B, C, D, X(2), X(4), X(10), X(15));
   298|       |    BODY_40_59(51, D, E, T, A, B, C, X(3), X(5), X(11), X(0));
   299|       |    BODY_40_59(52, C, D, E, T, A, B, X(4), X(6), X(12), X(1));
   300|       |    BODY_40_59(53, B, C, D, E, T, A, X(5), X(7), X(13), X(2));
   301|       |    BODY_40_59(54, A, B, C, D, E, T, X(6), X(8), X(14), X(3));
   302|       |    BODY_40_59(55, T, A, B, C, D, E, X(7), X(9), X(15), X(4));
   303|       |    BODY_40_59(56, E, T, A, B, C, D, X(8), X(10), X(0), X(5));
   304|       |    BODY_40_59(57, D, E, T, A, B, C, X(9), X(11), X(1), X(6));
   305|       |    BODY_40_59(58, C, D, E, T, A, B, X(10), X(12), X(2), X(7));
   306|       |    BODY_40_59(59, B, C, D, E, T, A, X(11), X(13), X(3), X(8));
   307|       |
   308|       |    BODY_60_79(60, A, B, C, D, E, T, X(12), X(14), X(4), X(9));
   309|       |    BODY_60_79(61, T, A, B, C, D, E, X(13), X(15), X(5), X(10));
   310|       |    BODY_60_79(62, E, T, A, B, C, D, X(14), X(0), X(6), X(11));
   311|       |    BODY_60_79(63, D, E, T, A, B, C, X(15), X(1), X(7), X(12));
   312|       |    BODY_60_79(64, C, D, E, T, A, B, X(0), X(2), X(8), X(13));
   313|       |    BODY_60_79(65, B, C, D, E, T, A, X(1), X(3), X(9), X(14));
   314|       |    BODY_60_79(66, A, B, C, D, E, T, X(2), X(4), X(10), X(15));
   315|       |    BODY_60_79(67, T, A, B, C, D, E, X(3), X(5), X(11), X(0));
   316|       |    BODY_60_79(68, E, T, A, B, C, D, X(4), X(6), X(12), X(1));
   317|       |    BODY_60_79(69, D, E, T, A, B, C, X(5), X(7), X(13), X(2));
   318|       |    BODY_60_79(70, C, D, E, T, A, B, X(6), X(8), X(14), X(3));
   319|       |    BODY_60_79(71, B, C, D, E, T, A, X(7), X(9), X(15), X(4));
   320|       |    BODY_60_79(72, A, B, C, D, E, T, X(8), X(10), X(0), X(5));
   321|       |    BODY_60_79(73, T, A, B, C, D, E, X(9), X(11), X(1), X(6));
   322|       |    BODY_60_79(74, E, T, A, B, C, D, X(10), X(12), X(2), X(7));
   323|       |    BODY_60_79(75, D, E, T, A, B, C, X(11), X(13), X(3), X(8));
   324|       |    BODY_60_79(76, C, D, E, T, A, B, X(12), X(14), X(4), X(9));
   325|       |    BODY_60_79(77, B, C, D, E, T, A, X(13), X(15), X(5), X(10));
   326|       |    BODY_60_79(78, A, B, C, D, E, T, X(14), X(0), X(6), X(11));
   327|       |    BODY_60_79(79, T, A, B, C, D, E, X(15), X(1), X(7), X(12));
   328|       |
   329|       |    state[0] = (state[0] + E) & 0xffffffffL;
   330|       |    state[1] = (state[1] + T) & 0xffffffffL;
   331|       |    state[2] = (state[2] + A) & 0xffffffffL;
   332|       |    state[3] = (state[3] + B) & 0xffffffffL;
   333|       |    state[4] = (state[4] + C) & 0xffffffffL;
   334|       |
   335|       |    if (--num == 0) {
   336|       |      break;
   337|       |    }
   338|       |
   339|       |    A = state[0];
   340|       |    B = state[1];
   341|       |    C = state[2];
   342|       |    D = state[3];
   343|       |    E = state[4];
   344|       |  }
   345|       |}
   346|       |#endif  // !SHA1_ASM_NOHW
   347|       |
   348|       |static void sha1_block_data_order(uint32_t state[5], const uint8_t *data,
   349|   487k|                                  size_t num) {
   350|   487k|#if defined(SHA1_ASM_HW)
   351|   487k|  if (sha1_hw_capable()) {
   352|   487k|    sha1_block_data_order_hw(state, data, num);
   353|   487k|    return;
   354|   487k|  }
   355|      0|#endif
   356|      0|#if defined(SHA1_ASM_AVX2)
   357|      0|  if (sha1_avx2_capable()) {
   358|      0|    sha1_block_data_order_avx2(state, data, num);
   359|      0|    return;
   360|      0|  }
   361|      0|#endif
   362|      0|#if defined(SHA1_ASM_AVX)
   363|      0|  if (sha1_avx_capable()) {
   364|      0|    sha1_block_data_order_avx(state, data, num);
   365|      0|    return;
   366|      0|  }
   367|      0|#endif
   368|      0|#if defined(SHA1_ASM_SSSE3)
   369|      0|  if (sha1_ssse3_capable()) {
   370|      0|    sha1_block_data_order_ssse3(state, data, num);
   371|      0|    return;
   372|      0|  }
   373|      0|#endif
   374|       |#if defined(SHA1_ASM_NEON)
   375|       |  if (CRYPTO_is_NEON_capable()) {
   376|       |    sha1_block_data_order_neon(state, data, num);
   377|       |    return;
   378|       |  }
   379|       |#endif
   380|      0|  sha1_block_data_order_nohw(state, data, num);
   381|      0|}
   382|       |
   383|       |#endif  // !SHA1_ASM
   384|       |
   385|       |#undef Xupdate
   386|       |#undef K_00_19
   387|       |#undef K_20_39
   388|       |#undef K_40_59
   389|       |#undef K_60_79
   390|       |#undef F_00_19
   391|       |#undef F_20_39
   392|       |#undef F_40_59
   393|       |#undef F_60_79
   394|       |#undef BODY_00_15
   395|       |#undef BODY_16_19
   396|       |#undef BODY_20_31
   397|       |#undef BODY_32_39
   398|       |#undef BODY_40_59
   399|       |#undef BODY_60_79
   400|       |#undef X