Coverage Report

Created: 2022-12-08 06:09

/src/libgcrypt/cipher/sha1.c
Line
Count
Source (jump to first uncovered line)
1
/* sha1.c - SHA1 hash function
2
 * Copyright (C) 1998, 2001, 2002, 2003, 2008 Free Software Foundation, Inc.
3
 *
4
 * This file is part of Libgcrypt.
5
 *
6
 * Libgcrypt is free software; you can redistribute it and/or modify
7
 * it under the terms of the GNU Lesser General Public License as
8
 * published by the Free Software Foundation; either version 2.1 of
9
 * the License, or (at your option) any later version.
10
 *
11
 * Libgcrypt is distributed in the hope that it will be useful,
12
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14
 * GNU Lesser General Public License for more details.
15
 *
16
 * You should have received a copy of the GNU Lesser General Public
17
 * License along with this program; if not, see <http://www.gnu.org/licenses/>.
18
 */
19
20
21
/*  Test vectors:
22
 *
23
 *  "abc"
24
 *  A999 3E36 4706 816A BA3E  2571 7850 C26C 9CD0 D89D
25
 *
26
 *  "abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq"
27
 *  8498 3E44 1C3B D26E BAAE  4AA1 F951 29E5 E546 70F1
28
 */
29
30
31
#include <config.h>
32
#include <stdio.h>
33
#include <stdlib.h>
34
#include <string.h>
35
#ifdef HAVE_STDINT_H
36
# include <stdint.h>
37
#endif
38
39
#include "g10lib.h"
40
#include "bithelp.h"
41
#include "bufhelp.h"
42
#include "cipher.h"
43
#include "sha1.h"
44
45
46
/* USE_SSSE3 indicates whether to compile with Intel SSSE3 code. */
47
#undef USE_SSSE3
48
#if defined(__x86_64__) && defined(HAVE_GCC_INLINE_ASM_SSSE3) && \
49
    (defined(HAVE_COMPATIBLE_GCC_AMD64_PLATFORM_AS) || \
50
     defined(HAVE_COMPATIBLE_GCC_WIN64_PLATFORM_AS))
51
# define USE_SSSE3 1
52
#endif
53
54
/* USE_AVX indicates whether to compile with Intel AVX code. */
55
#undef USE_AVX
56
#if defined(__x86_64__) && defined(HAVE_GCC_INLINE_ASM_AVX) && \
57
    (defined(HAVE_COMPATIBLE_GCC_AMD64_PLATFORM_AS) || \
58
     defined(HAVE_COMPATIBLE_GCC_WIN64_PLATFORM_AS))
59
# define USE_AVX 1
60
#endif
61
62
/* USE_BMI2 indicates whether to compile with Intel AVX/BMI2 code. */
63
#undef USE_BMI2
64
#if defined(__x86_64__) && defined(HAVE_GCC_INLINE_ASM_AVX) && \
65
    defined(HAVE_GCC_INLINE_ASM_BMI2) && \
66
    (defined(HAVE_COMPATIBLE_GCC_AMD64_PLATFORM_AS) || \
67
     defined(HAVE_COMPATIBLE_GCC_WIN64_PLATFORM_AS))
68
# define USE_BMI2 1
69
#endif
70
71
/* USE_AVX2 indicates whether to compile with Intel AVX2/BMI2 code. */
72
#undef USE_AVX2
73
#if defined(USE_BMI2) && defined(HAVE_GCC_INLINE_ASM_AVX2)
74
# define USE_AVX2 1
75
#endif
76
77
/* USE_SHAEXT indicates whether to compile with Intel SHA Extension code. */
78
#undef USE_SHAEXT
79
#if defined(HAVE_GCC_INLINE_ASM_SHAEXT) && \
80
    defined(HAVE_GCC_INLINE_ASM_SSE41) && \
81
    defined(ENABLE_SHAEXT_SUPPORT)
82
# define USE_SHAEXT 1
83
#endif
84
85
/* USE_NEON indicates whether to enable ARM NEON assembly code. */
86
#undef USE_NEON
87
#ifdef ENABLE_NEON_SUPPORT
88
# if defined(HAVE_ARM_ARCH_V6) && defined(__ARMEL__) \
89
     && defined(HAVE_COMPATIBLE_GCC_ARM_PLATFORM_AS) \
90
     && defined(HAVE_GCC_INLINE_ASM_NEON)
91
#  define USE_NEON 1
92
# endif
93
#endif
94
95
/* USE_ARM_CE indicates whether to enable ARMv8 Crypto Extension assembly
96
 * code. */
97
#undef USE_ARM_CE
98
#ifdef ENABLE_ARM_CRYPTO_SUPPORT
99
# if defined(HAVE_ARM_ARCH_V6) && defined(__ARMEL__) \
100
     && defined(HAVE_COMPATIBLE_GCC_ARM_PLATFORM_AS) \
101
     && defined(HAVE_GCC_INLINE_ASM_AARCH32_CRYPTO)
102
#  define USE_ARM_CE 1
103
# elif defined(__AARCH64EL__) \
104
       && defined(HAVE_COMPATIBLE_GCC_AARCH64_PLATFORM_AS) \
105
       && defined(HAVE_GCC_INLINE_ASM_AARCH64_CRYPTO)
106
#  define USE_ARM_CE 1
107
# endif
108
#endif
109
110
111
/* A macro to test whether P is properly aligned for an u32 type.
112
   Note that config.h provides a suitable replacement for uintptr_t if
113
   it does not exist in stdint.h.  */
114
/* #if __GNUC__ >= 2 */
115
/* # define U32_ALIGNED_P(p) (!(((uintptr_t)p) % __alignof__ (u32))) */
116
/* #else */
117
/* # define U32_ALIGNED_P(p) (!(((uintptr_t)p) % sizeof (u32))) */
118
/* #endif */
119
120
121
122
/* Assembly implementations use SystemV ABI, ABI conversion and additional
123
 * stack to store XMM6-XMM15 needed on Win64. */
124
#undef ASM_FUNC_ABI
125
#undef ASM_EXTRA_STACK
126
#if defined(USE_SSSE3) || defined(USE_AVX) || defined(USE_BMI2) || \
127
    defined(USE_SHAEXT)
128
# ifdef HAVE_COMPATIBLE_GCC_WIN64_PLATFORM_AS
129
#  define ASM_FUNC_ABI __attribute__((sysv_abi))
130
#  define ASM_EXTRA_STACK (10 * 16 + sizeof(void *) * 4)
131
# else
132
#  define ASM_FUNC_ABI
133
671k
#  define ASM_EXTRA_STACK 0
134
# endif
135
#endif
136
137
138
#ifdef USE_SSSE3
unsigned int
_gcry_sha1_transform_amd64_ssse3 (void *state, const unsigned char *data,
                                  size_t nblks) ASM_FUNC_ABI;

/* Thunk adapting the SSSE3 assembly transform to the generic bwrite
 * interface; folds the Win64 ABI-conversion stack into the burn count. */
static unsigned int
do_sha1_transform_amd64_ssse3 (void *ctx, const unsigned char *data,
                               size_t nblks)
{
  SHA1_CONTEXT *c = ctx;

  return ASM_EXTRA_STACK
         + _gcry_sha1_transform_amd64_ssse3 (&c->h0, data, nblks);
}
#endif
152
153
#ifdef USE_AVX
unsigned int
_gcry_sha1_transform_amd64_avx (void *state, const unsigned char *data,
                                 size_t nblks) ASM_FUNC_ABI;

/* Thunk adapting the AVX assembly transform to the generic bwrite
 * interface; folds the Win64 ABI-conversion stack into the burn count. */
static unsigned int
do_sha1_transform_amd64_avx (void *ctx, const unsigned char *data,
                             size_t nblks)
{
  SHA1_CONTEXT *c = ctx;

  return ASM_EXTRA_STACK
         + _gcry_sha1_transform_amd64_avx (&c->h0, data, nblks);
}
#endif
167
168
#ifdef USE_BMI2
unsigned int
_gcry_sha1_transform_amd64_avx_bmi2 (void *state, const unsigned char *data,
                                     size_t nblks) ASM_FUNC_ABI;

/* Thunk adapting the AVX/BMI2 assembly transform to the generic bwrite
 * interface; folds the Win64 ABI-conversion stack into the burn count. */
static unsigned int
do_sha1_transform_amd64_avx_bmi2 (void *ctx, const unsigned char *data,
                                  size_t nblks)
{
  SHA1_CONTEXT *c = ctx;

  return ASM_EXTRA_STACK
         + _gcry_sha1_transform_amd64_avx_bmi2 (&c->h0, data, nblks);
}
181
182
#ifdef USE_AVX2
unsigned int
_gcry_sha1_transform_amd64_avx2_bmi2 (void *state, const unsigned char *data,
                                      size_t nblks) ASM_FUNC_ABI;

/* Dispatch to the AVX2/BMI2 assembly transform.  That code only handles
 * an even, non-zero number of blocks, so small or odd leftovers are fed
 * through the AVX/BMI2 implementation first. */
static unsigned int
do_sha1_transform_amd64_avx2_bmi2 (void *ctx, const unsigned char *data,
                                   size_t nblks)
{
  SHA1_CONTEXT *hd = ctx;

  if (nblks <= 1)
    return do_sha1_transform_amd64_avx_bmi2 (ctx, data, nblks);

  if ((nblks & 1) != 0)
    {
      /* Peel one block off so the remaining count is even. */
      (void)_gcry_sha1_transform_amd64_avx_bmi2 (&hd->h0, data, 1);
      data += 64;
      nblks--;
    }

  return _gcry_sha1_transform_amd64_avx2_bmi2 (&hd->h0, data, nblks)
         + ASM_EXTRA_STACK;
}
#endif /* USE_AVX2 */
211
#endif /* USE_BMI2 */
212
213
#ifdef USE_SHAEXT
/* Does not need ASM_FUNC_ABI */
unsigned int
_gcry_sha1_transform_intel_shaext (void *state, const unsigned char *data,
                                   size_t nblks);

/* Thunk adapting the SHA-extension transform to the generic bwrite
 * interface.  No extra stack accounting is needed here. */
static unsigned int
do_sha1_transform_intel_shaext (void *ctx, const unsigned char *data,
                                size_t nblks)
{
  return _gcry_sha1_transform_intel_shaext (&((SHA1_CONTEXT *)ctx)->h0,
                                            data, nblks);
}
#endif
227
228
#ifdef USE_NEON
unsigned int
_gcry_sha1_transform_armv7_neon (void *state, const unsigned char *data,
                                 size_t nblks);

/* Thunk adapting the ARMv7/NEON transform to the generic bwrite
 * interface. */
static unsigned int
do_sha1_transform_armv7_neon (void *ctx, const unsigned char *data,
                              size_t nblks)
{
  return _gcry_sha1_transform_armv7_neon (&((SHA1_CONTEXT *)ctx)->h0,
                                          data, nblks);
}
#endif
241
242
#ifdef USE_ARM_CE
unsigned int
_gcry_sha1_transform_armv8_ce (void *state, const unsigned char *data,
                               size_t nblks);

/* Thunk adapting the ARMv8 Crypto Extension transform to the generic
 * bwrite interface. */
static unsigned int
do_sha1_transform_armv8_ce (void *ctx, const unsigned char *data,
                            size_t nblks)
{
  return _gcry_sha1_transform_armv8_ce (&((SHA1_CONTEXT *)ctx)->h0,
                                        data, nblks);
}
#endif
255
256
#ifdef SHA1_USE_S390X_CRYPTO
#include "asm-inline-s390x.h"

/* Run the s390x KIMD instruction over NBLKS blocks.  KIMD takes a byte
 * count, hence the multiplication by the 64-byte block size.  Returns 0
 * as no sensitive data is left on the stack. */
static unsigned int
do_sha1_transform_s390x (void *ctx, const unsigned char *data, size_t nblks)
{
  SHA1_CONTEXT *hd = ctx;

  kimd_execute (KMID_FUNCTION_SHA1, &hd->h0, data, nblks * 64);
  return 0;
}
267
268
/* Finalize the digest using the s390x KLMD instruction, which performs
 * padding and length append in hardware.  DATA/DATALEN is the remaining
 * partial block; LEN_MSB/LEN_LSB is the total message bit length.
 * Returns 0 (no stack to burn). */
static unsigned int
do_sha1_final_s390x (void *ctx, const unsigned char *data, size_t datalen,
		     u32 len_msb, u32 len_lsb)
{
  SHA1_CONTEXT *hd = ctx;

  /* Make sure that 'final_len' is positioned at correct offset relative
   * to 'h0'. This is because we are passing 'h0' pointer as start of
   * parameter block to 'klmd' instruction. */

  gcry_assert (offsetof (SHA1_CONTEXT, final_len_msb)
	       - offsetof (SHA1_CONTEXT, h0) == 5 * sizeof(u32));
  gcry_assert (offsetof (SHA1_CONTEXT, final_len_lsb)
	       - offsetof (SHA1_CONTEXT, final_len_msb) == 1 * sizeof(u32));

  /* KLMD reads the bit length from the parameter block, so store it
   * there before executing the instruction. */
  hd->final_len_msb = len_msb;
  hd->final_len_lsb = len_lsb;

  klmd_execute (KMID_FUNCTION_SHA1, &hd->h0, data, datalen);
  return 0;
}
#endif
290
291
292
static unsigned int
293
do_transform_generic (void *c, const unsigned char *data, size_t nblks);
294
295
296
/* Initialize the SHA-1 context and select the fastest available
 * transform implementation based on runtime HW features.  FLAGS is
 * currently unused. */
static void
sha1_init (void *context, unsigned int flags)
{
  SHA1_CONTEXT *hd = context;
  unsigned int features = _gcry_get_hw_features ();

  (void)flags;

  /* SHA-1 initial chaining values (FIPS 180-4).  */
  hd->h0 = 0x67452301;
  hd->h1 = 0xefcdab89;
  hd->h2 = 0x98badcfe;
  hd->h3 = 0x10325476;
  hd->h4 = 0xc3d2e1f0;

  hd->bctx.nblocks = 0;
  hd->bctx.nblocks_high = 0;
  hd->bctx.count = 0;
  hd->bctx.blocksize_shift = _gcry_ctz(64);   /* log2 of 64-byte block. */

  /* Order of feature checks is important here; last match will be
   * selected.  Keep slower implementations at the top and faster at
   * the bottom.  */
  hd->bctx.bwrite = do_transform_generic;
#ifdef USE_SSSE3
  if ((features & HWF_INTEL_SSSE3) != 0)
    hd->bctx.bwrite = do_sha1_transform_amd64_ssse3;
#endif
#ifdef USE_AVX
  /* AVX implementation uses SHLD which is known to be slow on non-Intel CPUs.
   * Therefore use this implementation on Intel CPUs only. */
  if ((features & HWF_INTEL_AVX) && (features & HWF_INTEL_FAST_SHLD))
    hd->bctx.bwrite = do_sha1_transform_amd64_avx;
#endif
#ifdef USE_BMI2
  if ((features & HWF_INTEL_AVX) && (features & HWF_INTEL_BMI2))
    hd->bctx.bwrite = do_sha1_transform_amd64_avx_bmi2;
#endif
#ifdef USE_AVX2
  if ((features & HWF_INTEL_AVX2) && (features & HWF_INTEL_AVX) &&
      (features & HWF_INTEL_BMI2))
    hd->bctx.bwrite = do_sha1_transform_amd64_avx2_bmi2;
#endif
#ifdef USE_SHAEXT
  if ((features & HWF_INTEL_SHAEXT) && (features & HWF_INTEL_SSE4_1))
    hd->bctx.bwrite = do_sha1_transform_intel_shaext;
#endif
#ifdef USE_NEON
  if ((features & HWF_ARM_NEON) != 0)
    hd->bctx.bwrite = do_sha1_transform_armv7_neon;
#endif
#ifdef USE_ARM_CE
  if ((features & HWF_ARM_SHA1) != 0)
    hd->bctx.bwrite = do_sha1_transform_armv8_ce;
#endif
#ifdef SHA1_USE_S390X_CRYPTO
  hd->use_s390x_crypto = 0;
  if ((features & HWF_S390X_MSA) != 0)
    {
      /* Use hardware only when both the transform (KIMD) and the
       * finalize (KLMD) instructions support SHA-1. */
      if ((kimd_query () & km_function_to_mask (KMID_FUNCTION_SHA1)) &&
	  (klmd_query () & km_function_to_mask (KMID_FUNCTION_SHA1)))
	{
	  hd->bctx.bwrite = do_sha1_transform_s390x;
	  hd->use_s390x_crypto = 1;
	}
    }
#endif

  (void)features;   /* Silence unused warning when no accelerators built. */
}
365
366
/*
 * Initialize the context HD. This is used to prepare the use of
 * _gcry_sha1_mixblock.  WARNING: This is a special purpose function
 * for exclusive use by random-csprng.c.
 */
void
_gcry_sha1_mixblock_init (SHA1_CONTEXT *hd)
{
  /* Plain SHA-1 init; no flags are needed for the CSPRNG use case. */
  sha1_init (hd, 0);
}
376
377
378
/* Round function macros. */
/* Round constants for rounds 0-19, 20-39, 40-59 and 60-79. */
#define K1  0x5A827999L
#define K2  0x6ED9EBA1L
#define K3  0x8F1BBCDCL
#define K4  0xCA62C1D6L
/* F1 is the bitwise choice function, F3 the majority function; F2 and
 * F4 are plain parity. */
#define F1(x,y,z)   ( z ^ ( x & ( y ^ z ) ) )
#define F2(x,y,z)   ( x ^ y ^ z )
#define F3(x,y,z)   ( ( x & y ) | ( z & ( x | y ) ) )
#define F4(x,y,z)   ( x ^ y ^ z )
/* Message schedule: expand word I from the rolling 16-word window and
 * rotate left by one bit (the rotation distinguishes SHA-1 from SHA-0). */
#define M(i) ( tm =    x[ i    &0x0f]  \
                     ^ x[(i-14)&0x0f]  \
                     ^ x[(i-8) &0x0f]  \
                     ^ x[(i-3) &0x0f], \
                     (x[i&0x0f] = rol(tm, 1)))
/* One round: E += ROL5(A) + f(B,C,D) + K + W; B = ROL30(B).  Callers
 * rotate the roles of a..e between invocations. */
#define R(a,b,c,d,e,f,k,m)  do { e += rol( a, 5 )     \
                                      + f( b, c, d )  \
                                      + k             \
                                      + m;            \
                                 b = rol( b, 30 );    \
                               } while(0)
398
399
/*
400
 * Transform NBLOCKS of each 64 bytes (16 32-bit words) at DATA.
401
 */
402
static unsigned int
403
do_transform_generic (void *ctx, const unsigned char *data, size_t nblks)
404
0
{
405
0
  SHA1_CONTEXT *hd = ctx;
406
407
0
  do
408
0
    {
409
0
      const u32 *idata = (const void *)data;
410
0
      u32 a, b, c, d, e; /* Local copies of the chaining variables.  */
411
0
      u32 tm;            /* Helper.  */
412
0
      u32 x[16];         /* The array we work on. */
413
414
0
#define I(i) (x[i] = buf_get_be32(idata + i))
415
416
      /* Get the values of the chaining variables. */
417
0
      a = hd->h0;
418
0
      b = hd->h1;
419
0
      c = hd->h2;
420
0
      d = hd->h3;
421
0
      e = hd->h4;
422
423
      /* Transform. */
424
0
      R( a, b, c, d, e, F1, K1, I( 0) );
425
0
      R( e, a, b, c, d, F1, K1, I( 1) );
426
0
      R( d, e, a, b, c, F1, K1, I( 2) );
427
0
      R( c, d, e, a, b, F1, K1, I( 3) );
428
0
      R( b, c, d, e, a, F1, K1, I( 4) );
429
0
      R( a, b, c, d, e, F1, K1, I( 5) );
430
0
      R( e, a, b, c, d, F1, K1, I( 6) );
431
0
      R( d, e, a, b, c, F1, K1, I( 7) );
432
0
      R( c, d, e, a, b, F1, K1, I( 8) );
433
0
      R( b, c, d, e, a, F1, K1, I( 9) );
434
0
      R( a, b, c, d, e, F1, K1, I(10) );
435
0
      R( e, a, b, c, d, F1, K1, I(11) );
436
0
      R( d, e, a, b, c, F1, K1, I(12) );
437
0
      R( c, d, e, a, b, F1, K1, I(13) );
438
0
      R( b, c, d, e, a, F1, K1, I(14) );
439
0
      R( a, b, c, d, e, F1, K1, I(15) );
440
0
      R( e, a, b, c, d, F1, K1, M(16) );
441
0
      R( d, e, a, b, c, F1, K1, M(17) );
442
0
      R( c, d, e, a, b, F1, K1, M(18) );
443
0
      R( b, c, d, e, a, F1, K1, M(19) );
444
0
      R( a, b, c, d, e, F2, K2, M(20) );
445
0
      R( e, a, b, c, d, F2, K2, M(21) );
446
0
      R( d, e, a, b, c, F2, K2, M(22) );
447
0
      R( c, d, e, a, b, F2, K2, M(23) );
448
0
      R( b, c, d, e, a, F2, K2, M(24) );
449
0
      R( a, b, c, d, e, F2, K2, M(25) );
450
0
      R( e, a, b, c, d, F2, K2, M(26) );
451
0
      R( d, e, a, b, c, F2, K2, M(27) );
452
0
      R( c, d, e, a, b, F2, K2, M(28) );
453
0
      R( b, c, d, e, a, F2, K2, M(29) );
454
0
      R( a, b, c, d, e, F2, K2, M(30) );
455
0
      R( e, a, b, c, d, F2, K2, M(31) );
456
0
      R( d, e, a, b, c, F2, K2, M(32) );
457
0
      R( c, d, e, a, b, F2, K2, M(33) );
458
0
      R( b, c, d, e, a, F2, K2, M(34) );
459
0
      R( a, b, c, d, e, F2, K2, M(35) );
460
0
      R( e, a, b, c, d, F2, K2, M(36) );
461
0
      R( d, e, a, b, c, F2, K2, M(37) );
462
0
      R( c, d, e, a, b, F2, K2, M(38) );
463
0
      R( b, c, d, e, a, F2, K2, M(39) );
464
0
      R( a, b, c, d, e, F3, K3, M(40) );
465
0
      R( e, a, b, c, d, F3, K3, M(41) );
466
0
      R( d, e, a, b, c, F3, K3, M(42) );
467
0
      R( c, d, e, a, b, F3, K3, M(43) );
468
0
      R( b, c, d, e, a, F3, K3, M(44) );
469
0
      R( a, b, c, d, e, F3, K3, M(45) );
470
0
      R( e, a, b, c, d, F3, K3, M(46) );
471
0
      R( d, e, a, b, c, F3, K3, M(47) );
472
0
      R( c, d, e, a, b, F3, K3, M(48) );
473
0
      R( b, c, d, e, a, F3, K3, M(49) );
474
0
      R( a, b, c, d, e, F3, K3, M(50) );
475
0
      R( e, a, b, c, d, F3, K3, M(51) );
476
0
      R( d, e, a, b, c, F3, K3, M(52) );
477
0
      R( c, d, e, a, b, F3, K3, M(53) );
478
0
      R( b, c, d, e, a, F3, K3, M(54) );
479
0
      R( a, b, c, d, e, F3, K3, M(55) );
480
0
      R( e, a, b, c, d, F3, K3, M(56) );
481
0
      R( d, e, a, b, c, F3, K3, M(57) );
482
0
      R( c, d, e, a, b, F3, K3, M(58) );
483
0
      R( b, c, d, e, a, F3, K3, M(59) );
484
0
      R( a, b, c, d, e, F4, K4, M(60) );
485
0
      R( e, a, b, c, d, F4, K4, M(61) );
486
0
      R( d, e, a, b, c, F4, K4, M(62) );
487
0
      R( c, d, e, a, b, F4, K4, M(63) );
488
0
      R( b, c, d, e, a, F4, K4, M(64) );
489
0
      R( a, b, c, d, e, F4, K4, M(65) );
490
0
      R( e, a, b, c, d, F4, K4, M(66) );
491
0
      R( d, e, a, b, c, F4, K4, M(67) );
492
0
      R( c, d, e, a, b, F4, K4, M(68) );
493
0
      R( b, c, d, e, a, F4, K4, M(69) );
494
0
      R( a, b, c, d, e, F4, K4, M(70) );
495
0
      R( e, a, b, c, d, F4, K4, M(71) );
496
0
      R( d, e, a, b, c, F4, K4, M(72) );
497
0
      R( c, d, e, a, b, F4, K4, M(73) );
498
0
      R( b, c, d, e, a, F4, K4, M(74) );
499
0
      R( a, b, c, d, e, F4, K4, M(75) );
500
0
      R( e, a, b, c, d, F4, K4, M(76) );
501
0
      R( d, e, a, b, c, F4, K4, M(77) );
502
0
      R( c, d, e, a, b, F4, K4, M(78) );
503
0
      R( b, c, d, e, a, F4, K4, M(79) );
504
505
      /* Update the chaining variables. */
506
0
      hd->h0 += a;
507
0
      hd->h1 += b;
508
0
      hd->h2 += c;
509
0
      hd->h3 += d;
510
0
      hd->h4 += e;
511
512
0
      data += 64;
513
0
    }
514
0
  while (--nblks);
515
516
0
  return 88+4*sizeof(void*);
517
0
}
518
519
520
/*
521
 * Apply the SHA-1 transform function on the buffer BLOCKOF64BYTE
522
 * which must have a length 64 bytes.  BLOCKOF64BYTE must be 32-bit
523
 * aligned.  Updates the 20 bytes in BLOCKOF64BYTE with its mixed
524
 * content.  Returns the number of bytes which should be burned on the
525
 * stack.  You need to use _gcry_sha1_mixblock_init to initialize the
526
 * context.
527
 * WARNING: This is a special purpose function for exclusive use by
528
 * random-csprng.c.
529
 */
530
unsigned int
531
_gcry_sha1_mixblock (SHA1_CONTEXT *hd, void *blockof64byte)
532
587k
{
533
587k
  u32 *p = blockof64byte;
534
587k
  unsigned int nburn;
535
536
587k
  nburn = (*hd->bctx.bwrite) (hd, blockof64byte, 1);
537
587k
  p[0] = hd->h0;
538
587k
  p[1] = hd->h1;
539
587k
  p[2] = hd->h2;
540
587k
  p[3] = hd->h3;
541
587k
  p[4] = hd->h4;
542
543
587k
  return nburn;
544
587k
}
545
546
547
/* The routine final terminates the computation and
548
 * returns the digest.
549
 * The handle is prepared for a new cycle, but adding bytes to the
550
 * handle will the destroy the returned buffer.
551
 * Returns: 20 bytes representing the digest.
552
 */
553
554
/* Terminate the hash computation: apply SHA-1 padding, append the
 * 64-bit message bit-length, run the final transform(s) and store the
 * 20-byte big-endian digest into bctx.buf (retrieved via sha1_read). */
static void
sha1_final(void *context)
{
  SHA1_CONTEXT *hd = context;
  u32 t, th, msb, lsb;
  unsigned char *p;
  unsigned int burn;

  t = hd->bctx.nblocks;
  /* Pick up the high word of the block count; which field holds it
   * depends on whether nblocks is 32 or 64 bits wide. */
  if (sizeof t == sizeof hd->bctx.nblocks)
    th = hd->bctx.nblocks_high;
  else
    th = hd->bctx.nblocks >> 32;

  /* multiply by 64 to make a byte count */
  lsb = t << 6;
  msb = (th << 6) | (t >> 26);
  /* add the count */
  t = lsb;
  if( (lsb += hd->bctx.count) < t )   /* Carry into the high word. */
    msb++;
  /* multiply by 8 to make a bit count */
  t = lsb;
  lsb <<= 3;
  msb <<= 3;
  msb |= t >> 29;

  if (0)
    { }
#ifdef SHA1_USE_S390X_CRYPTO
  else if (hd->use_s390x_crypto)
    {
      /* Hardware finalize: KLMD pads and appends the length itself. */
      burn = do_sha1_final_s390x (hd, hd->bctx.buf, hd->bctx.count, msb, lsb);
    }
#endif
  else if (hd->bctx.count < 56)  /* enough room */
    {
      hd->bctx.buf[hd->bctx.count++] = 0x80; /* pad */
      if (hd->bctx.count < 56)
	memset (&hd->bctx.buf[hd->bctx.count], 0, 56 - hd->bctx.count);

      /* append the 64 bit count */
      buf_put_be32(hd->bctx.buf + 56, msb);
      buf_put_be32(hd->bctx.buf + 60, lsb);
      burn = (*hd->bctx.bwrite) ( hd, hd->bctx.buf, 1 );
    }
  else  /* need one extra block */
    {
      hd->bctx.buf[hd->bctx.count++] = 0x80; /* pad character */
      /* fill pad and next block with zeroes */
      memset (&hd->bctx.buf[hd->bctx.count], 0, 64 - hd->bctx.count + 56);

      /* append the 64 bit count */
      buf_put_be32(hd->bctx.buf + 64 + 56, msb);
      buf_put_be32(hd->bctx.buf + 64 + 60, lsb);
      burn = (*hd->bctx.bwrite) ( hd, hd->bctx.buf, 2 );
    }

  /* Serialize the chaining variables big-endian into the buffer; this
   * is the digest that sha1_read returns. */
  p = hd->bctx.buf;
#define X(a) do { buf_put_be32(p, hd->h##a); p += 4; } while(0)
  X(0);
  X(1);
  X(2);
  X(3);
  X(4);
#undef X

  hd->bctx.count = 0;

  _gcry_burn_stack (burn);
}
625
626
static unsigned char *
627
sha1_read( void *context )
628
43.8k
{
629
43.8k
  SHA1_CONTEXT *hd = context;
630
631
43.8k
  return hd->bctx.buf;
632
43.8k
}
633
634
635
/****************
636
 * Shortcut functions which puts the hash value of the supplied buffer iov
637
 * into outbuf which must have a size of 20 bytes.
638
 */
639
static void
640
_gcry_sha1_hash_buffers (void *outbuf, size_t nbytes,
641
       const gcry_buffer_t *iov, int iovcnt)
642
19.5k
{
643
19.5k
  SHA1_CONTEXT hd;
644
645
19.5k
  (void)nbytes;
646
647
19.5k
  sha1_init (&hd, 0);
648
39.1k
  for (;iovcnt > 0; iov++, iovcnt--)
649
19.5k
    _gcry_md_block_write (&hd,
650
19.5k
                          (const char*)iov[0].data + iov[0].off, iov[0].len);
651
19.5k
  sha1_final (&hd);
652
19.5k
  memcpy (outbuf, hd.bctx.buf, 20);
653
19.5k
}
654
655
/* Variant of the above shortcut function using a single buffer.  */
656
void
657
_gcry_sha1_hash_buffer (void *outbuf, const void *buffer, size_t length)
658
19.5k
{
659
19.5k
  gcry_buffer_t iov = { 0 };
660
661
19.5k
  iov.data = (void *)buffer;
662
19.5k
  iov.len = length;
663
664
19.5k
  _gcry_sha1_hash_buffers (outbuf, 20, &iov, 1);
665
19.5k
}
666
667
668

669
/*
670
     Self-test section.
671
 */
672
673
674
static gpg_err_code_t
675
selftests_sha1 (int extended, selftest_report_func_t report)
676
0
{
677
0
  const char *what;
678
0
  const char *errtxt;
679
680
0
  what = "short string";
681
0
  errtxt = _gcry_hash_selftest_check_one
682
0
    (GCRY_MD_SHA1, 0,
683
0
     "abc", 3,
684
0
     "\xA9\x99\x3E\x36\x47\x06\x81\x6A\xBA\x3E"
685
0
     "\x25\x71\x78\x50\xC2\x6C\x9C\xD0\xD8\x9D", 20);
686
0
  if (errtxt)
687
0
    goto failed;
688
689
0
  if (extended)
690
0
    {
691
0
      what = "long string";
692
0
      errtxt = _gcry_hash_selftest_check_one
693
0
        (GCRY_MD_SHA1, 0,
694
0
         "abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq", 56,
695
0
         "\x84\x98\x3E\x44\x1C\x3B\xD2\x6E\xBA\xAE"
696
0
         "\x4A\xA1\xF9\x51\x29\xE5\xE5\x46\x70\xF1", 20);
697
0
      if (errtxt)
698
0
        goto failed;
699
700
0
      what = "one million \"a\"";
701
0
      errtxt = _gcry_hash_selftest_check_one
702
0
        (GCRY_MD_SHA1, 1,
703
0
         NULL, 0,
704
0
         "\x34\xAA\x97\x3C\xD4\xC4\xDA\xA4\xF6\x1E"
705
0
         "\xEB\x2B\xDB\xAD\x27\x31\x65\x34\x01\x6F", 20);
706
0
      if (errtxt)
707
0
        goto failed;
708
0
    }
709
710
0
  return 0; /* Succeeded. */
711
712
0
 failed:
713
0
  if (report)
714
0
    report ("digest", GCRY_MD_SHA1, what, errtxt);
715
0
  return GPG_ERR_SELFTEST_FAILED;
716
0
}
717
718
719
/* Run a full self-test for ALGO and return 0 on success.  */
720
static gpg_err_code_t
721
run_selftests (int algo, int extended, selftest_report_func_t report)
722
0
{
723
0
  gpg_err_code_t ec;
724
725
0
  switch (algo)
726
0
    {
727
0
    case GCRY_MD_SHA1:
728
0
      ec = selftests_sha1 (extended, report);
729
0
      break;
730
0
    default:
731
0
      ec = GPG_ERR_DIGEST_ALGO;
732
0
      break;
733
734
0
    }
735
0
  return ec;
736
0
}
737
738
739
740

741
/* DER prefix for a SHA-1 DigestInfo structure, used when building
 * PKCS#1 signatures. */
static const unsigned char asn[15] = /* Object ID is 1.3.14.3.2.26 */
  { 0x30, 0x21, 0x30, 0x09, 0x06, 0x05, 0x2b, 0x0e, 0x03,
    0x02, 0x1a, 0x05, 0x00, 0x04, 0x14 };

/* OIDs under which SHA-1 (alone or in signature schemes) is known. */
static const gcry_md_oid_spec_t oid_spec_sha1[] =
  {
    /* iso.member-body.us.rsadsi.pkcs.pkcs-1.5 (sha1WithRSAEncryption) */
    { "1.2.840.113549.1.1.5" },
    /* iso.member-body.us.x9-57.x9cm.3 (dsaWithSha1)*/
    { "1.2.840.10040.4.3" },
    /* from NIST's OIW  (sha1) */
    { "1.3.14.3.2.26" },
    /* from NIST OIW (sha-1WithRSAEncryption) */
    { "1.3.14.3.2.29" },
    /* iso.member-body.us.ansi-x9-62.signatures.ecdsa-with-sha1 */
    { "1.2.840.10045.4.1" },
    { NULL },
  };

/* Algorithm descriptor registered with the libgcrypt digest layer. */
const gcry_md_spec_t _gcry_digest_spec_sha1 =
  {
    GCRY_MD_SHA1, {0, 1},
    "SHA1", asn, DIM (asn), oid_spec_sha1, 20,
    sha1_init, _gcry_md_block_write, sha1_final, sha1_read, NULL,
    _gcry_sha1_hash_buffers,
    sizeof (SHA1_CONTEXT),
    run_selftests
  };