Coverage Report

Created: 2022-12-08 06:10

/src/libgcrypt/cipher/cipher-gcm.c

/* cipher-gcm.c  - Generic Galois Counter Mode implementation
 * Copyright (C) 2013 Dmitry Eremin-Solenikov
 * Copyright (C) 2013, 2018-2019 Jussi Kivilinna <jussi.kivilinna@iki.fi>
 *
 * This file is part of Libgcrypt.
 *
 * Libgcrypt is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as
 * published by the Free Software Foundation; either version 2.1 of
 * the License, or (at your option) any later version.
 *
 * Libgcrypt is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include <config.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>

#include "g10lib.h"
#include "cipher.h"
#include "bufhelp.h"
#include "./cipher-internal.h"


static gcry_err_code_t _gcry_cipher_gcm_setiv_zero (gcry_cipher_hd_t c);

/* Helper macro to force alignment to 64 bytes.  */
#ifdef HAVE_GCC_ATTRIBUTE_ALIGNED
# define ATTR_ALIGNED_64  __attribute__ ((aligned (64)))
#else
# define ATTR_ALIGNED_64
#endif


#ifdef GCM_USE_INTEL_PCLMUL
extern void _gcry_ghash_setup_intel_pclmul (gcry_cipher_hd_t c,
                                            unsigned int hw_features);
#endif

#ifdef GCM_USE_ARM_PMULL
extern void _gcry_ghash_setup_armv8_ce_pmull (void *gcm_key, void *gcm_table);

extern unsigned int _gcry_ghash_armv8_ce_pmull (void *gcm_key, byte *result,
                                                const byte *buf, size_t nblocks,
                                                void *gcm_table);

extern unsigned int _gcry_polyval_armv8_ce_pmull (void *gcm_key, byte *result,
                                                  const byte *buf,
                                                  size_t nblocks,
                                                  void *gcm_table);

static void
ghash_setup_armv8_ce_pmull (gcry_cipher_hd_t c)
{
  _gcry_ghash_setup_armv8_ce_pmull(c->u_mode.gcm.u_ghash_key.key,
                                   c->u_mode.gcm.gcm_table);
}

static unsigned int
ghash_armv8_ce_pmull (gcry_cipher_hd_t c, byte *result, const byte *buf,
                      size_t nblocks)
{
  return _gcry_ghash_armv8_ce_pmull(c->u_mode.gcm.u_ghash_key.key, result, buf,
                                    nblocks, c->u_mode.gcm.gcm_table);
}

static unsigned int
polyval_armv8_ce_pmull (gcry_cipher_hd_t c, byte *result, const byte *buf,
                        size_t nblocks)
{
  return _gcry_polyval_armv8_ce_pmull(c->u_mode.gcm.u_ghash_key.key, result,
                                      buf, nblocks, c->u_mode.gcm.gcm_table);
}
#endif /* GCM_USE_ARM_PMULL */

#ifdef GCM_USE_ARM_NEON
extern void _gcry_ghash_setup_armv7_neon (void *gcm_key);

extern unsigned int _gcry_ghash_armv7_neon (void *gcm_key, byte *result,
                                            const byte *buf, size_t nblocks);

static void
ghash_setup_armv7_neon (gcry_cipher_hd_t c)
{
  _gcry_ghash_setup_armv7_neon(c->u_mode.gcm.u_ghash_key.key);
}

static unsigned int
ghash_armv7_neon (gcry_cipher_hd_t c, byte *result, const byte *buf,
                  size_t nblocks)
{
  return _gcry_ghash_armv7_neon(c->u_mode.gcm.u_ghash_key.key, result, buf,
                                nblocks);
}
#endif /* GCM_USE_ARM_NEON */

#ifdef GCM_USE_S390X_CRYPTO
#include "asm-inline-s390x.h"

static unsigned int
ghash_s390x_kimd (gcry_cipher_hd_t c, byte *result, const byte *buf,
                  size_t nblocks)
{
  u128_t params[2];

  memcpy (&params[0], result, 16);
  memcpy (&params[1], c->u_mode.gcm.u_ghash_key.key, 16);

  kimd_execute (KMID_FUNCTION_GHASH, &params, buf, nblocks * 16);

  memcpy (result, &params[0], 16);
  wipememory (params, sizeof(params));
  return 0;
}
#endif /* GCM_USE_S390X_CRYPTO */

#ifdef GCM_USE_PPC_VPMSUM
extern void _gcry_ghash_setup_ppc_vpmsum (void *gcm_table, void *gcm_key);

/* result is 128-bits */
extern unsigned int _gcry_ghash_ppc_vpmsum (byte *result, void *gcm_table,
                                            const byte *buf, size_t nblocks);

static void
ghash_setup_ppc_vpmsum (gcry_cipher_hd_t c)
{
  _gcry_ghash_setup_ppc_vpmsum(c->u_mode.gcm.gcm_table,
                               c->u_mode.gcm.u_ghash_key.key);
}

static unsigned int
ghash_ppc_vpmsum (gcry_cipher_hd_t c, byte *result, const byte *buf,
                  size_t nblocks)
{
  return _gcry_ghash_ppc_vpmsum(result, c->u_mode.gcm.gcm_table, buf,
                                nblocks);
}
#endif /* GCM_USE_PPC_VPMSUM */

#ifdef GCM_USE_TABLES
static struct
{
  volatile u32 counter_head;
  u32 cacheline_align[64 / 4 - 1];
  u16 R[256];
  volatile u32 counter_tail;
} gcm_table ATTR_ALIGNED_64 =
  {
    0,
    { 0, },
    {
      0x0000, 0x01c2, 0x0384, 0x0246, 0x0708, 0x06ca, 0x048c, 0x054e,
      0x0e10, 0x0fd2, 0x0d94, 0x0c56, 0x0918, 0x08da, 0x0a9c, 0x0b5e,
      0x1c20, 0x1de2, 0x1fa4, 0x1e66, 0x1b28, 0x1aea, 0x18ac, 0x196e,
      0x1230, 0x13f2, 0x11b4, 0x1076, 0x1538, 0x14fa, 0x16bc, 0x177e,
      0x3840, 0x3982, 0x3bc4, 0x3a06, 0x3f48, 0x3e8a, 0x3ccc, 0x3d0e,
      0x3650, 0x3792, 0x35d4, 0x3416, 0x3158, 0x309a, 0x32dc, 0x331e,
      0x2460, 0x25a2, 0x27e4, 0x2626, 0x2368, 0x22aa, 0x20ec, 0x212e,
      0x2a70, 0x2bb2, 0x29f4, 0x2836, 0x2d78, 0x2cba, 0x2efc, 0x2f3e,
      0x7080, 0x7142, 0x7304, 0x72c6, 0x7788, 0x764a, 0x740c, 0x75ce,
      0x7e90, 0x7f52, 0x7d14, 0x7cd6, 0x7998, 0x785a, 0x7a1c, 0x7bde,
      0x6ca0, 0x6d62, 0x6f24, 0x6ee6, 0x6ba8, 0x6a6a, 0x682c, 0x69ee,
      0x62b0, 0x6372, 0x6134, 0x60f6, 0x65b8, 0x647a, 0x663c, 0x67fe,
      0x48c0, 0x4902, 0x4b44, 0x4a86, 0x4fc8, 0x4e0a, 0x4c4c, 0x4d8e,
      0x46d0, 0x4712, 0x4554, 0x4496, 0x41d8, 0x401a, 0x425c, 0x439e,
      0x54e0, 0x5522, 0x5764, 0x56a6, 0x53e8, 0x522a, 0x506c, 0x51ae,
      0x5af0, 0x5b32, 0x5974, 0x58b6, 0x5df8, 0x5c3a, 0x5e7c, 0x5fbe,
      0xe100, 0xe0c2, 0xe284, 0xe346, 0xe608, 0xe7ca, 0xe58c, 0xe44e,
      0xef10, 0xeed2, 0xec94, 0xed56, 0xe818, 0xe9da, 0xeb9c, 0xea5e,
      0xfd20, 0xfce2, 0xfea4, 0xff66, 0xfa28, 0xfbea, 0xf9ac, 0xf86e,
      0xf330, 0xf2f2, 0xf0b4, 0xf176, 0xf438, 0xf5fa, 0xf7bc, 0xf67e,
      0xd940, 0xd882, 0xdac4, 0xdb06, 0xde48, 0xdf8a, 0xddcc, 0xdc0e,
      0xd750, 0xd692, 0xd4d4, 0xd516, 0xd058, 0xd19a, 0xd3dc, 0xd21e,
      0xc560, 0xc4a2, 0xc6e4, 0xc726, 0xc268, 0xc3aa, 0xc1ec, 0xc02e,
      0xcb70, 0xcab2, 0xc8f4, 0xc936, 0xcc78, 0xcdba, 0xcffc, 0xce3e,
      0x9180, 0x9042, 0x9204, 0x93c6, 0x9688, 0x974a, 0x950c, 0x94ce,
      0x9f90, 0x9e52, 0x9c14, 0x9dd6, 0x9898, 0x995a, 0x9b1c, 0x9ade,
      0x8da0, 0x8c62, 0x8e24, 0x8fe6, 0x8aa8, 0x8b6a, 0x892c, 0x88ee,
      0x83b0, 0x8272, 0x8034, 0x81f6, 0x84b8, 0x857a, 0x873c, 0x86fe,
      0xa9c0, 0xa802, 0xaa44, 0xab86, 0xaec8, 0xaf0a, 0xad4c, 0xac8e,
      0xa7d0, 0xa612, 0xa454, 0xa596, 0xa0d8, 0xa11a, 0xa35c, 0xa29e,
      0xb5e0, 0xb422, 0xb664, 0xb7a6, 0xb2e8, 0xb32a, 0xb16c, 0xb0ae,
      0xbbf0, 0xba32, 0xb874, 0xb9b6, 0xbcf8, 0xbd3a, 0xbf7c, 0xbebe,
    },
    0
  };

#define gcmR gcm_table.R

static inline
void prefetch_table(const void *tab, size_t len)
{
  const volatile byte *vtab = tab;
  size_t i;

  for (i = 0; len - i >= 8 * 32; i += 8 * 32)
    {
      (void)vtab[i + 0 * 32];
      (void)vtab[i + 1 * 32];
      (void)vtab[i + 2 * 32];
      (void)vtab[i + 3 * 32];
      (void)vtab[i + 4 * 32];
      (void)vtab[i + 5 * 32];
      (void)vtab[i + 6 * 32];
      (void)vtab[i + 7 * 32];
    }
  for (; i < len; i += 32)
    {
      (void)vtab[i];
    }

  (void)vtab[len - 1];
}

static inline void
do_prefetch_tables (const void *gcmM, size_t gcmM_size)
{
  /* Modify counters to trigger copy-on-write and unsharing if physical pages
   * of look-up table are shared between processes.  Modifying counters also
   * causes checksums for pages to change and hints the same-page merging
   * algorithm that these pages are frequently changing.  */
  gcm_table.counter_head++;
  gcm_table.counter_tail++;

  /* Prefetch look-up tables to cache.  */
  prefetch_table(gcmM, gcmM_size);
  prefetch_table(&gcm_table, sizeof(gcm_table));
}

#ifdef GCM_TABLES_USE_U64
static void
bshift (u64 * b0, u64 * b1)
{
  u64 t[2], mask;

  t[0] = *b0;
  t[1] = *b1;
  mask = -(t[1] & 1) & 0xe1;
  mask <<= 56;

  *b1 = (t[1] >> 1) ^ (t[0] << 63);
  *b0 = (t[0] >> 1) ^ mask;
}
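
/* A worked example (for illustration, not part of the computation): bshift
 * is a right shift by one bit of a 128-bit value in GF(2^128), with b0 the
 * high half and b1 the low half.  The bit shifted out of b1 folds back in
 * via GCM's reduction constant R = 0xe1.  Starting from b0 = 0, b1 = 1, the
 * low bit falls out, mask becomes 0xe1 << 56, and the result is
 * b0 = 0xe100000000000000, b1 = 0, i.e. the byte 0xe1 at the top of the
 * 128-bit value. */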

static void
do_fillM (unsigned char *h, u64 *M)
{
  int i, j;

  M[0 + 0] = 0;
  M[0 + 16] = 0;

  M[8 + 0] = buf_get_be64 (h + 0);
  M[8 + 16] = buf_get_be64 (h + 8);

  for (i = 4; i > 0; i /= 2)
    {
      M[i + 0] = M[2 * i + 0];
      M[i + 16] = M[2 * i + 16];

      bshift (&M[i], &M[i + 16]);
    }

  for (i = 2; i < 16; i *= 2)
    for (j = 1; j < i; j++)
      {
        M[(i + j) + 0] = M[i + 0] ^ M[j + 0];
        M[(i + j) + 16] = M[i + 16] ^ M[j + 16];
      }

  for (i = 0; i < 16; i++)
    {
      M[i + 32] = (M[i + 0] >> 4) ^ ((u64) gcmR[(M[i + 16] & 0xf) << 4] << 48);
      M[i + 48] = (M[i + 16] >> 4) ^ (M[i + 0] << 60);
    }
}

static inline unsigned int
do_ghash (unsigned char *result, const unsigned char *buf, const u64 *gcmM)
{
  u64 V[2];
  u64 tmp[2];
  const u64 *M;
  u64 T;
  u32 A;
  int i;

  cipher_block_xor (V, result, buf, 16);
  V[0] = be_bswap64 (V[0]);
  V[1] = be_bswap64 (V[1]);

  /* First round can be manually tweaked based on the fact that 'tmp' is
   * zero. */
  M = &gcmM[(V[1] & 0xf) + 32];
  V[1] >>= 4;
  tmp[0] = M[0];
  tmp[1] = M[16];
  tmp[0] ^= gcmM[(V[1] & 0xf) + 0];
  tmp[1] ^= gcmM[(V[1] & 0xf) + 16];
  V[1] >>= 4;

  i = 6;
  while (1)
    {
      M = &gcmM[(V[1] & 0xf) + 32];
      V[1] >>= 4;

      A = tmp[1] & 0xff;
      T = tmp[0];
      tmp[0] = (T >> 8) ^ ((u64) gcmR[A] << 48) ^ gcmM[(V[1] & 0xf) + 0];
      tmp[1] = (T << 56) ^ (tmp[1] >> 8) ^ gcmM[(V[1] & 0xf) + 16];

      tmp[0] ^= M[0];
      tmp[1] ^= M[16];

      if (i == 0)
        break;

      V[1] >>= 4;
      --i;
    }

  i = 7;
  while (1)
    {
      M = &gcmM[(V[0] & 0xf) + 32];
      V[0] >>= 4;

      A = tmp[1] & 0xff;
      T = tmp[0];
      tmp[0] = (T >> 8) ^ ((u64) gcmR[A] << 48) ^ gcmM[(V[0] & 0xf) + 0];
      tmp[1] = (T << 56) ^ (tmp[1] >> 8) ^ gcmM[(V[0] & 0xf) + 16];

      tmp[0] ^= M[0];
      tmp[1] ^= M[16];

      if (i == 0)
        break;

      V[0] >>= 4;
      --i;
    }

  buf_put_be64 (result + 0, tmp[0]);
  buf_put_be64 (result + 8, tmp[1]);

  return (sizeof(V) + sizeof(T) + sizeof(tmp) +
          sizeof(int)*2 + sizeof(void*)*5);
}
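
/* Sketch of the table layout (as built by do_fillM above): gcmM[] holds 64
 * u64 entries.  gcmM[i + 0]/gcmM[i + 16] are the high/low halves of the 16
 * multiples i*H, and gcmM[i + 32]/gcmM[i + 48] hold the same multiples
 * pre-shifted by one nibble.  do_ghash then consumes V one byte (two
 * nibbles) per iteration, folding each byte shifted out of the low end of
 * 'tmp' back in through the gcmR[] reduction table. */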

#else /*!GCM_TABLES_USE_U64*/

static void
bshift (u32 * M, int i)
{
  u32 t[4], mask;

  t[0] = M[i * 4 + 0];
  t[1] = M[i * 4 + 1];
  t[2] = M[i * 4 + 2];
  t[3] = M[i * 4 + 3];
  mask = -(t[3] & 1) & 0xe1;

  M[i * 4 + 3] = (t[3] >> 1) ^ (t[2] << 31);
  M[i * 4 + 2] = (t[2] >> 1) ^ (t[1] << 31);
  M[i * 4 + 1] = (t[1] >> 1) ^ (t[0] << 31);
  M[i * 4 + 0] = (t[0] >> 1) ^ (mask << 24);
}

static void
do_fillM (unsigned char *h, u32 *M)
{
  int i, j;

  M[0 * 4 + 0] = 0;
  M[0 * 4 + 1] = 0;
  M[0 * 4 + 2] = 0;
  M[0 * 4 + 3] = 0;

  M[8 * 4 + 0] = buf_get_be32 (h + 0);
  M[8 * 4 + 1] = buf_get_be32 (h + 4);
  M[8 * 4 + 2] = buf_get_be32 (h + 8);
  M[8 * 4 + 3] = buf_get_be32 (h + 12);

  for (i = 4; i > 0; i /= 2)
    {
      M[i * 4 + 0] = M[2 * i * 4 + 0];
      M[i * 4 + 1] = M[2 * i * 4 + 1];
      M[i * 4 + 2] = M[2 * i * 4 + 2];
      M[i * 4 + 3] = M[2 * i * 4 + 3];

      bshift (M, i);
    }

  for (i = 2; i < 16; i *= 2)
    for (j = 1; j < i; j++)
      {
        M[(i + j) * 4 + 0] = M[i * 4 + 0] ^ M[j * 4 + 0];
        M[(i + j) * 4 + 1] = M[i * 4 + 1] ^ M[j * 4 + 1];
        M[(i + j) * 4 + 2] = M[i * 4 + 2] ^ M[j * 4 + 2];
        M[(i + j) * 4 + 3] = M[i * 4 + 3] ^ M[j * 4 + 3];
      }

  for (i = 0; i < 4 * 16; i += 4)
    {
      M[i + 0 + 64] = (M[i + 0] >> 4)
                      ^ ((u64) gcmR[(M[i + 3] << 4) & 0xf0] << 16);
      M[i + 1 + 64] = (M[i + 1] >> 4) ^ (M[i + 0] << 28);
      M[i + 2 + 64] = (M[i + 2] >> 4) ^ (M[i + 1] << 28);
      M[i + 3 + 64] = (M[i + 3] >> 4) ^ (M[i + 2] << 28);
    }
}

static inline unsigned int
do_ghash (unsigned char *result, const unsigned char *buf, const u32 *gcmM)
{
  byte V[16];
  u32 tmp[4];
  u32 v;
  const u32 *M, *m;
  u32 T[3];
  int i;

  cipher_block_xor (V, result, buf, 16); /* V is big-endian */

  /* First round can be manually tweaked based on the fact that 'tmp' is
   * zero. */
  i = 15;

  v = V[i];
  M = &gcmM[(v & 0xf) * 4 + 64];
  v = (v & 0xf0) >> 4;
  m = &gcmM[v * 4];
  v = V[--i];

  tmp[0] = M[0] ^ m[0];
  tmp[1] = M[1] ^ m[1];
  tmp[2] = M[2] ^ m[2];
  tmp[3] = M[3] ^ m[3];

  while (1)
    {
      M = &gcmM[(v & 0xf) * 4 + 64];
      v = (v & 0xf0) >> 4;
      m = &gcmM[v * 4];

      T[0] = tmp[0];
      T[1] = tmp[1];
      T[2] = tmp[2];
      tmp[0] = (T[0] >> 8) ^ ((u32) gcmR[tmp[3] & 0xff] << 16) ^ m[0];
      tmp[1] = (T[0] << 24) ^ (tmp[1] >> 8) ^ m[1];
      tmp[2] = (T[1] << 24) ^ (tmp[2] >> 8) ^ m[2];
      tmp[3] = (T[2] << 24) ^ (tmp[3] >> 8) ^ m[3];

      tmp[0] ^= M[0];
      tmp[1] ^= M[1];
      tmp[2] ^= M[2];
      tmp[3] ^= M[3];

      if (i == 0)
        break;

      v = V[--i];
    }

  buf_put_be32 (result + 0, tmp[0]);
  buf_put_be32 (result + 4, tmp[1]);
  buf_put_be32 (result + 8, tmp[2]);
  buf_put_be32 (result + 12, tmp[3]);

  return (sizeof(V) + sizeof(T) + sizeof(tmp) +
          sizeof(int)*2 + sizeof(void*)*6);
}
#endif /*!GCM_TABLES_USE_U64*/

#define fillM(c) \
  do_fillM (c->u_mode.gcm.u_ghash_key.key, c->u_mode.gcm.gcm_table)
#define GHASH(c, result, buf) do_ghash (result, buf, c->u_mode.gcm.gcm_table)
#define prefetch_tables(c) \
  do_prefetch_tables(c->u_mode.gcm.gcm_table, sizeof(c->u_mode.gcm.gcm_table))

#else

static unsigned long
bshift (unsigned long *b)
{
  unsigned long c;
  int i;
  c = b[3] & 1;
  for (i = 3; i > 0; i--)
    {
      b[i] = (b[i] >> 1) | (b[i - 1] << 31);
    }
  b[i] >>= 1;
  return c;
}

static unsigned int
do_ghash (unsigned char *hsub, unsigned char *result, const unsigned char *buf)
{
  unsigned long V[4];
  int i, j;
  byte *p;

#ifdef WORDS_BIGENDIAN
  p = result;
#else
  unsigned long T[4];

  cipher_block_xor (V, result, buf, 16);
  for (i = 0; i < 4; i++)
    {
      V[i] = (V[i] & 0x00ff00ff) << 8 | (V[i] & 0xff00ff00) >> 8;
      V[i] = (V[i] & 0x0000ffff) << 16 | (V[i] & 0xffff0000) >> 16;
    }
  p = (byte *) T;
#endif

  memset (p, 0, 16);

  for (i = 0; i < 16; i++)
    {
      for (j = 0x80; j; j >>= 1)
        {
          if (hsub[i] & j)
            cipher_block_xor (p, p, V, 16);
          if (bshift (V))
            V[0] ^= 0xe1000000;
        }
    }
#ifndef WORDS_BIGENDIAN
  for (i = 0, p = (byte *) T; i < 16; i += 4, p += 4)
    {
      result[i + 0] = p[3];
      result[i + 1] = p[2];
      result[i + 2] = p[1];
      result[i + 3] = p[0];
    }
#endif

  return (sizeof(V) + sizeof(T) + sizeof(int)*2 + sizeof(void*)*5);
}

#define fillM(c) do { } while (0)
#define GHASH(c, result, buf) do_ghash (c->u_mode.gcm.u_ghash_key.key, result, buf)
#define prefetch_tables(c) do {} while (0)

#endif /* !GCM_USE_TABLES */


static unsigned int
ghash_internal (gcry_cipher_hd_t c, byte *result, const byte *buf,
                size_t nblocks)
{
  const unsigned int blocksize = GCRY_GCM_BLOCK_LEN;
  unsigned int burn = 0;

  prefetch_tables (c);

  while (nblocks)
    {
      burn = GHASH (c, result, buf);
      buf += blocksize;
      nblocks--;
    }

  return burn + (burn ? 5*sizeof(void*) : 0);
}


static void
setupM (gcry_cipher_hd_t c)
{
  unsigned int features = _gcry_get_hw_features ();

  c->u_mode.gcm.ghash_fn = NULL;
  c->u_mode.gcm.polyval_fn = NULL;

  if (0)
    {
      (void)features;
    }
#ifdef GCM_USE_INTEL_PCLMUL
  else if (features & HWF_INTEL_PCLMUL)
    {
      _gcry_ghash_setup_intel_pclmul (c, features);
    }
#endif
#ifdef GCM_USE_ARM_PMULL
  else if (features & HWF_ARM_PMULL)
    {
      c->u_mode.gcm.ghash_fn = ghash_armv8_ce_pmull;
      c->u_mode.gcm.polyval_fn = polyval_armv8_ce_pmull;
      ghash_setup_armv8_ce_pmull (c);
    }
#endif
#ifdef GCM_USE_ARM_NEON
  else if (features & HWF_ARM_NEON)
    {
      c->u_mode.gcm.ghash_fn = ghash_armv7_neon;
      ghash_setup_armv7_neon (c);
    }
#endif
#ifdef GCM_USE_PPC_VPMSUM
  else if (features & HWF_PPC_VCRYPTO)
    {
      c->u_mode.gcm.ghash_fn = ghash_ppc_vpmsum;
      ghash_setup_ppc_vpmsum (c);
    }
#endif
#ifdef GCM_USE_S390X_CRYPTO
  else if (features & HWF_S390X_MSA)
    {
      if (kimd_query () & km_function_to_mask (KMID_FUNCTION_GHASH))
        {
          c->u_mode.gcm.ghash_fn = ghash_s390x_kimd;
        }
    }
#endif

  if (c->u_mode.gcm.ghash_fn == NULL)
    {
      c->u_mode.gcm.ghash_fn = ghash_internal;
      fillM (c);
    }
}


static inline void
gcm_bytecounter_add (u32 ctr[2], size_t add)
{
  if (sizeof(add) > sizeof(u32))
    {
      u32 high_add = ((add >> 31) >> 1) & 0xffffffff;
      ctr[1] += high_add;
    }

  ctr[0] += add;
  if (ctr[0] >= add)
    return;
  ++ctr[1];
}
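
/* Example (for illustration): the counter tracks a 64-bit byte count in two
 * 32-bit halves, ctr[0] low and ctr[1] high.  Adding 1 to ctr =
 * {0xffffffff, 0} overflows the low half, the 'ctr[0] >= add' test fails
 * and the carry propagates, giving ctr = {0, 1}.  The '(add >> 31) >> 1'
 * construct is a portable 'add >> 32' that avoids an undefined shift when
 * size_t is only 32 bits wide. */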


static inline u32
gcm_add32_be128 (byte *ctr, unsigned int add)
{
  /* 'ctr' must be aligned to four bytes. */
  const unsigned int blocksize = GCRY_GCM_BLOCK_LEN;
  u32 *pval = (u32 *)(void *)(ctr + blocksize - sizeof(u32));
  u32 val;

  val = be_bswap32(*pval) + add;
  *pval = be_bswap32(val);

  return val; /* return result as host-endian value */
}
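
/* Example (for illustration): with the last four counter bytes at
 * ff ff ff ff, gcm_add32_be128 (ctr, 1) wraps the big-endian low word to
 * 00 00 00 00 and returns 0; the upper 96 bits of the counter block are
 * never touched. */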


static inline int
gcm_check_datalen (u32 ctr[2])
{
  /* len(plaintext) <= 2^39-256 bits == 2^36-32 bytes == 2^32-2 blocks */
  if (ctr[1] > 0xfU)
    return 0;
  if (ctr[1] < 0xfU)
    return 1;

  if (ctr[0] <= 0xffffffe0U)
    return 1;

  return 0;
}
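
/* Worked limit (for illustration): 2^36 - 32 bytes split into the two u32
 * halves is ctr[1] = 0xf, ctr[0] = 0xffffffe0, which is exactly the largest
 * value the checks above accept. */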


static inline int
gcm_check_aadlen_or_ivlen (u32 ctr[2])
{
  /* len(aad/iv) <= 2^64-1 bits ~= 2^61-1 bytes */
  if (ctr[1] > 0x1fffffffU)
    return 0;
  if (ctr[1] < 0x1fffffffU)
    return 1;

  if (ctr[0] <= 0xffffffffU)
    return 1;

  return 0;
}
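
/* Worked limit (for illustration): the largest accepted value is
 * ctr[1] = 0x1fffffff with ctr[0] = 0xffffffff, i.e. 2^61 - 1 bytes. */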


static void
do_ghash_buf(gcry_cipher_hd_t c, byte *hash, const byte *buf,
             size_t buflen, int do_padding)
{
  unsigned int blocksize = GCRY_GCM_BLOCK_LEN;
  unsigned int unused = c->u_mode.gcm.mac_unused;
  ghash_fn_t ghash_fn = c->u_mode.gcm.ghash_fn;
  size_t nblocks, n;
  unsigned int burn = 0;

  if (buflen == 0 && (unused == 0 || !do_padding))
    return;

  do
    {
      if (buflen > 0 && (buflen + unused < blocksize || unused > 0))
        {
          n = blocksize - unused;
          n = n < buflen ? n : buflen;

          buf_cpy (&c->u_mode.gcm.macbuf[unused], buf, n);

          unused += n;
          buf += n;
          buflen -= n;
        }
      if (!buflen)
        {
          if (!do_padding && unused < blocksize)
            {
              break;
            }

          n = blocksize - unused;
          if (n > 0)
            {
              memset (&c->u_mode.gcm.macbuf[unused], 0, n);
              unused = blocksize;
            }
        }

      if (unused > 0)
        {
          gcry_assert (unused == blocksize);

          /* Process one block from macbuf.  */
          burn = ghash_fn (c, hash, c->u_mode.gcm.macbuf, 1);
          unused = 0;
        }

      nblocks = buflen / blocksize;

      if (nblocks)
        {
          burn = ghash_fn (c, hash, buf, nblocks);
          buf += blocksize * nblocks;
          buflen -= blocksize * nblocks;
        }
    }
  while (buflen > 0);

  c->u_mode.gcm.mac_unused = unused;

  if (burn)
    _gcry_burn_stack (burn);
}
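
/* Example (for illustration): hashing 20 bytes processes one 16-byte block
 * and leaves mac_unused = 4; a following call with 12 more bytes completes
 * the buffered block before continuing, so arbitrary split points produce
 * the same GHASH stream. */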


static gcry_err_code_t
gcm_ctr_encrypt (gcry_cipher_hd_t c, byte *outbuf, size_t outbuflen,
                 const byte *inbuf, size_t inbuflen)
{
  gcry_err_code_t err = 0;

  while (inbuflen)
    {
      u32 nblocks_to_overflow;
      u32 num_ctr_increments;
      u32 curr_ctr_low;
      size_t currlen = inbuflen;
      byte ctr_copy[GCRY_GCM_BLOCK_LEN];
      int fix_ctr = 0;

      /* GCM CTR increments only the least significant 32 bits, without
       * carry into the upper 96 bits of the counter.  Using the generic
       * CTR implementation directly would carry the 32-bit overflow into
       * the upper 96 bits.  Detect whether the input length is long enough
       * to cause overflow, and limit the input length so that the CTR
       * overflow happens but the updated CTR value is not used to encrypt
       * further input.  After the overflow, the upper 96 bits of the CTR
       * are restored to cancel out the modification done by the generic
       * CTR encryption. */

      if (inbuflen > c->unused)
        {
          curr_ctr_low = gcm_add32_be128 (c->u_ctr.ctr, 0);

          /* Number of CTR increments this inbuflen would cause. */
          num_ctr_increments = (inbuflen - c->unused) / GCRY_GCM_BLOCK_LEN +
                               !!((inbuflen - c->unused) % GCRY_GCM_BLOCK_LEN);

          if ((u32)(num_ctr_increments + curr_ctr_low) < curr_ctr_low)
            {
              nblocks_to_overflow = 0xffffffffU - curr_ctr_low + 1;
              currlen = nblocks_to_overflow * GCRY_GCM_BLOCK_LEN + c->unused;
              if (currlen > inbuflen)
                {
                  currlen = inbuflen;
                }

              fix_ctr = 1;
              cipher_block_cpy(ctr_copy, c->u_ctr.ctr, GCRY_GCM_BLOCK_LEN);
            }
        }

      err = _gcry_cipher_ctr_encrypt(c, outbuf, outbuflen, inbuf, currlen);
      if (err != 0)
        return err;

      if (fix_ctr)
        {
          /* Lower 32 bits of CTR should now be zero. */
          gcry_assert(gcm_add32_be128 (c->u_ctr.ctr, 0) == 0);

          /* Restore upper part of CTR. */
          buf_cpy(c->u_ctr.ctr, ctr_copy, GCRY_GCM_BLOCK_LEN - sizeof(u32));

          wipememory(ctr_copy, sizeof(ctr_copy));
        }

      inbuflen -= currlen;
      inbuf += currlen;
      outbuflen -= currlen;
      outbuf += currlen;
    }

  return err;
}
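
/* Example (for illustration): with c->unused == 0, counter low word
 * 0xfffffffe and a 4-block input, num_ctr_increments = 4 would wrap the low
 * word, so currlen is clamped to nblocks_to_overflow = 2 blocks.  The
 * generic CTR routine then carries into the upper 96 bits, which are
 * restored from ctr_copy, and the remaining 2 blocks are handled by the
 * next loop iteration with the wrapped counter. */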


static gcry_err_code_t
gcm_crypt_inner (gcry_cipher_hd_t c, byte *outbuf, size_t outbuflen,
                 const byte *inbuf, size_t inbuflen, int encrypt)
{
  gcry_err_code_t err;

  while (inbuflen)
    {
      size_t currlen = inbuflen;

      /* Use a bulk method if available.  */
      if (c->bulk.gcm_crypt)
        {
          /* Bulk method requires that there is no cached data. */
          if (inbuflen >= GCRY_GCM_BLOCK_LEN && c->u_mode.gcm.mac_unused == 0)
            {
              size_t nblks = inbuflen / GCRY_GCM_BLOCK_LEN;
              size_t nleft;
              size_t ndone;

              nleft = c->bulk.gcm_crypt (c, outbuf, inbuf, nblks, encrypt);
              ndone = nblks - nleft;

              inbuf += ndone * GCRY_GCM_BLOCK_LEN;
              outbuf += ndone * GCRY_GCM_BLOCK_LEN;
              inbuflen -= ndone * GCRY_GCM_BLOCK_LEN;
              outbuflen -= ndone * GCRY_GCM_BLOCK_LEN;

              if (inbuflen == 0)
                break;

              currlen = inbuflen;
            }
          else if (c->u_mode.gcm.mac_unused > 0
                   && inbuflen >= GCRY_GCM_BLOCK_LEN
                                  + (16 - c->u_mode.gcm.mac_unused))
            {
              /* Handle just enough data so that cache is depleted, and on
               * next loop iteration use bulk method. */
              currlen = 16 - c->u_mode.gcm.mac_unused;

              gcry_assert(currlen);
            }
        }

      /* Since checksumming is done after/before encryption/decryption,
       * process input in 24KiB chunks to keep data loaded in L1 cache for
       * checksumming/decryption.  However, only do splitting if input is
       * large enough so that the last chunk does not end up being short. */
      if (currlen > 32 * 1024)
        currlen = 24 * 1024;

      if (!encrypt)
        do_ghash_buf(c, c->u_mode.gcm.u_tag.tag, inbuf, currlen, 0);

      err = gcm_ctr_encrypt(c, outbuf, outbuflen, inbuf, currlen);
      if (err != 0)
        return err;

      if (encrypt)
        do_ghash_buf(c, c->u_mode.gcm.u_tag.tag, outbuf, currlen, 0);

      outbuf += currlen;
      inbuf += currlen;
      outbuflen -= currlen;
      inbuflen -= currlen;
    }

  return 0;
}


gcry_err_code_t
_gcry_cipher_gcm_encrypt (gcry_cipher_hd_t c,
                          byte *outbuf, size_t outbuflen,
                          const byte *inbuf, size_t inbuflen)
{
  if (c->spec->blocksize != GCRY_GCM_BLOCK_LEN)
    return GPG_ERR_CIPHER_ALGO;
  if (outbuflen < inbuflen)
    return GPG_ERR_BUFFER_TOO_SHORT;
  if (c->u_mode.gcm.datalen_over_limits)
    return GPG_ERR_INV_LENGTH;
  if (c->marks.tag
      || c->u_mode.gcm.ghash_data_finalized
      || !c->u_mode.gcm.ghash_fn)
    return GPG_ERR_INV_STATE;

  if (!c->marks.iv)
    _gcry_cipher_gcm_setiv_zero (c);

  if (c->u_mode.gcm.disallow_encryption_because_of_setiv_in_fips_mode)
    return GPG_ERR_INV_STATE;

  if (!c->u_mode.gcm.ghash_aad_finalized)
    {
      /* Start of encryption marks end of AAD stream. */
      do_ghash_buf(c, c->u_mode.gcm.u_tag.tag, NULL, 0, 1);
      c->u_mode.gcm.ghash_aad_finalized = 1;
    }

  gcm_bytecounter_add(c->u_mode.gcm.datalen, inbuflen);
  if (!gcm_check_datalen(c->u_mode.gcm.datalen))
    {
      c->u_mode.gcm.datalen_over_limits = 1;
      return GPG_ERR_INV_LENGTH;
    }

  return gcm_crypt_inner (c, outbuf, outbuflen, inbuf, inbuflen, 1);
}
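
/* A minimal usage sketch showing how this mode is reached through the
 * public API (illustrative only; error checking omitted, key/IV/data
 * values are placeholders): */
#if 0
  gcry_cipher_hd_t hd;
  byte key[16] = { 0 };   /* AES-128 key */
  byte iv[12] = { 0 };    /* recommended 96-bit IV */
  byte aad[8] = { 0 };
  byte pt[32] = { 0 }, ct[32], tag[16];

  gcry_cipher_open (&hd, GCRY_CIPHER_AES128, GCRY_CIPHER_MODE_GCM, 0);
  gcry_cipher_setkey (hd, key, sizeof(key));
  gcry_cipher_setiv (hd, iv, sizeof(iv));          /* -> _gcry_cipher_gcm_setiv */
  gcry_cipher_authenticate (hd, aad, sizeof(aad)); /* -> ..._gcm_authenticate */
  gcry_cipher_encrypt (hd, ct, sizeof(ct), pt, sizeof(pt)); /* -> ..._gcm_encrypt */
  gcry_cipher_gettag (hd, tag, sizeof(tag));       /* -> ..._gcm_get_tag */
  gcry_cipher_close (hd);
#endif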


gcry_err_code_t
_gcry_cipher_gcm_decrypt (gcry_cipher_hd_t c,
                          byte *outbuf, size_t outbuflen,
                          const byte *inbuf, size_t inbuflen)
{
  if (c->spec->blocksize != GCRY_GCM_BLOCK_LEN)
    return GPG_ERR_CIPHER_ALGO;
  if (outbuflen < inbuflen)
    return GPG_ERR_BUFFER_TOO_SHORT;
  if (c->u_mode.gcm.datalen_over_limits)
    return GPG_ERR_INV_LENGTH;
  if (c->marks.tag
      || c->u_mode.gcm.ghash_data_finalized
      || !c->u_mode.gcm.ghash_fn)
    return GPG_ERR_INV_STATE;

  if (!c->marks.iv)
    _gcry_cipher_gcm_setiv_zero (c);

  if (!c->u_mode.gcm.ghash_aad_finalized)
    {
      /* Start of decryption marks end of AAD stream. */
      do_ghash_buf(c, c->u_mode.gcm.u_tag.tag, NULL, 0, 1);
      c->u_mode.gcm.ghash_aad_finalized = 1;
    }

  gcm_bytecounter_add(c->u_mode.gcm.datalen, inbuflen);
  if (!gcm_check_datalen(c->u_mode.gcm.datalen))
    {
      c->u_mode.gcm.datalen_over_limits = 1;
      return GPG_ERR_INV_LENGTH;
    }

  return gcm_crypt_inner (c, outbuf, outbuflen, inbuf, inbuflen, 0);
}


gcry_err_code_t
_gcry_cipher_gcm_authenticate (gcry_cipher_hd_t c,
                               const byte * aadbuf, size_t aadbuflen)
{
  if (c->spec->blocksize != GCRY_GCM_BLOCK_LEN)
    return GPG_ERR_CIPHER_ALGO;
  if (c->u_mode.gcm.datalen_over_limits)
    return GPG_ERR_INV_LENGTH;
  if (c->marks.tag
      || c->u_mode.gcm.ghash_aad_finalized
      || c->u_mode.gcm.ghash_data_finalized
      || !c->u_mode.gcm.ghash_fn)
    return GPG_ERR_INV_STATE;

  if (!c->marks.iv)
    _gcry_cipher_gcm_setiv_zero (c);

  gcm_bytecounter_add(c->u_mode.gcm.aadlen, aadbuflen);
  if (!gcm_check_aadlen_or_ivlen(c->u_mode.gcm.aadlen))
    {
      c->u_mode.gcm.datalen_over_limits = 1;
      return GPG_ERR_INV_LENGTH;
    }

  do_ghash_buf(c, c->u_mode.gcm.u_tag.tag, aadbuf, aadbuflen, 0);

  return 0;
}


void
_gcry_cipher_gcm_setupM (gcry_cipher_hd_t c)
{
  setupM (c);
}


void
_gcry_cipher_gcm_setkey (gcry_cipher_hd_t c)
{
  memset (c->u_mode.gcm.u_ghash_key.key, 0, GCRY_GCM_BLOCK_LEN);

  c->spec->encrypt (&c->context.c, c->u_mode.gcm.u_ghash_key.key,
                    c->u_mode.gcm.u_ghash_key.key);
  setupM (c);
}


static gcry_err_code_t
_gcry_cipher_gcm_initiv (gcry_cipher_hd_t c, const byte *iv, size_t ivlen)
{
  memset (c->u_mode.gcm.aadlen, 0, sizeof(c->u_mode.gcm.aadlen));
  memset (c->u_mode.gcm.datalen, 0, sizeof(c->u_mode.gcm.datalen));
  memset (c->u_mode.gcm.u_tag.tag, 0, GCRY_GCM_BLOCK_LEN);
  c->u_mode.gcm.datalen_over_limits = 0;
  c->u_mode.gcm.ghash_data_finalized = 0;
  c->u_mode.gcm.ghash_aad_finalized = 0;

  if (ivlen == 0)
    return GPG_ERR_INV_LENGTH;

  if (ivlen != GCRY_GCM_BLOCK_LEN - 4)
    {
      u32 iv_bytes[2] = {0, 0};
      u32 bitlengths[2][2];

      if (!c->u_mode.gcm.ghash_fn)
        return GPG_ERR_INV_STATE;

      memset(c->u_ctr.ctr, 0, GCRY_GCM_BLOCK_LEN);

      gcm_bytecounter_add(iv_bytes, ivlen);
      if (!gcm_check_aadlen_or_ivlen(iv_bytes))
        {
          c->u_mode.gcm.datalen_over_limits = 1;
          return GPG_ERR_INV_LENGTH;
        }

      do_ghash_buf(c, c->u_ctr.ctr, iv, ivlen, 1);

      /* iv length, 64-bit */
      bitlengths[1][1] = be_bswap32(iv_bytes[0] << 3);
      bitlengths[1][0] = be_bswap32((iv_bytes[0] >> 29) |
                                    (iv_bytes[1] << 3));
      /* zeros, 64-bit */
      bitlengths[0][1] = 0;
      bitlengths[0][0] = 0;

      do_ghash_buf(c, c->u_ctr.ctr, (byte*)bitlengths, GCRY_GCM_BLOCK_LEN, 1);

      wipememory (iv_bytes, sizeof iv_bytes);
      wipememory (bitlengths, sizeof bitlengths);
    }
  else
    {
      /* 96-bit IV is handled differently. */
      memcpy (c->u_ctr.ctr, iv, ivlen);
      c->u_ctr.ctr[12] = c->u_ctr.ctr[13] = c->u_ctr.ctr[14] = 0;
      c->u_ctr.ctr[15] = 1;
    }

  c->spec->encrypt (&c->context.c, c->u_mode.gcm.tagiv, c->u_ctr.ctr);

  gcm_add32_be128 (c->u_ctr.ctr, 1);

  c->unused = 0;
  c->marks.iv = 1;
  c->marks.tag = 0;

  return 0;
}
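
/* Example (for illustration): with the recommended 96-bit IV, the counter
 * block is simply IV || 00 00 00 01; E_K of that block is reserved in
 * 'tagiv' for the final tag, and the counter is bumped to IV || 00 00 00 02
 * before the first data block.  Any other IV length instead derives the
 * counter block as GHASH(IV padded to a block boundary || 64-bit zeros ||
 * 64-bit IV bit length), as specified in NIST SP 800-38D. */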


gcry_err_code_t
_gcry_cipher_gcm_setiv (gcry_cipher_hd_t c, const byte *iv, size_t ivlen)
{
  c->marks.iv = 0;
  c->marks.tag = 0;

  return _gcry_cipher_gcm_initiv (c, iv, ivlen);
}

static gcry_err_code_t
_gcry_cipher_gcm_setiv_zero (gcry_cipher_hd_t c)
{
  static const unsigned char zerobuf[MAX_BLOCKSIZE];

  c->u_mode.gcm.disallow_encryption_because_of_setiv_in_fips_mode = 0;

  if (fips_mode ())
    {
      /* Direct invocation of GCM setiv in FIPS mode disables encryption. */
      c->u_mode.gcm.disallow_encryption_because_of_setiv_in_fips_mode = 1;
    }

  return _gcry_cipher_gcm_setiv (c, zerobuf, GCRY_GCM_BLOCK_LEN);
}


#if 0 && TODO
void
_gcry_cipher_gcm_geniv (gcry_cipher_hd_t c,
                        byte *ivout, size_t ivoutlen, const byte *nonce,
                        size_t noncelen)
{
  /* nonce:    user provided part (might be null) */
  /* noncelen: check if proper length (if nonce not null) */
  /* ivout:    iv used to initialize gcm, output to user */
  /* ivoutlen: check correct size */
  byte iv[IVLEN];

  if (!ivout)
    return GPG_ERR_INV_ARG;
  if (ivoutlen != IVLEN)
    return GPG_ERR_INV_LENGTH;
  if (nonce != NULL && !is_nonce_ok_len(noncelen))
    return GPG_ERR_INV_ARG;

  gcm_generate_iv(iv, nonce, noncelen);

  c->marks.iv = 0;
  c->marks.tag = 0;
  c->u_mode.gcm.disallow_encryption_because_of_setiv_in_fips_mode = 0;

  _gcry_cipher_gcm_initiv (c, iv, IVLEN);

  buf_cpy(ivout, iv, IVLEN);
  wipememory(iv, sizeof(iv));
}
#endif


static int
is_tag_length_valid(size_t taglen)
{
  switch (taglen)
    {
    /* Allowed tag lengths from NIST SP 800-38D.  */
    case 128 / 8: /* GCRY_GCM_BLOCK_LEN */
    case 120 / 8:
    case 112 / 8:
    case 104 / 8:
    case 96 / 8:
    case 64 / 8:
    case 32 / 8:
      return 1;

    default:
      return 0;
    }
}

static gcry_err_code_t
_gcry_cipher_gcm_tag (gcry_cipher_hd_t c,
                      byte * outbuf, size_t outbuflen, int check)
{
  if (!(is_tag_length_valid (outbuflen) || outbuflen >= GCRY_GCM_BLOCK_LEN))
    return GPG_ERR_INV_LENGTH;
  if (c->u_mode.gcm.datalen_over_limits)
    return GPG_ERR_INV_LENGTH;

  if (!c->marks.tag)
    {
      u32 bitlengths[2][2];

      if (!c->u_mode.gcm.ghash_fn)
        return GPG_ERR_INV_STATE;

      /* aad length */
      bitlengths[0][1] = be_bswap32(c->u_mode.gcm.aadlen[0] << 3);
      bitlengths[0][0] = be_bswap32((c->u_mode.gcm.aadlen[0] >> 29) |
                                    (c->u_mode.gcm.aadlen[1] << 3));
      /* data length */
      bitlengths[1][1] = be_bswap32(c->u_mode.gcm.datalen[0] << 3);
      bitlengths[1][0] = be_bswap32((c->u_mode.gcm.datalen[0] >> 29) |
                                    (c->u_mode.gcm.datalen[1] << 3));

      /* Finalize data-stream. */
      do_ghash_buf(c, c->u_mode.gcm.u_tag.tag, NULL, 0, 1);
      c->u_mode.gcm.ghash_aad_finalized = 1;
      c->u_mode.gcm.ghash_data_finalized = 1;

      /* Add bitlengths to tag. */
      do_ghash_buf(c, c->u_mode.gcm.u_tag.tag, (byte*)bitlengths,
                   GCRY_GCM_BLOCK_LEN, 1);
      cipher_block_xor (c->u_mode.gcm.u_tag.tag, c->u_mode.gcm.tagiv,
                        c->u_mode.gcm.u_tag.tag, GCRY_GCM_BLOCK_LEN);
      c->marks.tag = 1;

      wipememory (bitlengths, sizeof (bitlengths));
      wipememory (c->u_mode.gcm.macbuf, GCRY_GCM_BLOCK_LEN);
      wipememory (c->u_mode.gcm.tagiv, GCRY_GCM_BLOCK_LEN);
      wipememory (c->u_mode.gcm.aadlen, sizeof (c->u_mode.gcm.aadlen));
      wipememory (c->u_mode.gcm.datalen, sizeof (c->u_mode.gcm.datalen));
    }

  if (!check)
    {
      if (outbuflen > GCRY_GCM_BLOCK_LEN)
        outbuflen = GCRY_GCM_BLOCK_LEN;

      /* NB: We already checked that OUTBUF is large enough to hold
       * the result or has valid truncated length.  */
      memcpy (outbuf, c->u_mode.gcm.u_tag.tag, outbuflen);
    }
  else
    {
      /* OUTBUFLEN gives the length of the user supplied tag in OUTBUF
       * and thus we need to compare its length first.  */
      if (!is_tag_length_valid (outbuflen)
          || !buf_eq_const (outbuf, c->u_mode.gcm.u_tag.tag, outbuflen))
        return GPG_ERR_CHECKSUM;
    }

  return 0;
}


gcry_err_code_t
_gcry_cipher_gcm_get_tag (gcry_cipher_hd_t c, unsigned char *outtag,
                          size_t taglen)
{
  /* Outputting authentication tag is part of encryption. */
  if (c->u_mode.gcm.disallow_encryption_because_of_setiv_in_fips_mode)
    return GPG_ERR_INV_STATE;

  return _gcry_cipher_gcm_tag (c, outtag, taglen, 0);
}

gcry_err_code_t
_gcry_cipher_gcm_check_tag (gcry_cipher_hd_t c, const unsigned char *intag,
                            size_t taglen)
{
  return _gcry_cipher_gcm_tag (c, (unsigned char *) intag, taglen, 1);
}