Coverage Report

Created: 2022-12-08 06:10

/src/libgcrypt/cipher/cipher-ocb.c
Every line in the listing below has an execution count of 0; the file is entirely uncovered.
/* cipher-ocb.c -  OCB cipher mode
 * Copyright (C) 2015, 2016 g10 Code GmbH
 *
 * This file is part of Libgcrypt.
 *
 * Libgcrypt is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as
 * published by the Free Software Foundation; either version 2.1 of
 * the License, or (at your option) any later version.
 *
 * Libgcrypt is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this program; if not, see <http://www.gnu.org/licenses/>.
 *
 *
 * OCB is covered by several patents but may be used freely by most
 * software.  See http://web.cs.ucdavis.edu/~rogaway/ocb/license.htm .
 * In particular license 1 is suitable for Libgcrypt: See
 * http://web.cs.ucdavis.edu/~rogaway/ocb/license1.pdf for the full
 * license document; it basically says:
 *
 *   License 1 — License for Open-Source Software Implementations of OCB
 *               (Jan 9, 2013)
 *
 *   Under this license, you are authorized to make, use, and
 *   distribute open-source software implementations of OCB. This
 *   license terminates for you if you sue someone over their
 *   open-source software implementation of OCB claiming that you have
 *   a patent covering their implementation.
 */


#include <config.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>

#include "g10lib.h"
#include "cipher.h"
#include "bufhelp.h"
#include "./cipher-internal.h"


/* Double the OCB_BLOCK_LEN sized block B in-place.  */
static inline void
double_block (u64 b[2])
{
  u64 l_0, l, r;

  l = b[1];
  r = b[0];

  l_0 = -(l >> 63);
  l = (l + l) ^ (r >> 63);
  r = (r + r) ^ (l_0 & 135);

  b[1] = l;
  b[0] = r;
}
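
/* Illustrative sketch: double_block is multiplication by x in GF(2^128)
 * with the OCB reduction polynomial x^128 + x^7 + x^2 + x + 1; the
 * constant 135 above is 0x87, the low byte of that polynomial.  A
 * minimal byte-wise sketch of the same doubling, assuming a 16-byte
 * big-endian block (example only, not used by the code): */
#if 0
static void
double_block_bytes (unsigned char b[16])
{
  unsigned char carry = b[0] >> 7;      /* bit shifted out at x^127 */
  int i;

  for (i = 0; i < 15; i++)
    b[i] = (b[i] << 1) | (b[i + 1] >> 7);
  b[15] = (b[15] << 1) ^ (carry ? 0x87 : 0);  /* reduce on overflow */
}
#endif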


/* Copy OCB_BLOCK_LEN from buffer S starting at bit offset BITOFF to
 * buffer D.  */
static void
bit_copy (unsigned char d[16], const unsigned char s[24], unsigned int bitoff)
{
  u64 s0l, s1l, s1r, s2r;
  unsigned int shift;
  unsigned int byteoff;

  byteoff = bitoff / 8;
  shift = bitoff % 8;

  s0l = buf_get_be64 (s + byteoff + 0);
  s1l = buf_get_be64 (s + byteoff + 8);
  s1r = shift ? s1l : 0;
  s2r = shift ? buf_get_be64 (s + 16) << (8 * byteoff) : 0;

  buf_put_be64 (d + 0, (s0l << shift) | (s1r >> ((64 - shift) & 63)));
  buf_put_be64 (d + 8, (s1l << shift) | (s2r >> ((64 - shift) & 63)));
}
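
/* For orientation: bit_copy extracts the 128 bits of S that start at
 * bit offset BITOFF, which in OCB is always 0..63 since "bottom" is six
 * bits.  A simple byte-at-a-time equivalent, assuming bitoff < 64
 * (example only, not used by the code): */
#if 0
static void
bit_copy_simple (unsigned char d[16], const unsigned char s[24],
                 unsigned int bitoff)
{
  unsigned int byteoff = bitoff / 8;
  unsigned int shift = bitoff % 8;
  unsigned int i;

  for (i = 0; i < 16; i++)
    d[i] = shift
      ? (s[byteoff + i] << shift) | (s[byteoff + i + 1] >> (8 - shift))
      : s[byteoff + i];
}
#endif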


/* Get L_big value for block N, where N is a multiple of 65536. */
static void
ocb_get_L_big (gcry_cipher_hd_t c, u64 n, unsigned char *l_buf)
{
  int ntz = _gcry_ctz64 (n);
  u64 L[2];

  gcry_assert(ntz >= OCB_L_TABLE_SIZE);

  L[1] = buf_get_be64 (c->u_mode.ocb.L[OCB_L_TABLE_SIZE - 1]);
  L[0] = buf_get_be64 (c->u_mode.ocb.L[OCB_L_TABLE_SIZE - 1] + 8);

  for (ntz -= OCB_L_TABLE_SIZE - 1; ntz; ntz--)
    double_block (L);

  buf_put_be64 (l_buf + 0, L[1]);
  buf_put_be64 (l_buf + 8, L[0]);
}
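
/* Worked example: OCB needs L_{ntz(i)} for block index i, where each
 * L_j = double(L_{j-1}).  The table caches L_0 .. L_{OCB_L_TABLE_SIZE-1};
 * this helper extends the chain on the fly.  E.g. with a 16-entry table
 * and n = 1 << 20, ntz(n) = 20, so the last table entry L_15 is doubled
 * five more times to yield L_20. */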


/* Called after key has been set. Sets up L table. */
void
_gcry_cipher_ocb_setkey (gcry_cipher_hd_t c)
{
  unsigned char ktop[OCB_BLOCK_LEN];
  unsigned int burn = 0;
  unsigned int nburn;
  u64 L[2];
  int i;

  /* L_star = E(zero_128) */
  memset (ktop, 0, OCB_BLOCK_LEN);
  nburn = c->spec->encrypt (&c->context.c, c->u_mode.ocb.L_star, ktop);
  burn = nburn > burn ? nburn : burn;
  /* L_dollar = double(L_star)  */
  L[1] = buf_get_be64 (c->u_mode.ocb.L_star);
  L[0] = buf_get_be64 (c->u_mode.ocb.L_star + 8);
  double_block (L);
  buf_put_be64 (c->u_mode.ocb.L_dollar + 0, L[1]);
  buf_put_be64 (c->u_mode.ocb.L_dollar + 8, L[0]);
  /* L_0 = double(L_dollar), ...  */
  double_block (L);
  buf_put_be64 (c->u_mode.ocb.L[0] + 0, L[1]);
  buf_put_be64 (c->u_mode.ocb.L[0] + 8, L[0]);
  for (i = 1; i < OCB_L_TABLE_SIZE; i++)
    {
      double_block (L);
      buf_put_be64 (c->u_mode.ocb.L[i] + 0, L[1]);
      buf_put_be64 (c->u_mode.ocb.L[i] + 8, L[0]);
    }
  /* Precalculated offset L0+L1 */
  cipher_block_xor (c->u_mode.ocb.L0L1,
                    c->u_mode.ocb.L[0], c->u_mode.ocb.L[1], OCB_BLOCK_LEN);

  /* Cleanup */
  wipememory (ktop, sizeof ktop);
  if (burn > 0)
    _gcry_burn_stack (burn + 4*sizeof(void*));
}
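
/* Recap of the key-dependent constants derived above (the same chain as
 * in RFC 7253):
 *
 *   L_*  = ENCIPHER(K, zeros(128))
 *   L_$  = double(L_*)
 *   L_0  = double(L_$)
 *   L_i  = double(L_{i-1})        for i = 1 .. OCB_L_TABLE_SIZE-1
 *
 * L0L1 = L_0 xor L_1 appears to be cached for bulk implementations that
 * advance the offset by two blocks at a time. */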


/* Set the nonce for OCB.  This requires that the key has been set.
   Using it again starts a new encryption cycle using the same key.  */
gcry_err_code_t
_gcry_cipher_ocb_set_nonce (gcry_cipher_hd_t c, const unsigned char *nonce,
                            size_t noncelen)
{
  unsigned char ktop[OCB_BLOCK_LEN];
  unsigned char stretch[OCB_BLOCK_LEN + 8];
  unsigned int bottom;
  unsigned int burn = 0;
  unsigned int nburn;

  /* Check args.  */
  if (!c->marks.key)
    return GPG_ERR_INV_STATE;  /* Key must have been set first.  */
  switch (c->u_mode.ocb.taglen)
    {
    case 8:
    case 12:
    case 16:
      break;
    default:
      return GPG_ERR_BUG; /* Invalid tag length. */
    }

  if (c->spec->blocksize != OCB_BLOCK_LEN)
    return GPG_ERR_CIPHER_ALGO;
  if (!nonce)
    return GPG_ERR_INV_ARG;
  /* 120 bits is the allowed maximum.  In addition we impose a minimum
     of 64 bits.  */
  if (noncelen > (120/8) || noncelen < (64/8) || noncelen >= OCB_BLOCK_LEN)
    return GPG_ERR_INV_LENGTH;

  /* Prepare the nonce.  */
  memset (ktop, 0, OCB_BLOCK_LEN);
  buf_cpy (ktop + (OCB_BLOCK_LEN - noncelen), nonce, noncelen);
  ktop[0] = ((c->u_mode.ocb.taglen * 8) % 128) << 1;
  ktop[OCB_BLOCK_LEN - noncelen - 1] |= 1;
  bottom = ktop[OCB_BLOCK_LEN - 1] & 0x3f;
  ktop[OCB_BLOCK_LEN - 1] &= 0xc0; /* Zero the bottom bits.  */
  nburn = c->spec->encrypt (&c->context.c, ktop, ktop);
  burn = nburn > burn ? nburn : burn;
  /* Stretch = Ktop || (Ktop[1..64] xor Ktop[9..72]) */
  cipher_block_cpy (stretch, ktop, OCB_BLOCK_LEN);
  cipher_block_xor (stretch + OCB_BLOCK_LEN, ktop, ktop + 1, 8);
  /* Offset_0 = Stretch[1+bottom..128+bottom]
     (We use the IV field to store the offset) */
  bit_copy (c->u_iv.iv, stretch, bottom);
  c->marks.iv = 1;

  /* Checksum_0 = zeros(128)
     (We use the CTR field to store the checksum) */
  memset (c->u_ctr.ctr, 0, OCB_BLOCK_LEN);

  /* Clear AAD buffer.  */
  memset (c->u_mode.ocb.aad_offset, 0, OCB_BLOCK_LEN);
  memset (c->u_mode.ocb.aad_sum, 0, OCB_BLOCK_LEN);

  /* Setup other values.  */
  memset (c->lastiv, 0, sizeof(c->lastiv));
  c->unused = 0;
  c->marks.tag = 0;
  c->marks.finalize = 0;
  c->u_mode.ocb.data_nblocks = 0;
  c->u_mode.ocb.aad_nblocks = 0;
  c->u_mode.ocb.aad_nleftover = 0;
  c->u_mode.ocb.data_finalized = 0;
  c->u_mode.ocb.aad_finalized = 0;

  /* log_printhex ("L_*       ", c->u_mode.ocb.L_star, OCB_BLOCK_LEN); */
  /* log_printhex ("L_$       ", c->u_mode.ocb.L_dollar, OCB_BLOCK_LEN); */
  /* log_printhex ("L_0       ", c->u_mode.ocb.L[0], OCB_BLOCK_LEN); */
  /* log_printhex ("L_1       ", c->u_mode.ocb.L[1], OCB_BLOCK_LEN); */
  /* log_debug (   "bottom    : %u (decimal)\n", bottom); */
  /* log_printhex ("Ktop      ", ktop, OCB_BLOCK_LEN); */
  /* log_printhex ("Stretch   ", stretch, sizeof stretch); */
  /* log_printhex ("Offset_0  ", c->u_iv.iv, OCB_BLOCK_LEN); */

  /* Cleanup */
  wipememory (ktop, sizeof ktop);
  wipememory (stretch, sizeof stretch);
  if (burn > 0)
    _gcry_burn_stack (burn + 4*sizeof(void*));

  return 0;
}
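
/* Nonce formatting recap (this matches RFC 7253): the block fed to the
 * cipher above is
 *
 *   Ktop     = ENCIPHER(K, TAGLEN mod 128 || zeros(120-bitlen(N)) || 1 || N)
 *   Stretch  = Ktop || (Ktop[1..64] xor Ktop[9..72])
 *   Offset_0 = Stretch[1+bottom..128+bottom]
 *
 * where bottom is taken from the low six bits of the formatted nonce
 * before they are zeroed.  Hence ktop[0] carries TAGLEN mod 128 in its
 * top seven bits, and the 1 bit sits just before the nonce bytes. */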


/* Process additional authentication data.  This implementation allows
   additional authentication data to be added at any time before the
   final gcry_cipher_gettag.  */
gcry_err_code_t
_gcry_cipher_ocb_authenticate (gcry_cipher_hd_t c, const unsigned char *abuf,
                               size_t abuflen)
{
  const size_t table_maxblks = 1 << OCB_L_TABLE_SIZE;
  const u32 table_size_mask = ((1 << OCB_L_TABLE_SIZE) - 1);
  unsigned char l_tmp[OCB_BLOCK_LEN];
  unsigned int burn = 0;
  unsigned int nburn;
  size_t n;

  /* Check that a nonce and thus a key has been set and that we have
     not yet computed the tag.  We also return an error if the aad has
     been finalized (i.e. a short block has been processed).  */
  if (!c->marks.iv || c->marks.tag || c->u_mode.ocb.aad_finalized)
    return GPG_ERR_INV_STATE;

  /* Check correct usage and arguments.  */
  if (c->spec->blocksize != OCB_BLOCK_LEN)
    return GPG_ERR_CIPHER_ALGO;

  /* Process remaining data from the last call first.  */
  if (c->u_mode.ocb.aad_nleftover)
    {
      n = abuflen;
      if (n > OCB_BLOCK_LEN - c->u_mode.ocb.aad_nleftover)
        n = OCB_BLOCK_LEN - c->u_mode.ocb.aad_nleftover;

      buf_cpy (&c->u_mode.ocb.aad_leftover[c->u_mode.ocb.aad_nleftover],
               abuf, n);
      c->u_mode.ocb.aad_nleftover += n;
      abuf += n;
      abuflen -= n;

      if (c->u_mode.ocb.aad_nleftover == OCB_BLOCK_LEN)
        {
          c->u_mode.ocb.aad_nblocks++;

          if ((c->u_mode.ocb.aad_nblocks % table_maxblks) == 0)
            {
              /* Table overflow, L needs to be generated. */
              ocb_get_L_big(c, c->u_mode.ocb.aad_nblocks, l_tmp);
            }
          else
            {
              cipher_block_cpy (l_tmp, ocb_get_l (c, c->u_mode.ocb.aad_nblocks),
                                OCB_BLOCK_LEN);
            }

          /* Offset_i = Offset_{i-1} xor L_{ntz(i)} */
          cipher_block_xor_1 (c->u_mode.ocb.aad_offset, l_tmp, OCB_BLOCK_LEN);
          /* Sum_i = Sum_{i-1} xor ENCIPHER(K, A_i xor Offset_i)  */
          cipher_block_xor (l_tmp, c->u_mode.ocb.aad_offset,
                            c->u_mode.ocb.aad_leftover, OCB_BLOCK_LEN);
          nburn = c->spec->encrypt (&c->context.c, l_tmp, l_tmp);
          burn = nburn > burn ? nburn : burn;
          cipher_block_xor_1 (c->u_mode.ocb.aad_sum, l_tmp, OCB_BLOCK_LEN);

          c->u_mode.ocb.aad_nleftover = 0;
        }
    }

  if (!abuflen)
    {
      if (burn > 0)
        _gcry_burn_stack (burn + 4*sizeof(void*));

      return 0;
    }

  /* Full blocks handling. */
  while (abuflen >= OCB_BLOCK_LEN)
    {
      size_t nblks = abuflen / OCB_BLOCK_LEN;
      size_t nmaxblks;

      /* Check how many blocks to process till table overflow. */
      nmaxblks = (c->u_mode.ocb.aad_nblocks + 1) % table_maxblks;
      nmaxblks = (table_maxblks - nmaxblks) % table_maxblks;

      if (nmaxblks == 0)
        {
          /* Table overflow, generate L and process one block. */
          c->u_mode.ocb.aad_nblocks++;
          ocb_get_L_big(c, c->u_mode.ocb.aad_nblocks, l_tmp);

          /* Offset_i = Offset_{i-1} xor L_{ntz(i)} */
          cipher_block_xor_1 (c->u_mode.ocb.aad_offset, l_tmp, OCB_BLOCK_LEN);
          /* Sum_i = Sum_{i-1} xor ENCIPHER(K, A_i xor Offset_i)  */
          cipher_block_xor (l_tmp, c->u_mode.ocb.aad_offset, abuf,
                            OCB_BLOCK_LEN);
          nburn = c->spec->encrypt (&c->context.c, l_tmp, l_tmp);
          burn = nburn > burn ? nburn : burn;
          cipher_block_xor_1 (c->u_mode.ocb.aad_sum, l_tmp, OCB_BLOCK_LEN);

          abuf += OCB_BLOCK_LEN;
          abuflen -= OCB_BLOCK_LEN;
          nblks--;

          /* With overflow handled, retry loop again. Next overflow will
           * happen after 65535 blocks. */
          continue;
        }

      nblks = nblks < nmaxblks ? nblks : nmaxblks;

      /* Use a bulk method if available.  */
      if (nblks && c->bulk.ocb_auth)
        {
          size_t nleft;
          size_t ndone;

          nleft = c->bulk.ocb_auth (c, abuf, nblks);
          ndone = nblks - nleft;

          abuf += ndone * OCB_BLOCK_LEN;
          abuflen -= ndone * OCB_BLOCK_LEN;
          nblks = nleft;
        }

      /* Hash all full blocks.  */
      while (nblks)
        {
          c->u_mode.ocb.aad_nblocks++;

          gcry_assert(c->u_mode.ocb.aad_nblocks & table_size_mask);

          /* Offset_i = Offset_{i-1} xor L_{ntz(i)} */
          cipher_block_xor_1 (c->u_mode.ocb.aad_offset,
                              ocb_get_l (c, c->u_mode.ocb.aad_nblocks),
                              OCB_BLOCK_LEN);
          /* Sum_i = Sum_{i-1} xor ENCIPHER(K, A_i xor Offset_i)  */
          cipher_block_xor (l_tmp, c->u_mode.ocb.aad_offset, abuf,
                            OCB_BLOCK_LEN);
          nburn = c->spec->encrypt (&c->context.c, l_tmp, l_tmp);
          burn = nburn > burn ? nburn : burn;
          cipher_block_xor_1 (c->u_mode.ocb.aad_sum, l_tmp, OCB_BLOCK_LEN);

          abuf += OCB_BLOCK_LEN;
          abuflen -= OCB_BLOCK_LEN;
          nblks--;
        }
    }

  /* Store away the remaining data.  */
  if (abuflen)
    {
      n = abuflen;
      if (n > OCB_BLOCK_LEN - c->u_mode.ocb.aad_nleftover)
        n = OCB_BLOCK_LEN - c->u_mode.ocb.aad_nleftover;

      buf_cpy (&c->u_mode.ocb.aad_leftover[c->u_mode.ocb.aad_nleftover],
               abuf, n);
      c->u_mode.ocb.aad_nleftover += n;
      abuf += n;
      abuflen -= n;
    }

  gcry_assert (!abuflen);

  if (burn > 0)
    _gcry_burn_stack (burn + 4*sizeof(void*));

  return 0;
}
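
/* Usage sketch for the incremental AAD interface (illustrative only;
 * handle setup, buffer names and error handling are assumed):
 *
 *   gcry_cipher_hd_t hd;   opened with GCRY_CIPHER_MODE_OCB,
 *                          key and nonce already set
 *   gcry_cipher_authenticate (hd, aad_hdr, aad_hdr_len);
 *   gcry_cipher_authenticate (hd, aad_body, aad_body_len);
 *
 * Chunks may have any size; full blocks are hashed immediately and the
 * remainder is buffered in aad_leftover until more data or the final
 * tag computation arrives. */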


/* Hash final partial AAD block.  */
static void
ocb_aad_finalize (gcry_cipher_hd_t c)
{
  unsigned char l_tmp[OCB_BLOCK_LEN];
  unsigned int burn = 0;
  unsigned int nburn;

  /* Check that a nonce and thus a key has been set and that we have
     not yet computed the tag.  We also skip this if the aad has been
     finalized.  */
  if (!c->marks.iv || c->marks.tag || c->u_mode.ocb.aad_finalized)
    return;
  if (c->spec->blocksize != OCB_BLOCK_LEN)
    return;  /* Ooops.  */

  /* Hash final partial block if any.  */
  if (c->u_mode.ocb.aad_nleftover)
    {
      /* Offset_* = Offset_m xor L_*  */
      cipher_block_xor_1 (c->u_mode.ocb.aad_offset,
                          c->u_mode.ocb.L_star, OCB_BLOCK_LEN);
      /* CipherInput = (A_* || 1 || zeros(127-bitlen(A_*))) xor Offset_*  */
      buf_cpy (l_tmp, c->u_mode.ocb.aad_leftover, c->u_mode.ocb.aad_nleftover);
      memset (l_tmp + c->u_mode.ocb.aad_nleftover, 0,
              OCB_BLOCK_LEN - c->u_mode.ocb.aad_nleftover);
      l_tmp[c->u_mode.ocb.aad_nleftover] = 0x80;
      cipher_block_xor_1 (l_tmp, c->u_mode.ocb.aad_offset, OCB_BLOCK_LEN);
      /* Sum = Sum_m xor ENCIPHER(K, CipherInput)  */
      nburn = c->spec->encrypt (&c->context.c, l_tmp, l_tmp);
      burn = nburn > burn ? nburn : burn;
      cipher_block_xor_1 (c->u_mode.ocb.aad_sum, l_tmp, OCB_BLOCK_LEN);

      c->u_mode.ocb.aad_nleftover = 0;
    }

  /* Mark AAD as finalized so that _gcry_cipher_ocb_authenticate can
   * return an error when called again.  */
  c->u_mode.ocb.aad_finalized = 1;

  if (burn > 0)
    _gcry_burn_stack (burn + 4*sizeof(void*));
}
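
/* Padding note: writing the byte 0x80 after the leftover bytes realizes
 * (A_* || 1 || zeros(127-bitlen(A_*))) for byte-aligned input, since
 * 0x80 is a single 1 bit followed by seven 0 bits. */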


/* Checksumming for encrypt and decrypt.  */
static void
ocb_checksum (unsigned char *chksum, const unsigned char *plainbuf,
              size_t nblks)
{
  while (nblks > 0)
    {
      /* Checksum_i = Checksum_{i-1} xor P_i  */
      cipher_block_xor_1(chksum, plainbuf, OCB_BLOCK_LEN);

      plainbuf += OCB_BLOCK_LEN;
      nblks--;
    }
}


/* Common code for encrypt and decrypt.  */
static gcry_err_code_t
ocb_crypt (gcry_cipher_hd_t c, int encrypt,
           unsigned char *outbuf, size_t outbuflen,
           const unsigned char *inbuf, size_t inbuflen)
{
  const size_t table_maxblks = 1 << OCB_L_TABLE_SIZE;
  const u32 table_size_mask = ((1 << OCB_L_TABLE_SIZE) - 1);
  unsigned char l_tmp[OCB_BLOCK_LEN];
  unsigned int burn = 0;
  unsigned int nburn;
  gcry_cipher_encrypt_t crypt_fn =
      encrypt ? c->spec->encrypt : c->spec->decrypt;

  /* Check that a nonce and thus a key has been set and that we are
     not yet in the end-of-data state. */
  if (!c->marks.iv || c->u_mode.ocb.data_finalized)
    return GPG_ERR_INV_STATE;

  /* Check correct usage and arguments.  */
  if (c->spec->blocksize != OCB_BLOCK_LEN)
    return GPG_ERR_CIPHER_ALGO;
  if (outbuflen < inbuflen)
    return GPG_ERR_BUFFER_TOO_SHORT;
  if (c->marks.finalize)
    ; /* Allow arbitrary length. */
  else if ((inbuflen % OCB_BLOCK_LEN))
    return GPG_ERR_INV_LENGTH;  /* We support only full blocks for now.  */

  /* Full blocks handling. */
  while (inbuflen >= OCB_BLOCK_LEN)
    {
      size_t nblks = inbuflen / OCB_BLOCK_LEN;
      size_t nmaxblks;

      /* Check how many blocks to process till table overflow. */
      nmaxblks = (c->u_mode.ocb.data_nblocks + 1) % table_maxblks;
      nmaxblks = (table_maxblks - nmaxblks) % table_maxblks;

      if (nmaxblks == 0)
        {
          /* Table overflow, generate L and process one block. */
          c->u_mode.ocb.data_nblocks++;
          ocb_get_L_big(c, c->u_mode.ocb.data_nblocks, l_tmp);

          if (encrypt)
            {
              /* Checksum_i = Checksum_{i-1} xor P_i  */
              ocb_checksum (c->u_ctr.ctr, inbuf, 1);
            }

          /* Offset_i = Offset_{i-1} xor L_{ntz(i)} */
          cipher_block_xor_1 (c->u_iv.iv, l_tmp, OCB_BLOCK_LEN);
          /* C_i = Offset_i xor ENCIPHER(K, P_i xor Offset_i)  */
          cipher_block_xor (outbuf, c->u_iv.iv, inbuf, OCB_BLOCK_LEN);
          nburn = crypt_fn (&c->context.c, outbuf, outbuf);
          burn = nburn > burn ? nburn : burn;
          cipher_block_xor_1 (outbuf, c->u_iv.iv, OCB_BLOCK_LEN);

          if (!encrypt)
            {
              /* Checksum_i = Checksum_{i-1} xor P_i  */
              ocb_checksum (c->u_ctr.ctr, outbuf, 1);
            }

          inbuf += OCB_BLOCK_LEN;
          inbuflen -= OCB_BLOCK_LEN;
          outbuf += OCB_BLOCK_LEN;
          outbuflen -= OCB_BLOCK_LEN;
          nblks--;

          /* With overflow handled, retry loop again. Next overflow will
           * happen after 65535 blocks. */
          continue;
        }

      nblks = nblks < nmaxblks ? nblks : nmaxblks;

      /* Since checksum xoring is done before/after encryption/decryption,
         process input in 24KiB chunks to keep data loaded in L1 cache for
         checksumming.  However only do splitting if input is large enough
         so that the last chunk does not end up being short. */
      if (nblks > 32 * 1024 / OCB_BLOCK_LEN)
        nblks = 24 * 1024 / OCB_BLOCK_LEN;

      /* Use a bulk method if available.  */
      if (nblks && c->bulk.ocb_crypt)
        {
          size_t nleft;
          size_t ndone;

          nleft = c->bulk.ocb_crypt (c, outbuf, inbuf, nblks, encrypt);
          ndone = nblks - nleft;

          inbuf += ndone * OCB_BLOCK_LEN;
          outbuf += ndone * OCB_BLOCK_LEN;
          inbuflen -= ndone * OCB_BLOCK_LEN;
          outbuflen -= ndone * OCB_BLOCK_LEN;
          nblks = nleft;
        }

      if (nblks)
        {
          size_t nblks_chksum = nblks;

          if (encrypt)
            {
              /* Checksum_i = Checksum_{i-1} xor P_i  */
              ocb_checksum (c->u_ctr.ctr, inbuf, nblks_chksum);
            }

          /* Encrypt all full blocks.  */
          while (nblks)
            {
              c->u_mode.ocb.data_nblocks++;

              gcry_assert(c->u_mode.ocb.data_nblocks & table_size_mask);

              /* Offset_i = Offset_{i-1} xor L_{ntz(i)} */
              cipher_block_xor_1 (c->u_iv.iv,
                                  ocb_get_l (c, c->u_mode.ocb.data_nblocks),
                                  OCB_BLOCK_LEN);
              /* C_i = Offset_i xor ENCIPHER(K, P_i xor Offset_i)  */
              cipher_block_xor (outbuf, c->u_iv.iv, inbuf, OCB_BLOCK_LEN);
              nburn = crypt_fn (&c->context.c, outbuf, outbuf);
              burn = nburn > burn ? nburn : burn;
              cipher_block_xor_1 (outbuf, c->u_iv.iv, OCB_BLOCK_LEN);

              inbuf += OCB_BLOCK_LEN;
              inbuflen -= OCB_BLOCK_LEN;
              outbuf += OCB_BLOCK_LEN;
              outbuflen -= OCB_BLOCK_LEN;
              nblks--;
            }

          if (!encrypt)
            {
              /* Checksum_i = Checksum_{i-1} xor P_i  */
              ocb_checksum (c->u_ctr.ctr,
                            outbuf - nblks_chksum * OCB_BLOCK_LEN,
                            nblks_chksum);
            }
        }
    }

  /* Encrypt final partial block.  Note that we expect INBUFLEN to be
     shorter than OCB_BLOCK_LEN (see above).  */
  if (inbuflen)
    {
      unsigned char pad[OCB_BLOCK_LEN];

      /* Offset_* = Offset_m xor L_*  */
      cipher_block_xor_1 (c->u_iv.iv, c->u_mode.ocb.L_star, OCB_BLOCK_LEN);
      /* Pad = ENCIPHER(K, Offset_*) */
      nburn = c->spec->encrypt (&c->context.c, pad, c->u_iv.iv);
      burn = nburn > burn ? nburn : burn;

      if (encrypt)
        {
          /* Checksum_* = Checksum_m xor (P_* || 1 || zeros(127-bitlen(P_*))) */
          /* Note that INBUFLEN is less than OCB_BLOCK_LEN.  */
          buf_cpy (l_tmp, inbuf, inbuflen);
          memset (l_tmp + inbuflen, 0, OCB_BLOCK_LEN - inbuflen);
          l_tmp[inbuflen] = 0x80;
          cipher_block_xor_1 (c->u_ctr.ctr, l_tmp, OCB_BLOCK_LEN);
          /* C_* = P_* xor Pad[1..bitlen(P_*)] */
          buf_xor (outbuf, inbuf, pad, inbuflen);
        }
      else
        {
          /* P_* = C_* xor Pad[1..bitlen(C_*)] */
          /* Checksum_* = Checksum_m xor (P_* || 1 || zeros(127-bitlen(P_*))) */
          cipher_block_cpy (l_tmp, pad, OCB_BLOCK_LEN);
          buf_cpy (l_tmp, inbuf, inbuflen);
          cipher_block_xor_1 (l_tmp, pad, OCB_BLOCK_LEN);
          l_tmp[inbuflen] = 0x80;
          buf_cpy (outbuf, l_tmp, inbuflen);

          cipher_block_xor_1 (c->u_ctr.ctr, l_tmp, OCB_BLOCK_LEN);
        }
    }

  /* Compute the tag if the finalize flag has been set.  */
  if (c->marks.finalize)
    {
      /* Tag = ENCIPHER(K, Checksum xor Offset xor L_$) xor HASH(K,A) */
      cipher_block_xor (c->u_mode.ocb.tag, c->u_ctr.ctr, c->u_iv.iv,
                        OCB_BLOCK_LEN);
      cipher_block_xor_1 (c->u_mode.ocb.tag, c->u_mode.ocb.L_dollar,
                          OCB_BLOCK_LEN);
      nburn = c->spec->encrypt (&c->context.c,
                                c->u_mode.ocb.tag, c->u_mode.ocb.tag);
      burn = nburn > burn ? nburn : burn;

      c->u_mode.ocb.data_finalized = 1;
      /* Note that the final part of the tag computation is done
         by _gcry_cipher_ocb_get_tag.  */
    }

  if (burn > 0)
    _gcry_burn_stack (burn + 4*sizeof(void*));

  return 0;
}


/* Encrypt (INBUF,INBUFLEN) in OCB mode to OUTBUF.  OUTBUFLEN gives
   the allocated size of OUTBUF.  This function accepts only multiples
   of a full block unless gcry_cipher_final has been called in which
   case the next block may have any length.  */
gcry_err_code_t
_gcry_cipher_ocb_encrypt (gcry_cipher_hd_t c,
                          unsigned char *outbuf, size_t outbuflen,
                          const unsigned char *inbuf, size_t inbuflen)
{
  return ocb_crypt (c, 1, outbuf, outbuflen, inbuf, inbuflen);
}
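
/* End-to-end usage sketch via the public API, which dispatches to the
 * functions in this file.  The helper name, buffers and lengths are
 * hypothetical; error handling is condensed and <gcrypt.h> is assumed: */
#if 0
static gcry_error_t
ocb_seal_example (const unsigned char key[16],
                  const unsigned char nonce[12],
                  const unsigned char *aad, size_t aadlen,
                  const unsigned char *in, size_t inlen,
                  unsigned char *out, unsigned char tag[16])
{
  gcry_cipher_hd_t hd;
  gcry_error_t err;

  err = gcry_cipher_open (&hd, GCRY_CIPHER_AES128, GCRY_CIPHER_MODE_OCB, 0);
  if (!err) err = gcry_cipher_setkey (hd, key, 16);
  if (!err) err = gcry_cipher_setiv (hd, nonce, 12);   /* 64..120 bit nonce */
  if (!err && aadlen) err = gcry_cipher_authenticate (hd, aad, aadlen);
  if (!err) err = gcry_cipher_final (hd);   /* permit a short last block */
  if (!err) err = gcry_cipher_encrypt (hd, out, inlen, in, inlen);
  if (!err) err = gcry_cipher_gettag (hd, tag, 16);
  gcry_cipher_close (hd);
  return err;
}
#endif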


/* Decrypt (INBUF,INBUFLEN) in OCB mode to OUTBUF.  OUTBUFLEN gives
   the allocated size of OUTBUF.  This function accepts only multiples
   of a full block unless gcry_cipher_final has been called in which
   case the next block may have any length.  */
gcry_err_code_t
_gcry_cipher_ocb_decrypt (gcry_cipher_hd_t c,
                          unsigned char *outbuf, size_t outbuflen,
                          const unsigned char *inbuf, size_t inbuflen)
{
  return ocb_crypt (c, 0, outbuf, outbuflen, inbuf, inbuflen);
}


/* Compute the tag.  The last data operation has already done some
   part of it.  To allow adding AAD even after having done all data,
   we finish the tag computation only here.  */
static void
compute_tag_if_needed (gcry_cipher_hd_t c)
{
  if (!c->marks.tag)
    {
      ocb_aad_finalize (c);
      cipher_block_xor_1 (c->u_mode.ocb.tag, c->u_mode.ocb.aad_sum,
                          OCB_BLOCK_LEN);
      c->marks.tag = 1;
    }
}
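
/* Tag composition recap: ocb_crypt stored
 * ENCIPHER(K, Checksum_* xor Offset_* xor L_$) in c->u_mode.ocb.tag,
 * and the xor with HASH(K,A) (aad_sum) above completes
 *   Tag = ENCIPHER(K, Checksum_* xor Offset_* xor L_$) xor HASH(K,A).
 * Deferring this last xor is what allows AAD to be supplied even after
 * all data has been processed. */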


/* Copy the already computed tag to OUTTAG.  OUTTAGSIZE is the
   allocated size of OUTTAG; the function returns an error if that is
   too short to hold the tag.  */
gcry_err_code_t
_gcry_cipher_ocb_get_tag (gcry_cipher_hd_t c,
                          unsigned char *outtag, size_t outtagsize)
{
  if (c->u_mode.ocb.taglen > outtagsize)
    return GPG_ERR_BUFFER_TOO_SHORT;
  if (!c->u_mode.ocb.data_finalized)
    return GPG_ERR_INV_STATE; /* Data has not yet been finalized.  */

  compute_tag_if_needed (c);

  memcpy (outtag, c->u_mode.ocb.tag, c->u_mode.ocb.taglen);

  return 0;
}


/* Check that the tag (INTAG,TAGLEN) matches the computed tag for the
   handle C.  */
gcry_err_code_t
_gcry_cipher_ocb_check_tag (gcry_cipher_hd_t c, const unsigned char *intag,
                            size_t taglen)
{
  size_t n;

  if (!c->u_mode.ocb.data_finalized)
    return GPG_ERR_INV_STATE; /* Data has not yet been finalized.  */

  compute_tag_if_needed (c);

  n = c->u_mode.ocb.taglen;
  if (taglen < n)
    n = taglen;

  if (!buf_eq_const (intag, c->u_mode.ocb.tag, n)
      || c->u_mode.ocb.taglen != taglen)
    return GPG_ERR_CHECKSUM;

  return 0;
}
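
/* The matching decrypt-and-verify flow through the public API
 * (illustrative only; handle setup, names and lengths are assumed):
 *
 *   gcry_cipher_setiv (hd, nonce, 12);
 *   gcry_cipher_authenticate (hd, aad, aadlen);
 *   gcry_cipher_final (hd);
 *   gcry_cipher_decrypt (hd, out, outlen, in, inlen);
 *   if (gcry_cipher_checktag (hd, tag, 16))
 *     reject the message and discard OUT (GPG_ERR_CHECKSUM);
 *
 * buf_eq_const keeps the comparison constant-time so the position of a
 * mismatch is not leaked through timing. */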