Coverage Report

Created: 2024-07-23 07:36

/src/gnutls/lib/nettle/backport/rsa-sign-tr.c

Every line with a recorded execution count in this report shows a count of
0, so nothing in this file was exercised by the run. The counted lines all
lie in the full-GMP branch (the #else of NETTLE_USE_MINI_GMP below), which
is the branch this build compiled. The source follows.
/* rsa-sign-tr.c

   Creating RSA signatures, with some additional checks.

   Copyright (C) 2001, 2015 Niels Möller
   Copyright (C) 2012 Nikos Mavrogiannopoulos
   Copyright (C) 2018 Red Hat Inc.

   This file is part of GNU Nettle.

   GNU Nettle is free software: you can redistribute it and/or
   modify it under the terms of either:

     * the GNU Lesser General Public License as published by the Free
       Software Foundation; either version 3 of the License, or (at your
       option) any later version.

   or

     * the GNU General Public License as published by the Free
       Software Foundation; either version 2 of the License, or (at your
       option) any later version.

   or both in parallel, as here.

   GNU Nettle is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received copies of the GNU General Public License and
   the GNU Lesser General Public License along with this program.  If
   not, see http://www.gnu.org/licenses/.
*/

#if HAVE_CONFIG_H
# include "config.h"
#endif

#include <assert.h>

#include "gmp-glue.h"
#define nettle_rsa_compute_root_tr _gnutls_nettle_backport_rsa_compute_root_tr

#include <nettle/rsa.h>
#include "rsa-internal.h"

#define MAX(a, b) ((a) > (b) ? (a) : (b))

#if NETTLE_USE_MINI_GMP
/* Blinds m, by computing c = m r^e (mod n), for a random r. Also
   returns the inverse (ri), for use by rsa_unblind. */
static void
rsa_blind (const struct rsa_public_key *pub,
           void *random_ctx, nettle_random_func *random,
           mpz_t c, mpz_t ri, const mpz_t m)
{
  mpz_t r;

  mpz_init(r);

  /* c = m*(r^e)
   * ri = r^(-1)
   */
  do
    {
      nettle_mpz_random(r, random_ctx, random, pub->n);
      /* invert r */
    }
  while (!mpz_invert (ri, r, pub->n));

  /* c = m*(r^e) mod n */
  mpz_powm_sec(r, r, pub->e, pub->n);
  mpz_mul(c, m, r);
  mpz_fdiv_r(c, c, pub->n);

  mpz_clear(r);
}

/* m = c ri mod n */
static void
rsa_unblind (const struct rsa_public_key *pub,
             mpz_t m, const mpz_t ri, const mpz_t c)
{
  mpz_mul(m, c, ri);
  mpz_fdiv_r(m, m, pub->n);
}
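A worked example of the identity behind this blind/unblind pair: since
e*d = 1 (mod lambda(n)), we have (m r^e)^d = m^d r (mod n), so multiplying
by ri = r^(-1) after the private-key operation recovers the signature
m^d. The standalone sketch below (not part of rsa-sign-tr.c; plain GMP
with a textbook toy key, compile with -lgmp) checks the round trip. For
brevity it uses mpz_powm and a fixed r, where the real code uses
mpz_powm_sec and a random r.

#include <gmp.h>

int main(void)
{
  mpz_t n, e, d, m, r, ri, c, s, expect;
  mpz_inits(n, e, d, m, r, ri, c, s, expect, NULL);

  /* Toy key: n = 61*53 = 3233, e = 17, d = 413 (e*d = 1 mod lcm(60,52)). */
  mpz_set_ui(n, 3233); mpz_set_ui(e, 17); mpz_set_ui(d, 413);
  mpz_set_ui(m, 65);
  mpz_set_ui(r, 1234);          /* stand-in for the random blinder */

  mpz_invert(ri, r, n);         /* ri = r^(-1) mod n */
  mpz_powm(c, r, e, n);         /* c = r^e mod n */
  mpz_mul(c, c, m);
  mpz_fdiv_r(c, c, n);          /* c = m*(r^e) mod n  (blinded input) */

  mpz_powm(s, c, d, n);         /* private operation on the blinded c */
  mpz_mul(s, s, ri);
  mpz_fdiv_r(s, s, n);          /* unblind: (m r^e)^d * r^(-1) = m^d */

  mpz_powm(expect, m, d, n);    /* unblinded reference value */
  gmp_printf("blinded path %Zd, direct path %Zd\n", s, expect);

  mpz_clears(n, e, d, m, r, ri, c, s, expect, NULL);
  return 0;
}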
/* Checks for any errors in the RSA computation. That avoids attacks
 * which rely on faults in hardware, or even in the software MPI
 * implementation. */
int
rsa_compute_root_tr(const struct rsa_public_key *pub,
                    const struct rsa_private_key *key,
                    void *random_ctx, nettle_random_func *random,
                    mpz_t x, const mpz_t m)
{
  int res;
  mpz_t t, mb, xb, ri;

  /* mpz_powm_sec handles only odd moduli. If p, q or n is even, the
     key is invalid and rejected by rsa_private_key_prepare. However,
     some applications, notably gnutls, don't use this function, and
     we don't want an invalid key to lead to a crash down inside
     mpz_powm_sec. So do an additional check here. */
  if (mpz_even_p (pub->n) || mpz_even_p (key->p) || mpz_even_p (key->q))
    return 0;

  mpz_init (mb);
  mpz_init (xb);
  mpz_init (ri);
  mpz_init (t);

  rsa_blind (pub, random_ctx, random, mb, ri, m);

  rsa_compute_root (key, xb, mb);

  mpz_powm_sec(t, xb, pub->e, pub->n);
  res = (mpz_cmp(mb, t) == 0);

  if (res)
    rsa_unblind (pub, x, ri, xb);

  mpz_clear (mb);
  mpz_clear (xb);
  mpz_clear (ri);
  mpz_clear (t);

  return res;
}
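A usage sketch for the function above, not part of this file: any
nettle_random_func supplies the blinding randomness, and a zero return
(even modulus, or a detected fault) means the output must not be used.
knuth_lfib is nettle's deterministic test generator and only stands in
for a real CSPRNG here; the helper name is hypothetical.

#include <nettle/rsa.h>
#include <nettle/knuth-lfib.h>

/* Hypothetical helper: sig = m^d mod n, blinded and fault-checked.
   Returns 0 on failure, in which case sig must be discarded. */
static int
sign_representative(const struct rsa_public_key *pub,
                    const struct rsa_private_key *key,
                    mpz_t sig, const mpz_t m)
{
  struct knuth_lfib_ctx rng;
  knuth_lfib_init(&rng, 4711);   /* test PRNG; use a CSPRNG in real code */

  return rsa_compute_root_tr(pub, key,
                             &rng, (nettle_random_func *) knuth_lfib_random,
                             sig, m);
}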
int
_rsa_sec_compute_root_tr(const struct rsa_public_key *pub,
                         const struct rsa_private_key *key,
                         void *random_ctx, nettle_random_func *random,
                         mp_limb_t *x, const mp_limb_t *m)
{
  mp_size_t nn;
  mpz_t mz;
  mpz_t xz;
  int res;

  mpz_init(xz);

  nn = mpz_size (pub->n);

  res = rsa_compute_root_tr(pub, key, random_ctx, random, xz,
                            mpz_roinit_n(mz, m, nn));

  if (res)
    mpz_limbs_copy(x, xz, nn);

  mpz_clear(xz);
  return res;
}
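The mpz_roinit_n call above wraps a raw limb array in a read-only mpz_t
view without allocating or copying (mpz_limbs_copy, from nettle's
gmp-glue, handles the opposite direction). A standalone sketch of the
view, assuming plain GMP and -lgmp:

#include <gmp.h>

int main(void)
{
  mp_limb_t limbs[1] = { 42 };
  mpz_t view;   /* initialized by mpz_roinit_n; never mpz_clear'd */

  /* Prints 42: the mpz shares storage with limbs[]. */
  gmp_printf("%Zd\n", mpz_roinit_n(view, limbs, 1));
  return 0;
}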
#else
/* Blinds m, by computing c = m r^e (mod n), for a random r. Also
   returns the inverse (ri), for use by rsa_sec_unblind. Must have
   c != m, no in-place operation. */
static void
rsa_sec_blind (const struct rsa_public_key *pub,
               void *random_ctx, nettle_random_func *random,
               mp_limb_t *c, mp_limb_t *ri, const mp_limb_t *m)
{
  const mp_limb_t *ep = mpz_limbs_read (pub->e);
  const mp_limb_t *np = mpz_limbs_read (pub->n);
  mp_bitcnt_t ebn = mpz_sizeinbase (pub->e, 2);
  mp_size_t nn = mpz_size (pub->n);
  size_t itch;
  size_t i2;
  mp_limb_t *scratch;
  TMP_GMP_DECL (tp, mp_limb_t);
  TMP_GMP_DECL (rp, mp_limb_t);
  TMP_GMP_DECL (r, uint8_t);

  TMP_GMP_ALLOC (rp, nn);
  TMP_GMP_ALLOC (r, nn * sizeof(mp_limb_t));

  /* c = m*(r^e) mod n */
  itch = mpn_sec_powm_itch(nn, ebn, nn);
  i2 = mpn_sec_mul_itch(nn, nn);
  itch = MAX(itch, i2);
  i2 = mpn_sec_div_r_itch(2*nn, nn);
  itch = MAX(itch, i2);
  i2 = mpn_sec_invert_itch(nn);
  itch = MAX(itch, i2);

  TMP_GMP_ALLOC (tp, 2*nn + itch);
  scratch = tp + 2*nn;

  /* ri = r^(-1) */
  do
    {
      random(random_ctx, nn * sizeof(mp_limb_t), (uint8_t *)r);
      mpn_set_base256(rp, nn, r, nn * sizeof(mp_limb_t));
      mpn_copyi(tp, rp, nn);
      /* invert r */
    }
  while (!mpn_sec_invert (ri, tp, np, nn, 2 * nn * GMP_NUMB_BITS, scratch));

  mpn_sec_powm (c, rp, nn, ep, ebn, np, nn, scratch);
  mpn_sec_mul (tp, c, nn, m, nn, scratch);
  mpn_sec_div_r (tp, 2*nn, np, nn, scratch);
  mpn_copyi(c, tp, nn);

  TMP_GMP_FREE (r);
  TMP_GMP_FREE (rp);
  TMP_GMP_FREE (tp);
}
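Each mpn_sec_* call above needs caller-provided scratch space, sized by
the matching *_itch function; taking the maximum of the itch values lets
one allocation serve every operation. A standalone sketch of that
convention, assuming a GMP with the mpn_sec interface (GMP 6.0 or
later), compiled with -lgmp:

#include <gmp.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
  /* Single-limb operands: compute 5^3 mod 7 in constant time. */
  mp_limb_t bp[1] = { 5 }, ep[1] = { 3 }, np[1] = { 7 }, rp[1];
  mp_bitcnt_t ebn = 2;                     /* 3 is a two-bit exponent */

  mp_size_t itch = mpn_sec_powm_itch(1, ebn, 1);
  mp_limb_t *scratch = malloc((itch + 1) * sizeof(mp_limb_t));

  mpn_sec_powm(rp, bp, 1, ep, ebn, np, 1, scratch);
  printf("%lu\n", (unsigned long) rp[0]);  /* prints 6 */

  free(scratch);
  return 0;
}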
/* m = c ri mod n. Allows x == c. */
static void
rsa_sec_unblind (const struct rsa_public_key *pub,
                 mp_limb_t *x, mp_limb_t *ri, const mp_limb_t *c)
{
  const mp_limb_t *np = mpz_limbs_read (pub->n);
  mp_size_t nn = mpz_size (pub->n);

  size_t itch;
  size_t i2;
  mp_limb_t *scratch;
  TMP_GMP_DECL(tp, mp_limb_t);

  itch = mpn_sec_mul_itch(nn, nn);
  i2 = mpn_sec_div_r_itch(nn + nn, nn);
  itch = MAX(itch, i2);

  TMP_GMP_ALLOC (tp, nn + nn + itch);
  scratch = tp + nn + nn;

  mpn_sec_mul (tp, c, nn, ri, nn, scratch);
  mpn_sec_div_r (tp, nn + nn, np, nn, scratch);
  mpn_copyi(x, tp, nn);

  TMP_GMP_FREE (tp);
}
static int
sec_equal(const mp_limb_t *a, const mp_limb_t *b, size_t limbs)
{
  volatile mp_limb_t z = 0;
  size_t i;

  for (i = 0; i < limbs; i++)
    {
      z |= (a[i] ^ b[i]);
    }

  return z == 0;
}
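sec_equal accumulates limb differences with OR instead of returning at
the first mismatch, so its running time does not reveal where (or
whether) the operands differ, unlike memcmp. The same pattern for byte
buffers, as a standalone sketch:

#include <stddef.h>
#include <stdint.h>

/* Returns 1 iff a and b match in all len bytes; the loop always runs
   to completion, regardless of where any difference occurs. */
static int
ct_memeq(const uint8_t *a, const uint8_t *b, size_t len)
{
  volatile uint8_t z = 0;
  size_t i;

  for (i = 0; i < len; i++)
    z |= (uint8_t) (a[i] ^ b[i]);

  return z == 0;
}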
static int
rsa_sec_check_root(const struct rsa_public_key *pub,
                   const mp_limb_t *x, const mp_limb_t *m)
{
  mp_size_t nn = mpz_size (pub->n);
  mp_size_t ebn = mpz_sizeinbase (pub->e, 2);
  const mp_limb_t *np = mpz_limbs_read (pub->n);
  const mp_limb_t *ep = mpz_limbs_read (pub->e);
  int ret;

  mp_size_t itch;

  mp_limb_t *scratch;
  TMP_GMP_DECL(tp, mp_limb_t);

  itch = mpn_sec_powm_itch (nn, ebn, nn);
  TMP_GMP_ALLOC (tp, nn + itch);
  scratch = tp + nn;

  mpn_sec_powm(tp, x, nn, ep, ebn, np, nn, scratch);
  ret = sec_equal(tp, m, nn);

  TMP_GMP_FREE (tp);
  return ret;
}
static void
cnd_mpn_zero (int cnd, volatile mp_ptr rp, mp_size_t n)
{
  volatile mp_limb_t c;
  volatile mp_limb_t mask = (mp_limb_t) cnd - 1;

  while (--n >= 0)
    {
      c = rp[n];
      c &= mask;
      rp[n] = c;
    }
}
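The mask (mp_limb_t) cnd - 1 is 0 when cnd is 1 (every limb is cleared)
and all-ones when cnd is 0 (every limb is kept), so both outcomes execute
identical loads and stores. The same trick gives a branch-free
conditional copy, sketched here with a hypothetical cnd_copy_u64:

#include <stddef.h>
#include <stdint.h>

/* If cnd is 1, copies src into dst; if cnd is 0, leaves dst unchanged.
   Either way it performs the same reads and writes, with no
   data-dependent branch. */
static void
cnd_copy_u64(int cnd, uint64_t *dst, const uint64_t *src, size_t n)
{
  uint64_t mask = (uint64_t) 0 - (uint64_t) (cnd != 0);  /* 0 or ~0 */
  size_t i;

  for (i = 0; i < n; i++)
    dst[i] = (dst[i] & ~mask) | (src[i] & mask);
}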
/* Checks for any errors in the RSA computation. That avoids attacks
 * which rely on faults in hardware, or even in the software MPI
 * implementation.
 * This version is side-channel silent even in case of error;
 * the destination buffer is always overwritten. */
int
_rsa_sec_compute_root_tr(const struct rsa_public_key *pub,
                         const struct rsa_private_key *key,
                         void *random_ctx, nettle_random_func *random,
                         mp_limb_t *x, const mp_limb_t *m)
{
  TMP_GMP_DECL (c, mp_limb_t);
  TMP_GMP_DECL (ri, mp_limb_t);
  TMP_GMP_DECL (scratch, mp_limb_t);
  size_t key_limb_size;
  int ret;

  key_limb_size = mpz_size(pub->n);

  /* mpz_powm_sec handles only odd moduli. If p, q or n is even, the
     key is invalid and rejected by rsa_private_key_prepare. However,
     some applications, notably gnutls, don't use this function, and
     we don't want an invalid key to lead to a crash down inside
     mpz_powm_sec. So do an additional check here. */
  if (mpz_even_p (pub->n) || mpz_even_p (key->p) || mpz_even_p (key->q))
    {
      mpn_zero(x, key_limb_size);
      return 0;
    }

  assert(mpz_size(pub->n) == key_limb_size);

  TMP_GMP_ALLOC (c, key_limb_size);
  TMP_GMP_ALLOC (ri, key_limb_size);
  TMP_GMP_ALLOC (scratch, _rsa_sec_compute_root_itch(key));

  rsa_sec_blind (pub, random_ctx, random, c, ri, m);

  _rsa_sec_compute_root(key, x, c, scratch);

  ret = rsa_sec_check_root(pub, x, c);

  rsa_sec_unblind(pub, x, ri, x);

  cnd_mpn_zero(1 - ret, x, key_limb_size);

  TMP_GMP_FREE (scratch);
  TMP_GMP_FREE (ri);
  TMP_GMP_FREE (c);
  return ret;
}
/* Checks for any errors in the RSA computation. That avoids attacks
 * which rely on faults in hardware, or even in the software MPI
 * implementation.
 * This version is maintained for API compatibility reasons. It
 * is not completely side-channel silent: there are conditionals
 * in the buffer copying, in both the success and the error case.
 */
int
rsa_compute_root_tr(const struct rsa_public_key *pub,
                    const struct rsa_private_key *key,
                    void *random_ctx, nettle_random_func *random,
                    mpz_t x, const mpz_t m)
{
  TMP_GMP_DECL (l, mp_limb_t);
  mp_size_t nn = mpz_size(pub->n);
  int res;

  TMP_GMP_ALLOC (l, nn);
  mpz_limbs_copy(l, m, nn);

  res = _rsa_sec_compute_root_tr (pub, key, random_ctx, random, l, l);
  if (res) {
    mp_limb_t *xp = mpz_limbs_write (x, nn);
    mpn_copyi (xp, l, nn);
    mpz_limbs_finish (x, nn);
  }

  TMP_GMP_FREE (l);
  return res;
}
#endif