Coverage Report

Created: 2025-07-23 06:43

/src/nettle/rsa-sec-compute-root.c
Line   Count  Source
   1          /* rsa-sec-compute-root.c
   2
   3             Side-channel silent RSA root computation.
   4
   5             Copyright (C) 2018 Niels Möller
   6             Copyright (C) 2018 Red Hat, Inc
   7
   8             This file is part of GNU Nettle.
   9
  10             GNU Nettle is free software: you can redistribute it and/or
  11             modify it under the terms of either:
  12
  13               * the GNU Lesser General Public License as published by the Free
  14                 Software Foundation; either version 3 of the License, or (at your
  15                 option) any later version.
  16
  17             or
  18
  19               * the GNU General Public License as published by the Free
  20                 Software Foundation; either version 2 of the License, or (at your
  21                 option) any later version.
  22
  23             or both in parallel, as here.
  24
  25             GNU Nettle is distributed in the hope that it will be useful,
  26             but WITHOUT ANY WARRANTY; without even the implied warranty of
  27             MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  28             General Public License for more details.
  29
  30             You should have received copies of the GNU General Public License and
  31             the GNU Lesser General Public License along with this program.  If
  32             not, see http://www.gnu.org/licenses/.
  33          */
  34
  35          #if HAVE_CONFIG_H
  36          # include "config.h"
  37          #endif
  38
  39          #include <assert.h>
  40
  41          #include "rsa.h"
  42          #include "rsa-internal.h"
  43          #include "gmp-glue.h"
  44
  45          #if !NETTLE_USE_MINI_GMP
  46    470k  #define MAX(a, b) ((a) > (b) ? (a) : (b))
  47
  48          /* Like mpn_sec_mul_itch, monotonously increasing in operand sizes. */
  49          static mp_size_t
  50          sec_mul_itch (mp_size_t an, mp_size_t bn)
  51    104k  {
  52    104k    if (an >= bn)
  53    104k      return mpn_sec_mul_itch (an, bn);
  54       0    else
  55       0      return mpn_sec_mul_itch (bn, an);
  56    104k  }
  57
  58          /* Writes an + bn limbs to the rp area */
  59          static void
  60          sec_mul (mp_limb_t *rp,
  61                   const mp_limb_t *ap, mp_size_t an,
  62                   const mp_limb_t *bp, mp_size_t bn, mp_limb_t *scratch)
  63   78.3k  {
  64   78.3k    if (an >= bn)
  65   78.3k      mpn_sec_mul (rp, ap, an, bp, bn, scratch);
  66       0    else
  67       0      mpn_sec_mul (rp, bp, bn, ap, an, scratch);
  68   78.3k  }
  69
  70          static mp_size_t
  71          sec_mod_mul_itch (mp_size_t an, mp_size_t bn, mp_size_t mn)
  72   52.2k  {
  73   52.2k    mp_size_t mul_itch = sec_mul_itch (an, bn);
  74   52.2k    mp_size_t mod_itch = mpn_sec_div_r_itch (an + bn, mn);
  75   52.2k    return MAX(mul_itch, mod_itch);
  76   52.2k  }
  77
  78          /* Sets r <-- a b % m. Needs space for an + bn limbs at rp. It is
  79             required that an + bn >= mn. */
  80          static void
  81          sec_mod_mul (mp_limb_t *rp,
  82                       const mp_limb_t *ap, mp_size_t an,
  83                       const mp_limb_t *bp, mp_size_t bn,
  84                       const mp_limb_t *mp, mp_size_t mn,
  85                       mp_limb_t *scratch)
  86   52.2k  {
  87   52.2k    assert (an + bn >= mn);
  88   52.2k    sec_mul (rp, ap, an, bp, bn, scratch);
  89   52.2k    mpn_sec_div_r (rp, an + bn, mp, mn, scratch);
  90   52.2k  }
  91
  92          static mp_size_t
  93          sec_powm_itch (mp_size_t bn, mp_size_t en, mp_size_t mn)
  94    104k  {
  95    104k    mp_size_t mod_itch = bn + mpn_sec_div_r_itch (bn, mn);
  96    104k    mp_size_t pow_itch = mn + mpn_sec_powm_itch (mn, en * GMP_NUMB_BITS, mn);
  97    104k    return MAX (mod_itch, pow_itch);
  98    104k  }
  99
 100          /* Sets r <-- b ^ e % m. Performs an initial reduction b mod m, and
 101             requires bn >= mn. */
 102          static void
 103          sec_powm (mp_limb_t *rp,
 104                    const mp_limb_t *bp, mp_size_t bn,
 105                    const mp_limb_t *ep, mp_size_t en,
 106                    const mp_limb_t *mp, mp_size_t mn, mp_limb_t *scratch)
 107   52.2k  {
 108   52.2k    assert (bn >= mn);
 109   52.2k    assert (en <= mn);
 110   52.2k    mpn_copyi (scratch, bp, bn);
 111   52.2k    mpn_sec_div_r (scratch, bn, mp, mn, scratch + bn);
 112   52.2k    mpn_sec_powm (rp, scratch, mn, ep, en * GMP_NUMB_BITS, mp, mn,
 113   52.2k                  scratch + mn);
 114   52.2k  }
 115
 116          mp_size_t
 117          _rsa_sec_compute_root_itch (const struct rsa_private_key *key)
 118   52.2k  {
 119   52.2k    mp_size_t nn = NETTLE_OCTET_SIZE_TO_LIMB_SIZE (key->size);
 120   52.2k    mp_size_t pn = mpz_size (key->p);
 121   52.2k    mp_size_t qn = mpz_size (key->q);
 122   52.2k    mp_size_t an = mpz_size (key->a);
 123   52.2k    mp_size_t bn = mpz_size (key->b);
 124   52.2k    mp_size_t cn = mpz_size (key->c);
 125
 126   52.2k    mp_size_t powm_p_itch = sec_powm_itch (nn, an, pn);
 127   52.2k    mp_size_t powm_q_itch = sec_powm_itch (nn, bn, qn);
 128   52.2k    mp_size_t mod_mul_itch = cn + MAX(pn, qn)
 129   52.2k      + sec_mod_mul_itch (MAX(pn, qn), cn, pn);
 130
 131   52.2k    mp_size_t mul_itch = sec_mul_itch (qn, pn);
 132   52.2k    mp_size_t add_1_itch = mpn_sec_add_1_itch (nn - qn);
 133
 134            /* pn + qn for the product q * r_mod_p' */
 135   52.2k    mp_size_t itch = pn + qn + MAX (mul_itch, add_1_itch);
 136
 137   52.2k    itch = MAX (itch, powm_p_itch);
 138   52.2k    itch = MAX (itch, powm_q_itch);
 139   52.2k    itch = MAX (itch, mod_mul_itch);
 140
 141            /* pn + qn for the r_mod_p and r_mod_q temporaries. */
 142   52.2k    return pn + qn + itch;
 143   52.2k  }
 144
 145          void
 146          _rsa_sec_compute_root (const struct rsa_private_key *key,
 147                                 mp_limb_t *rp, const mp_limb_t *mp,
 148                                 mp_limb_t *scratch)
 149   26.1k  {
 150   26.1k    mp_size_t nn = NETTLE_OCTET_SIZE_TO_LIMB_SIZE (key->size);
 151
 152            /* The common case is pn = qn. This function would be simpler if we
 153             * could require that pn >= qn. */
 154   26.1k    const mp_limb_t *pp = mpz_limbs_read (key->p);
 155   26.1k    const mp_limb_t *qp = mpz_limbs_read (key->q);
 156
 157   26.1k    mp_size_t pn = mpz_size (key->p);
 158   26.1k    mp_size_t qn = mpz_size (key->q);
 159   26.1k    mp_size_t an = mpz_size (key->a);
 160   26.1k    mp_size_t bn = mpz_size (key->b);
 161   26.1k    mp_size_t cn = mpz_size (key->c);
 162
 163   26.1k    mp_limb_t *r_mod_p = scratch;
 164   26.1k    mp_limb_t *r_mod_q = scratch + pn;
 165   26.1k    mp_limb_t *scratch_out = r_mod_q + qn;
 166   26.1k    mp_limb_t cy;
 167
 168   26.1k    assert (pn <= nn);
 169   26.1k    assert (qn <= nn);
 170   26.1k    assert (an <= pn);
 171   26.1k    assert (bn <= qn);
 172   26.1k    assert (cn <= pn);
 173
 174            /* Compute r_mod_p = m^d % p = (m%p)^a % p */
 175   26.1k    sec_powm (r_mod_p, mp, nn, mpz_limbs_read (key->a), an, pp, pn, scratch_out);
 176            /* Compute r_mod_q = m^d % q = (m%q)^b % q */
 177   26.1k    sec_powm (r_mod_q, mp, nn, mpz_limbs_read (key->b), bn, qp, qn, scratch_out);
 178
 179            /* Set r_mod_p' = r_mod_p * c % p - r_mod_q * c % p . */
 180   26.1k    sec_mod_mul (scratch_out, r_mod_p, pn, mpz_limbs_read (key->c), cn, pp, pn,
 181   26.1k                 scratch_out + cn + pn);
 182   26.1k    mpn_copyi (r_mod_p, scratch_out, pn);
 183
 184   26.1k    sec_mod_mul (scratch_out, r_mod_q, qn, mpz_limbs_read (key->c), cn, pp, pn,
 185   26.1k                 scratch_out + cn + qn);
 186   26.1k    cy = mpn_sub_n (r_mod_p, r_mod_p, scratch_out, pn);
 187   26.1k    mpn_cnd_add_n (cy, r_mod_p, r_mod_p, pp, pn);
 188
 189            /* Finally, compute x = r_mod_q + q r_mod_p' */
 190   26.1k    sec_mul (scratch_out, qp, qn, r_mod_p, pn, scratch_out + pn + qn);
 191
 192   26.1k    cy = mpn_add_n (rp, scratch_out, r_mod_q, qn);
 193   26.1k    mpn_sec_add_1 (rp + qn, scratch_out + qn, nn - qn, cy, scratch_out + pn + qn);
 194   26.1k  }
 195          #endif
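
Note: Nettle's struct rsa_private_key stores a = d % (p-1), b = d % (q-1) and c = q^-1 % p, so _rsa_sec_compute_root above is Garner's CRT recombination, x = r_mod_q + q * (c * (r_mod_p - r_mod_q) % p), built from side-channel-silent mpn_sec_* primitives. The sketch below shows how the two exported functions are intended to be paired: query the scratch size first, then run the computation on a caller-supplied scratch area. It is a minimal illustration only, assuming a build inside the Nettle tree against real GMP (the !NETTLE_USE_MINI_GMP branch covered above); sec_root_demo is a hypothetical name, and the malloc-based scratch handling stands in for whatever allocation a real caller uses.

    #include <stdlib.h>

    #include "rsa.h"
    #include "rsa-internal.h"

    /* Hypothetical helper: computes rp = mp^d mod n. Both rp and mp are
       nn = NETTLE_OCTET_SIZE_TO_LIMB_SIZE (key->size) limbs, with the
       value in mp smaller than the public modulus n. Returns 1 on
       success, 0 on allocation failure. */
    static int
    sec_root_demo (const struct rsa_private_key *key,
                   mp_limb_t *rp, const mp_limb_t *mp)
    {
      /* First ask how many scratch limbs the computation needs. */
      mp_size_t itch = _rsa_sec_compute_root_itch (key);
      mp_limb_t *scratch = malloc (itch * sizeof (mp_limb_t));
      if (!scratch)
        return 0;

      /* Then do the two half-size exponentiations mod p and mod q and
         the CRT recombination, entirely within the scratch area. */
      _rsa_sec_compute_root (key, rp, mp, scratch);

      free (scratch);
      return 1;
    }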