Coverage Report

Created: 2026-02-14 06:49

/src/nettle/rsa-sec-compute-root.c
 Line |  Count | Source
    1 |        | /* rsa-sec-compute-root.c
    2 |        |
    3 |        |    Side-channel silent RSA root computation.
    4 |        |
    5 |        |    Copyright (C) 2018 Niels Möller
    6 |        |    Copyright (C) 2018 Red Hat, Inc
    7 |        |
    8 |        |    This file is part of GNU Nettle.
    9 |        |
   10 |        |    GNU Nettle is free software: you can redistribute it and/or
   11 |        |    modify it under the terms of either:
   12 |        |
   13 |        |      * the GNU Lesser General Public License as published by the Free
   14 |        |        Software Foundation; either version 3 of the License, or (at your
   15 |        |        option) any later version.
   16 |        |
   17 |        |    or
   18 |        |
   19 |        |      * the GNU General Public License as published by the Free
   20 |        |        Software Foundation; either version 2 of the License, or (at your
   21 |        |        option) any later version.
   22 |        |
   23 |        |    or both in parallel, as here.
   24 |        |
   25 |        |    GNU Nettle is distributed in the hope that it will be useful,
   26 |        |    but WITHOUT ANY WARRANTY; without even the implied warranty of
   27 |        |    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   28 |        |    General Public License for more details.
   29 |        |
   30 |        |    You should have received copies of the GNU General Public License and
   31 |        |    the GNU Lesser General Public License along with this program.  If
   32 |        |    not, see http://www.gnu.org/licenses/.
   33 |        | */
   34 |        |
   35 |        | #if HAVE_CONFIG_H
   36 |        | # include "config.h"
   37 |        | #endif
   38 |        |
   39 |        | #include <assert.h>
   40 |        |
   41 |        | #include "rsa.h"
   42 |        | #include "rsa-internal.h"
   43 |        | #include "gmp-glue.h"
   44 |        |
   45 |        | #if !NETTLE_USE_MINI_GMP
   46 |   365k | #define MAX(a, b) ((a) > (b) ? (a) : (b))
   47 |        |
   48 |        | /* Like mpn_sec_mul_itch, monotonously increasing in operand sizes. */
   49 |        | static mp_size_t
   50 |        | sec_mul_itch (mp_size_t an, mp_size_t bn)
   51 |  81.1k | {
   52 |  81.1k |   if (an >= bn)
   53 |  81.1k |     return mpn_sec_mul_itch (an, bn);
   54 |      0 |   else
   55 |      0 |     return mpn_sec_mul_itch (bn, an);
   56 |  81.1k | }
   57 |        |
   58 |        | /* Writes an + bn limbs to the rp area */
   59 |        | static void
   60 |        | sec_mul (mp_limb_t *rp,
   61 |        |    const mp_limb_t *ap, mp_size_t an,
   62 |        |    const mp_limb_t *bp, mp_size_t bn, mp_limb_t *scratch)
   63 |  60.8k | {
   64 |  60.8k |   if (an >= bn)
   65 |  60.8k |     mpn_sec_mul (rp, ap, an, bp, bn, scratch);
   66 |      0 |   else
   67 |      0 |     mpn_sec_mul (rp, bp, bn, ap, an, scratch);
   68 |  60.8k | }
   69 |        |
   70 |        | static mp_size_t
   71 |        | sec_mod_mul_itch (mp_size_t an, mp_size_t bn, mp_size_t mn)
   72 |  40.5k | {
   73 |  40.5k |   mp_size_t mul_itch = sec_mul_itch (an, bn);
   74 |  40.5k |   mp_size_t mod_itch = mpn_sec_div_r_itch (an + bn, mn);
   75 |  40.5k |   return MAX(mul_itch, mod_itch);
   76 |  40.5k | }
   77 |        |
   78 |        | /* Sets r <-- a b % m. Needs space for an + bn limbs at rp. It is
   79 |        |    required than an + bn >= mn. */
   80 |        | static void
   81 |        | sec_mod_mul (mp_limb_t *rp,
   82 |        |        const mp_limb_t *ap, mp_size_t an,
   83 |        |        const mp_limb_t *bp, mp_size_t bn,
   84 |        |        const mp_limb_t *mp, mp_size_t mn,
   85 |        |        mp_limb_t *scratch)
   86 |  40.5k | {
   87 |  40.5k |   assert (an + bn >= mn);
   88 |  40.5k |   sec_mul (rp, ap, an, bp, bn, scratch);
   89 |  40.5k |   mpn_sec_div_r (rp, an + bn, mp, mn, scratch);
   90 |  40.5k | }
   91 |        |
   92 |        | static mp_size_t
   93 |        | sec_powm_itch (mp_size_t bn, mp_size_t en, mp_size_t mn)
   94 |  81.1k | {
   95 |  81.1k |   mp_size_t mod_itch = bn + mpn_sec_div_r_itch (bn, mn);
   96 |  81.1k |   mp_size_t pow_itch = mn + mpn_sec_powm_itch (mn, en * GMP_NUMB_BITS, mn);
   97 |  81.1k |   return MAX (mod_itch, pow_itch);
   98 |  81.1k | }
   99 |        |
  100 |        | /* Sets r <-- b ^ e % m. Performs an initial reduction b mod m, and
  101 |        |    requires bn >= mn. */
  102 |        | static void
  103 |        | sec_powm (mp_limb_t *rp,
  104 |        |     const mp_limb_t *bp, mp_size_t bn,
  105 |        |     const mp_limb_t *ep, mp_size_t en,
  106 |        |     const mp_limb_t *mp, mp_size_t mn, mp_limb_t *scratch)
  107 |  40.5k | {
  108 |  40.5k |   assert (bn >= mn);
  109 |  40.5k |   assert (en <= mn);
  110 |  40.5k |   mpn_copyi (scratch, bp, bn);
  111 |  40.5k |   mpn_sec_div_r (scratch, bn, mp, mn, scratch + bn);
  112 |  40.5k |   mpn_sec_powm (rp, scratch, mn, ep, en * GMP_NUMB_BITS, mp, mn,
  113 |  40.5k |     scratch + mn);
  114 |  40.5k | }
  115 |        |
  116 |        | mp_size_t
  117 |        | _rsa_sec_compute_root_itch (const struct rsa_private_key *key)
  118 |  40.5k | {
  119 |  40.5k |   mp_size_t nn = NETTLE_OCTET_SIZE_TO_LIMB_SIZE (key->size);
  120 |  40.5k |   mp_size_t pn = mpz_size (key->p);
  121 |  40.5k |   mp_size_t qn = mpz_size (key->q);
  122 |  40.5k |   mp_size_t an = mpz_size (key->a);
  123 |  40.5k |   mp_size_t bn = mpz_size (key->b);
  124 |  40.5k |   mp_size_t cn = mpz_size (key->c);
  125 |        |
  126 |  40.5k |   mp_size_t powm_p_itch = sec_powm_itch (nn, an, pn);
  127 |  40.5k |   mp_size_t powm_q_itch = sec_powm_itch (nn, bn, qn);
  128 |  40.5k |   mp_size_t mod_mul_itch = cn + MAX(pn, qn)
  129 |  40.5k |     + sec_mod_mul_itch (MAX(pn, qn), cn, pn);
  130 |        |
  131 |  40.5k |   mp_size_t mul_itch = sec_mul_itch (qn, pn);
  132 |  40.5k |   mp_size_t add_1_itch = mpn_sec_add_1_itch (nn - qn);
  133 |        |
  134 |        |   /* pn + qn for the product q * r_mod_p' */
  135 |  40.5k |   mp_size_t itch = pn + qn + MAX (mul_itch, add_1_itch);
  136 |        |
  137 |  40.5k |   itch = MAX (itch, powm_p_itch);
  138 |  40.5k |   itch = MAX (itch, powm_q_itch);
  139 |  40.5k |   itch = MAX (itch, mod_mul_itch);
  140 |        |
  141 |        |   /* pn + qn for the r_mod_p and r_mod_q temporaries. */
  142 |  40.5k |   return pn + qn + itch;
  143 |  40.5k | }
  144 |        |
  145 |        | void
  146 |        | _rsa_sec_compute_root (const struct rsa_private_key *key,
  147 |        |            mp_limb_t *rp, const mp_limb_t *mp,
  148 |        |            mp_limb_t *scratch)
  149 |  20.2k | {
  150 |  20.2k |   mp_size_t nn = NETTLE_OCTET_SIZE_TO_LIMB_SIZE (key->size);
  151 |        |
  152 |        |   /* The common case is pn = qn. This function would be simpler if we
  153 |        |    * could require that pn >= qn. */
  154 |  20.2k |   const mp_limb_t *pp = mpz_limbs_read (key->p);
  155 |  20.2k |   const mp_limb_t *qp = mpz_limbs_read (key->q);
  156 |        |
  157 |  20.2k |   mp_size_t pn = mpz_size (key->p);
  158 |  20.2k |   mp_size_t qn = mpz_size (key->q);
  159 |  20.2k |   mp_size_t an = mpz_size (key->a);
  160 |  20.2k |   mp_size_t bn = mpz_size (key->b);
  161 |  20.2k |   mp_size_t cn = mpz_size (key->c);
  162 |        |
  163 |  20.2k |   mp_limb_t *r_mod_p = scratch;
  164 |  20.2k |   mp_limb_t *r_mod_q = scratch + pn;
  165 |  20.2k |   mp_limb_t *scratch_out = r_mod_q + qn;
  166 |  20.2k |   mp_limb_t cy;
  167 |        |
  168 |  20.2k |   assert (pn <= nn);
  169 |  20.2k |   assert (qn <= nn);
  170 |  20.2k |   assert (an <= pn);
  171 |  20.2k |   assert (bn <= qn);
  172 |  20.2k |   assert (cn <= pn);
  173 |        |
  174 |        |   /* Compute r_mod_p = m^d % p = (m%p)^a % p */
  175 |  20.2k |   sec_powm (r_mod_p, mp, nn, mpz_limbs_read (key->a), an, pp, pn, scratch_out);
  176 |        |   /* Compute r_mod_q = m^d % q = (m%q)^b % q */
  177 |  20.2k |   sec_powm (r_mod_q, mp, nn, mpz_limbs_read (key->b), bn, qp, qn, scratch_out);
  178 |        |
  179 |        |   /* Set r_mod_p' = r_mod_p * c % p - r_mod_q * c % p . */
  180 |  20.2k |   sec_mod_mul (scratch_out, r_mod_p, pn, mpz_limbs_read (key->c), cn, pp, pn,
  181 |  20.2k |          scratch_out + cn + pn);
  182 |  20.2k |   mpn_copyi (r_mod_p, scratch_out, pn);
  183 |        |
  184 |  20.2k |   sec_mod_mul (scratch_out, r_mod_q, qn, mpz_limbs_read (key->c), cn, pp, pn,
  185 |  20.2k |          scratch_out + cn + qn);
  186 |  20.2k |   cy = mpn_sub_n (r_mod_p, r_mod_p, scratch_out, pn);
  187 |  20.2k |   mpn_cnd_add_n (cy, r_mod_p, r_mod_p, pp, pn);
  188 |        |
  189 |        |   /* Finally, compute x = r_mod_q + q r_mod_p' */
  190 |  20.2k |   sec_mul (scratch_out, qp, qn, r_mod_p, pn, scratch_out + pn + qn);
  191 |        |
  192 |  20.2k |   cy = mpn_add_n (rp, scratch_out, r_mod_q, qn);
  193 |  20.2k |   mpn_sec_add_1 (rp + qn, scratch_out + qn, nn - qn, cy, scratch_out + pn + qn);
  194 |  20.2k | }
  195 |        | #endif
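
Usage note: the two exported entry points in this file work as a pair. _rsa_sec_compute_root_itch reports how many scratch limbs the side-channel silent computation needs, and _rsa_sec_compute_root then computes x = m^d mod n by the CRT recombination described in the comments above, x = r_mod_q + q * ((r_mod_p - r_mod_q) * c mod p), where c = q^-1 mod p; both are only built when NETTLE_USE_MINI_GMP is not set, per the surrounding #if. The sketch below is not part of the covered file: it is a minimal, illustrative caller, assuming a fully populated struct rsa_private_key and a message already converted to an nn-limb array, and it uses plain malloc for the scratch area purely for simplicity (Nettle's own callers use internal temporary-allocation helpers, and omit none of the error handling or scrubbing that real code needs).

/* Illustrative caller sketch, not part of rsa-sec-compute-root.c.
   rp: nn limbs receiving m^d mod n; mp: nn limbs holding m,
   where nn = NETTLE_OCTET_SIZE_TO_LIMB_SIZE (key->size). */
#include <stdlib.h>

#include "rsa.h"
#include "rsa-internal.h"   /* _rsa_sec_compute_root and its _itch function */

static int
example_compute_root (const struct rsa_private_key *key,
                      mp_limb_t *rp, const mp_limb_t *mp)
{
  /* Ask how much scratch the side-channel silent computation needs. */
  mp_size_t itch = _rsa_sec_compute_root_itch (key);
  mp_limb_t *scratch = malloc (itch * sizeof (*scratch));
  if (!scratch)
    return 0;

  /* Compute rp = mp^d mod n using the CRT parameters in key. */
  _rsa_sec_compute_root (key, rp, mp, scratch);

  free (scratch);
  return 1;
}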