Coverage Report

Created: 2025-12-31 06:37

/src/nettle/rsa-sec-compute-root.c
 Line | Count | Source
    1 |       | /* rsa-sec-compute-root.c
    2 |       |
    3 |       |    Side-channel silent RSA root computation.
    4 |       |
    5 |       |    Copyright (C) 2018 Niels Möller
    6 |       |    Copyright (C) 2018 Red Hat, Inc
    7 |       |
    8 |       |    This file is part of GNU Nettle.
    9 |       |
   10 |       |    GNU Nettle is free software: you can redistribute it and/or
   11 |       |    modify it under the terms of either:
   12 |       |
   13 |       |      * the GNU Lesser General Public License as published by the Free
   14 |       |        Software Foundation; either version 3 of the License, or (at your
   15 |       |        option) any later version.
   16 |       |
   17 |       |    or
   18 |       |
   19 |       |      * the GNU General Public License as published by the Free
   20 |       |        Software Foundation; either version 2 of the License, or (at your
   21 |       |        option) any later version.
   22 |       |
   23 |       |    or both in parallel, as here.
   24 |       |
   25 |       |    GNU Nettle is distributed in the hope that it will be useful,
   26 |       |    but WITHOUT ANY WARRANTY; without even the implied warranty of
   27 |       |    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   28 |       |    General Public License for more details.
   29 |       |
   30 |       |    You should have received copies of the GNU General Public License and
   31 |       |    the GNU Lesser General Public License along with this program.  If
   32 |       |    not, see http://www.gnu.org/licenses/.
   33 |       | */
   34 |       |
   35 |       | #if HAVE_CONFIG_H
   36 |       | # include "config.h"
   37 |       | #endif
   38 |       |
   39 |       | #include <assert.h>
   40 |       |
   41 |       | #include "rsa.h"
   42 |       | #include "rsa-internal.h"
   43 |       | #include "gmp-glue.h"
   44 |       |
   45 |       | #if !NETTLE_USE_MINI_GMP
   46 |  481k | #define MAX(a, b) ((a) > (b) ? (a) : (b))
   47 |       |
   48 |       | /* Like mpn_sec_mul_itch, monotonously increasing in operand sizes. */
   49 |       | static mp_size_t
   50 |       | sec_mul_itch (mp_size_t an, mp_size_t bn)
   51 |  106k | {
   52 |  106k |   if (an >= bn)
   53 |  106k |     return mpn_sec_mul_itch (an, bn);
   54 |     0 |   else
   55 |     0 |     return mpn_sec_mul_itch (bn, an);
   56 |  106k | }
   57 |       |
   58 |       | /* Writes an + bn limbs to the rp area */
   59 |       | static void
   60 |       | sec_mul (mp_limb_t *rp,
   61 |       |    const mp_limb_t *ap, mp_size_t an,
   62 |       |    const mp_limb_t *bp, mp_size_t bn, mp_limb_t *scratch)
   63 | 80.2k | {
   64 | 80.2k |   if (an >= bn)
   65 | 80.2k |     mpn_sec_mul (rp, ap, an, bp, bn, scratch);
   66 |     0 |   else
   67 |     0 |     mpn_sec_mul (rp, bp, bn, ap, an, scratch);
   68 | 80.2k | }
   69 |       |
   70 |       | static mp_size_t
   71 |       | sec_mod_mul_itch (mp_size_t an, mp_size_t bn, mp_size_t mn)
   72 | 53.4k | {
   73 | 53.4k |   mp_size_t mul_itch = sec_mul_itch (an, bn);
   74 | 53.4k |   mp_size_t mod_itch = mpn_sec_div_r_itch (an + bn, mn);
   75 | 53.4k |   return MAX(mul_itch, mod_itch);
   76 | 53.4k | }
   77 |       |
   78 |       | /* Sets r <-- a b % m. Needs space for an + bn limbs at rp. It is
   79 |       |    required than an + bn >= mn. */
   80 |       | static void
   81 |       | sec_mod_mul (mp_limb_t *rp,
   82 |       |        const mp_limb_t *ap, mp_size_t an,
   83 |       |        const mp_limb_t *bp, mp_size_t bn,
   84 |       |        const mp_limb_t *mp, mp_size_t mn,
   85 |       |        mp_limb_t *scratch)
   86 | 53.4k | {
   87 | 53.4k |   assert (an + bn >= mn);
   88 | 53.4k |   sec_mul (rp, ap, an, bp, bn, scratch);
   89 | 53.4k |   mpn_sec_div_r (rp, an + bn, mp, mn, scratch);
   90 | 53.4k | }
   91 |       |
   92 |       | static mp_size_t
   93 |       | sec_powm_itch (mp_size_t bn, mp_size_t en, mp_size_t mn)
   94 |  106k | {
   95 |  106k |   mp_size_t mod_itch = bn + mpn_sec_div_r_itch (bn, mn);
   96 |  106k |   mp_size_t pow_itch = mn + mpn_sec_powm_itch (mn, en * GMP_NUMB_BITS, mn);
   97 |  106k |   return MAX (mod_itch, pow_itch);
   98 |  106k | }
   99 |       |
  100 |       | /* Sets r <-- b ^ e % m. Performs an initial reduction b mod m, and
  101 |       |    requires bn >= mn. */
  102 |       | static void
  103 |       | sec_powm (mp_limb_t *rp,
  104 |       |     const mp_limb_t *bp, mp_size_t bn,
  105 |       |     const mp_limb_t *ep, mp_size_t en,
  106 |       |     const mp_limb_t *mp, mp_size_t mn, mp_limb_t *scratch)
  107 | 53.4k | {
  108 | 53.4k |   assert (bn >= mn);
  109 | 53.4k |   assert (en <= mn);
  110 | 53.4k |   mpn_copyi (scratch, bp, bn);
  111 | 53.4k |   mpn_sec_div_r (scratch, bn, mp, mn, scratch + bn);
  112 | 53.4k |   mpn_sec_powm (rp, scratch, mn, ep, en * GMP_NUMB_BITS, mp, mn,
  113 | 53.4k |     scratch + mn);
  114 | 53.4k | }
  115 |       |
  116 |       | mp_size_t
  117 |       | _rsa_sec_compute_root_itch (const struct rsa_private_key *key)
  118 | 53.4k | {
  119 | 53.4k |   mp_size_t nn = NETTLE_OCTET_SIZE_TO_LIMB_SIZE (key->size);
  120 | 53.4k |   mp_size_t pn = mpz_size (key->p);
  121 | 53.4k |   mp_size_t qn = mpz_size (key->q);
  122 | 53.4k |   mp_size_t an = mpz_size (key->a);
  123 | 53.4k |   mp_size_t bn = mpz_size (key->b);
  124 | 53.4k |   mp_size_t cn = mpz_size (key->c);
  125 |       |
  126 | 53.4k |   mp_size_t powm_p_itch = sec_powm_itch (nn, an, pn);
  127 | 53.4k |   mp_size_t powm_q_itch = sec_powm_itch (nn, bn, qn);
  128 | 53.4k |   mp_size_t mod_mul_itch = cn + MAX(pn, qn)
  129 | 53.4k |     + sec_mod_mul_itch (MAX(pn, qn), cn, pn);
  130 |       |
  131 | 53.4k |   mp_size_t mul_itch = sec_mul_itch (qn, pn);
  132 | 53.4k |   mp_size_t add_1_itch = mpn_sec_add_1_itch (nn - qn);
  133 |       |
  134 |       |   /* pn + qn for the product q * r_mod_p' */
  135 | 53.4k |   mp_size_t itch = pn + qn + MAX (mul_itch, add_1_itch);
  136 |       |
  137 | 53.4k |   itch = MAX (itch, powm_p_itch);
  138 | 53.4k |   itch = MAX (itch, powm_q_itch);
  139 | 53.4k |   itch = MAX (itch, mod_mul_itch);
  140 |       |
  141 |       |   /* pn + qn for the r_mod_p and r_mod_q temporaries. */
  142 | 53.4k |   return pn + qn + itch;
  143 | 53.4k | }
  144 |       |
  145 |       | void
  146 |       | _rsa_sec_compute_root (const struct rsa_private_key *key,
  147 |       |            mp_limb_t *rp, const mp_limb_t *mp,
  148 |       |            mp_limb_t *scratch)
  149 | 26.7k | {
  150 | 26.7k |   mp_size_t nn = NETTLE_OCTET_SIZE_TO_LIMB_SIZE (key->size);
  151 |       |
  152 |       |   /* The common case is pn = qn. This function would be simpler if we
  153 |       |    * could require that pn >= qn. */
  154 | 26.7k |   const mp_limb_t *pp = mpz_limbs_read (key->p);
  155 | 26.7k |   const mp_limb_t *qp = mpz_limbs_read (key->q);
  156 |       |
  157 | 26.7k |   mp_size_t pn = mpz_size (key->p);
  158 | 26.7k |   mp_size_t qn = mpz_size (key->q);
  159 | 26.7k |   mp_size_t an = mpz_size (key->a);
  160 | 26.7k |   mp_size_t bn = mpz_size (key->b);
  161 | 26.7k |   mp_size_t cn = mpz_size (key->c);
  162 |       |
  163 | 26.7k |   mp_limb_t *r_mod_p = scratch;
  164 | 26.7k |   mp_limb_t *r_mod_q = scratch + pn;
  165 | 26.7k |   mp_limb_t *scratch_out = r_mod_q + qn;
  166 | 26.7k |   mp_limb_t cy;
  167 |       |
  168 | 26.7k |   assert (pn <= nn);
  169 | 26.7k |   assert (qn <= nn);
  170 | 26.7k |   assert (an <= pn);
  171 | 26.7k |   assert (bn <= qn);
  172 | 26.7k |   assert (cn <= pn);
  173 |       |
  174 |       |   /* Compute r_mod_p = m^d % p = (m%p)^a % p */
  175 | 26.7k |   sec_powm (r_mod_p, mp, nn, mpz_limbs_read (key->a), an, pp, pn, scratch_out);
  176 |       |   /* Compute r_mod_q = m^d % q = (m%q)^b % q */
  177 | 26.7k |   sec_powm (r_mod_q, mp, nn, mpz_limbs_read (key->b), bn, qp, qn, scratch_out);
  178 |       |
  179 |       |   /* Set r_mod_p' = r_mod_p * c % p - r_mod_q * c % p . */
  180 | 26.7k |   sec_mod_mul (scratch_out, r_mod_p, pn, mpz_limbs_read (key->c), cn, pp, pn,
  181 | 26.7k |          scratch_out + cn + pn);
  182 | 26.7k |   mpn_copyi (r_mod_p, scratch_out, pn);
  183 |       |
  184 | 26.7k |   sec_mod_mul (scratch_out, r_mod_q, qn, mpz_limbs_read (key->c), cn, pp, pn,
  185 | 26.7k |          scratch_out + cn + qn);
  186 | 26.7k |   cy = mpn_sub_n (r_mod_p, r_mod_p, scratch_out, pn);
  187 | 26.7k |   mpn_cnd_add_n (cy, r_mod_p, r_mod_p, pp, pn);
  188 |       |
  189 |       |   /* Finally, compute x = r_mod_q + q r_mod_p' */
  190 | 26.7k |   sec_mul (scratch_out, qp, qn, r_mod_p, pn, scratch_out + pn + qn);
  191 |       |
  192 | 26.7k |   cy = mpn_add_n (rp, scratch_out, r_mod_q, qn);
  193 | 26.7k |   mpn_sec_add_1 (rp + qn, scratch_out + qn, nn - qn, cy, scratch_out + pn + qn);
  194 | 26.7k | }
  195 |       | #endif
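
The listed functions compute the RSA private-key operation x = m^d mod n with the CRT (Garner) recombination visible in the comments above: r_mod_p = (m mod p)^a mod p, r_mod_q = (m mod q)^b mod q, then x = r_mod_q + q * (c * (r_mod_p - r_mod_q) mod p), where a, b and c play the roles of d mod (p-1), d mod (q-1) and q^-1 mod p. The sketch below restates that recombination with the public GMP mpz API and toy parameters; it is illustrative only (the variable names mirror the key fields, the tiny primes are not realistic, and none of the side-channel-silent mpn_sec_* machinery used by the listed code appears here).

/* Minimal sketch (not part of Nettle): the CRT recombination performed
   by _rsa_sec_compute_root, written against the public GMP mpz API with
   toy parameters.  Real code must use the constant-time mpn_sec_* path
   shown in the listing above. */
#include <assert.h>
#include <gmp.h>

int
main (void)
{
  mpz_t p, q, n, e, d, a, b, c, m, r_p, r_q, h, x, ref, t;
  mpz_inits (p, q, n, e, d, a, b, c, m, r_p, r_q, h, x, ref, t, NULL);

  mpz_set_ui (p, 61);            /* toy primes, far too small for real use */
  mpz_set_ui (q, 53);
  mpz_mul (n, p, q);             /* n = p q */
  mpz_set_ui (e, 17);

  mpz_sub_ui (t, p, 1);
  mpz_sub_ui (h, q, 1);
  mpz_mul (t, t, h);             /* t = (p-1)(q-1) */
  mpz_invert (d, e, t);          /* d = e^-1 mod (p-1)(q-1) */
  mpz_sub_ui (t, p, 1);
  mpz_mod (a, d, t);             /* a = d mod (p-1), the role of key->a */
  mpz_sub_ui (t, q, 1);
  mpz_mod (b, d, t);             /* b = d mod (q-1), the role of key->b */
  mpz_invert (c, q, p);          /* c = q^-1 mod p,  the role of key->c */

  mpz_set_ui (m, 65);            /* input, 0 <= m < n */

  /* r_p = (m mod p)^a mod p,  r_q = (m mod q)^b mod q */
  mpz_powm (r_p, m, a, p);
  mpz_powm (r_q, m, b, q);

  /* h = c (r_p - r_q) mod p, then x = r_q + q h */
  mpz_sub (h, r_p, r_q);
  mpz_mul (h, h, c);
  mpz_mod (h, h, p);
  mpz_mul (x, q, h);
  mpz_add (x, x, r_q);

  mpz_powm (ref, m, d, n);       /* reference: m^d mod n computed directly */
  assert (mpz_cmp (x, ref) == 0);

  mpz_clears (p, q, n, e, d, a, b, c, m, r_p, r_q, h, x, ref, t, NULL);
  return 0;
}

Build with a GMP-enabled invocation such as cc crt-demo.c -lgmp (the file name is arbitrary); the final assert checks that the recombined x equals m^d mod n computed directly.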