/src/nettle/rsa-sign-tr.c
Line | Count | Source (jump to first uncovered line) |
1 | | /* rsa-sign-tr.c |
2 | | |
3 | | Creating RSA signatures, with some additional checks. |
4 | | |
5 | | Copyright (C) 2001, 2015 Niels Möller |
6 | | Copyright (C) 2012 Nikos Mavrogiannopoulos |
7 | | Copyright (C) 2018 Red Hat Inc. |
8 | | |
9 | | This file is part of GNU Nettle. |
10 | | |
11 | | GNU Nettle is free software: you can redistribute it and/or |
12 | | modify it under the terms of either: |
13 | | |
14 | | * the GNU Lesser General Public License as published by the Free |
15 | | Software Foundation; either version 3 of the License, or (at your |
16 | | option) any later version. |
17 | | |
18 | | or |
19 | | |
20 | | * the GNU General Public License as published by the Free |
21 | | Software Foundation; either version 2 of the License, or (at your |
22 | | option) any later version. |
23 | | |
24 | | or both in parallel, as here. |
25 | | |
26 | | GNU Nettle is distributed in the hope that it will be useful, |
27 | | but WITHOUT ANY WARRANTY; without even the implied warranty of |
28 | | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
29 | | General Public License for more details. |
30 | | |
31 | | You should have received copies of the GNU General Public License and |
32 | | the GNU Lesser General Public License along with this program. If |
33 | | not, see http://www.gnu.org/licenses/. |
34 | | */ |
35 | | |
36 | | #if HAVE_CONFIG_H |
37 | | # include "config.h" |
38 | | #endif |
39 | | |
40 | | #include <assert.h> |
41 | | |
42 | | #include "gmp-glue.h" |
43 | | #include "rsa.h" |
44 | | #include "rsa-internal.h" |
45 | | |
/* NOTE: evaluates both arguments twice; use only with side-effect-free
   expressions (all uses in this file pass plain variables). */
#define MAX(a, b) ((a) > (b) ? (a) : (b))
47 | | |
48 | | #if NETTLE_USE_MINI_GMP |
49 | | /* Blinds m, by computing c = m r^e (mod n), for a random r. Also |
50 | | returns the inverse (ri), for use by rsa_unblind. */ |
51 | | static void |
52 | | rsa_blind (const struct rsa_public_key *pub, |
53 | | void *random_ctx, nettle_random_func *random, |
54 | | mpz_t c, mpz_t ri, const mpz_t m) |
55 | 0 | { |
56 | 0 | mpz_t r; |
57 | |
|
58 | 0 | mpz_init(r); |
59 | | |
60 | | /* c = m*(r^e) |
61 | | * ri = r^(-1) |
62 | | */ |
63 | 0 | do |
64 | 0 | { |
65 | 0 | nettle_mpz_random(r, random_ctx, random, pub->n); |
66 | | /* invert r */ |
67 | 0 | } |
68 | 0 | while (!mpz_invert (ri, r, pub->n)); |
69 | | |
70 | | /* c = c*(r^e) mod n */ |
71 | 0 | mpz_powm_sec(r, r, pub->e, pub->n); |
72 | 0 | mpz_mul(c, m, r); |
73 | 0 | mpz_fdiv_r(c, c, pub->n); |
74 | |
|
75 | 0 | mpz_clear(r); |
76 | 0 | } |
77 | | |
/* m = c ri mod n */
static void
rsa_unblind (const struct rsa_public_key *pub,
	     mpz_t m, const mpz_t ri, const mpz_t c)
{
  /* Undo rsa_blind: multiply by the saved inverse of the blinding
     factor and reduce mod n. */
  mpz_mul(m, c, ri);
  mpz_fdiv_r(m, m, pub->n);
}
86 | | |
/* Checks for any errors done in the RSA computation. That avoids
 * attacks which rely on faults on hardware, or even software MPI
 * implementation.
 *
 * Returns 1 on success (x holds the private-key root of m), 0 on
 * failure (invalid key, or the computation was detected to be
 * inconsistent; x is then left untouched). */
int
rsa_compute_root_tr(const struct rsa_public_key *pub,
		    const struct rsa_private_key *key,
		    void *random_ctx, nettle_random_func *random,
		    mpz_t x, const mpz_t m)
{
  int res;
  mpz_t t, mb, xb, ri;

  /* mpz_powm_sec handles only odd moduli. If p, q or n is even, the
     key is invalid and rejected by rsa_private_key_prepare. However,
     some applications, notably gnutls, don't use this function, and
     we don't want an invalid key to lead to a crash down inside
     mpz_powm_sec. So do an additional check here. */
  if (mpz_even_p (pub->n) || mpz_even_p (key->p) || mpz_even_p (key->q))
    return 0;

  mpz_init (mb);
  mpz_init (xb);
  mpz_init (ri);
  mpz_init (t);

  /* Blind the input: mb = m * r^e mod n, ri = r^(-1). */
  rsa_blind (pub, random_ctx, random, mb, ri, m);

  /* Private-key operation on the blinded value. */
  rsa_compute_root (key, xb, mb);

  /* Verify by re-encrypting with the public exponent; a mismatch
     indicates a fault during the private-key computation. */
  mpz_powm_sec(t, xb, pub->e, pub->n);
  res = (mpz_cmp(mb, t) == 0);

  /* Only unblind (and expose) the result when the check passed, so a
     faulty result never leaves this function. */
  if (res)
    rsa_unblind (pub, x, ri, xb);

  mpz_clear (mb);
  mpz_clear (xb);
  mpz_clear (ri);
  mpz_clear (t);

  return res;
}
129 | | |
130 | | int |
131 | | _rsa_sec_compute_root_tr(const struct rsa_public_key *pub, |
132 | | const struct rsa_private_key *key, |
133 | | void *random_ctx, nettle_random_func *random, |
134 | | mp_limb_t *x, const mp_limb_t *m) |
135 | 0 | { |
136 | 0 | mp_size_t nn; |
137 | 0 | mpz_t mz; |
138 | 0 | mpz_t xz; |
139 | 0 | int res; |
140 | |
|
141 | 0 | mpz_init(xz); |
142 | |
|
143 | 0 | nn = mpz_size (pub->n); |
144 | |
|
145 | 0 | res = rsa_compute_root_tr(pub, key, random_ctx, random, xz, |
146 | 0 | mpz_roinit_n(mz, m, nn)); |
147 | |
|
148 | 0 | if (res) |
149 | 0 | mpz_limbs_copy(x, xz, nn); |
150 | |
|
151 | 0 | mpz_clear(xz); |
152 | 0 | return res; |
153 | 0 | } |
154 | | #else |
/* Blinds m, by computing c = m r^e (mod n), for a random r. Also
   returns the inverse (ri), for use by rsa_unblind. Must have c != m,
   no in-place operation.*/
static void
rsa_sec_blind (const struct rsa_public_key *pub,
               void *random_ctx, nettle_random_func *random,
               mp_limb_t *c, mp_limb_t *ri, const mp_limb_t *m)
{
  const mp_limb_t *ep = mpz_limbs_read (pub->e);
  const mp_limb_t *np = mpz_limbs_read (pub->n);
  mp_bitcnt_t ebn = mpz_sizeinbase (pub->e, 2);
  mp_size_t nn = mpz_size (pub->n);
  size_t itch;
  size_t i2;
  mp_limb_t *scratch;
  TMP_GMP_DECL (tp, mp_limb_t);
  TMP_GMP_DECL (rp, mp_limb_t);
  TMP_GMP_DECL (r, uint8_t);

  TMP_GMP_ALLOC (rp, nn);
  TMP_GMP_ALLOC (r, nn * sizeof(mp_limb_t));

  /* c = m*(r^e) mod n */
  /* Size one shared scratch area for the largest of the four
     constant-time mpn_sec_* operations used below. */
  itch = mpn_sec_powm_itch(nn, ebn, nn);
  i2 = mpn_sec_mul_itch(nn, nn);
  itch = MAX(itch, i2);
  i2 = mpn_sec_div_r_itch(2*nn, nn);
  itch = MAX(itch, i2);
  i2 = mpn_sec_invert_itch(nn);
  itch = MAX(itch, i2);

  /* tp holds a 2*nn-limb product, immediately followed by scratch. */
  TMP_GMP_ALLOC (tp, 2*nn + itch);
  scratch = tp + 2*nn;

  /* ri = r^(-1) */
  do
    {
      /* Draw nn limbs worth of random bytes for the blinding factor.
	 NOTE(review): r is used without reduction mod n — presumably
	 acceptable for mpn_sec_invert/mpn_sec_powm inputs; confirm
	 against GMP's documented operand requirements. */
      random(random_ctx, nn * sizeof(mp_limb_t), (uint8_t *)r);
      mpn_set_base256(rp, nn, r, nn * sizeof(mp_limb_t));
      /* mpn_sec_invert clobbers its input, so invert a copy in tp and
	 keep the original r in rp for the powm below. */
      mpn_copyi(tp, rp, nn);
      /* invert r */
    }
  while (!mpn_sec_invert (ri, tp, np, nn, 2 * nn * GMP_NUMB_BITS, scratch));

  /* c = r^e mod n, then c = m * r^e mod n via a 2*nn-limb product
     reduced in place. */
  mpn_sec_powm (c, rp, nn, ep, ebn, np, nn, scratch);
  mpn_sec_mul (tp, c, nn, m, nn, scratch);
  mpn_sec_div_r (tp, 2*nn, np, nn, scratch);
  mpn_copyi(c, tp, nn);

  TMP_GMP_FREE (r);
  TMP_GMP_FREE (rp);
  TMP_GMP_FREE (tp);
}
208 | | |
209 | | /* m = c ri mod n. Allows x == c. */ |
210 | | static void |
211 | | rsa_sec_unblind (const struct rsa_public_key *pub, |
212 | | mp_limb_t *x, mp_limb_t *ri, const mp_limb_t *c) |
213 | | { |
214 | | const mp_limb_t *np = mpz_limbs_read (pub->n); |
215 | | mp_size_t nn = mpz_size (pub->n); |
216 | | |
217 | | size_t itch; |
218 | | size_t i2; |
219 | | mp_limb_t *scratch; |
220 | | TMP_GMP_DECL(tp, mp_limb_t); |
221 | | |
222 | | itch = mpn_sec_mul_itch(nn, nn); |
223 | | i2 = mpn_sec_div_r_itch(nn + nn, nn); |
224 | | itch = MAX(itch, i2); |
225 | | |
226 | | TMP_GMP_ALLOC (tp, nn + nn + itch); |
227 | | scratch = tp + nn + nn; |
228 | | |
229 | | mpn_sec_mul (tp, c, nn, ri, nn, scratch); |
230 | | mpn_sec_div_r (tp, nn + nn, np, nn, scratch); |
231 | | mpn_copyi(x, tp, nn); |
232 | | |
233 | | TMP_GMP_FREE (tp); |
234 | | } |
235 | | |
236 | | static int |
237 | | sec_equal(const mp_limb_t *a, const mp_limb_t *b, size_t limbs) |
238 | | { |
239 | | volatile mp_limb_t z = 0; |
240 | | size_t i; |
241 | | |
242 | | for (i = 0; i < limbs; i++) |
243 | | { |
244 | | z |= (a[i] ^ b[i]); |
245 | | } |
246 | | |
247 | | return z == 0; |
248 | | } |
249 | | |
250 | | static int |
251 | | rsa_sec_check_root(const struct rsa_public_key *pub, |
252 | | const mp_limb_t *x, const mp_limb_t *m) |
253 | | { |
254 | | mp_size_t nn = mpz_size (pub->n); |
255 | | mp_size_t ebn = mpz_sizeinbase (pub->e, 2); |
256 | | const mp_limb_t *np = mpz_limbs_read (pub->n); |
257 | | const mp_limb_t *ep = mpz_limbs_read (pub->e); |
258 | | int ret; |
259 | | |
260 | | mp_size_t itch; |
261 | | |
262 | | mp_limb_t *scratch; |
263 | | TMP_GMP_DECL(tp, mp_limb_t); |
264 | | |
265 | | itch = mpn_sec_powm_itch (nn, ebn, nn); |
266 | | TMP_GMP_ALLOC (tp, nn + itch); |
267 | | scratch = tp + nn; |
268 | | |
269 | | mpn_sec_powm(tp, x, nn, ep, ebn, np, nn, scratch); |
270 | | ret = sec_equal(tp, m, nn); |
271 | | |
272 | | TMP_GMP_FREE (tp); |
273 | | return ret; |
274 | | } |
275 | | |
276 | | static void |
277 | | cnd_mpn_zero (int cnd, volatile mp_ptr rp, mp_size_t n) |
278 | | { |
279 | | volatile mp_limb_t c; |
280 | | volatile mp_limb_t mask = (mp_limb_t) cnd - 1; |
281 | | |
282 | | while (--n >= 0) |
283 | | { |
284 | | c = rp[n]; |
285 | | c &= mask; |
286 | | rp[n] = c; |
287 | | } |
288 | | } |
289 | | |
/* Checks for any errors done in the RSA computation. That avoids
 * attacks which rely on faults on hardware, or even software MPI
 * implementation.
 * This version is side-channel silent even in case of error,
 * the destination buffer is always overwritten.
 *
 * Returns 1 on success (x holds the unblinded root), 0 on failure
 * (x is zeroed). */
int
_rsa_sec_compute_root_tr(const struct rsa_public_key *pub,
			 const struct rsa_private_key *key,
			 void *random_ctx, nettle_random_func *random,
			 mp_limb_t *x, const mp_limb_t *m)
{
  TMP_GMP_DECL (c, mp_limb_t);
  TMP_GMP_DECL (ri, mp_limb_t);
  TMP_GMP_DECL (scratch, mp_limb_t);
  size_t key_limb_size;
  int ret;

  key_limb_size = mpz_size(pub->n);

  /* mpz_powm_sec handles only odd moduli. If p, q or n is even, the
     key is invalid and rejected by rsa_private_key_prepare. However,
     some applications, notably gnutls, don't use this function, and
     we don't want an invalid key to lead to a crash down inside
     mpz_powm_sec. So do an additional check here. */
  if (mpz_even_p (pub->n) || mpz_even_p (key->p) || mpz_even_p (key->q))
    {
      /* Still overwrite x, to honour the "always written" contract. */
      mpn_zero(x, key_limb_size);
      return 0;
    }

  /* NOTE(review): key_limb_size was just set to mpz_size(pub->n), so
     this assertion is vacuously true as written; it looks like it was
     meant to compare against a limb count derived from key->size —
     verify against upstream. */
  assert(mpz_size(pub->n) == key_limb_size);

  TMP_GMP_ALLOC (c, key_limb_size);
  TMP_GMP_ALLOC (ri, key_limb_size);
  TMP_GMP_ALLOC (scratch, _rsa_sec_compute_root_itch(key));

  /* Blind the input: c = m * r^e mod n, ri = r^(-1). */
  rsa_sec_blind (pub, random_ctx, random, c, ri, m);

  /* Private-key operation on the blinded input. */
  _rsa_sec_compute_root(key, x, c, scratch);

  /* Fault check: re-encrypt with the public exponent, constant time. */
  ret = rsa_sec_check_root(pub, x, c);

  /* Unblind unconditionally, then clear x branch-free iff the check
     failed, so control flow does not depend on ret. */
  rsa_sec_unblind(pub, x, ri, x);

  cnd_mpn_zero(1 - ret, x, key_limb_size);

  TMP_GMP_FREE (scratch);
  TMP_GMP_FREE (ri);
  TMP_GMP_FREE (c);
  return ret;
}
341 | | |
342 | | /* Checks for any errors done in the RSA computation. That avoids |
343 | | * attacks which rely on faults on hardware, or even software MPI |
344 | | * implementation. |
345 | | * This version is maintained for API compatibility reasons. It |
346 | | * is not completely side-channel silent. There are conditionals |
347 | | * in buffer copying both in case of success or error. |
348 | | */ |
349 | | int |
350 | | rsa_compute_root_tr(const struct rsa_public_key *pub, |
351 | | const struct rsa_private_key *key, |
352 | | void *random_ctx, nettle_random_func *random, |
353 | | mpz_t x, const mpz_t m) |
354 | | { |
355 | | TMP_GMP_DECL (l, mp_limb_t); |
356 | | mp_size_t nn = mpz_size(pub->n); |
357 | | int res; |
358 | | |
359 | | TMP_GMP_ALLOC (l, nn); |
360 | | mpz_limbs_copy(l, m, nn); |
361 | | |
362 | | res = _rsa_sec_compute_root_tr (pub, key, random_ctx, random, l, l); |
363 | | if (res) { |
364 | | mp_limb_t *xp = mpz_limbs_write (x, nn); |
365 | | mpn_copyi (xp, l, nn); |
366 | | mpz_limbs_finish (x, nn); |
367 | | } |
368 | | |
369 | | TMP_GMP_FREE (l); |
370 | | return res; |
371 | | } |
372 | | #endif |