/src/openssl/crypto/bn/bn_exp.c
Line | Count | Source |
1 | | /* |
2 | | * Copyright 1995-2023 The OpenSSL Project Authors. All Rights Reserved. |
3 | | * |
4 | | * Licensed under the Apache License 2.0 (the "License"). You may not use |
5 | | * this file except in compliance with the License. You can obtain a copy |
6 | | * in the file LICENSE in the source distribution or at |
7 | | * https://www.openssl.org/source/license.html |
8 | | */ |
9 | | |
10 | | #include "internal/cryptlib.h" |
11 | | #include "internal/constant_time.h" |
12 | | #include "bn_local.h" |
13 | | |
14 | | #include <stdlib.h> |
15 | | #ifdef _WIN32 |
16 | | # include <malloc.h> |
17 | | # ifndef alloca |
18 | | # define alloca _alloca |
19 | | # endif |
20 | | #elif defined(__GNUC__) |
21 | | # ifndef alloca |
22 | | # define alloca(s) __builtin_alloca((s)) |
23 | | # endif |
24 | | #elif defined(__sun) |
25 | | # include <alloca.h> |
26 | | #endif |
27 | | |
28 | | #include "rsaz_exp.h" |
29 | | |
30 | | #undef SPARC_T4_MONT |
31 | | #if defined(OPENSSL_BN_ASM_MONT) && (defined(__sparc__) || defined(__sparc)) |
32 | | # include "crypto/sparc_arch.h" |
33 | | # define SPARC_T4_MONT |
34 | | #endif |
35 | | |
36 | | /* maximum precomputation table size for *variable* sliding windows */ |
37 | | #define TABLE_SIZE 32 |
38 | | |
39 | | /* |
40 | | * Beyond this limit the constant time code is disabled due to |
41 | | * the possible overflow in the computation of powerbufLen in |
42 | | * BN_mod_exp_mont_consttime. |
43 | | * When this limit is exceeded, the computation falls back to |
44 | | * non-constant-time code, but it will take a very long time. |
45 | | */ |
46 | 163k | #define BN_CONSTTIME_SIZE_LIMIT (INT_MAX / BN_BYTES / 256) |
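As a rough sanity check (assuming a typical 64-bit build where BN_BYTES is 8 and INT_MAX is 2^31 - 1), the limit works out to (2^31 - 1) / 8 / 256 = 1,048,575 words, i.e. moduli of roughly 67 million bits; only inputs larger than that lose the constant-time guarantee.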
47 | | |
48 | | /* plain square-and-multiply - simple, but it works */ |
49 | | int BN_exp(BIGNUM *r, const BIGNUM *a, const BIGNUM *p, BN_CTX *ctx) |
50 | 1 | { |
51 | 1 | int i, bits, ret = 0; |
52 | 1 | BIGNUM *v, *rr; |
53 | | |
54 | 1 | if (BN_get_flags(p, BN_FLG_CONSTTIME) != 0 |
55 | 1 | || BN_get_flags(a, BN_FLG_CONSTTIME) != 0) { |
56 | | /* BN_FLG_CONSTTIME only supported by BN_mod_exp_mont() */ |
57 | 0 | ERR_raise(ERR_LIB_BN, ERR_R_SHOULD_NOT_HAVE_BEEN_CALLED); |
58 | 0 | return 0; |
59 | 0 | } |
60 | | |
61 | 1 | BN_CTX_start(ctx); |
62 | 1 | rr = ((r == a) || (r == p)) ? BN_CTX_get(ctx) : r; |
63 | 1 | v = BN_CTX_get(ctx); |
64 | 1 | if (rr == NULL || v == NULL) |
65 | 0 | goto err; |
66 | | |
67 | 1 | if (BN_copy(v, a) == NULL) |
68 | 0 | goto err; |
69 | 1 | bits = BN_num_bits(p); |
70 | | |
71 | 1 | if (BN_is_odd(p)) { |
72 | 1 | if (BN_copy(rr, a) == NULL) |
73 | 0 | goto err; |
74 | 1 | } else { |
75 | 0 | if (!BN_one(rr)) |
76 | 0 | goto err; |
77 | 0 | } |
78 | | |
79 | 6 | for (i = 1; i < bits; i++) { |
80 | 5 | if (!BN_sqr(v, v, ctx)) |
81 | 0 | goto err; |
82 | 5 | if (BN_is_bit_set(p, i)) { |
83 | 4 | if (!BN_mul(rr, rr, v, ctx)) |
84 | 0 | goto err; |
85 | 4 | } |
86 | 5 | } |
87 | 1 | if (r != rr && BN_copy(r, rr) == NULL) |
88 | 0 | goto err; |
89 | | |
90 | 1 | ret = 1; |
91 | 1 | err: |
92 | 1 | BN_CTX_end(ctx); |
93 | 1 | bn_check_top(r); |
94 | 1 | return ret; |
95 | 1 | } |
96 | | |
97 | | int BN_mod_exp(BIGNUM *r, const BIGNUM *a, const BIGNUM *p, const BIGNUM *m, |
98 | | BN_CTX *ctx) |
99 | 408 | { |
100 | 408 | int ret; |
101 | | |
102 | 408 | bn_check_top(a); |
103 | 408 | bn_check_top(p); |
104 | 408 | bn_check_top(m); |
105 | | |
106 | | /*- |
107 | | * For even modulus m = 2^k*m_odd, it might make sense to compute |
108 | | * a^p mod m_odd and a^p mod 2^k separately (with Montgomery |
109 | | * exponentiation for the odd part), using appropriate exponent |
110 | | * reductions, and combine the results using the CRT. |
111 | | * |
112 | | * For now, we use Montgomery only if the modulus is odd; otherwise, |
113 | | * exponentiation using the reciprocal-based quick remaindering |
114 | | * algorithm is used. |
115 | | * |
116 | | * (Timing obtained with expspeed.c [computations a^p mod m |
117 | | * where a, p, m are of the same length: 256, 512, 1024, 2048, |
118 | | * 4096, 8192 bits], compared to the running time of the |
119 | | * standard algorithm: |
120 | | * |
121 | | * BN_mod_exp_mont 33 .. 40 % [AMD K6-2, Linux, debug configuration] |
122 | | * 55 .. 77 % [UltraSparc processor, but |
123 | | * debug-solaris-sparcv8-gcc conf.] |
124 | | * |
125 | | * BN_mod_exp_recp 50 .. 70 % [AMD K6-2, Linux, debug configuration] |
126 | | * 62 .. 118 % [UltraSparc, debug-solaris-sparcv8-gcc] |
127 | | * |
128 | | * On the Sparc, BN_mod_exp_recp was faster than BN_mod_exp_mont |
129 | | * at 2048 and more bits, but at 512 and 1024 bits, it was |
130 | | * slower even than the standard algorithm! |
131 | | * |
132 | | * "Real" timings [linux-elf, solaris-sparcv9-gcc configurations] |
133 | | * should be obtained when the new Montgomery reduction code |
134 | | * has been integrated into OpenSSL.) |
135 | | */ |
136 | | |
137 | 408 | #define MONT_MUL_MOD |
138 | 408 | #define MONT_EXP_WORD |
139 | 408 | #define RECP_MUL_MOD |
140 | | |
141 | 408 | #ifdef MONT_MUL_MOD |
142 | 408 | if (BN_is_odd(m)) { |
143 | 171 | # ifdef MONT_EXP_WORD |
144 | 171 | if (a->top == 1 && !a->neg |
145 | 171 | && (BN_get_flags(p, BN_FLG_CONSTTIME) == 0) |
146 | 171 | && (BN_get_flags(a, BN_FLG_CONSTTIME) == 0) |
147 | 171 | && (BN_get_flags(m, BN_FLG_CONSTTIME) == 0)) { |
148 | 51 | BN_ULONG A = a->d[0]; |
149 | 51 | ret = BN_mod_exp_mont_word(r, A, p, m, ctx, NULL); |
150 | 51 | } else |
151 | 120 | # endif |
152 | 120 | ret = BN_mod_exp_mont(r, a, p, m, ctx, NULL); |
153 | 171 | } else |
154 | 237 | #endif |
155 | 237 | #ifdef RECP_MUL_MOD |
156 | 237 | { |
157 | 237 | ret = BN_mod_exp_recp(r, a, p, m, ctx); |
158 | 237 | } |
159 | | #else |
160 | | { |
161 | | ret = BN_mod_exp_simple(r, a, p, m, ctx); |
162 | | } |
163 | | #endif |
164 | | |
165 | 408 | bn_check_top(r); |
166 | 408 | return ret; |
167 | 408 | } |
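For orientation, here is a minimal, self-contained caller of the public wrapper above (a usage sketch, not part of bn_exp.c); all of the algorithm selection discussed in the comment happens behind this single call, and the operand values are arbitrary illustrations.

    #include <stdio.h>
    #include <openssl/bn.h>
    #include <openssl/crypto.h>

    /* Compute 7^560 mod 561 via the public wrapper; BN_mod_exp() itself picks
     * the Montgomery, word-based or reciprocal path. 561 is the smallest
     * Carmichael number and 7 is coprime to it, so the expected output is 1. */
    int demo_mod_exp(void)
    {
        int ok = 0;
        char *dec = NULL;
        BN_CTX *ctx = BN_CTX_new();
        BIGNUM *r = BN_new(), *a = BN_new(), *p = BN_new(), *m = BN_new();

        if (ctx == NULL || r == NULL || a == NULL || p == NULL || m == NULL)
            goto end;
        if (!BN_set_word(a, 7) || !BN_set_word(p, 560) || !BN_set_word(m, 561))
            goto end;
        if (!BN_mod_exp(r, a, p, m, ctx))
            goto end;
        if ((dec = BN_bn2dec(r)) == NULL)
            goto end;
        printf("7^560 mod 561 = %s\n", dec);
        ok = 1;
     end:
        OPENSSL_free(dec);
        BN_free(r);
        BN_free(a);
        BN_free(p);
        BN_free(m);
        BN_CTX_free(ctx);
        return ok;
    }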
168 | | |
169 | | int BN_mod_exp_recp(BIGNUM *r, const BIGNUM *a, const BIGNUM *p, |
170 | | const BIGNUM *m, BN_CTX *ctx) |
171 | 237 | { |
172 | 237 | int i, j, bits, ret = 0, wstart, wend, window; |
173 | 237 | int start = 1; |
174 | 237 | BIGNUM *aa; |
175 | | /* Table of variables obtained from 'ctx' */ |
176 | 237 | BIGNUM *val[TABLE_SIZE]; |
177 | 237 | BN_RECP_CTX recp; |
178 | | |
179 | 237 | if (BN_get_flags(p, BN_FLG_CONSTTIME) != 0 |
180 | 237 | || BN_get_flags(a, BN_FLG_CONSTTIME) != 0 |
181 | 237 | || BN_get_flags(m, BN_FLG_CONSTTIME) != 0) { |
182 | | /* BN_FLG_CONSTTIME only supported by BN_mod_exp_mont() */ |
183 | 1 | ERR_raise(ERR_LIB_BN, ERR_R_SHOULD_NOT_HAVE_BEEN_CALLED); |
184 | 1 | return 0; |
185 | 1 | } |
186 | | |
187 | 236 | bits = BN_num_bits(p); |
188 | 236 | if (bits == 0) { |
189 | | /* x**0 mod 1, or x**0 mod -1 is still zero. */ |
190 | 1 | if (BN_abs_is_word(m, 1)) { |
191 | 0 | ret = 1; |
192 | 0 | BN_zero(r); |
193 | 1 | } else { |
194 | 1 | ret = BN_one(r); |
195 | 1 | } |
196 | 1 | return ret; |
197 | 1 | } |
198 | | |
199 | 235 | BN_RECP_CTX_init(&recp); |
200 | | |
201 | 235 | BN_CTX_start(ctx); |
202 | 235 | aa = BN_CTX_get(ctx); |
203 | 235 | val[0] = BN_CTX_get(ctx); |
204 | 235 | if (val[0] == NULL) |
205 | 0 | goto err; |
206 | | |
207 | 235 | if (m->neg) { |
208 | | /* ignore sign of 'm' */ |
209 | 0 | if (!BN_copy(aa, m)) |
210 | 0 | goto err; |
211 | 0 | aa->neg = 0; |
212 | 0 | if (BN_RECP_CTX_set(&recp, aa, ctx) <= 0) |
213 | 0 | goto err; |
214 | 235 | } else { |
215 | 235 | if (BN_RECP_CTX_set(&recp, m, ctx) <= 0) |
216 | 1 | goto err; |
217 | 235 | } |
218 | | |
219 | 234 | if (!BN_nnmod(val[0], a, m, ctx)) |
220 | 0 | goto err; /* 1 */ |
221 | 234 | if (BN_is_zero(val[0])) { |
222 | 1 | BN_zero(r); |
223 | 1 | ret = 1; |
224 | 1 | goto err; |
225 | 1 | } |
226 | | |
227 | 233 | window = BN_window_bits_for_exponent_size(bits); |
228 | 233 | if (window > 1) { |
229 | 230 | if (!BN_mod_mul_reciprocal(aa, val[0], val[0], &recp, ctx)) |
230 | 0 | goto err; /* 2 */ |
231 | 230 | j = 1 << (window - 1); |
232 | 3.82k | for (i = 1; i < j; i++) { |
233 | 3.59k | if (((val[i] = BN_CTX_get(ctx)) == NULL) || |
234 | 3.59k | !BN_mod_mul_reciprocal(val[i], val[i - 1], aa, &recp, ctx)) |
235 | 0 | goto err; |
236 | 3.59k | } |
237 | 230 | } |
238 | | |
239 | 233 | start = 1; /* This is used to avoid multiplication etc |
240 | | * when there is only the value '1' in the |
241 | | * buffer. */ |
242 | 233 | wstart = bits - 1; /* The top bit of the window */ |
243 | 233 | wend = 0; /* The bottom bit of the window */ |
244 | | |
245 | 233 | if (r == p) { |
246 | 0 | BIGNUM *p_dup = BN_CTX_get(ctx); |
247 | |
248 | 0 | if (p_dup == NULL || BN_copy(p_dup, p) == NULL) |
249 | 0 | goto err; |
250 | 0 | p = p_dup; |
251 | 0 | } |
252 | | |
253 | 233 | if (!BN_one(r)) |
254 | 0 | goto err; |
255 | | |
256 | 114k | for (;;) { |
257 | 114k | int wvalue; /* The 'value' of the window */ |
258 | | |
259 | 114k | if (BN_is_bit_set(p, wstart) == 0) { |
260 | 75.6k | if (!start) |
261 | 75.6k | if (!BN_mod_mul_reciprocal(r, r, r, &recp, ctx)) |
262 | 0 | goto err; |
263 | 75.6k | if (wstart == 0) |
264 | 187 | break; |
265 | 75.4k | wstart--; |
266 | 75.4k | continue; |
267 | 75.6k | } |
268 | | /* |
269 | | * We now have wstart on a 'set' bit; we now need to work out how big |
270 | | * a window to use. To do this we scan forward until the last |
271 | | * set bit before the end of the window. |
272 | | */ |
273 | 38.7k | wvalue = 1; |
274 | 38.7k | wend = 0; |
275 | 222k | for (i = 1; i < window; i++) { |
276 | 183k | if (wstart - i < 0) |
277 | 107 | break; |
278 | 183k | if (BN_is_bit_set(p, wstart - i)) { |
279 | 91.0k | wvalue <<= (i - wend); |
280 | 91.0k | wvalue |= 1; |
281 | 91.0k | wend = i; |
282 | 91.0k | } |
283 | 183k | } |
284 | | |
285 | | /* wend is the size of the current window */ |
286 | 38.7k | j = wend + 1; |
287 | | /* square for the 'bits above' */ |
288 | 38.7k | if (!start) |
289 | 221k | for (i = 0; i < j; i++) { |
290 | 183k | if (!BN_mod_mul_reciprocal(r, r, r, &recp, ctx)) |
291 | 0 | goto err; |
292 | 183k | } |
293 | | |
294 | | /* wvalue will be an odd number < 2^window */ |
295 | 38.7k | if (!BN_mod_mul_reciprocal(r, r, val[wvalue >> 1], &recp, ctx)) |
296 | 0 | goto err; |
297 | | |
298 | | /* move the 'window' down further */ |
299 | 38.7k | wstart -= wend + 1; |
300 | 38.7k | start = 0; |
301 | 38.7k | if (wstart < 0) |
302 | 46 | break; |
303 | 38.7k | } |
304 | 233 | ret = 1; |
305 | 235 | err: |
306 | 235 | BN_CTX_end(ctx); |
307 | 235 | BN_RECP_CTX_free(&recp); |
308 | 235 | bn_check_top(r); |
309 | 235 | return ret; |
310 | 233 | } |
311 | | |
312 | | int BN_mod_exp_mont(BIGNUM *rr, const BIGNUM *a, const BIGNUM *p, |
313 | | const BIGNUM *m, BN_CTX *ctx, BN_MONT_CTX *in_mont) |
314 | 81.2k | { |
315 | 81.2k | int i, j, bits, ret = 0, wstart, wend, window; |
316 | 81.2k | int start = 1; |
317 | 81.2k | BIGNUM *d, *r; |
318 | 81.2k | const BIGNUM *aa; |
319 | | /* Table of variables obtained from 'ctx' */ |
320 | 81.2k | BIGNUM *val[TABLE_SIZE]; |
321 | 81.2k | BN_MONT_CTX *mont = NULL; |
322 | | |
323 | 81.2k | bn_check_top(a); |
324 | 81.2k | bn_check_top(p); |
325 | 81.2k | bn_check_top(m); |
326 | | |
327 | 81.2k | if (!BN_is_odd(m)) { |
328 | 64 | ERR_raise(ERR_LIB_BN, BN_R_CALLED_WITH_EVEN_MODULUS); |
329 | 64 | return 0; |
330 | 64 | } |
331 | | |
332 | 81.2k | if (m->top <= BN_CONSTTIME_SIZE_LIMIT |
333 | 81.2k | && (BN_get_flags(p, BN_FLG_CONSTTIME) != 0 |
334 | 81.2k | || BN_get_flags(a, BN_FLG_CONSTTIME) != 0 |
335 | 81.2k | || BN_get_flags(m, BN_FLG_CONSTTIME) != 0)) { |
336 | 824 | return BN_mod_exp_mont_consttime(rr, a, p, m, ctx, in_mont); |
337 | 824 | } |
338 | | |
339 | 80.4k | bits = BN_num_bits(p); |
340 | 80.4k | if (bits == 0) { |
341 | | /* x**0 mod 1, or x**0 mod -1 is still zero. */ |
342 | 3 | if (BN_abs_is_word(m, 1)) { |
343 | 1 | ret = 1; |
344 | 1 | BN_zero(rr); |
345 | 2 | } else { |
346 | 2 | ret = BN_one(rr); |
347 | 2 | } |
348 | 3 | return ret; |
349 | 3 | } |
350 | | |
351 | 80.3k | BN_CTX_start(ctx); |
352 | 80.3k | d = BN_CTX_get(ctx); |
353 | 80.3k | r = BN_CTX_get(ctx); |
354 | 80.3k | val[0] = BN_CTX_get(ctx); |
355 | 80.3k | if (val[0] == NULL) |
356 | 0 | goto err; |
357 | | |
358 | | /* |
359 | | * Allocate a Montgomery context if the caller did not supply one |
360 | | */ |
361 | | |
362 | 80.3k | if (in_mont != NULL) |
363 | 80.2k | mont = in_mont; |
364 | 157 | else { |
365 | 157 | if ((mont = BN_MONT_CTX_new()) == NULL) |
366 | 0 | goto err; |
367 | 157 | if (!BN_MONT_CTX_set(mont, m, ctx)) |
368 | 0 | goto err; |
369 | 157 | } |
370 | | |
371 | 80.3k | if (a->neg || BN_ucmp(a, m) >= 0) { |
372 | 34 | if (!BN_nnmod(val[0], a, m, ctx)) |
373 | 0 | goto err; |
374 | 34 | aa = val[0]; |
375 | 34 | } else |
376 | 80.3k | aa = a; |
377 | 80.3k | if (!bn_to_mont_fixed_top(val[0], aa, mont, ctx)) |
378 | 0 | goto err; /* 1 */ |
379 | | |
380 | 80.3k | window = BN_window_bits_for_exponent_size(bits); |
381 | 80.3k | if (window > 1) { |
382 | 80.3k | if (!bn_mul_mont_fixed_top(d, val[0], val[0], mont, ctx)) |
383 | 0 | goto err; /* 2 */ |
384 | 80.3k | j = 1 << (window - 1); |
385 | 1.81M | for (i = 1; i < j; i++) { |
386 | 1.73M | if (((val[i] = BN_CTX_get(ctx)) == NULL) || |
387 | 1.73M | !bn_mul_mont_fixed_top(val[i], val[i - 1], d, mont, ctx)) |
388 | 0 | goto err; |
389 | 1.73M | } |
390 | 80.3k | } |
391 | | |
392 | 80.3k | start = 1; /* This is used to avoid multiplication etc |
393 | | * when there is only the value '1' in the |
394 | | * buffer. */ |
395 | 80.3k | wstart = bits - 1; /* The top bit of the window */ |
396 | 80.3k | wend = 0; /* The bottom bit of the window */ |
397 | | |
398 | 80.3k | #if 1 /* by Shay Gueron's suggestion */ |
399 | 80.3k | j = m->top; /* borrow j */ |
400 | 80.3k | if (m->d[j - 1] & (((BN_ULONG)1) << (BN_BITS2 - 1))) { |
401 | 49.4k | if (bn_wexpand(r, j) == NULL) |
402 | 0 | goto err; |
403 | | /* 2^(top*BN_BITS2) - m */ |
404 | 49.4k | r->d[0] = (0 - m->d[0]) & BN_MASK2; |
405 | 784k | for (i = 1; i < j; i++) |
406 | 734k | r->d[i] = (~m->d[i]) & BN_MASK2; |
407 | 49.4k | r->top = j; |
408 | 49.4k | r->flags |= BN_FLG_FIXED_TOP; |
409 | 49.4k | } else |
410 | 30.9k | #endif |
411 | 30.9k | if (!bn_to_mont_fixed_top(r, BN_value_one(), mont, ctx)) |
412 | 0 | goto err; |
413 | 23.9M | for (;;) { |
414 | 23.9M | int wvalue; /* The 'value' of the window */ |
415 | | |
416 | 23.9M | if (BN_is_bit_set(p, wstart) == 0) { |
417 | 15.7M | if (!start) { |
418 | 15.7M | if (!bn_mul_mont_fixed_top(r, r, r, mont, ctx)) |
419 | 0 | goto err; |
420 | 15.7M | } |
421 | 15.7M | if (wstart == 0) |
422 | 469 | break; |
423 | 15.7M | wstart--; |
424 | 15.7M | continue; |
425 | 15.7M | } |
426 | | /* |
427 | | * We now have wstart on a 'set' bit; we now need to work out how big |
428 | | * a window to use. To do this we scan forward until the last |
429 | | * set bit before the end of the window. |
430 | | */ |
431 | 8.19M | wvalue = 1; |
432 | 8.19M | wend = 0; |
433 | 46.8M | for (i = 1; i < window; i++) { |
434 | 38.7M | if (wstart - i < 0) |
435 | 67.5k | break; |
436 | 38.7M | if (BN_is_bit_set(p, wstart - i)) { |
437 | 19.4M | wvalue <<= (i - wend); |
438 | 19.4M | wvalue |= 1; |
439 | 19.4M | wend = i; |
440 | 19.4M | } |
441 | 38.7M | } |
442 | | |
443 | | /* wend is the size of the current window */ |
444 | 8.19M | j = wend + 1; |
445 | | /* square for the 'bits above' */ |
446 | 8.19M | if (!start) |
447 | 46.9M | for (i = 0; i < j; i++) { |
448 | 38.8M | if (!bn_mul_mont_fixed_top(r, r, r, mont, ctx)) |
449 | 0 | goto err; |
450 | 38.8M | } |
451 | | |
452 | | /* wvalue will be an odd number < 2^window */ |
453 | 8.19M | if (!bn_mul_mont_fixed_top(r, r, val[wvalue >> 1], mont, ctx)) |
454 | 0 | goto err; |
455 | | |
456 | | /* move the 'window' down further */ |
457 | 8.19M | wstart -= wend + 1; |
458 | 8.19M | start = 0; |
459 | 8.19M | if (wstart < 0) |
460 | 79.9k | break; |
461 | 8.19M | } |
462 | | /* |
463 | | * Done with zero-padded intermediate BIGNUMs. Final BN_from_montgomery |
464 | | * removes padding [if any] and makes return value suitable for public |
465 | | * API consumer. |
466 | | */ |
467 | | #if defined(SPARC_T4_MONT) |
468 | | if (OPENSSL_sparcv9cap_P[0] & (SPARCV9_VIS3 | SPARCV9_PREFER_FPU)) { |
469 | | j = mont->N.top; /* borrow j */ |
470 | | val[0]->d[0] = 1; /* borrow val[0] */ |
471 | | for (i = 1; i < j; i++) |
472 | | val[0]->d[i] = 0; |
473 | | val[0]->top = j; |
474 | | if (!BN_mod_mul_montgomery(rr, r, val[0], mont, ctx)) |
475 | | goto err; |
476 | | } else |
477 | | #endif |
478 | 80.3k | if (!BN_from_montgomery(rr, r, mont, ctx)) |
479 | 0 | goto err; |
480 | 80.3k | ret = 1; |
481 | 80.3k | err: |
482 | 80.3k | if (in_mont == NULL) |
483 | 157 | BN_MONT_CTX_free(mont); |
484 | 80.3k | BN_CTX_end(ctx); |
485 | 80.3k | bn_check_top(rr); |
486 | 80.3k | return ret; |
487 | 80.3k | } |
488 | | |
489 | | static BN_ULONG bn_get_bits(const BIGNUM *a, int bitpos) |
490 | 224k | { |
491 | 224k | BN_ULONG ret = 0; |
492 | 224k | int wordpos; |
493 | | |
494 | 224k | wordpos = bitpos / BN_BITS2; |
495 | 224k | bitpos %= BN_BITS2; |
496 | 224k | if (wordpos >= 0 && wordpos < a->top) { |
497 | 224k | ret = a->d[wordpos] & BN_MASK2; |
498 | 224k | if (bitpos) { |
499 | 217k | ret >>= bitpos; |
500 | 217k | if (++wordpos < a->top) |
501 | 201k | ret |= a->d[wordpos] << (BN_BITS2 - bitpos); |
502 | 217k | } |
503 | 224k | } |
504 | | |
505 | 224k | return ret & BN_MASK2; |
506 | 224k | } |
507 | | |
508 | | /* |
509 | | * BN_mod_exp_mont_consttime() stores the precomputed powers in a specific |
510 | | * layout so that accessing any of these table values shows the same access |
511 | | * pattern as far as cache lines are concerned. The following functions are |
512 | | * used to transfer a BIGNUM from/to that table. |
513 | | */ |
514 | | |
515 | | static int MOD_EXP_CTIME_COPY_TO_PREBUF(const BIGNUM *b, int top, |
516 | | unsigned char *buf, int idx, |
517 | | int window) |
518 | 27.3k | { |
519 | 27.3k | int i, j; |
520 | 27.3k | int width = 1 << window; |
521 | 27.3k | BN_ULONG *table = (BN_ULONG *)buf; |
522 | | |
523 | 27.3k | if (top > b->top) |
524 | 0 | top = b->top; /* this works because 'buf' is explicitly |
525 | | * zeroed */ |
526 | 1.07M | for (i = 0, j = idx; i < top; i++, j += width) { |
527 | 1.04M | table[j] = b->d[i]; |
528 | 1.04M | } |
529 | | |
530 | 27.3k | return 1; |
531 | 27.3k | } |
532 | | |
533 | | static int MOD_EXP_CTIME_COPY_FROM_PREBUF(BIGNUM *b, int top, |
534 | | unsigned char *buf, int idx, |
535 | | int window) |
536 | 224k | { |
537 | 224k | int i, j; |
538 | 224k | int width = 1 << window; |
539 | | /* |
540 | | * We declare table 'volatile' in order to discourage compiler |
541 | | * from reordering loads from the table. Concern is that if |
542 | | * reordered in specific manner loads might give away the |
543 | | * information we are trying to conceal. Some would argue that |
544 | | * compiler can reorder them anyway, but it can as well be |
545 | | * argued that doing so would be violation of standard... |
546 | | */ |
547 | 224k | volatile BN_ULONG *table = (volatile BN_ULONG *)buf; |
548 | | |
549 | 224k | if (bn_wexpand(b, top) == NULL) |
550 | 0 | return 0; |
551 | | |
552 | 224k | if (window <= 3) { |
553 | 74.7k | for (i = 0; i < top; i++, table += width) { |
554 | 70.7k | BN_ULONG acc = 0; |
555 | | |
556 | 637k | for (j = 0; j < width; j++) { |
557 | 566k | acc |= table[j] & |
558 | 566k | ((BN_ULONG)0 - (constant_time_eq_int(j,idx)&1)); |
559 | 566k | } |
560 | | |
561 | 70.7k | b->d[i] = acc; |
562 | 70.7k | } |
563 | 220k | } else { |
564 | 220k | int xstride = 1 << (window - 2); |
565 | 220k | BN_ULONG y0, y1, y2, y3; |
566 | | |
567 | 220k | i = idx >> (window - 2); /* equivalent of idx / xstride */ |
568 | 220k | idx &= xstride - 1; /* equivalent of idx % xstride */ |
569 | | |
570 | 220k | y0 = (BN_ULONG)0 - (constant_time_eq_int(i,0)&1); |
571 | 220k | y1 = (BN_ULONG)0 - (constant_time_eq_int(i,1)&1); |
572 | 220k | y2 = (BN_ULONG)0 - (constant_time_eq_int(i,2)&1); |
573 | 220k | y3 = (BN_ULONG)0 - (constant_time_eq_int(i,3)&1); |
574 | | |
575 | 16.4M | for (i = 0; i < top; i++, table += width) { |
576 | 16.2M | BN_ULONG acc = 0; |
577 | | |
578 | 271M | for (j = 0; j < xstride; j++) { |
579 | 255M | acc |= ( (table[j + 0 * xstride] & y0) | |
580 | 255M | (table[j + 1 * xstride] & y1) | |
581 | 255M | (table[j + 2 * xstride] & y2) | |
582 | 255M | (table[j + 3 * xstride] & y3) ) |
583 | 255M | & ((BN_ULONG)0 - (constant_time_eq_int(j,idx)&1)); |
584 | 255M | } |
585 | | |
586 | 16.2M | b->d[i] = acc; |
587 | 16.2M | } |
588 | 220k | } |
589 | | |
590 | 224k | b->top = top; |
591 | 224k | b->flags |= BN_FLG_FIXED_TOP; |
592 | 224k | return 1; |
593 | 224k | } |
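To make the access pattern concrete, below is a standalone sketch (a hypothetical helper, not part of OpenSSL) of the same gather idea used by MOD_EXP_CTIME_COPY_FROM_PREBUF: every table entry is read on every lookup and all but the wanted one are masked away, so the sequence of addresses touched is independent of the secret index. The real code additionally uses constant_time_eq_int() instead of a bare comparison, so the compiler is less tempted to reintroduce a branch, and interleaves the BIGNUM words across cache lines.

    #include <stddef.h>
    #include <stdint.h>

    /* Hypothetical illustration: constant-time selection of table[idx].
     * All |width| entries are read; a mask keeps only the entry at |idx|. */
    static uint64_t ct_table_lookup(const volatile uint64_t *table,
                                    size_t width, size_t idx)
    {
        uint64_t acc = 0;
        size_t j;

        for (j = 0; j < width; j++) {
            /* all-ones when j == idx, all-zeros otherwise */
            uint64_t mask = 0 - (uint64_t)(j == idx);

            acc |= table[j] & mask;
        }
        return acc;
    }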
594 | | |
595 | | /* |
596 | | * Given a pointer value, compute the next address that is a cache line |
597 | | * multiple. |
598 | | */ |
599 | | #define MOD_EXP_CTIME_ALIGN(x_) \ |
600 | 1.37k | ((unsigned char*)(x_) + (MOD_EXP_CTIME_MIN_CACHE_LINE_WIDTH - (((size_t)(x_)) & (MOD_EXP_CTIME_MIN_CACHE_LINE_MASK)))) |
601 | | |
602 | | /* |
603 | | * This variant of BN_mod_exp_mont() uses fixed windows and the special |
604 | | * precomputation memory layout to limit data-dependency to a minimum to |
605 | | * protect secret exponents (cf. the hyper-threading timing attacks pointed |
606 | | * out by Colin Percival, |
607 | | * http://www.daemonology.net/hyperthreading-considered-harmful/) |
608 | | */ |
609 | | int BN_mod_exp_mont_consttime(BIGNUM *rr, const BIGNUM *a, const BIGNUM *p, |
610 | | const BIGNUM *m, BN_CTX *ctx, |
611 | | BN_MONT_CTX *in_mont) |
612 | 2.25k | { |
613 | 2.25k | int i, bits, ret = 0, window, wvalue, wmask, window0; |
614 | 2.25k | int top; |
615 | 2.25k | BN_MONT_CTX *mont = NULL; |
616 | | |
617 | 2.25k | int numPowers; |
618 | 2.25k | unsigned char *powerbufFree = NULL; |
619 | 2.25k | int powerbufLen = 0; |
620 | 2.25k | unsigned char *powerbuf = NULL; |
621 | 2.25k | BIGNUM tmp, am; |
622 | | #if defined(SPARC_T4_MONT) |
623 | | unsigned int t4 = 0; |
624 | | #endif |
625 | | |
626 | 2.25k | bn_check_top(a); |
627 | 2.25k | bn_check_top(p); |
628 | 2.25k | bn_check_top(m); |
629 | | |
630 | 2.25k | if (!BN_is_odd(m)) { |
631 | 839 | ERR_raise(ERR_LIB_BN, BN_R_CALLED_WITH_EVEN_MODULUS); |
632 | 839 | return 0; |
633 | 839 | } |
634 | | |
635 | 1.41k | top = m->top; |
636 | | |
637 | 1.41k | if (top > BN_CONSTTIME_SIZE_LIMIT) { |
638 | | /* Prevent overflowing the powerbufLen computation below */ |
639 | 0 | return BN_mod_exp_mont(rr, a, p, m, ctx, in_mont); |
640 | 0 | } |
641 | | |
642 | | /* |
643 | | * Use all bits stored in |p|, rather than |BN_num_bits|, so we do not leak |
644 | | * whether the top bits are zero. |
645 | | */ |
646 | 1.41k | bits = p->top * BN_BITS2; |
647 | 1.41k | if (bits == 0) { |
648 | | /* x**0 mod 1, or x**0 mod -1 is still zero. */ |
649 | 34 | if (BN_abs_is_word(m, 1)) { |
650 | 7 | ret = 1; |
651 | 7 | BN_zero(rr); |
652 | 27 | } else { |
653 | 27 | ret = BN_one(rr); |
654 | 27 | } |
655 | 34 | return ret; |
656 | 34 | } |
657 | | |
658 | 1.37k | BN_CTX_start(ctx); |
659 | | |
660 | | /* |
661 | | * Allocate a montgomery context if it was not supplied by the caller. If |
662 | | * this is not done, things will break in the montgomery part. |
663 | | */ |
664 | 1.37k | if (in_mont != NULL) |
665 | 589 | mont = in_mont; |
666 | 790 | else { |
667 | 790 | if ((mont = BN_MONT_CTX_new()) == NULL) |
668 | 0 | goto err; |
669 | 790 | if (!BN_MONT_CTX_set(mont, m, ctx)) |
670 | 0 | goto err; |
671 | 790 | } |
672 | | |
673 | 1.37k | if (a->neg || BN_ucmp(a, m) >= 0) { |
674 | 567 | BIGNUM *reduced = BN_CTX_get(ctx); |
675 | 567 | if (reduced == NULL |
676 | 567 | || !BN_nnmod(reduced, a, m, ctx)) { |
677 | 0 | goto err; |
678 | 0 | } |
679 | 567 | a = reduced; |
680 | 567 | } |
681 | | |
682 | | #ifdef RSAZ_ENABLED |
683 | | /* |
684 | | * If the size of the operands allow it, perform the optimized |
685 | | * RSAZ exponentiation. For further information see |
686 | | * crypto/bn/rsaz_exp.c and accompanying assembly modules. |
687 | | */ |
688 | 804 | if ((16 == a->top) && (16 == p->top) && (BN_num_bits(m) == 1024) |
689 | 804 | && rsaz_avx2_eligible()) { |
690 | 0 | if (NULL == bn_wexpand(rr, 16)) |
691 | 0 | goto err; |
692 | 0 | RSAZ_1024_mod_exp_avx2(rr->d, a->d, p->d, m->d, mont->RR.d, |
693 | 0 | mont->n0[0]); |
694 | 0 | rr->top = 16; |
695 | 0 | rr->neg = 0; |
696 | 0 | bn_correct_top(rr); |
697 | 0 | ret = 1; |
698 | 0 | goto err; |
699 | 804 | } else if ((8 == a->top) && (8 == p->top) && (BN_num_bits(m) == 512)) { |
700 | 1 | if (NULL == bn_wexpand(rr, 8)) |
701 | 0 | goto err; |
702 | 1 | RSAZ_512_mod_exp(rr->d, a->d, p->d, m->d, mont->n0[0], mont->RR.d); |
703 | 1 | rr->top = 8; |
704 | 1 | rr->neg = 0; |
705 | 1 | bn_correct_top(rr); |
706 | 1 | ret = 1; |
707 | 1 | goto err; |
708 | 1 | } |
709 | 803 | #endif |
710 | | |
711 | | /* Get the window size to use with size of p. */ |
712 | 1.37k | window = BN_window_bits_for_ctime_exponent_size(bits); |
713 | | #if defined(SPARC_T4_MONT) |
714 | | if (window >= 5 && (top & 15) == 0 && top <= 64 && |
715 | | (OPENSSL_sparcv9cap_P[1] & (CFR_MONTMUL | CFR_MONTSQR)) == |
716 | | (CFR_MONTMUL | CFR_MONTSQR) && (t4 = OPENSSL_sparcv9cap_P[0])) |
717 | | window = 5; |
718 | | else |
719 | | #endif |
720 | | #if defined(OPENSSL_BN_ASM_MONT5) |
721 | 803 | if (window >= 5 && top <= BN_SOFT_LIMIT) { |
722 | 405 | window = 5; /* ~5% improvement for RSA2048 sign, and even |
723 | | * for RSA4096 */ |
724 | | /* reserve space for mont->N.d[] copy */ |
725 | 405 | powerbufLen += top * sizeof(mont->N.d[0]); |
726 | 405 | } |
727 | | #endif |
728 | 1.37k | (void)0; |
729 | | |
730 | | /* |
731 | | * Allocate a buffer large enough to hold all of the pre-computed powers |
732 | | * of am, am itself and tmp. |
733 | | */ |
734 | 1.37k | numPowers = 1 << window; |
735 | 1.37k | powerbufLen += sizeof(m->d[0]) * (top * numPowers + |
736 | 1.37k | ((2 * top) > |
737 | 1.37k | numPowers ? (2 * top) : numPowers)); |
738 | 1.37k | #ifdef alloca |
739 | 1.37k | if (powerbufLen < 3072) |
740 | 997 | powerbufFree = |
741 | 997 | alloca(powerbufLen + MOD_EXP_CTIME_MIN_CACHE_LINE_WIDTH); |
742 | 381 | else |
743 | 381 | #endif |
744 | 381 | if ((powerbufFree = |
745 | 381 | OPENSSL_malloc(powerbufLen + MOD_EXP_CTIME_MIN_CACHE_LINE_WIDTH)) |
746 | 381 | == NULL) |
747 | 0 | goto err; |
748 | | |
749 | 1.37k | powerbuf = MOD_EXP_CTIME_ALIGN(powerbufFree); |
750 | 1.37k | memset(powerbuf, 0, powerbufLen); |
751 | | |
752 | 1.37k | #ifdef alloca |
753 | 1.37k | if (powerbufLen < 3072) |
754 | 997 | powerbufFree = NULL; |
755 | 1.37k | #endif |
756 | | |
757 | | /* lay down tmp and am right after powers table */ |
758 | 1.37k | tmp.d = (BN_ULONG *)(powerbuf + sizeof(m->d[0]) * top * numPowers); |
759 | 1.37k | am.d = tmp.d + top; |
760 | 1.37k | tmp.top = am.top = 0; |
761 | 1.37k | tmp.dmax = am.dmax = top; |
762 | 1.37k | tmp.neg = am.neg = 0; |
763 | 1.37k | tmp.flags = am.flags = BN_FLG_STATIC_DATA; |
764 | | |
765 | | /* prepare a^0 in Montgomery domain */ |
766 | 1.37k | #if 1 /* by Shay Gueron's suggestion */ |
767 | 1.37k | if (m->d[top - 1] & (((BN_ULONG)1) << (BN_BITS2 - 1))) { |
768 | | /* 2^(top*BN_BITS2) - m */ |
769 | 437 | tmp.d[0] = (0 - m->d[0]) & BN_MASK2; |
770 | 12.7k | for (i = 1; i < top; i++) |
771 | 12.2k | tmp.d[i] = (~m->d[i]) & BN_MASK2; |
772 | 437 | tmp.top = top; |
773 | 437 | } else |
774 | 941 | #endif |
775 | 941 | if (!bn_to_mont_fixed_top(&tmp, BN_value_one(), mont, ctx)) |
776 | 0 | goto err; |
777 | | |
778 | | /* prepare a^1 in Montgomery domain */ |
779 | 1.37k | if (!bn_to_mont_fixed_top(&am, a, mont, ctx)) |
780 | 0 | goto err; |
781 | | |
782 | 1.37k | if (top > BN_SOFT_LIMIT) |
783 | 0 | goto fallback; |
784 | | |
785 | | #if defined(SPARC_T4_MONT) |
786 | | if (t4) { |
787 | | typedef int (*bn_pwr5_mont_f) (BN_ULONG *tp, const BN_ULONG *np, |
788 | | const BN_ULONG *n0, const void *table, |
789 | | int power, int bits); |
790 | | int bn_pwr5_mont_t4_8(BN_ULONG *tp, const BN_ULONG *np, |
791 | | const BN_ULONG *n0, const void *table, |
792 | | int power, int bits); |
793 | | int bn_pwr5_mont_t4_16(BN_ULONG *tp, const BN_ULONG *np, |
794 | | const BN_ULONG *n0, const void *table, |
795 | | int power, int bits); |
796 | | int bn_pwr5_mont_t4_24(BN_ULONG *tp, const BN_ULONG *np, |
797 | | const BN_ULONG *n0, const void *table, |
798 | | int power, int bits); |
799 | | int bn_pwr5_mont_t4_32(BN_ULONG *tp, const BN_ULONG *np, |
800 | | const BN_ULONG *n0, const void *table, |
801 | | int power, int bits); |
802 | | static const bn_pwr5_mont_f pwr5_funcs[4] = { |
803 | | bn_pwr5_mont_t4_8, bn_pwr5_mont_t4_16, |
804 | | bn_pwr5_mont_t4_24, bn_pwr5_mont_t4_32 |
805 | | }; |
806 | | bn_pwr5_mont_f pwr5_worker = pwr5_funcs[top / 16 - 1]; |
807 | | |
808 | | typedef int (*bn_mul_mont_f) (BN_ULONG *rp, const BN_ULONG *ap, |
809 | | const void *bp, const BN_ULONG *np, |
810 | | const BN_ULONG *n0); |
811 | | int bn_mul_mont_t4_8(BN_ULONG *rp, const BN_ULONG *ap, const void *bp, |
812 | | const BN_ULONG *np, const BN_ULONG *n0); |
813 | | int bn_mul_mont_t4_16(BN_ULONG *rp, const BN_ULONG *ap, |
814 | | const void *bp, const BN_ULONG *np, |
815 | | const BN_ULONG *n0); |
816 | | int bn_mul_mont_t4_24(BN_ULONG *rp, const BN_ULONG *ap, |
817 | | const void *bp, const BN_ULONG *np, |
818 | | const BN_ULONG *n0); |
819 | | int bn_mul_mont_t4_32(BN_ULONG *rp, const BN_ULONG *ap, |
820 | | const void *bp, const BN_ULONG *np, |
821 | | const BN_ULONG *n0); |
822 | | static const bn_mul_mont_f mul_funcs[4] = { |
823 | | bn_mul_mont_t4_8, bn_mul_mont_t4_16, |
824 | | bn_mul_mont_t4_24, bn_mul_mont_t4_32 |
825 | | }; |
826 | | bn_mul_mont_f mul_worker = mul_funcs[top / 16 - 1]; |
827 | | |
828 | | void bn_mul_mont_vis3(BN_ULONG *rp, const BN_ULONG *ap, |
829 | | const void *bp, const BN_ULONG *np, |
830 | | const BN_ULONG *n0, int num); |
831 | | void bn_mul_mont_t4(BN_ULONG *rp, const BN_ULONG *ap, |
832 | | const void *bp, const BN_ULONG *np, |
833 | | const BN_ULONG *n0, int num); |
834 | | void bn_mul_mont_gather5_t4(BN_ULONG *rp, const BN_ULONG *ap, |
835 | | const void *table, const BN_ULONG *np, |
836 | | const BN_ULONG *n0, int num, int power); |
837 | | void bn_flip_n_scatter5_t4(const BN_ULONG *inp, size_t num, |
838 | | void *table, size_t power); |
839 | | void bn_gather5_t4(BN_ULONG *out, size_t num, |
840 | | void *table, size_t power); |
841 | | void bn_flip_t4(BN_ULONG *dst, BN_ULONG *src, size_t num); |
842 | | |
843 | | BN_ULONG *np = mont->N.d, *n0 = mont->n0; |
844 | | int stride = 5 * (6 - (top / 16 - 1)); /* multiple of 5, but less |
845 | | * than 32 */ |
846 | | |
847 | | /* |
848 | | * BN_to_montgomery can contaminate words above .top [in |
849 | | * BN_DEBUG build... |
850 | | */ |
851 | | for (i = am.top; i < top; i++) |
852 | | am.d[i] = 0; |
853 | | for (i = tmp.top; i < top; i++) |
854 | | tmp.d[i] = 0; |
855 | | |
856 | | bn_flip_n_scatter5_t4(tmp.d, top, powerbuf, 0); |
857 | | bn_flip_n_scatter5_t4(am.d, top, powerbuf, 1); |
858 | | if (!(*mul_worker) (tmp.d, am.d, am.d, np, n0) && |
859 | | !(*mul_worker) (tmp.d, am.d, am.d, np, n0)) |
860 | | bn_mul_mont_vis3(tmp.d, am.d, am.d, np, n0, top); |
861 | | bn_flip_n_scatter5_t4(tmp.d, top, powerbuf, 2); |
862 | | |
863 | | for (i = 3; i < 32; i++) { |
864 | | /* Calculate a^i = a^(i-1) * a */ |
865 | | if (!(*mul_worker) (tmp.d, tmp.d, am.d, np, n0) && |
866 | | !(*mul_worker) (tmp.d, tmp.d, am.d, np, n0)) |
867 | | bn_mul_mont_vis3(tmp.d, tmp.d, am.d, np, n0, top); |
868 | | bn_flip_n_scatter5_t4(tmp.d, top, powerbuf, i); |
869 | | } |
870 | | |
871 | | /* switch to 64-bit domain */ |
872 | | np = alloca(top * sizeof(BN_ULONG)); |
873 | | top /= 2; |
874 | | bn_flip_t4(np, mont->N.d, top); |
875 | | |
876 | | /* |
877 | | * The exponent may not have a whole number of fixed-size windows. |
878 | | * To simplify the main loop, the initial window has between 1 and |
879 | | * full-window-size bits such that what remains is always a whole |
880 | | * number of windows |
881 | | */ |
882 | | window0 = (bits - 1) % 5 + 1; |
883 | | wmask = (1 << window0) - 1; |
884 | | bits -= window0; |
885 | | wvalue = bn_get_bits(p, bits) & wmask; |
886 | | bn_gather5_t4(tmp.d, top, powerbuf, wvalue); |
887 | | |
888 | | /* |
889 | | * Scan the exponent one window at a time starting from the most |
890 | | * significant bits. |
891 | | */ |
892 | | while (bits > 0) { |
893 | | if (bits < stride) |
894 | | stride = bits; |
895 | | bits -= stride; |
896 | | wvalue = bn_get_bits(p, bits); |
897 | | |
898 | | if ((*pwr5_worker) (tmp.d, np, n0, powerbuf, wvalue, stride)) |
899 | | continue; |
900 | | /* retry once and fall back */ |
901 | | if ((*pwr5_worker) (tmp.d, np, n0, powerbuf, wvalue, stride)) |
902 | | continue; |
903 | | |
904 | | bits += stride - 5; |
905 | | wvalue >>= stride - 5; |
906 | | wvalue &= 31; |
907 | | bn_mul_mont_t4(tmp.d, tmp.d, tmp.d, np, n0, top); |
908 | | bn_mul_mont_t4(tmp.d, tmp.d, tmp.d, np, n0, top); |
909 | | bn_mul_mont_t4(tmp.d, tmp.d, tmp.d, np, n0, top); |
910 | | bn_mul_mont_t4(tmp.d, tmp.d, tmp.d, np, n0, top); |
911 | | bn_mul_mont_t4(tmp.d, tmp.d, tmp.d, np, n0, top); |
912 | | bn_mul_mont_gather5_t4(tmp.d, tmp.d, powerbuf, np, n0, top, |
913 | | wvalue); |
914 | | } |
915 | | |
916 | | bn_flip_t4(tmp.d, tmp.d, top); |
917 | | top *= 2; |
918 | | /* back to 32-bit domain */ |
919 | | tmp.top = top; |
920 | | bn_correct_top(&tmp); |
921 | | OPENSSL_cleanse(np, top * sizeof(BN_ULONG)); |
922 | | } else |
923 | | #endif |
924 | | #if defined(OPENSSL_BN_ASM_MONT5) |
925 | 803 | if (window == 5 && top > 1) { |
926 | | /* |
927 | | * This optimization uses ideas from https://eprint.iacr.org/2011/239, |
928 | | * specifically optimization of cache-timing attack countermeasures, |
929 | | * pre-computation optimization, and Almost Montgomery Multiplication. |
930 | | * |
931 | | * The paper discusses a 4-bit window to optimize 512-bit modular |
932 | | * exponentiation, used in RSA-1024 with CRT, but RSA-1024 is no longer |
933 | | * important. |
934 | | * |
935 | | * |bn_mul_mont_gather5| and |bn_power5| implement the "almost" |
936 | | * reduction variant, so the values here may not be fully reduced. |
937 | | * They are bounded by R (i.e. they fit in |top| words), not |m|. |
938 | | * Additionally, we pass these "almost" reduced inputs into |
939 | | * |bn_mul_mont|, which implements the normal reduction variant. |
940 | | * Given those inputs, |bn_mul_mont| may not give reduced |
941 | | * output, but it will still produce "almost" reduced output. |
942 | | */ |
943 | 308 | void bn_mul_mont_gather5(BN_ULONG *rp, const BN_ULONG *ap, |
944 | 308 | const void *table, const BN_ULONG *np, |
945 | 308 | const BN_ULONG *n0, int num, int power); |
946 | 308 | void bn_scatter5(const BN_ULONG *inp, size_t num, |
947 | 308 | void *table, size_t power); |
948 | 308 | void bn_gather5(BN_ULONG *out, size_t num, void *table, size_t power); |
949 | 308 | void bn_power5(BN_ULONG *rp, const BN_ULONG *ap, |
950 | 308 | const void *table, const BN_ULONG *np, |
951 | 308 | const BN_ULONG *n0, int num, int power); |
952 | 308 | int bn_get_bits5(const BN_ULONG *ap, int off); |
953 | | |
954 | 308 | BN_ULONG *n0 = mont->n0, *np; |
955 | | |
956 | | /* |
957 | | * BN_to_montgomery can contaminate words above .top [in |
958 | | * BN_DEBUG build... |
959 | | */ |
960 | 308 | for (i = am.top; i < top; i++) |
961 | 0 | am.d[i] = 0; |
962 | 308 | for (i = tmp.top; i < top; i++) |
963 | 0 | tmp.d[i] = 0; |
964 | | |
965 | | /* |
966 | | * copy mont->N.d[] to improve cache locality |
967 | | */ |
968 | 8.54k | for (np = am.d + top, i = 0; i < top; i++) |
969 | 8.23k | np[i] = mont->N.d[i]; |
970 | | |
971 | | bn_scatter5(tmp.d, top, powerbuf, 0); |
972 | | bn_scatter5(am.d, am.top, powerbuf, 1); |
973 | | bn_mul_mont(tmp.d, am.d, am.d, np, n0, top); |
974 | | bn_scatter5(tmp.d, top, powerbuf, 2); |
975 | | |
976 | | # if 0 |
977 | | for (i = 3; i < 32; i++) { |
978 | | /* Calculate a^i = a^(i-1) * a */ |
979 | | bn_mul_mont_gather5(tmp.d, am.d, powerbuf, np, n0, top, i - 1); |
980 | | bn_scatter5(tmp.d, top, powerbuf, i); |
981 | | } |
982 | | # else |
983 | | /* same as above, but uses squaring for 1/2 of operations */ |
984 | 1.23k | for (i = 4; i < 32; i *= 2) { |
985 | 924 | bn_mul_mont(tmp.d, tmp.d, tmp.d, np, n0, top); |
986 | 924 | bn_scatter5(tmp.d, top, powerbuf, i); |
987 | 924 | } |
988 | 1.23k | for (i = 3; i < 8; i += 2) { |
989 | 924 | int j; |
990 | 924 | bn_mul_mont_gather5(tmp.d, am.d, powerbuf, np, n0, top, i - 1); |
991 | 924 | bn_scatter5(tmp.d, top, powerbuf, i); |
992 | 3.08k | for (j = 2 * i; j < 32; j *= 2) { |
993 | 2.15k | bn_mul_mont(tmp.d, tmp.d, tmp.d, np, n0, top); |
994 | 2.15k | bn_scatter5(tmp.d, top, powerbuf, j); |
995 | 2.15k | } |
996 | 924 | } |
997 | 1.54k | for (; i < 16; i += 2) { |
998 | 1.23k | bn_mul_mont_gather5(tmp.d, am.d, powerbuf, np, n0, top, i - 1); |
999 | 1.23k | bn_scatter5(tmp.d, top, powerbuf, i); |
1000 | 1.23k | bn_mul_mont(tmp.d, tmp.d, tmp.d, np, n0, top); |
1001 | 1.23k | bn_scatter5(tmp.d, top, powerbuf, 2 * i); |
1002 | 1.23k | } |
1003 | 2.77k | for (; i < 32; i += 2) { |
1004 | 2.46k | bn_mul_mont_gather5(tmp.d, am.d, powerbuf, np, n0, top, i - 1); |
1005 | 2.46k | bn_scatter5(tmp.d, top, powerbuf, i); |
1006 | 2.46k | } |
1007 | 308 | # endif |
1008 | | /* |
1009 | | * The exponent may not have a whole number of fixed-size windows. |
1010 | | * To simplify the main loop, the initial window has between 1 and |
1011 | | * full-window-size bits such that what remains is always a whole |
1012 | | * number of windows |
1013 | | */ |
1014 | 308 | window0 = (bits - 1) % 5 + 1; |
1015 | 308 | wmask = (1 << window0) - 1; |
1016 | 308 | bits -= window0; |
1017 | 308 | wvalue = bn_get_bits(p, bits) & wmask; |
1018 | 308 | bn_gather5(tmp.d, top, powerbuf, wvalue); |
1019 | | |
1020 | | /* |
1021 | | * Scan the exponent one window at a time starting from the most |
1022 | | * significant bits. |
1023 | | */ |
1024 | 308 | if (top & 7) { |
1025 | 86.8k | while (bits > 0) { |
1026 | 86.5k | bn_mul_mont(tmp.d, tmp.d, tmp.d, np, n0, top); |
1027 | 86.5k | bn_mul_mont(tmp.d, tmp.d, tmp.d, np, n0, top); |
1028 | 86.5k | bn_mul_mont(tmp.d, tmp.d, tmp.d, np, n0, top); |
1029 | 86.5k | bn_mul_mont(tmp.d, tmp.d, tmp.d, np, n0, top); |
1030 | 86.5k | bn_mul_mont(tmp.d, tmp.d, tmp.d, np, n0, top); |
1031 | 86.5k | bn_mul_mont_gather5(tmp.d, tmp.d, powerbuf, np, n0, top, |
1032 | 86.5k | bn_get_bits5(p->d, bits -= 5)); |
1033 | 86.5k | } |
1034 | 244 | } else { |
1035 | 22.6k | while (bits > 0) { |
1036 | 22.5k | bn_power5(tmp.d, tmp.d, powerbuf, np, n0, top, |
1037 | 22.5k | bn_get_bits5(p->d, bits -= 5)); |
1038 | 22.5k | } |
1039 | 64 | } |
1040 | | |
1041 | 308 | tmp.top = top; |
1042 | | /* |
1043 | | * The result is now in |tmp| in Montgomery form, but it may not be |
1044 | | * fully reduced. This is within bounds for |BN_from_montgomery| |
1045 | | * (tmp < R <= m*R) so it will, when converting from Montgomery form, |
1046 | | * produce a fully reduced result. |
1047 | | * |
1048 | | * This differs from Figure 2 of the paper, which uses AMM(h, 1) to |
1049 | | * convert from Montgomery form with unreduced output, followed by an |
1050 | | * extra reduction step. In the paper's terminology, we replace |
1051 | | * steps 9 and 10 with MM(h, 1). |
1052 | | */ |
1053 | 308 | } else |
1054 | 495 | #endif |
1055 | 495 | { |
1056 | 495 | fallback: |
1057 | 1.07k | if (!MOD_EXP_CTIME_COPY_TO_PREBUF(&tmp, top, powerbuf, 0, window)) |
1058 | 0 | goto err; |
1059 | 1.07k | if (!MOD_EXP_CTIME_COPY_TO_PREBUF(&am, top, powerbuf, 1, window)) |
1060 | 0 | goto err; |
1061 | | |
1062 | | /* |
1063 | | * If the window size is greater than 1, then calculate |
1064 | | * val[i=2..2^winsize-1]. Powers are computed as a*a^(i-1) (even |
1065 | | * powers could instead be computed as (a^(i/2))^2 to use the slight |
1066 | | * performance advantage of sqr over mul). |
1067 | | */ |
1068 | 1.07k | if (window > 1) { |
1069 | 1.07k | if (!bn_mul_mont_fixed_top(&tmp, &am, &am, mont, ctx)) |
1070 | 0 | goto err; |
1071 | 1.07k | if (!MOD_EXP_CTIME_COPY_TO_PREBUF(&tmp, top, powerbuf, 2, |
1072 | 1.07k | window)) |
1073 | 0 | goto err; |
1074 | 25.2k | for (i = 3; i < numPowers; i++) { |
1075 | | /* Calculate a^i = a^(i-1) * a */ |
1076 | 24.1k | if (!bn_mul_mont_fixed_top(&tmp, &am, &tmp, mont, ctx)) |
1077 | 0 | goto err; |
1078 | 24.1k | if (!MOD_EXP_CTIME_COPY_TO_PREBUF(&tmp, top, powerbuf, i, |
1079 | 24.1k | window)) |
1080 | 0 | goto err; |
1081 | 24.1k | } |
1082 | 1.07k | } |
1083 | | |
1084 | | /* |
1085 | | * The exponent may not have a whole number of fixed-size windows. |
1086 | | * To simplify the main loop, the initial window has between 1 and |
1087 | | * full-window-size bits such that what remains is always a whole |
1088 | | * number of windows |
1089 | | */ |
1090 | 1.07k | window0 = (bits - 1) % window + 1; |
1091 | 495 | wmask = (1 << window0) - 1; |
1092 | 495 | bits -= window0; |
1093 | 495 | wvalue = bn_get_bits(p, bits) & wmask; |
1094 | 1.07k | if (!MOD_EXP_CTIME_COPY_FROM_PREBUF(&tmp, top, powerbuf, wvalue, |
1095 | 1.07k | window)) |
1096 | 0 | goto err; |
1097 | | |
1098 | 1.07k | wmask = (1 << window) - 1; |
1099 | | /* |
1100 | | * Scan the exponent one window at a time starting from the most |
1101 | | * significant bits. |
1102 | | */ |
1103 | 224k | while (bits > 0) { |
1104 | | |
1105 | | /* Square the result window-size times */ |
1106 | 1.45M | for (i = 0; i < window; i++) |
1107 | 1.23M | if (!bn_mul_mont_fixed_top(&tmp, &tmp, &tmp, mont, ctx)) |
1108 | 0 | goto err; |
1109 | | |
1110 | | /* |
1111 | | * Get a window's worth of bits from the exponent |
1112 | | * This avoids calling BN_is_bit_set for each bit, which |
1113 | | * is not only slower but also makes each bit vulnerable to |
1114 | | * EM (and likely other) side-channel attacks like One&Done |
1115 | | * (for details see "One&Done: A Single-Decryption EM-Based |
1116 | | * Attack on OpenSSL's Constant-Time Blinded RSA" by M. Alam, |
1117 | | * H. Khan, M. Dey, N. Sinha, R. Callan, A. Zajic, and |
1118 | | * M. Prvulovic, in USENIX Security'18) |
1119 | | */ |
1120 | 223k | bits -= window; |
1121 | 223k | wvalue = bn_get_bits(p, bits) & wmask; |
1122 | | /* |
1123 | | * Fetch the appropriate pre-computed value from the pre-buf |
1124 | | */ |
1125 | 223k | if (!MOD_EXP_CTIME_COPY_FROM_PREBUF(&am, top, powerbuf, wvalue, |
1126 | 223k | window)) |
1127 | 0 | goto err; |
1128 | | |
1129 | | /* Multiply the result into the intermediate result */ |
1130 | 223k | if (!bn_mul_mont_fixed_top(&tmp, &tmp, &am, mont, ctx)) |
1131 | 0 | goto err; |
1132 | 223k | } |
1133 | 495 | } |
1134 | | |
1135 | | /* |
1136 | | * Done with zero-padded intermediate BIGNUMs. Final BN_from_montgomery |
1137 | | * removes padding [if any] and makes return value suitable for public |
1138 | | * API consumer. |
1139 | | */ |
1140 | | #if defined(SPARC_T4_MONT) |
1141 | | if (OPENSSL_sparcv9cap_P[0] & (SPARCV9_VIS3 | SPARCV9_PREFER_FPU)) { |
1142 | | am.d[0] = 1; /* borrow am */ |
1143 | | for (i = 1; i < top; i++) |
1144 | | am.d[i] = 0; |
1145 | | if (!BN_mod_mul_montgomery(rr, &tmp, &am, mont, ctx)) |
1146 | | goto err; |
1147 | | } else |
1148 | | #endif |
1149 | 1.37k | if (!BN_from_montgomery(rr, &tmp, mont, ctx)) |
1150 | 0 | goto err; |
1151 | 1.37k | ret = 1; |
1152 | 1.37k | err: |
1153 | 1.37k | if (in_mont == NULL) |
1154 | 790 | BN_MONT_CTX_free(mont); |
1155 | 1.37k | if (powerbuf != NULL) { |
1156 | 1.37k | OPENSSL_cleanse(powerbuf, powerbufLen); |
1157 | 1.37k | OPENSSL_free(powerbufFree); |
1158 | 1.37k | } |
1159 | 1.37k | BN_CTX_end(ctx); |
1160 | 1.37k | return ret; |
1161 | 1.37k | }
Prvulovic, in USENIX Security'18) | 1119 | | */ | 1120 | 173k | bits -= window; | 1121 | 173k | wvalue = bn_get_bits(p, bits) & wmask; | 1122 | | /* | 1123 | | * Fetch the appropriate pre-computed value from the pre-buf | 1124 | | */ | 1125 | 173k | if (!MOD_EXP_CTIME_COPY_FROM_PREBUF(&am, top, powerbuf, wvalue, | 1126 | 173k | window)) | 1127 | 0 | goto err; | 1128 | | | 1129 | | /* Multiply the result into the intermediate result */ | 1130 | 173k | if (!bn_mul_mont_fixed_top(&tmp, &tmp, &am, mont, ctx)) | 1131 | 0 | goto err; | 1132 | 173k | } | 1133 | 575 | } | 1134 | | | 1135 | | /* | 1136 | | * Done with zero-padded intermediate BIGNUMs. Final BN_from_montgomery | 1137 | | * removes padding [if any] and makes return value suitable for public | 1138 | | * API consumer. | 1139 | | */ | 1140 | | #if defined(SPARC_T4_MONT) | 1141 | | if (OPENSSL_sparcv9cap_P[0] & (SPARCV9_VIS3 | SPARCV9_PREFER_FPU)) { | 1142 | | am.d[0] = 1; /* borrow am */ | 1143 | | for (i = 1; i < top; i++) | 1144 | | am.d[i] = 0; | 1145 | | if (!BN_mod_mul_montgomery(rr, &tmp, &am, mont, ctx)) | 1146 | | goto err; | 1147 | | } else | 1148 | | #endif | 1149 | 575 | if (!BN_from_montgomery(rr, &tmp, mont, ctx)) | 1150 | 0 | goto err; | 1151 | 575 | ret = 1; | 1152 | 575 | err: | 1153 | 575 | if (in_mont == NULL) | 1154 | 290 | BN_MONT_CTX_free(mont); | 1155 | 575 | if (powerbuf != NULL) { | 1156 | 575 | OPENSSL_cleanse(powerbuf, powerbufLen); | 1157 | 575 | OPENSSL_free(powerbufFree); | 1158 | 575 | } | 1159 | 575 | BN_CTX_end(ctx); | 1160 | 575 | return ret; | 1161 | 575 | } |
BN_mod_exp_mont_consttime Line | Count | Source | 612 | 1.32k | { | 613 | 1.32k | int i, bits, ret = 0, window, wvalue, wmask, window0; | 614 | 1.32k | int top; | 615 | 1.32k | BN_MONT_CTX *mont = NULL; | 616 | | | 617 | 1.32k | int numPowers; | 618 | 1.32k | unsigned char *powerbufFree = NULL; | 619 | 1.32k | int powerbufLen = 0; | 620 | 1.32k | unsigned char *powerbuf = NULL; | 621 | 1.32k | BIGNUM tmp, am; | 622 | | #if defined(SPARC_T4_MONT) | 623 | | unsigned int t4 = 0; | 624 | | #endif | 625 | | | 626 | 1.32k | bn_check_top(a); | 627 | 1.32k | bn_check_top(p); | 628 | 1.32k | bn_check_top(m); | 629 | | | 630 | 1.32k | if (!BN_is_odd(m)) { | 631 | 502 | ERR_raise(ERR_LIB_BN, BN_R_CALLED_WITH_EVEN_MODULUS); | 632 | 502 | return 0; | 633 | 502 | } | 634 | | | 635 | 821 | top = m->top; | 636 | | | 637 | 821 | if (top > BN_CONSTTIME_SIZE_LIMIT) { | 638 | | /* Prevent overflowing the powerbufLen computation below */ | 639 | 0 | return BN_mod_exp_mont(rr, a, p, m, ctx, in_mont); | 640 | 0 | } | 641 | | | 642 | | /* | 643 | | * Use all bits stored in |p|, rather than |BN_num_bits|, so we do not leak | 644 | | * whether the top bits are zero. | 645 | | */ | 646 | 821 | bits = p->top * BN_BITS2; | 647 | 821 | if (bits == 0) { | 648 | | /* x**0 mod 1, or x**0 mod -1 is still zero. */ | 649 | 17 | if (BN_abs_is_word(m, 1)) { | 650 | 3 | ret = 1; | 651 | 3 | BN_zero(rr); | 652 | 14 | } else { | 653 | 14 | ret = BN_one(rr); | 654 | 14 | } | 655 | 17 | return ret; | 656 | 17 | } | 657 | | | 658 | 804 | BN_CTX_start(ctx); | 659 | | | 660 | | /* | 661 | | * Allocate a montgomery context if it was not supplied by the caller. If | 662 | | * this is not done, things will break in the montgomery part. | 663 | | */ | 664 | 804 | if (in_mont != NULL) | 665 | 304 | mont = in_mont; | 666 | 500 | else { | 667 | 500 | if ((mont = BN_MONT_CTX_new()) == NULL) | 668 | 0 | goto err; | 669 | 500 | if (!BN_MONT_CTX_set(mont, m, ctx)) | 670 | 0 | goto err; | 671 | 500 | } | 672 | | | 673 | 804 | if (a->neg || BN_ucmp(a, m) >= 0) { | 674 | 401 | BIGNUM *reduced = BN_CTX_get(ctx); | 675 | 401 | if (reduced == NULL | 676 | 401 | || !BN_nnmod(reduced, a, m, ctx)) { | 677 | 0 | goto err; | 678 | 0 | } | 679 | 401 | a = reduced; | 680 | 401 | } | 681 | | | 682 | 804 | #ifdef RSAZ_ENABLED | 683 | | /* | 684 | | * If the size of the operands allow it, perform the optimized | 685 | | * RSAZ exponentiation. For further information see | 686 | | * crypto/bn/rsaz_exp.c and accompanying assembly modules. | 687 | | */ | 688 | 804 | if ((16 == a->top) && (16 == p->top) && (BN_num_bits(m) == 1024) | 689 | 804 | && rsaz_avx2_eligible()) { | 690 | 0 | if (NULL == bn_wexpand(rr, 16)) | 691 | 0 | goto err; | 692 | 0 | RSAZ_1024_mod_exp_avx2(rr->d, a->d, p->d, m->d, mont->RR.d, | 693 | 0 | mont->n0[0]); | 694 | 0 | rr->top = 16; | 695 | 0 | rr->neg = 0; | 696 | 0 | bn_correct_top(rr); | 697 | 0 | ret = 1; | 698 | 0 | goto err; | 699 | 804 | } else if ((8 == a->top) && (8 == p->top) && (BN_num_bits(m) == 512)) { | 700 | 1 | if (NULL == bn_wexpand(rr, 8)) | 701 | 0 | goto err; | 702 | 1 | RSAZ_512_mod_exp(rr->d, a->d, p->d, m->d, mont->n0[0], mont->RR.d); | 703 | 1 | rr->top = 8; | 704 | 1 | rr->neg = 0; | 705 | 1 | bn_correct_top(rr); | 706 | 1 | ret = 1; | 707 | 1 | goto err; | 708 | 1 | } | 709 | 803 | #endif | 710 | | | 711 | | /* Get the window size to use with size of p. 
*/ | 712 | 803 | window = BN_window_bits_for_ctime_exponent_size(bits); | 713 | | #if defined(SPARC_T4_MONT) | 714 | | if (window >= 5 && (top & 15) == 0 && top <= 64 && | 715 | | (OPENSSL_sparcv9cap_P[1] & (CFR_MONTMUL | CFR_MONTSQR)) == | 716 | | (CFR_MONTMUL | CFR_MONTSQR) && (t4 = OPENSSL_sparcv9cap_P[0])) | 717 | | window = 5; | 718 | | else | 719 | | #endif | 720 | 803 | #if defined(OPENSSL_BN_ASM_MONT5) | 721 | 803 | if (window >= 5 && top <= BN_SOFT_LIMIT) { | 722 | 405 | window = 5; /* ~5% improvement for RSA2048 sign, and even | 723 | | * for RSA4096 */ | 724 | | /* reserve space for mont->N.d[] copy */ | 725 | 405 | powerbufLen += top * sizeof(mont->N.d[0]); | 726 | 405 | } | 727 | 803 | #endif | 728 | 803 | (void)0; | 729 | | | 730 | | /* | 731 | | * Allocate a buffer large enough to hold all of the pre-computed powers | 732 | | * of am, am itself and tmp. | 733 | | */ | 734 | 803 | numPowers = 1 << window; | 735 | 803 | powerbufLen += sizeof(m->d[0]) * (top * numPowers + | 736 | 803 | ((2 * top) > | 737 | 803 | numPowers ? (2 * top) : numPowers)); | 738 | 803 | #ifdef alloca | 739 | 803 | if (powerbufLen < 3072) | 740 | 620 | powerbufFree = | 741 | 620 | alloca(powerbufLen + MOD_EXP_CTIME_MIN_CACHE_LINE_WIDTH); | 742 | 183 | else | 743 | 183 | #endif | 744 | 183 | if ((powerbufFree = | 745 | 183 | OPENSSL_malloc(powerbufLen + MOD_EXP_CTIME_MIN_CACHE_LINE_WIDTH)) | 746 | 183 | == NULL) | 747 | 0 | goto err; | 748 | | | 749 | 803 | powerbuf = MOD_EXP_CTIME_ALIGN(powerbufFree); | 750 | 803 | memset(powerbuf, 0, powerbufLen); | 751 | | | 752 | 803 | #ifdef alloca | 753 | 803 | if (powerbufLen < 3072) | 754 | 620 | powerbufFree = NULL; | 755 | 803 | #endif | 756 | | | 757 | | /* lay down tmp and am right after powers table */ | 758 | 803 | tmp.d = (BN_ULONG *)(powerbuf + sizeof(m->d[0]) * top * numPowers); | 759 | 803 | am.d = tmp.d + top; | 760 | 803 | tmp.top = am.top = 0; | 761 | 803 | tmp.dmax = am.dmax = top; | 762 | 803 | tmp.neg = am.neg = 0; | 763 | 803 | tmp.flags = am.flags = BN_FLG_STATIC_DATA; | 764 | | | 765 | | /* prepare a^0 in Montgomery domain */ | 766 | 803 | #if 1 /* by Shay Gueron's suggestion */ | 767 | 803 | if (m->d[top - 1] & (((BN_ULONG)1) << (BN_BITS2 - 1))) { | 768 | | /* 2^(top*BN_BITS2) - m */ | 769 | 228 | tmp.d[0] = (0 - m->d[0]) & BN_MASK2; | 770 | 1.98k | for (i = 1; i < top; i++) | 771 | 1.75k | tmp.d[i] = (~m->d[i]) & BN_MASK2; | 772 | 228 | tmp.top = top; | 773 | 228 | } else | 774 | 575 | #endif | 775 | 575 | if (!bn_to_mont_fixed_top(&tmp, BN_value_one(), mont, ctx)) | 776 | 0 | goto err; | 777 | | | 778 | | /* prepare a^1 in Montgomery domain */ | 779 | 803 | if (!bn_to_mont_fixed_top(&am, a, mont, ctx)) | 780 | 0 | goto err; | 781 | | | 782 | 803 | if (top > BN_SOFT_LIMIT) | 783 | 0 | goto fallback; | 784 | | | 785 | | #if defined(SPARC_T4_MONT) | 786 | | if (t4) { | 787 | | typedef int (*bn_pwr5_mont_f) (BN_ULONG *tp, const BN_ULONG *np, | 788 | | const BN_ULONG *n0, const void *table, | 789 | | int power, int bits); | 790 | | int bn_pwr5_mont_t4_8(BN_ULONG *tp, const BN_ULONG *np, | 791 | | const BN_ULONG *n0, const void *table, | 792 | | int power, int bits); | 793 | | int bn_pwr5_mont_t4_16(BN_ULONG *tp, const BN_ULONG *np, | 794 | | const BN_ULONG *n0, const void *table, | 795 | | int power, int bits); | 796 | | int bn_pwr5_mont_t4_24(BN_ULONG *tp, const BN_ULONG *np, | 797 | | const BN_ULONG *n0, const void *table, | 798 | | int power, int bits); | 799 | | int bn_pwr5_mont_t4_32(BN_ULONG *tp, const BN_ULONG *np, | 800 | | const BN_ULONG 
*n0, const void *table, | 801 | | int power, int bits); | 802 | | static const bn_pwr5_mont_f pwr5_funcs[4] = { | 803 | | bn_pwr5_mont_t4_8, bn_pwr5_mont_t4_16, | 804 | | bn_pwr5_mont_t4_24, bn_pwr5_mont_t4_32 | 805 | | }; | 806 | | bn_pwr5_mont_f pwr5_worker = pwr5_funcs[top / 16 - 1]; | 807 | | | 808 | | typedef int (*bn_mul_mont_f) (BN_ULONG *rp, const BN_ULONG *ap, | 809 | | const void *bp, const BN_ULONG *np, | 810 | | const BN_ULONG *n0); | 811 | | int bn_mul_mont_t4_8(BN_ULONG *rp, const BN_ULONG *ap, const void *bp, | 812 | | const BN_ULONG *np, const BN_ULONG *n0); | 813 | | int bn_mul_mont_t4_16(BN_ULONG *rp, const BN_ULONG *ap, | 814 | | const void *bp, const BN_ULONG *np, | 815 | | const BN_ULONG *n0); | 816 | | int bn_mul_mont_t4_24(BN_ULONG *rp, const BN_ULONG *ap, | 817 | | const void *bp, const BN_ULONG *np, | 818 | | const BN_ULONG *n0); | 819 | | int bn_mul_mont_t4_32(BN_ULONG *rp, const BN_ULONG *ap, | 820 | | const void *bp, const BN_ULONG *np, | 821 | | const BN_ULONG *n0); | 822 | | static const bn_mul_mont_f mul_funcs[4] = { | 823 | | bn_mul_mont_t4_8, bn_mul_mont_t4_16, | 824 | | bn_mul_mont_t4_24, bn_mul_mont_t4_32 | 825 | | }; | 826 | | bn_mul_mont_f mul_worker = mul_funcs[top / 16 - 1]; | 827 | | | 828 | | void bn_mul_mont_vis3(BN_ULONG *rp, const BN_ULONG *ap, | 829 | | const void *bp, const BN_ULONG *np, | 830 | | const BN_ULONG *n0, int num); | 831 | | void bn_mul_mont_t4(BN_ULONG *rp, const BN_ULONG *ap, | 832 | | const void *bp, const BN_ULONG *np, | 833 | | const BN_ULONG *n0, int num); | 834 | | void bn_mul_mont_gather5_t4(BN_ULONG *rp, const BN_ULONG *ap, | 835 | | const void *table, const BN_ULONG *np, | 836 | | const BN_ULONG *n0, int num, int power); | 837 | | void bn_flip_n_scatter5_t4(const BN_ULONG *inp, size_t num, | 838 | | void *table, size_t power); | 839 | | void bn_gather5_t4(BN_ULONG *out, size_t num, | 840 | | void *table, size_t power); | 841 | | void bn_flip_t4(BN_ULONG *dst, BN_ULONG *src, size_t num); | 842 | | | 843 | | BN_ULONG *np = mont->N.d, *n0 = mont->n0; | 844 | | int stride = 5 * (6 - (top / 16 - 1)); /* multiple of 5, but less | 845 | | * than 32 */ | 846 | | | 847 | | /* | 848 | | * BN_to_montgomery can contaminate words above .top [in | 849 | | * BN_DEBUG build... | 850 | | */ | 851 | | for (i = am.top; i < top; i++) | 852 | | am.d[i] = 0; | 853 | | for (i = tmp.top; i < top; i++) | 854 | | tmp.d[i] = 0; | 855 | | | 856 | | bn_flip_n_scatter5_t4(tmp.d, top, powerbuf, 0); | 857 | | bn_flip_n_scatter5_t4(am.d, top, powerbuf, 1); | 858 | | if (!(*mul_worker) (tmp.d, am.d, am.d, np, n0) && | 859 | | !(*mul_worker) (tmp.d, am.d, am.d, np, n0)) | 860 | | bn_mul_mont_vis3(tmp.d, am.d, am.d, np, n0, top); | 861 | | bn_flip_n_scatter5_t4(tmp.d, top, powerbuf, 2); | 862 | | | 863 | | for (i = 3; i < 32; i++) { | 864 | | /* Calculate a^i = a^(i-1) * a */ | 865 | | if (!(*mul_worker) (tmp.d, tmp.d, am.d, np, n0) && | 866 | | !(*mul_worker) (tmp.d, tmp.d, am.d, np, n0)) | 867 | | bn_mul_mont_vis3(tmp.d, tmp.d, am.d, np, n0, top); | 868 | | bn_flip_n_scatter5_t4(tmp.d, top, powerbuf, i); | 869 | | } | 870 | | | 871 | | /* switch to 64-bit domain */ | 872 | | np = alloca(top * sizeof(BN_ULONG)); | 873 | | top /= 2; | 874 | | bn_flip_t4(np, mont->N.d, top); | 875 | | | 876 | | /* | 877 | | * The exponent may not have a whole number of fixed-size windows. 
| 878 | | * To simplify the main loop, the initial window has between 1 and | 879 | | * full-window-size bits such that what remains is always a whole | 880 | | * number of windows | 881 | | */ | 882 | | window0 = (bits - 1) % 5 + 1; | 883 | | wmask = (1 << window0) - 1; | 884 | | bits -= window0; | 885 | | wvalue = bn_get_bits(p, bits) & wmask; | 886 | | bn_gather5_t4(tmp.d, top, powerbuf, wvalue); | 887 | | | 888 | | /* | 889 | | * Scan the exponent one window at a time starting from the most | 890 | | * significant bits. | 891 | | */ | 892 | | while (bits > 0) { | 893 | | if (bits < stride) | 894 | | stride = bits; | 895 | | bits -= stride; | 896 | | wvalue = bn_get_bits(p, bits); | 897 | | | 898 | | if ((*pwr5_worker) (tmp.d, np, n0, powerbuf, wvalue, stride)) | 899 | | continue; | 900 | | /* retry once and fall back */ | 901 | | if ((*pwr5_worker) (tmp.d, np, n0, powerbuf, wvalue, stride)) | 902 | | continue; | 903 | | | 904 | | bits += stride - 5; | 905 | | wvalue >>= stride - 5; | 906 | | wvalue &= 31; | 907 | | bn_mul_mont_t4(tmp.d, tmp.d, tmp.d, np, n0, top); | 908 | | bn_mul_mont_t4(tmp.d, tmp.d, tmp.d, np, n0, top); | 909 | | bn_mul_mont_t4(tmp.d, tmp.d, tmp.d, np, n0, top); | 910 | | bn_mul_mont_t4(tmp.d, tmp.d, tmp.d, np, n0, top); | 911 | | bn_mul_mont_t4(tmp.d, tmp.d, tmp.d, np, n0, top); | 912 | | bn_mul_mont_gather5_t4(tmp.d, tmp.d, powerbuf, np, n0, top, | 913 | | wvalue); | 914 | | } | 915 | | | 916 | | bn_flip_t4(tmp.d, tmp.d, top); | 917 | | top *= 2; | 918 | | /* back to 32-bit domain */ | 919 | | tmp.top = top; | 920 | | bn_correct_top(&tmp); | 921 | | OPENSSL_cleanse(np, top * sizeof(BN_ULONG)); | 922 | | } else | 923 | | #endif | 924 | 803 | #if defined(OPENSSL_BN_ASM_MONT5) | 925 | 803 | if (window == 5 && top > 1) { | 926 | | /* | 927 | | * This optimization uses ideas from https://eprint.iacr.org/2011/239, | 928 | | * specifically optimization of cache-timing attack countermeasures, | 929 | | * pre-computation optimization, and Almost Montgomery Multiplication. | 930 | | * | 931 | | * The paper discusses a 4-bit window to optimize 512-bit modular | 932 | | * exponentiation, used in RSA-1024 with CRT, but RSA-1024 is no longer | 933 | | * important. | 934 | | * | 935 | | * |bn_mul_mont_gather5| and |bn_power5| implement the "almost" | 936 | | * reduction variant, so the values here may not be fully reduced. | 937 | | * They are bounded by R (i.e. they fit in |top| words), not |m|. | 938 | | * Additionally, we pass these "almost" reduced inputs into | 939 | | * |bn_mul_mont|, which implements the normal reduction variant. | 940 | | * Given those inputs, |bn_mul_mont| may not give reduced | 941 | | * output, but it will still produce "almost" reduced output. | 942 | | */ | 943 | 308 | void bn_mul_mont_gather5(BN_ULONG *rp, const BN_ULONG *ap, | 944 | 308 | const void *table, const BN_ULONG *np, | 945 | 308 | const BN_ULONG *n0, int num, int power); | 946 | 308 | void bn_scatter5(const BN_ULONG *inp, size_t num, | 947 | 308 | void *table, size_t power); | 948 | 308 | void bn_gather5(BN_ULONG *out, size_t num, void *table, size_t power); | 949 | 308 | void bn_power5(BN_ULONG *rp, const BN_ULONG *ap, | 950 | 308 | const void *table, const BN_ULONG *np, | 951 | 308 | const BN_ULONG *n0, int num, int power); | 952 | 308 | int bn_get_bits5(const BN_ULONG *ap, int off); | 953 | | | 954 | 308 | BN_ULONG *n0 = mont->n0, *np; | 955 | | | 956 | | /* | 957 | | * BN_to_montgomery can contaminate words above .top [in | 958 | | * BN_DEBUG build... 
| 959 | | */ | 960 | 308 | for (i = am.top; i < top; i++) | 961 | 0 | am.d[i] = 0; | 962 | 308 | for (i = tmp.top; i < top; i++) | 963 | 0 | tmp.d[i] = 0; | 964 | | | 965 | | /* | 966 | | * copy mont->N.d[] to improve cache locality | 967 | | */ | 968 | 8.54k | for (np = am.d + top, i = 0; i < top; i++) | 969 | 8.23k | np[i] = mont->N.d[i]; | 970 | | | 971 | 308 | bn_scatter5(tmp.d, top, powerbuf, 0); | 972 | 308 | bn_scatter5(am.d, am.top, powerbuf, 1); | 973 | 308 | bn_mul_mont(tmp.d, am.d, am.d, np, n0, top); | 974 | 308 | bn_scatter5(tmp.d, top, powerbuf, 2); | 975 | | | 976 | | # if 0 | 977 | | for (i = 3; i < 32; i++) { | 978 | | /* Calculate a^i = a^(i-1) * a */ | 979 | | bn_mul_mont_gather5(tmp.d, am.d, powerbuf, np, n0, top, i - 1); | 980 | | bn_scatter5(tmp.d, top, powerbuf, i); | 981 | | } | 982 | | # else | 983 | | /* same as above, but uses squaring for 1/2 of operations */ | 984 | 1.23k | for (i = 4; i < 32; i *= 2) { | 985 | 924 | bn_mul_mont(tmp.d, tmp.d, tmp.d, np, n0, top); | 986 | 924 | bn_scatter5(tmp.d, top, powerbuf, i); | 987 | 924 | } | 988 | 1.23k | for (i = 3; i < 8; i += 2) { | 989 | 924 | int j; | 990 | 924 | bn_mul_mont_gather5(tmp.d, am.d, powerbuf, np, n0, top, i - 1); | 991 | 924 | bn_scatter5(tmp.d, top, powerbuf, i); | 992 | 3.08k | for (j = 2 * i; j < 32; j *= 2) { | 993 | 2.15k | bn_mul_mont(tmp.d, tmp.d, tmp.d, np, n0, top); | 994 | 2.15k | bn_scatter5(tmp.d, top, powerbuf, j); | 995 | 2.15k | } | 996 | 924 | } | 997 | 1.54k | for (; i < 16; i += 2) { | 998 | 1.23k | bn_mul_mont_gather5(tmp.d, am.d, powerbuf, np, n0, top, i - 1); | 999 | 1.23k | bn_scatter5(tmp.d, top, powerbuf, i); | 1000 | 1.23k | bn_mul_mont(tmp.d, tmp.d, tmp.d, np, n0, top); | 1001 | 1.23k | bn_scatter5(tmp.d, top, powerbuf, 2 * i); | 1002 | 1.23k | } | 1003 | 2.77k | for (; i < 32; i += 2) { | 1004 | 2.46k | bn_mul_mont_gather5(tmp.d, am.d, powerbuf, np, n0, top, i - 1); | 1005 | 2.46k | bn_scatter5(tmp.d, top, powerbuf, i); | 1006 | 2.46k | } | 1007 | 308 | # endif | 1008 | | /* | 1009 | | * The exponent may not have a whole number of fixed-size windows. | 1010 | | * To simplify the main loop, the initial window has between 1 and | 1011 | | * full-window-size bits such that what remains is always a whole | 1012 | | * number of windows | 1013 | | */ | 1014 | 308 | window0 = (bits - 1) % 5 + 1; | 1015 | 308 | wmask = (1 << window0) - 1; | 1016 | 308 | bits -= window0; | 1017 | 308 | wvalue = bn_get_bits(p, bits) & wmask; | 1018 | 308 | bn_gather5(tmp.d, top, powerbuf, wvalue); | 1019 | | | 1020 | | /* | 1021 | | * Scan the exponent one window at a time starting from the most | 1022 | | * significant bits. 
| 1023 | | */ | 1024 | 308 | if (top & 7) { | 1025 | 86.8k | while (bits > 0) { | 1026 | 86.5k | bn_mul_mont(tmp.d, tmp.d, tmp.d, np, n0, top); | 1027 | 86.5k | bn_mul_mont(tmp.d, tmp.d, tmp.d, np, n0, top); | 1028 | 86.5k | bn_mul_mont(tmp.d, tmp.d, tmp.d, np, n0, top); | 1029 | 86.5k | bn_mul_mont(tmp.d, tmp.d, tmp.d, np, n0, top); | 1030 | 86.5k | bn_mul_mont(tmp.d, tmp.d, tmp.d, np, n0, top); | 1031 | 86.5k | bn_mul_mont_gather5(tmp.d, tmp.d, powerbuf, np, n0, top, | 1032 | 86.5k | bn_get_bits5(p->d, bits -= 5)); | 1033 | 86.5k | } | 1034 | 244 | } else { | 1035 | 22.6k | while (bits > 0) { | 1036 | 22.5k | bn_power5(tmp.d, tmp.d, powerbuf, np, n0, top, | 1037 | 22.5k | bn_get_bits5(p->d, bits -= 5)); | 1038 | 22.5k | } | 1039 | 64 | } | 1040 | | | 1041 | 308 | tmp.top = top; | 1042 | | /* | 1043 | | * The result is now in |tmp| in Montgomery form, but it may not be | 1044 | | * fully reduced. This is within bounds for |BN_from_montgomery| | 1045 | | * (tmp < R <= m*R) so it will, when converting from Montgomery form, | 1046 | | * produce a fully reduced result. | 1047 | | * | 1048 | | * This differs from Figure 2 of the paper, which uses AMM(h, 1) to | 1049 | | * convert from Montgomery form with unreduced output, followed by an | 1050 | | * extra reduction step. In the paper's terminology, we replace | 1051 | | * steps 9 and 10 with MM(h, 1). | 1052 | | */ | 1053 | 308 | } else | 1054 | 495 | #endif | 1055 | 495 | { | 1056 | 495 | fallback: | 1057 | 495 | if (!MOD_EXP_CTIME_COPY_TO_PREBUF(&tmp, top, powerbuf, 0, window)) | 1058 | 0 | goto err; | 1059 | 495 | if (!MOD_EXP_CTIME_COPY_TO_PREBUF(&am, top, powerbuf, 1, window)) | 1060 | 0 | goto err; | 1061 | | | 1062 | | /* | 1063 | | * If the window size is greater than 1, then calculate | 1064 | | * val[i=2..2^winsize-1]. Powers are computed as a*a^(i-1) (even | 1065 | | * powers could instead be computed as (a^(i/2))^2 to use the slight | 1066 | | * performance advantage of sqr over mul). | 1067 | | */ | 1068 | 495 | if (window > 1) { | 1069 | 495 | if (!bn_mul_mont_fixed_top(&tmp, &am, &am, mont, ctx)) | 1070 | 0 | goto err; | 1071 | 495 | if (!MOD_EXP_CTIME_COPY_TO_PREBUF(&tmp, top, powerbuf, 2, | 1072 | 495 | window)) | 1073 | 0 | goto err; | 1074 | 7.61k | for (i = 3; i < numPowers; i++) { | 1075 | | /* Calculate a^i = a^(i-1) * a */ | 1076 | 7.12k | if (!bn_mul_mont_fixed_top(&tmp, &am, &tmp, mont, ctx)) | 1077 | 0 | goto err; | 1078 | 7.12k | if (!MOD_EXP_CTIME_COPY_TO_PREBUF(&tmp, top, powerbuf, i, | 1079 | 7.12k | window)) | 1080 | 0 | goto err; | 1081 | 7.12k | } | 1082 | 495 | } | 1083 | | | 1084 | | /* | 1085 | | * The exponent may not have a whole number of fixed-size windows. | 1086 | | * To simplify the main loop, the initial window has between 1 and | 1087 | | * full-window-size bits such that what remains is always a whole | 1088 | | * number of windows | 1089 | | */ | 1090 | 495 | window0 = (bits - 1) % window + 1; | 1091 | 495 | wmask = (1 << window0) - 1; | 1092 | 495 | bits -= window0; | 1093 | 495 | wvalue = bn_get_bits(p, bits) & wmask; | 1094 | 495 | if (!MOD_EXP_CTIME_COPY_FROM_PREBUF(&tmp, top, powerbuf, wvalue, | 1095 | 495 | window)) | 1096 | 0 | goto err; | 1097 | | | 1098 | 495 | wmask = (1 << window) - 1; | 1099 | | /* | 1100 | | * Scan the exponent one window at a time starting from the most | 1101 | | * significant bits. 
| 1102 | | */ | 1103 | 50.4k | while (bits > 0) { | 1104 | | | 1105 | | /* Square the result window-size times */ | 1106 | 280k | for (i = 0; i < window; i++) | 1107 | 230k | if (!bn_mul_mont_fixed_top(&tmp, &tmp, &tmp, mont, ctx)) | 1108 | 0 | goto err; | 1109 | | | 1110 | | /* | 1111 | | * Get a window's worth of bits from the exponent | 1112 | | * This avoids calling BN_is_bit_set for each bit, which | 1113 | | * is not only slower but also makes each bit vulnerable to | 1114 | | * EM (and likely other) side-channel attacks like One&Done | 1115 | | * (for details see "One&Done: A Single-Decryption EM-Based | 1116 | | * Attack on OpenSSL's Constant-Time Blinded RSA" by M. Alam, | 1117 | | * H. Khan, M. Dey, N. Sinha, R. Callan, A. Zajic, and | 1118 | | * M. Prvulovic, in USENIX Security'18) | 1119 | | */ | 1120 | 49.9k | bits -= window; | 1121 | 49.9k | wvalue = bn_get_bits(p, bits) & wmask; | 1122 | | /* | 1123 | | * Fetch the appropriate pre-computed value from the pre-buf | 1124 | | */ | 1125 | 49.9k | if (!MOD_EXP_CTIME_COPY_FROM_PREBUF(&am, top, powerbuf, wvalue, | 1126 | 49.9k | window)) | 1127 | 0 | goto err; | 1128 | | | 1129 | | /* Multiply the result into the intermediate result */ | 1130 | 49.9k | if (!bn_mul_mont_fixed_top(&tmp, &tmp, &am, mont, ctx)) | 1131 | 0 | goto err; | 1132 | 49.9k | } | 1133 | 495 | } | 1134 | | | 1135 | | /* | 1136 | | * Done with zero-padded intermediate BIGNUMs. Final BN_from_montgomery | 1137 | | * removes padding [if any] and makes return value suitable for public | 1138 | | * API consumer. | 1139 | | */ | 1140 | | #if defined(SPARC_T4_MONT) | 1141 | | if (OPENSSL_sparcv9cap_P[0] & (SPARCV9_VIS3 | SPARCV9_PREFER_FPU)) { | 1142 | | am.d[0] = 1; /* borrow am */ | 1143 | | for (i = 1; i < top; i++) | 1144 | | am.d[i] = 0; | 1145 | | if (!BN_mod_mul_montgomery(rr, &tmp, &am, mont, ctx)) | 1146 | | goto err; | 1147 | | } else | 1148 | | #endif | 1149 | 803 | if (!BN_from_montgomery(rr, &tmp, mont, ctx)) | 1150 | 0 | goto err; | 1151 | 803 | ret = 1; | 1152 | 804 | err: | 1153 | 804 | if (in_mont == NULL) | 1154 | 500 | BN_MONT_CTX_free(mont); | 1155 | 804 | if (powerbuf != NULL) { | 1156 | 803 | OPENSSL_cleanse(powerbuf, powerbufLen); | 1157 | 803 | OPENSSL_free(powerbufFree); | 1158 | 803 | } | 1159 | 804 | BN_CTX_end(ctx); | 1160 | 804 | return ret; | 1161 | 803 | } |
|
1162 | | |
1163 | | int BN_mod_exp_mont_word(BIGNUM *rr, BN_ULONG a, const BIGNUM *p, |
1164 | | const BIGNUM *m, BN_CTX *ctx, BN_MONT_CTX *in_mont) |
1165 | 59 | { |
1166 | 59 | BN_MONT_CTX *mont = NULL; |
1167 | 59 | int b, bits, ret = 0; |
1168 | 59 | int r_is_one; |
1169 | 59 | BN_ULONG w, next_w; |
1170 | 59 | BIGNUM *r, *t; |
1171 | 59 | BIGNUM *swap_tmp; |
1172 | 59 | #define BN_MOD_MUL_WORD(r, w, m) \ |
1173 | 8.96k | (BN_mul_word(r, (w)) && \ |
1174 | 8.96k | (/* BN_ucmp(r, (m)) < 0 ? 1 :*/ \ |
1175 | 8.96k | (BN_mod(t, r, m, ctx) && (swap_tmp = r, r = t, t = swap_tmp, 1)))) |
1176 | | /* |
1177 | | * BN_MOD_MUL_WORD is only used with 'w' large, so the BN_ucmp test is |
1178 | | * probably more overhead than always using BN_mod (which uses BN_copy if |
1179 | | * a similar test returns true). |
1180 | | */ |
1181 | | /* |
1182 | | * We can use BN_mod and do not need BN_nnmod because our accumulator is |
1183 | | * never negative (the result of BN_mod does not depend on the sign of |
1184 | | * the modulus). |
1185 | | */ |
1186 | 59 | #define BN_TO_MONTGOMERY_WORD(r, w, mont) \ |
1187 | 59 | (BN_set_word(r, (w)) && BN_to_montgomery(r, r, (mont), ctx)) |
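/*
 * In other words: BN_MOD_MUL_WORD(r, w, m) multiplies r by the single
 * word w in place, reduces the product modulo m into the scratch BIGNUM
 * t, and then swaps the r and t pointers so that r names the reduced
 * value again.  BN_TO_MONTGOMERY_WORD(r, w, mont) loads the word w into
 * r and converts it to Montgomery form, so that the subsequent
 * BN_mod_mul_montgomery() squarings operate in the Montgomery domain.
 */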
1188 | | |
1189 | 59 | if (BN_get_flags(p, BN_FLG_CONSTTIME) != 0 |
1190 | 59 | || BN_get_flags(m, BN_FLG_CONSTTIME) != 0) { |
1191 | | /* BN_FLG_CONSTTIME only supported by BN_mod_exp_mont() */ |
1192 | 0 | ERR_raise(ERR_LIB_BN, ERR_R_SHOULD_NOT_HAVE_BEEN_CALLED); |
1193 | 0 | return 0; |
1194 | 0 | } |
1195 | | |
1196 | 59 | bn_check_top(p); |
1197 | 59 | bn_check_top(m); |
1198 | | |
1199 | 59 | if (!BN_is_odd(m)) { |
1200 | 3 | ERR_raise(ERR_LIB_BN, BN_R_CALLED_WITH_EVEN_MODULUS); |
1201 | 3 | return 0; |
1202 | 3 | } |
1203 | 56 | if (m->top == 1) |
1204 | 23 | a %= m->d[0]; /* make sure that 'a' is reduced */ |
1205 | | |
1206 | 56 | bits = BN_num_bits(p); |
1207 | 56 | if (bits == 0) { |
1208 | | /* x**0 mod 1, or x**0 mod -1 is still zero. */ |
1209 | 4 | if (BN_abs_is_word(m, 1)) { |
1210 | 2 | ret = 1; |
1211 | 2 | BN_zero(rr); |
1212 | 2 | } else { |
1213 | 2 | ret = BN_one(rr); |
1214 | 2 | } |
1215 | 4 | return ret; |
1216 | 4 | } |
1217 | 52 | if (a == 0) { |
1218 | 2 | BN_zero(rr); |
1219 | 2 | ret = 1; |
1220 | 2 | return ret; |
1221 | 2 | } |
1222 | | |
1223 | 50 | BN_CTX_start(ctx); |
1224 | 50 | r = BN_CTX_get(ctx); |
1225 | 50 | t = BN_CTX_get(ctx); |
1226 | 50 | if (t == NULL) |
1227 | 0 | goto err; |
1228 | | |
1229 | 50 | if (in_mont != NULL) |
1230 | 0 | mont = in_mont; |
1231 | 50 | else { |
1232 | 50 | if ((mont = BN_MONT_CTX_new()) == NULL) |
1233 | 0 | goto err; |
1234 | 50 | if (!BN_MONT_CTX_set(mont, m, ctx)) |
1235 | 0 | goto err; |
1236 | 50 | } |
1237 | | |
1238 | 50 | r_is_one = 1; /* except for Montgomery factor */ |
1239 | | |
1240 | | /* bits-1 >= 0 */ |
1241 | | |
1242 | | /* The result is accumulated in the product r*w. */ |
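/*
 * More precisely: the value accumulated so far is the product r * w,
 * where r is kept in Montgomery form once r_is_one has been cleared and
 * w is an ordinary single word.  Each loop iteration squares both halves
 * (r via BN_mod_mul_montgomery(), w as a word) and, when the exponent
 * bit is set, multiplies a into the word half only.  Whenever a word
 * product would overflow, w is first folded into r with BN_MOD_MUL_WORD
 * (or BN_TO_MONTGOMERY_WORD while r is still 1) and then restarted.
 */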
1243 | 50 | w = a; /* bit 'bits-1' of 'p' is always set */ |
1244 | 20.2k | for (b = bits - 2; b >= 0; b--) { |
1245 | | /* First, square r*w. */ |
1246 | 20.2k | next_w = w * w; |
1247 | 20.2k | if ((next_w / w) != w) { /* overflow */ |
1248 | 8.87k | if (r_is_one) { |
1249 | 48 | if (!BN_TO_MONTGOMERY_WORD(r, w, mont)) |
1250 | 0 | goto err; |
1251 | 48 | r_is_one = 0; |
1252 | 8.82k | } else { |
1253 | 8.82k | if (!BN_MOD_MUL_WORD(r, w, m)) |
1254 | 0 | goto err; |
1255 | 8.82k | } |
1256 | 8.87k | next_w = 1; |
1257 | 8.87k | } |
1258 | 20.2k | w = next_w; |
1259 | 20.2k | if (!r_is_one) { |
1260 | 20.1k | if (!BN_mod_mul_montgomery(r, r, r, mont, ctx)) |
1261 | 0 | goto err; |
1262 | 20.1k | } |
1263 | | |
1264 | | /* Second, multiply r*w by 'a' if exponent bit is set. */ |
1265 | 20.2k | if (BN_is_bit_set(p, b)) { |
1266 | 10.2k | next_w = w * a; |
1267 | 10.2k | if ((next_w / a) != w) { /* overflow */ |
1268 | 107 | if (r_is_one) { |
1269 | 1 | if (!BN_TO_MONTGOMERY_WORD(r, w, mont)) |
1270 | 0 | goto err; |
1271 | 1 | r_is_one = 0; |
1272 | 106 | } else { |
1273 | 106 | if (!BN_MOD_MUL_WORD(r, w, m)) |
1274 | 0 | goto err; |
1275 | 106 | } |
1276 | 107 | next_w = a; |
1277 | 107 | } |
1278 | 10.2k | w = next_w; |
1279 | 10.2k | } |
1280 | 20.2k | } |
1281 | | |
1282 | | /* Finally, set r:=r*w. */ |
1283 | 50 | if (w != 1) { |
1284 | 29 | if (r_is_one) { |
1285 | 1 | if (!BN_TO_MONTGOMERY_WORD(r, w, mont)) |
1286 | 0 | goto err; |
1287 | 1 | r_is_one = 0; |
1288 | 28 | } else { |
1289 | 28 | if (!BN_MOD_MUL_WORD(r, w, m)) |
1290 | 0 | goto err; |
1291 | 28 | } |
1292 | 29 | } |
1293 | | |
1294 | 50 | if (r_is_one) { /* can happen only if a == 1 */ |
1295 | 0 | if (!BN_one(rr)) |
1296 | 0 | goto err; |
1297 | 50 | } else { |
1298 | 50 | if (!BN_from_montgomery(rr, r, mont, ctx)) |
1299 | 0 | goto err; |
1300 | 50 | } |
1301 | 50 | ret = 1; |
1302 | 50 | err: |
1303 | 50 | if (in_mont == NULL) |
1304 | 50 | BN_MONT_CTX_free(mont); |
1305 | 50 | BN_CTX_end(ctx); |
1306 | 50 | bn_check_top(rr); |
1307 | 50 | return ret; |
1308 | 50 | } |
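/*
 * A minimal standalone sketch of the overflow test used in
 * BN_mod_exp_mont_word() above (illustrative only, not OpenSSL code;
 * mul_overflows is a made-up name).  A single-word multiplication wraps
 * modulo the word size, so the product overflowed exactly when dividing
 * the wrapped result by one non-zero factor does not give back the
 * other factor.
 */
#include <stdio.h>

static int mul_overflows(unsigned long a, unsigned long b, unsigned long *prod)
{
    *prod = a * b;                      /* wraps around on overflow */
    return a != 0 && *prod / a != b;
}

int main(void)
{
    unsigned long p;

    printf("%d\n", mul_overflows(3, 5, &p));                                 /* 0, p == 15 */
    printf("%d\n", mul_overflows((unsigned long)-1, (unsigned long)-1, &p)); /* 1 */
    return 0;
}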
1309 | | |
1310 | | /* The old fallback, simple version :-) */ |
1311 | | int BN_mod_exp_simple(BIGNUM *r, const BIGNUM *a, const BIGNUM *p, |
1312 | | const BIGNUM *m, BN_CTX *ctx) |
1313 | 535 | { |
1314 | 535 | int i, j, bits, ret = 0, wstart, wend, window; |
1315 | 535 | int start = 1; |
1316 | 535 | BIGNUM *d; |
1317 | | /* Table of variables obtained from 'ctx' */ |
1318 | 535 | BIGNUM *val[TABLE_SIZE]; |
1319 | | |
1320 | 535 | if (BN_get_flags(p, BN_FLG_CONSTTIME) != 0 |
1321 | 535 | || BN_get_flags(a, BN_FLG_CONSTTIME) != 0 |
1322 | 535 | || BN_get_flags(m, BN_FLG_CONSTTIME) != 0) { |
1323 | | /* BN_FLG_CONSTTIME only supported by BN_mod_exp_mont() */ |
1324 | 4 | ERR_raise(ERR_LIB_BN, ERR_R_SHOULD_NOT_HAVE_BEEN_CALLED); |
1325 | 4 | return 0; |
1326 | 4 | } |
1327 | | |
1328 | 531 | if (r == m) { |
1329 | 0 | ERR_raise(ERR_LIB_BN, ERR_R_PASSED_INVALID_ARGUMENT); |
1330 | 0 | return 0; |
1331 | 0 | } |
1332 | | |
1333 | 531 | bits = BN_num_bits(p); |
1334 | 531 | if (bits == 0) { |
1335 | | /* x**0 mod 1, or x**0 mod -1 is still zero. */ |
1336 | 9 | if (BN_abs_is_word(m, 1)) { |
1337 | 2 | ret = 1; |
1338 | 2 | BN_zero(r); |
1339 | 7 | } else { |
1340 | 7 | ret = BN_one(r); |
1341 | 7 | } |
1342 | 9 | return ret; |
1343 | 9 | } |
1344 | | |
1345 | 522 | BN_CTX_start(ctx); |
1346 | 522 | d = BN_CTX_get(ctx); |
1347 | 522 | val[0] = BN_CTX_get(ctx); |
1348 | 522 | if (val[0] == NULL) |
1349 | 0 | goto err; |
1350 | | |
1351 | 522 | if (!BN_nnmod(val[0], a, m, ctx)) |
1352 | 6 | goto err; /* 1 */ |
1353 | 516 | if (BN_is_zero(val[0])) { |
1354 | 7 | BN_zero(r); |
1355 | 7 | ret = 1; |
1356 | 7 | goto err; |
1357 | 7 | } |
1358 | | |
1359 | 509 | window = BN_window_bits_for_exponent_size(bits); |
1360 | 509 | if (window > 1) { |
1361 | 312 | if (!BN_mod_mul(d, val[0], val[0], m, ctx)) |
1362 | 0 | goto err; /* 2 */ |
1363 | 312 | j = 1 << (window - 1); |
1364 | 4.05k | for (i = 1; i < j; i++) { |
1365 | 3.74k | if (((val[i] = BN_CTX_get(ctx)) == NULL) || |
1366 | 3.74k | !BN_mod_mul(val[i], val[i - 1], d, m, ctx)) |
1367 | 0 | goto err; |
1368 | 3.74k | } |
1369 | 312 | } |
1370 | | |
1371 | 509 | start = 1; /* This is used to avoid multiplication etc |
1372 | | * when there is only the value '1' in the |
1373 | | * buffer. */ |
1374 | 509 | wstart = bits - 1; /* The top bit of the window */ |
1375 | 509 | wend = 0; /* The bottom bit of the window */ |
1376 | | |
1377 | 509 | if (r == p) { |
1378 | 0 | BIGNUM *p_dup = BN_CTX_get(ctx); |
1379 | |
1380 | 0 | if (p_dup == NULL || BN_copy(p_dup, p) == NULL) |
1381 | 0 | goto err; |
1382 | 0 | p = p_dup; |
1383 | 0 | } |
1384 | | |
1385 | 509 | if (!BN_one(r)) |
1386 | 0 | goto err; |
1387 | | |
1388 | 103k | for (;;) { |
1389 | 103k | int wvalue; /* The 'value' of the window */ |
1390 | | |
1391 | 103k | if (BN_is_bit_set(p, wstart) == 0) { |
1392 | 68.6k | if (!start) |
1393 | 68.6k | if (!BN_mod_mul(r, r, r, m, ctx)) |
1394 | 0 | goto err; |
1395 | 68.6k | if (wstart == 0) |
1396 | 317 | break; |
1397 | 68.3k | wstart--; |
1398 | 68.3k | continue; |
1399 | 68.6k | } |
1400 | | /* |
1401 | | * We now have wstart on a 'set' bit; we need to work out how big |
1402 | | * a window to use. To do this we scan forward until the last set |
1403 | | * bit before the end of the window. |
1404 | | */ |
1405 | 34.3k | wvalue = 1; |
1406 | 34.3k | wend = 0; |
1407 | 187k | for (i = 1; i < window; i++) { |
1408 | 153k | if (wstart - i < 0) |
1409 | 213 | break; |
1410 | 153k | if (BN_is_bit_set(p, wstart - i)) { |
1411 | 75.8k | wvalue <<= (i - wend); |
1412 | 75.8k | wvalue |= 1; |
1413 | 75.8k | wend = i; |
1414 | 75.8k | } |
1415 | 153k | } |
1416 | | |
1417 | | /* the window is wend + 1 bits wide */ |
1418 | 34.3k | j = wend + 1; |
1419 | | /* square once for each bit in the window */ |
1420 | 34.3k | if (!start) |
1421 | 187k | for (i = 0; i < j; i++) { |
1422 | 154k | if (!BN_mod_mul(r, r, r, m, ctx)) |
1423 | 0 | goto err; |
1424 | 154k | } |
1425 | | |
1426 | | /* wvalue will be an odd number < 2^window */ |
1427 | 34.3k | if (!BN_mod_mul(r, r, val[wvalue >> 1], m, ctx)) |
1428 | 0 | goto err; |
1429 | | |
1430 | | /* move the 'window' down further */ |
1431 | 34.3k | wstart -= wend + 1; |
1432 | 34.3k | start = 0; |
1433 | 34.3k | if (wstart < 0) |
1434 | 192 | break; |
1435 | 34.3k | } |
1436 | 509 | ret = 1; |
1437 | 522 | err: |
1438 | 522 | BN_CTX_end(ctx); |
1439 | 522 | bn_check_top(r); |
1440 | 522 | return ret; |
1441 | 509 | } |
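/*
 * A minimal, self-contained sketch (illustrative only, not OpenSSL code;
 * mulmod, WINDOW and the other names are made up) of the same
 * sliding-window scan that BN_mod_exp_simple() performs above:
 * precompute the odd powers base^1, base^3, ..., then walk the exponent
 * from the top bit, squaring once per bit and multiplying in a
 * precomputed odd power whenever a window of bits has been gathered.
 */
#include <stdint.h>
#include <stdio.h>

#define WINDOW 4                        /* window size in bits */

static uint32_t mulmod(uint32_t a, uint32_t b, uint32_t m)
{
    return (uint32_t)((uint64_t)a * b % m);
}

static uint32_t mod_exp_sliding_window(uint32_t a, uint32_t p, uint32_t m)
{
    uint32_t val[1 << (WINDOW - 1)];    /* odd powers a^1, a^3, ..., a^(2^WINDOW - 1) */
    uint32_t d, r = 1 % m;
    int i, wstart;

    val[0] = a % m;
    d = mulmod(val[0], val[0], m);      /* a^2, used to step between odd powers */
    for (i = 1; i < (1 << (WINDOW - 1)); i++)
        val[i] = mulmod(val[i - 1], d, m);

    for (wstart = 31; wstart >= 0; ) {
        if (((p >> wstart) & 1) == 0) { /* zero bit: just square */
            r = mulmod(r, r, m);
            wstart--;
            continue;
        }
        /* gather the largest window that still ends on a set bit */
        int wvalue = 1, wend = 0;
        for (i = 1; i < WINDOW && wstart - i >= 0; i++) {
            if ((p >> (wstart - i)) & 1) {
                wvalue = (wvalue << (i - wend)) | 1;
                wend = i;
            }
        }
        for (i = 0; i < wend + 1; i++)  /* square once per window bit */
            r = mulmod(r, r, m);
        r = mulmod(r, val[wvalue >> 1], m); /* multiply by the odd power */
        wstart -= wend + 1;
    }
    return r;
}

int main(void)
{
    printf("%u\n", (unsigned)mod_exp_sliding_window(5, 117, 1000003));
    return 0;
}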
1442 | | |
1443 | | /* |
1444 | | * This is an optimized variant of modular exponentiation that performs |
1445 | | * two independent exponentiations in parallel, using the 256-bit |
1446 | | * (AVX512VL) AVX512_IFMA ISA and a 52-bit redundant binary representation. |
1447 | | * If those instructions are not available, or the operand sizes are not |
1448 | | * supported, it falls back to two BN_mod_exp_mont_consttime() calls. |
1449 | | */ |
1450 | | int BN_mod_exp_mont_consttime_x2(BIGNUM *rr1, const BIGNUM *a1, const BIGNUM *p1, |
1451 | | const BIGNUM *m1, BN_MONT_CTX *in_mont1, |
1452 | | BIGNUM *rr2, const BIGNUM *a2, const BIGNUM *p2, |
1453 | | const BIGNUM *m2, BN_MONT_CTX *in_mont2, |
1454 | | BN_CTX *ctx) |
1455 | 9 | { |
1456 | 9 | int ret = 0; |
1457 | | |
1458 | | #ifdef RSAZ_ENABLED |
1459 | | BN_MONT_CTX *mont1 = NULL; |
1460 | | BN_MONT_CTX *mont2 = NULL; |
1461 | | |
1462 | | if (ossl_rsaz_avx512ifma_eligible() && |
1463 | | (((a1->top == 16) && (p1->top == 16) && (BN_num_bits(m1) == 1024) && |
1464 | | (a2->top == 16) && (p2->top == 16) && (BN_num_bits(m2) == 1024)) || |
1465 | | ((a1->top == 24) && (p1->top == 24) && (BN_num_bits(m1) == 1536) && |
1466 | | (a2->top == 24) && (p2->top == 24) && (BN_num_bits(m2) == 1536)) || |
1467 | | ((a1->top == 32) && (p1->top == 32) && (BN_num_bits(m1) == 2048) && |
1468 | | (a2->top == 32) && (p2->top == 32) && (BN_num_bits(m2) == 2048)))) { |
1469 | | |
1470 | | int topn = a1->top; |
1471 | | /* Modulus bits of |m1| and |m2| are equal */ |
1472 | | int mod_bits = BN_num_bits(m1); |
1473 | | |
1474 | | if (bn_wexpand(rr1, topn) == NULL) |
1475 | | goto err; |
1476 | | if (bn_wexpand(rr2, topn) == NULL) |
1477 | | goto err; |
1478 | | |
1479 | | /* Ensure that montgomery contexts are initialized */ |
1480 | | if (in_mont1 != NULL) { |
1481 | | mont1 = in_mont1; |
1482 | | } else { |
1483 | | if ((mont1 = BN_MONT_CTX_new()) == NULL) |
1484 | | goto err; |
1485 | | if (!BN_MONT_CTX_set(mont1, m1, ctx)) |
1486 | | goto err; |
1487 | | } |
1488 | | if (in_mont2 != NULL) { |
1489 | | mont2 = in_mont2; |
1490 | | } else { |
1491 | | if ((mont2 = BN_MONT_CTX_new()) == NULL) |
1492 | | goto err; |
1493 | | if (!BN_MONT_CTX_set(mont2, m2, ctx)) |
1494 | | goto err; |
1495 | | } |
1496 | | |
1497 | | ret = ossl_rsaz_mod_exp_avx512_x2(rr1->d, a1->d, p1->d, m1->d, |
1498 | | mont1->RR.d, mont1->n0[0], |
1499 | | rr2->d, a2->d, p2->d, m2->d, |
1500 | | mont2->RR.d, mont2->n0[0], |
1501 | | mod_bits); |
1502 | | |
1503 | | rr1->top = topn; |
1504 | | rr1->neg = 0; |
1505 | | bn_correct_top(rr1); |
1506 | | bn_check_top(rr1); |
1507 | | |
1508 | | rr2->top = topn; |
1509 | | rr2->neg = 0; |
1510 | | bn_correct_top(rr2); |
1511 | | bn_check_top(rr2); |
1512 | | |
1513 | | goto err; |
1514 | | } |
1515 | | #endif |
1516 | | |
1517 | | /* rr1 = a1^p1 mod m1 */ |
1518 | 9 | ret = BN_mod_exp_mont_consttime(rr1, a1, p1, m1, ctx, in_mont1); |
1519 | | /* rr2 = a2^p2 mod m2 */ |
1520 | 9 | ret &= BN_mod_exp_mont_consttime(rr2, a2, p2, m2, ctx, in_mont2); |
1521 | | |
1522 | | #ifdef RSAZ_ENABLED |
1523 | | err: |
1524 | | if (in_mont2 == NULL) |
1525 | | BN_MONT_CTX_free(mont2); |
1526 | | if (in_mont1 == NULL) |
1527 | | BN_MONT_CTX_free(mont1); |
1528 | | #endif |
1529 | | |
1530 | 9 | return ret; |
1531 | 9 | } |
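/*
 * A hypothetical caller sketch (not part of OpenSSL) using the prototype
 * defined above.  BN_mod_exp_mont_consttime_x2() is an internal function,
 * so a real caller would be built inside the library tree where its
 * internal declaration is visible; the second include below is an
 * assumption about that layout.  Both moduli must be odd, and passing
 * NULL Montgomery contexts makes the function allocate its own.
 */
#include <openssl/bn.h>
#include "crypto/bn.h"          /* assumed internal header declaring the _x2 function */

int demo_two_exponentiations(void)
{
    int ok = 0;
    BN_CTX *ctx = BN_CTX_new();
    BIGNUM *r1 = BN_new(), *a1 = BN_new(), *p1 = BN_new(), *m1 = BN_new();
    BIGNUM *r2 = BN_new(), *a2 = BN_new(), *p2 = BN_new(), *m2 = BN_new();

    if (ctx == NULL || r1 == NULL || a1 == NULL || p1 == NULL || m1 == NULL
        || r2 == NULL || a2 == NULL || p2 == NULL || m2 == NULL)
        goto err;

    /* two small odd moduli purely for illustration */
    if (!BN_set_word(a1, 7) || !BN_set_word(p1, 65537) || !BN_set_word(m1, 101)
        || !BN_set_word(a2, 11) || !BN_set_word(p2, 65537) || !BN_set_word(m2, 103))
        goto err;

    /* r1 = a1^p1 mod m1 and r2 = a2^p2 mod m2, computed in one call */
    ok = BN_mod_exp_mont_consttime_x2(r1, a1, p1, m1, NULL,
                                      r2, a2, p2, m2, NULL, ctx);
 err:
    BN_free(r1); BN_free(a1); BN_free(p1); BN_free(m1);
    BN_free(r2); BN_free(a2); BN_free(p2); BN_free(m2);
    BN_CTX_free(ctx);
    return ok;
}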